Changed layout for organizational coherence.

2018-01-25 15:49:32 -08:00
parent 32d3b6b7cb
commit fc1b41178c
190 changed files with 0 additions and 0 deletions


@@ -0,0 +1,50 @@
import sys
from PyQt5 import QtWidgets, QtCore, QtGui, uic
import signal
import rospy
import time
from cv_bridge import CvBridge, CvBridgeError
import cv2
import qimage2ndarray
import numpy as np
from geometry_msgs.msg import Twist
from sensor_msgs.msg import CompressedImage
#from sensor_msgs.msg import Image, CompressedImage
class DriveTest(QtCore.QThread):
publish_message_signal = QtCore.pyqtSignal()
def __init__(self):
super(DriveTest, self).__init__()
self.not_abort = True
self.message = None
self.publisher = rospy.Publisher("/cmd_vel", Twist, queue_size=10)
rospy.init_node("test")
def run(self):
# TODO: Thread starting message here
while self.not_abort:
self.message = Twist()
self.message.linear.x = 1.0
self.message.angular.z = 1.0
self.publisher.publish(self.message)
self.msleep(100)
# TODO: Thread ending message here
def __publish_message(self):
pass
def setup_start_and_kill_signals(self, start_signal, signals_and_slots_signal, kill_signal):
start_signal.connect(self.start)
kill_signal.connect(self.on_kill_threads_requested__slot)
def on_kill_threads_requested__slot(self):
self.not_abort = False
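
A minimal launcher sketch for this drive test thread (not part of the commit; the Launcher class and the signal wiring below are assumptions). It shows how the start and kill signals the class expects might be emitted from a small Qt application.

# Hypothetical launcher, assuming the DriveTest class above is importable.
import sys
import signal
from PyQt5 import QtWidgets, QtCore

class Launcher(QtCore.QObject):
    start_signal = QtCore.pyqtSignal()
    connect_signal = QtCore.pyqtSignal()
    kill_signal = QtCore.pyqtSignal()

if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    signal.signal(signal.SIGINT, signal.SIG_DFL)  # let Ctrl+C end the Qt event loop

    launcher = Launcher()
    drive_test = DriveTest()
    drive_test.setup_start_and_kill_signals(
        launcher.start_signal, launcher.connect_signal, launcher.kill_signal)

    launcher.start_signal.emit()                         # starts the publish loop thread
    app.aboutToQuit.connect(launcher.kill_signal.emit)   # stop the thread on exit
    sys.exit(app.exec_())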


@@ -0,0 +1,127 @@
#####################################
# Imports
#####################################
# Python native imports
from PyQt5 import QtCore
from os import makedirs, rename, walk, unlink
from os.path import exists, getmtime
from os import environ
import logging
from datetime import datetime
#####################################
# Global Variables
#####################################
MAX_NUM_LOG_FILES = 30
#####################################
# Logger Definition
#####################################
class Logger(QtCore.QObject):
def __init__(self, console_output=True):
super(Logger, self).__init__()
# ########## Local class variables ##########
self.console_output = console_output
# ########## Get the settings instance ##########
self.settings = QtCore.QSettings()
# ########## Get the groundstation instance of the logger ##########
self.logger = logging.getLogger("groundstation")
# ########## Set variables with useful paths ##########
self.appdata_base_directory = environ["HOME"] + "/.groundstation"
self.log_directory = self.appdata_base_directory + "/logs"
self.log_file_path = self.log_directory + "/log.txt"
# ########## Cleanup old log files ##########
self.__cleanup_log_files()
# ########## Set up logger with desired settings ##########
self.__setup_logger()
# ########## Place divider in log file to see new program launch ##########
self.__add_startup_log_buffer_text()
def __setup_logger(self):
# Get the appdata directory and make the log path if it doesn't exist
if not exists(self.log_directory):
makedirs(self.log_directory)
# Set the debugging level
self.logger.setLevel(logging.DEBUG)
# Make a formatter with the log line format wanted
formatter = logging.Formatter(fmt='%(levelname)s : %(asctime)s : %(message)s', datefmt='%m/%d/%y %H:%M:%S')
# Set up a file handler so everything can be saved and attach it to the logger
file_handler = logging.FileHandler(filename=self.log_file_path)
file_handler.setFormatter(formatter)
file_handler.setLevel(logging.DEBUG)
self.logger.addHandler(file_handler)
# Enable console output if requested
if self.console_output:
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
console_handler.setLevel(logging.DEBUG)
self.logger.addHandler(console_handler)
def __cleanup_log_files(self):
# This copies the existing log.txt file to an old version with a datetime stamp
# It then checks if there are too many log files, and if so, deletes the oldest
if exists(self.log_directory):
# Get the number of log files
num_log_files = self.__get_num_files_in_directory(self.log_directory)
# Check that we actually have log files
if num_log_files > 0:
date_time = datetime.now().strftime("%Y%m%d-%H%M%S")
# If we do, move the current logfile to a backup in the format old_log_datetime
if exists(self.log_file_path):
rename(self.log_file_path, self.log_directory + "/old_log_" + date_time + ".txt")
# If we have more than the max log files delete the oldest one
if num_log_files >= MAX_NUM_LOG_FILES:
unlink(self.__get_name_of_oldest_file(self.log_directory))
def __add_startup_log_buffer_text(self):
# Prints a header saying when the program started
self.logger.info("########## Application Starting ##########")
@staticmethod
def __get_name_of_oldest_file(input_path):
oldest_file_path = None
previous_oldest_time = 0
# Walk the directory passed in to get all folders and files
for dir_path, dir_names, file_names in walk(input_path):
# Go through all of the filenames found
for file in file_names:
# Recreate the full path and get the modified time of the file
current_path = dir_path + "/" + file
time = getmtime(current_path)
# Default case for if the variables above have not been initially set
if previous_oldest_time == 0:
previous_oldest_time = time
oldest_file_path = current_path
# Saves the oldest time and file path of the current file if it's older (lower timestamp) than the
# last file saved in the variables
if time < previous_oldest_time:
previous_oldest_time = time
oldest_file_path = current_path
# Returns the path to the oldest file after checking all the files
return oldest_file_path
@staticmethod
def __get_num_files_in_directory(input_path):
# Walk the directory passed in to get all the files
for _, _, file_names in walk(input_path):
# Return the number of files found in the directory
return len(file_names)
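
A short usage sketch, not part of the commit: the Logger only needs to be constructed once at startup; afterwards any module can log through the shared "groundstation" logger by name. The messages below are placeholders.

# Hypothetical usage, assuming the Logger class above is importable and the
# default QSettings organization/application names are acceptable.
import logging

core_logger = Logger(console_output=True)   # creates ~/.groundstation/logs and attaches the handlers

shared_logger = logging.getLogger("groundstation")
shared_logger.info("Ground station starting up")
shared_logger.debug("This line goes to both the console and log.txt")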


@@ -0,0 +1,45 @@
#####################################
# Imports
#####################################
# Python native imports
from PyQt5 import QtCore, QtGui, QtWidgets
import time
import logging
import socket
import rospy
# Custom Imports
#####################################
# Global Variables
#####################################
#####################################
# ROSMasterChecker Class Definition
#####################################
class ROSMasterChecker(QtCore.QThread):
def __init__(self):
super(ROSMasterChecker, self).__init__()
# ########## Class Variables ##########
self.ros_master_present = False
self.start_time = time.time()
self.start()
def run(self):
try:
master = rospy.get_master()
master.getPid()
self.ros_master_present = True
except socket.error:
return
def master_present(self, timeout):
while self.isRunning() and (time.time() - self.start_time) < timeout:
self.msleep(100)
return self.ros_master_present
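
A sketch of the intended startup check, not from the commit: construct the checker (it starts polling immediately) and refuse to continue if no ROS master answers within the timeout. The five-second timeout is an arbitrary example.

# Hypothetical startup guard, assuming ROSMasterChecker above is importable.
import sys

if __name__ == "__main__":
    checker = ROSMasterChecker()           # __init__ calls start(), so polling begins here
    if not checker.master_present(timeout=5):
        print("No ROS master reachable, exiting")
        sys.exit(1)
    # ...continue launching the ground station...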


@@ -0,0 +1,187 @@
#####################################
# Imports
#####################################
# Python native imports
from PyQt5 import QtCore, QtWidgets
import logging
import rospy
# Custom Imports
import RoverVideoReceiver
#####################################
# Global Variables
#####################################
CAMERA_TOPIC_PATH = "/cameras/"
EXCLUDED_CAMERAS = ["zed"]
PRIMARY_LABEL_MAX = (640, 360)
SECONDARY_LABEL_MAX = (256, 144)
TERTIARY_LABEL_MAX = (256, 144)
#####################################
# RoverVideoCoordinator Class Definition
#####################################
class RoverVideoCoordinator(QtCore.QThread):
pixmap_ready_signal = QtCore.pyqtSignal(str)
def __init__(self, shared_objects):
super(RoverVideoCoordinator, self).__init__()
# ########## Reference to class init variables ##########
self.shared_objects = shared_objects
self.right_screen = self.shared_objects["screens"]["right_screen"]
self.primary_video_display_label = self.right_screen.primary_video_label # type:QtWidgets.QLabel
self.secondary_video_display_label = self.right_screen.secondary_video_label # type:QtWidgets.QLabel
self.tertiary_video_display_label = self.right_screen.tertiary_video_label # type:QtWidgets.QLabel
# ########## Get the settings instance ##########
self.settings = QtCore.QSettings()
# ########## Get the groundstation instance of the logger ##########
self.logger = logging.getLogger("groundstation")
# ########## Thread Flags ##########
self.run_thread_flag = True
self.setup_cameras_flag = True
# ########## Class Variables ##########
# Camera variables
self.camera_threads = {}
self.valid_cameras = []
self.disabled_cameras = []
# Setup cameras
self.__get_cameras()
self.__setup_video_threads()
self.primary_label_current_setting = 0
self.secondary_label_current_setting = 0
self.tertiary_label_current_setting = 0
self.primary_label_max_resolution = -1
self.secondary_label_max_resolution = -1
self.tertiary_label_max_resolution = -1
def run(self):
self.logger.debug("Starting Video Coordinator Thread")
self.__set_max_resolutions() # Do this initially so we don't try to disable cameras before they're set up
self.msleep(500)
while self.run_thread_flag:
self.__set_max_resolutions()
# self.__toggle_background_cameras_if_needed()
self.msleep(10)
self.__wait_for_camera_threads()
self.logger.debug("Stopping Video Coordinator Thread")
def __set_max_resolutions(self):
self.primary_label_max_resolution = self.camera_threads[
self.valid_cameras[self.primary_label_current_setting]].current_max_resolution
self.secondary_label_max_resolution = self.camera_threads[
self.valid_cameras[self.secondary_label_current_setting]].current_max_resolution
self.tertiary_label_max_resolution = self.camera_threads[
self.valid_cameras[self.tertiary_label_current_setting]].current_max_resolution
if self.primary_label_max_resolution != PRIMARY_LABEL_MAX:
self.camera_threads[self.valid_cameras[self.primary_label_current_setting]].change_max_resolution_setting(
PRIMARY_LABEL_MAX)
if self.secondary_label_max_resolution != SECONDARY_LABEL_MAX and self.secondary_label_current_setting != self.primary_label_current_setting:
self.camera_threads[self.valid_cameras[self.secondary_label_current_setting]].change_max_resolution_setting(
SECONDARY_LABEL_MAX)
if self.tertiary_label_max_resolution != TERTIARY_LABEL_MAX and self.tertiary_label_current_setting != self.primary_label_current_setting:
self.camera_threads[self.valid_cameras[self.tertiary_label_current_setting]].change_max_resolution_setting(
TERTIARY_LABEL_MAX)
def __toggle_background_cameras_if_needed(self):
enabled = list({self.primary_label_current_setting, self.secondary_label_current_setting,
self.tertiary_label_current_setting})
for camera_index, camera_name in enumerate(self.valid_cameras):
if camera_index not in enabled:
self.camera_threads[camera_name].toggle_video_display()
self.disabled_cameras.append(camera_index)
elif camera_index in self.disabled_cameras:
self.camera_threads[camera_name].toggle_video_display()
self.disabled_cameras.remove(camera_index)
def __get_cameras(self):
topics = rospy.get_published_topics(CAMERA_TOPIC_PATH)
names = []
for topics_group in topics:
main_topic = topics_group[0]
camera_name = main_topic.split("/")[2]
names.append(camera_name)
names = set(names)
for camera in EXCLUDED_CAMERAS:
if camera in names:
names.remove(camera)
self.valid_cameras = list(names)
def __setup_video_threads(self):
for camera in self.valid_cameras:
self.camera_threads[camera] = RoverVideoReceiver.RoverVideoReceiver(camera)
def __wait_for_camera_threads(self):
for camera in self.camera_threads:
self.camera_threads[camera].wait()
def connect_signals_and_slots(self):
for thread in self.camera_threads:
self.camera_threads[thread].image_ready_signal.connect(self.pixmap_ready__slot)
self.primary_video_display_label.mousePressEvent = self.__change_display_source_primary_mouse_press_event
self.secondary_video_display_label.mousePressEvent = self.__change_display_source_secondary_mouse_press_event
self.tertiary_video_display_label.mousePressEvent = self.__change_display_source_tertiary_mouse_press_event
def setup_signals(self, start_signal, signals_and_slots_signal, kill_signal):
start_signal.connect(self.start)
signals_and_slots_signal.connect(self.connect_signals_and_slots)
kill_signal.connect(self.on_kill_threads_requested__slot)
for camera in self.camera_threads:
self.camera_threads[camera].setup_signals(start_signal, signals_and_slots_signal, kill_signal)
def __change_display_source_primary_mouse_press_event(self, event):
if event.button() == QtCore.Qt.LeftButton:
self.primary_label_current_setting = (self.primary_label_current_setting + 1) % len(self.valid_cameras)
elif event.button() == QtCore.Qt.RightButton:
self.camera_threads[self.valid_cameras[self.primary_label_current_setting]].toggle_video_display()
def __change_display_source_secondary_mouse_press_event(self, event):
if event.button() == QtCore.Qt.LeftButton:
self.secondary_label_current_setting = (self.secondary_label_current_setting + 1) % len(self.valid_cameras)
elif event.button() == QtCore.Qt.RightButton:
self.camera_threads[self.valid_cameras[self.secondary_label_current_setting]].toggle_video_display()
def __change_display_source_tertiary_mouse_press_event(self, event):
if event.button() == QtCore.Qt.LeftButton:
self.tertiary_label_current_setting = (self.tertiary_label_current_setting + 1) % len(self.valid_cameras)
elif event.button() == QtCore.Qt.RightButton:
self.camera_threads[self.valid_cameras[self.tertiary_label_current_setting]].toggle_video_display()
def pixmap_ready__slot(self, camera):
if self.valid_cameras[self.primary_label_current_setting] == camera:
self.primary_video_display_label.setPixmap(self.camera_threads[camera].pixmap_1280x720_image)
if self.valid_cameras[self.secondary_label_current_setting] == camera:
self.secondary_video_display_label.setPixmap(self.camera_threads[camera].pixmap_640x360_image)
if self.valid_cameras[self.tertiary_label_current_setting] == camera:
self.tertiary_video_display_label.setPixmap(self.camera_threads[camera].pixmap_640x360_image)
def on_kill_threads_requested__slot(self):
self.run_thread_flag = False
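
For orientation, a sketch of the slice of shared_objects the coordinator reads. The label attribute names come from the code above; the RightScreen stand-in class is an assumption, and a running QApplication plus a reachable ROS master with camera topics must already exist.

# Hypothetical illustration of the expected shared_objects layout; in the real
# ground station the right screen and its labels come from a Qt Designer UI.
from PyQt5 import QtWidgets

class RightScreen(QtWidgets.QWidget):
    def __init__(self):
        super(RightScreen, self).__init__()
        self.primary_video_label = QtWidgets.QLabel(self)    # shown at up to 640x360
        self.secondary_video_label = QtWidgets.QLabel(self)  # shown at up to 256x144
        self.tertiary_video_label = QtWidgets.QLabel(self)   # shown at up to 256x144

shared_objects = {"screens": {"right_screen": RightScreen()}}
coordinator = RoverVideoCoordinator(shared_objects)  # queries rospy for /cameras/ topics on construction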


@@ -0,0 +1,233 @@
#####################################
# Imports
#####################################
# Python native imports
from PyQt5 import QtCore, QtGui, QtWidgets
import logging
import cv2
import numpy as np
import qimage2ndarray
import rospy
from cv_bridge import CvBridge
from sensor_msgs.msg import CompressedImage
# Custom Imports
#####################################
# Global Variables
#####################################
CAMERA_TOPIC_PATH = "/cameras/"
#####################################
# RoverVideoReceiver Class Definition
#####################################
class RoverVideoReceiver(QtCore.QThread):
image_ready_signal = QtCore.pyqtSignal(str)
def __init__(self, camera_name):
super(RoverVideoReceiver, self).__init__()
# ########## Reference to class init variables ##########
self.camera_name = camera_name
# ########## Get the settings instance ##########
self.settings = QtCore.QSettings()
# ########## Get the groundstation instance of the logger ##########
self.logger = logging.getLogger("groundstation")
# ########## Thread Flags ##########
self.run_thread_flag = True
# ########## Class Variables ##########
self.camera_title_name = self.camera_name.replace("_", " ").title()
self.topic_base_path = CAMERA_TOPIC_PATH + self.camera_name
self.camera_topics = {}
self.current_max_resolution = None
self.current_camera_settings = {
"resolution": None,
"quality_setting": 80,
}
self.previous_camera_settings = self.current_camera_settings.copy()
self.temp_topic_path = CAMERA_TOPIC_PATH + self.camera_name + "/image_640x360/compressed"
# Subscription variables
self.video_subscriber = None # type: rospy.Subscriber
# Image variables
self.raw_image = None
self.opencv_1280x720_image = None
self.opencv_640x360_image = None
self.pixmap_1280x720_image = None
self.pixmap_640x360_image = None
# Processing variables
self.bridge = CvBridge() # OpenCV ROS Video Data Processor
self.video_enabled = True
self.new_frame = False
# Text Drawing Variables
self.font = cv2.FONT_HERSHEY_TRIPLEX
self.font_thickness = 1
self.font_baseline = 0
self.camera_name_opencv_1280x720_image = None
self.camera_name_opencv_640x360_image = None
self.__create_camera_name_opencv_images()
self.__get_camera_available_resolutions()
def run(self):
self.logger.debug("Starting \"%s\" Camera Thread" % self.camera_title_name)
while self.run_thread_flag:
if self.video_enabled:
self.__show_video_enabled()
else:
self.__show_video_disabled()
self.msleep(10)
self.logger.debug("Stopping \"%s\" Camera Thread" % self.camera_title_name)
def __get_camera_available_resolutions(self):
topics = rospy.get_published_topics(self.topic_base_path)
resolution_options = []
for topics_group in topics:
main_topic = topics_group[0]
# Third path segment is the resolution stream name, e.g. "image_640x360"
resolution_name = main_topic.split("/")[3]
resolution_options.append(resolution_name)
resolution_options = list(set(resolution_options))
for resolution in resolution_options:
# Creates a tuple in (width, height) format that we can use as the key
group = int(resolution.split("image_")[1].split("x")[0]), int(resolution.split("image_")[1].split("x")[1])
self.camera_topics[group] = self.topic_base_path + "/" + resolution + "/compressed"
def __update_camera_subscription_and_settings(self):
if self.current_camera_settings["resolution"] != self.previous_camera_settings["resolution"]:
if self.video_subscriber:
self.video_subscriber.unregister()
new_topic = self.camera_topics[self.current_camera_settings["resolution"]]
self.video_subscriber = rospy.Subscriber(new_topic, CompressedImage, self.__image_data_received_callback)
self.new_frame = False
while not self.new_frame:
self.msleep(10)
self.previous_camera_settings["resolution"] = self.current_camera_settings["resolution"]
def __show_video_enabled(self):
self.__update_camera_subscription_and_settings()
if self.new_frame and self.current_camera_settings["resolution"]:
# if self.raw_image:
opencv_image = self.bridge.compressed_imgmsg_to_cv2(self.raw_image, "rgb8")
self.__create_final_pixmaps(opencv_image)
self.image_ready_signal.emit(self.camera_name)
self.new_frame = False
def __show_video_disabled(self):
blank_frame = np.zeros((720, 1280, 3), np.uint8)
self.__create_final_pixmaps(blank_frame)
self.image_ready_signal.emit(self.camera_name)
def __create_final_pixmaps(self, opencv_image):
height, width, _ = opencv_image.shape
if (width, height) != (1280, 720):
self.opencv_1280x720_image = cv2.resize(opencv_image, (1280, 720))
else:
self.opencv_1280x720_image = opencv_image
if (width, height) != (640, 360):
self.opencv_640x360_image = cv2.resize(opencv_image, (640, 360))
else:
self.opencv_640x360_image = opencv_image
self.__apply_camera_name(self.opencv_1280x720_image, self.camera_name_opencv_1280x720_image)
self.__apply_camera_name(self.opencv_640x360_image, self.camera_name_opencv_640x360_image)
self.pixmap_1280x720_image = QtGui.QPixmap.fromImage(qimage2ndarray.array2qimage(
self.opencv_1280x720_image))
self.pixmap_640x360_image = QtGui.QPixmap.fromImage(qimage2ndarray.array2qimage(
self.opencv_640x360_image))
def __image_data_received_callback(self, raw_image):
self.raw_image = raw_image
self.new_frame = True
def __create_camera_name_opencv_images(self):
camera_name_text_width, camera_name_text_height = \
cv2.getTextSize(self.camera_title_name, self.font, self.font_thickness, self.font_baseline)[0]
camera_name_width_buffered = camera_name_text_width + 10
camera_name_height_buffered = camera_name_text_height + 20
camera_name_opencv_image = np.zeros(
(camera_name_height_buffered, camera_name_width_buffered, 3), np.uint8)
cv2.putText(
camera_name_opencv_image,
self.camera_title_name,
((camera_name_width_buffered - camera_name_text_width) / 2, int((camera_name_height_buffered * 2) / 3)),
self.font,
1,
(255, 255, 255),
1,
cv2.LINE_AA)
self.camera_name_opencv_1280x720_image = \
cv2.resize(camera_name_opencv_image, (camera_name_width_buffered, camera_name_height_buffered))
self.camera_name_opencv_640x360_image = \
cv2.resize(camera_name_opencv_image, (camera_name_width_buffered / 2, camera_name_height_buffered / 2))
def change_max_resolution_setting(self, resolution_max):
self.current_max_resolution = resolution_max
self.current_camera_settings["resolution"] = resolution_max
def toggle_video_display(self):
if self.video_enabled:
if self.video_subscriber:
self.video_subscriber.unregister()
self.new_frame = True
self.video_enabled = False
else:
new_topic = self.camera_topics[self.current_camera_settings["resolution"]]
self.video_subscriber = rospy.Subscriber(new_topic, CompressedImage, self.__image_data_received_callback)
# self.video_subscriber = rospy.Subscriber(self.temp_topic_path, CompressedImage,
# self.__image_data_received_callback)
self.video_enabled = True
def connect_signals_and_slots(self):
pass
def setup_signals(self, start_signal, signals_and_slots_signal, kill_signal):
start_signal.connect(self.start)
signals_and_slots_signal.connect(self.connect_signals_and_slots)
kill_signal.connect(self.on_kill_threads_requested__slot)
def on_kill_threads_requested__slot(self):
self.run_thread_flag = False
@staticmethod
def __apply_camera_name(opencv_image, font_opencv_image):
opencv_image[0:0 + font_opencv_image.shape[0], 0:0 + font_opencv_image.shape[1]] = \
font_opencv_image
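
The receiver discovers its resolution options purely from the published topic names; a small standalone demonstration of that mapping follows. The "chassis" camera and topic strings are examples, not from the commit.

# Hypothetical demonstration of the topic naming convention parsed by
# __get_camera_available_resolutions(); runnable without ROS.
example_topics = [
    "/cameras/chassis/image_1280x720/compressed",
    "/cameras/chassis/image_640x360/compressed",
]

camera_topics = {}
for topic in example_topics:
    resolution_name = topic.split("/")[3]                        # e.g. "image_640x360"
    width, height = resolution_name.split("image_")[1].split("x")
    camera_topics[(int(width), int(height))] = topic

print(camera_topics[(640, 360)])  # -> /cameras/chassis/image_640x360/compressed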


@@ -0,0 +1,134 @@
import sys
from PyQt5 import QtWidgets, QtCore, QtGui, uic
import signal
import rospy
import time
from cv_bridge import CvBridge, CvBridgeError
import cv2
import qimage2ndarray
import numpy as np
from geometry_msgs.msg import Twist
from sensor_msgs.msg import CompressedImage
#from sensor_msgs.msg import Image, CompressedImage
class VideoTest(QtCore.QThread):
publish_message_signal = QtCore.pyqtSignal()
image_ready_signal = QtCore.pyqtSignal()
def __init__(self, shared_objects, screen_label, video_size=None, sub_path=None):
super(VideoTest, self).__init__()
self.not_abort = True
self.shared_objects = shared_objects
self.right_screen_label = screen_label # type: QtWidgets.QLabel
self.video_size = video_size
self.message = None
self.subscriber = rospy.Subscriber(sub_path, CompressedImage, self.__receive_message)
self.raw_image = None
self.cv_image = None
self.pixmap = None
self.bridge = CvBridge()
# self.bridge.com
self.new_frame = False
self.frame_count = 0
self.last_frame_time = time.time()
self.fps = 0
self.name = sub_path.split("/")[2].replace("_", " ").title()
self.font = cv2.FONT_HERSHEY_TRIPLEX
thickness = 1
baseline = 0
text_size = cv2.getTextSize(self.name, self.font, thickness, baseline)
print(text_size)
text_width, text_height = text_size[0]
width = text_width + 10
height = text_height + 20
self.blank_image = np.zeros((height, width, 3), np.uint8)
cv2.putText(self.blank_image, self.name, ((width - text_width) / 2, int((height * 2) / 3)), self.font, 1, (255, 255, 255), 1, cv2.LINE_AA)
self.blank_image = cv2.resize(self.blank_image, (width / 2, height / 2))
def run(self):
# TODO: Thread starting message here
y_offset = 0
x_offset = 0
while self.not_abort:
if self.raw_image and self.new_frame:
self.cv_image = self.bridge.compressed_imgmsg_to_cv2(self.raw_image, "rgb8")
self.cv_image = self.__show_fps(self.cv_image)
self.cv_image[y_offset:y_offset + self.blank_image.shape[0], x_offset:x_offset + self.blank_image.shape[1]] = self.blank_image
if self.video_size:
self.cv_image = cv2.resize(self.cv_image, self.video_size)
self.pixmap = QtGui.QPixmap.fromImage(qimage2ndarray.array2qimage(self.cv_image))
self.image_ready_signal.emit()
self.new_frame = False
if (time.time() - self.last_frame_time) >= 0.5:
self.fps = int(self.frame_count / (time.time() - self.last_frame_time))
self.last_frame_time = time.time()
self.frame_count = 0
self.msleep(18)
# TODO: Thread ending message here
def __show_fps(self, image):
thickness = 1
baseline = 0
fps_string = str(self.fps)
text_size = cv2.getTextSize(fps_string, self.font, thickness, baseline)
text_width, text_height = text_size[0]
width = text_width + 10
height = text_height + 20
fps_image = np.zeros((height, width, 3), np.uint8)
cv2.putText(fps_image, fps_string, ((width - text_width) / 2, int((height * 2) / 3)), self.font, 1, (255, 255, 255), 1, cv2.LINE_AA)
fps_image = cv2.resize(fps_image, (width / 2, height / 2))
y_offset = 0
x_offset = (image.shape[1] - fps_image.shape[1]) / 2
image[y_offset:y_offset + fps_image.shape[0], x_offset:x_offset + fps_image.shape[1]] = fps_image
return image
def __on_image_update_ready(self):
self.right_screen_label.setPixmap(self.pixmap)
def __receive_message(self, message):
self.raw_image = message
self.new_frame = True
self.frame_count += 1
def connect_signals_and_slots(self):
self.image_ready_signal.connect(self.__on_image_update_ready)
def setup_signals(self, start_signal, signals_and_slots_signal, kill_signal):
start_signal.connect(self.start)
signals_and_slots_signal.connect(self.connect_signals_and_slots)
kill_signal.connect(self.on_kill_threads_requested__slot)
def on_kill_threads_requested__slot(self):
self.not_abort = False