Mirror of https://github.com/OSURoboticsClub/Rover_2017_2018.git (synced 2025-11-08 10:11:14 +00:00)
Got changing video sources working, along with showing the name of the active video feed
RoverVideoCoordinator.py

@@ -2,16 +2,10 @@
 # Imports
 #####################################
 # Python native imports
-from PyQt5 import QtCore, QtGui, QtWidgets
+from PyQt5 import QtCore, QtWidgets
 import logging
-import cv2
-import numpy as np
-import qimage2ndarray
-import pprint
 
 import rospy
-from cv_bridge import CvBridge
-from sensor_msgs.msg import CompressedImage
 
 # Custom Imports
 import RoverVideoReceiver
@@ -60,12 +54,9 @@ class RoverVideoCoordinator(QtCore.QThread):
         self.__setup_video_threads()
 
         self.primary_label_current_setting = 0
-        self.secondary_label_current_setting = 1
+        self.secondary_label_current_setting = 0
         self.tertiary_label_current_setting = 0
 
-        self.camera_data = {x: {"new_opencv": False} for x in self.valid_cameras}
-        print self.camera_data
-
     def run(self):
         self.logger.debug("Starting Video Coordinator Thread")
 
@@ -107,6 +98,10 @@ class RoverVideoCoordinator(QtCore.QThread):
         for thread in self.camera_threads:
             self.camera_threads[thread].image_ready_signal.connect(self.pixmap_ready__slot)
 
+        self.primary_video_display_label.mousePressEvent = self.__change_display_source_primary_mouse_press_event
+        self.secondary_video_display_label.mousePressEvent = self.__change_display_source_secondary_mouse_press_event
+        self.tertiary_video_display_label.mousePressEvent = self.__change_display_source_tertiary_mouse_press_event
+
     def setup_signals(self, start_signal, signals_and_slots_signal, kill_signal):
         start_signal.connect(self.start)
         signals_and_slots_signal.connect(self.connect_signals_and_slots)
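The three added lines above rebind mousePressEvent on each video QLabel, so Qt delivers click events straight to the coordinator without subclassing QLabel. A minimal standalone sketch of the same pattern (the label text and handler below are illustrative, not taken from the rover code):

    import sys
    from PyQt5 import QtCore, QtWidgets

    def on_label_clicked(event):
        # QMouseEvent.button() distinguishes left and right presses,
        # mirroring the coordinator's __change_display_source_* handlers.
        if event.button() == QtCore.Qt.LeftButton:
            print("left click: cycle to the next camera")
        elif event.button() == QtCore.Qt.RightButton:
            print("right click: toggle this feed on or off")

    if __name__ == "__main__":
        app = QtWidgets.QApplication(sys.argv)
        label = QtWidgets.QLabel("video frame goes here")
        label.mousePressEvent = on_label_clicked  # replace the default handler
        label.show()
        sys.exit(app.exec_())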
@@ -115,6 +110,24 @@ class RoverVideoCoordinator(QtCore.QThread):
         for camera in self.camera_threads:
             self.camera_threads[camera].setup_signals(start_signal, signals_and_slots_signal, kill_signal)
 
+    def __change_display_source_primary_mouse_press_event(self, event):
+        if event.button() == QtCore.Qt.LeftButton:
+            self.primary_label_current_setting = (self.primary_label_current_setting + 1) % len(self.valid_cameras)
+        elif event.button() == QtCore.Qt.RightButton:
+            self.camera_threads[self.valid_cameras[self.primary_label_current_setting]].toggle_video_display()
+
+    def __change_display_source_secondary_mouse_press_event(self, event):
+        if event.button() == QtCore.Qt.LeftButton:
+            self.secondary_label_current_setting = (self.secondary_label_current_setting + 1) % len(self.valid_cameras)
+        elif event.button() == QtCore.Qt.RightButton:
+            self.camera_threads[self.valid_cameras[self.secondary_label_current_setting]].toggle_video_display()
+
+    def __change_display_source_tertiary_mouse_press_event(self, event):
+        if event.button() == QtCore.Qt.LeftButton:
+            self.tertiary_label_current_setting = (self.tertiary_label_current_setting + 1) % len(self.valid_cameras)
+        elif event.button() == QtCore.Qt.RightButton:
+            self.camera_threads[self.valid_cameras[self.tertiary_label_current_setting]].toggle_video_display()
+
     def pixmap_ready__slot(self, camera):
         if self.valid_cameras[self.primary_label_current_setting] == camera:
             self.primary_video_display_label.setPixmap(self.camera_threads[camera].pixmap_1280x720_image)
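Left-clicking a label advances that label's camera index with modular arithmetic, so the selection wraps back to the first camera after the last one; right-clicking toggles the currently selected feed. The wrap-around is just the following, sketched here with made-up camera names:

    valid_cameras = ["chassis", "undercarriage", "end_effector"]  # illustrative names only
    current_setting = 0

    def advance_camera():
        # Step to the next camera and wrap around at the end of the list.
        global current_setting
        current_setting = (current_setting + 1) % len(valid_cameras)
        return valid_cameras[current_setting]

    print(advance_camera())  # undercarriage
    print(advance_camera())  # end_effector
    print(advance_camera())  # chassis (wrapped around)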
RoverVideoReceiver.py

@@ -7,7 +7,6 @@ import logging
 import cv2
 import numpy as np
 import qimage2ndarray
-import pprint
 
 import rospy
 from cv_bridge import CvBridge
@@ -64,8 +63,15 @@ class RoverVideoReceiver(QtCore.QThread):
         self.video_enabled = True
         self.new_frame = False
 
-        # Assign local callbacks
-        self.__set_local_callbacks()
+        # Text Drawing Variables
+        self.font = cv2.FONT_HERSHEY_TRIPLEX
+        self.font_thickness = 1
+        self.font_baseline = 0
+
+        self.camera_name_opencv_1280x720_image = None
+        self.camera_name_opencv_640x360_image = None
+
+        self.__create_camera_name_opencv_images()
 
     def run(self):
         self.logger.debug("Starting \"%s\" Camera Thread" % self.camera_title_name)
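These new attributes feed the name-banner rendering added in the next hunk. For reference, cv2.getTextSize returns a pair ((text_width, text_height), baseline), which is why __create_camera_name_opencv_images below indexes the result with [0] before unpacking. A quick sketch, using a placeholder title string:

    import cv2

    font = cv2.FONT_HERSHEY_TRIPLEX
    # getTextSize(text, fontFace, fontScale, thickness) -> ((width, height), baseline)
    (text_width, text_height), baseline = cv2.getTextSize("Chassis Camera", font, 1, 1)
    print(text_width, text_height, baseline)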
@@ -84,43 +90,72 @@ class RoverVideoReceiver(QtCore.QThread):
         if self.new_frame:
             opencv_image = self.bridge.compressed_imgmsg_to_cv2(self.raw_image, "rgb8")
 
-            height, width, _ = opencv_image.shape
-
-            if width != 1280 and height != 720:
-                self.opencv_1280x720_image = cv2.resize(opencv_image, (1280, 720))
-            else:
-                self.opencv_1280x720_image = opencv_image
-
-            if width != 640 and height != 360:
-                self.opencv_640x360_image = cv2.resize(opencv_image, (640, 360))
-            else:
-                self.opencv_640x360_image = opencv_image
-
-            self.pixmap_1280x720_image = QtGui.QPixmap.fromImage(qimage2ndarray.array2qimage(
-                self.opencv_1280x720_image))
-            self.pixmap_640x360_image = QtGui.QPixmap.fromImage(qimage2ndarray.array2qimage(
-                self.opencv_640x360_image))
+            self.__create_final_pixmaps(opencv_image)
 
             self.image_ready_signal.emit(self.camera_name)
             self.new_frame = False
 
     def __show_video_disabled(self):
-        if self.new_frame:
-            fps_image = np.zeros((720, 1280, 3), np.uint8)
-
-            self.pixmap = QtGui.QPixmap.fromImage(qimage2ndarray.array2qimage(fps_image))
-            self.image_ready_signal.emit(self.camera_name)
-            self.new_frame = False
+        blank_frame = np.zeros((720, 1280, 3), np.uint8)
+
+        self.__create_final_pixmaps(blank_frame)
+
+        self.image_ready_signal.emit(self.camera_name)
+
+    def __create_final_pixmaps(self, opencv_image):
+        height, width, _ = opencv_image.shape
+
+        if width != 1280 and height != 720:
+            self.opencv_1280x720_image = cv2.resize(opencv_image, (1280, 720))
+        else:
+            self.opencv_1280x720_image = opencv_image
+
+        if width != 640 and height != 360:
+            self.opencv_640x360_image = cv2.resize(opencv_image, (640, 360))
+        else:
+            self.opencv_640x360_image = opencv_image
+
+        self.__apply_camera_name(self.opencv_1280x720_image, self.camera_name_opencv_1280x720_image)
+        self.__apply_camera_name(self.opencv_640x360_image, self.camera_name_opencv_640x360_image)
+
+        self.pixmap_1280x720_image = QtGui.QPixmap.fromImage(qimage2ndarray.array2qimage(
+            self.opencv_1280x720_image))
+        self.pixmap_640x360_image = QtGui.QPixmap.fromImage(qimage2ndarray.array2qimage(
+            self.opencv_640x360_image))
 
     def __image_data_received_callback(self, raw_image):
         self.raw_image = raw_image
         self.new_frame = True
 
-    def __set_local_callbacks(self):
-        pass
-        # self.video_display_label.mouseDoubleClickEvent = self.toggle_video_display
-
-    def toggle_video_display(self, _):
+    def __create_camera_name_opencv_images(self):
+        camera_name_text_width, camera_name_text_height = \
+            cv2.getTextSize(self.camera_title_name, self.font, self.font_thickness, self.font_baseline)[0]
+
+        camera_name_width_buffered = camera_name_text_width + 10
+        camera_name_height_buffered = camera_name_text_height + 20
+
+        camera_name_opencv_image = np.zeros(
+            (camera_name_height_buffered, camera_name_width_buffered, 3), np.uint8)
+
+        cv2.putText(
+            camera_name_opencv_image,
+            self.camera_title_name,
+            ((camera_name_width_buffered - camera_name_text_width) / 2, int((camera_name_height_buffered * 2) / 3)),
+            self.font,
+            1,
+            (255, 255, 255),
+            1,
+            cv2.LINE_AA)
+
+        self.camera_name_opencv_1280x720_image = \
+            cv2.resize(camera_name_opencv_image, (camera_name_width_buffered, camera_name_height_buffered))
+
+        self.camera_name_opencv_640x360_image = \
+            cv2.resize(camera_name_opencv_image, (camera_name_width_buffered / 2, camera_name_height_buffered / 2))
+
+    def toggle_video_display(self):
         if self.video_enabled:
             self.video_subscriber.unregister()
             self.new_frame = True
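The overall overlay technique: render the camera's title once into a small black banner image, then stamp that banner into the top-left corner of every outgoing frame (the __apply_camera_name helper added in the final hunk below does the stamping with a NumPy slice assignment). Note that the divisions in the putText origin and in the half-size banner resize rely on Python 2 integer division; under Python 3 they would need // since cv2 wants integer coordinates and sizes. A condensed, self-contained sketch of the same idea, using a placeholder title and a blank frame:

    import cv2
    import numpy as np

    font = cv2.FONT_HERSHEY_TRIPLEX
    name = "Chassis Camera"  # placeholder title

    # Render the name once into its own small banner image.
    (text_w, text_h), _ = cv2.getTextSize(name, font, 1, 1)
    banner = np.zeros((text_h + 20, text_w + 10, 3), np.uint8)
    cv2.putText(banner, name, (5, int((text_h + 20) * 2 / 3)), font, 1, (255, 255, 255), 1, cv2.LINE_AA)

    # Stamp the pre-rendered banner onto a frame with a slice assignment.
    frame = np.zeros((720, 1280, 3), np.uint8)
    frame[0:banner.shape[0], 0:banner.shape[1]] = banner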
@@ -140,3 +175,8 @@ class RoverVideoReceiver(QtCore.QThread):
 
     def on_kill_threads_requested__slot(self):
         self.run_thread_flag = False
+
+    @staticmethod
+    def __apply_camera_name(opencv_image, font_opencv_image):
+        opencv_image[0:0 + font_opencv_image.shape[0], 0:0 + font_opencv_image.shape[1]] = \
+            font_opencv_image
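For completeness, the labeled frames are handed to Qt by converting the RGB ndarray to a QImage and then a QPixmap via qimage2ndarray, as __create_final_pixmaps does. A minimal sketch of that conversion, assuming qimage2ndarray is installed and a QApplication (required before creating any QPixmap) is running:

    import sys

    import numpy as np
    import qimage2ndarray
    from PyQt5 import QtGui, QtWidgets

    app = QtWidgets.QApplication(sys.argv)  # QPixmap needs a live Qt application

    frame = np.zeros((360, 640, 3), np.uint8)  # stand-in for a decoded camera frame
    pixmap = QtGui.QPixmap.fromImage(qimage2ndarray.array2qimage(frame))
    print(pixmap.width(), pixmap.height())  # 640 360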