Quellcode durchsuchen

Merge branch 'camera' of dk/kraken into master

dk vor 1 Jahr
Ursprung
Commit
f5144b6a36
4 geänderte Dateien mit 108 neuen und 263 gelöschten Zeilen
  1. 47 261
      backend/device/video.py
  2. 61 0
      backend/tests/test_video.py
  3. 0 1
      environment.yml
  4. 0 1
      requirements.txt

+ 47 - 261
backend/device/video.py

@@ -1,11 +1,11 @@
 """ Common function for camera based method """
-from fractions import Fraction
 import json
 import logging
 import os
 import time
+from datetime import datetime
+import threading
 
-import av
 import cv2
 import numpy as np
 
@@ -14,268 +14,54 @@ from settings.config import settings
 logger = logging.getLogger(__name__)
 
 
-class VideoAnalyser(object):
-    """摄像头/视频数据分析的基类, 实现逐帧分析
-
-    Attributes:
-        t_start_save_video (float): 开始保存视频的时间,当使用av保存视频时需要此参数计算pts
-        out_stream: 使用opencv保存数据时使用
-        container:使用av保存视频时使用
-        stream: 使用av保存时使用
-    """
-
-    def __init__(self, camera_id=0, input_video=None):
-        if not input_video:
-            # For webcam input:
-            self.camera_id = camera_id
-            self.cap = cv2.VideoCapture(camera_id)
-            # TODO: cv2.CAP_DSHOW 能加速摄像头开启,但会导致视频保存出错?
-            # self.cap = cv2.VideoCapture(
-            #     camera_id) if camera_id == 0 else cv2.VideoCapture(
-            #         camera_id, cv2.CAP_DSHOW)  # 调用外部摄像头需设置cv2.CAP_DSHOW
-            self.is_camera = True
-        else:
-            self.cap = cv2.VideoCapture(input_video)
-            self.is_camera = False
-
-        # self.cap.setExceptionMode(True)
-        # opencv 4.6 的自动旋转错误,采用自定义的旋转方式
-        # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0.0)
-        # self.rotate_code = self.check_rotation(
-        #     self.cap.get(cv2.CAP_PROP_ORIENTATION_META))
-        self.rotate_code = None
-        self.t_start_save_video = None
-
-        self.save_with_av = False
-        self.out_stream = None
-        self.container = None
-        self.stream = None
-        self.previous_pts = 0
-
-    def __del__(self):
-        # self.cap.release()
-        # logger.info('Camera(%s) closed.', self.__class__.__name__)
-        # if self.out_stream:
-        #     self.out_stream.release()
-        # if self.container and self.t_start_save_video:
-        #     self.release_container()
-        self.close()
-
-    def get_save_fps(self):
-        return int(self.cap.get(cv2.CAP_PROP_FPS))
-
-    def open_camera(self):
-        success = self.cap.open(self.camera_id)
-        if success:
-            logger.info('Open camera(%s) succeed.', self.__class__.__name__)
-        else:
-            logger.error('Open camera(%s) failed.', self.__class__.__name__)
-        # if camera_id == 0:
-        #     self.cap.open(camera_id)
-        # else:
-        #     self.cap.open(camera_id, cv2.CAP_DSHOW)
-
-    def close(self, only_save: bool = False):
-        """关闭摄像头与结束视频保存
-
-        如果only_save为true,则结束视频保存,但不关闭摄像头;否则关闭摄像头与结束视频保存
-
-        Args:
-            only_save (bool, optional): 是否仅结束视频保存. Defaults to False.
-        """
-        if not only_save:
-            self.cap.release()
-            logger.info('Camera(%s) closed.', self.__class__.__name__)
-        if self.out_stream:
-            self.out_stream.release()
-            self.out_stream = None
-        self.release_container()
-        self.container = None
-
-    def set_output_video(self, output_video, save_with_av=False):
-        """ 设置输出视频
-
-        使用摄像头的情况下,必须在开摄像头之后调用,否则参数获取失败,无法正确设置输出视频
-
-        Args:
-            output_video (string): 要保存的视频文件路径
-            save_with_av (bool, optional): 使用av库进行保存
-        """
-        self.save_with_av = save_with_av
-        if not self.save_with_av:
-            # video info
-            # fourcc = int(self.cap.get(cv2.CAP_PROP_FOURCC))
-            # NOTICE: 这里需用 avc1 否则前端无法正常显示
-            fourcc = cv2.VideoWriter_fourcc(*'avc1')
-            fps = self.get_save_fps()
-            frame_size = (int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
-                        int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
-
-            # file to save video
-            self.out_stream = cv2.VideoWriter(output_video, fourcc, fps,
-                                              frame_size)
-        else:
-            assert self.is_camera,\
-                'Do not save video with av when process recorded video!'
-            self.container = av.open(output_video, mode='w')
-            # NOTICE: 这里需使用 h264, 否则前端无法正常显示
-            self.stream = self.container.add_stream(
-                'h264', rate=int(self.cap.get(cv2.CAP_PROP_FPS)))  # alibi frame rate
-            self.stream.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-            self.stream.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-            self.stream.pix_fmt = 'yuv420p'
-            self.stream.codec_context.time_base = Fraction(
-                1, int(self.cap.get(cv2.CAP_PROP_FPS)))
-
-    def is_ok(self):
-        if self.cap and self.cap.isOpened():
-            return True
-        else:
-            logger.debug('Camera not ready!!!')
-            return False
-
-    def check_rotation(self, rotate):
-        rotate_code = None
-        if int(rotate) == 270:
-            rotate_code = cv2.ROTATE_90_CLOCKWISE
-        elif int(rotate) == 180:
-            rotate_code = cv2.ROTATE_180
-        elif int(rotate) == 90:
-            rotate_code = cv2.ROTATE_90_COUNTERCLOCKWISE
-
-        return rotate_code
-
-    def correct_rotation(self, frame, rotate_code):
-        return cv2.rotate(frame, rotate_code)
-
-    def process(self, save=True):
class VideoCaptureThread:
    """Record video from an OpenCV source to an .mp4 file on a worker thread.

    Capture is forced to 30 fps at 1280x720 and written with the ``mp4v``
    codec. If ``sync_device`` is given, ``send_trigger(0xff)`` is fired right
    after the first frame is read so external recordings can be aligned to
    video onset; that first frame itself is not written to the file
    (preserving the original synchronization semantics).
    """

    # Seconds to wait for the capture device before giving up.
    _OPEN_TIMEOUT = 10.0

    def __init__(self, output_dir, video_source=0, sync_device=None):
        """Open the source, create the output file and start recording.

        Args:
            output_dir (str): directory for the output file (created if absent).
            video_source (int | str): OpenCV capture index or path. Default 0.
            sync_device: optional object exposing ``send_trigger(int)``;
                presumably a trigger box — confirm against callers.

        Raises:
            RuntimeError: if the source cannot be opened within the timeout.
        """
        self.video_source = video_source
        # CAP_DSHOW speeds up camera start-up on Windows.
        self.cap = cv2.VideoCapture(self.video_source, cv2.CAP_DSHOW)

        # Bounded wait instead of the previous unbounded busy-spin: a missing
        # camera would otherwise hang the constructor forever at 100% CPU.
        deadline = time.time() + self._OPEN_TIMEOUT
        while not self.cap.isOpened():
            if time.time() > deadline:
                raise RuntimeError(
                    f'Could not open video source {self.video_source!r}')
            time.sleep(0.05)

        self.cap.set(cv2.CAP_PROP_FPS, 30.0)
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)

        self.sync_device = sync_device

        date_time_str = datetime.now().strftime("%m-%d-%Y-%H-%M-%S")
        os.makedirs(output_dir, exist_ok=True)
        self.output_path = os.path.join(
            output_dir, f'video_recording_{date_time_str}.mp4')

        # Writer parameters must match the capture settings above.
        self.out = cv2.VideoWriter(self.output_path,
                                   cv2.VideoWriter_fourcc(*'mp4v'),
                                   30.0, (1280, 720))

        # Cooperative stop flag: close() sets it and joins the worker BEFORE
        # releasing cap/out. Releasing while the worker is inside cap.read()
        # is a race that can crash OpenCV.
        self._stop_event = threading.Event()
        self.videothread = threading.Thread(target=self.run)
        self.videothread.start()

    def run(self):
        """Thread entry point."""
        logger.info("Camera starting")
        self.capture_video()

    def capture_video(self):
        """Read frames and append them to the output file until stopped."""
        ret, frame = self.cap.read()
        if not ret:
            logger.error("Error: Couldn't read frame. Exit.")
            return
        # synchronize after getting the first frame
        if self.sync_device is not None:
            self.sync_device.send_trigger(0xff)  # 255 for video ready

        while not self._stop_event.is_set() and self.cap.isOpened():
            ret, frame = self.cap.read()
            if not ret:
                # Skip failed grabs instead of writing a None frame, which
                # previously made VideoWriter.write raise.
                logger.warning('Dropped a frame from source %s',
                               self.video_source)
                continue

            # TODO: online analysis (500 ms step, asynchronous)

            self.out.write(frame)

    def close(self):
        """Stop the worker thread, then release the capture and the writer."""
        logger.info("Camera ended")
        # Signal and join first; only then is it safe to release resources.
        self._stop_event.set()
        self.videothread.join()
        self.cap.release()
        self.out.release()

+ 61 - 0
backend/tests/test_video.py

@@ -0,0 +1,61 @@
+import unittest
+import time
+import os
+import shutil
+from glob import glob
+
+import cv2
+import numpy as np
+
+from device.video import VideoCaptureThread
+from device.trigger_box import TriggerNeuracle
+from device.data_client import NeuracleDataClient
+
+
def get_video_length(file_path):
    """Return the duration of a video file in seconds, or None on failure.

    None is returned when the file cannot be opened or when the container
    reports a non-positive fps (the previous code divided by it and raised
    ZeroDivisionError on such files).

    Args:
        file_path (str): path to the video file.

    Returns:
        float | None: duration in seconds, or None if it cannot be computed.
    """
    cap = cv2.VideoCapture(file_path)

    if not cap.isOpened():
        # Release even on the failure path; the old code leaked the handle.
        cap.release()
        return None

    try:
        fps = cap.get(cv2.CAP_PROP_FPS)
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    finally:
        # Always release the capture object, even if a property read raises.
        cap.release()

    if fps <= 0:
        return None

    return total_frames / fps
+
+
class TestVideo(unittest.TestCase):
    """Hardware-in-the-loop checks for VideoCaptureThread (needs camera 1)."""

    def test_video_recording(self):
        """Record for ~10 s and verify a playable .mp4 with positive length."""
        output_dir = './tests/data/video'
        recorder = VideoCaptureThread(output_dir=output_dir, video_source=1)
        time.sleep(10)
        recorder.close()

        # Pick up whatever file the recorder produced and measure it.
        recorded = glob(os.path.join(output_dir, '*.mp4'))[0]
        duration = get_video_length(recorded)
        self.assertTrue(isinstance(duration, float))
        self.assertTrue(duration > 0)

        shutil.rmtree(output_dir)

    def test_video_sync(self):
        """Verify exactly one 255 trigger is emitted when recording starts."""
        output_dir = './tests/data/video'
        trigger = TriggerNeuracle()
        data_client = NeuracleDataClient(buffer_len=10.)
        recorder = VideoCaptureThread(
            output_dir=output_dir, video_source=1, sync_device=trigger)
        time.sleep(5)
        recorder.close()

        # Column 2 of the event array holds the trigger code.
        events = data_client.get_trial_data(clear=True)[1]
        self.assertEqual(len(events), 1)
        self.assertAlmostEqual(events[0, 2], 255)
        data_client.close()

        shutil.rmtree(output_dir)

+ 0 - 1
environment.yml

@@ -5,7 +5,6 @@ dependencies:
   - python=3.10.11
   - pip
   - pip:
-      - av==10.0.0
       - mne==1.5.1
       - pydantic==2.4.2
       - pyedflib==0.1.36

+ 0 - 1
requirements.txt

@@ -1,4 +1,3 @@
-av==10.0.0
 joblib==1.3.2
 matplotlib==3.8.1
 mne==1.5.1