gemercheung, 3 years ago
commit
2800471ced
10 changed files with 410 additions and 0 deletions
  1. + 2 - 0   .gitignore
  2. + 9 - 0   cut.py
  3. + 46 - 0  exetract.py
  4. + 83 - 0  frame.py
  5. + 89 - 0  live.py
  6. + 32 - 0  livePipe.py
  7. + 45 - 0  requirements.txt
  8. + 0 - 0   start.sh
  9. + 24 - 0  webrtc.py
  10. + 80 - 0 worker.py

+ 2 - 0
.gitignore

@@ -0,0 +1,2 @@
+__pycache__/
+xVerse

+ 9 - 0
cut.py

@@ -0,0 +1,9 @@
+
+from moviepy.editor import VideoFileClip
+
+
+clip = VideoFileClip("/Users/gemer/Desktop/test-video-1/3.mp4").subclip(5, 9)
+
+
+# Export the trimmed clip to a file
+clip.write_videofile("/Users/gemer/Desktop/test-video-1/3-1.mp4")
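
cut.py trims a single 4-second segment with moviepy. For reference, a minimal sketch (not part of this commit) of the natural next step, joining several trimmed segments with moviepy's concatenate_videoclips; the paths and cut points are placeholders:

from moviepy.editor import VideoFileClip, concatenate_videoclips

# Cut two segments from the same source and join them into one file.
src = VideoFileClip("/Users/gemer/Desktop/test-video-1/3.mp4")
parts = [src.subclip(5, 9), src.subclip(20, 24)]
concatenate_videoclips(parts).write_videofile(
    "/Users/gemer/Desktop/test-video-1/3-joined.mp4")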

+ 46 - 0
exetract.py

@@ -0,0 +1,46 @@
+import cv2
+import time
+import os
+
+
+def video_to_frames(input_loc, output_loc):
+    """Function to extract frames from input video file
+    and save them as separate frames in an output directory.
+    Args:
+        input_loc: Input video file.
+        output_loc: Output directory to save the frames.
+    Returns:
+        None
+    """
+    try:
+        os.mkdir(output_loc)
+    except OSError:
+        pass
+    # Log the time
+    time_start = time.time()
+    # Start capturing the feed
+    cap = cv2.VideoCapture(input_loc)
+    # Find the number of frames
+    video_length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) - 1
+    print("Number of frames: ", video_length)
+    count = 0
+    print("Converting video..\n")
+    # Start converting the video
+    while cap.isOpened():
+        # Extract the frame
+        ret, frame = cap.read()
+        if not ret:
+            # Stop once the capture stops returning frames
+            break
+        # Write the results back to output location.
+        cv2.imwrite(output_loc + "/%#05d.jpg" % (count+1), frame)
+        count = count + 1
+        # If there are no more frames left
+        if (count > (video_length-1)):
+            # Log the time again
+            time_end = time.time()
+            # Release the feed
+            cap.release()
+            # Print stats
+            print("Done extracting frames.\n%d frames extracted" % count)
+            print("It took %d seconds forconversion." % (time_end-time_start))
+            break
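
exetract.py only defines video_to_frames. A minimal usage sketch (the paths are placeholders, not taken from the commit):

from exetract import video_to_frames

# Extract every frame of 3.mp4 into a "frames" directory next to it.
video_to_frames("/Users/gemer/Desktop/test-video-1/3.mp4",
                "/Users/gemer/Desktop/test-video-1/frames")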

+ 83 - 0
frame.py

@@ -0,0 +1,83 @@
+import os
+import cv2
+import glob
+import numpy as np
+
+
+# Video source: replace the paths below with files that exist on your machine
+
+# videoPath = '/Users/gemer/Desktop/test-video'
+# folders = glob.glob('/Users/gemer/Desktop/test-video-1')
+folders = sorted(filter(os.path.isfile,
+                        glob.glob('/Users/gemer/Desktop/test-video-1' + '/*.264', recursive=True)))
+
+imagePath = '/Users/gemer/Desktop/test-video-1/image/'
+
+
+videonames_list = []
+
+print('list_of_files', folders)
+
+for folder in folders:
+    videonames_list.append(folder)
+    print(folder)
+
+print('There are {} videos in the folder'.format(len(videonames_list)))
+
+
+filePath = '/Users/gemer/Desktop/'
+
+out_video = np.empty([30, 1920, 1080, 3], dtype=np.uint8)
+out_video = out_video.astype(np.uint8)
+
+start_frame = 12
+
+count = 0
+# for i in range(0, len(videonames_list)):
+#     video_data = videonames_list[i]
+#     video = cv2.VideoCapture(video_data)
+#     success = True
+#     while success:
+#         success, image = video.read()
+#         name = imagePath + str(i) + '-' + str(count)+'.jpg'
+#         if success == True:
+#             if count == 10:
+#                 cv2.imwrite(name, image)
+#                 print('video {} '.format(i))
+#                 print('Frame {} Extracted Successfully'.format(count))
+
+#             count += 1
+#         else:
+#             count = 0
+
+#     print('\n\n\nVideo {} Extracted Successfully\n\n\n'.format(video_data))
+
+
+caps = [cv2.VideoCapture(i) for i in videonames_list]
+print('cap', caps)
+frameCount = 0
+vidIndex = 0
+# for vid in caps:
+for index, vid in enumerate(caps):
+    # print(x)
+    fourcc = cv2.VideoWriter_fourcc('H', '2', '6', '4')
+    size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
+            int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
+    fps = vid.get(cv2.CAP_PROP_FPS)  # source frame rate (e.g. 30)
+    fps = int(fps)
+    hz = int(1000.0 / fps)
+    sizeStr = str(size[0]) + 'x' + str(size[1])
+
+    print('index {}'.format(index))
+
+    while True:
+        success, image = vid.read()
+        if success:
+            name = imagePath + str(frameCount) + '.jpg'
+            cv2.imwrite(name, image)
+            print('Frame {} Extracted Successfully'.format(frameCount))
+            frameCount += 1
+        else:
+            # No more frames in this video: release it and move on to the next one
+            vid.release()
+            frameCount = 0
+            break
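
frame.py computes fourcc, sizeStr, fps and size for each capture but never uses them. A hedged sketch of what they could feed, for example re-encoding a clip with cv2.VideoWriter while its frames are read; the paths are placeholders, and 'mp4v' is chosen only because H.264 support in OpenCV varies by build:

import cv2

vid = cv2.VideoCapture('/Users/gemer/Desktop/test-video-1/clip.264')
size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
fps = int(vid.get(cv2.CAP_PROP_FPS)) or 30  # raw .264 streams may report 0 fps
writer = cv2.VideoWriter('/Users/gemer/Desktop/test-video-1/copy.mp4',
                         cv2.VideoWriter_fourcc(*'mp4v'), fps, size)
while True:
    success, image = vid.read()
    if not success:
        break
    writer.write(image)
vid.release()
writer.release()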

+ 89 - 0
live.py

@@ -0,0 +1,89 @@
+import cv2
+import subprocess as sp
+import exetract
+import livePipe
+
+rtspUrl = 'rtsp://localhost:8554/mystream'
+
+# Video source: replace the path with a file that exists on your machine
+filePath = '/Users/gemer/Desktop/'
+testPath = '/Users/gemer/Desktop/test-video/'
+camera = cv2.VideoCapture(
+    filePath+"test.mkv")  # read the video from a file
+
+names = ['output000.mkv', 'output001.mkv', 'output001.mkv', 'output001.mkv']
+
+
+# Video properties
+size = (int(camera.get(cv2.CAP_PROP_FRAME_WIDTH)),
+        int(camera.get(cv2.CAP_PROP_FRAME_HEIGHT)))
+sizeStr = str(size[0]) + 'x' + str(size[1])
+fps = camera.get(cv2.CAP_PROP_FPS)  # source frame rate (e.g. 30)
+# fps = 24
+
+fps = int(fps)
+hz = int(1000.0 / fps)
+print('size:' + sizeStr + ' fps:' + str(fps) + ' hz:' + str(hz))
+
+
+# Output of the main video file
+# fourcc = cv2.VideoWriter_fourcc(*'XVID')
+fourcc = cv2.VideoWriter_fourcc('H', '2', '6', '4')
+out = cv2.VideoWriter(filePath + 'res_mv.avi', fourcc, fps, size)
+
+# caps = [cv2.VideoCapture(testPath+i) for i in names]
+
+# print('cap', caps)
+# for x in caps:
+#     print(x)
+#     fourcc = cv2.VideoWriter_fourcc('H', '2', '6', '4')
+#     fps = int(fps)
+#     hz = int(1000.0 / fps)
+
+
+# Live-stream pipe output
+# Push to ffmpeg for streaming; the key point is sharing frame data through a pipe
+command = ['ffmpeg',
+           '-y',
+           '-hide_banner',
+           #    '-loglevel', 'warning',
+           #    '-hwaccel', 'videotoolbox',
+           '-f', 'rawvideo',
+           '-vcodec', 'rawvideo',
+           '-pix_fmt', 'bgr24',
+           '-s', sizeStr,
+           '-r', str(fps),
+           '-i', '-',
+           '-b:v', '3000K',
+           #    '-c:v', 'h264_videotoolbox',
+           '-c:v', 'libx264',
+           '-pix_fmt', 'yuv420p',
+           '-preset', 'ultrafast',
+           '-f', 'rtsp',
+           rtspUrl]
+# Pipe configuration
+# pipe = sp.Popen(command, stdout = sp.PIPE, bufsize=10**8)
+# pipe = sp.Popen(command, stdin=sp.PIPE, bufsize=10**8)  # ,shell=False
+newPipe = livePipe.LivePipe(size, 30)
+
+# pipe.stdin.write(frame.tostring())
+while True:
+    ret, frame = camera.read()  # grab the stream frame by frame
+    if not ret:
+        # Stop once the source video has no more frames
+        break
+    if cv2.waitKey(1) & 0xFF == ord('q'):
+        break
+    # Handle the resulting frame: push it to the stream and also record it
+    # print('frame', frame)
+    newPipe.stdin.write(frame.tobytes())  # write into the pipe for live streaming
+    out.write(frame)  # also record the same frame to the local video file
+
+
+# Release everything once the job is finished
+camera.release()
+out.release()
+# Close the pipe so ffmpeg can flush and terminate cleanly
+newPipe.stdin.close()
+newPipe.wait()
+
+print("Over!")

+ 32 - 0
livePipe.py

@@ -0,0 +1,32 @@
+# Live-stream pipe output
+# Push to ffmpeg for streaming; the key point is sharing frame data through a pipe
+import subprocess as sp
+
+rtspUrl = 'rtsp://localhost:8554/mystream'
+
+
+def LivePipe(size, fps):
+    sizeStr = str(size[0]) + 'x' + str(size[1])
+    command = ['ffmpeg',
+               '-y',
+               '-hide_banner',
+               #    '-loglevel', 'warning',
+               #    '-hwaccel', 'videotoolbox',
+               '-f', 'rawvideo',
+               '-vcodec', 'rawvideo',
+               '-pix_fmt', 'bgr24',
+               '-s', sizeStr,
+               '-r', str(fps),
+               '-i', '-',
+               '-b:v', '3000K',
+               #    '-c:v', 'h264_videotoolbox',
+               '-c:v', 'libx264',
+               '-pix_fmt', 'yuv420p',
+               '-preset', 'ultrafast',
+               '-f', 'rtsp',
+               rtspUrl]
+    # Pipe configuration
+    # pipe = sp.Popen(command, stdout = sp.PIPE, bufsize=10**8)
+    pipe = sp.Popen(command, stdin=sp.PIPE, bufsize=10**8)  # ,shell=False
+
+    return pipe
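
A minimal usage sketch for LivePipe, mirroring what live.py does: write BGR frames into the returned ffmpeg process, then close its stdin so ffmpeg can flush and exit. The source path is a placeholder:

import cv2
import livePipe

cap = cv2.VideoCapture('/Users/gemer/Desktop/test.mkv')
size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
pipe = livePipe.LivePipe(size, 30)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    pipe.stdin.write(frame.tobytes())  # hand each frame to ffmpeg
cap.release()
pipe.stdin.close()
pipe.wait()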

+ 45 - 0
requirements.txt

@@ -0,0 +1,45 @@
+aioice==0.7.6
+aiortc==1.3.1
+anyio==3.5.0
+asgiref==3.5.0
+asyncio==3.4.3
+autopep8==1.6.0
+av==9.0.1
+certifi==2021.10.8
+cffi==1.15.0
+charset-normalizer==2.0.12
+click==8.0.4
+colorlog==6.6.0
+cryptography==36.0.2
+Cython==0.29.28
+decorator==4.4.2
+dnspython==2.2.1
+ffmpeg-python==0.2.0
+future==0.18.2
+google-crc32c==1.3.0
+h11==0.13.0
+idna==3.3
+imageio==2.16.1
+imageio-ffmpeg==0.4.5
+Jinja2==3.0.3
+MarkupSafe==2.1.1
+moviepy==1.0.3
+netifaces==0.11.0
+numpy==1.22.3
+opencv-python==4.5.5.64
+Pillow==9.0.1
+proglog==0.1.9
+pycodestyle==2.8.0
+pycparser==2.21
+pyee==9.0.4
+pylibsrtp==0.7.1
+requests==2.27.1
+simplejpeg==1.6.4
+sniffio==1.2.0
+starlette==0.19.0
+toml==0.10.2
+tqdm==4.63.0
+typing_extensions==4.1.1
+urllib3==1.26.9
+uvicorn==0.17.6
+vidgear==0.2.5

+ 0 - 0
start.sh


+ 24 - 0
webrtc.py

@@ -0,0 +1,24 @@
+import uvicorn
+from vidgear.gears.asyncio import WebGear
+
+# various performance tweaks
+options = {
+    "frame_size_reduction": 30,
+    "jpeg_compression_quality": 80,
+    "jpeg_compression_fastdct": True,
+    "jpeg_compression_fastupsample": False,
+    "framerate": 30,
+    "resolution": (1280, 1080)
+
+}
+
+testUrl = '/Users/gemer/Desktop/test.mkv'
+
+# initialize WebGear app
+web = WebGear(source=testUrl, logging=True, **options)
+
+# run this app on Uvicorn server at address http://localhost:8000/
+uvicorn.run(web(), host="localhost", port=8000)
+
+# close app safely
+web.shutdown()
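
Despite the file name, WebGear streams MJPEG frames over HTTP rather than WebRTC; vidgear's WebRTC variant is WebGear_RTC. A hedged sketch, assuming the pinned vidgear 0.2.5 exposes WebGear_RTC with the same run/shutdown pattern (it relies on the aiortc package already listed in requirements.txt):

import uvicorn
from vidgear.gears.asyncio import WebGear_RTC

# Serve the same test file over WebRTC at http://localhost:8000/
web = WebGear_RTC(source='/Users/gemer/Desktop/test.mkv', logging=True)
uvicorn.run(web(), host="localhost", port=8000)
web.shutdown()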

+ 80 - 0
worker.py

@@ -0,0 +1,80 @@
+import cv2
+import subprocess as sp
+
+rtspUrl = 'rtsp://localhost:8554/mystream'
+
+# Video source: replace the path with a file that exists on your machine
+filePath = '/Users/gemer/Desktop/'
+testPath = '/Users/gemer/Desktop/test-video/'
+camera = cv2.VideoCapture(
+    filePath+"test.mp4")  # read the video from a file
+
+names = ['output000.mkv', 'output001.mkv', 'output001.mkv', 'output001.mkv']
+
+
+# Video properties
+size = (int(camera.get(cv2.CAP_PROP_FRAME_WIDTH)),
+        int(camera.get(cv2.CAP_PROP_FRAME_HEIGHT)))
+sizeStr = str(size[0]) + 'x' + str(size[1])
+fps = camera.get(cv2.CAP_PROP_FPS)  # source frame rate (e.g. 30)
+# fps = 24
+
+fps = int(fps)
+hz = int(1000.0 / fps)
+print('size:' + sizeStr + ' fps:' + str(fps) + ' hz:' + str(hz))
+
+
+# Output of the main video file
+# fourcc = cv2.VideoWriter_fourcc(*'XVID')
+fourcc = cv2.VideoWriter_fourcc('H', '2', '6', '4')
+out = cv2.VideoWriter(filePath + 'res_mv.avi', fourcc, fps, size)
+
+# caps = [cv2.VideoCapture(testPath+i) for i in names]
+# print('cap', caps)
+# for x in caps:
+#     print(x)
+#     fourcc = cv2.VideoWriter_fourcc('H', '2', '6', '4')
+#     fps = int(fps)
+#     hz = int(1000.0 / fps)
+
+
+# Live-stream pipe output
+# Push to ffmpeg for streaming; the key point is sharing frame data through a pipe
+command = ['ffmpeg',
+           '-y',
+           '-hide_banner',
+           '-loglevel', 'warning',
+           #    '-hwaccel', 'videotoolbox',
+           '-f', 'rawvideo',
+           '-vcodec', 'rawvideo',
+           '-pix_fmt', 'bgr24',
+           '-s', sizeStr,
+           '-r', str(fps),
+           '-i', '-',
+           '-b:v', '3000K',
+           #    '-c:v', 'h264_videotoolbox',
+           '-c:v', 'libx264',
+           '-pix_fmt', 'yuv420p',
+           '-preset', 'ultrafast',
+           '-f', 'rtsp',
+           rtspUrl]
+# Pipe configuration
+# pipe = sp.Popen(command, stdout = sp.PIPE, bufsize=10**8)
+pipe = sp.Popen(command, stdin=sp.PIPE)  # ,shell=False
+# pipe.stdin.write(frame.tostring())
+while True:
+    ret, frame = camera.read()  # grab the stream frame by frame
+    if not ret:
+        # Stop once the source video has no more frames
+        break
+    # Handle the resulting frame: push it to the stream and also record it
+    # print('frame', frame)
+    pipe.stdin.write(frame.tobytes())  # write into the pipe for live streaming
+    out.write(frame)  # also record the same frame to the local video file
+# Release everything once the job is finished
+camera.release()
+out.release()
+# Close the pipe so ffmpeg can flush and terminate cleanly
+pipe.stdin.close()
+pipe.wait()
+
+print("Over!")