svjack committed
Commit db7320d · verified · 1 Parent(s): 0c5ae9f

Update app.py

Files changed (1)
  1. app.py +57 -95
app.py CHANGED
@@ -3,38 +3,36 @@ import gradio as gr
 import cv2
 import numpy as np
 import os
-
+from datetime import datetime  # new: used for the timestamped output directory
 from scenedetect import open_video, SceneManager
 from scenedetect.detectors import ContentDetector
-
-from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip
-
+from moviepy.editor import VideoFileClip

 def convert_to_tuple(list):
-    return tuple(list);
-
+    return tuple(list)

 def clear_app():
     return None, 27, None, None, None

-
 def find_scenes(video_path, threshold):
-    # file name without extension
+    # create a timestamped output directory for this run
+    timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")
+    output_dir = f"output_{timestamp}"
+    os.makedirs(output_dir, exist_ok=True)
+
     filename = os.path.splitext(os.path.basename(video_path))[0]
-    # Open our video, create a scene manager, and add a detector.
     video = open_video(video_path)
     scene_manager = SceneManager()
-    scene_manager.add_detector(
-        ContentDetector(threshold=threshold))
+    scene_manager.add_detector(ContentDetector(threshold=threshold))

-    # Start detection
     scene_manager.detect_scenes(video, show_progress=True)
     scene_list = scene_manager.get_scene_list()

-    # Push the list of scenes into data_outputs
+    data_outputs = []  # now a local variable
+    gradio_components_outputs = []  # now a local variable
+
     data_outputs.append(scene_list)
     gradio_components_outputs.append("json")
-    #print(scene_list)

     timecodes = []
     if not scene_list:
@@ -46,128 +44,92 @@ def find_scenes(video_path, threshold):
     shots = []
     stills = []

-    # For each shot found, set entry and exit points as seconds from frame number
-    # Then split video into chunks and store them into shots List
-    # Then extract first frame of each shot as thumbnail for the gallery
     for i, shot in enumerate(scene_list):
-
-        # STEP 1
-        # Get timecode in seconds
         framerate = shot[0].get_framerate()
         shot_in = shot[0].get_frames() / framerate
         shot_out = shot[1].get_frames() / framerate

         tc_in = shot[0].get_timecode()
         tc_out = shot[1].get_timecode()
-
         frame_in = shot[0].get_frames()
         frame_out = shot[1].get_frames()

         timecode = {"tc_in": tc_in, "tc_out": tc_out, "frame_in": frame_in, "frame_out": frame_out}
         timecodes.append(timecode)

-        # Set name template for each shot
-        target_name = "shot_" + str(i+1) + "_" + str(filename) + ".mp4"
+        # write each shot into the timestamped output directory
+        target_name = os.path.join(output_dir, f"shot_{i+1}_{filename}.mp4")

-        # Split chunk
-        ffmpeg_extract_subclip(video_path, shot_in, shot_out, targetname=target_name)
+        with VideoFileClip(video_path) as clip:
+            subclip = clip.subclip(shot_in, shot_out)
+            subclip.write_videofile(
+                target_name,
+                codec="libx264",
+                audio_codec="aac",
+                threads=4,
+                preset="fast",
+                ffmpeg_params=["-crf", "23"]
+            )

-        # Push chunk into shots List
         shots.append(target_name)
-
-        # Push each chunk into data_outputs
         data_outputs.append(target_name)
         gradio_components_outputs.append("video")
-
-        # —————————————————————————————————————————————————
-
-        # STEP 2
-        # extract first frame of each shot with cv2
-        vid = cv2.VideoCapture(video_path)
-        fps = vid.get(cv2.CAP_PROP_FPS)
-        print('frames per second =',fps)
-
-        frame_id = shot[0].get_frames() # value from scene_list from step 1

+        vid = cv2.VideoCapture(video_path)
+        frame_id = shot[0].get_frames()
         vid.set(cv2.CAP_PROP_POS_FRAMES, frame_id)
         ret, frame = vid.read()

-        # Save frame as PNG file
-        img = str(frame_id) + '_screenshot.png'
-        cv2.imwrite(img,frame)
-
-        # Push image into stills List
-        stills.append((img, 'shot ' + str(i+1)))
+        # save the still frame into the timestamped output directory
+        img = os.path.join(output_dir, f"{frame_id}_screenshot.png")
+        cv2.imwrite(img, frame)
+        stills.append((img, f'shot {i+1}'))
+        vid.release()

-    # Push the list of video shots into data_outputs for Gradio file component
     data_outputs.append(shots)
     gradio_components_outputs.append("file")
-
-    # Push the list of still images into data_outputs
     data_outputs.append(stills)
     gradio_components_outputs.append("gallery")

-    # This would have been used as gradio outputs,
-    # if we could set number of outputs after the interface launch
-    # That's not (yet ?) possible
-    results = convert_to_tuple(data_outputs)
-    print(results)
-
-    # return List of shots as JSON, List of video chunks, List of still images
-    # *
-    # Would be nice to be able to return my results tuple as outputs,
-    # while number of chunks found is not fixed:
-    # return results
-    return timecodes, shots, stills
-
-
-# —————————————————————————————————————————————————
-
-# SET DATA AND COMPONENTS OUTPUTS
-
-# This would be filled like this:
-# data_outputs = [ [List from detection], "video_chunk_n0.mp4", "video_chunk_n1.mp4", ... , "video_chunk_n.mp4", [List of video filepath to download], [List of still images from each shot found] ]
-data_outputs = []
-
-# This would be filled like this:
-# gradio_components_outputs = [ "json", "video", "video", ... , "video", "file", "gallery" ]
-gradio_components_outputs = []
-
-
-#SET OUTPUTS
-
-# This would be nice if number of outputs could be set after Interface Launch:
-# because we do not know how many shots will be detected
-# gradio_components_outputs = [ "json", "video", "video", ... , "video", "file", "gallery" ]
-# outputs = gradio_components_outputs
-
-# ANOTHER SOLUTION WOULD BE USING A (FUTURE ?) "VIDEO GALLERY" GRADIO COMPONENT FROM LIST :)
-
+    results = convert_to_tuple(data_outputs)
+    print(f"All output files saved in: {os.path.abspath(output_dir)}")

+    return timecodes, shots, stills

 with gr.Blocks() as demo:
     with gr.Column():
         gr.Markdown("""
         # Scene Edit Detection
-        Copy of @fffiloni's gradio demo of PySceneDetect.
         Automatically find all the shots in a video.
         Accepts mp4 format. Works only with videos that have cuts in them.
         """)
+
         with gr.Row():
             with gr.Column():
-                video_input = gr.Video(sources="upload", format="mp4", label="Video Sequence", mirror_webcam = False)
-                threshold = gr.Slider(label="Threshold pixel comparison: if exceeded, triggers a scene cut. Default: 27.0", minimum=15.0, maximum=40.0, value=27.0)
+                video_input = gr.Video(sources="upload", format="mp4", label="Video input", mirror_webcam=False)
+                threshold = gr.Slider(label="Scene-cut detection threshold (lower is more sensitive)", minimum=15.0, maximum=40.0, value=27.0)
                 with gr.Row():
-                    clear_button = gr.Button(value=("Clear"))
-                    run_button = gr.Button(value = "Submit", variant = "primary")
-
+                    clear_button = gr.Button("Clear")
+                    run_button = gr.Button("Start processing", variant="primary")

             with gr.Column():
-                json_output = gr.JSON(label="Shots detected")
-                file_output = gr.File(label="Downloadable Shots")
-                gallery_output = gr.Gallery(label="Still Images from each shot", object_fit = "cover", columns = 3)
+                json_output = gr.JSON(label="Scene analysis results")
+                file_output = gr.File(label="Download split clips")
+                gallery_output = gr.Gallery(label="Scene thumbnails", object_fit="cover", columns=3)

     run_button.click(fn=find_scenes, inputs=[video_input, threshold], outputs=[json_output, file_output, gallery_output])
-    clear_button.click(fn=clear_app, inputs = None, outputs=[video_input, threshold, json_output, file_output, gallery_output])
-
-demo.queue().launch(debug=True, share = True)
+    clear_button.click(fn=clear_app, inputs=None, outputs=[video_input, threshold, json_output, file_output, gallery_output])
+
+    # add an example section
+    gr.Examples(
+        examples=[
+            ["anime_kiss.mp4", 27],
+        ],
+        inputs=[video_input, threshold],
+        outputs=[json_output, file_output, gallery_output],
+        fn=find_scenes,
+        cache_examples=False,
+        label="Example video"
+    )
+
+demo.queue().launch(debug=True, share=True)
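
For readers who want to try the new splitting flow outside of Gradio, below is a minimal standalone sketch (not part of the commit) of the same approach: PySceneDetect finds the cuts, then moviepy 1.x (the moviepy.editor.VideoFileClip API imported in the diff) re-encodes each shot into a timestamped directory. The helper name split_shots and the input path my_clip.mp4 are illustrative placeholders.

# Minimal standalone sketch of the approach used by the updated app.py (illustrative only).
# Assumes scenedetect and moviepy 1.x are installed; "my_clip.mp4" is a placeholder path.
import os
from datetime import datetime

from scenedetect import open_video, SceneManager
from scenedetect.detectors import ContentDetector
from moviepy.editor import VideoFileClip


def split_shots(video_path, threshold=27.0):
    # detect scene cuts with PySceneDetect
    video = open_video(video_path)
    manager = SceneManager()
    manager.add_detector(ContentDetector(threshold=threshold))
    manager.detect_scenes(video, show_progress=True)
    scene_list = manager.get_scene_list()

    # timestamped output directory, mirroring the committed behaviour
    output_dir = "output_" + datetime.now().strftime("%Y%m%d-%H%M%S")
    os.makedirs(output_dir, exist_ok=True)

    shot_paths = []
    for i, (start, end) in enumerate(scene_list, start=1):
        target = os.path.join(output_dir, f"shot_{i}.mp4")
        # re-encode each shot with moviepy, as the updated app does
        with VideoFileClip(video_path) as clip:
            clip.subclip(start.get_seconds(), end.get_seconds()).write_videofile(
                target, codec="libx264", audio_codec="aac"
            )
        shot_paths.append(target)
    return shot_paths


if __name__ == "__main__":
    print(split_shots("my_clip.mp4"))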