Update app.py
app.py
CHANGED
@@ -1,285 +1,327 @@
import gradio as gr
-
- from PIL import Image, ImageFilter
- import cv2
- import os
import torch
-
- from torchvision import
-
- from

-
- import spaces
- @spaces.GPU()
- # VAAPI acceleration check
- def check_vaapi_support():
-     """Check if VAAPI is available for hardware acceleration"""
-     try:
-         # Check if VAAPI devices are available
-         vaapi_devices = [f for f in os.listdir('/dev/dri') if f.startswith('render')]
-         return len(vaapi_devices) > 0
-     except:
-         return False
- HAS_VAAPI = check_vaapi_support()

-

-
-             mode='bicubic',
-             align_corners=False,
-             antialias=True
-         )
-
-     def lanczos_torch(self, image_tensor, scale_factor):
-         """GPU-accelerated Lanczos-style upscaling"""
-         return F.interpolate(
-             image_tensor,
-             scale_factor=scale_factor,
-             mode='bicubic',
-             align_corners=False,
-             antialias=True
-         )
-
-     def esrgan_style_upscale(self, image_tensor, scale_factor):
-         """Simple ESRGAN-style upscaling using convolutions"""
-         b, c, h, w = image_tensor.shape
-         upscaled = F.interpolate(image_tensor, scale_factor=scale_factor, mode='bicubic', align_corners=False)
-         kernel = torch.tensor([[[[-1, -1, -1],
-                                  [-1, 9, -1],
-                                  [-1, -1, -1]]]], dtype=torch.float32, device=self.device)
-         kernel = kernel.repeat(c, 1, 1, 1)
-         sharpened = F.conv2d(upscaled, kernel, padding=1, groups=c)
-         result = 0.8 * upscaled + 0.2 * sharpened
-         return torch.clamp(result, 0, 1)

-

-
-         return None
-         try:
-             h, w = image_array.shape[:2]
-             new_h, new_w = int(h * scale_factor), int(w * scale_factor)
-             if method == "VAAPI_BICUBIC":
-                 return cv2.resize(image_array, (new_w, new_h), interpolation=cv2.INTER_CUBIC)
-             elif method == "VAAPI_LANCZOS":
-                 return cv2.resize(image_array, (new_w, new_h), interpolation=cv2.INTER_LANCZOS4)
-         except Exception as e:
-             print(f"VAAPI upscaling failed: {e}")
-             return None

-

-

-

-
-             upscaled_array = vaapi_upscaler.upscale_vaapi(img_array, scale_factor, method)
-             upscaled = Image.fromarray(upscaled_array) if upscaled_array is not None else image.resize((new_width, new_height), Image.BICUBIC)
-
-         else:
-             print("Using CPU methods")
-             if method == "Bicubic":
-                 upscaled = image.resize((new_width, new_height), Image.BICUBIC)
-             elif method == "Lanczos":
-                 upscaled = image.resize((new_width, new_height), Image.LANCZOS)
-             else:
-                 upscaled = image.resize((new_width, new_height), Image.BICUBIC)
-
-         if enhance_quality:
-             upscaled = upscaled.filter(ImageFilter.UnsharpMask(radius=1, percent=120, threshold=3))
-
-         return upscaled
-
-     except Exception as e:
-         print(f"Error during upscaling: {e}")
-         return image

-
-         methods.extend(["VAAPI_BICUBIC", "VAAPI_LANCZOS"])
-     return methods

-
-         info.append(f"🚀 CUDA GPU: {gpu_name} ({gpu_memory:.1f} GB)")
-     else:
-         info.append("❌ CUDA not available")
-     if HAS_ZEROGPU:
-         info.append("✅ ZeroGPU support enabled")
-     if HAS_VAAPI:
-         info.append("✅ VAAPI hardware acceleration available")
-     return "\n".join(info)

-
-     original_info = f"Original: {image.size[0]} × {image.size[1]} pixels"
-     result = upscale_image_accelerated(image, scale_factor, method, enhance_quality, use_gpu_acceleration)
-     if result is None:
-         return None, "Error processing image"
-
-     result_info = f"Upscaled: {result.size[0]} × {result.size[1]} pixels"
-     accel_info = "GPU/Hardware" if use_gpu_acceleration else "CPU"
-
-     combined_info = f"""
- ## Processing Details
- {original_info}
- {result_info}
- **Scale Factor:** {scale_factor}x
- **Method:** {method}
- **Acceleration:** {accel_info}
- **Quality Enhancement:** {'✅' if enhance_quality else '❌'}
-
- ## System Status
- {get_system_info()}
- """
-     return result, combined_info

-
-     available_methods = get_available_methods()
-
-     gr.Markdown("## 🚀 Accelerated Image Upscaler")
-     with gr.Row():
-         with gr.Column(scale=1):
-             input_image = gr.Image(type="pil", label="Upload Image", sources=["upload", "clipboard"])
-             scale_factor = gr.Slider(minimum=1.5, maximum=4.0, step=0.5, value=2.0, label="Scale Factor")
-             method = gr.Dropdown(choices=available_methods, value=available_methods[0], label="Upscaling Method")
-             use_gpu_acceleration = gr.Checkbox(label="Use GPU Acceleration", value=torch.cuda.is_available())
-             enhance_quality = gr.Checkbox(label="Apply Quality Enhancement", value=True)
-             process_btn = gr.Button("🚀 Upscale Image", variant="primary")
-
-         with gr.Column(scale=2):
-             output_image = gr.Image(label="Upscaled Image", type="pil")
-             image_info = gr.Markdown(value=f"## System Status\n{get_system_info()}", label="Processing Information")
-
-     process_btn.click(
-         fn=process_and_info_accelerated,
-         inputs=[input_image, scale_factor, method, enhance_quality, use_gpu_acceleration],
-         outputs=[output_image, image_info]
-     )

-
-     gr.Markdown("## 🚀 Video Upscaler and Frame Interpolator")
-     with gr.Row():
-         with gr.Column(scale=1):
-             input_video = gr.Video(label="Upload Video", sources=["upload"])
-             scale_factor = gr.Slider(minimum=1.5, maximum=4.0, step=0.5, value=2.0, label="Scale Factor")
-             multi = gr.Slider(minimum=2, maximum=8, step=1, value=2, label="Frame Multiplier")
-             use_gpu_acceleration = gr.Checkbox(label="Use GPU Acceleration", value=torch.cuda.is_available())
-             process_btn = gr.Button("🚀 Process Video", variant="primary")
-
-         with gr.Column(scale=2):
-             output_video = gr.Video(label="Processed Video")
-             processing_info = gr.Markdown(value=f"## System Status\n{get_system_info()}", label="Processing Information")
-
-     process_btn.click(
-         fn=process_video_wrapper,
-         inputs=[input_video, scale_factor, multi, use_gpu_acceleration],
-         outputs=[output_video, processing_info]
-     )

-
    )

-
- with gr.Blocks(title="Accelerated Media Processor", theme=gr.themes.Soft()) as demo:
-     with gr.Tab("Image Upscaler"):
-         create_accelerated_upscaler_ui()
-     with gr.Tab("Video Processing"):
-         create_video_interface_ui()

if __name__ == "__main__":
-
-         share=False,
-         debug=True
-     )
+
+ try:
+     import spaces
+ except ImportError:
+     # Create a dummy decorator if spaces is not available
+     def spaces_gpu(func):
+         return func
+     spaces = type('spaces', (), {'GPU': spaces_gpu})()
+
import gradio as gr
import torch
+ from torchvision.transforms import functional as F
+ from PIL import Image
+ import os
+ import cv2
+ import numpy as np
+ from super_image import EdsrModel, ImageLoader
+
+
+ @spaces.GPU
+ def upscale_video(video_path, scale_factor, progress=gr.Progress()):
+     """
+     Upscales a video using EDSR model.
+     This function is decorated with @spaces.GPU to run on ZeroGPU.
+     """
+     # Load models inside the function for ZeroGPU compatibility
+     if scale_factor == 2:
+         model = EdsrModel.from_pretrained('eugenesiow/edsr-base', scale=2)
+     elif scale_factor == 4:
+         model = EdsrModel.from_pretrained('eugenesiow/edsr-base', scale=4)
+     else:
+         raise gr.Error("Invalid scale factor. Choose 2 or 4.")
+
+     if not os.path.exists(video_path):
+         raise gr.Error(f"Input file not found at {video_path}")
+
+     video_capture = cv2.VideoCapture(video_path)
+     if not video_capture.isOpened():
+         raise gr.Error(f"Could not open video file {video_path}")
+
+     fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+     fps = video_capture.get(cv2.CAP_PROP_FPS)
+     width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
+     height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
+     frame_count = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
+
+     output_width = width * scale_factor
+     output_height = height * scale_factor
+
+     output_path = f"upscaled_{scale_factor}x_{os.path.basename(video_path)}"
+     video_writer = cv2.VideoWriter(output_path, fourcc, fps, (output_width, output_height))
+
+     for i in progress.tqdm(range(frame_count), desc=f"Upscaling {scale_factor}x"):
+         ret, frame = video_capture.read()
+         if not ret:
+             break
+
+         pil_frame = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
+
+         inputs = ImageLoader.load_image(pil_frame)
+         preds = model(inputs)
+         output_frame = ImageLoader.save_image(preds, mode='RGB').convert("RGB")
+
+         video_writer.write(cv2.cvtColor(np.array(output_frame), cv2.COLOR_RGB2BGR))
+
+     video_capture.release()
+     video_writer.release()
+
+     return output_path
+
+
+ from RIFE import Model as RIFEModel
+ from safetensors.torch import load_file
+
+ # ... (existing code)
+
+ @spaces.GPU
+ def rife_interpolate_video(video_path, progress=gr.Progress()):
+     """
+     Interpolates a video using the RIFE model.
+     This function is decorated with @spaces.GPU to run on ZeroGPU.
+     """
+     if not os.path.exists(video_path):
+         raise gr.Error(f"Input file not found at {video_path}")
+
+     # Load the RIFE model
+     model = RIFEModel()
+     model.load_state_dict(load_file("/Users/craigellenwood/Workspace/video_upscaler_rife_interpolator/rife_model_new/rife-flownet-4.13.2.safetensors"))
+     model.eval()
+     model.cuda()
+
+     video_capture = cv2.VideoCapture(video_path)
+     if not video_capture.isOpened():
+         raise gr.Error(f"Could not open video file {video_path}")
+
+     fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+     fps = video_capture.get(cv2.CAP_PROP_FPS)
+     width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
+     height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
+     frame_count = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
+
+     output_path = f"interpolated_{os.path.basename(video_path)}"
+     video_writer = cv2.VideoWriter(output_path, fourcc, fps * 2, (width, height))
+
+     prev_frame = None
+     for i in progress.tqdm(range(frame_count), desc="Interpolating"):
+         ret, frame = video_capture.read()
+         if not ret:
+             break
+
+         if prev_frame is not None:
+             # Preprocess frames
+             img0 = torch.from_numpy(prev_frame.transpose(2, 0, 1)).float().unsqueeze(0).cuda() / 255.
+             img1 = torch.from_numpy(frame.transpose(2, 0, 1)).float().unsqueeze(0).cuda() / 255.
+
+             # Run inference
+             with torch.no_grad():
+                 interpolated_frame = model.inference(img0, img1)[0].cpu().numpy().transpose(1, 2, 0) * 255
+
+             video_writer.write(interpolated_frame.astype(np.uint8))
+
+         video_writer.write(frame)
+         prev_frame = frame
+
+     video_capture.release()
+     video_writer.release()
+
+     return output_path
+
+
+ with gr.Blocks() as demo:
+     gr.Markdown("# Video Upscaler and Frame Interpolator")
+     with gr.Tab("Upscale"):
+         with gr.Row():
+             with gr.Column():
+                 video_input_upscale = gr.Video(label="Input Video")
+                 scale_factor = gr.Radio([2, 4], label="Scale Factor", value=2)
+                 upscale_button = gr.Button("Upscale Video")
+             with gr.Column():
+                 video_output_upscale = gr.Video(label="Upscaled Video")
+     with gr.Tab("Interpolate"):
+         with gr.Row():
+             with gr.Column():
+                 video_input_rife = gr.Video(label="Input Video")
+                 rife_button = gr.Button("Interpolate Frames")
+             with gr.Column():
+                 video_output_rife = gr.Video(label="Interpolated Video")
+
+     upscale_button.click(
+         fn=upscale_video,
+         inputs=[video_input_upscale, scale_factor],
+         outputs=video_output_upscale
    )
+
+     rife_button.click(
+         fn=rife_interpolate_video,
+         inputs=[video_input_rife],
+         outputs=video_output_rife
+     )
+

if __name__ == "__main__":
+     demo.launch(share=True)
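
For reference, a minimal sketch of the per-frame EDSR step used in upscale_video above, kept separate from the Gradio loop. It assumes the super_image prediction comes back as a (1, 3, H, W) float tensor with values in [0, 1]; the upscale_frame helper name below is illustrative and not part of the commit.

    import numpy as np
    import torch
    from PIL import Image
    from super_image import EdsrModel, ImageLoader

    def upscale_frame(frame_rgb, model):
        """Upscale one RGB frame (H, W, 3) uint8 array with EDSR.

        Assumes the prediction is a (1, 3, H*scale, W*scale) float tensor in [0, 1].
        """
        inputs = ImageLoader.load_image(Image.fromarray(frame_rgb))
        with torch.no_grad():
            preds = model(inputs)
        # Convert the prediction tensor back to a uint8 HxWx3 array for the video writer.
        arr = preds.squeeze(0).clamp(0.0, 1.0).permute(1, 2, 0).cpu().numpy()
        return (arr * 255.0).round().astype(np.uint8)

    # Example usage (model choices as in the commit):
    # model = EdsrModel.from_pretrained('eugenesiow/edsr-base', scale=2)
    # upscaled_rgb = upscale_frame(frame_rgb, model)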