Commit 1d3bf88 · 1 Parent(s): c914404

🛠️ Fix Overlapping/Unmatching Regions From Appearing

Files changed:
- App.py +175 -77
- Models/2x_OpenProteus_Compact_i2_70K.pth +3 -0
- Scripts/App-Old.py +433 -0
App.py
CHANGED
@@ -46,7 +46,7 @@ logging.basicConfig(
         show_path=False,
     )],
 )
-Logger = logging.getLogger('
+Logger = logging.getLogger('Zero2x')
 logging.getLogger('httpx').setLevel(logging.WARNING)
 
 # ============================== #
@@ -89,7 +89,6 @@ def HumanizedBytes(Size):
 #     Main Processing Logic     #
 # ============================== #
 
-@spaces.GPU
 class Upscaler:
     def __init__(self):
         pass
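The hunk above removes the class-level `@spaces.GPU`; the new code instead decorates the individual methods that run inference, which is how the ZeroGPU `spaces` package expects the decorator to be used: on the callables that actually need a GPU. A minimal sketch of the pattern, with an illustrative function that is not part of this commit:

```python
import spaces
import torch

@spaces.GPU  # ZeroGPU attaches a GPU to this call only while it runs
def RunInference(Model, Batch):
    # Illustrative helper, not from App.py: run a model on CUDA, return on CPU
    with torch.no_grad():
        return Model(Batch.to('cuda')).cpu()
```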
@@ -121,7 +120,63 @@ class Upscaler:
         self.UnloadModel()
         Logger.info('🧹 Temporary Files Cleaned Up')
 
-
+    @spaces.GPU
+    def UpscaleFullFrame(self, Model, Frame):
+        FrameRgb = cv2.cvtColor(Frame, cv2.COLOR_BGR2RGB)
+        FrameForTorch = FrameRgb.transpose(2, 0, 1)
+        FrameForTorch = torch.from_numpy(FrameForTorch).unsqueeze(0).to(Device).float() / 255.0
+        OutputFrame = Model(FrameForTorch)[0].cpu().numpy().transpose(1, 2, 0) * 255.0
+        OutputFrame = cv2.cvtColor(OutputFrame.astype('uint8'), cv2.COLOR_RGB2BGR)
+        return OutputFrame
+
+    @spaces.GPU
+    def UpscaleRegions(self, Model, Frame, PrevFrame, UpscaledPrevFrame, InputThreshold, InputMinPercentage, InputMaxRectangles, InputPadding, InputSegmentRows, InputSegmentColumns):
+        DiffResult = GetDifferenceRectangles(
+            PrevFrame,
+            Frame,
+            Threshold=InputThreshold,
+            Rows=InputSegmentRows,
+            Columns=InputSegmentColumns,
+            Padding=InputPadding
+        )
+        SimilarityPercentage = DiffResult['SimilarPercentage']
+        Rectangles = DiffResult['Rectangles']
+        Cols = DiffResult['Columns']
+        Rows = DiffResult['Rows']
+        FrameHeight, FrameWidth = Frame.shape[:2]
+        SegmentWidth = FrameWidth // Cols
+        SegmentHeight = FrameHeight // Rows
+        UseRegions = False
+        RegionLog = '🟥'
+        if SimilarityPercentage > InputMinPercentage and len(Rectangles) < InputMaxRectangles:
+            UpscaleFactorY = UpscaledPrevFrame.shape[0] // FrameHeight
+            UpscaleFactorX = UpscaledPrevFrame.shape[1] // FrameWidth
+            OutputFrame = UpscaledPrevFrame.copy()
+            for X, Y, W, H in Rectangles:
+                X1 = X * SegmentWidth
+                Y1 = Y * SegmentHeight
+                X2 = FrameWidth if X + W == Cols else X1 + W * SegmentWidth
+                Y2 = FrameHeight if Y + H == Rows else Y1 + H * SegmentHeight
+                Region = Frame[Y1:Y2, X1:X2]
+                RegionRgb = cv2.cvtColor(Region, cv2.COLOR_BGR2RGB)
+                RegionTorch = torch.from_numpy(RegionRgb.transpose(2, 0, 1)).unsqueeze(0).to(Device).float() / 255.0
+                UpscaledRegion = Model(RegionTorch)[0].cpu().numpy().transpose(1, 2, 0) * 255.0
+                UpscaledRegion = cv2.cvtColor(UpscaledRegion.astype('uint8'), cv2.COLOR_RGB2BGR)
+                RegionHeight, RegionWidth = Region.shape[:2]
+                UpscaledRegion = cv2.resize(UpscaledRegion, (RegionWidth * UpscaleFactorX, RegionHeight * UpscaleFactorY), interpolation=cv2.INTER_CUBIC)
+                UX1 = X1 * UpscaleFactorX
+                UY1 = Y1 * UpscaleFactorY
+                UX2 = UX1 + UpscaledRegion.shape[1]
+                UY2 = UY1 + UpscaledRegion.shape[0]
+                OutputFrame[UY1:UY2, UX1:UX2] = UpscaledRegion
+            RegionLog = '🟩'
+            UseRegions = True
+        else:
+            OutputFrame = self.UpscaleFullFrame(Model, Frame)
+        return OutputFrame, SimilarityPercentage, Rectangles, RegionLog, UseRegions
+
+    @spaces.GPU
+    def Process(self, InputVideo, InputModel, InputUseRegions, InputThreshold, InputMinPercentage, InputMaxRectangles, InputPadding, InputSegmentRows, InputSegmentColumns, InputFullFrameInterval, Progress=App.Progress()):
         if not InputVideo:
             Logger.warning('❌ No Video Provided')
             App.Warning('❌ No Video Provided')
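The overlap fix is visible in UpscaleRegions above: instead of upscaling changed regions and pasting them back into the low-resolution frame, rectangles are now mapped into the coordinate space of the previously upscaled frame, so each patch lands exactly where it belongs. A standalone sketch of that mapping, assuming an integer model scale factor (the numbers are illustrative):

```python
def MapRegionToUpscaled(X1, Y1, PatchHeight, PatchWidth, ScaleX, ScaleY):
    # The top-left corner scales by the model's upscale factor; the paste
    # box extent comes from the upscaled patch itself, so the destination
    # slice always matches the patch dimensions exactly.
    UX1, UY1 = X1 * ScaleX, Y1 * ScaleY
    return UX1, UY1, UX1 + PatchWidth * ScaleX, UY1 + PatchHeight * ScaleY

# A 2x model and a 64x60 low-res region whose top-left corner is (192, 120):
UX1, UY1, UX2, UY2 = MapRegionToUpscaled(192, 120, 60, 64, 2, 2)
assert (UX1, UY1, UX2, UY2) == (384, 240, 512, 360)
```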
@@ -146,76 +201,53 @@ class Upscaler:
         StartTime = time.time()
         Times = []
 
+        CurrentFrameIndex = 0
+        PrevFrame = None
+        UpscaledPrevFrame = None
+
         while True:
             Ret, Frame = Video.read()
             if not Ret:
                 break
 
-
-            FrameForTorch = FrameRgb.transpose(2, 0, 1)
-            FrameForTorch = torch.from_numpy(FrameForTorch).unsqueeze(0).to(Device).float() / 255.0
-
-            RetNext, NextFrame = Video.read()
-            if not RetNext:
-                NextFrame = Frame
 
-
-                Frame,
-                NextFrame,
-                Threshold=InputThreshold,
-                Rows=12,
-                Columns=20,
-                Padding=InputPadding
 
-
-
-
-
-
-
-            Cols = DiffResult['Columns']
-            Rows = DiffResult['Rows']
-            FrameHeight, FrameWidth = Frame.shape[:2]
-            SegmentWidth = FrameWidth // Cols
-            SegmentHeight = FrameHeight // Rows
-            for X, Y, W, H in Rectangles:
-                X1 = X * SegmentWidth
-                Y1 = Y * SegmentHeight
-                X2 = FrameWidth if X + W == Cols else X1 + W * SegmentWidth
-                Y2 = FrameHeight if Y + H == Rows else Y1 + H * SegmentHeight
-
-                Region = Frame[Y1:Y2, X1:X2]
-                RegionRgb = cv2.cvtColor(Region, cv2.COLOR_BGR2RGB)
-                RegionTorch = torch.from_numpy(RegionRgb.transpose(2, 0, 1)).unsqueeze(0).to(Device).float() / 255.0
-                UpscaledRegion = Model(RegionTorch)[0].cpu().numpy().transpose(1, 2, 0) * 255.0  # type: ignore
-                UpscaledRegion = cv2.cvtColor(UpscaledRegion.astype('uint8'), cv2.COLOR_RGB2BGR)
-                RegionHeight, RegionWidth = Region.shape[:2]
-                UpscaledRegion = cv2.resize(UpscaledRegion, (RegionWidth, RegionHeight), interpolation=cv2.INTER_CUBIC)
-                Frame[Y1:Y2, X1:X2] = UpscaledRegion
-            OutputFrame = Frame
+            CurrentFrameIndex += 1
+
+            ForceFull = (CurrentFrameIndex == 1 or not InputUseRegions or (InputFullFrameInterval > 0 and CurrentFrameIndex % InputFullFrameInterval == 0))
+
+            if ForceFull:
+                OutputFrame = self.UpscaleFullFrame(Model, Frame)
+                SimilarityPercentage = 0
+                Rectangles = []
+                RegionLog = '🟥'
+                UseRegions = False
             else:
-
-
-
-                OutputFrame = cv2.resize(OutputFrame, (Width, Height), interpolation=cv2.INTER_CUBIC)
+                OutputFrame, SimilarityPercentage, Rectangles, RegionLog, UseRegions = self.UpscaleRegions(
+                    Model, Frame, PrevFrame, UpscaledPrevFrame, InputThreshold, InputMinPercentage, InputMaxRectangles, InputPadding, InputSegmentRows, InputSegmentColumns
+                )
 
-            CurrentFrameNumber = int(Video.get(cv2.CAP_PROP_POS_FRAMES))
             if Times:
                 AverageTime = sum(Times) / len(Times)
-                Eta = HumanizeSeconds((FrameCount -
+                Eta = HumanizeSeconds((FrameCount - CurrentFrameIndex) * AverageTime)
             else:
                 Eta = None
 
-
-
+            if UseRegions:
+                Logger.info(f'{RegionLog} Frame {CurrentFrameIndex}: {SimilarityPercentage:.2f}% Similar, {len(Rectangles)} Regions To Upscale')
+            else:
+                Logger.info(f'{RegionLog} Frame {CurrentFrameIndex}: Upscaling Full Frame')
+
+            Progress(FrameProgress, desc=f'📦 Processed Frame {CurrentFrameIndex}/{FrameCount} - {Eta}')
 
-            cv2.imwrite(f'{TempDir}/Upscaled_Frame_{
+            cv2.imwrite(f'{TempDir}/Upscaled_Frame_{CurrentFrameIndex:05d}.png', OutputFrame)
 
             DeltaTime = time.time() - StartTime
             Times.append(DeltaTime)
             StartTime = time.time()
             FrameProgress += PerFrameProgress
 
+            PrevFrame = Frame.copy()
+            UpscaledPrevFrame = OutputFrame.copy()
+
         Progress(1, desc='📦 Cleaning Up')
         self.CleanUp()
         return InputVideo, InputVideo
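The new `InputFullFrameInterval` guard above forces a clean full-frame upscale every N frames, which keeps stale patches from lingering in `UpscaledPrevFrame`. A minimal sketch of the cadence, mirroring the condition in the hunk (the interval value is illustrative):

```python
def ShouldForceFullFrame(FrameIndex, UseRegions, Interval):
    # Frame 1 has no previous frame to diff against; region mode being
    # disabled, or a hit on the interval, also forces a full pass.
    return FrameIndex == 1 or not UseRegions or (Interval > 0 and FrameIndex % Interval == 0)

# With Interval=5, frames 1, 5, and 10 of the first eleven get a full upscale:
assert [I for I in range(1, 12) if ShouldForceFullFrame(I, True, 5)] == [1, 5, 10]
```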
@@ -225,30 +257,53 @@ class Upscaler:
 # ============================== #
 
 with App.Blocks(
-    title='Video Upscaler', theme=Theme, delete_cache=(-1, 1800)
+    title='Zero2x Video Upscaler', theme=Theme, delete_cache=(-1, 1800)
 ) as Interface:
-    App.Markdown('# 🎞️ Video Upscaler')
-    App.
-
-
-
-
+    App.Markdown('# 🎞️ Zero2x Video Upscaler')
+    with App.Accordion(label='⚙️ About Zero2x', open=False):
+        App.Markdown('''
+        **Zero2x** is a work-in-progress video upscaling tool that uses deep learning models to enhance your videos frame by frame.
+        This app leverages region-based difference detection to speed up processing and reduce unnecessary computation.
+
+        ---
+
+        ## ✨ Features
+
+        - **Multiple Upscaling Models:** Choose from a selection of pre-trained models for different styles and quality.
+        - **Region-Based Upscaling:** Only upscale parts of the frame that have changed, making processing faster and more memory-efficient.
+        - **Full Frame Upscaling:** Optionally upscale every frame in its entirety for maximum quality.
+        - **Customizable Settings:** Fine-tune thresholds, padding, and region detection for your specific needs.
+        - **Progress Tracking:** See estimated time remaining and per-frame progress.
+        - **Downloadable Results:** Download your upscaled video when processing is complete.
+
+        ---
+
+        ## 🧑‍🔬 Technique
+
+        This app uses my Segmented Absolute Differences (SAD) program to compare each frame with the previous one.
+        If only small regions have changed, only those regions are upscaled using the selected model.
+        If the whole frame is different, the entire frame is upscaled.
+        This hybrid approach balances speed and quality.
+
+        ---
+
+        ## 🚧 Work In Progress
+
+        - More models and settings will be added soon.
+        - Some features may be experimental or incomplete.
+        - Feedback and suggestions are welcome!
+        - The quality of the upscaled video may vary depending on the model and settings used.
+
+        ---
+
+        **Tip:** If you encounter CUDA out-of-memory errors, try increasing the segment grid size or lowering the region count.
+        **Note:** I named this project Zero2x because I was inspired by Video2x but wanted my own version with a different approach.
+        It runs on Hugging Face's ZeroGPU hardware, which is where the name comes from.
+
+        ''')
     with App.Row():
         with App.Column():
             with App.Group():
-                with App.Accordion(label='📖 Instructions', open=False):
-                    App.Markdown('''
-                    ### How To Use The Video Upscaler
-                    1. **Upload A Video:** Begin by uploading your video file using the 'Input Video' section.
-                    2. **Select A Model:** Choose an appropriate upscaling model from the 'Select Model' dropdown menu.
-                    3. **Adjust Settings (Optional):**
-                    Modify the 'Frame Rate' slider if you want to change the output video's frame rate.
-                    Adjust the 'Tile Grid Size' for memory optimization. Larger models might require a higher grid size, but processing could be slower.
-                    4. **Start Processing:** Click the '🚀 Upscale Video' button to begin the upscaling process.
-                    5. **Download The Result:** Once the process is complete, download the upscaled video using the '💾 Download Video' button.
-                    > Tip: If you get a CUDA out of memory error, try increasing the Tile Grid Size. This will split the image into smaller tiles for processing, which can help reduce memory usage.
-                    ''')
                 InputVideo = App.Video(
                     label='Input Video', sources=['upload'], height=300
                 )
@@ -260,6 +315,15 @@ with App.Blocks(
                     value=ModelNames[0],
                 )
                 with App.Accordion(label='⚙️ Advanced Settings', open=False):
+                    with App.Accordion(label='📖 Settings Explained', open=False):
+                        App.Markdown('''
+                        - **Use Regions:** When enabled, only changed areas between frames are upscaled. This is faster but may miss subtle changes.
+                        - **Threshold:** Controls how sensitive the difference detection is. High values can introduce mismatched regions, so be careful.
+                        - **Padding:** Adds extra cells around detected regions so changes just outside them are still included.
+                        - **Min Percentage:** If the similarity between frames is above this value, only regions are upscaled; otherwise, the full frame is upscaled.
+                        - **Max Rectangles:** Limits the number of regions to process per frame for performance.
+                        - **Segment Rows/Columns:** Controls the grid size for region detection. More segments allow finer detection but may increase processing time.
+                        ''')
                     with App.Group():
                         InputUseRegions = App.Checkbox(
                             label='Use Regions',
@@ -269,9 +333,9 @@ with App.Blocks(
                         )
                         InputThreshold = App.Slider(
                             label='Threshold',
-                            value=
+                            value=1,
                             minimum=0,
-                            maximum=
+                            maximum=10,
                             step=0.5,
                             info='Threshold for the SAD algorithm to detect different regions',
                             interactive=False
@@ -287,7 +351,7 @@ with App.Blocks(
                         )
                         InputMinPercentage = App.Slider(
                             label='Min Percentage',
-                            value=
+                            value=50,
                             minimum=0,
                             maximum=100,
                             step=1,
@@ -298,11 +362,39 @@ with App.Blocks(
                             label='Max Rectangles',
                             value=8,
                             minimum=1,
-                            maximum=
+                            maximum=15,
                             step=1,
                             info='Maximum number of rectangles to consider upscaling the full frame',
                             interactive=False
                         )
+                        with App.Row():
+                            InputSegmentRows = App.Slider(
+                                label='Segment Rows',
+                                value=12,
+                                minimum=1,
+                                maximum=20,
+                                step=1,
+                                info='Number of rows to segment the video into for processing',
+                                interactive=False
+                            )
+                            InputSegmentColumns = App.Slider(
+                                label='Segment Columns',
+                                value=20,
+                                minimum=1,
+                                maximum=30,
+                                step=1,
+                                info='Number of columns to segment the video into for processing',
+                                interactive=False
+                            )
+                        InputFullFrameInterval = App.Slider(
+                            label='Full Frame Interval',
+                            value=5,
+                            minimum=1,
+                            maximum=30,
+                            step=1,
+                            info='Force a full-frame upscale every N frames (set to 1 to always upscale full frame)',
+                            interactive=False
+                        )
                 SubmitButton = App.Button('🚀 Upscale Video')
 
         with App.Column(show_progress=True):
@@ -320,12 +412,15 @@ with App.Blocks(
             App.update(interactive=UseRegions),
             App.update(interactive=UseRegions),
             App.update(interactive=UseRegions),
+            App.update(interactive=UseRegions),
+            App.update(interactive=UseRegions),
+            App.update(interactive=UseRegions)
         )
 
     InputUseRegions.change(
         fn=ToggleRegionInputs,
         inputs=[InputUseRegions],
-        outputs=[InputThreshold, InputMinPercentage, InputMaxRectangles, InputPadding],
+        outputs=[InputThreshold, InputMinPercentage, InputMaxRectangles, InputPadding, InputSegmentRows, InputSegmentColumns, InputFullFrameInterval],
    )
 
    SubmitButton.click(
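ToggleRegionInputs must return exactly one `App.update` per component in the `outputs` list, which is why three more updates are added alongside the three new sliders. A reduced sketch of the pattern with illustrative component names, not taken from the commit:

```python
import gradio as App

with App.Blocks() as Demo:
    Enable = App.Checkbox(label='Enable')
    SliderA = App.Slider(label='A', interactive=False)
    SliderB = App.Slider(label='B', interactive=False)

    def Toggle(On):
        # One update per output component, in the same order as `outputs`
        return App.update(interactive=On), App.update(interactive=On)

    Enable.change(fn=Toggle, inputs=[Enable], outputs=[SliderA, SliderB])
```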
@@ -337,7 +432,10 @@ with App.Blocks(
            InputThreshold,
            InputMinPercentage,
            InputMaxRectangles,
-           InputPadding
+           InputPadding,
+           InputSegmentRows,
+           InputSegmentColumns,
+           InputFullFrameInterval
        ],
        outputs=[OutputVideo, OutputDownload],
    )
Models/2x_OpenProteus_Compact_i2_70K.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a943566242a830ace576b768921a63ddde2baf826b90712e869d937603008708
+size 2419483
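The new checkpoint is stored via Git LFS, so only the pointer (hash and size) appears in the diff. Loading it follows the same spandrel pattern the app itself uses; a minimal sketch, assuming the LFS object has been pulled so the `.pth` file exists locally:

```python
from spandrel import ModelLoader
import torch

Device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Same chain App-Old.py uses in Upscaler.LoadModel below
Model = (
    ModelLoader()
    .load_from_file('Models/2x_OpenProteus_Compact_i2_70K.pth')
    .to(Device)
    .eval()
)
```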
Scripts/App-Old.py
ADDED
@@ -0,0 +1,433 @@
+from spandrel import ModelLoader
+import torch
+from pathlib import Path
+import gradio as App
+import logging
+import spaces
+import time
+import cv2
+import os
+
+from gradio import themes
+from rich.console import Console
+from rich.logging import RichHandler
+
+from Scripts.SAD import GetDifferenceRectangles
+
+# ============================== #
+#        Core Settings          #
+# ============================== #
+
+Theme = themes.Citrus(
+    primary_hue='blue',
+    secondary_hue='blue',
+    radius_size=themes.sizes.radius_xxl
+).set(
+    link_text_color='blue'
+)
+ModelDir = Path('./Models')
+TempDir = Path('./Temp')
+os.environ['GRADIO_TEMP_DIR'] = str(TempDir)
+ModelFileType = '.pth'
+
+# ============================== #
+#            Logging            #
+# ============================== #
+
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(message)s',
+    datefmt='[%X]',
+    handlers=[RichHandler(
+        console=Console(),
+        rich_tracebacks=True,
+        omit_repeated_times=False,
+        markup=True,
+        show_path=False,
+    )],
+)
+Logger = logging.getLogger('Zero2x')
+logging.getLogger('httpx').setLevel(logging.WARNING)
+
+# ============================== #
+#     Device Configuration      #
+# ============================== #
+
+@spaces.GPU
+def GetDeviceName():
+    Device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+    Logger.info(f'🧪 Using device: {str(Device).upper()}')
+    return Device
+
+Device = GetDeviceName()
+
+# ============================== #
+#       Utility Functions       #
+# ============================== #
+
+def HumanizeSeconds(Seconds):
+    Hours = int(Seconds // 3600)
+    Minutes = int((Seconds % 3600) // 60)
+    Seconds = int(Seconds % 60)
+
+    if Hours > 0:
+        return f'{Hours}h {Minutes}m {Seconds}s'
+    elif Minutes > 0:
+        return f'{Minutes}m {Seconds}s'
+    else:
+        return f'{Seconds}s'
+
+def HumanizedBytes(Size):
+    Units = ['B', 'KB', 'MB', 'GB', 'TB']
+    Index = 0
+    while Size >= 1024 and Index < len(Units) - 1:
+        Size /= 1024.0
+        Index += 1
+    return f'{Size:.2f} {Units[Index]}'
+
+# ============================== #
+#     Main Processing Logic     #
+# ============================== #
+
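A quick check of what these two helpers return, assuming they are in scope as defined above (values worked out by hand from the code):

```python
# 3725 seconds is 1 hour, 2 minutes, 5 seconds
assert HumanizeSeconds(3725) == '1h 2m 5s'
# The size of the new model checkpoint: 2419483 bytes ≈ 2.31 MB
assert HumanizedBytes(2419483) == '2.31 MB'
```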
+class Upscaler:
+    def __init__(self):
+        pass
+
+    def ListModels(self):
+        Models = sorted(
+            [File.name for File in ModelDir.glob('*' + ModelFileType) if File.is_file()]
+        )
+        Logger.info(f'📂 Found {len(Models)} Models In Directory')
+        return Models
+
+    def LoadModel(self, ModelName):
+        torch.cuda.empty_cache()
+        Model = (
+            ModelLoader()
+            .load_from_file(ModelDir / (ModelName + ModelFileType))
+            .to(Device)
+            .eval()
+        )
+        Logger.info(f'🤖 Loaded Model {ModelName} Onto {str(Device).upper()}')
+        return Model
+
+    def UnloadModel(self):
+        if Device.type == 'cuda':
+            torch.cuda.empty_cache()
+        Logger.info('🤖 Model Unloaded Successfully')
+
+    def CleanUp(self):
+        self.UnloadModel()
+        Logger.info('🧹 Temporary Files Cleaned Up')
+
+    @spaces.GPU
+    def UpscaleFullFrame(self, Model, Frame, Width, Height):
+        FrameRgb = cv2.cvtColor(Frame, cv2.COLOR_BGR2RGB)
+        FrameForTorch = FrameRgb.transpose(2, 0, 1)
+        FrameForTorch = torch.from_numpy(FrameForTorch).unsqueeze(0).to(Device).float() / 255.0
+        OutputFrame = Model(FrameForTorch)[0].cpu().numpy().transpose(1, 2, 0) * 255.0
+        OutputFrame = cv2.cvtColor(OutputFrame.astype('uint8'), cv2.COLOR_RGB2BGR)
+        return OutputFrame
+
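UpscaleFullFrame converts OpenCV's HWC/BGR uint8 frame into the NCHW/RGB float tensor that spandrel models expect, then inverts the transform on the way out. A standalone sketch of the round trip with a dummy frame and no model involved (the explicit rounding here is an addition to avoid float truncation in the check):

```python
import numpy as np
import torch

Frame = np.random.randint(0, 256, (720, 1280, 3), dtype=np.uint8)  # HWC, BGR

# BGR -> RGB, HWC -> CHW, add batch dim, scale to [0, 1]
Tensor = torch.from_numpy(Frame[..., ::-1].copy().transpose(2, 0, 1)).unsqueeze(0).float() / 255.0
assert Tensor.shape == (1, 3, 720, 1280)  # NCHW, RGB

# Inverse: drop batch dim, CHW -> HWC, scale back, RGB -> BGR
Back = (Tensor[0].numpy().transpose(1, 2, 0) * 255.0).round().astype(np.uint8)[..., ::-1]
assert (Back == Frame).all()
```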
+    @spaces.GPU
+    def UpscaleRegions(self, Model, Frame, PrevFrame, InputThreshold, InputMinPercentage, InputMaxRectangles, InputPadding, InputSegmentRows, InputSegmentColumns):
+        DiffResult = GetDifferenceRectangles(
+            PrevFrame,
+            Frame,
+            Threshold=InputThreshold,
+            Rows=InputSegmentRows,
+            Columns=InputSegmentColumns,
+            Padding=InputPadding
+        )
+        SimilarityPercentage = DiffResult['SimilarPercentage']
+        Rectangles = DiffResult['Rectangles']
+        Cols = DiffResult['Columns']
+        Rows = DiffResult['Rows']
+        FrameHeight, FrameWidth = Frame.shape[:2]
+        SegmentWidth = FrameWidth // Cols
+        SegmentHeight = FrameHeight // Rows
+        UseRegions = False
+        RegionLog = '🟥'
+        if SimilarityPercentage > InputMinPercentage and len(Rectangles) < InputMaxRectangles:
+            UpscaledPrevFrame = self.UpscaleFullFrame(Model, PrevFrame, FrameWidth, FrameHeight)
+            UpscaleFactorY = UpscaledPrevFrame.shape[0] // FrameHeight
+            UpscaleFactorX = UpscaledPrevFrame.shape[1] // FrameWidth
+            OutputFrame = UpscaledPrevFrame.copy()
+            for X, Y, W, H in Rectangles:
+                X1 = X * SegmentWidth
+                Y1 = Y * SegmentHeight
+                X2 = FrameWidth if X + W == Cols else X1 + W * SegmentWidth
+                Y2 = FrameHeight if Y + H == Rows else Y1 + H * SegmentHeight
+                Region = Frame[Y1:Y2, X1:X2]
+                RegionRgb = cv2.cvtColor(Region, cv2.COLOR_BGR2RGB)
+                RegionTorch = torch.from_numpy(RegionRgb.transpose(2, 0, 1)).unsqueeze(0).to(Device).float() / 255.0
+                UpscaledRegion = Model(RegionTorch)[0].cpu().numpy().transpose(1, 2, 0) * 255.0
+                UpscaledRegion = cv2.cvtColor(UpscaledRegion.astype('uint8'), cv2.COLOR_RGB2BGR)
+                RegionHeight, RegionWidth = Region.shape[:2]
+                UpscaledRegion = cv2.resize(UpscaledRegion, (RegionWidth * UpscaleFactorX, RegionHeight * UpscaleFactorY), interpolation=cv2.INTER_CUBIC)
+                UX1 = X1 * UpscaleFactorX
+                UY1 = Y1 * UpscaleFactorY
+                UX2 = UX1 + UpscaledRegion.shape[1]
+                UY2 = UY1 + UpscaledRegion.shape[0]
+                OutputFrame[UY1:UY2, UX1:UX2] = UpscaledRegion
+            RegionLog = '🟩'
+            UseRegions = True
+        else:
+            OutputFrame = self.UpscaleFullFrame(Model, Frame, FrameWidth, FrameHeight)
+        return OutputFrame, SimilarityPercentage, Rectangles, RegionLog, UseRegions
+
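GetDifferenceRectangles returns rectangles in grid cells, which the loop above converts to pixel boxes. A worked example of that conversion, assuming a 1280x720 frame on the default 20x12 grid:

```python
FrameWidth, FrameHeight, Cols, Rows = 1280, 720, 20, 12
SegmentWidth, SegmentHeight = FrameWidth // Cols, FrameHeight // Rows  # 64 x 60 pixel cells

# A rectangle 2 cells wide and 1 cell tall whose top-left cell is (3, 2)
X, Y, W, H = 3, 2, 2, 1
X1, Y1 = X * SegmentWidth, Y * SegmentHeight
# Rectangles touching the last column/row snap to the frame edge,
# so leftover pixels from the integer division are not dropped
X2 = FrameWidth if X + W == Cols else X1 + W * SegmentWidth
Y2 = FrameHeight if Y + H == Rows else Y1 + H * SegmentHeight
assert (X1, Y1, X2, Y2) == (192, 120, 320, 180)
```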
+    @spaces.GPU
+    def Process(self, InputVideo, InputModel, InputUseRegions, InputThreshold, InputMinPercentage, InputMaxRectangles, InputPadding, InputSegmentRows, InputSegmentColumns, Progress=App.Progress()):
+        if not InputVideo:
+            Logger.warning('❌ No Video Provided')
+            App.Warning('❌ No Video Provided')
+            return None, None
+
+        Progress(0, desc='⚙️ Loading Model')
+        Model = self.LoadModel(InputModel)
+
+        Logger.info(f'🖼️ Processing Video: {Path(InputVideo).name}')
+        Progress(0, desc='🖼️ Processing Video')
+        Video = cv2.VideoCapture(InputVideo)
+
+        FrameRate = Video.get(cv2.CAP_PROP_FPS)
+        FrameCount = int(Video.get(cv2.CAP_PROP_FRAME_COUNT))
+        Width = int(Video.get(cv2.CAP_PROP_FRAME_WIDTH))
+        Height = int(Video.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+        Logger.info(f'📊 Video Properties: {FrameCount} Frames, {FrameRate} FPS, {Width}x{Height}')
+
+        PerFrameProgress = 1 / FrameCount
+        FrameProgress = 0.0
+        StartTime = time.time()
+        Times = []
+
+        CurrentFrameIndex = 0
+        PrevFrame = None
+
+        while True:
+            Ret, Frame = Video.read()
+            if not Ret:
+                break
+
+            CurrentFrameIndex += 1
+
+            if CurrentFrameIndex == 1 or not InputUseRegions:
+                OutputFrame = self.UpscaleFullFrame(Model, Frame, Width, Height)
+                SimilarityPercentage = 0
+                Rectangles = []
+                RegionLog = '🟥'
+                UseRegions = False
+            else:
+                OutputFrame, SimilarityPercentage, Rectangles, RegionLog, UseRegions = self.UpscaleRegions(
+                    Model, Frame, PrevFrame, InputThreshold, InputMinPercentage, InputMaxRectangles, InputPadding, InputSegmentRows, InputSegmentColumns
+                )
+
+            if Times:
+                AverageTime = sum(Times) / len(Times)
+                Eta = HumanizeSeconds((FrameCount - CurrentFrameIndex) * AverageTime)
+            else:
+                Eta = None
+
+            if UseRegions:
+                Logger.info(f'{RegionLog} Frame {CurrentFrameIndex}: {SimilarityPercentage:.2f}% Similar, {len(Rectangles)} Regions To Upscale')
+            else:
+                Logger.info(f'{RegionLog} Frame {CurrentFrameIndex}: Upscaling Full Frame')
+
+            Progress(FrameProgress, desc=f'📦 Processed Frame {CurrentFrameIndex}/{FrameCount} - {Eta}')
+
+            cv2.imwrite(f'{TempDir}/Upscaled_Frame_{CurrentFrameIndex:05d}.png', OutputFrame)
+
+            DeltaTime = time.time() - StartTime
+            Times.append(DeltaTime)
+            StartTime = time.time()
+            FrameProgress += PerFrameProgress
+
+            PrevFrame = Frame.copy()
+
+        Progress(1, desc='📦 Cleaning Up')
+        self.CleanUp()
+        return InputVideo, InputVideo
+
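Compared with the new App.py above, this old Process keeps no cache of its own output: UpscaleRegions re-upscales PrevFrame from scratch on every region frame, costing an extra full-frame model pass per frame. The new code threads UpscaledPrevFrame through the loop instead. A self-contained sketch that only counts full-frame passes, with placeholder functions standing in for the real methods (region rectangles still cost per-rectangle model calls in both versions):

```python
ModelPasses = 0

def UpscaleFull(Frame):
    global ModelPasses
    ModelPasses += 1  # count full-frame model passes
    return Frame      # placeholder for a real upscale

def PasteRegions(Base, Frame):
    return Base       # placeholder for the rectangle-pasting loop

Frames = list(range(100))

# Old flow: rebuild the paste base from PrevFrame on every frame
ModelPasses, Prev = 0, None
for F in Frames:
    Out = PasteRegions(UpscaleFull(Prev), F) if Prev is not None else UpscaleFull(F)
    Prev = F
assert ModelPasses == 100

# New flow: carry the previous output, so only frame 1 needs a full pass
ModelPasses, Prev, UpscaledPrev = 0, None, None
for F in Frames:
    Out = PasteRegions(UpscaledPrev, F) if Prev is not None else UpscaleFull(F)
    Prev, UpscaledPrev = F, Out
assert ModelPasses == 1
```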
+# ============================== #
+#        Streamlined UI         #
+# ============================== #
+
+with App.Blocks(
+    title='Zero2x Video Upscaler', theme=Theme, delete_cache=(-1, 1800)
+) as Interface:
+    App.Markdown('# 🎞️ Zero2x Video Upscaler')
+    with App.Accordion(label='⚙️ About Zero2x', open=False):
+        App.Markdown('''
+        **Zero2x** is a work-in-progress video upscaling tool that uses deep learning models to enhance your videos frame by frame.
+        This app leverages region-based difference detection to speed up processing and reduce unnecessary computation.
+
+        ---
+
+        ## ✨ Features
+
+        - **Multiple Upscaling Models:** Choose from a selection of pre-trained models for different styles and quality.
+        - **Region-Based Upscaling:** Only upscale parts of the frame that have changed, making processing faster and more memory-efficient.
+        - **Full Frame Upscaling:** Optionally upscale every frame in its entirety for maximum quality.
+        - **Customizable Settings:** Fine-tune thresholds, padding, and region detection for your specific needs.
+        - **Progress Tracking:** See estimated time remaining and per-frame progress.
+        - **Downloadable Results:** Download your upscaled video when processing is complete.
+
+        ---
+
+        ## 🧑‍🔬 Technique
+
+        This app uses my Segmented Absolute Differences (SAD) program to compare each frame with the previous one.
+        If only small regions have changed, only those regions are upscaled using the selected model.
+        If the whole frame is different, the entire frame is upscaled.
+        This hybrid approach balances speed and quality.
+
+        ---
+
+        ## 🚧 Work In Progress
+
+        - More models and settings will be added soon.
+        - Some features may be experimental or incomplete.
+        - Feedback and suggestions are welcome!
+        - The quality of the upscaled video may vary depending on the model and settings used.
+
+        ---
+
+        **Tip:** If you encounter CUDA out-of-memory errors, try increasing the segment grid size or lowering the region count.
+        **Note:** I named this project Zero2x because I was inspired by Video2x but wanted my own version with a different approach.
+        It runs on Hugging Face's ZeroGPU hardware, which is where the name comes from.
+
+        ''')
+    with App.Row():
+        with App.Column():
+            with App.Group():
+                InputVideo = App.Video(
+                    label='Input Video', sources=['upload'], height=300
+                )
+                ModelList = Upscaler().ListModels()
+                ModelNames = [Path(Model).stem for Model in ModelList]
+                InputModel = App.Dropdown(
+                    choices=ModelNames,
+                    label='Select Model',
+                    value=ModelNames[0],
+                )
+                with App.Accordion(label='⚙️ Advanced Settings', open=False):
+                    with App.Accordion(label='📖 Settings Explained', open=False):
+                        App.Markdown('''
+                        - **Use Regions:** When enabled, only changed areas between frames are upscaled. This is faster but may miss subtle changes.
+                        - **Threshold:** Controls how sensitive the difference detection is. Lower values detect smaller changes.
+                        - **Padding:** Adds extra pixels around detected regions to avoid artifacts at the edges.
+                        - **Min Percentage:** If the similarity between frames is above this value, only regions are upscaled; otherwise, the full frame is upscaled.
+                        - **Max Rectangles:** Limits the number of regions to process per frame for performance.
+                        - **Segment Rows/Columns:** Controls the grid size for region detection. More segments allow finer detection but may increase processing time.
+                        ''')
+                    with App.Group():
+                        InputUseRegions = App.Checkbox(
+                            label='Use Regions',
+                            value=False,
+                            info='Use regions to upscale only the different parts of the video (⚡️ Experimental, Faster)',
+                            interactive=True
+                        )
+                        InputThreshold = App.Slider(
+                            label='Threshold',
+                            value=5,
+                            minimum=0,
+                            maximum=20,
+                            step=0.5,
+                            info='Threshold for the SAD algorithm to detect different regions',
+                            interactive=False
+                        )
+                        InputPadding = App.Slider(
+                            label='Padding',
+                            value=1,
+                            minimum=0,
+                            maximum=5,
+                            step=1,
+                            info='Extra padding to include neighboring pixels in the SAD algorithm',
+                            interactive=False
+                        )
+                        InputMinPercentage = App.Slider(
+                            label='Min Percentage',
+                            value=70,
+                            minimum=0,
+                            maximum=100,
+                            step=1,
+                            info='Minimum percentage of similarity to consider upscaling the full frame',
+                            interactive=False
+                        )
+                        InputMaxRectangles = App.Slider(
+                            label='Max Rectangles',
+                            value=8,
+                            minimum=1,
+                            maximum=10,
+                            step=1,
+                            info='Maximum number of rectangles to consider upscaling the full frame',
+                            interactive=False
+                        )
+                        with App.Row():
+                            InputSegmentRows = App.Slider(
+                                label='Segment Rows',
+                                value=12,
+                                minimum=1,
+                                maximum=20,
+                                step=1,
+                                info='Number of rows to segment the video into for processing',
+                                interactive=False
+                            )
+                            InputSegmentColumns = App.Slider(
+                                label='Segment Columns',
+                                value=20,
+                                minimum=1,
+                                maximum=20,
+                                step=1,
+                                info='Number of columns to segment the video into for processing',
+                                interactive=False
+                            )
+                SubmitButton = App.Button('🚀 Upscale Video')
+
+        with App.Column(show_progress=True):
+            with App.Group():
+                OutputVideo = App.Video(
+                    label='Output Video', height=300, interactive=False, format=None
+                )
+                OutputDownload = App.DownloadButton(
+                    label='💾 Download Video', interactive=False
+                )
+
+    def ToggleRegionInputs(UseRegions):
+        return (
+            App.update(interactive=UseRegions),
+            App.update(interactive=UseRegions),
+            App.update(interactive=UseRegions),
+            App.update(interactive=UseRegions),
+            App.update(interactive=UseRegions),
+            App.update(interactive=UseRegions)
+        )
+
+    InputUseRegions.change(
+        fn=ToggleRegionInputs,
+        inputs=[InputUseRegions],
+        outputs=[InputThreshold, InputMinPercentage, InputMaxRectangles, InputPadding, InputSegmentRows, InputSegmentColumns],
+    )
+
+    SubmitButton.click(
+        fn=Upscaler().Process,
+        inputs=[
+            InputVideo,
+            InputModel,
+            InputUseRegions,
+            InputThreshold,
+            InputMinPercentage,
+            InputMaxRectangles,
+            InputPadding,
+            InputSegmentRows,
+            InputSegmentColumns,
+        ],
+        outputs=[OutputVideo, OutputDownload],
+    )
+
+if __name__ == '__main__':
+    os.makedirs(ModelDir, exist_ok=True)
+    os.makedirs(TempDir, exist_ok=True)
+    Logger.info('🚀 Starting Video Upscaler')
+    Interface.launch(pwa=True)