Hyphonical committed on
Commit b09e573 · 1 Parent(s): 66c8ea4

🚀 Upload Project From My GitHub

.gitattributes CHANGED
@@ -33,3 +33,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.jpg filter=lfs diff=lfs merge=lfs -text
+ Models/* filter=lfs diff=lfs merge=lfs -text
+ Temp/* filter=lfs diff=lfs merge=lfs -text
+ *.webm filter=lfs diff=lfs merge=lfs -text
.github/copilot-instructions.md ADDED
@@ -0,0 +1,3 @@
+ Use Pascal Case for Variables, Classes, Functions and Methods
+ Do not include comments under functions
+ use single quotes
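
For illustration only (not part of the commit), a short sketch of code following these three conventions; all names here are hypothetical:

    import logging

    Logger = logging.getLogger('StyleExample')

    def ClampValue(InputValue, LowerBound, UpperBound):
        return max(LowerBound, min(InputValue, UpperBound))

    Logger.info(f'Clamped: {ClampValue(300, 0, 255)}')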
Diff.py ADDED
@@ -0,0 +1,151 @@
+ import numpy as np
+ import cv2
+ import time
+ import logging
+
+ # Set up logging
+ logging.basicConfig(level=logging.INFO)
+ Logger = logging.getLogger(__name__)
+
+ def MergeBoxes(Boxes, Padding=5):
+     if len(Boxes) <= 1:
+         return Boxes
+     MergedOccurred = True
+     while MergedOccurred:
+         MergedOccurred = False
+         NewBoxes = []
+         Boxes.sort(key=lambda b: b[0])
+         Used = [False] * len(Boxes)
+         for Index in range(len(Boxes)):
+             if Used[Index]:
+                 continue
+             CurrentBox = list(Boxes[Index])
+             Used[Index] = True
+             for J in range(Index + 1, len(Boxes)):
+                 if Used[J]:
+                     continue
+                 NextBox = Boxes[J]
+                 OverlapX = max(CurrentBox[0], NextBox[0]) <= min(CurrentBox[0] + CurrentBox[2], NextBox[0] + NextBox[2]) + Padding
+                 OverlapY = max(CurrentBox[1], NextBox[1]) <= min(CurrentBox[1] + CurrentBox[3], NextBox[1] + NextBox[3]) + Padding
+                 if OverlapX and OverlapY:
+                     NewX = min(CurrentBox[0], NextBox[0])
+                     NewY = min(CurrentBox[1], NextBox[1])
+                     NewW = max(CurrentBox[0] + CurrentBox[2], NextBox[0] + NextBox[2]) - NewX
+                     NewH = max(CurrentBox[1] + CurrentBox[3], NextBox[1] + NextBox[3]) - NewY
+                     CurrentBox = [NewX, NewY, NewW, NewH]
+                     Used[J] = True
+                     MergedOccurred = True
+             NewBoxes.append(tuple(CurrentBox))
+         Boxes = NewBoxes
+     return Boxes
+
+ def GetChangeMask(Image1, Image2, Threshold=25, MinArea=100):
+     if Image1.shape != Image2.shape:
+         Logger.warning(f'Image shapes differ: {Image1.shape} vs {Image2.shape}. Resizing Image2.')
+         Image2 = cv2.resize(Image2, (Image1.shape[1], Image1.shape[0]))
+
+     Gray1 = cv2.cvtColor(Image1, cv2.COLOR_BGR2GRAY)
+     Gray2 = cv2.cvtColor(Image2, cv2.COLOR_BGR2GRAY)
+     Blur1 = cv2.GaussianBlur(Gray1, (5, 5), 0)
+     Blur2 = cv2.GaussianBlur(Gray2, (5, 5), 0)
+     DiffFrame = cv2.absdiff(Blur1, Blur2)
+     _, ThresholdCalc = cv2.threshold(DiffFrame, Threshold, 255, cv2.THRESH_BINARY)
+     Kernel = np.ones((5, 5), np.uint8)
+     DilatedThreshold = cv2.dilate(ThresholdCalc, Kernel, iterations=2)
+
+     Contours, _ = cv2.findContours(DilatedThreshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+     OutputMask = np.zeros_like(DilatedThreshold)
+     ValidContours = 0
+     if Contours:
+         for Contour in Contours:
+             if cv2.contourArea(Contour) > MinArea:
+                 cv2.drawContours(OutputMask, [Contour], -1, 255, -1)  # type: ignore
+                 ValidContours += 1
+     Logger.info(f'GetChangeMask: Found {len(Contours)} raw contours, kept {ValidContours} after MinArea filter ({MinArea}).')
+     return OutputMask
+
+ def VisualizeDifferences(Image1Path, Image2Path, OutputPath, Threshold=25, MinArea=100, OutlineColor=(0, 255, 0), FillColor=(0, 180, 0), FillAlpha=0.3):
+     Logger.info(f'🎨 Visualizing differences between {Image1Path} and {Image2Path}')
+     Image1 = cv2.imread(Image1Path)
+     Image2 = cv2.imread(Image2Path)
+
+     if Image1 is None or Image2 is None:
+         Logger.error(f'❌ Error loading images for visualization: {Image1Path} or {Image2Path}')
+         return
+
+     if Image1.shape != Image2.shape:
+         Logger.warning(f'⚠️ Image shapes differ: {Image1.shape} vs {Image2.shape}. Resizing Image2 for visualization.')
+         Image2 = cv2.resize(Image2, (Image1.shape[1], Image1.shape[0]))
+
+     ChangedMask = GetChangeMask(Image1, Image2, Threshold, MinArea)
+     OutputImage = Image2.copy()
+     Overlay = OutputImage.copy()
+
+     # Apply fill color to changed areas
+     Overlay[ChangedMask == 255] = FillColor
+     cv2.addWeighted(Overlay, FillAlpha, OutputImage, 1 - FillAlpha, 0, OutputImage)
+
+     # Find contours of the changed areas to draw outlines
+     Contours, _ = cv2.findContours(ChangedMask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+     cv2.drawContours(OutputImage, Contours, -1, OutlineColor, 2)
+     Logger.info(f'🎨 Drew {len(Contours)} difference regions.')
+
+     try:
+         cv2.imwrite(OutputPath, OutputImage)
+         Logger.info(f'💾 Saved difference visualization to {OutputPath}')
+     except Exception as E:
+         Logger.error(f'❌ Failed to save visualization {OutputPath}: {E}')
+
+ # --- Function to be used in App.py for upscaling ---
+ def GetChangedRegions(Image1, Image2, Threshold=25, Padding=10, MinArea=100, MergePadding=5):
+     StartTime = time.time()
+     Logger.info('🔄 Comparing images...')
+
+     if Image1 is None or Image2 is None:
+         Logger.error('❌ Cannot compare None images.')
+         return []
+
+     if Image1.shape != Image2.shape:
+         Logger.warning(f'⚠️ Image shapes differ: {Image1.shape} vs {Image2.shape}. Resizing Image2.')
+         Image2 = cv2.resize(Image2, (Image1.shape[1], Image1.shape[0]))
+
+     Gray1 = cv2.cvtColor(Image1, cv2.COLOR_BGR2GRAY)
+     Gray2 = cv2.cvtColor(Image2, cv2.COLOR_BGR2GRAY)
+     Blur1 = cv2.GaussianBlur(Gray1, (5, 5), 0)
+     Blur2 = cv2.GaussianBlur(Gray2, (5, 5), 0)
+     DiffFrame = cv2.absdiff(Blur1, Blur2)
+     _, ThresholdCalc = cv2.threshold(DiffFrame, Threshold, 255, cv2.THRESH_BINARY)
+     Kernel = np.ones((5, 5), np.uint8)
+     DilatedThreshold = cv2.dilate(ThresholdCalc, Kernel, iterations=2)
+     Contours, _ = cv2.findContours(DilatedThreshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+     Logger.info(f'🔎 Found {len(Contours)} raw contours.')
+
+     BoundingBoxes = []
+     if Contours:
+         ValidContours = 0
+         for Contour in Contours:
+             ContourArea = cv2.contourArea(Contour)
+             if ContourArea > MinArea:
+                 ValidContours += 1
+                 X, Y, W, H = cv2.boundingRect(Contour)
+                 PaddedX = max(0, X - Padding)
+                 PaddedY = max(0, Y - Padding)
+                 MaxW = Image1.shape[1] - PaddedX
+                 MaxH = Image1.shape[0] - PaddedY
+                 PaddedW = min(W + (Padding * 2), MaxW)
+                 PaddedH = min(H + (Padding * 2), MaxH)
+                 BoundingBoxes.append((PaddedX, PaddedY, PaddedW, PaddedH))
+         Logger.info(f'📊 Filtered {ValidContours} contours based on MinArea ({MinArea}).')
+
+     InitialBoxCount = len(BoundingBoxes)
+     MergedBoundingBoxes = MergeBoxes(BoundingBoxes, MergePadding)
+     EndTime = time.time()
+     if MergedBoundingBoxes:
+         Logger.info(f'📦 Merged {InitialBoxCount} boxes into {len(MergedBoundingBoxes)} regions.')
+     else:
+         Logger.info('❌ No significant changed regions found after filtering and merging.')
+     Logger.info(f'⏱️ Region finding took {EndTime - StartTime:.3f}s')
+     return MergedBoundingBoxes
+
+ # Example call for the new visualization function
+ VisualizeDifferences(r'C:\Users\joris\Pictures\frame_01660.png', r'C:\Users\joris\Pictures\frame_01661.png', './Diff.png', 25, 100, (0, 255, 0), (0, 180, 0), 0.3)
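
For illustration only (not part of the commit), a rough sketch of how `GetChangedRegions` might be driven from App.py. The frame paths are hypothetical, and note that importing Diff as-is would also execute the module-level `VisualizeDifferences` example call at the bottom of the file:

    import cv2
    from Diff import GetChangedRegions

    FrameA = cv2.imread('Frames/Frame_000001.png')
    FrameB = cv2.imread('Frames/Frame_000002.png')
    for X, Y, W, H in GetChangedRegions(FrameA, FrameB, Threshold=25, Padding=10):
        Region = FrameB[Y:Y + H, X:X + W]  # crop that could be sent to the upscaler
        print(f'Changed region at ({X}, {Y}) sized {W}x{H}')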
LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2023 Boyang
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
Models/2x_AniSD_G6i1_SPAN_215K.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:12269fb1f76c8f62a3ccf099abcd4d4ef25989a9cf3c023cec77eec6eb9f9f2f
+ size 8958448
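
This and the Models/* entries below are Git LFS pointer files (version, oid, size), not the model weights themselves. For illustration only (not part of the commit), a minimal sketch of reading a pointer's fields, assuming the checkout still contains the pointer rather than the LFS-resolved file:

    from pathlib import Path

    def ReadLfsPointer(PointerPath):
        Fields = dict(Line.split(' ', 1) for Line in Path(PointerPath).read_text().splitlines() if Line)
        return Fields['oid'], int(Fields['size'])

    Oid, Size = ReadLfsPointer('Models/2x_AniSD_G6i1_SPAN_215K.pth')
    print(f'{Oid} ({Size} bytes)')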
Models/2x_AniScale2_Omni_i16_40K.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:df55746a97a22e157cf4a7fd0841da06d7f2b383ee7f7cd940a3a3d4cba5926b
+ size 3405370
Models/2x_ModernSpanimationV2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0fa4785bf6808edf3c9bc859da15444cfb7fdcedb201d2ee38f57f3b5c2ed89d
+ size 8958239
Models/2x_sudo_shuffle_span_10.5m.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:73c650d82db19f571e1c7312629aa9021a7fde409c78f1dc462256784c6e963c
+ size 16493242
Models/BSRGAN.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5d505a0766160921e0388d76e1ddf08cb114303990f9080432bf2b1c988b1c54
+ size 67046751
Models/cugan_pro-conservative-up2x.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b8ae5225d2d515aa3c33ef1318aadc532a42ea5ed8d564471b5a5b586783e964
+ size 5155761
Models/cugan_pro-conservative-up3x.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a9f3c783a04b15c793b95e332bfdac524cfa30ba186cb829c1290593e28ad9e7
+ size 5162673
Models/cugan_pro-denoise3x-up2x.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e80ca8fc7c261e3dc8f4c0ce0656ac5501d71a476543071615c43392dbeb4c0d
+ size 5155761
Models/cugan_pro-denoise3x-up3x.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ddd14e2430db0d75d186c6dda934db34929c50da8a88a0c6f4accb871fe4b70
+ size 5162673
Models/cugan_pro-no-denoise-up2x.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ccce1f535d94c50ce38e268a53687bc7e68ef7215e3c5e6b3bfd1bfc1dacf0fa
+ size 5155761
Models/cugan_pro-no-denoise-up3x.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c14d693a6d3316b8a3eba362e7576f178aea3407e1d89ca0bcb34e1c61269b0f
+ size 5162673
Models/cugan_up2x-latest-conservative.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6cfe3b23687915d08ba96010f25198d9cfe8a683aa4131f1acf7eaa58ee1de93
+ size 5147249
Models/cugan_up2x-latest-denoise1x.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2e783c39da6a6394fbc250fdd069c55eaedc43971c4f2405322f18949ce38573
+ size 5147249
Models/cugan_up2x-latest-denoise2x.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8188b3faef4258cf748c59360cbc8086ebedf4a63eb9d5d6637d45f819d32496
+ size 5147249
Models/cugan_up2x-latest-denoise3x.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a14739f3f5fcbd74ec3ce2806d13a47916c916b20afe4a39d95f6df4ca6abd8
+ size 5147249
Models/cugan_up2x-latest-no-denoise.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f491f9ecf6964ead9f3a36bf03e83527f32c6a341b683f7378ac6c1e2a5f0d16
+ size 5147249
Models/cugan_up3x-latest-conservative.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f6ea5fd20380413beb2701182483fd80c2e86f3b3f08053eb3df4975184aefe3
+ size 5154161
Models/cugan_up3x-latest-denoise3x.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39f1e6e90d50e5528a63f4ba1866bad23365a737cbea22a80769b2ec4c1c3285
+ size 5154161
Models/cugan_up3x-latest-no-denoise.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:763f0a87e70d744673f1a41db5396d5f334d22de97fff68ffc40deb91404a584
+ size 5154161
Models/cugan_up4x-latest-conservative.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8c8185def699b0883662a02df0ef2e6db3b0275170b6cc0d28089b64b273427
+ size 5636403
Models/cugan_up4x-latest-denoise3x.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:42bd8fcdae37c12c5b25ed59625266bfa65780071a8d38192d83756cb85e98dd
+ size 5636403
Models/cugan_up4x-latest-no-denoise.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aaf3ef78a488cce5d3842154925eb70ff8423b8298e2cd189ec66eb7f6f66fae
+ size 5636403
Models/sudo_UltraCompact_2x_1.121.175_G.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e53987f0312dee424b4dbd9dce7b2eacbe03fdf1380e44a11f8a4d2ca88c99e3
+ size 1226766
README.md CHANGED
@@ -5,7 +5,7 @@ colorFrom: indigo
  colorTo: pink
  sdk: gradio
  sdk_version: 5.29.0
- app_file: app.py
+ app_file: App.py
  pinned: false
  license: mit
  short_description: ♻️ Upscale any video using several models
app.py CHANGED
@@ -1,7 +1,285 @@
- import gradio as gr
-
- def greet(name):
-     return "Hello " + name + "!!"
-
- demo = gr.Interface(fn=greet, inputs="text", outputs="text")
- demo.launch()
+ from spandrel import ModelLoader
+ import torch
+ from pathlib import Path
+ from PIL import Image
+ import gradio as App
+ import numpy as np
+ import subprocess
+ import logging
+ import spaces
+ import time
+ import os
+ import gc
+ import io
+ import cv2
+
+ from gradio import themes
+ from rich.console import Console
+ from rich.logging import RichHandler
+
+ # ============================== #
+ #          Core Settings         #
+ # ============================== #
+
+ Theme = themes.Citrus(primary_hue='blue', radius_size=themes.sizes.radius_xxl)
+ ModelDir = Path('./Models')
+ TempDir = Path('./Temp')
+ os.environ['GRADIO_TEMP_DIR'] = str(TempDir)
+ ModelFileType = '.pth'
+
+ # ============================== #
+ #        Enhanced Logging        #
+ # ============================== #
+
+ logging.basicConfig(level=logging.INFO, format='%(message)s', datefmt='[%X]',
+                     handlers=[RichHandler(console=Console(), rich_tracebacks=True)])
+ Logger = logging.getLogger('Video2x')
+ logging.getLogger('httpx').setLevel(logging.WARNING)
+
+ # ============================== #
+ #      Device Configuration      #
+ # ============================== #
+
+ @spaces.GPU
+ def GetDeviceName():
+     Device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+     Logger.info(f'⚙️ Using device: {Device}')
+     return Device
+
+ Device = GetDeviceName()
+
+ # ============================== #
+ #      Optimized Functions       #
+ # ============================== #
+
+ def FormatTimeEstimate(Seconds):
+     Hours = int(Seconds // 3600)
+     Minutes = int((Seconds % 3600) // 60)
+     Seconds = int(Seconds % 60)
+
+     if Hours > 0:
+         return f'{Hours}h {Minutes}m {Seconds}s'
+     elif Minutes > 0:
+         return f'{Minutes}m {Seconds}s'
+     else:
+         return f'{Seconds}s'
+
+ def ListModels():
+     Models = sorted([File.name for File in ModelDir.glob('*' + ModelFileType) if File.is_file()])
+     Logger.info(f'📚 Found {len(Models)} Models In Directory')
+     return Models
+
+ def LoadModel(ModelName):
+     if Device.type == 'cuda':
+         torch.cuda.empty_cache()
+     Logger.info(f'🔄 Loading model: {ModelName} onto {Device}')
+     Model = ModelLoader().load_from_file(ModelDir / (ModelName + ModelFileType)).to(Device).eval()  # Use .to(Device)
+     Logger.info('✅ Model Loaded Successfully')
+     return Model
+
+ @spaces.GPU
+ def ProcessSingleFrame(OriginalImage, Model, TileGridSize):
+     if TileGridSize > 1:
+         Logger.info(f'🧩 Processing With Tile Grid {TileGridSize}x{TileGridSize}')
+         Width, Height = OriginalImage.size
+         TileWidth, TileHeight = Width // TileGridSize, Height // TileGridSize
+         UpscaledTilesGrid = []
+
+         for Row in range(TileGridSize):
+             CurrentRowTiles = []
+             for Col in range(TileGridSize):
+                 Tile = OriginalImage.crop((Col * TileWidth, Row * TileHeight,
+                                            (Col + 1) * TileWidth, (Row + 1) * TileHeight))
+                 TileTensor = torch.from_numpy(np.array(Tile)).permute(2, 0, 1).unsqueeze(0).float().to(Device) / 255.0
+
+                 with torch.no_grad():
+                     UpscaledTileTensor = Model(TileTensor)
+
+                 UpscaledTileNumpy = UpscaledTileTensor.squeeze(0).permute(1, 2, 0).cpu().numpy()
+                 CurrentRowTiles.append(Image.fromarray(np.uint8(UpscaledTileNumpy.clip(0.0, 1.0) * 255.0), mode='RGB'))
+                 del TileTensor, UpscaledTileTensor, UpscaledTileNumpy
+             UpscaledTilesGrid.append(CurrentRowTiles)
+
+         FirstTileWidth, FirstTileHeight = UpscaledTilesGrid[0][0].size
+         UpscaledImage = Image.new('RGB', (FirstTileWidth * TileGridSize, FirstTileHeight * TileGridSize))
+
+         for Row in range(TileGridSize):
+             for Col in range(TileGridSize):
+                 UpscaledImage.paste(UpscaledTilesGrid[Row][Col], (Col * FirstTileWidth, Row * FirstTileHeight))
+     else:
+         TorchImage = torch.from_numpy(np.array(OriginalImage)).permute(2, 0, 1).unsqueeze(0).float().to(Device) / 255.0
+         with torch.no_grad():
+             ResultTensor = Model(TorchImage)
+         ResultNumpy = ResultTensor.squeeze(0).permute(1, 2, 0).cpu().numpy()
+         UpscaledImage = Image.fromarray(np.uint8(ResultNumpy.clip(0.0, 1.0) * 255.0), mode='RGB')
+         del TorchImage, ResultTensor, ResultNumpy
+
+     return UpscaledImage
+
+ @spaces.GPU
+ def Process(VideoInputPath, ModelName, FrameRateValue, TileGridSize, FileType, Progress=App.Progress()):
+     # First yield should match the order of outputs in the click function
+     yield None, App.update(interactive=False, value=None)
+
+     if not VideoInputPath or not ModelName or not FileType:
+         Logger.error('⛔ Missing Inputs!')
+         return None, None
+
+     VideoPath = Path(VideoInputPath)
+     OutputVideoPath = VideoPath.parent / f'{VideoPath.stem}_{Path(ModelName).stem}{"_Tiled" + str(TileGridSize) if TileGridSize > 1 else ""}{FileType}'
+
+     # Load model
+     Progress(0.0, '🔄 Loading Model')
+     Model = LoadModel(ModelName)
+
+     # Extract video info
+     Logger.info(f'🎬 Extracting Video Information From {VideoPath.name}')
+     VideoCapture = cv2.VideoCapture(str(VideoPath))
+     FrameCount = int(VideoCapture.get(cv2.CAP_PROP_FRAME_COUNT))
+
+     if not FrameRateValue:
+         FrameRateValue = VideoCapture.get(cv2.CAP_PROP_FPS)
+
+     Logger.info(f'🎞️ Processing {FrameCount} Frames At {FrameRateValue} FPS')
+
+     # In-memory frames processing
+     FrameBuffer = []
+     AllFrames = []
+
+     # Time tracking variables
+     StartTime = time.time()
+     FrameProcessingTime = None
+
+     for FrameIndex in range(FrameCount):
+         FrameStartTime = time.time()
+
+         Success, Frame = VideoCapture.read()
+         if not Success:
+             Logger.warning(f'⚠️ Failed To Read Frame {FrameIndex}')
+             continue
+
+         # Convert from BGR to RGB
+         OriginalImage = Image.fromarray(cv2.cvtColor(Frame, cv2.COLOR_BGR2RGB))
+         UpscaledImage = ProcessSingleFrame(OriginalImage, Model, TileGridSize)
+
+         # Store for preview
+         ResizedOriginalImage = OriginalImage.resize(UpscaledImage.size, Image.Resampling.LANCZOS)
+         AllFrames.append((ResizedOriginalImage, UpscaledImage.copy()))
+
+         # Save to buffer for video output
+         ImageBytes = io.BytesIO()
+         UpscaledImage.save(ImageBytes, format='PNG')
+         FrameBuffer.append(ImageBytes.getvalue())
+
+         # Calculate time estimates
+         CurrentFrameTime = time.time() - FrameStartTime
+
+         if FrameIndex == 0:
+             FrameProcessingTime = CurrentFrameTime
+             Logger.info(f'⏱️ First Frame Took {FrameProcessingTime:.2f}s To Process')
+
+         # Calculate remaining time based on average processing time so far
+         ElapsedTime = time.time() - StartTime
+         AverageTimePerFrame = ElapsedTime / (FrameIndex + 1)
+         RemainingFrames = FrameCount - (FrameIndex + 1)
+         EstimatedRemainingTime = RemainingFrames * AverageTimePerFrame
+
+         # Format time estimates for display
+         RemainingTimeFormatted = FormatTimeEstimate(EstimatedRemainingTime)
+
+         Progress(
+             (FrameIndex + 1) / FrameCount,
+             f'🔄 Frame {FrameIndex+1}/{FrameCount} | ETA: {RemainingTimeFormatted}'
+         )
+
+         del OriginalImage, UpscaledImage, ImageBytes
+         gc.collect()
+
+     VideoCapture.release()
+
+     # Write frames to temporary files for ffmpeg
+     Logger.info('💾 Preparing Frames For Video Encoding')
+     os.makedirs(TempDir, exist_ok=True)
+
+     for Index, FrameData in enumerate(FrameBuffer):
+         with open(f'{TempDir}/Frame_{Index:06d}.png', 'wb') as f:
+             f.write(FrameData)
+
+     # Create video
+     Progress(1.0, '🎥 Encoding Video')
+     Logger.info('🎥 Encoding Final Video')
+     FfmpegCmd = f'ffmpeg -y -framerate {FrameRateValue} -i "{TempDir}/Frame_%06d.png" -c:v libx264 -pix_fmt yuv420p "{OutputVideoPath}" -hide_banner -loglevel error'
+     subprocess.run(FfmpegCmd, shell=True, check=True)
+
+     # Clean up
+     for File in Path(TempDir).glob('Frame_*.png'):
+         File.unlink()
+
+     Logger.info(f'🎉 Video Saved To: {OutputVideoPath}')
+
+     # Update UI - return values directly in the order specified in the click function
+     FirstFrame = AllFrames[0] if AllFrames else None
+     DownloadValue = App.update(interactive=True, value=str(OutputVideoPath))
+     yield FirstFrame, DownloadValue
+
+     # Release resources
+     del Model, FrameBuffer, AllFrames
+     Progress(1.0, '🧹 Cleaning Up Resources')
+     gc.collect()
+     if Device.type == 'cuda':
+         torch.cuda.empty_cache()
+         Logger.info('🧹 CUDA Memory Cleaned Up')
+     Logger.info('🧹 Model Unloaded')
+     Progress(1.0, '📦 Done!')
+
+ # ============================== #
+ #         Streamlined UI         #
+ # ============================== #
+
+ with App.Blocks(title='Video Upscaler', theme=Theme, delete_cache=(60, 600)) as Interface:
+     App.Markdown('# 🎞️ Video Upscaler')
+     App.Markdown('''
+     Space created by [Hyphonical](https://huggingface.co/Hyphonical), this space uses several models from [styler00dollar/VSGAN-tensorrt-docker](https://github.com/styler00dollar/VSGAN-tensorrt-docker/releases/tag/models)
+     You may always request adding more models by opening a [new discussion](https://huggingface.co/spaces/Hyphonical/Video2x/discussions/new). The main program uses spandrel to load the models and ffmpeg to process the video.
+     You may run out of time using the ZeroGPU, you could clone the space or run it locally for better performance.
+     ''')
+
+     with App.Row():
+         with App.Column(scale=1):
+             with App.Group():
+                 InputVideo = App.Video(label='Input Video', sources=['upload'], height=300)
+                 ModelList = ListModels()
+                 ModelNames = [Path(Model).stem for Model in ModelList]
+                 InputModel = App.Dropdown(choices=ModelNames, label='Select Model', value=ModelNames[0] if ModelNames else None)
+                 with App.Row():
+                     InputFrameRate = App.Slider(label='Frame Rate', minimum=1, maximum=60, value=23.976, step=0.001)
+                     InputTileGridSize = App.Slider(label='Tile Grid Size', minimum=1, maximum=6, value=1, step=1, show_reset_button=False)
+                 InputFileType = App.Dropdown(choices=['.mp4', '.mkv'], label='Output File Type', value='.mkv', interactive=True)
+             SubmitButton = App.Button('🚀 Upscale Video')
+
+         with App.Column(scale=1, show_progress=True):
+             OutputSlider = App.ImageSlider(label='Output Preview', value=None, height=300)
+             DownloadOutput = App.DownloadButton(label='💾 Download Video', interactive=False)
+             with App.Accordion(label='📜 Instructions', open=False):
+                 App.Markdown('''
+                 ### How To Use The Video Upscaler
+
+                 1. **Upload A Video:** Begin by uploading your video file using the 'Input Video' section.
+                 2. **Select A Model:** Choose an appropriate upscaling model from the 'Select Model' dropdown menu.
+                 3. **Adjust Settings (Optional):**
+                     Modify the 'Frame Rate' slider if you want to change the output video's frame rate.
+                     Adjust the 'Tile Grid Size' for memory optimization. Larger models might require a higher grid size, but processing could be slower.
+                 4. **Start Processing:** Click the '🚀 Upscale Video' button to begin the upscaling process.
+                 5. **Download The Result:** Once the process is complete, download the upscaled video using the '💾 Download Video' button.
+
+                 > Tip: If you get a CUDA out of memory error, try increasing the Tile Grid Size. This will split the image into smaller tiles for processing, which can help reduce memory usage.
+                 ''')
+
+     SubmitButton.click(fn=Process, inputs=[InputVideo, InputModel, InputFrameRate, InputTileGridSize, InputFileType],
+                        outputs=[OutputSlider, DownloadOutput])
+
+ if __name__ == '__main__':
+     os.makedirs(ModelDir, exist_ok=True)
+     os.makedirs(TempDir, exist_ok=True)
+     Logger.info('🚀 Starting Video Upscaler')
+     Interface.launch(pwa=True)
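
For illustration only (not part of the commit), a minimal single-frame smoke test of the functions defined above, assuming the dependencies from requirements.txt are installed, the model file exists under ./Models, and 'Input.png' is a placeholder path:

    from PIL import Image
    from App import LoadModel, ProcessSingleFrame

    Model = LoadModel('2x_ModernSpanimationV2')
    Frame = Image.open('Input.png').convert('RGB')
    Upscaled = ProcessSingleFrame(Frame, Model, TileGridSize=2)  # 2x2 tiling to bound memory use
    Upscaled.save('Output.png')

Importing App builds the Gradio Blocks but does not start the server, since `Interface.launch` is guarded by `__main__`.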
packages.txt ADDED
@@ -0,0 +1,2 @@
+ python3-opencv
+ ffmpeg
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ gradio
+ rich
+ numpy
+ spandrel
+ Pillow
+ opencv-python