Spaces: Running on Zero

Commit · c914404
Parent(s): 1a52c98
📦 Add Experimental Region Support, Add SAD Script

Files changed:
- App.py (+266 -202)
- Diff.py (+0 -151)
- Models/1x_Anime1080Fixer_SuperUltraCompact.pth (+3 -0)
- Scripts/SAD.py (+120 -0)
App.py
CHANGED

Before (removed lines are prefixed with -; lines the diff viewer cut off mid-token are left truncated):

@@ -1,38 +1,51 @@
 from spandrel import ModelLoader
 import torch
 from pathlib import Path
-from PIL import Image
 import gradio as App
-import numpy as np
-import subprocess
 import logging
 import spaces
 import time
-import os
-import gc
-import io
 import cv2

 from gradio import themes
 from rich.console import Console
 from rich.logging import RichHandler

 # ============================== #
 #         Core Settings          #
 # ============================== #

-Theme = themes.Citrus(
 ModelDir = Path('./Models')
 TempDir = Path('./Temp')
 os.environ['GRADIO_TEMP_DIR'] = str(TempDir)
 ModelFileType = '.pth'

 # ============================== #
-#
 # ============================== #

-logging.basicConfig(
 Logger = logging.getLogger('Video2x')
 logging.getLogger('httpx').setLevel(logging.WARNING)

@@ -43,16 +56,16 @@ logging.getLogger('httpx').setLevel(logging.WARNING)
 @spaces.GPU
 def GetDeviceName():
     Device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-    Logger.info(f'
     return Device

 Device = GetDeviceName()

 # ============================== #
-#
 # ============================== #

-def
     Hours = int(Seconds // 3600)
     Minutes = int((Seconds % 3600) // 60)
     Seconds = int(Seconds % 60)
@@ -64,179 +77,156 @@ def FormatTimeEstimate(Seconds):
     else:
         return f'{Seconds}s'

-def
-    torch.cuda.empty_cache()
-    Logger.info(f'🔄 Loading model: {ModelName} onto {Device}')
-    Model = ModelLoader().load_from_file(ModelDir / (ModelName + ModelFileType)).to(Device).eval() # Use .to(Device)
-    Logger.info('✅ Model Loaded Successfully')
-    return Model
-
-@spaces.GPU
-def ProcessSingleFrame(OriginalImage, Model, TileGridSize):
-    if TileGridSize > 1:
-        Logger.info(f'🧩 Processing With Tile Grid {TileGridSize}x{TileGridSize}')
-        Width, Height = OriginalImage.size
-        TileWidth, TileHeight = Width // TileGridSize, Height // TileGridSize
-        UpscaledTilesGrid = []
-
-        for Row in range(TileGridSize):
-            CurrentRowTiles = []
-            for Col in range(TileGridSize):
-                Tile = OriginalImage.crop((Col * TileWidth, Row * TileHeight,
-                    (Col + 1) * TileWidth, (Row + 1) * TileHeight))
-                TileTensor = torch.from_numpy(np.array(Tile)).permute(2, 0, 1).unsqueeze(0).float().to(Device) / 255.0
-
-                with torch.no_grad():
-                    UpscaledTileTensor = Model(TileTensor)
-
-                UpscaledTileNumpy = UpscaledTileTensor.squeeze(0).permute(1, 2, 0).cpu().numpy()
-                CurrentRowTiles.append(Image.fromarray(np.uint8(UpscaledTileNumpy.clip(0.0, 1.0) * 255.0), mode='RGB'))
-                del TileTensor, UpscaledTileTensor, UpscaledTileNumpy
-            UpscaledTilesGrid.append(CurrentRowTiles)
-
-        FirstTileWidth, FirstTileHeight = UpscaledTilesGrid[0][0].size
-        UpscaledImage = Image.new('RGB', (FirstTileWidth * TileGridSize, FirstTileHeight * TileGridSize))
-
-        for Row in range(TileGridSize):
-            for Col in range(TileGridSize):
-                UpscaledImage.paste(UpscaledTilesGrid[Row][Col], (Col * FirstTileWidth, Row * FirstTileHeight))
-    else:
-        TorchImage = torch.from_numpy(np.array(OriginalImage)).permute(2, 0, 1).unsqueeze(0).float().to(Device) / 255.0
-        with torch.no_grad():
-            ResultTensor = Model(TorchImage)
-        ResultNumpy = ResultTensor.squeeze(0).permute(1, 2, 0).cpu().numpy()
-        UpscaledImage = Image.fromarray(np.uint8(ResultNumpy.clip(0.0, 1.0) * 255.0), mode='RGB')
-        del TorchImage, ResultTensor, ResultNumpy

 @spaces.GPU
-    if not VideoInputPath or not ModelName or not FileType:
-        Logger.error('⛔ Missing Inputs!')
-        return None, None
-
-    VideoPath = Path(VideoInputPath)
-    OutputVideoPath = VideoPath.parent / f'{VideoPath.stem}_{Path(ModelName).stem}{"_Tiled" + str(TileGridSize) if TileGridSize > 1 else ""}{FileType}'
-
-    # Load model
-    Progress(0.0, '🔄 Loading Model')
-    Model = LoadModel(ModelName)
-
-    # Extract video info
-    Logger.info(f'🎬 Extracting Video Information From {VideoPath.name}')
-    VideoCapture = cv2.VideoCapture(str(VideoPath))
-    FrameCount = int(VideoCapture.get(cv2.CAP_PROP_FRAME_COUNT))
-
-    if not FrameRateValue:
-        FrameRateValue = VideoCapture.get(cv2.CAP_PROP_FPS)
-
-    Logger.info(f'🎞️ Processing {FrameCount} Frames At {FrameRateValue} FPS')
-
-    # In-memory frames processing
-    FrameBuffer = []
-    AllFrames = []
-
-    # Time tracking variables
-    StartTime = time.time()
-    FrameProcessingTime = None
-
-    for FrameIndex in range(FrameCount):
-        FrameStartTime = time.time()
-
-        Success, Frame = VideoCapture.read()
-        if not Success:
-            Logger.warning(f'⚠️ Failed To Read Frame {FrameIndex}')
-            continue

-        # Store for preview
-        ResizedOriginalImage = OriginalImage.resize(UpscaledImage.size, Image.Resampling.LANCZOS)
-        AllFrames.append((ResizedOriginalImage, UpscaledImage.copy()))
-
-        # Save to buffer for video output
-        ImageBytes = io.BytesIO()
-        UpscaledImage.save(ImageBytes, format='PNG')
-        FrameBuffer.append(ImageBytes.getvalue())
-
-        # Calculate time estimates
-        CurrentFrameTime = time.time() - FrameStartTime
-
-        if FrameIndex == 0:
-            FrameProcessingTime = CurrentFrameTime
-            Logger.info(f'⏱️ First Frame Took {FrameProcessingTime:.2f}s To Process')
-
-        # Calculate remaining time based on average processing time so far
-        ElapsedTime = time.time() - StartTime
-        AverageTimePerFrame = ElapsedTime / (FrameIndex + 1)
-        RemainingFrames = FrameCount - (FrameIndex + 1)
-        EstimatedRemainingTime = RemainingFrames * AverageTimePerFrame
-
-        # Format time estimates for display
-        RemainingTimeFormatted = FormatTimeEstimate(EstimatedRemainingTime)
-
-        Progress(
-            (FrameIndex + 1) / FrameCount,
-            f'🔄 Frame {FrameIndex+1}/{FrameCount} | ETA: {RemainingTimeFormatted}'
         )

-        gc.collect()
-
-    VideoCapture.release()
-
-    # Write frames to temporary files for ffmpeg
-    Logger.info('💾 Preparing Frames For Video Encoding')
-    os.makedirs(TempDir, exist_ok=True)
-
-    for Index, FrameData in enumerate(FrameBuffer):
-        with open(f'{TempDir}/Frame_{Index:06d}.png', 'wb') as f:
-            f.write(FrameData)
-
-    # Create video
-    Progress(1.0, '🎥 Encoding Video')
-    Logger.info('🎥 Encoding Final Video')
-    FfmpegCmd = f'ffmpeg -y -framerate {FrameRateValue} -i "{TempDir}/Frame_%06d.png" -c:v libx264 -pix_fmt yuv420p "{OutputVideoPath}" -hide_banner -loglevel error'
-    subprocess.run(FfmpegCmd, shell=True, check=True)
-
-    # Clean up
-    for File in Path(TempDir).glob('Frame_*.png'):
-        File.unlink()
-
-    Logger.info(f'🎉 Video Saved To: {OutputVideoPath}')
-
-    # Update UI - return values directly in the order specified in the click function
-    FirstFrame = AllFrames[0] if AllFrames else None
-    DownloadValue = App.update(interactive=True, value=str(OutputVideoPath))
-    yield FirstFrame, DownloadValue
-
-    # Release resources
-    del Model, FrameBuffer, AllFrames
-    Progress(1.0, '🧹 Cleaning Up Resources')
-    gc.collect()
-    if Device.type == 'cuda':
         torch.cuda.empty_cache()
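The removed encode step built a single shell string and ran it with `shell=True`. As an editorial aside (not part of the commit), the same invocation can be passed as an argument list, which sidesteps shell quoting around paths; this sketch assumes ffmpeg is on PATH and uses placeholder values for the removed function's locals:

```python
import subprocess

# Placeholder values standing in for the removed function's locals.
FrameRateValue = 23.976
TempDir = './Temp'
OutputVideoPath = './Upscaled.mp4'

# Same flags as the removed FfmpegCmd; global options moved before the output.
subprocess.run([
    'ffmpeg', '-y', '-hide_banner', '-loglevel', 'error',
    '-framerate', str(FrameRateValue),
    '-i', f'{TempDir}/Frame_%06d.png',          # numbered PNG frames written above
    '-c:v', 'libx264', '-pix_fmt', 'yuv420p',   # H.264 with a player-friendly pixel format
    OutputVideoPath,
], check=True)
```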
 # ============================== #
 #        Streamlined UI          #
 # ============================== #

-with App.Blocks(
     App.Markdown('# 🎞️ Video Upscaler')
     App.Markdown('''
     Space created by [Hyphonical](https://huggingface.co/Hyphonical), this space uses several models from [styler00dollar/VSGAN-tensorrt-docker](https://github.com/styler00dollar/VSGAN-tensorrt-docker/releases/tag/models)
@@ -245,38 +235,112 @@ with App.Blocks(title='Video Upscaler', theme=Theme, delete_cache=(60, 600)) as
     ''')

     with App.Row():
-        with App.Column(
         with App.Group():
             ModelNames = [Path(Model).stem for Model in ModelList]
-            InputModel = App.Dropdown(
             SubmitButton = App.Button('🚀 Upscale Video')

-        with App.Column(

 if __name__ == '__main__':
     os.makedirs(ModelDir, exist_ok=True)

After (the new side of the same hunks; added lines are prefixed with +, unchanged context is repeated for orientation):

@@ -1,38 +1,51 @@
 from spandrel import ModelLoader
 import torch
 from pathlib import Path
 import gradio as App
 import logging
 import spaces
 import time
 import cv2
+import os

 from gradio import themes
 from rich.console import Console
 from rich.logging import RichHandler

+from Scripts.SAD import GetDifferenceRectangles
+
 # ============================== #
 #         Core Settings          #
 # ============================== #

+Theme = themes.Citrus(
+    primary_hue='blue',
+    secondary_hue='blue',
+    radius_size=themes.sizes.radius_xxl
+).set(
+    link_text_color='blue'
+)
 ModelDir = Path('./Models')
 TempDir = Path('./Temp')
 os.environ['GRADIO_TEMP_DIR'] = str(TempDir)
 ModelFileType = '.pth'

 # ============================== #
+#            Logging             #
 # ============================== #

+logging.basicConfig(
+    level=logging.INFO,
+    format='%(message)s',
+    datefmt='[%X]',
+    handlers=[RichHandler(
+        console=Console(),
+        rich_tracebacks=True,
+        omit_repeated_times=False,
+        markup=True,
+        show_path=False,
+    )],
+)
 Logger = logging.getLogger('Video2x')
 logging.getLogger('httpx').setLevel(logging.WARNING)

@@ -43,16 +56,16 @@ logging.getLogger('httpx').setLevel(logging.WARNING)
 @spaces.GPU
 def GetDeviceName():
     Device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+    Logger.info(f'🧪 Using device: {str(Device).upper()}')
     return Device

 Device = GetDeviceName()

 # ============================== #
+#       Utility Functions        #
 # ============================== #

+def HumanizeSeconds(Seconds):
     Hours = int(Seconds // 3600)
     Minutes = int((Seconds % 3600) // 60)
     Seconds = int(Seconds % 60)

@@ -64,179 +77,156 @@ def FormatTimeEstimate(Seconds):
     else:
         return f'{Seconds}s'

+def HumanizedBytes(Size):
+    Units = ['B', 'KB', 'MB', 'GB', 'TB']
+    Index = 0
+    while Size >= 1024 and Index < len(Units) - 1:
+        Size /= 1024.0
+        Index += 1
+    return f'{Size:.2f} {Units[Index]}'
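As a quick, editorial sanity check of the two helpers (the middle branches of HumanizeSeconds sit outside this hunk, so only the sub-minute case is exercised here):

```python
# HumanizedBytes walks up the unit list, dividing by 1024 at each step.
print(HumanizedBytes(500))      # '500.00 B'
print(HumanizedBytes(1536))     # '1.50 KB'  (1536 / 1024)
print(HumanizedBytes(5 << 20))  # '5.00 MB'

# HumanizeSeconds falls through to the plain-seconds branch below one minute.
print(HumanizeSeconds(42))      # '42s'
```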

+# ============================== #
+#     Main Processing Logic      #
+# ============================== #

 @spaces.GPU
+class Upscaler:
+    def __init__(self):
+        pass

+    def ListModels(self):
+        Models = sorted(
+            [File.name for File in ModelDir.glob('*' + ModelFileType) if File.is_file()]
         )
+        Logger.info(f'📚 Found {len(Models)} Models In Directory')
+        return Models

+    def LoadModel(self, ModelName):
         torch.cuda.empty_cache()
+        Model = (
+            ModelLoader()
+            .load_from_file(ModelDir / (ModelName + ModelFileType))
+            .to(Device)
+            .eval()
+        )
+        Logger.info(f'🤖 Loaded Model {ModelName} Onto {str(Device).upper()}')
+        return Model
+
+    def UnloadModel(self):
+        if Device.type == 'cuda':
+            torch.cuda.empty_cache()
+        Logger.info('🤖 Model Unloaded Successfully')
+
+    def CleanUp(self):
+        self.UnloadModel()
+        Logger.info('🧹 Temporary Files Cleaned Up')
+
+
def Process(self, InputVideo, InputModel, InputUseRegions, InputThreshold, InputMinPercentage, InputMaxRectangles, InputPadding, Progress=App.Progress()):
|
125 |
+
if not InputVideo:
|
126 |
+
Logger.warning('❌ No Video Provided')
|
127 |
+
App.Warning('❌ No Video Provided')
|
128 |
+
return None, None
|
129 |
+
|
130 |
+
Progress(0, desc='⚙️ Loading Model')
|
131 |
+
Model = self.LoadModel(InputModel)
|
132 |
+
|
133 |
+
Logger.info(f'📼 Processing Video: {Path(InputVideo).name}')
|
134 |
+
Progress(0, desc='📼 Processing Video')
|
135 |
+
Video = cv2.VideoCapture(InputVideo)
|
136 |
+
|
137 |
+
FrameRate = Video.get(cv2.CAP_PROP_FPS)
|
138 |
+
FrameCount = int(Video.get(cv2.CAP_PROP_FRAME_COUNT))
|
139 |
+
Width = int(Video.get(cv2.CAP_PROP_FRAME_WIDTH))
|
140 |
+
Height = int(Video.get(cv2.CAP_PROP_FRAME_HEIGHT))
|
141 |
+
|
142 |
+
Logger.info(f'📏 Video Properties: {FrameCount} Frames, {FrameRate} FPS, {Width}x{Height}')
|
143 |
+
|
144 |
+
PerFrameProgress = 1 / FrameCount
|
145 |
+
FrameProgress = 0.0
|
146 |
+
StartTime = time.time()
|
147 |
+
Times = []
|
148 |
+
|
149 |
+
while True:
|
150 |
+
Ret, Frame = Video.read()
|
151 |
+
if not Ret:
|
152 |
+
break
|
153 |
+
|
154 |
+
FrameRgb = cv2.cvtColor(Frame, cv2.COLOR_BGR2RGB)
|
155 |
+
FrameForTorch = FrameRgb.transpose(2, 0, 1)
|
156 |
+
FrameForTorch = torch.from_numpy(FrameForTorch).unsqueeze(0).to(Device).float() / 255.0
|
157 |
+
|
158 |
+
RetNext, NextFrame = Video.read()
|
159 |
+
if not RetNext:
|
160 |
+
NextFrame = Frame
|
161 |
+
|
162 |
+
DiffResult = GetDifferenceRectangles(
|
163 |
+
Frame,
|
164 |
+
NextFrame,
|
165 |
+
Threshold=InputThreshold,
|
166 |
+
Rows=12,
|
167 |
+
Columns=20,
|
168 |
+
Padding=InputPadding
|
169 |
+
|
170 |
+
)
|
171 |
+
SimilarityPercentage = DiffResult['SimilarPercentage']
|
172 |
+
Rectangles = DiffResult['Rectangles']
|
173 |
+
|
174 |
+
if SimilarityPercentage > InputMinPercentage and len(Rectangles) < InputMaxRectangles and InputUseRegions:
|
175 |
+
Logger.info(f'🟩 Frame {int(Video.get(cv2.CAP_PROP_POS_FRAMES))}: {SimilarityPercentage:.2f}% Similar, {len(Rectangles)} Regions To Upscale')
|
176 |
+
Cols = DiffResult['Columns']
|
177 |
+
Rows = DiffResult['Rows']
|
178 |
+
FrameHeight, FrameWidth = Frame.shape[:2]
|
179 |
+
SegmentWidth = FrameWidth // Cols
|
180 |
+
SegmentHeight = FrameHeight // Rows
|
181 |
+
for X, Y, W, H in Rectangles:
|
182 |
+
X1 = X * SegmentWidth
|
183 |
+
Y1 = Y * SegmentHeight
|
184 |
+
X2 = FrameWidth if X + W == Cols else X1 + W * SegmentWidth
|
185 |
+
Y2 = FrameHeight if Y + H == Rows else Y1 + H * SegmentHeight
|
186 |
+
|
187 |
+
Region = Frame[Y1:Y2, X1:X2]
|
188 |
+
RegionRgb = cv2.cvtColor(Region, cv2.COLOR_BGR2RGB)
|
189 |
+
RegionTorch = torch.from_numpy(RegionRgb.transpose(2, 0, 1)).unsqueeze(0).to(Device).float() / 255.0
|
190 |
+
UpscaledRegion = Model(RegionTorch)[0].cpu().numpy().transpose(1, 2, 0) * 255.0 # type: ignore
|
191 |
+
UpscaledRegion = cv2.cvtColor(UpscaledRegion.astype('uint8'), cv2.COLOR_RGB2BGR)
|
192 |
+
RegionHeight, RegionWidth = Region.shape[:2]
|
193 |
+
UpscaledRegion = cv2.resize(UpscaledRegion, (RegionWidth, RegionHeight), interpolation=cv2.INTER_CUBIC)
|
194 |
+
Frame[Y1:Y2, X1:X2] = UpscaledRegion
|
195 |
+
OutputFrame = Frame
|
196 |
+
else:
|
197 |
+
Logger.info(f'🟥 Frame {int(Video.get(cv2.CAP_PROP_POS_FRAMES))}: {SimilarityPercentage:.2f}% Similar, Upscaling Full Frame')
|
198 |
+
OutputFrame = Model(FrameForTorch)[0].cpu().numpy().transpose(1, 2, 0) * 255.0 # type: ignore
|
199 |
+
OutputFrame = cv2.cvtColor(OutputFrame.astype('uint8'), cv2.COLOR_RGB2BGR)
|
200 |
+
OutputFrame = cv2.resize(OutputFrame, (Width, Height), interpolation=cv2.INTER_CUBIC)
|
201 |
+
|
202 |
+
CurrentFrameNumber = int(Video.get(cv2.CAP_PROP_POS_FRAMES))
|
203 |
+
if Times:
|
204 |
+
AverageTime = sum(Times) / len(Times)
|
205 |
+
Eta = HumanizeSeconds((FrameCount - CurrentFrameNumber) * AverageTime)
|
206 |
+
else:
|
207 |
+
Eta = None
|
208 |
+
|
209 |
+
Progress(FrameProgress, desc=f'📦 Processed Frame {len(Times)+1}/{FrameCount} - {Eta}')
|
210 |
+
Logger.info(f'📦 Processed Frame {len(Times)+1}/{FrameCount} - {Eta}')
|
211 |
+
|
212 |
+
cv2.imwrite(f'{TempDir}/Upscaled_Frame_{CurrentFrameNumber:05d}.png', OutputFrame)
|
213 |
+
|
214 |
+
DeltaTime = time.time() - StartTime
|
215 |
+
Times.append(DeltaTime)
|
216 |
+
StartTime = time.time()
|
217 |
+
FrameProgress += PerFrameProgress
|
218 |
+
|
219 |
+
Progress(1, desc='📦 Cleaning Up')
|
220 |
+
self.CleanUp()
|
221 |
+
return InputVideo, InputVideo
|
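The grid-to-pixel mapping inside Process is the subtle part: GetDifferenceRectangles returns rectangles in tile units, and the loop scales them by the segment size, snapping the last row and column to the frame edge. A condensed editorial sketch of just that mapping, with made-up numbers:

```python
# A 640x360 frame cut into 20 columns x 12 rows gives 32x30-pixel segments.
FrameWidth, FrameHeight = 640, 360
Cols, Rows = 20, 12
SegmentWidth, SegmentHeight = FrameWidth // Cols, FrameHeight // Rows

# A hypothetical tile-space rectangle: 3 tiles wide, 2 tall, at tile (17, 10).
X, Y, W, H = 17, 10, 3, 2

X1, Y1 = X * SegmentWidth, Y * SegmentHeight
# Rectangles touching the last column/row are snapped to the frame edge, so
# integer division never leaves a sliver of unprocessed pixels.
X2 = FrameWidth if X + W == Cols else X1 + W * SegmentWidth
Y2 = FrameHeight if Y + H == Rows else Y1 + H * SegmentHeight
print((X1, Y1, X2, Y2))  # (544, 300, 640, 360)
```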

 # ============================== #
 #        Streamlined UI          #
 # ============================== #

+with App.Blocks(
+    title='Video Upscaler', theme=Theme, delete_cache=(-1, 1800)
+) as Interface:
     App.Markdown('# 🎞️ Video Upscaler')
     App.Markdown('''
     Space created by [Hyphonical](https://huggingface.co/Hyphonical), this space uses several models from [styler00dollar/VSGAN-tensorrt-docker](https://github.com/styler00dollar/VSGAN-tensorrt-docker/releases/tag/models)

@@ -245,38 +235,112 @@ with App.Blocks(title='Video Upscaler', theme=Theme, delete_cache=(60, 600)) as
     ''')

     with App.Row():
+        with App.Column():
             with App.Group():
+                with App.Accordion(label='📜 Instructions', open=False):
+                    App.Markdown('''
+                    ### How To Use The Video Upscaler
+                    1. **Upload A Video:** Begin by uploading your video file using the 'Input Video' section.
+                    2. **Select A Model:** Choose an appropriate upscaling model from the 'Select Model' dropdown menu.
+                    3. **Adjust Settings (Optional):**
+                       Modify the 'Frame Rate' slider if you want to change the output video's frame rate.
+                       Adjust the 'Tile Grid Size' for memory optimization. Larger models might require a higher grid size, but processing could be slower.
+                    4. **Start Processing:** Click the '🚀 Upscale Video' button to begin the upscaling process.
+                    5. **Download The Result:** Once the process is complete, download the upscaled video using the '💾 Download Video' button.
+                    > Tip: If you get a CUDA out of memory error, try increasing the Tile Grid Size. This will split the image into smaller tiles for processing, which can help reduce memory usage.
+                    ''')
+                InputVideo = App.Video(
+                    label='Input Video', sources=['upload'], height=300
+                )
+                ModelList = Upscaler().ListModels()
                 ModelNames = [Path(Model).stem for Model in ModelList]
+                InputModel = App.Dropdown(
+                    choices=ModelNames,
+                    label='Select Model',
+                    value=ModelNames[0],
+                )
+                with App.Accordion(label='⚙️ Advanced Settings', open=False):
+                    with App.Group():
+                        InputUseRegions = App.Checkbox(
+                            label='Use Regions',
+                            value=False,
+                            info='Use regions to upscale only the different parts of the video (⚡️ Experimental, Faster)',
+                            interactive=True
+                        )
+                        InputThreshold = App.Slider(
+                            label='Threshold',
+                            value=5,
+                            minimum=0,
+                            maximum=20,
+                            step=0.5,
+                            info='Threshold for the SAD algorithm to detect different regions',
+                            interactive=False
+                        )
+                        InputPadding = App.Slider(
+                            label='Padding',
+                            value=1,
+                            minimum=0,
+                            maximum=5,
+                            step=1,
+                            info='Extra padding to include neighboring pixels in the SAD algorithm',
+                            interactive=False
+                        )
+                        InputMinPercentage = App.Slider(
+                            label='Min Percentage',
+                            value=70,
+                            minimum=0,
+                            maximum=100,
+                            step=1,
+                            info='Minimum percentage of similarity to consider upscaling the full frame',
+                            interactive=False
+                        )
+                        InputMaxRectangles = App.Slider(
+                            label='Max Rectangles',
+                            value=8,
+                            minimum=1,
+                            maximum=10,
+                            step=1,
+                            info='Maximum number of rectangles to consider upscaling the full frame',
+                            interactive=False
+                        )
                 SubmitButton = App.Button('🚀 Upscale Video')

+
with App.Column(show_progress=True):
|
309 |
+
with App.Group():
|
310 |
+
OutputVideo = App.Video(
|
311 |
+
label='Output Video', height=300, interactive=False, format=None
|
312 |
+
)
|
313 |
+
OutputDownload = App.DownloadButton(
|
314 |
+
label='💾 Download Video', interactive=False
|
315 |
+
)
|
316 |
+
|
317 |
+
def ToggleRegionInputs(UseRegions):
|
318 |
+
return (
|
319 |
+
App.update(interactive=UseRegions),
|
320 |
+
App.update(interactive=UseRegions),
|
321 |
+
App.update(interactive=UseRegions),
|
322 |
+
App.update(interactive=UseRegions),
|
323 |
+
)
|
324 |
+
|
325 |
+
InputUseRegions.change(
|
326 |
+
fn=ToggleRegionInputs,
|
327 |
+
inputs=[InputUseRegions],
|
328 |
+
outputs=[InputThreshold, InputMinPercentage, InputMaxRectangles, InputPadding],
|
329 |
+
)
|
330 |
+
|
331 |
+
SubmitButton.click(
|
332 |
+
fn=Upscaler().Process,
|
333 |
+
inputs=[
|
334 |
+
InputVideo,
|
335 |
+
InputModel,
|
336 |
+
InputUseRegions,
|
337 |
+
InputThreshold,
|
338 |
+
InputMinPercentage,
|
339 |
+
InputMaxRectangles,
|
340 |
+
InputPadding
|
341 |
+
],
|
342 |
+
outputs=[OutputVideo, OutputDownload],
|
343 |
+
)
|
344 |
|
345 |
if __name__ == '__main__':
|
346 |
os.makedirs(ModelDir, exist_ok=True)
|
Diff.py
DELETED

@@ -1,151 +0,0 @@
-import numpy as np
-import cv2
-import time
-import logging
-
-# Set up logging
-logging.basicConfig(level=logging.INFO)
-Logger = logging.getLogger(__name__)
-
-def MergeBoxes(Boxes, Padding=5):
-    if len(Boxes) <= 1:
-        return Boxes
-    MergedOccurred = True
-    while MergedOccurred:
-        MergedOccurred = False
-        NewBoxes = []
-        Boxes.sort(key=lambda b: b[0])
-        Used = [False] * len(Boxes)
-        for Index in range(len(Boxes)):
-            if Used[Index]:
-                continue
-            CurrentBox = list(Boxes[Index])
-            Used[Index] = True
-            for J in range(Index + 1, len(Boxes)):
-                if Used[J]:
-                    continue
-                NextBox = Boxes[J]
-                OverlapX = max(CurrentBox[0], NextBox[0]) <= min(CurrentBox[0] + CurrentBox[2], NextBox[0] + NextBox[2]) + Padding
-                OverlapY = max(CurrentBox[1], NextBox[1]) <= min(CurrentBox[1] + CurrentBox[3], NextBox[1] + NextBox[3]) + Padding
-                if OverlapX and OverlapY:
-                    NewX = min(CurrentBox[0], NextBox[0])
-                    NewY = min(CurrentBox[1], NextBox[1])
-                    NewW = max(CurrentBox[0] + CurrentBox[2], NextBox[0] + NextBox[2]) - NewX
-                    NewH = max(CurrentBox[1] + CurrentBox[3], NextBox[1] + NextBox[3]) - NewY
-                    CurrentBox = [NewX, NewY, NewW, NewH]
-                    Used[J] = True
-                    MergedOccurred = True
-            NewBoxes.append(tuple(CurrentBox))
-        Boxes = NewBoxes
-    return Boxes
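An editorial example of the merge behaviour in the now-deleted helper: two boxes that overlap once the 5-pixel padding is taken into account collapse into one bounding box.

```python
# (X, Y, W, H) boxes; the second starts inside the first.
Boxes = [(0, 0, 10, 10), (5, 5, 10, 10)]
print(MergeBoxes(Boxes, Padding=5))  # [(0, 0, 15, 15)]
```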
-
-def GetChangeMask(Image1, Image2, Threshold=25, MinArea=100):
-    if Image1.shape != Image2.shape:
-        Logger.warning(f'Image shapes differ: {Image1.shape} vs {Image2.shape}. Resizing Image2.')
-        Image2 = cv2.resize(Image2, (Image1.shape[1], Image1.shape[0]))
-
-    Gray1 = cv2.cvtColor(Image1, cv2.COLOR_BGR2GRAY)
-    Gray2 = cv2.cvtColor(Image2, cv2.COLOR_BGR2GRAY)
-    Blur1 = cv2.GaussianBlur(Gray1, (5, 5), 0)
-    Blur2 = cv2.GaussianBlur(Gray2, (5, 5), 0)
-    DiffFrame = cv2.absdiff(Blur1, Blur2)
-    _, ThresholdCalc = cv2.threshold(DiffFrame, Threshold, 255, cv2.THRESH_BINARY)
-    Kernel = np.ones((5, 5), np.uint8)
-    DilatedThreshold = cv2.dilate(ThresholdCalc, Kernel, iterations=2)
-
-    Contours, _ = cv2.findContours(DilatedThreshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
-    OutputMask = np.zeros_like(DilatedThreshold)
-    ValidContours = 0
-    if Contours:
-        for Contour in Contours:
-            if cv2.contourArea(Contour) > MinArea:
-                cv2.drawContours(OutputMask, [Contour], -1, 255, -1)  # type: ignore
-                ValidContours += 1
-    Logger.info(f'GetChangeMask: Found {len(Contours)} raw contours, kept {ValidContours} after MinArea filter ({MinArea}).')
-    return OutputMask
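A small editorial driver for the helper above, using synthetic frames so it runs without image files; a 60x60 white square on an otherwise identical frame comes back as one filled contour region:

```python
import numpy as np

Frame1 = np.zeros((240, 320, 3), dtype=np.uint8)
Frame2 = Frame1.copy()
Frame2[40:100, 60:120] = 255  # the only difference between the frames

Mask = GetChangeMask(Frame1, Frame2, Threshold=25, MinArea=100)
print(Mask.shape, int(np.count_nonzero(Mask)))  # changed pixels, slightly dilated
```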
-
-def VisualizeDifferences(Image1Path, Image2Path, OutputPath, Threshold=25, MinArea=100, OutlineColor=(0, 255, 0), FillColor=(0, 180, 0), FillAlpha=0.3):
-    Logger.info(f'🎨 Visualizing differences between {Image1Path} and {Image2Path}')
-    Image1 = cv2.imread(Image1Path)
-    Image2 = cv2.imread(Image2Path)
-
-    if Image1 is None or Image2 is None:
-        Logger.error(f'❌ Error loading images for visualization: {Image1Path} or {Image2Path}')
-        return
-
-    if Image1.shape != Image2.shape:
-        Logger.warning(f'⚠️ Image shapes differ: {Image1.shape} vs {Image2.shape}. Resizing Image2 for visualization.')
-        Image2 = cv2.resize(Image2, (Image1.shape[1], Image1.shape[0]))
-
-    ChangedMask = GetChangeMask(Image1, Image2, Threshold, MinArea)
-    OutputImage = Image2.copy()
-    Overlay = OutputImage.copy()
-
-    # Apply fill color to changed areas
-    Overlay[ChangedMask == 255] = FillColor
-    cv2.addWeighted(Overlay, FillAlpha, OutputImage, 1 - FillAlpha, 0, OutputImage)
-
-    # Find contours of the changed areas to draw outlines
-    Contours, _ = cv2.findContours(ChangedMask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
-    cv2.drawContours(OutputImage, Contours, -1, OutlineColor, 2)
-    Logger.info(f'🎨 Drew {len(Contours)} difference regions.')
-
-    try:
-        cv2.imwrite(OutputPath, OutputImage)
-        Logger.info(f'💾 Saved difference visualization to {OutputPath}')
-    except Exception as E:
-        Logger.error(f'❌ Failed to save visualization {OutputPath}: {E}')
-
-# --- Function to be used in App.py for upscaling ---
-def GetChangedRegions(Image1, Image2, Threshold=25, Padding=10, MinArea=100, MergePadding=5):
-    StartTime = time.time()
-    Logger.info('🔄 Comparing images...')
-
-    if Image1 is None or Image2 is None:
-        Logger.error('❌ Cannot compare None images.')
-        return []
-
-    if Image1.shape != Image2.shape:
-        Logger.warning(f'⚠️ Image shapes differ: {Image1.shape} vs {Image2.shape}. Resizing Image2.')
-        Image2 = cv2.resize(Image2, (Image1.shape[1], Image1.shape[0]))
-
-    Gray1 = cv2.cvtColor(Image1, cv2.COLOR_BGR2GRAY)
-    Gray2 = cv2.cvtColor(Image2, cv2.COLOR_BGR2GRAY)
-    Blur1 = cv2.GaussianBlur(Gray1, (5, 5), 0)
-    Blur2 = cv2.GaussianBlur(Gray2, (5, 5), 0)
-    DiffFrame = cv2.absdiff(Blur1, Blur2)
-    _, ThresholdCalc = cv2.threshold(DiffFrame, Threshold, 255, cv2.THRESH_BINARY)
-    Kernel = np.ones((5, 5), np.uint8)
-    DilatedThreshold = cv2.dilate(ThresholdCalc, Kernel, iterations=2)
-    Contours, _ = cv2.findContours(DilatedThreshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
-    Logger.info(f'🔎 Found {len(Contours)} raw contours.')
-
-    BoundingBoxes = []
-    if Contours:
-        ValidContours = 0
-        for Contour in Contours:
-            ContourArea = cv2.contourArea(Contour)
-            if ContourArea > MinArea:
-                ValidContours += 1
-                X, Y, W, H = cv2.boundingRect(Contour)
-                PaddedX = max(0, X - Padding)
-                PaddedY = max(0, Y - Padding)
-                MaxW = Image1.shape[1] - PaddedX
-                MaxH = Image1.shape[0] - PaddedY
-                PaddedW = min(W + (Padding * 2), MaxW)
-                PaddedH = min(H + (Padding * 2), MaxH)
-                BoundingBoxes.append((PaddedX, PaddedY, PaddedW, PaddedH))
-        Logger.info(f'📊 Filtered {ValidContours} contours based on MinArea ({MinArea}).')
-
-    InitialBoxCount = len(BoundingBoxes)
-    MergedBoundingBoxes = MergeBoxes(BoundingBoxes, MergePadding)
-    EndTime = time.time()
-    if MergedBoundingBoxes:
-        Logger.info(f'📦 Merged {InitialBoxCount} boxes into {len(MergedBoundingBoxes)} regions.')
-    else:
-        Logger.info('❌ No significant changed regions found after filtering and merging.')
-    Logger.info(f'⏱️ Region finding took {EndTime - StartTime:.3f}s')
-    return MergedBoundingBoxes
-
-# Example call for the new visualization function
-VisualizeDifferences(r'C:\Users\joris\Pictures\frame_01660.png', r'C:\Users\joris\Pictures\frame_01661.png', './Diff.png', 25, 100, (0, 255, 0), (0, 180, 0), 0.3)
Models/1x_Anime1080Fixer_SuperUltraCompact.pth
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dca070132d022f8c82036aa687e45d552e8c583be7cdb81094cb835638df6e29
+size 183907
Scripts/SAD.py
ADDED

@@ -0,0 +1,120 @@
+import numpy as np
+import cv2
+
+# Segmented Absolute Difference (SAD)
+# Compares two frames, highlights differences in segments, and returns rectangles of changed areas
+
+def HighlightDifferences(BaseFrame: np.ndarray, NextFrame: np.ndarray, Columns: int = 20, Rows: int = 12, Threshold: float = 10, Padding: int = 1):
+    FrameHeight, FrameWidth = BaseFrame.shape[:2]
+    SegmentWidth = FrameWidth // Columns
+    SegmentHeight = FrameHeight // Rows
+    HighlightedFrame = BaseFrame.copy()
+    TotalSegments = 0
+    SimilarSegments = 0
+    DifferentSegments = 0
+    DifferentSegmentMask = np.zeros((Rows, Columns), dtype=bool)
+    for Row in range(Rows):
+        for Col in range(Columns):
+            Y = Row * SegmentHeight
+            X = Col * SegmentWidth
+            Y2 = FrameHeight if Row == Rows - 1 else Y + SegmentHeight
+            X2 = FrameWidth if Col == Columns - 1 else X + SegmentWidth
+            TotalSegments += 1
+            SegmentBase = BaseFrame[Y:Y2, X:X2]
+            SegmentNext = NextFrame[Y:Y2, X:X2]
+            GreyBase = cv2.cvtColor(SegmentBase, cv2.COLOR_BGR2GRAY)
+            GreyNext = cv2.cvtColor(SegmentNext, cv2.COLOR_BGR2GRAY)
+            BlurredBase = cv2.GaussianBlur(GreyBase, (5, 5), 0)
+            BlurredNext = cv2.GaussianBlur(GreyNext, (5, 5), 0)
+            AbsDiff = cv2.absdiff(BlurredBase, BlurredNext)
+            MeanDiff = np.mean(AbsDiff)  # type: ignore
+            if MeanDiff > Threshold:
+                DifferentSegments += 1
+                DifferentSegmentMask[Row, Col] = True
+            else:
+                SimilarSegments += 1
+    PaddedMask = DifferentSegmentMask.copy()
+    for Row in range(Rows):
+        for Col in range(Columns):
+            if DifferentSegmentMask[Row, Col]:
+                for PR in range(max(0, Row - Padding), min(Rows, Row + Padding + 1)):
+                    for PC in range(max(0, Col - Padding), min(Columns, Col + Padding + 1)):
+                        PaddedMask[PR, PC] = True
+    for Row in range(Rows):
+        for Col in range(Columns):
+            Y = Row * SegmentHeight
+            X = Col * SegmentWidth
+            Y2 = FrameHeight if Row == Rows - 1 else Y + SegmentHeight
+            X2 = FrameWidth if Col == Columns - 1 else X + SegmentWidth
+            SegmentBase = BaseFrame[Y:Y2, X:X2]
+            if PaddedMask[Row, Col]:
+                HighlightedFrame[Y:Y2, X:X2] = cv2.addWeighted(
+                    HighlightedFrame[Y:Y2, X:X2], 0.5,
+                    np.full_like(SegmentBase, (0, 0, 255)), 0.2, 0
+                )
+            else:
+                HighlightedFrame[Y:Y2, X:X2] = cv2.addWeighted(
+                    HighlightedFrame[Y:Y2, X:X2], 0.5,
+                    np.full_like(SegmentBase, (0, 255, 0)), 0.2, 0
+                )
+    SimilarityPercentage = (SimilarSegments / TotalSegments) * 100
+    TileCoords = []
+    for Row in range(Rows):
+        for Col in range(Columns):
+            if PaddedMask[Row, Col]:
+                TileCoords.append((Col, Row))
+    return HighlightedFrame, DifferentSegments, SimilarityPercentage, TileCoords
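The per-segment test above boils down to one number: the mean absolute difference of two blurred grayscale tiles. An editorial sketch of just that metric on synthetic data:

```python
import cv2
import numpy as np

SegmentBase = np.zeros((30, 32, 3), dtype=np.uint8)    # one 32x30 tile
SegmentNext = np.full((30, 32, 3), 80, dtype=np.uint8)

GreyBase = cv2.cvtColor(SegmentBase, cv2.COLOR_BGR2GRAY)
GreyNext = cv2.cvtColor(SegmentNext, cv2.COLOR_BGR2GRAY)
AbsDiff = cv2.absdiff(cv2.GaussianBlur(GreyBase, (5, 5), 0),
                      cv2.GaussianBlur(GreyNext, (5, 5), 0))
MeanDiff = float(np.mean(AbsDiff))
print(MeanDiff > 10)  # True: this tile would be flagged as different
```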
+
+def GetRectanglesFromTiles(TileMask: np.ndarray, MinDifferentRatio: float = 0.8):
+    Height, Width = TileMask.shape
+    Visited = np.zeros_like(TileMask, dtype=bool)
+    Rectangles = []
+    for Y in range(Height):
+        for X in range(Width):
+            if TileMask[Y, X] and not Visited[Y, X]:
+                W = 1
+                H = 1
+                Expand = True
+                while Expand:
+                    Expand = False
+                    if X + W < Width:
+                        NewCol = TileMask[Y:Y+H, X+W] & ~Visited[Y:Y+H, X+W]
+                        if np.any(NewCol):
+                            NewRect = TileMask[Y:Y+H, X:X+W+1] & ~Visited[Y:Y+H, X:X+W+1]
+                            Total = NewRect.size
+                            Diff = np.count_nonzero(NewRect)
+                            Ratio = Diff / Total
+                            if Ratio >= MinDifferentRatio and not np.any(Visited[Y:Y+H, X+W]):
+                                W += 1
+                                Expand = True
+                    if Y + H < Height:
+                        NewRow = TileMask[Y+H, X:X+W] & ~Visited[Y+H, X:X+W]
+                        if np.any(NewRow):
+                            NewRect = TileMask[Y:Y+H+1, X:X+W] & ~Visited[Y:Y+H+1, X:X+W]
+                            Total = NewRect.size
+                            Diff = np.count_nonzero(NewRect)
+                            Ratio = Diff / Total
+                            if Ratio >= MinDifferentRatio and not np.any(Visited[Y+H, X:X+W]):
+                                H += 1
+                                Expand = True
+                Visited[Y:Y+H, X:X+W] = True
+                Rectangles.append((X, Y, W, H))
+    return Rectangles
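An editorial example of the greedy expansion: a solid 2x2 block of flagged tiles in a 3x3 mask comes back as a single tile-space rectangle.

```python
import numpy as np

TileMask = np.array([
    [True,  True,  False],
    [True,  True,  False],
    [False, False, False],
])
print(GetRectanglesFromTiles(TileMask))  # [(0, 0, 2, 2)] as (X, Y, W, H)
```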
+
+def GetDifferenceRectangles(BaseFrame, NextFrame, Columns=20, Rows=12, Threshold=5, Padding=1):
+    HighlightedFrame, DifferentSegments, SimilarPercentage, TileCoords = HighlightDifferences(
+        BaseFrame, NextFrame, Columns=Columns, Rows=Rows, Threshold=Threshold, Padding=Padding
+    )
+    TileMask = np.zeros((Rows, Columns), dtype=bool)
+    for Col, Row in TileCoords:
+        if Row < TileMask.shape[0] and Col < TileMask.shape[1]:
+            TileMask[Row, Col] = True
+    Rectangles = GetRectanglesFromTiles(TileMask, MinDifferentRatio=0.7)
+    return {
+        'HighlightedFrame': HighlightedFrame,
+        'Rectangles': Rectangles,
+        'SimilarPercentage': SimilarPercentage,
+        'TileCoords': TileCoords,
+        'Columns': Columns,
+        'Rows': Rows
+    }
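Finally, an editorial end-to-end sketch of the module on two synthetic 640x360 frames (dimensions divisible by the 20x12 grid); only the patch around the changed square should be flagged:

```python
import numpy as np
# from Scripts.SAD import GetDifferenceRectangles  # when run outside this module

Base = np.zeros((360, 640, 3), dtype=np.uint8)
Next = Base.copy()
Next[0:60, 0:64] = 255  # change the top-left 2x2 block of 32x30-pixel tiles

Result = GetDifferenceRectangles(Base, Next, Columns=20, Rows=12, Threshold=5, Padding=1)
print(round(Result['SimilarPercentage'], 1))  # most of the 240 tiles are unchanged
print(Result['Rectangles'])                   # one rectangle covering the changed corner plus padding
```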