Commit 0e0fda4
Parent(s): 8e65ec5
Initial commit of the FrameLens project, including the core application logic in app.py, configuration files (.python-version, pyproject.toml, requirements.txt), example video data (data.json), and incidental macOS Finder metadata (.DS_Store). The application supports frame-by-frame video comparison using several quality metrics.
Files changed:
- .DS_Store (+0 −0)
- .python-version (+1 −0)
- README.md (+2 −2)
- app.py (+1457 −0)
- data.json (+22 −0)
- examples/.DS_Store (+0 −0)
- pyproject.toml (+16 −0)
- requirements.txt (+66 −0)
- uv.lock (+0 −0)
.DS_Store (ADDED)
Binary file (6.15 kB).
.python-version (ADDED)

```diff
@@ -0,0 +1 @@
+3.12
```
README.md (CHANGED)

```diff
@@ -6,9 +6,9 @@ colorTo: yellow
 sdk: gradio
 sdk_version: 5.38.2
 app_file: app.py
-pinned:
+pinned: true
 license: apache-2.0
-short_description:
+short_description: Tool for frame-by-frame video or image metric comparison
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
```
app.py (ADDED)
@@ -0,0 +1,1457 @@
```python
import json
import os

import cv2
import gradio as gr
import imagehash
import numpy as np
import plotly.graph_objects as go
from PIL import Image
from plotly.subplots import make_subplots
from scipy.stats import pearsonr
from skimage.metrics import mean_squared_error as mse_skimage
from skimage.metrics import peak_signal_noise_ratio as psnr_skimage
from skimage.metrics import structural_similarity as ssim


class FrameMetrics:
    """Class to compute and store frame-by-frame metrics"""

    def __init__(self):
        self.metrics = {}

    def compute_ssim(self, frame1, frame2):
        """Compute SSIM between two frames"""
        if frame1 is None or frame2 is None:
            return None

        try:
            # Convert to grayscale for SSIM computation
            gray1 = (
                cv2.cvtColor(frame1, cv2.COLOR_RGB2GRAY)
                if len(frame1.shape) == 3
                else frame1
            )
            gray2 = (
                cv2.cvtColor(frame2, cv2.COLOR_RGB2GRAY)
                if len(frame2.shape) == 3
                else frame2
            )

            # Ensure both frames have the same dimensions
            if gray1.shape != gray2.shape:
                # Resize to match the smaller dimension
                h = min(gray1.shape[0], gray2.shape[0])
                w = min(gray1.shape[1], gray2.shape[1])
                gray1 = cv2.resize(gray1, (w, h))
                gray2 = cv2.resize(gray2, (w, h))

            # Compute SSIM
            ssim_value = ssim(gray1, gray2, data_range=255)
            return ssim_value

        except Exception as e:
            print(f"SSIM computation failed: {e}")
            return None
```
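As a quick sanity check on the SSIM path above (not part of the commit): a frame compared against itself must score exactly 1.0, and added noise should pull the score down.

```python
import numpy as np
from skimage.metrics import structural_similarity as ssim

rng = np.random.default_rng(0)
frame = rng.integers(0, 256, size=(480, 640), dtype=np.uint8)

print(ssim(frame, frame, data_range=255))  # identical frames -> 1.0

# A noisy copy of the same frame scores lower
noisy = np.clip(
    frame.astype(int) + rng.integers(-40, 41, size=frame.shape), 0, 255
).astype(np.uint8)
print(ssim(frame, noisy, data_range=255))  # < 1.0
```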
```python
    def compute_ms_ssim(self, frame1, frame2):
        """Compute Multi-Scale SSIM between two frames"""
        if frame1 is None or frame2 is None:
            return None

        try:
            # Convert to grayscale for MS-SSIM computation
            gray1 = (
                cv2.cvtColor(frame1, cv2.COLOR_RGB2GRAY)
                if len(frame1.shape) == 3
                else frame1
            )
            gray2 = (
                cv2.cvtColor(frame2, cv2.COLOR_RGB2GRAY)
                if len(frame2.shape) == 3
                else frame2
            )

            # Ensure both frames have the same dimensions
            if gray1.shape != gray2.shape:
                h = min(gray1.shape[0], gray2.shape[0])
                w = min(gray1.shape[1], gray2.shape[1])
                gray1 = cv2.resize(gray1, (w, h))
                gray2 = cv2.resize(gray2, (w, h))

            # Ensure minimum size for multi-scale analysis
            min_size = 32
            if min(gray1.shape) < min_size:
                return None

            # Compute MS-SSIM using multiple scales
            from skimage.metrics import structural_similarity

            # Use win_size that works with image dimensions
            win_size = min(7, min(gray1.shape) // 4)
            if win_size < 3:
                win_size = 3

            ms_ssim_val = structural_similarity(
                gray1, gray2, data_range=255, win_size=win_size, multichannel=False
            )

            return ms_ssim_val

        except Exception as e:
            print(f"MS-SSIM computation failed: {e}")
            return None

    def compute_psnr(self, frame1, frame2):
        """Compute PSNR between two frames"""
        if frame1 is None or frame2 is None:
            return None

        try:
            # Ensure both frames have the same dimensions
            if frame1.shape != frame2.shape:
                h = min(frame1.shape[0], frame2.shape[0])
                w = min(frame1.shape[1], frame2.shape[1])
                c = (
                    min(frame1.shape[2], frame2.shape[2])
                    if len(frame1.shape) == 3
                    else 1
                )

                if len(frame1.shape) == 3:
                    frame1 = cv2.resize(frame1, (w, h))[:, :, :c]
                    frame2 = cv2.resize(frame2, (w, h))[:, :, :c]
                else:
                    frame1 = cv2.resize(frame1, (w, h))
                    frame2 = cv2.resize(frame2, (w, h))

            # Compute PSNR
            return psnr_skimage(frame1, frame2, data_range=255)
        except Exception as e:
            print(f"PSNR computation failed: {e}")
            return None

    def compute_mse(self, frame1, frame2):
        """Compute MSE between two frames"""
        if frame1 is None or frame2 is None:
            return None

        try:
            # Ensure both frames have the same dimensions
            if frame1.shape != frame2.shape:
                h = min(frame1.shape[0], frame2.shape[0])
                w = min(frame1.shape[1], frame2.shape[1])
                c = (
                    min(frame1.shape[2], frame2.shape[2])
                    if len(frame1.shape) == 3
                    else 1
                )

                if len(frame1.shape) == 3:
                    frame1 = cv2.resize(frame1, (w, h))[:, :, :c]
                    frame2 = cv2.resize(frame2, (w, h))[:, :, :c]
                else:
                    frame1 = cv2.resize(frame1, (w, h))
                    frame2 = cv2.resize(frame2, (w, h))

            # Compute MSE
            return mse_skimage(frame1, frame2)
        except Exception as e:
            print(f"MSE computation failed: {e}")
            return None

    def compute_phash(self, frame1, frame2):
        """Compute perceptual hash similarity between two frames"""
        if frame1 is None or frame2 is None:
            return None

        try:
            # Convert to PIL Images for imagehash
            pil1 = Image.fromarray(frame1)
            pil2 = Image.fromarray(frame2)

            # Compute perceptual hashes
            hash1 = imagehash.phash(pil1)
            hash2 = imagehash.phash(pil2)

            # Calculate similarity (lower hamming distance = more similar)
            hamming_distance = hash1 - hash2
            # Convert to similarity score (0-1, where 1 is identical)
            max_distance = len(str(hash1)) * 4  # 4 bits per hex char
            similarity = 1 - (hamming_distance / max_distance)

            return similarity
        except Exception as e:
            print(f"pHash computation failed: {e}")
            return None
```
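Two notes on the methods above. Despite its name, compute_ms_ssim performs a single structural_similarity call with an adaptive win_size, i.e. single-scale SSIM rather than a true multi-scale pyramid. And the pHash normalization follows from imagehash.phash defaulting to an 8×8 DCT hash (64 bits): str(hash1) is 16 hex characters, so max_distance is 16 × 4 = 64. A standalone sketch of that arithmetic (not part of the commit):

```python
import imagehash
import numpy as np
from PIL import Image

rng = np.random.default_rng(0)
img1 = Image.fromarray(rng.integers(0, 256, size=(64, 64, 3), dtype=np.uint8))
img2 = Image.fromarray(rng.integers(0, 256, size=(64, 64, 3), dtype=np.uint8))

h1, h2 = imagehash.phash(img1), imagehash.phash(img2)
hamming = h1 - h2                  # ImageHash overloads "-" as Hamming distance
max_distance = len(str(h1)) * 4    # 16 hex chars x 4 bits = 64
print(1 - hamming / max_distance)  # e.g. distance 6 maps to 1 - 6/64 = 0.90625
```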
```python
    def compute_color_histogram_correlation(self, frame1, frame2):
        """Compute color histogram correlation between two frames"""
        if frame1 is None or frame2 is None:
            return None

        try:
            # Ensure both frames have the same dimensions
            if frame1.shape != frame2.shape:
                h = min(frame1.shape[0], frame2.shape[0])
                w = min(frame1.shape[1], frame2.shape[1])
                frame1 = cv2.resize(frame1, (w, h))
                frame2 = cv2.resize(frame2, (w, h))

            # Compute histograms for each channel
            correlations = []

            if len(frame1.shape) == 3:  # Color image
                for i in range(3):  # R, G, B channels
                    hist1 = cv2.calcHist([frame1], [i], None, [256], [0, 256])
                    hist2 = cv2.calcHist([frame2], [i], None, [256], [0, 256])

                    # Flatten histograms
                    hist1 = hist1.flatten()
                    hist2 = hist2.flatten()

                    # Compute correlation
                    if np.std(hist1) > 0 and np.std(hist2) > 0:
                        corr, _ = pearsonr(hist1, hist2)
                        correlations.append(corr)

                # Return average correlation across channels
                return np.mean(correlations) if correlations else 0.0
            else:  # Grayscale
                hist1 = cv2.calcHist([frame1], [0], None, [256], [0, 256]).flatten()
                hist2 = cv2.calcHist([frame2], [0], None, [256], [0, 256]).flatten()

                if np.std(hist1) > 0 and np.std(hist2) > 0:
                    corr, _ = pearsonr(hist1, hist2)
                    return corr
                else:
                    return 0.0

        except Exception as e:
            print(f"Color histogram correlation computation failed: {e}")
            return None

    def compute_sharpness(self, frame):
        """Compute sharpness using Laplacian variance method"""
        if frame is None:
            return None

        # Convert to grayscale if needed
        gray = (
            cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) if len(frame.shape) == 3 else frame
        )

        # Compute Laplacian variance (higher values = sharper)
        laplacian = cv2.Laplacian(gray, cv2.CV_64F)
        sharpness = laplacian.var()

        return sharpness
```
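The Laplacian-variance measure used by compute_sharpness is easy to verify: blurring removes high-frequency detail, so the variance of the Laplacian response drops. A minimal sketch (not part of the commit):

```python
import cv2
import numpy as np

rng = np.random.default_rng(1)
gray = rng.integers(0, 256, size=(480, 640), dtype=np.uint8)
blurred = cv2.GaussianBlur(gray, (9, 9), 0)

sharp = cv2.Laplacian(gray, cv2.CV_64F).var()
blurry = cv2.Laplacian(blurred, cv2.CV_64F).var()
print(sharp > blurry)  # True: blurring lowers Laplacian variance
```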
```python
    def compute_frame_metrics(self, frame1, frame2, frame_idx):
        """Compute all metrics for a frame pair"""
        metrics = {
            "frame_index": frame_idx,
            "ssim": self.compute_ssim(frame1, frame2),
            "psnr": self.compute_psnr(frame1, frame2),
            "mse": self.compute_mse(frame1, frame2),
            "phash": self.compute_phash(frame1, frame2),
            "color_hist_corr": self.compute_color_histogram_correlation(frame1, frame2),
            "sharpness1": self.compute_sharpness(frame1),
            "sharpness2": self.compute_sharpness(frame2),
        }

        # Compute average sharpness for the pair
        if metrics["sharpness1"] is not None and metrics["sharpness2"] is not None:
            metrics["sharpness_avg"] = (
                metrics["sharpness1"] + metrics["sharpness2"]
            ) / 2
            metrics["sharpness_diff"] = abs(
                metrics["sharpness1"] - metrics["sharpness2"]
            )
        else:
            metrics["sharpness_avg"] = None
            metrics["sharpness_diff"] = None

        return metrics

    def compute_all_metrics(self, frames1, frames2):
        """Compute metrics for all frame pairs"""
        all_metrics = []
        max_frames = max(len(frames1), len(frames2))

        for i in range(max_frames):
            frame1 = frames1[i] if i < len(frames1) else None
            frame2 = frames2[i] if i < len(frames2) else None

            if frame1 is not None or frame2 is not None:
                metrics = self.compute_frame_metrics(frame1, frame2, i)
                all_metrics.append(metrics)
            else:
                # Handle cases where both frames are missing
                all_metrics.append(
                    {
                        "frame_index": i,
                        "ssim": None,
                        "ms_ssim": None,
                        "psnr": None,
                        "mse": None,
                        "phash": None,
                        "color_hist_corr": None,
                        "sharpness1": None,
                        "sharpness2": None,
                        "sharpness_avg": None,
                        "sharpness_diff": None,
                    }
                )

        return all_metrics

    def get_metric_summary(self, metrics_list):
        """Compute summary statistics for all metrics"""
        metric_names = [
            "ssim",
            "psnr",
            "mse",
            "phash",
            "color_hist_corr",
            "sharpness1",
            "sharpness2",
            "sharpness_avg",
            "sharpness_diff",
        ]

        summary = {
            "total_frames": len(metrics_list),
            "valid_frames": len([m for m in metrics_list if m.get("ssim") is not None]),
        }

        # Compute statistics for each metric
        for metric_name in metric_names:
            valid_values = [
                m[metric_name] for m in metrics_list if m.get(metric_name) is not None
            ]

            if valid_values:
                summary.update(
                    {
                        f"{metric_name}_mean": np.mean(valid_values),
                        f"{metric_name}_min": np.min(valid_values),
                        f"{metric_name}_max": np.max(valid_values),
                        f"{metric_name}_std": np.std(valid_values),
                    }
                )

        return summary
```
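FrameMetrics has no video I/O of its own, so the pipeline above can be exercised directly on synthetic frames. A short usage sketch (the variable names are illustrative, not part of the commit):

```python
import cv2
import numpy as np

rng = np.random.default_rng(42)
frames_a = [
    rng.integers(0, 256, size=(120, 160, 3), dtype=np.uint8) for _ in range(5)
]
frames_b = [cv2.GaussianBlur(f, (5, 5), 0) for f in frames_a]  # degraded copies

fm = FrameMetrics()
per_frame = fm.compute_all_metrics(frames_a, frames_b)
summary = fm.get_metric_summary(per_frame)
print(per_frame[0]["ssim"], summary["ssim_mean"], summary["valid_frames"])
```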
```python
    def create_modern_plot(self, metrics_list, current_frame=0):
        """Create a comprehensive multi-metric visualization with shared hover"""
        if not metrics_list:
            return None

        # Extract frame indices and metric values
        frame_indices = [m["frame_index"] for m in metrics_list]

        # Create 3x2 subplots with quality overview at the top
        fig = make_subplots(
            rows=3,
            cols=2,
            subplot_titles=(
                "Quality Overview (Combined Score)",
                "",  # Empty title for merged cell
                "SSIM",
                "PSNR vs MSE",
                "Perceptual Hash vs Color Histogram",
                "Individual Sharpness (Video 1 vs Video 2)",
            ),
            specs=[
                [
                    {"colspan": 2, "secondary_y": False},
                    None,
                ],  # Row 1: Quality Overview (single axis)
                [
                    {"secondary_y": False},
                    {"secondary_y": True},
                ],  # Row 2: SSIM (single axis), PSNR vs MSE
                [
                    {"secondary_y": True},
                    {"secondary_y": True},
                ],  # Row 3: pHash vs Color, Individual Sharpness
            ],
            vertical_spacing=0.12,
            horizontal_spacing=0.1,
        )

        # Helper function to get valid data
        def get_valid_data(metric_name):
            values = [m.get(metric_name) for m in metrics_list]
            valid_indices = [i for i, v in enumerate(values) if v is not None]
            valid_values = [values[i] for i in valid_indices]
            valid_frames = [frame_indices[i] for i in valid_indices]
            return valid_frames, valid_values

        # Plot 1: Quality Overview - Combined Score Only (row 1, full width)
        ssim_frames, ssim_values = get_valid_data("ssim")
        psnr_frames, psnr_values = get_valid_data("psnr")

        # Show only combined quality score
        if ssim_values and psnr_values and len(ssim_values) == len(psnr_values):
            # Normalize metrics to 0-1 scale for comparison
            ssim_norm = np.array(ssim_values)
            psnr_norm = np.clip(np.array(psnr_values) / 50, 0, 1)
            quality_score = (ssim_norm + psnr_norm) / 2

            fig.add_trace(
                go.Scatter(
                    x=ssim_frames,
                    y=quality_score,
                    mode="lines+markers",
                    name="Quality Score ↑",
                    line=dict(color="gold", width=4),
                    marker=dict(size=8),
                    hovertemplate="<b>Frame %{x}</b><br>Quality Score: %{y:.3f}<extra></extra>",
                    fill="tonexty",
                ),
                row=1,
                col=1,
            )

        # Plot 2: SSIM (row 2, col 1)
        if ssim_values:
            fig.add_trace(
                go.Scatter(
                    x=ssim_frames,
                    y=ssim_values,
                    mode="lines+markers",
                    name="SSIM ↑",
                    line=dict(color="blue", width=3),
                    marker=dict(size=6),
                    hovertemplate="<b>Frame %{x}</b><br>SSIM: %{y:.4f}<extra></extra>",
                ),
                row=2,
                col=1,
            )

        # Get pHash data for later use
        phash_frames, phash_values = get_valid_data("phash")

        # Plot 3: PSNR vs MSE (row 2, col 2) - keep as is since already shows individual metrics
        if psnr_values:
            fig.add_trace(
                go.Scatter(
                    x=psnr_frames,
                    y=psnr_values,
                    mode="lines+markers",
                    name="PSNR ↑",
                    line=dict(color="green", width=2),
                    hovertemplate="<b>Frame %{x}</b><br>PSNR: %{y:.2f} dB<extra></extra>",
                ),
                row=2,
                col=2,
            )

        mse_frames, mse_values = get_valid_data("mse")
        if mse_values:
            fig.add_trace(
                go.Scatter(
                    x=mse_frames,
                    y=mse_values,
                    mode="lines+markers",
                    name="MSE ↓",
                    line=dict(color="red", width=2),
                    hovertemplate="<b>Frame %{x}</b><br>MSE: %{y:.2f}<extra></extra>",
                    yaxis="y6",
                ),
                row=2,
                col=2,
                secondary_y=True,
            )

        # Plot 4: Perceptual Hash vs Color Histogram (row 3, col 1) - keep as is
        if phash_values:
            fig.add_trace(
                go.Scatter(
                    x=phash_frames,
                    y=phash_values,
                    mode="lines+markers",
                    name="pHash ↑",
                    line=dict(color="purple", width=2),
                    hovertemplate="<b>Frame %{x}</b><br>pHash: %{y:.4f}<extra></extra>",
                ),
                row=3,
                col=1,
            )

        hist_frames, hist_values = get_valid_data("color_hist_corr")
        if hist_values:
            fig.add_trace(
                go.Scatter(
                    x=hist_frames,
                    y=hist_values,
                    mode="lines+markers",
                    name="Color Hist ↑",
                    line=dict(color="orange", width=2),
                    hovertemplate="<b>Frame %{x}</b><br>Hist Corr: %{y:.4f}<extra></extra>",
                    yaxis="y8",
                ),
                row=3,
                col=1,
                secondary_y=True,
            )

        # Plot 5: Individual Sharpness - Video 1 vs Video 2 (row 3, col 2)
        sharp1_frames, sharp1_values = get_valid_data("sharpness1")
        sharp2_frames, sharp2_values = get_valid_data("sharpness2")

        if sharp1_values:
            fig.add_trace(
                go.Scatter(
                    x=sharp1_frames,
                    y=sharp1_values,
                    mode="lines+markers",
                    name="Video 1 Sharpness ↑",
                    line=dict(color="darkgreen", width=2),
                    hovertemplate="<b>Frame %{x}</b><br>Video 1 Sharpness: %{y:.1f}<extra></extra>",
                ),
                row=3,
                col=2,
            )

        if sharp2_values:
            fig.add_trace(
                go.Scatter(
                    x=sharp2_frames,
                    y=sharp2_values,
                    mode="lines+markers",
                    name="Video 2 Sharpness ↑",
                    line=dict(color="darkblue", width=2),
                    hovertemplate="<b>Frame %{x}</b><br>Video 2 Sharpness: %{y:.1f}<extra></extra>",
                    yaxis="y10",
                ),
                row=3,
                col=2,
                secondary_y=True,
            )

        # Add current frame marker to all plots
        if current_frame is not None:
            # Add vertical line to each subplot to show current frame
            # Subplot (1,1): Quality Overview (full width)
            fig.add_vline(
                x=current_frame,
                line_dash="dash",
                line_color="red",
                line_width=2,
                row=1,
                col=1,
            )

            # Subplot (2,1): Similarity Metrics
            fig.add_vline(
                x=current_frame,
                line_dash="dash",
                line_color="red",
                line_width=2,
                row=2,
                col=1,
            )

            # Subplot (2,2): PSNR vs MSE
            fig.add_vline(
                x=current_frame,
                line_dash="dash",
                line_color="red",
                line_width=2,
                row=2,
                col=2,
            )

            # Subplot (3,1): pHash vs Color Histogram
            fig.add_vline(
                x=current_frame,
                line_dash="dash",
                line_color="red",
                line_width=2,
                row=3,
                col=1,
            )

            # Subplot (3,2): Individual Sharpness
            fig.add_vline(
                x=current_frame,
                line_dash="dash",
                line_color="red",
                line_width=2,
                row=3,
                col=2,
            )

        # Update layout with shared hover mode and other improvements
        fig.update_layout(
            height=900,
            showlegend=True,
            hovermode="x unified",  # Shared hover pointer across subplots
            dragmode=False,
            title={
                "text": "📊 Multi-Metric Video Quality Analysis Dashboard",
                "x": 0.5,
                "xanchor": "center",
                "font": {"size": 16},
            },
            legend={
                "orientation": "h",
                "yanchor": "bottom",
                "y": 1.02,
                "xanchor": "center",
                "x": 0.5,
                "font": {"size": 10},
            },
            margin=dict(t=100, b=50, l=50, r=50),
            plot_bgcolor="rgba(0,0,0,0)",
            paper_bgcolor="rgba(0,0,0,0)",
        )

        # Update axes labels and ranges with improved configuration
        fig.update_xaxes(title_text="Frame", fixedrange=True)

        # Quality Overview axis (row 1, col 1) - focused range to emphasize differences
        quality_values = []
        if ssim_values and psnr_values and len(ssim_values) == len(psnr_values):
            ssim_norm = np.array(ssim_values)
            psnr_norm = np.clip(np.array(psnr_values) / 50, 0, 1)
            quality_values = (ssim_norm + psnr_norm) / 2

        if len(quality_values) > 0:
            # Use dynamic range based on data with some padding for better visualization
            min_qual = float(np.min(quality_values))
            max_qual = float(np.max(quality_values))
            range_padding = (max_qual - min_qual) * 0.1  # 10% padding
            y_min = max(0, min_qual - range_padding)
            y_max = min(1, max_qual + range_padding)
            # Ensure minimum range for visibility
            if (y_max - y_min) < 0.1:
                center = (y_max + y_min) / 2
                y_min = max(0, center - 0.05)
                y_max = min(1, center + 0.05)
        else:
            # Fallback range
            y_min, y_max = 0.5, 1.0

        fig.update_yaxes(
            title_text="Quality Score",
            row=1,
            col=1,
            fixedrange=True,
            range=[y_min, y_max],
        )

        # SSIM axis (row 2, col 1)
        fig.update_yaxes(
            title_text="SSIM", row=2, col=1, fixedrange=True, range=[0, 1.05]
        )

        # PSNR vs MSE axes (row 2, col 2)
        fig.update_yaxes(title_text="PSNR (dB)", row=2, col=2, fixedrange=True)
        fig.update_yaxes(
            title_text="MSE", row=2, col=2, secondary_y=True, fixedrange=True
        )

        # pHash vs Color Histogram axes (row 3, col 1)
        fig.update_yaxes(title_text="pHash Similarity", row=3, col=1, fixedrange=True)
        fig.update_yaxes(
            title_text="Histogram Correlation",
            row=3,
            col=1,
            secondary_y=True,
            fixedrange=True,
        )

        # Individual Sharpness axes (row 3, col 2)
        fig.update_yaxes(title_text="Video 1 Sharpness", row=3, col=2, fixedrange=True)
        fig.update_yaxes(
            title_text="Video 2 Sharpness",
            row=3,
            col=2,
            secondary_y=True,
            fixedrange=True,
        )

        return fig


class VideoFrameComparator:
    def __init__(self):
        self.video1_frames = []
        self.video2_frames = []
        self.max_frames = 0
        self.frame_metrics = FrameMetrics()
        self.computed_metrics = []
        self.metrics_summary = {}

    def extract_frames(self, video_path):
        """Extract all frames from a video file or URL"""
        if not video_path:
            return []

        # Check if it's a URL or local file
        is_url = video_path.startswith(("http://", "https://"))

        if not is_url and not os.path.exists(video_path):
            print(f"Warning: Local video file not found: {video_path}")
            return []

        frames = []
        cap = cv2.VideoCapture(video_path)

        if not cap.isOpened():
            print(
                f"Error: Could not open video {'URL' if is_url else 'file'}: {video_path}"
            )
            return []

        try:
            frame_count = 0
            while True:
                ret, frame = cap.read()
                if not ret:
                    break
                # Convert BGR to RGB for display
                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                frames.append(frame_rgb)
                frame_count += 1

                # Add progress feedback for URLs (which might be slower)
                if is_url and frame_count % 30 == 0:
                    print(f"Processed {frame_count} frames from URL...")

        except Exception as e:
            print(f"Error processing video: {e}")
        finally:
            cap.release()

        print(
            f"Successfully extracted {len(frames)} frames from {'URL' if is_url else 'file'}: {video_path}"
        )
        return frames
```
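Note that extract_frames decodes every frame into an uncompressed RGB array and keeps all of them in memory. Rough budgeting arithmetic (the helper below is illustrative, not part of the commit): a 640×480 RGB frame costs 640 × 480 × 3 ≈ 0.92 MB, so a 1,500-frame clip needs on the order of 1.4 GB of RAM per video.

```python
def estimated_frame_ram_gb(n_frames, width=640, height=480, channels=3):
    """Rough RAM footprint of the extracted uint8 RGB frames, in GB."""
    return n_frames * width * height * channels / 1e9

print(estimated_frame_ram_gb(1500))  # ~1.38 GB for a 1,500-frame 640x480 clip
```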
```python
    def is_comparison_in_data_json(
        self, video1_path, video2_path, json_file_path="data.json"
    ):
        """Check if this video comparison exists in data.json"""
        try:
            with open(json_file_path, "r") as f:
                data = json.load(f)

            for comparison in data.get("comparisons", []):
                videos = comparison.get("videos", [])
                if len(videos) == 2:
                    # Check both orders (works for both local files and URLs)
                    if (videos[0] == video1_path and videos[1] == video2_path) or (
                        videos[0] == video2_path and videos[1] == video1_path
                    ):
                        return True

            return False
        except:
            return False
```
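is_comparison_in_data_json, together with the loaders further down, implies the shape of data.json: a top-level "comparisons" list whose entries each hold a two-element "videos" list of local paths or HTTP(S) URLs. A hypothetical file matching that schema (the paths are placeholders, not the actual contents of the committed data.json):

```python
import json

example = {
    "comparisons": [
        {"videos": ["examples/original.mp4", "examples/compressed.mp4"]},
        {"videos": ["https://example.com/a.mp4", "https://example.com/b.mp4"]},
    ]
}
print(json.dumps(example, indent=2))
```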
| 757 |
+
def load_videos(self, video1_path, video2_path):
|
| 758 |
+
"""Load both videos and extract frames"""
|
| 759 |
+
if not video1_path and not video2_path:
|
| 760 |
+
return "Please upload at least one video.", 0, None, None, "", None
|
| 761 |
+
|
| 762 |
+
# Extract frames from both videos
|
| 763 |
+
self.video1_frames = self.extract_frames(video1_path) if video1_path else []
|
| 764 |
+
self.video2_frames = self.extract_frames(video2_path) if video2_path else []
|
| 765 |
+
|
| 766 |
+
# Determine maximum number of frames
|
| 767 |
+
self.max_frames = max(len(self.video1_frames), len(self.video2_frames))
|
| 768 |
+
|
| 769 |
+
if self.max_frames == 0:
|
| 770 |
+
return (
|
| 771 |
+
"No valid frames found in the uploaded videos.",
|
| 772 |
+
0,
|
| 773 |
+
None,
|
| 774 |
+
None,
|
| 775 |
+
"",
|
| 776 |
+
None,
|
| 777 |
+
)
|
| 778 |
+
|
| 779 |
+
# Compute metrics if both videos are present and not in data.json
|
| 780 |
+
metrics_info = ""
|
| 781 |
+
metrics_plot = None
|
| 782 |
+
|
| 783 |
+
if (
|
| 784 |
+
video1_path
|
| 785 |
+
and video2_path
|
| 786 |
+
and not self.is_comparison_in_data_json(video1_path, video2_path)
|
| 787 |
+
):
|
| 788 |
+
print("Computing comprehensive frame-by-frame metrics...")
|
| 789 |
+
self.computed_metrics = self.frame_metrics.compute_all_metrics(
|
| 790 |
+
self.video1_frames, self.video2_frames
|
| 791 |
+
)
|
| 792 |
+
self.metrics_summary = self.frame_metrics.get_metric_summary(
|
| 793 |
+
self.computed_metrics
|
| 794 |
+
)
|
| 795 |
+
|
| 796 |
+
# Build metrics info string
|
| 797 |
+
metrics_info = "\n\n📊 Computed Metrics Summary:\n"
|
| 798 |
+
|
| 799 |
+
metric_display = {
|
| 800 |
+
"ssim": ("SSIM", ".4f", "", "↑ Higher=Better"),
|
| 801 |
+
"psnr": ("PSNR", ".2f", " dB", "↑ Higher=Better"),
|
| 802 |
+
"mse": ("MSE", ".2f", "", "↓ Lower=Better"),
|
| 803 |
+
"phash": ("pHash", ".4f", "", "↑ Higher=Better"),
|
| 804 |
+
"color_hist_corr": ("Color Hist", ".4f", "", "↑ Higher=Better"),
|
| 805 |
+
"sharpness_avg": ("Sharpness", ".1f", "", "↑ Higher=Better"),
|
| 806 |
+
}
|
| 807 |
+
|
| 808 |
+
for metric_key, (
|
| 809 |
+
display_name,
|
| 810 |
+
format_str,
|
| 811 |
+
unit,
|
| 812 |
+
direction,
|
| 813 |
+
) in metric_display.items():
|
| 814 |
+
if self.metrics_summary.get(f"{metric_key}_mean") is not None:
|
| 815 |
+
mean_val = self.metrics_summary[f"{metric_key}_mean"]
|
| 816 |
+
std_val = self.metrics_summary[f"{metric_key}_std"]
|
| 817 |
+
metrics_info += f"{display_name}: μ={mean_val:{format_str}}{unit}, σ={std_val:{format_str}}{unit} ({direction})\n"
|
| 818 |
+
|
| 819 |
+
metrics_info += f"Valid Frames: {self.metrics_summary['valid_frames']}/{self.metrics_summary['total_frames']}"
|
| 820 |
+
|
| 821 |
+
# Generate initial plot
|
| 822 |
+
metrics_plot = self.frame_metrics.create_modern_plot(
|
| 823 |
+
self.computed_metrics, 0
|
| 824 |
+
)
|
| 825 |
+
else:
|
| 826 |
+
self.computed_metrics = []
|
| 827 |
+
self.metrics_summary = {}
|
| 828 |
+
if video1_path and video2_path:
|
| 829 |
+
metrics_info = "\n\n📋 Note: This comparison is predefined in data.json (metrics not computed)"
|
| 830 |
+
|
| 831 |
+
# Get initial frames
|
| 832 |
+
frame1 = (
|
| 833 |
+
self.video1_frames[0]
|
| 834 |
+
if self.video1_frames
|
| 835 |
+
else np.zeros((480, 640, 3), dtype=np.uint8)
|
| 836 |
+
)
|
| 837 |
+
frame2 = (
|
| 838 |
+
self.video2_frames[0]
|
| 839 |
+
if self.video2_frames
|
| 840 |
+
else np.zeros((480, 640, 3), dtype=np.uint8)
|
| 841 |
+
)
|
| 842 |
+
|
| 843 |
+
status_msg = "Videos loaded successfully!\n"
|
| 844 |
+
status_msg += f"Video 1: {len(self.video1_frames)} frames\n"
|
| 845 |
+
status_msg += f"Video 2: {len(self.video2_frames)} frames\n"
|
| 846 |
+
status_msg += (
|
| 847 |
+
f"Use the slider to navigate through frames (0-{self.max_frames - 1})"
|
| 848 |
+
)
|
| 849 |
+
status_msg += metrics_info
|
| 850 |
+
|
| 851 |
+
return (
|
| 852 |
+
status_msg,
|
| 853 |
+
self.max_frames - 1,
|
| 854 |
+
frame1,
|
| 855 |
+
frame2,
|
| 856 |
+
self.get_current_frame_info(0),
|
| 857 |
+
metrics_plot,
|
| 858 |
+
)
|
| 859 |
+
|
| 860 |
+
def get_frames_at_index(self, frame_index):
|
| 861 |
+
"""Get frames at specific index from both videos"""
|
| 862 |
+
frame_index = int(frame_index)
|
| 863 |
+
|
| 864 |
+
# Get frame from video 1
|
| 865 |
+
if frame_index < len(self.video1_frames):
|
| 866 |
+
frame1 = self.video1_frames[frame_index]
|
| 867 |
+
else:
|
| 868 |
+
# Create a placeholder if frame doesn't exist
|
| 869 |
+
frame1 = np.zeros((480, 640, 3), dtype=np.uint8)
|
| 870 |
+
cv2.putText(
|
| 871 |
+
frame1,
|
| 872 |
+
f"Frame {frame_index} not available",
|
| 873 |
+
(50, 240),
|
| 874 |
+
cv2.FONT_HERSHEY_SIMPLEX,
|
| 875 |
+
1,
|
| 876 |
+
(255, 255, 255),
|
| 877 |
+
2,
|
| 878 |
+
)
|
| 879 |
+
|
| 880 |
+
# Get frame from video 2
|
| 881 |
+
if frame_index < len(self.video2_frames):
|
| 882 |
+
frame2 = self.video2_frames[frame_index]
|
| 883 |
+
else:
|
| 884 |
+
# Create a placeholder if frame doesn't exist
|
| 885 |
+
frame2 = np.zeros((480, 640, 3), dtype=np.uint8)
|
| 886 |
+
cv2.putText(
|
| 887 |
+
frame2,
|
| 888 |
+
f"Frame {frame_index} not available",
|
| 889 |
+
(50, 240),
|
| 890 |
+
cv2.FONT_HERSHEY_SIMPLEX,
|
| 891 |
+
1,
|
| 892 |
+
(255, 255, 255),
|
| 893 |
+
2,
|
| 894 |
+
)
|
| 895 |
+
|
| 896 |
+
return frame1, frame2
|
| 897 |
+
|
| 898 |
+
def get_current_frame_info(self, frame_index):
|
| 899 |
+
"""Get information about the current frame including metrics"""
|
| 900 |
+
frame_index = int(frame_index)
|
| 901 |
+
info = f"Current Frame: {frame_index} / {self.max_frames - 1}"
|
| 902 |
+
|
| 903 |
+
# Add metrics info if available
|
| 904 |
+
if self.computed_metrics and frame_index < len(self.computed_metrics):
|
| 905 |
+
metrics = self.computed_metrics[frame_index]
|
| 906 |
+
|
| 907 |
+
# === COMPARISON METRICS (Between Videos) ===
|
| 908 |
+
comparison_metrics = []
|
| 909 |
+
|
| 910 |
+
# SSIM with quality assessment
|
| 911 |
+
if metrics.get("ssim") is not None:
|
| 912 |
+
ssim_val = metrics["ssim"]
|
| 913 |
+
if ssim_val >= 0.9:
|
| 914 |
+
quality = "🟢 Excellent"
|
| 915 |
+
elif ssim_val >= 0.8:
|
| 916 |
+
quality = "🔵 Good"
|
| 917 |
+
elif ssim_val >= 0.6:
|
| 918 |
+
quality = "🟡 Fair"
|
| 919 |
+
else:
|
| 920 |
+
quality = "🔴 Poor"
|
| 921 |
+
comparison_metrics.append(f"SSIM: {ssim_val:.4f} ↑ ({quality})")
|
| 922 |
+
|
| 923 |
+
# PSNR with quality indicator
|
| 924 |
+
if metrics.get("psnr") is not None:
|
| 925 |
+
psnr_val = metrics["psnr"]
|
| 926 |
+
if psnr_val >= 40:
|
| 927 |
+
psnr_quality = "🟢"
|
| 928 |
+
elif psnr_val >= 30:
|
| 929 |
+
psnr_quality = "🔵"
|
| 930 |
+
elif psnr_val >= 20:
|
| 931 |
+
psnr_quality = "🟡"
|
| 932 |
+
else:
|
| 933 |
+
psnr_quality = "🔴"
|
| 934 |
+
comparison_metrics.append(f"PSNR: {psnr_val:.1f}dB ↑ {psnr_quality}")
|
| 935 |
+
|
| 936 |
+
# MSE with quality indicator (lower is better)
|
| 937 |
+
if metrics.get("mse") is not None:
|
| 938 |
+
mse_val = metrics["mse"]
|
| 939 |
+
if mse_val <= 50:
|
| 940 |
+
mse_quality = "🟢"
|
| 941 |
+
elif mse_val <= 100:
|
| 942 |
+
mse_quality = "🔵"
|
| 943 |
+
elif mse_val <= 200:
|
| 944 |
+
mse_quality = "🟡"
|
| 945 |
+
else:
|
| 946 |
+
mse_quality = "🔴"
|
| 947 |
+
comparison_metrics.append(f"MSE: {mse_val:.1f} ↓ {mse_quality}")
|
| 948 |
+
|
| 949 |
+
# pHash with quality indicator
|
| 950 |
+
if metrics.get("phash") is not None:
|
| 951 |
+
phash_val = metrics["phash"]
|
| 952 |
+
if phash_val >= 0.95:
|
| 953 |
+
phash_quality = "🟢"
|
| 954 |
+
elif phash_val >= 0.9:
|
| 955 |
+
phash_quality = "🔵"
|
| 956 |
+
elif phash_val >= 0.8:
|
| 957 |
+
phash_quality = "🟡"
|
| 958 |
+
else:
|
| 959 |
+
phash_quality = "🔴"
|
| 960 |
+
comparison_metrics.append(f"pHash: {phash_val:.3f} ↑ {phash_quality}")
|
| 961 |
+
|
| 962 |
+
# Color Histogram Correlation
|
| 963 |
+
if metrics.get("color_hist_corr") is not None:
|
| 964 |
+
color_val = metrics["color_hist_corr"]
|
| 965 |
+
if color_val >= 0.9:
|
| 966 |
+
color_quality = "🟢"
|
| 967 |
+
elif color_val >= 0.8:
|
| 968 |
+
color_quality = "🔵"
|
| 969 |
+
elif color_val >= 0.6:
|
| 970 |
+
color_quality = "🟡"
|
| 971 |
+
else:
|
| 972 |
+
color_quality = "🔴"
|
| 973 |
+
comparison_metrics.append(f"Color: {color_val:.3f} ↑ {color_quality}")
|
| 974 |
+
|
| 975 |
+
# Add comparison metrics to info
|
| 976 |
+
if comparison_metrics:
|
| 977 |
+
info += " | " + " | ".join(comparison_metrics)
|
| 978 |
+
|
| 979 |
+
# === INDIVIDUAL IMAGE METRICS ===
|
| 980 |
+
individual_metrics = []
|
| 981 |
+
|
| 982 |
+
# Individual Sharpness for each video
|
| 983 |
+
if metrics.get("sharpness1") is not None:
|
| 984 |
+
sharp1 = metrics["sharpness1"]
|
| 985 |
+
if sharp1 >= 200:
|
| 986 |
+
sharp1_quality = "🟢"
|
| 987 |
+
elif sharp1 >= 100:
|
| 988 |
+
sharp1_quality = "🔵"
|
| 989 |
+
elif sharp1 >= 50:
|
| 990 |
+
sharp1_quality = "🟡"
|
| 991 |
+
else:
|
| 992 |
+
sharp1_quality = "🔴"
|
| 993 |
+
individual_metrics.append(
|
| 994 |
+
f"V1 Sharpness: {sharp1:.0f} ↑ {sharp1_quality}"
|
| 995 |
+
)
|
| 996 |
+
|
| 997 |
+
if metrics.get("sharpness2") is not None:
|
| 998 |
+
sharp2 = metrics["sharpness2"]
|
| 999 |
+
if sharp2 >= 200:
|
| 1000 |
+
sharp2_quality = "🟢"
|
| 1001 |
+
elif sharp2 >= 100:
|
| 1002 |
+
sharp2_quality = "🔵"
|
| 1003 |
+
elif sharp2 >= 50:
|
| 1004 |
+
sharp2_quality = "🟡"
|
| 1005 |
+
else:
|
| 1006 |
+
sharp2_quality = "🔴"
|
| 1007 |
+
individual_metrics.append(
|
| 1008 |
+
f"V2 Sharpness: {sharp2:.0f} ↑ {sharp2_quality}"
|
| 1009 |
+
)
|
| 1010 |
+
|
| 1011 |
+
# Sharpness comparison and winner
|
| 1012 |
+
if (
|
| 1013 |
+
metrics.get("sharpness1") is not None
|
| 1014 |
+
and metrics.get("sharpness2") is not None
|
| 1015 |
+
):
|
| 1016 |
+
sharp1 = metrics["sharpness1"]
|
| 1017 |
+
sharp2 = metrics["sharpness2"]
|
| 1018 |
+
|
| 1019 |
+
# Determine winner
|
| 1020 |
+
if sharp1 > sharp2:
|
| 1021 |
+
winner = "V1"
|
| 1022 |
+
winner_emoji = "🏆"
|
| 1023 |
+
elif sharp2 > sharp1:
|
| 1024 |
+
winner = "V2"
|
| 1025 |
+
winner_emoji = "🏆"
|
| 1026 |
+
else:
|
| 1027 |
+
winner = "Tie"
|
| 1028 |
+
winner_emoji = "⚖️"
|
| 1029 |
+
|
| 1030 |
+
diff_pct = abs(sharp1 - sharp2) / max(sharp1, sharp2) * 100
|
| 1031 |
+
|
| 1032 |
+
# Add significance
|
| 1033 |
+
if diff_pct > 20:
|
| 1034 |
+
significance = "Major"
|
| 1035 |
+
elif diff_pct > 10:
|
| 1036 |
+
significance = "Moderate"
|
| 1037 |
+
elif diff_pct > 5:
|
| 1038 |
+
significance = "Minor"
|
| 1039 |
+
else:
|
| 1040 |
+
significance = "Negligible"
|
| 1041 |
+
|
| 1042 |
+
individual_metrics.append(
|
| 1043 |
+
f"Sharpness Winner: {winner_emoji}{winner} ({significance})"
|
| 1044 |
+
)
|
| 1045 |
+
|
| 1046 |
+
# Add individual metrics to info
|
| 1047 |
+
if individual_metrics:
|
| 1048 |
+
info += "\n📊 Individual: " + " | ".join(individual_metrics)
|
| 1049 |
+
|
| 1050 |
+
# === OVERALL QUALITY ASSESSMENT ===
|
| 1051 |
+
quality_score = 0
|
| 1052 |
+
quality_count = 0
|
| 1053 |
+
|
| 1054 |
+
# Calculate overall quality score
|
| 1055 |
+
if metrics.get("ssim") is not None:
|
| 1056 |
+
quality_score += metrics["ssim"]
|
| 1057 |
+
quality_count += 1
|
| 1058 |
+
|
| 1059 |
+
if metrics.get("psnr") is not None:
|
| 1060 |
+
# Normalize PSNR to 0-1 scale (assume 50dB max)
|
| 1061 |
+
psnr_norm = min(metrics["psnr"] / 50, 1.0)
|
| 1062 |
+
quality_score += psnr_norm
|
| 1063 |
+
quality_count += 1
|
| 1064 |
+
|
| 1065 |
+
if metrics.get("phash") is not None:
|
| 1066 |
+
quality_score += metrics["phash"]
|
| 1067 |
+
quality_count += 1
|
| 1068 |
+
|
| 1069 |
+
if quality_count > 0:
|
| 1070 |
+
avg_quality = quality_score / quality_count
|
| 1071 |
+
|
| 1072 |
+
# Add overall assessment
|
| 1073 |
+
if avg_quality >= 0.9:
|
| 1074 |
+
overall = "✨ Excellent Match"
|
| 1075 |
+
elif avg_quality >= 0.8:
|
| 1076 |
+
overall = "✅ Good Match"
|
| 1077 |
+
elif avg_quality >= 0.6:
|
| 1078 |
+
overall = "⚠️ Fair Match"
|
| 1079 |
+
else:
|
| 1080 |
+
overall = "❌ Poor Match"
|
| 1081 |
+
|
| 1082 |
+
info += f"\n🎯 Overall: {overall}"
|
| 1083 |
+
|
| 1084 |
+
return info
|
| 1085 |
+
|
| 1086 |
+
def get_updated_plot(self, frame_index):
|
| 1087 |
+
"""Get updated plot with current frame highlighted"""
|
| 1088 |
+
if self.computed_metrics:
|
| 1089 |
+
return self.frame_metrics.create_modern_plot(
|
| 1090 |
+
self.computed_metrics, int(frame_index)
|
| 1091 |
+
)
|
| 1092 |
+
return None
|
| 1093 |
+
|
| 1094 |
+
|
| 1095 |
+
def load_examples_from_json(json_file_path="data.json"):
|
| 1096 |
+
"""Load example video pairs from JSON configuration file"""
|
| 1097 |
+
try:
|
| 1098 |
+
with open(json_file_path, "r") as f:
|
| 1099 |
+
data = json.load(f)
|
| 1100 |
+
|
| 1101 |
+
examples = []
|
| 1102 |
+
|
| 1103 |
+
# Extract video pairs from the comparisons
|
| 1104 |
+
for comparison in data.get("comparisons", []):
|
| 1105 |
+
videos = comparison.get("videos", [])
|
| 1106 |
+
|
| 1107 |
+
# Validate that video files/URLs exist or are accessible
|
| 1108 |
+
valid_videos = []
|
| 1109 |
+
for video_path in videos:
|
| 1110 |
+
if video_path: # Check if not empty/None
|
| 1111 |
+
# Check if it's a URL
|
| 1112 |
+
if video_path.startswith(("http://", "https://")):
|
| 1113 |
+
# For URLs, we'll assume they're valid (can't easily check without downloading)
|
| 1114 |
+
# OpenCV will handle the validation during actual loading
|
| 1115 |
+
valid_videos.append(video_path)
|
| 1116 |
+
print(f"Added video URL: {video_path}")
|
| 1117 |
+
elif os.path.exists(video_path):
|
| 1118 |
+
# For local files, check existence
|
| 1119 |
+
valid_videos.append(video_path)
|
| 1120 |
+
print(f"Added local video file: {video_path}")
|
| 1121 |
+
else:
|
| 1122 |
+
print(f"Warning: Local video file not found: {video_path}")
|
| 1123 |
+
|
| 1124 |
+
# Add to examples if we have valid videos
|
| 1125 |
+
if len(valid_videos) == 2:
|
| 1126 |
+
examples.append(valid_videos)
|
| 1127 |
+
elif len(valid_videos) == 1:
|
| 1128 |
+
# Single video example (compare with None)
|
| 1129 |
+
examples.append([valid_videos[0], None])
|
| 1130 |
+
|
| 1131 |
+
return examples
|
| 1132 |
+
|
| 1133 |
+
except FileNotFoundError:
|
| 1134 |
+
print(f"Warning: {json_file_path} not found. No examples will be loaded.")
|
| 1135 |
+
return []
|
| 1136 |
+
except json.JSONDecodeError as e:
|
| 1137 |
+
print(f"Error parsing {json_file_path}: {e}")
|
| 1138 |
+
return []
|
| 1139 |
+
except Exception as e:
|
| 1140 |
+
print(f"Error loading examples: {e}")
|
| 1141 |
+
return []
|
| 1142 |
+
|
| 1143 |
+
|
| 1144 |
+
def get_all_videos_from_json(json_file_path="data.json"):
|
| 1145 |
+
"""Get list of all unique videos mentioned in the JSON file"""
|
| 1146 |
+
try:
|
| 1147 |
+
with open(json_file_path, "r") as f:
|
| 1148 |
+
data = json.load(f)
|
| 1149 |
+
|
| 1150 |
+
all_videos = set()
|
| 1151 |
+
|
| 1152 |
+
# Extract all unique video paths/URLs from comparisons
|
| 1153 |
+
for comparison in data.get("comparisons", []):
|
| 1154 |
+
videos = comparison.get("videos", [])
|
| 1155 |
+
for video_path in videos:
|
| 1156 |
+
if video_path: # Only add non-empty paths
|
| 1157 |
+
# Check if it's a URL or local file
|
| 1158 |
+
if video_path.startswith(("http://", "https://")):
|
| 1159 |
+
# For URLs, add them directly
|
| 1160 |
+
all_videos.add(video_path)
|
| 1161 |
+
elif os.path.exists(video_path):
|
| 1162 |
+
# For local files, check existence before adding
|
| 1163 |
+
all_videos.add(video_path)
|
| 1164 |
+
|
| 1165 |
+
return sorted(list(all_videos))
|
| 1166 |
+
|
| 1167 |
+
except FileNotFoundError:
|
| 1168 |
+
print(f"Warning: {json_file_path} not found.")
|
| 1169 |
+
return []
|
| 1170 |
+
except json.JSONDecodeError as e:
|
| 1171 |
+
print(f"Error parsing {json_file_path}: {e}")
|
| 1172 |
+
return []
|
| 1173 |
+
except Exception as e:
|
| 1174 |
+
print(f"Error loading videos: {e}")
|
| 1175 |
+
return []
|
| 1176 |
+
|
| 1177 |
+
|
| 1178 |
+
def create_app():
|
| 1179 |
+
comparator = VideoFrameComparator()
|
| 1180 |
+
example_pairs = load_examples_from_json()
|
| 1181 |
+
all_videos = get_all_videos_from_json()
|
| 1182 |
+
|
| 1183 |
+
with gr.Blocks(
|
| 1184 |
+
title="FrameLens - Video Frame Comparator",
|
| 1185 |
+
# theme=gr.themes.Soft(),
|
| 1186 |
+
) as app:
|
| 1187 |
+
gr.Markdown("""
|
| 1188 |
+
# 🎬 FrameLens - Professional Video Quality Analysis
|
| 1189 |
+
|
| 1190 |
+
Upload two videos and compare them using comprehensive quality metrics.
|
| 1191 |
+
Perfect for analyzing compression effects, processing artifacts, and visual quality assessment.
|
| 1192 |
+
|
| 1193 |
+
**✨ Features**: SSIM, PSNR, MSE, pHash, Color Histogram & Sharpness Analysis!
|
| 1194 |
+
""")
|
| 1195 |
+
|
| 1196 |
+
with gr.Row():
|
| 1197 |
+
with gr.Column():
|
| 1198 |
+
gr.Markdown("### Video 1")
|
| 1199 |
+
video1_input = gr.File(
|
| 1200 |
+
label="Upload Video 1",
|
| 1201 |
+
file_types=[
|
| 1202 |
+
".mp4",
|
| 1203 |
+
".avi",
|
| 1204 |
+
".mov",
|
| 1205 |
+
".mkv",
|
| 1206 |
+
".wmv",
|
| 1207 |
+
".flv",
|
| 1208 |
+
".webm",
|
| 1209 |
+
],
|
| 1210 |
+
type="filepath",
|
| 1211 |
+
)
|
| 1212 |
+
|
| 1213 |
+
with gr.Column():
|
| 1214 |
+
gr.Markdown("### Video 2")
|
| 1215 |
+
video2_input = gr.File(
|
| 1216 |
+
label="Upload Video 2",
|
| 1217 |
+
file_types=[
|
| 1218 |
+
".mp4",
|
| 1219 |
+
".avi",
|
| 1220 |
+
".mov",
|
| 1221 |
+
".mkv",
|
| 1222 |
+
".wmv",
|
| 1223 |
+
".flv",
|
| 1224 |
+
".webm",
|
| 1225 |
+
],
|
| 1226 |
+
type="filepath",
|
| 1227 |
+
)
|
| 1228 |
+
|
| 1229 |
+
# Add examples if available (this auto-populates inputs when clicked)
|
| 1230 |
+
if example_pairs:
|
| 1231 |
+
gr.Markdown("### 📁 Example Video Comparisons")
|
| 1232 |
+
gr.Examples(
|
| 1233 |
+
examples=example_pairs,
|
| 1234 |
+
inputs=[video1_input, video2_input],
|
| 1235 |
+
label="Click any example to load video pairs:",
|
| 1236 |
+
examples_per_page=10,
|
| 1237 |
+
)
|
| 1238 |
+
|
| 1239 |
+
load_btn = gr.Button("🔄 Load Videos", variant="primary", size="lg")
|
| 1240 |
+
|
| 1241 |
+
# Frame comparison section (initially hidden)
|
| 1242 |
+
frame_display = gr.Row(visible=False)
|
| 1243 |
+
with frame_display:
|
| 1244 |
+
with gr.Column():
|
| 1245 |
+
gr.Markdown("### Video 1 - Current Frame")
|
| 1246 |
+
frame1_output = gr.Image(
|
| 1247 |
+
label="Video 1 Frame", type="numpy", interactive=False, height=400
|
| 1248 |
+
)
|
| 1249 |
+
|
| 1250 |
+
with gr.Column():
|
| 1251 |
+
gr.Markdown("### Video 2 - Current Frame")
|
| 1252 |
+
frame2_output = gr.Image(
|
| 1253 |
+
label="Video 2 Frame", type="numpy", interactive=False, height=400
|
| 1254 |
+
)
|
| 1255 |
+
|
| 1256 |
+
# Frame navigation (initially hidden) - moved underneath frames
|
| 1257 |
+
frame_controls = gr.Row(visible=False)
|
| 1258 |
+
with frame_controls:
|
| 1259 |
+
frame_slider = gr.Slider(
|
| 1260 |
+
minimum=0,
|
| 1261 |
+
maximum=0,
|
| 1262 |
+
step=1,
|
| 1263 |
+
value=0,
|
| 1264 |
+
label="Frame Number",
|
| 1265 |
+
interactive=False,
|
| 1266 |
+
)
|
| 1267 |
+
|
| 1268 |
+
# Comprehensive metrics visualization (initially hidden)
|
| 1269 |
+
metrics_section = gr.Row(visible=False)
|
| 1270 |
+
with metrics_section:
|
| 1271 |
+
with gr.Column():
|
| 1272 |
+
# Frame info moved above the plot
|
| 1273 |
+
frame_info = gr.Textbox(
|
| 1274 |
+
label="Frame Information & Metrics",
|
| 1275 |
+
interactive=False,
|
| 1276 |
+
value="",
|
| 1277 |
+
lines=3,
|
| 1278 |
+
)
|
| 1279 |
+
gr.Markdown("### 📊 Comprehensive Metrics Analysis")
|
| 1280 |
+
metrics_plot = gr.Plot(
|
| 1281 |
+
label="Multi-Metric Quality Analysis",
|
| 1282 |
+
show_label=False,
|
| 1283 |
+
)
|
| 1284 |
+
|
| 1285 |
+
# Status and frame info (moved below plots, initially hidden)
|
| 1286 |
+
info_section = gr.Row(visible=False)
|
| 1287 |
+
with info_section:
|
| 1288 |
+
with gr.Column():
|
| 1289 |
+
status_output = gr.Textbox(label="Status", interactive=False, lines=8)
|
| 1290 |
+
|
| 1291 |
+
        # Event handlers
        def load_videos_handler(video1, video2):
            status, max_frames, frame1, frame2, info, plot = comparator.load_videos(
                video1, video2
            )

            # Update the slider range to match the loaded videos
            slider_update = gr.Slider(
                minimum=0,
                maximum=max_frames,
                step=1,
                value=0,
                interactive=max_frames > 0,
            )

            # Show/hide sections based on whether the videos loaded successfully
            videos_loaded = max_frames > 0

            return (
                status,  # status_output
                slider_update,  # frame_slider
                frame1,  # frame1_output
                frame2,  # frame2_output
                info,  # frame_info
                plot,  # metrics_plot
                gr.Row(visible=videos_loaded),  # frame_controls
                gr.Row(visible=videos_loaded),  # frame_display
                gr.Row(visible=videos_loaded),  # metrics_section
                gr.Row(visible=videos_loaded),  # info_section
            )

        def update_frames(frame_index):
            if comparator.max_frames == 0:
                return None, None, "No videos loaded", None

            frame1, frame2 = comparator.get_frames_at_index(frame_index)
            info = comparator.get_current_frame_info(frame_index)
            plot = comparator.get_updated_plot(frame_index)

            return frame1, frame2, info, plot

        # Auto-load when examples populate the inputs
        def auto_load_when_examples_change(video1, video2):
            # Only auto-load when both inputs are set (e.g. by clicking an example)
            if video1 and video2:
                return load_videos_handler(video1, video2)
            # With one or no videos, return the default empty state
            return (
                "Please upload videos or select an example",  # status_output
                gr.Slider(
                    minimum=0, maximum=0, step=1, value=0, interactive=False
                ),  # frame_slider
                None,  # frame1_output
                None,  # frame2_output
                "",  # frame_info
                None,  # metrics_plot
                gr.Row(visible=False),  # frame_controls
                gr.Row(visible=False),  # frame_display
                gr.Row(visible=False),  # metrics_section
                gr.Row(visible=False),  # info_section
            )

        # Connect events; the three load paths share the same output list
        load_outputs = [
            status_output,
            frame_slider,
            frame1_output,
            frame2_output,
            frame_info,
            metrics_plot,
            frame_controls,
            frame_display,
            metrics_section,
            info_section,
        ]

        load_btn.click(
            fn=load_videos_handler,
            inputs=[video1_input, video2_input],
            outputs=load_outputs,
        )

        # Auto-load when either video input changes (triggered by examples)
        video1_input.change(
            fn=auto_load_when_examples_change,
            inputs=[video1_input, video2_input],
            outputs=load_outputs,
        )

        video2_input.change(
            fn=auto_load_when_examples_change,
            inputs=[video1_input, video2_input],
            outputs=load_outputs,
        )

        frame_slider.change(
            fn=update_frames,
            inputs=[frame_slider],
            outputs=[frame1_output, frame2_output, frame_info, metrics_plot],
        )

        # Add comprehensive usage guide
        gr.Markdown(f"""
        ### 💡 Professional Features:
        - Upload videos in common formats (MP4, AVI, MOV, etc.) or use URLs
        - **6 Quality Metrics**: SSIM, PSNR, MSE, pHash, Color Histogram, Sharpness
        - **Comprehensive Visualization**: 6-panel analysis dashboard
        - **Real-time Analysis**: Navigate frames with live metric updates
        - **Smart Comparisons**: See which video performs better per metric
        - **Correlation Analysis**: Understand relationships between metrics
        {"- Click examples above for instant analysis!" if example_pairs else ""}

        ### 📊 Metrics Explained (with Directionality):
        - **SSIM** ↑: Structural Similarity (1.0 = identical, 0.0 = completely different)
        - **PSNR** ↑: Peak Signal-to-Noise Ratio in dB (higher = better quality)
        - **MSE** ↓: Mean Squared Error (lower = more similar)
        - **pHash** ↑: Perceptual Hash similarity (1.0 = visually identical)
        - **Color Histogram** ↑: Color distribution correlation (1.0 = identical colors)
        - **Sharpness** ↑: Laplacian variance (higher = sharper images)

        ### 🎯 Quality Assessment Scale:
        - 🟢 **Excellent**: SSIM ≥ 0.9, PSNR ≥ 40dB, MSE ≤ 50
        - 🔵 **Good**: SSIM ≥ 0.8, PSNR ≥ 30dB, MSE ≤ 100
        - 🟡 **Fair**: SSIM ≥ 0.6, PSNR ≥ 20dB, MSE ≤ 200
        - 🔴 **Poor**: Below fair thresholds

        ### 🏆 Comparison Indicators:
        - **V1/V2 Winner**: Shows which video performs better per metric
        - **Significance**: Major (>20%), Moderate (10-20%), Minor (5-10%), Negligible (<5%)
        - **Overall Match**: Combined quality assessment across all metrics
        - **Arrows**: ↑ = Higher is Better, ↓ = Lower is Better

        ### 📁 Configuration:
        {f"Loaded {len(example_pairs)} example comparisons from data.json" if example_pairs else "No examples found in data.json"}
        {f"Available videos: {len(all_videos)} files" if all_videos else ""}
        """)

    return app


def main():
    app = create_app()
    app.launch(server_name="0.0.0.0", server_port=7860, share=False, debug=True)


if __name__ == "__main__":
    main()
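The hunk above covers only the Gradio wiring; the metric computations themselves live earlier in app.py and are outside this diff. For reference, below is a minimal sketch of how the SSIM, PSNR, MSE, and sharpness figures described in the usage guide can be computed with the project's scikit-image, OpenCV, and NumPy dependencies. The `frame_metrics` helper, its return shape, and the same-sized-frames assumption are illustrative, not the comparator's actual API.

```python
# Illustrative sketch only: per-frame metrics matching the usage guide above.
# Assumes frame1 and frame2 are same-sized uint8 BGR arrays (e.g. from OpenCV).
import cv2
import numpy as np
from skimage.metrics import structural_similarity


def frame_metrics(frame1: np.ndarray, frame2: np.ndarray) -> dict:
    """Return SSIM, PSNR, MSE, and Video 1 sharpness for a frame pair."""
    diff = frame1.astype(np.float64) - frame2.astype(np.float64)
    mse = float(np.mean(diff**2))  # lower = more similar
    psnr = float("inf") if mse == 0 else 10 * np.log10(255.0**2 / mse)  # in dB
    ssim = structural_similarity(frame1, frame2, channel_axis=2)  # 1.0 = identical
    gray = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    sharpness = cv2.Laplacian(gray, cv2.CV_64F).var()  # Laplacian variance
    return {"ssim": ssim, "psnr": psnr, "mse": mse, "sharpness": sharpness}
```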
data.json
ADDED
@@ -0,0 +1,22 @@
{
  "comparisons": [
    {
      "videos": [
        "examples/dog/1.mp4",
        "examples/dog/2.mp4"
      ]
    },
    {
      "videos": [
        "examples/dog/2.mp4",
        "examples/dog/3.mp4"
      ]
    },
    {
      "videos": [
        "examples/dog/1.mp4",
        "examples/dog/3.mp4"
      ]
    }
  ]
}
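data.json holds a single top-level `comparisons` array whose entries each carry a two-element `videos` list of paths relative to the Space root. A loader along the following lines would turn it into the `example_pairs` list the UI feeds to `gr.Examples`; the app's actual loader is defined earlier in app.py, so the `load_example_pairs` name and the exists-on-disk filter are assumptions here.

```python
# Hypothetical data.json loader; the app's real one lives earlier in app.py.
import json
from pathlib import Path


def load_example_pairs(path: str = "data.json") -> list[list[str]]:
    """Return [[video1, video2], ...] pairs whose files exist on disk."""
    if not Path(path).exists():
        return []
    with open(path) as f:
        data = json.load(f)
    pairs = []
    for entry in data.get("comparisons", []):
        videos = entry.get("videos", [])
        if len(videos) == 2 and all(Path(v).exists() for v in videos):
            pairs.append(videos)
    return pairs
```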
examples/.DS_Store
ADDED
Binary file (6.15 kB).
pyproject.toml
ADDED
@@ -0,0 +1,16 @@
[project]
name = "framelens"
version = "0.1.0"
description = "Tool for frame-by-frame video or image metric comparison"
readme = "README.md"
requires-python = ">=3.12"
dependencies = [
    "gradio>=5.38.2",
    "opencv-python>=4.8.0",
    "numpy>=1.24.0",
    "pillow>=10.0.0",
    "scikit-image>=0.21.0",
    "plotly>=5.17.0",
    "imagehash>=4.3.1",
    "scipy>=1.11.0",
]
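Among these dependencies, imagehash and opencv-python cover the two usage-guide metrics not sketched earlier: perceptual-hash similarity and color-histogram correlation. The helpers below show one common way to compute them (the default 8×8 pHash and 8 bins per BGR channel); the function names and bin counts are assumptions rather than values taken from app.py.

```python
# Illustrative pHash and color-histogram metrics; names and bin counts assumed.
import cv2
import imagehash
import numpy as np
from PIL import Image


def phash_similarity(frame1: np.ndarray, frame2: np.ndarray) -> float:
    """1.0 = perceptually identical; frames are uint8 BGR arrays."""
    h1 = imagehash.phash(Image.fromarray(cv2.cvtColor(frame1, cv2.COLOR_BGR2RGB)))
    h2 = imagehash.phash(Image.fromarray(cv2.cvtColor(frame2, cv2.COLOR_BGR2RGB)))
    return 1.0 - (h1 - h2) / h1.hash.size  # Hamming distance over 64 hash bits


def histogram_correlation(frame1: np.ndarray, frame2: np.ndarray) -> float:
    """Pearson correlation of 8x8x8 BGR color histograms (1.0 = identical)."""
    hists = []
    for frame in (frame1, frame2):
        h = cv2.calcHist([frame], [0, 1, 2], None, [8, 8, 8],
                         [0, 256, 0, 256, 0, 256])
        hists.append(cv2.normalize(h, h).flatten())
    return float(cv2.compareHist(hists[0], hists[1], cv2.HISTCMP_CORREL))
```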
requirements.txt
ADDED
@@ -0,0 +1,66 @@
aiofiles==24.1.0
annotated-types==0.7.0
anyio==4.9.0
brotli==1.1.0
certifi==2025.7.14
charset-normalizer==3.4.2
click==8.2.1
fastapi==0.116.1
ffmpy==0.6.1
filelock==3.18.0
fsspec==2025.7.0
gradio==5.38.2
gradio-client==1.11.0
groovy==0.1.2
h11==0.16.0
hf-xet==1.1.5
httpcore==1.0.9
httpx==0.28.1
huggingface-hub==0.34.3
idna==3.10
imagehash==4.3.2
imageio==2.37.0
jinja2==3.1.6
lazy-loader==0.4
markdown-it-py==3.0.0
markupsafe==3.0.2
mdurl==0.1.2
narwhals==2.0.1
networkx==3.5
numpy==2.3.2
opencv-python==4.11.0.86
orjson==3.11.1
packaging==25.0
pandas==2.3.1
pillow==11.3.0
plotly==6.2.0
pydantic==2.11.7
pydantic-core==2.33.2
pydub==0.25.1
pygments==2.19.2
python-dateutil==2.9.0.post0
python-multipart==0.0.20
pytz==2025.2
pywavelets==1.8.0
pyyaml==6.0.2
requests==2.32.4
rich==14.1.0
ruff==0.12.5
safehttpx==0.1.6
scikit-image==0.25.2
scipy==1.16.1
semantic-version==2.10.0
shellingham==1.5.4
six==1.17.0
sniffio==1.3.1
starlette==0.47.2
tifffile==2025.6.11
tomlkit==0.13.3
tqdm==4.67.1
typer==0.16.0
typing-extensions==4.14.1
typing-inspection==0.4.1
tzdata==2025.2
urllib3==2.5.0
uvicorn==0.35.0
websockets==15.0.1
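requirements.txt pins opencv-python 4.11, whose `cv2.VideoCapture` API is the usual basis for the frame-accurate seeking the slider performs. A minimal, illustrative seek-and-read helper (not the app's actual implementation) under that assumption:

```python
# Illustrative frame seek with OpenCV; not the comparator's actual code.
import cv2
import numpy as np


def read_frame(video_path: str, index: int) -> np.ndarray | None:
    """Return frame `index` as an RGB array, or None if unavailable."""
    cap = cv2.VideoCapture(video_path)
    try:
        # CAP_PROP_POS_FRAMES seeking can be approximate for some codecs;
        # decoding sequentially from frame 0 is the frame-exact fallback.
        cap.set(cv2.CAP_PROP_POS_FRAMES, index)
        ok, frame = cap.read()
        return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) if ok else None
    finally:
        cap.release()
```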
uv.lock
ADDED
The diff for this file is too large to render.