fix version

- app.py +8 -1
- requirements.txt +3 -3
app.py CHANGED

@@ -1,7 +1,10 @@
+
+import spaces
 import os
 import gradio as gr
 import json
 import torch
+
 from huggingface_hub import snapshot_download
 
 from inference_bokehK import load_models as load_bokeh_models, run_inference as run_bokeh_inference, OmegaConf
@@ -37,7 +40,7 @@ shutter_pipeline, shutter_device = load_shutter_models(shutter_cfg)
 color_cfg = OmegaConf.load("configs/inference_genphoto/adv3_256_384_genphoto_relora_color_temperature.yaml")
 color_pipeline, color_device = load_color_models(color_cfg)
 
-
+@spaces.GPU(duration=30)
 def generate_bokeh_video(base_scene, bokehK_list):
     try:
         torch.manual_seed(42)
@@ -51,6 +54,7 @@ def generate_bokeh_video(base_scene, bokehK_list):
     except Exception as e:
         return f"Error: {str(e)}"
 
+@spaces.GPU(duration=30)
 def generate_focal_video(base_scene, focal_length_list):
     try:
         torch.manual_seed(42)
@@ -64,6 +68,7 @@ def generate_focal_video(base_scene, focal_length_list):
     except Exception as e:
         return f"Error: {str(e)}"
 
+@spaces.GPU(duration=30)
 def generate_shutter_video(base_scene, shutter_speed_list):
     try:
         torch.manual_seed(42)
@@ -77,6 +82,8 @@ def generate_shutter_video(base_scene, shutter_speed_list):
     except Exception as e:
         return f"Error: {str(e)}"
 
+
+@spaces.GPU(duration=30)
 def generate_color_video(base_scene, color_temperature_list):
     try:
         torch.manual_seed(42)
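The app.py change imports the spaces package and wraps each inference entry point with its ZeroGPU decorator, so a GPU is attached only while a call is running. Below is a minimal sketch of that pattern, assuming only the spaces.GPU(duration=...) API shown in the diff; generate_video and its body are placeholders, not the Space's real pipeline code.

import spaces
import torch

@spaces.GPU(duration=30)  # GPU is allocated only for the duration of each call
def generate_video(prompt):
    # Placeholder body: the real app.py runs its inference pipelines here.
    torch.manual_seed(42)  # fixed seed, mirroring app.py
    return f"generated (placeholder) for: {prompt}"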
requirements.txt CHANGED

@@ -1,13 +1,13 @@
---extra-index-url https://download.pytorch.org/whl/cu121
 torch==2.1.1
 torchvision==0.16.1
 torchaudio==2.1.1
-diffusers==0.
+diffusers==0.24.0
 imageio==2.36.0
 imageio-ffmpeg
 numpy==1.24.4
 transformers==4.39.3
 accelerate==0.30.0
+huggingface_hub==0.25.1
 opencv-python
 gdown
 einops
@@ -17,4 +17,4 @@ safetensors
 gradio
 wandb
 triton
-
+spaces
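If the Space still hits a runtime error after rebuilding, a quick sanity check is to log the resolved versions at startup and compare them with the pins above. A minimal sketch, assuming only that both pinned packages expose __version__ (they do):

import diffusers
import huggingface_hub

# Log the versions that were actually installed; they should match requirements.txt.
print("diffusers:", diffusers.__version__)              # expected: 0.24.0
print("huggingface_hub:", huggingface_hub.__version__)  # expected: 0.25.1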