Update app.py
app.py
CHANGED
@@ -109,6 +109,9 @@ def generate_image_with_flux(
     # Initialize FLUX pipeline here
     dtype = torch.bfloat16
     device = "cuda" if torch.cuda.is_available() else "cpu"
+
+    torch.cuda.empty_cache() # Clear cache
+    gc.collect() # Run garbage collection
     flux_pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=dtype).to(device)
 
     if randomize_seed:
@@ -151,13 +154,10 @@ def merge_audio_files(mp3_names: List[str]) -> str:
 @spaces.GPU()
 def get_output_video(text, seed, randomize_seed, width, height, num_inference_steps):
     print("DEBUG: Starting get_output_video function...")
-
     # Set the device here, inside the GPU-accelerated function
-    device = "cuda" if torch.cuda.is_available() else "cpu"
-
+    device = "cuda" if torch.cuda.is_available() else "cpu"
     # Move the model to the GPU
     model.to(device)
-
     # Summarize the input text
     print("DEBUG: Summarizing text...")
     inputs = tokenizer(
@@ -174,9 +174,7 @@ def get_output_video(text, seed, randomize_seed, width, height, num_inference_steps):
     )
     plot = list(summary[0].split('.'))
     print(f"DEBUG: Summary generated: {plot}")
-
     image_system ="Generate a realistic picture about this: "
-
     # Generate images for each sentence in the plot
     generated_images = []
     for i, senten in enumerate(plot[:-1]):
@@ -197,8 +195,8 @@ def get_output_video(text, seed, randomize_seed, width, height, num_inference_steps):
     print(f"DEBUG: Image generated and saved to {image_path}")
 
     #del min_dalle_model # No need to delete the model here
-    #
-    #
+    #torch.cuda.empty_cache() # No need to empty cache here
+    #gc.collect() # No need to collect garbage here
 
     # Create subtitles from the plot
     sentences = plot[:-1]
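Note on the first and last hunks: the change frees GPU memory once, immediately before instantiating the FLUX pipeline, while the per-image torch.cuda.empty_cache()/gc.collect() calls inside the generation loop stay commented out. The usual rationale is that empty_cache() is comparatively expensive (it hands cached blocks back to the driver), so paying that cost once before the large model load reclaims memory without slowing every image. A minimal sketch of that cleanup-before-load pattern, pulled out of the diff for clarity; the load_flux_pipeline wrapper is illustrative, not a function in app.py:

import gc
import torch
from diffusers import DiffusionPipeline

def load_flux_pipeline():
    # Drop unreferenced Python objects first so any CUDA tensors they
    # hold become eligible for release.
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()  # return cached CUDA blocks to the driver
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # Same checkpoint and dtype as the diff above.
    pipe = DiffusionPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
    ).to(device)
    return pipe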
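The second hunk's substantive change is where device selection happens. On a ZeroGPU Space, a GPU is only attached while a @spaces.GPU()-decorated function is running, so a torch.cuda.is_available() check at import time reports CPU; the check and model.to(device) therefore have to live inside the decorated function, as the diff does. A self-contained sketch of that pattern, assuming a generic summarizer checkpoint; the checkpoint name and the summarize helper are illustrative, not app.py's actual model setup:

import spaces
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Loaded on CPU at import time; no CUDA device is attached yet on ZeroGPU.
tokenizer = AutoTokenizer.from_pretrained("sshleifer/distilbart-cnn-12-6")
model = AutoModelForSeq2SeqLM.from_pretrained("sshleifer/distilbart-cnn-12-6")

@spaces.GPU()
def summarize(text: str) -> str:
    # Inside the decorated function a GPU is attached, so the device
    # check and the move to GPU happen here, mirroring the diff.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    inputs = tokenizer(text, return_tensors="pt", truncation=True).to(device)
    summary_ids = model.generate(**inputs, max_new_tokens=64)
    return tokenizer.decode(summary_ids[0], skip_special_tokens=True)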