Update app.py
app.py
CHANGED
@@ -1,7 +1,7 @@
 import gradio as gr
 import os
 import google.generativeai as genai
-from elevenlabs.client import ElevenLabs
+# from elevenlabs.client import ElevenLabs # Temporarily disabled
 from tavily import TavilyClient
 import requests
 import subprocess
@@ -14,9 +14,12 @@ try:
     genai.configure(api_key=os.environ["GEMINI_API_KEY"])
     tavily_client = TavilyClient(api_key=os.environ["TAVILY_API_KEY"])
     RUNWAY_API_KEY = os.environ["RUNWAY_API_KEY"]
-    elevenlabs_client = ElevenLabs(api_key=os.environ["ELEVENLABS_API_KEY"])
+    # We don't need to initialize the elevenlabs_client for now
+    # elevenlabs_client = ElevenLabs(api_key=os.environ["ELEVENLABS_API_KEY"])
 except KeyError as e:
-    raise ValueError(f"API Key Error: Please set the {e} secret in your Hugging Face Space settings.")
+    # We remove ELEVENLABS_API_KEY from the required list for now
+    if 'ELEVENLABS_API_KEY' not in str(e):
+        raise ValueError(f"API Key Error: Please set the {e} secret in your Hugging Face Space settings.")

 # --- 2. DEFINE API ENDPOINTS AND HEADERS ---
 RUNWAY_API_URL = "https://api.runwayml.com/v2"
@@ -60,31 +63,30 @@ def generate_video_from_topic(topic_prompt, progress=gr.Progress(track_tqdm=True
     except (json.JSONDecodeError, KeyError) as e:
         raise gr.Error(f"Gemini did not return valid JSON. Error: {e}. Response was: {response.text}")

-    # STEP 3: VOICE OVER (ElevenLabs)
-    progress(0.3, desc="🎙️
+    # !!!!!!!!!!! STEP 3: VOICE OVER (ElevenLabs) - TEMPORARILY DISABLED !!!!!!!!!!!
+    progress(0.3, desc="🎙️ MOCKING voiceover to save credits...")
     audio_path = f"audio_{job_id}.mp3"
     intermediate_files.append(audio_path)

-    [removed: ElevenLabs voiceover generation call (9 lines), truncated in this diff view]
-    print(f"
+    # --- We now create a silent placeholder audio file instead of calling the API ---
+    # This creates a silent mp3 file with a realistic length based on the script.
+    narration_duration = len(narration.split()) / 2.5 # Rough estimate: 2.5 words per second
+    subprocess.run([
+        'ffmpeg', '-f', 'lavfi', '-i', f'anullsrc=r=44100:cl=mono',
+        '-t', str(narration_duration), '-q:a', '9', '-acodec', 'libmp3lame',
+        audio_path, '-y'
+    ], check=True)
+    print(f"MOCK audio file saved (no credits used): {audio_path}")
+    # !!!!!!!!!!! END OF DISABLED SECTION !!!!!!!!!!!

     # STEP 4: VISUALS (Runway)
     video_clip_paths = []
     for i, scene_prompt in enumerate(scene_prompts):
         progress(0.4 + (i * 0.12), desc=f"🎬 Generating video scene {i+1}/{len(scene_prompts)}...")

-        # !!!!!!!!!!! THE FINAL, VERIFIED RUNWAY FIX V2 !!!!!!!!!!!
         runway_payload = {"text_prompt": scene_prompt}
         # The POST request goes to the specific /tasks/gen2 endpoint
         post_response = requests.post(f"{RUNWAY_API_URL}/tasks/gen2", headers=RUNWAY_HEADERS, json=runway_payload)
-        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

         if post_response.status_code != 200:
             raise gr.Error(f"Runway API Error (start job): {post_response.status_code} - {post_response.text}")
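Note that the voiceover step is only mocked out, not removed: the import and the client setup survive as comments so STEP 3 can be switched back on later. Below is a minimal sketch of what the re-enabled call could look like, assuming a recent elevenlabs SDK that exposes text_to_speech.convert(); the voice and model IDs are placeholders, and the removed original lines are not shown in full in this diff.

# Sketch only: re-enabling the voiceover with the ElevenLabs client that the
# diff leaves commented out. voice_id/model_id are placeholder values.
import os
from elevenlabs.client import ElevenLabs

def generate_voiceover(narration: str, audio_path: str) -> str:
    client = ElevenLabs(api_key=os.environ["ELEVENLABS_API_KEY"])
    # convert() streams the synthesized speech back as byte chunks
    audio_chunks = client.text_to_speech.convert(
        voice_id="EXAVITQu4vr4xnSDxMaL",    # placeholder voice ID
        model_id="eleven_multilingual_v2",  # placeholder model ID
        text=narration,
    )
    with open(audio_path, "wb") as f:
        for chunk in audio_chunks:
            f.write(chunk)
    return audio_path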
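The mock itself relies on ffmpeg's anullsrc source: it synthesizes silence whose length tracks the script at roughly 2.5 words per second, so the later muxing step still gets an audio track of plausible duration without spending ElevenLabs credits. A quick way to sanity-check the placeholder, assuming ffprobe is installed alongside ffmpeg:

# Sketch: read the placeholder's duration back with ffprobe to confirm the
# 2.5-words-per-second estimate produced a file of the expected length.
import subprocess

def probe_duration(audio_path: str) -> float:
    result = subprocess.run(
        ["ffprobe", "-v", "error",
         "-show_entries", "format=duration",
         "-of", "default=noprint_wrappers=1:nokey=1",
         audio_path],
        capture_output=True, text=True, check=True,
    )
    return float(result.stdout.strip())

# e.g. a 50-word narration should yield a clip of about 20 seconds
# print(probe_duration("audio_test.mp3"))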