mgbam committed on
Commit
5c746f8
·
verified ·
1 Parent(s): 21571e2

Update app.py

Files changed (1)
  app.py +78 -86
app.py CHANGED
@@ -1,35 +1,27 @@
import gradio as gr
import os
- import google.generativeai as genai
- # from elevenlabs.client import ElevenLabs # Temporarily disabled
- from tavily import TavilyClient
- import requests
- import subprocess
import json
import time
import random

- # --- 1. CONFIGURE API KEYS FROM HUGGING FACE SECRETS ---
try:
    genai.configure(api_key=os.environ["GEMINI_API_KEY"])
    tavily_client = TavilyClient(api_key=os.environ["TAVILY_API_KEY"])
    RUNWAY_API_KEY = os.environ["RUNWAY_API_KEY"]
except KeyError as e:
-     if 'ELEVENLABS_API_KEY' not in str(e):
-         raise ValueError(f"API Key Error: Please set the {e} secret in your Hugging Face Space settings.")

- # --- 2. DEFINE API ENDPOINTS AND HEADERS ---
- RUNWAY_API_URL = "https://api.runwayml.com/v2"
- RUNWAY_HEADERS = {
-     "Authorization": f"Bearer {RUNWAY_API_KEY}",
-     "Content-Type": "application/json"
- }
-
- # --- 3. THE CORE VIDEO GENERATION FUNCTION ---
def generate_video_from_topic(topic_prompt, progress=gr.Progress(track_tqdm=True)):
    job_id = f"{int(time.time())}_{random.randint(1000, 9999)}"
    print(f"--- Starting New Job: {job_id} for topic: '{topic_prompt}' ---")
-
    intermediate_files = []

    try:
@@ -37,7 +29,10 @@ def generate_video_from_topic(topic_prompt, progress=gr.Progress(track_tqdm=True
        progress(0.1, desc="🔍 Researching topic with Tavily...")
        facts = "No research data available."
        try:
-             research_results = tavily_client.search(query=f"Key facts and interesting points about {topic_prompt}", search_depth="basic")
            if research_results and 'results' in research_results:
                facts = "\n".join([res['content'] for res in research_results['results']])
        except Exception as e:
@@ -51,14 +46,21 @@ def generate_video_from_topic(topic_prompt, progress=gr.Progress(track_tqdm=True
        Your output MUST be a valid JSON object with "narration_script" (string) and "scene_prompts" (a list of 4 detailed, cinematic prompts).
        """
        response = gemini_model.generate_content(prompt)
-
        try:
-             cleaned_text = response.text.strip().replace("```json", "").replace("```", "")
            script_data = json.loads(cleaned_text)
            narration = script_data['narration_script']
            scene_prompts = script_data['scene_prompts']
        except (json.JSONDecodeError, KeyError) as e:
-             raise gr.Error(f"Gemini did not return valid JSON. Error: {e}. Response was: {response.text}")

        # STEP 3: MOCK VOICE OVER
        progress(0.3, desc="🎙️ MOCKING voiceover to save credits...")
@@ -66,62 +68,39 @@ def generate_video_from_topic(topic_prompt, progress=gr.Progress(track_tqdm=True
        intermediate_files.append(audio_path)
        narration_duration = len(narration.split()) / 2.5
        subprocess.run([
-             'ffmpeg', '-f', 'lavfi', '-i', f'anullsrc=r=44100:cl=mono',
-             '-t', str(narration_duration), '-q:a', '9', '-acodec', 'libmp3lame',
            audio_path, '-y'
        ], check=True)
-         print(f"MOCK audio file saved (no credits used): {audio_path}")

-         # STEP 4: VISUALS (Runway)
        video_clip_paths = []
-         for i, scene_prompt in enumerate(scene_prompts):
-             progress(0.4 + (i * 0.12), desc=f"🎬 Generating video scene {i+1}/{len(scene_prompts)}...")
-
-             # !!!!!!!!!!! THE FINAL, FINAL, VERIFIED RUNWAY FIX !!!!!!!!!!!
-             # 1. The payload structure is now correct for V2
-             runway_payload = {
-                 "task_type": "gen2",
-                 "options": {
-                     "text_prompt": scene_prompt
-                 }
-             }
-             # 2. The POST request goes to the base /tasks endpoint
-             post_response = requests.post(f"{RUNWAY_API_URL}/tasks", headers=RUNWAY_HEADERS, json=runway_payload)
-             # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
-             if post_response.status_code != 200:
-                 raise gr.Error(f"Runway API Error (start job): {post_response.status_code} - {post_response.text}")
-
-             task_id = post_response.json().get("uuid")
-             if not task_id:
-                 raise gr.Error(f"Runway API did not return a task UUID. Response: {post_response.json()}")
-
-             video_url = None
-             for _ in range(60):
-                 get_response = requests.get(f"{RUNWAY_API_URL}/tasks/{task_id}", headers=RUNWAY_HEADERS)
-                 status_details = get_response.json()
-                 status = status_details.get("status")
-
-                 if status == "succeeded":
-                     video_url = status_details.get("outputs", {}).get("video")
-                     break
-                 elif status == "failed":
-                     raise gr.Error(f"Runway job failed. Details: {status_details.get('error_message')}")
-
-                 print(f"Scene {i+1} status: {status}. Waiting 10 seconds...")
-                 time.sleep(10)
-
-             if not video_url:
-                 raise gr.Error(f"Runway job timed out for scene {i+1}.")
-
-             clip_path = f"scene_{i+1}_{job_id}.mp4"
            intermediate_files.append(clip_path)
            video_clip_paths.append(clip_path)
-
-             video_response = requests.get(video_url, stream=True)
            with open(clip_path, "wb") as f:
-                 for chunk in video_response.iter_content(chunk_size=1024):
-                     if chunk: f.write(chunk)
            print(f"Video clip saved: {clip_path}")

        # STEP 5: STITCHING (FFmpeg)
@@ -134,43 +113,56 @@ def generate_video_from_topic(topic_prompt, progress=gr.Progress(track_tqdm=True

        combined_video_path = f"combined_video_{job_id}.mp4"
        intermediate_files.append(combined_video_path)
-         subprocess.run(['ffmpeg', '-f', 'concat', '-safe', '0', '-i', file_list_path, '-c', 'copy', combined_video_path, '-y'], check=True)
-
        final_video_path = f"final_video_{job_id}.mp4"
-         subprocess.run(['ffmpeg', '-i', combined_video_path, '-i', audio_path, '-c:v', 'copy', '-c:a', 'aac', '-shortest', final_video_path, '-y'], check=True)
        print(f"Final video created at: {final_video_path}")
-
        progress(1.0, desc="✅ Done!")
        return final_video_path

    except Exception as e:
-         print(f"--- JOB {job_id} FAILED --- \nError: {e}")
        raise gr.Error(f"An error occurred: {e}")
-
    finally:
-         # STEP 6: CLEANUP
        print("Cleaning up intermediate files...")
        for file_path in intermediate_files:
            if os.path.exists(file_path):
                os.remove(file_path)
                print(f"Removed: {file_path}")

- # --- 4. CREATE AND LAUNCH THE GRADIO INTERFACE ---
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🤖 My Personal AI Video Studio")
    gr.Markdown("Enter a topic to generate a short-form video. This private tool is used for fulfilling freelance orders.")
-
    with gr.Row():
-         topic_input = gr.Textbox(label="Video Topic", placeholder="e.g., 'The history of coffee'", scale=3)
        generate_button = gr.Button("Generate Video", variant="primary", scale=1)
-
    with gr.Row():
        video_output = gr.Video(label="Generated Video")
-
-     generate_button.click(fn=generate_video_from_topic, inputs=topic_input, outputs=video_output)
-
-     gr.Markdown("--- \n ### Examples of Good Topics:\n - A product: 'The new waterproof Chrono-Watch X1'\n - A concept: 'The science of sleep'")

if __name__ == "__main__":
-     demo.launch()

import gradio as gr
import os
import json
import time
import random
+ import subprocess
+ import google.generativeai as genai
+ from tavily import TavilyClient
+ from runwayml import RunwayML, TaskFailedError

+ # --- 1. CONFIGURE API KEYS ---
try:
    genai.configure(api_key=os.environ["GEMINI_API_KEY"])
    tavily_client = TavilyClient(api_key=os.environ["TAVILY_API_KEY"])
    RUNWAY_API_KEY = os.environ["RUNWAY_API_KEY"]
+     runway_client = RunwayML(api_key=RUNWAY_API_KEY)
except KeyError as e:
+     raise ValueError(f"API Key Error: Please set the {e} secret in your environment.")

+ # --- 2. CORE VIDEO GENERATION FUNCTION ---
def generate_video_from_topic(topic_prompt, progress=gr.Progress(track_tqdm=True)):
    job_id = f"{int(time.time())}_{random.randint(1000, 9999)}"
    print(f"--- Starting New Job: {job_id} for topic: '{topic_prompt}' ---")
+
    intermediate_files = []

    try:
 
        progress(0.1, desc="🔍 Researching topic with Tavily...")
        facts = "No research data available."
        try:
+             research_results = tavily_client.search(
+                 query=f"Key facts and interesting points about {topic_prompt}",
+                 search_depth="basic"
+             )
            if research_results and 'results' in research_results:
                facts = "\n".join([res['content'] for res in research_results['results']])
        except Exception as e:
 
        Your output MUST be a valid JSON object with "narration_script" (string) and "scene_prompts" (a list of 4 detailed, cinematic prompts).
        """
        response = gemini_model.generate_content(prompt)
+
        try:
+             cleaned_text = (
+                 response.text
+                 .strip()
+                 .replace("```json", "")
+                 .replace("```", "")
+             )
            script_data = json.loads(cleaned_text)
            narration = script_data['narration_script']
            scene_prompts = script_data['scene_prompts']
        except (json.JSONDecodeError, KeyError) as e:
+             raise gr.Error(
+                 f"Gemini did not return valid JSON. Error: {e}. Response was: {response.text}"
+             )

        # STEP 3: MOCK VOICE OVER
        progress(0.3, desc="🎙️ MOCKING voiceover to save credits...")
 
        intermediate_files.append(audio_path)
        narration_duration = len(narration.split()) / 2.5
        subprocess.run([
+             'ffmpeg', '-f', 'lavfi', '-i', 'anullsrc=r=44100:cl=mono',
+             '-t', str(narration_duration), '-q:a', '9', '-acodec', 'libmp3lame',
            audio_path, '-y'
        ], check=True)
+         print(f"MOCK audio file saved: {audio_path}")

+         # STEP 4: GENERATE VIDEO SCENES (Runway SDK)
        video_clip_paths = []
+         for i, scene_prompt in enumerate(scene_prompts, start=1):
+             progress(0.4 + (i * 0.12), desc=f"🎬 Generating scene {i}/{len(scene_prompts)}...")
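+             # Submit the scene prompt to Runway and wait for the finished task output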
+             try:
+                 task = (
+                     runway_client.image_to_video.create(
+                         model="gen4_turbo",
+                         prompt_text=scene_prompt,
+                         duration=5,
+                         ratio="1280:720"
+                     )
+                     .wait_for_task_output()
+                 )
+                 video_url = task.output[0]
+             except TaskFailedError as e:
+                 raise gr.Error(f"Runway job failed: {e.task_details}")
+
+             clip_path = f"scene_{i}_{job_id}.mp4"
            intermediate_files.append(clip_path)
            video_clip_paths.append(clip_path)
+
+             # Download the scene clip
            with open(clip_path, "wb") as f:
+                 for chunk in runway_client._session.get(video_url, stream=True).iter_content(chunk_size=1024):
+                     if chunk:
+                         f.write(chunk)
            print(f"Video clip saved: {clip_path}")

        # STEP 5: STITCHING (FFmpeg)
 

        combined_video_path = f"combined_video_{job_id}.mp4"
        intermediate_files.append(combined_video_path)
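+         # Concatenate the scene clips with ffmpeg's concat demuxer (stream copy, no re-encode)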
+         subprocess.run([
+             'ffmpeg', '-f', 'concat', '-safe', '0',
+             '-i', file_list_path, '-c', 'copy', combined_video_path, '-y'
+         ], check=True)
+
        final_video_path = f"final_video_{job_id}.mp4"
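+         # Mux the stitched video with the narration track; -shortest ends at the shorter stream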
+         subprocess.run([
+             'ffmpeg', '-i', combined_video_path,
+             '-i', audio_path,
+             '-c:v', 'copy', '-c:a', 'aac', '-shortest', final_video_path, '-y'
+         ], check=True)
        print(f"Final video created at: {final_video_path}")
+
        progress(1.0, desc="✅ Done!")
        return final_video_path

    except Exception as e:
+         print(f"--- JOB {job_id} FAILED ---\nError: {e}")
        raise gr.Error(f"An error occurred: {e}")
+
    finally:
        print("Cleaning up intermediate files...")
        for file_path in intermediate_files:
            if os.path.exists(file_path):
                os.remove(file_path)
                print(f"Removed: {file_path}")

+ # --- 3. LAUNCH GRADIO APP ---
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🤖 My Personal AI Video Studio")
    gr.Markdown("Enter a topic to generate a short-form video. This private tool is used for fulfilling freelance orders.")
+
    with gr.Row():
+         topic_input = gr.Textbox(
+             label="Video Topic",
+             placeholder="e.g., 'The history of coffee'",
+             scale=3
+         )
        generate_button = gr.Button("Generate Video", variant="primary", scale=1)
+
    with gr.Row():
        video_output = gr.Video(label="Generated Video")

+     generate_button.click(
+         fn=generate_video_from_topic,
+         inputs=topic_input,
+         outputs=video_output
+     )
+
+     gr.Markdown("--- \n ### Examples of Good Topics:\n - A product: 'The new waterproof Chrono-Watch X1'\n - A concept: 'The science of sleep'")

if __name__ == "__main__":
+     demo.launch()