Aniket2012 committed
Commit 6217602 · verified · 1 Parent(s): d36becf

create app.py

Files changed (1)
  1. app.py +442 -0
app.py ADDED
@@ -0,0 +1,442 @@
+ # ==============================================================================
+ # PitchPerfect AI: Enterprise-Grade Sales Coach (Single File Application)
+ #
+ # This single file contains the complete application code, enhanced with
+ # YouTube support, JAX-based quantitative analysis, and a more robust
+ # agentic architecture.
+ # ==============================================================================
+
+ # ==============================================================================
+ # File: README.md (Instructions)
+ # ==============================================================================
+ """
+ # PitchPerfect AI: Enterprise-Grade Sales Coach
+
+ This application provides AI-powered feedback on sales pitches using Google's Gemini 1.5 Pro multimodal model, managed through the Vertex AI platform. It analyzes your content, vocal delivery, and visual presence to give you actionable insights for improvement.
+
+ This advanced version includes:
+ - Support for local video uploads and YouTube URLs.
+ - Quantitative vocal analysis powered by JAX for high performance.
+ - An agentic architecture where specialized tools (YouTube Downloader, JAX Analyzer) work in concert with the Gemini 1.5 Pro model.
+
+ ## 🔑 Prerequisites
+
+ 1. A Google Cloud Platform (GCP) project with billing enabled.
+ 2. The Vertex AI API and Cloud Storage API enabled in your GCP project (see the example command below).
+ 3. The `gcloud` CLI installed and authenticated on your local machine.
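+
+ For example, both APIs can be enabled with `gcloud` (these are the standard service IDs for Vertex AI and Cloud Storage):
+ ```bash
+ gcloud services enable aiplatform.googleapis.com storage.googleapis.com
+ ```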
+
+ ## Setup
+
+ 1. **Create a Google Cloud Storage (GCS) Bucket:**
+     * In your GCP project, create a new GCS bucket. It must have a globally unique name.
+     * **Example name:** `your-project-id-pitch-videos`
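+
+     A minimal example using the `gcloud` CLI (substitute your own bucket name and region):
+     ```bash
+     gcloud storage buckets create gs://your-project-id-pitch-videos --location=us-central1
+     ```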
+
+ 2. **Authenticate with Google Cloud:**
+     Run the following command in your terminal and follow the prompts. This sets up Application Default Credentials (ADC).
+     ```bash
+     gcloud auth application-default login
+     ```
+     *Note: The user/principal needs `Storage Object Admin` and `Vertex AI User` roles.*
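+
+     If those roles are missing, they can be granted with commands along these lines (the project ID and account are placeholders):
+     ```bash
+     gcloud projects add-iam-policy-binding YOUR_PROJECT_ID \
+         --member="user:you@example.com" --role="roles/aiplatform.user"
+     gcloud projects add-iam-policy-binding YOUR_PROJECT_ID \
+         --member="user:you@example.com" --role="roles/storage.objectAdmin"
+     ```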
+
+ 3. **Install Dependencies:**
+     Create a `requirements.txt` file with the content below and run `pip install -r requirements.txt`.
+     ```
+     gradio
+     google-cloud-aiplatform
+     google-cloud-storage
+     moviepy
+     # For JAX and Quantitative Analysis
+     jax
+     jaxlib
+     librosa
+     speechrecognition
+     openai-whisper
+     # For YouTube support
+     yt-dlp
+     ```
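+
+     *Note: `openai-whisper` and `yt-dlp` also expect a system `ffmpeg` binary (Whisper uses it for audio decoding, yt-dlp for merging video and audio streams). Install it with your platform's package manager, e.g.:*
+     ```bash
+     sudo apt-get install ffmpeg
+     ```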
+
+ 4. **Configure Project Details:**
+     * In this file, scroll down to the "CONFIGURATION" section.
+     * Set your `GCP_PROJECT_ID`, `GCP_LOCATION`, and `GCS_BUCKET_NAME`.
+
+ 5. **Run the Application:**
+     ```bash
+     python app.py
+     ```
+     This will launch a Gradio web server. **Look for a public URL ending in `.gradio.live` in the output and open it in your browser.**
+ """
+
+ # ==============================================================================
+ # IMPORTS
+ # ==============================================================================
+ import logging
+ import json
+ import uuid
+ import os
+ import re
+ from typing import Dict, Any
+ import gradio as gr
+ import vertexai
+ from google.cloud import storage
+ from vertexai.generative_models import (
+     GenerativeModel, Part, GenerationConfig,
+     HarmCategory, HarmBlockThreshold
+ )
+
+ # Third-party imports for advanced features
+ import yt_dlp
+ import librosa
+ import numpy as np
+ import whisper
+ import jax
+ import jax.numpy as jnp
+ from moviepy.editor import VideoFileClip
+
+
+ # ==============================================================================
+ # CONFIGURATION
+ # ==============================================================================
+ # --- GCP and Vertex AI Configuration ---
+ GCP_PROJECT_ID = "aniket-personal"
+ GCP_LOCATION = "us-central1"
+
+ # --- GCS Configuration ---
+ GCS_BUCKET_NAME = "ghiblify"
+
+ # --- Model Configuration ---
+ MODEL_GEMINI_PRO = "gemini-1.5-pro-002"
+
+ # --- Example Videos ---
+ # These are publicly accessible videos for demonstration purposes.
+ EXAMPLE_VIDEOS = [
+     ["Confident Business Presentation", "https://storage.googleapis.com/pitchperfect-ai-examples/business_pitch_example.mp4"],
+     ["Casual Tech Talk", "https://storage.googleapis.com/pitchperfect-ai-examples/tech_talk_example.mp4"],
+ ]
+
+ # --- Schemas for Controlled Generation (as Dictionaries) ---
+ FEEDBACK_ITEM_SCHEMA = {
+     "type": "object",
+     "properties": {
+         "score": {"type": "integer", "minimum": 1, "maximum": 10},
+         "feedback": {"type": "string"}
+     },
+     "required": ["score", "feedback"]
+ }
+ HOLISTIC_ANALYSIS_SCHEMA = {
+     "type": "object",
+     "properties": {
+         "content_analysis": {"type": "object", "properties": {"clarity": FEEDBACK_ITEM_SCHEMA, "structure": FEEDBACK_ITEM_SCHEMA, "value_proposition": FEEDBACK_ITEM_SCHEMA, "cta": FEEDBACK_ITEM_SCHEMA}},
+         "vocal_analysis": {"type": "object", "properties": {"pacing": FEEDBACK_ITEM_SCHEMA, "vocal_variety": FEEDBACK_ITEM_SCHEMA, "confidence_energy": FEEDBACK_ITEM_SCHEMA, "clarity_enunciation": FEEDBACK_ITEM_SCHEMA}},
+         "visual_analysis": {"type": "object", "properties": {"eye_contact": FEEDBACK_ITEM_SCHEMA, "body_language": FEEDBACK_ITEM_SCHEMA, "facial_expressions": FEEDBACK_ITEM_SCHEMA}}
+     },
+     "required": ["content_analysis", "vocal_analysis", "visual_analysis"]
+ }
+ FINAL_SYNTHESIS_SCHEMA = {
+     "type": "object",
+     "properties": {
+         "key_strengths": {"type": "string"},
+         "growth_opportunities": {"type": "string"},
+         "executive_summary": {"type": "string"}
+     },
+     "required": ["key_strengths", "growth_opportunities", "executive_summary"]
+ }
+
+ # --- Enhanced Prompts ---
+ PROMPT_HOLISTIC_VIDEO_ANALYSIS = """
+ You are an expert sales coach. Analyze the provided video and the supplementary quantitative metrics to generate a structured, holistic feedback report. Your output MUST strictly conform to the provided JSON schema, including the 1-10 score range.
+
+ **Quantitative Metrics (for additional context):**
+ {quantitative_metrics_json}
+
+ **Evaluation Framework (Analyze the video directly):**
+ 1. **Content & Structure:** Analyze clarity, flow, value proposition, and the call to action.
+ 2. **Vocal Delivery:** Analyze pacing, vocal variety, confidence, energy, and enunciation. Use the quantitative metrics to inform your qualitative assessment.
+ 3. **Visual Delivery:** Analyze eye contact, body language, and facial expressions.
+
+ Provide specific examples from the video to support your points.
+ """
+
+ PROMPT_FINAL_SYNTHESIS = """
+ You are a senior executive coach. Synthesize the provided detailed analysis data into a high-level summary. Your output MUST strictly conform to the provided JSON schema.
+
+ - "key_strengths" should be a single string with bullet points (e.g., "- Point one\\n- Point two").
+ - "growth_opportunities" should be a single string, formatted similarly.
+ - "executive_summary" should be a single string paragraph.
+
+ **Detailed Analysis Data:**
+ ---
+ {full_analysis_json}
+ ---
+ """
+
+ # ==============================================================================
+ # AGENT TOOLKIT
+ # ==============================================================================
+ class YouTubeDownloaderTool:
+     """A tool to download a YouTube video to a local path."""
+     def run(self, url: str, output_dir: str = "temp_downloads") -> str:
+         if not os.path.exists(output_dir):
+             os.makedirs(output_dir)
+
+         filepath = os.path.join(output_dir, f"{uuid.uuid4()}.mp4")
+         ydl_opts = {
+             # Prefer separate MP4 video + M4A audio streams (merged by ffmpeg), falling back to the best single file.
+             'format': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best',
+             'outtmpl': filepath,
+             'quiet': True,
+         }
+         with yt_dlp.YoutubeDL(ydl_opts) as ydl:
+             ydl.download([url])
+         return filepath
+
+ class QuantitativeAudioTool:
+     """A tool for performing objective, numerical analysis on an audio track."""
+     class JAXAudioProcessor:
+         """A nested class demonstrating JAX for high-performance audio processing."""
+         def __init__(self):
+             self.jit_rms_energy = jax.jit(self._calculate_rms_energy)
+
+         @staticmethod
+         @jax.jit
+         def _calculate_rms_energy(waveform: jnp.ndarray) -> jnp.ndarray:
+             return jnp.sqrt(jnp.mean(jnp.square(waveform)))
+
+         def analyze_energy_variation(self, waveform_np):
+             if waveform_np is None or waveform_np.size == 0: return 0.0
+             waveform_jnp = jnp.asarray(waveform_np)
+             frame_length, hop_length = 2048, 512
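+             # Build a (num_frames, frame_length) index matrix via broadcasting, gather all
+             # frames in one shot, then vmap the JIT-compiled RMS over the frame axis; the
+             # standard deviation of the per-frame energies is the "vocal energy variation" metric.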
+             num_frames = (waveform_jnp.shape[0] - frame_length) // hop_length
+             if num_frames <= 0: return 0.0  # clip shorter than one frame; avoid NaN from an empty std
+             start_positions = jnp.arange(num_frames) * hop_length
+             offsets = jnp.arange(frame_length)
+             frame_indices = start_positions[:, None] + offsets[None, :]
+             frames = waveform_jnp[frame_indices]
+             frame_energies = jax.vmap(self.jit_rms_energy)(frames)
+             return float(jnp.std(frame_energies))
+
+     def __init__(self):
+         self.jax_processor = self.JAXAudioProcessor()
+         self.whisper_model = whisper.load_model("base.en")
+
+     def run(self, video_path: str, output_dir: str = "temp_output"):
+         if not os.path.exists(output_dir): os.makedirs(output_dir)
+         video = None
+         try:
+             video = VideoFileClip(video_path)
+
+             if video.audio is None:
+                 raise ValueError("The provided video file does not contain an audio track, or it could not be decoded. Analysis cannot proceed.")
+
+             audio_path = os.path.join(output_dir, f"audio_{uuid.uuid4()}.wav")
+             video.audio.write_audiofile(audio_path, codec='pcm_s16le', fps=16000)
+
+             # fp16=False avoids Whisper's FP16-on-CPU warning; the transcript is only used for a word count.
+             transcript_result = self.whisper_model.transcribe(audio_path, fp16=False)
+             word_count = len(transcript_result['text'].split())
+             duration = video.duration
+             pace = (word_count / duration) * 60 if duration > 0 else 0
+
+             y, sr = librosa.load(audio_path, sr=16000)
+             energy_variation = self.jax_processor.analyze_energy_variation(y)
+
+             os.remove(audio_path)
+
+             return {
+                 "speaking_pace_wpm": round(pace, 2),
+                 "vocal_energy_variation": round(energy_variation, 4),
+             }
+         finally:
+             if video:
+                 video.close()
+
+ # ==============================================================================
+ # VERTEX AI MANAGER CLASS
+ # ==============================================================================
+ class VertexAIManager:
+     def __init__(self):
+         vertexai.init(project=GCP_PROJECT_ID, location=GCP_LOCATION)
+         self.model = GenerativeModel(MODEL_GEMINI_PRO)
+
+     def run_multimodal_analysis(self, video_gcs_uri: str, prompt: str) -> dict:
+         video_part = Part.from_uri(uri=video_gcs_uri, mime_type="video/mp4")
+         contents = [video_part, prompt]
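+         # Controlled generation: response_mime_type="application/json" plus response_schema
+         # constrains Gemini to JSON matching HOLISTIC_ANALYSIS_SCHEMA, so the response text can be parsed directly.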
+         config = GenerationConfig(response_schema=HOLISTIC_ANALYSIS_SCHEMA, temperature=0.2, response_mime_type="application/json")
+         response = self.model.generate_content(contents, generation_config=config)
+         return json.loads(response.text)
+
+     def run_synthesis(self, prompt: str) -> dict:
+         config = GenerationConfig(response_schema=FINAL_SYNTHESIS_SCHEMA, temperature=0.3, response_mime_type="application/json")
+         response = self.model.generate_content(prompt, generation_config=config)
+         return json.loads(response.text)
+
+ # ==============================================================================
+ # AGENT CLASS
+ # ==============================================================================
+ class PitchAnalyzerAgent:
+     def __init__(self):
+         self.vertex_manager = VertexAIManager()
+         self.storage_client = storage.Client(project=GCP_PROJECT_ID)
+         self.youtube_tool = YouTubeDownloaderTool()
+         self.quant_tool = QuantitativeAudioTool()
+         self._check_bucket()
+
+     def _check_bucket(self):
+         self.storage_client.get_bucket(GCS_BUCKET_NAME)
+
+     def _upload_to_gcs(self, path: str) -> str:
+         bucket = self.storage_client.bucket(GCS_BUCKET_NAME)
+         blob_name = f"pitch-videos/{uuid.uuid4()}.mp4"
+         blob = bucket.blob(blob_name)
+         blob.upload_from_filename(path)
+         return f"gs://{GCS_BUCKET_NAME}/{blob_name}"
+
+     def _delete_from_gcs(self, gcs_uri: str):
+         bucket_name, blob_name = gcs_uri.replace("gs://", "").split("/", 1)
+         self.storage_client.bucket(bucket_name).blob(blob_name).delete()
+
+     def run_analysis_pipeline(self, video_path_or_url: str, progress_callback):
+         local_video_path = None
+         video_gcs_uri = None
+         try:
+             if re.match(r"^(https?://)?(www\.)?(youtube\.com|youtu\.?be)/.+$", video_path_or_url):
+                 progress_callback(0.1, "Downloading video from YouTube...")
+                 local_video_path = self.youtube_tool.run(video_path_or_url)
+             else:
+                 local_video_path = video_path_or_url
+
+             progress_callback(0.3, "Performing JAX-based quantitative analysis...")
+             quant_metrics = self.quant_tool.run(local_video_path)
+
+             progress_callback(0.5, "Uploading video to secure Cloud Storage...")
+             video_gcs_uri = self._upload_to_gcs(local_video_path)
+
+             progress_callback(0.7, "Gemini 1.5 Pro is analyzing the video...")
+             analysis_prompt = PROMPT_HOLISTIC_VIDEO_ANALYSIS.format(quantitative_metrics_json=json.dumps(quant_metrics, indent=2))
+             multimodal_analysis = self.vertex_manager.run_multimodal_analysis(video_gcs_uri, analysis_prompt)
+
+             progress_callback(0.9, "Synthesizing final report...")
+             synthesis_prompt = PROMPT_FINAL_SYNTHESIS.format(full_analysis_json=json.dumps(multimodal_analysis, indent=2))
+             final_summary = self.vertex_manager.run_synthesis(synthesis_prompt)
+
+             return {"quantitative_metrics": quant_metrics, "multimodal_analysis": multimodal_analysis, "executive_summary": final_summary}
+         except Exception as e:
+             logging.error(f"Analysis pipeline failed: {e}", exc_info=True)
+             return {"error": str(e)}
+         finally:
+             if video_gcs_uri:
+                 try:
+                     self._delete_from_gcs(video_gcs_uri)
+                 except Exception as e:
+                     logging.warning(f"Failed to delete GCS object {video_gcs_uri}: {e}")
+             if local_video_path and video_path_or_url != local_video_path:
+                 if os.path.exists(local_video_path): os.remove(local_video_path)
+
+ # ==============================================================================
+ # UI FORMATTING HELPER
+ # ==============================================================================
+ def format_feedback_markdown(analysis: dict) -> str:
+     if not analysis or "error" in analysis:
+         return f"## Analysis Failed 😞\n\n**Reason:** {(analysis or {}).get('error', 'Unknown error.')}"
+
+     summary = analysis.get('executive_summary', {})
+     metrics = analysis.get('quantitative_metrics', {})
+     ai_analysis = analysis.get('multimodal_analysis', {})
+
+     def get_pace_rating(wpm):
+         if wpm == 0: return "N/A (No speech detected)"
+         if wpm < 120: return "Slow / Deliberate"
+         if wpm <= 160: return "Conversational"
+         return "Fast-Paced"
+
+     def get_energy_rating(variation):
+         if variation == 0: return "N/A"
+         if variation < 0.02: return "Consistent / Monotonous"
+         if variation <= 0.05: return "Moderately Dynamic"
+         return "Highly Dynamic & Engaging"
+
+     wpm = metrics.get('speaking_pace_wpm', 0)
+     energy_var = metrics.get('vocal_energy_variation', 0)
+     pace_rating = get_pace_rating(wpm)
+     energy_rating = get_energy_rating(energy_var)
+
+     metrics_md = f"""
+ - **Speaking Pace:** **{wpm} WPM** *(Rating: {pace_rating})*
+   - *This measures the number of words spoken per minute. A typical conversational pace is between 120 and 160 WPM.*
+ - **Vocal Energy Variation:** **{energy_var:.4f}** *(Rating: {energy_rating})*
+   - *This measures the standard deviation of your vocal loudness. A higher value indicates a more dynamic and engaging vocal range, while a very low value suggests a monotonous delivery.*
+ """
+
+     # Use bold text (rather than sub-headers) for each scored item so the report renders consistently.
+     def format_ai_item(title, data):
+         if not data or "score" not in data: return f"**{title}:**\n> Analysis not available.\n\n"
+         raw_score = data.get('score', 0)
+         score = max(1, min(10, raw_score))
+         stars = "🟢" * score + "⚪️" * (10 - score)
+         feedback = data.get('feedback', 'No feedback.').replace('\n', '\n> ')
+         return f"**{title}:** `{stars} [{score}/10]`\n\n> {feedback}\n\n"
+
+     content = ai_analysis.get('content_analysis', {})
+     vocal = ai_analysis.get('vocal_analysis', {})
+     visual = ai_analysis.get('visual_analysis', {})
+
+     # Assemble the full markdown report.
+     return f"""
+ # PitchPerfect AI Analysis Report 📊
+ ## 🏆 Executive Summary
+ ### Key Strengths
+ {summary.get('key_strengths', '- N/A')}
+ ### High-Leverage Growth Opportunities
+ {summary.get('growth_opportunities', '- N/A')}
+ ### Final Verdict
+ > {summary.get('executive_summary', 'N/A')}
+ ---
+ ## 📈 Quantitative Metrics Explained (via JAX)
+ {metrics_md}
+ ---
+ ## 🧠 AI Multimodal Analysis (via Gemini 1.5 Pro)
+ ### I. Content & Structure
+ {format_ai_item("Clarity", content.get('clarity'))}
+ {format_ai_item("Structure & Flow", content.get('structure'))}
+ {format_ai_item("Value Proposition", content.get('value_proposition'))}
+ {format_ai_item("Call to Action (CTA)", content.get('cta'))}
+ <hr style="border:1px solid #ddd">
+
+ ### II. Vocal Delivery
+ {format_ai_item("Pacing", vocal.get('pacing'))}
+ {format_ai_item("Vocal Variety", vocal.get('vocal_variety'))}
+ {format_ai_item("Confidence & Energy", vocal.get('confidence_energy'))}
+ {format_ai_item("Clarity & Enunciation", vocal.get('clarity_enunciation'))}
+ <hr style="border:1px solid #ddd">
+
+ ### III. Visual Delivery
+ {format_ai_item("Eye Contact", visual.get('eye_contact'))}
+ {format_ai_item("Body Language", visual.get('body_language'))}
+ {format_ai_item("Facial Expressions", visual.get('facial_expressions'))}
+ """
+
+ # ==============================================================================
+ # GRADIO APPLICATION
+ # ==============================================================================
+ if __name__ == "__main__":
+     logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+     pitch_agent = None
+     try:
+         pitch_agent = PitchAnalyzerAgent()
+     except Exception as e:
+         logging.fatal(f"Failed to initialize agent during startup: {e}", exc_info=True)
+
+     def run_analysis_pipeline(video_path, url_path, progress=gr.Progress(track_tqdm=True)):
+         if not pitch_agent: return "## FATAL ERROR: Application not initialized. Check logs and config."
+         input_path = url_path if url_path else video_path
+         if not input_path: return "## No Video Provided. Please upload a video or enter a YouTube URL."
+
+         analysis_result = pitch_agent.run_analysis_pipeline(input_path, progress)
+         return format_feedback_markdown(analysis_result)
+
+     with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="orange")) as demo:
+         gr.Markdown("# **Video Analysis AI**: Your Enterprise-Grade Sales Coach 🚀")
+         with gr.Row():
+             with gr.Column(scale=1):
+                 video_uploader = gr.Video(label="Upload Your Pitch", sources=["upload"])
+                 gr.Markdown("--- **OR** ---")
+                 youtube_url = gr.Textbox(label="Enter YouTube URL")
+                 analyze_button = gr.Button("Analyze My Pitch 🧠", variant="primary")
+                 # Each example row must match the single URL input component, so only the URLs are passed.
+                 gr.Examples(examples=[[url] for _title, url in EXAMPLE_VIDEOS], inputs=youtube_url, label="Example Pitches (Click to Use)")
+             with gr.Column(scale=2):
+                 analysis_output = gr.Markdown(label="Your Feedback Report", value="### Your detailed report will appear here...")
+         analyze_button.click(fn=run_analysis_pipeline, inputs=[video_uploader, youtube_url], outputs=analysis_output)
+
+     if pitch_agent:
+         demo.launch(debug=True, share=True)
+     else:
+         print("\n" + "="*80 + "\nCOULD NOT START GRADIO APP: Agent failed to initialize.\n" + "="*80)