Update app.py
app.py
CHANGED
@@ -4,32 +4,32 @@ import zipfile
 import io
 import time
 import os
-import openai
 import requests
 from PIL import Image
 import base64
 import textwrap
 from dotenv import load_dotenv
+from openai import OpenAI  # Updated OpenAI client

 # Load environment variables
 load_dotenv()

-# Initialize API
-
+# Initialize API clients
+openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) if os.getenv("OPENAI_API_KEY") else None
 ELEVENLABS_API_KEY = os.getenv("ELEVENLABS_API_KEY")

 # =============================
-#
+# UPDATED AGENT IMPLEMENTATION (OpenAI v1.x compatible)
 # =============================

 class TopicAgent:
     def generate_outline(self, topic, duration, difficulty):
-        if not
+        if not openai_client:
             return self._mock_outline(topic, duration, difficulty)

         try:
-            response =
-                model="gpt-4",
+            response = openai_client.chat.completions.create(
+                model="gpt-4-turbo",
                 messages=[
                     {
                         "role": "system",
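For context on the hunk above: the commit swaps the legacy module-level `openai` calls for the v1.x client object, created once at import time and shared by every agent. A minimal sketch of the migration pattern, assuming `OPENAI_API_KEY` is exported; the prompt is a placeholder, not taken from app.py:

```python
# Minimal sketch of the SDK migration applied above (openai<1.0 -> openai>=1.0).
# Assumes OPENAI_API_KEY is exported; the prompt is a placeholder, not from app.py.
import os
from openai import OpenAI

client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# Old style (removed):  openai.ChatCompletion.create(model=..., messages=[...])
# New style (added):    client.chat.completions.create(model=..., messages=[...])
response = client.chat.completions.create(
    model="gpt-4-turbo",
    messages=[{"role": "user", "content": "Say hello"}],
)
print(response.choices[0].message.content)
```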
@@ -47,9 +47,10 @@ class TopicAgent:
                     }
                 ],
                 temperature=0.3,
-                max_tokens=1500
+                max_tokens=1500,
+                response_format={"type": "json_object"}
             )
-            return json.loads(response.choices[0].message
+            return json.loads(response.choices[0].message.content)
         except Exception as e:
             st.error(f"Outline generation error: {str(e)}")
             return self._mock_outline(topic, duration, difficulty)
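The new `response_format={"type": "json_object"}` argument enables the chat API's JSON mode, so the `json.loads(...)` call above receives a well-formed JSON object rather than free text. JSON mode requires the word "JSON" to appear somewhere in the messages. A hedged sketch with an illustrative system prompt (not the one TopicAgent actually sends):

```python
import json

# Hedged sketch of JSON mode with the shared client; the prompts are
# illustrative and not the ones TopicAgent actually uses.
response = openai_client.chat.completions.create(
    model="gpt-4-turbo",
    messages=[
        {"role": "system", "content": "Return a JSON object describing a workshop outline."},
        {"role": "user", "content": "Topic: prompt engineering, 2 hours, beginner level."},
    ],
    temperature=0.3,
    max_tokens=1500,
    response_format={"type": "json_object"},
)
outline = json.loads(response.choices[0].message.content)
```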
@@ -99,12 +100,12 @@ class TopicAgent:

 class ContentAgent:
     def generate_content(self, outline):
-        if not
+        if not openai_client:
             return self._mock_content(outline)

         try:
-            response =
-                model="gpt-4",
+            response = openai_client.chat.completions.create(
+                model="gpt-4-turbo",
                 messages=[
                     {
                         "role": "system",
@@ -122,9 +123,10 @@ class ContentAgent:
                     }
                 ],
                 temperature=0.4,
-                max_tokens=2000
+                max_tokens=2000,
+                response_format={"type": "json_object"}
             )
-            return json.loads(response.choices[0].message
+            return json.loads(response.choices[0].message.content)
         except Exception as e:
             st.error(f"Content generation error: {str(e)}")
             return self._mock_content(outline)
@@ -156,12 +158,12 @@ class ContentAgent:

 class SlideAgent:
     def generate_slides(self, content):
-        if not
+        if not openai_client:
             return self._mock_slides(content)

         try:
-            response =
-                model="gpt-4",
+            response = openai_client.chat.completions.create(
+                model="gpt-4-turbo",
                 messages=[
                     {
                         "role": "system",
@@ -180,7 +182,7 @@ class SlideAgent:
                 temperature=0.2,
                 max_tokens=2500
             )
-            return response.choices[0].message
+            return response.choices[0].message.content
         except Exception as e:
             st.error(f"Slide generation error: {str(e)}")
             return self._mock_slides(content)
@@ -224,12 +226,12 @@ backgroundImage: url('https://marp.app/assets/hero-background.svg')

 class CodeAgent:
     def generate_code(self, content):
-        if not
+        if not openai_client:
             return self._mock_code(content)

         try:
-            response =
-                model="gpt-4",
+            response = openai_client.chat.completions.create(
+                model="gpt-4-turbo",
                 messages=[
                     {
                         "role": "system",
@@ -247,7 +249,7 @@ class CodeAgent:
                 temperature=0.3,
                 max_tokens=2000
             )
-            return response.choices[0].message
+            return response.choices[0].message.content
         except Exception as e:
             st.error(f"Code generation error: {str(e)}")
             return self._mock_code(content)
@@ -260,11 +262,11 @@ import pandas as pd

 ## Exercise 1: Basic Prompt Engineering
 def generate_response(prompt):
-    response = openai.
+    response = openai.chat.completions.create(
         model="gpt-4",
         messages=[{{"role": "user", "content": prompt}}]
     )
-    return response.choices[0].message
+    return response.choices[0].message.content

 # Test your function
 print(generate_response("Explain quantum computing in simple terms"))
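Note that the generated exercise template keeps the module-level `openai.chat.completions.create(...)` call rather than the shared `openai_client`. In openai>=1.0 this still works through the library's default client when `OPENAI_API_KEY` is set in the environment. A sketch of the exercise as a learner would run it; the import and key handling are assumptions, not part of the template:

```python
# Sketch of the exercise as a learner would run it. Assumes openai>=1.0 and
# OPENAI_API_KEY exported in the environment; neither is shown in the template.
import openai

def generate_response(prompt):
    response = openai.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": prompt}],
    )
    return response.choices[0].message.content

print(generate_response("Explain quantum computing in simple terms"))
```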
@@ -279,16 +281,16 @@ print(generate_response("Explain quantum computing in simple terms"))

 class DesignAgent:
     def generate_design(self, slide_content):
-        if not
+        if not openai_client:
             return None

         try:
-            response =
+            response = openai_client.images.generate(
                 prompt=f"Create a professional slide background for a corporate AI workshop about: {slide_content[:500]}",
                 n=1,
                 size="1024x1024"
             )
-            return response
+            return response.data[0].url
         except Exception as e:
             st.error(f"Design generation error: {str(e)}")
             return None
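DesignAgent now returns the image URL from `response.data[0].url`, which matches the v1.x Images API response shape. A minimal sketch; the explicit `model` argument is an assumption, since the commit leaves it unset and relies on the API default:

```python
# Minimal sketch of the v1.x Images API call used by DesignAgent.
# The explicit model name is an assumption; app.py relies on the API default.
response = openai_client.images.generate(
    model="dall-e-3",  # assumption, not pinned in app.py
    prompt="Professional slide background for a corporate AI workshop",
    n=1,
    size="1024x1024",
)
image_url = response.data[0].url
```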
@@ -346,7 +348,8 @@ class VoiceoverAgent:
             if response.status_code == 200:
                 return response.json().get("voices", [])
             return []
-        except:
+        except Exception as e:
+            st.error(f"Voice loading error: {str(e)}")
             return []

 # Initialize agents
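The bare `except:` becomes `except Exception as e:` so voice-loading failures surface in the Streamlit UI instead of being silently swallowed. For reference, the lookup this method wraps is a single REST call; a sketch against the public ElevenLabs voices endpoint, where the explicit timeout is an added assumption:

```python
import requests

# Sketch of the ElevenLabs voice listing that VoiceoverAgent wraps.
# Endpoint and xi-api-key header follow the public ElevenLabs REST API;
# the explicit timeout is an assumption, not present in app.py.
def list_voices(api_key):
    response = requests.get(
        "https://api.elevenlabs.io/v1/voices",
        headers={"xi-api-key": api_key},
        timeout=30,
    )
    if response.status_code == 200:
        return response.json().get("voices", [])
    return []
```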
@@ -367,14 +370,19 @@ st.set_page_config(
     initial_sidebar_state="expanded"
 )

-# Custom CSS
+# Custom CSS with fixed input styling
 st.markdown("""
 <style>
 .stApp {
     background: linear-gradient(135deg, #6a11cb 0%, #2575fc 100%);
     color: #fff;
 }
-
+/* Fix for input text color */
+.stTextInput>div>div>input {
+    color: #333 !important;
+    background-color: #fff !important;
+}
+.stSlider>div>div>div>div {
     background-color: rgba(255,255,255,0.1) !important;
     color: white !important;
 }
@@ -500,7 +508,10 @@ with st.sidebar:
         selected_voice_name = next((v['name'] for v in voices if v['voice_id'] == st.session_state.selected_voice), "Default")
         st.info(f"Selected Voice: **{selected_voice_name}**")
     else:
-
+        if ELEVENLABS_API_KEY:
+            st.warning("Couldn't load voices. Using default voice.")
+        else:
+            st.warning("ElevenLabs API key not set. Voiceovers disabled.")

     if st.button("✨ Generate Workshop", type="primary", use_container_width=True):
         st.session_state.generating = True
@@ -522,7 +533,7 @@ if st.session_state.generating:
     voiceovers = {}
     if include_voiceover and ELEVENLABS_API_KEY:
         for i, module in enumerate(content.get("modules", [])):
-            # Create a short intro for each module
+            # Create a short intro for each module
             intro_text = f"Welcome to Module {i+1}: {module['title']}. " + \
                          f"In this module, we'll cover: {', '.join(module.get('speaker_notes', []))[:300]}"

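Each module intro string is then handed to text-to-speech. The TTS call itself sits outside this hunk; as a hedged sketch, an ElevenLabs text-to-speech request typically looks like the following, where the voice and model IDs are placeholders and the agent's actual method may differ:

```python
# Hedged sketch of an ElevenLabs text-to-speech request; VoiceoverAgent's real
# method is not shown in this diff. voice_id and model_id are placeholders.
import requests

def synthesize(text, voice_id, api_key):
    response = requests.post(
        f"https://api.elevenlabs.io/v1/text-to-speech/{voice_id}",
        headers={"xi-api-key": api_key, "Content-Type": "application/json"},
        json={"text": text, "model_id": "eleven_multilingual_v2"},
        timeout=60,
    )
    return response.content if response.status_code == 200 else None
```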
@@ -614,6 +625,8 @@ if st.session_state.generated:
                     mime="audio/mpeg",
                     key=f"voiceover_dl_{i}"
                 )
+    elif include_voiceover and ELEVENLABS_API_KEY:
+        st.warning("Voiceovers not generated. Check your ElevenLabs API key.")

     # Sales and booking section
     st.divider()
@@ -639,15 +652,15 @@ with col2:
 # Debug info
 with st.sidebar:
     st.divider()
-    if
+    if openai_client:
         st.success("OpenAI API Connected")
     else:
         st.warning("OpenAI API not set - using enhanced mock data")

     if ELEVENLABS_API_KEY:
-        st.success("ElevenLabs API
-    elif
-        st.warning("ElevenLabs API not set
+        st.success("ElevenLabs API Key Found")
+    elif include_voiceover:
+        st.warning("ElevenLabs API key not set")

     st.info(f"""
     **Current Workshop:**