import streamlit as st
import json
import zipfile
import io
import time
import os
import requests
from PIL import Image
import base64
import textwrap
from dotenv import load_dotenv
from openai import OpenAI # Updated OpenAI client
from elevenlabs import ElevenLabs # Official ElevenLabs SDK import
# Load environment variables
load_dotenv()
# Initialize API clients
openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) if os.getenv("OPENAI_API_KEY") else None
ELEVENLABS_API_KEY = os.getenv("ELEVENLABS_API_KEY")
eleven_client = ElevenLabs(api_key=ELEVENLABS_API_KEY) if ELEVENLABS_API_KEY else None
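
# The two clients above read their keys from the environment; with python-dotenv,
# a local .env file next to app.py would look roughly like this (placeholder values,
# key names taken from the os.getenv calls above):
#
#   OPENAI_API_KEY=sk-...
#   ELEVENLABS_API_KEY=...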
# =============================
# UPDATED AGENT IMPLEMENTATION (OpenAI v1.x compatible)
# =============================
class TopicAgent:
    def generate_outline(self, topic, duration, difficulty):
        if not openai_client:
            return self._mock_outline(topic, duration, difficulty)
        try:
            response = openai_client.chat.completions.create(
                model="gpt-4-turbo",
                messages=[
                    {"role": "system", "content": "You're an expert corporate trainer creating comprehensive AI workshop outlines."},
                    {"role": "user", "content": (
                        f"Create a detailed {duration}-hour {difficulty} workshop outline on {topic}. "
                        "Include: 4-6 modules with specific learning objectives, hands-on exercises, "
                        "and real-world case studies. Format as JSON with keys: "
                        "{'topic', 'duration', 'difficulty', 'goals', 'modules': ["
                        "{'title', 'duration', 'learning_objectives', 'case_study', 'exercises'}]}"
                    )}
                ],
                temperature=0.3,
                max_tokens=1500,
                response_format={"type": "json_object"}
            )
            return json.loads(response.choices[0].message.content)
        except Exception as e:
            st.error(f"Outline generation error: {e}")
            return self._mock_outline(topic, duration, difficulty)

    def _mock_outline(self, topic, duration, difficulty):
        return {
            "topic": topic,
            "duration": f"{duration} hours",
            "difficulty": difficulty,
            "goals": [
                "Master core concepts and advanced techniques",
                "Develop practical implementation skills",
                "Learn industry best practices and case studies",
                "Build confidence in real-world applications"
            ],
            "modules": [
                {
                    "title": "Foundations of Prompt Engineering",
                    "duration": "90 min",
                    "learning_objectives": [
                        "Understand prompt components and structure",
                        "Learn prompt patterns and anti-patterns",
                        "Master zero-shot and few-shot prompting"
                    ],
                    "case_study": "How Anthropic improved customer support with prompt engineering",
                    "exercises": [
                        "Craft effective prompts for different scenarios",
                        "Optimize prompts for specific AI models"
                    ]
                },
                {
                    "title": "Advanced Techniques & Strategies",
                    "duration": "120 min",
                    "learning_objectives": [
                        "Implement chain-of-thought prompting",
                        "Use meta-prompts for complex tasks",
                        "Apply self-consistency methods"
                    ],
                    "case_study": "OpenAI's approach to prompt engineering in GPT-4",
                    "exercises": [
                        "Design prompts for multi-step reasoning",
                        "Create self-correcting prompt systems"
                    ]
                }
            ]
        }

class ContentAgent:
    def generate_content(self, outline):
        if not openai_client:
            return self._mock_content(outline)
        try:
            response = openai_client.chat.completions.create(
                model="gpt-4-turbo",
                messages=[
                    {"role": "system", "content": "You're a corporate training content developer creating detailed workshop materials."},
                    {"role": "user", "content": (
                        f"Expand this workshop outline into comprehensive content: {json.dumps(outline)}. "
                        "For each module, include: detailed script (3-5 paragraphs), speaker notes (bullet points), "
                        "3 quiz questions with explanations, and exercise instructions. Format as JSON with keys: "
                        "{'workshop_title', 'modules': [{'title', 'script', 'speaker_notes', 'quiz': ["
                        "{'question', 'options', 'answer', 'explanation'}], 'exercise_instructions'}]}"
                    )}
                ],
                temperature=0.4,
                max_tokens=2000,
                response_format={"type": "json_object"}
            )
            return json.loads(response.choices[0].message.content)
        except Exception as e:
            st.error(f"Content generation error: {e}")
            return self._mock_content(outline)

    def _mock_content(self, outline):
        return {
            "workshop_title": f"Mastering {outline['topic']}",
            "modules": [
                {
                    "title": "Foundations of Prompt Engineering",
                    "script": "This module introduces the core concepts of effective prompt engineering...",
                    "speaker_notes": [
                        "Emphasize the importance of clear instructions",
                        "Show examples of good vs bad prompts",
                        "Discuss token limitations and their impact"
                    ],
                    "quiz": [
                        {
                            "question": "What's the most important element of a good prompt?",
                            "options": ["Length", "Specificity", "Complexity", "Creativity"],
                            "answer": "Specificity",
                            "explanation": "Specific prompts yield more accurate and relevant responses"
                        }
                    ],
                    "exercise_instructions": "Create a prompt that extracts key insights from a financial report..."
                }
            ]
        }

class SlideAgent:
    def generate_slides(self, content):
        if not openai_client:
            return self._mock_slides(content)
        try:
            response = openai_client.chat.completions.create(
                model="gpt-4-turbo",
                messages=[
                    {"role": "system", "content": "You create professional slide decks in Markdown format using Marp syntax."},
                    {"role": "user", "content": (
                        f"Create a slide deck for this workshop content: {json.dumps(content)}. "
                        "Use Marp Markdown format with themes and visual elements. "
                        "Include: title slide, module slides with key points, case studies, "
                        "exercise instructions, and summary slides. Make it visually appealing."
                    )}
                ],
                temperature=0.2,
                max_tokens=2500
            )
            return response.choices[0].message.content
        except Exception as e:
            st.error(f"Slide generation error: {e}")
            return self._mock_slides(content)

    def _mock_slides(self, content):
        return f"""---
marp: true
theme: gaia
backgroundColor: #fff
backgroundImage: url('https://marp.app/assets/hero-background.svg')
---

# {content['workshop_title']}
## Comprehensive Corporate Training Program

---

## Module 1: Foundations of Prompt Engineering

![w:250](https://images.unsplash.com/photo-1677442135722-5fcdbdf1b7e6)

- Core concepts and principles
- Patterns and anti-patterns
- Practical implementation techniques

---

## Case Study
### Anthropic's Customer Support Implementation

- 40% faster resolution times
- 25% reduction in training costs
- 92% customer satisfaction

---

## Exercises

1. Craft effective prompts for different scenarios
2. Optimize prompts for specific AI models
3. Analyze and refine prompt performance
"""

class CodeAgent:
    def generate_code(self, content):
        if not openai_client:
            return self._mock_code(content)
        try:
            response = openai_client.chat.completions.create(
                model="gpt-4-turbo",
                messages=[
                    {"role": "system", "content": "You create practical code labs for technical workshops."},
                    {"role": "user", "content": (
                        f"Create a Jupyter notebook with code exercises for this workshop: {json.dumps(content)}. "
                        "Include: setup instructions, practical exercises with solutions, "
                        "and real-world implementation examples. Use Python with popular AI libraries."
                    )}
                ],
                temperature=0.3,
                max_tokens=2000
            )
            return response.choices[0].message.content
        except Exception as e:
            st.error(f"Code generation error: {e}")
            return self._mock_code(content)

    def _mock_code(self, content):
        # Literal braces in the sample code below are doubled so the f-string does
        # not treat them as replacement fields.
        return f"""
# {content['workshop_title']} - Code Labs

import openai
import pandas as pd

## Exercise 1: Basic Prompt Engineering
def generate_response(prompt):
    response = openai.chat.completions.create(
        model="gpt-4",
        messages=[{{"role": "user", "content": prompt}}]
    )
    return response.choices[0].message.content

print(generate_response("Explain quantum computing in simple terms"))

## Exercise 2: Advanced Prompt Patterns
# TODO: Implement chain-of-thought prompting
# TODO: Create meta-prompts for complex tasks

## Real-World Implementation
# TODO: Build a customer support question classifier
"""

class DesignAgent:
    def generate_design(self, slide_content):
        if not openai_client:
            return None
        try:
            response = openai_client.images.generate(
                prompt=f"Create a professional slide background for a corporate AI workshop about: {slide_content[:500]}",
                n=1,
                size="1024x1024"
            )
            return response.data[0].url
        except Exception as e:
            st.error(f"Design generation error: {e}")
            return None

class VoiceoverAgent:
    def __init__(self):
        self.client = eleven_client
        self.default_voice_id = "9BWtsMINqrJLrRacOk9x"  # Default fallback voice
        self.model_id = "eleven_monolingual_v1"

    def get_voices(self):
        if not self.client:
            return []
        try:
            voices = self.client.voices.list()
            return [{"voice_id": v.voice_id, "name": v.name} for v in voices]
        except Exception as e:
            st.error(f"Voice loading error: {e}")
            return []

    def generate_voiceover(self, text, voice_id=None):
        if not self.client:
            return None
        try:
            vid = voice_id or self.default_voice_id
            audio = self.client.text_to_speech.convert(
                text=text,
                voice_id=vid,
                model_id=self.model_id,
                voice_settings={
                    "stability": 0.7,
                    "similarity_boost": 0.8,
                    "style": 0.5,
                    "use_speaker_boost": True
                }
            )
            return audio
        except Exception as e:
            st.error(f"Voiceover generation error: {e}")
            return None

# Initialize agents
topic_agent = TopicAgent()
content_agent = ContentAgent()
slide_agent = SlideAgent()
code_agent = CodeAgent()
design_agent = DesignAgent()
voiceover_agent = VoiceoverAgent()
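
# Illustrative wiring of the agents (the Streamlit UI below drives the actual flow;
# the topic, duration, and difficulty values here are placeholders):
#
#   outline = topic_agent.generate_outline("Prompt Engineering", 4, "Intermediate")
#   content = content_agent.generate_content(outline)
#   slides  = slide_agent.generate_slides(content)
#   labs    = code_agent.generate_code(content)
#   narration = voiceover_agent.generate_voiceover(content["modules"][0]["script"])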
# =====================
# STREAMLIT APPLICATION
# =====================
st.set_page_config(
    page_title="Workshop in a Box Pro",
    layout="wide",
    initial_sidebar_state="expanded"
)
# Custom CSS
st.markdown("""
<style>
.stApp {
    background: linear-gradient(135deg, #6a11cb 0%, #2575fc 100%);
    color: #fff;
}
.stTextInput>div>div>input {
    color: #333 !important;
    background-color: #fff !important;
}
.stSlider>div>div>div>div {
    background-color: rgba(255,255,255,0.1) !important;
    color: white !important;
}
.stButton>button {
    background: linear-gradient(to right, #00b09b, #96c93d) !important;
    color: white !important;
    border: none;
    border-radius: 30px;
    padding: 10px 25px;
    font-size: 16px;
    font-weight: bold;
}
.stDownloadButton>button {
    background: linear-gradient(to right, #ff5e62, #ff9966) !important;
}
.stExpander {
    background-color: rgba(0,0,0,0.2) !important;
    border-radius: 10px;
    padding: 15px;
}
.audio-player {
    margin: 15px 0;
    border-radius: 10px;
    background: rgba(255,255,255,0.1);
    padding: 15px;
}
.voice-option {
    display: flex;
    align-items: center;
    margin: 5px 0;
    padding