File size: 13,501 Bytes
af64a0f
 
 
 
 
96005ed
a90e972
 
 
 
96005ed
3ee401e
7154b6b
96005ed
 
 
 
3ee401e
 
d07300c
a90e972
7154b6b
 
a90e972
3ee401e
a90e972
f0d7cfb
 
 
3ee401e
f0d7cfb
 
3ee401e
 
f0d7cfb
7154b6b
 
 
 
 
 
 
 
a90e972
 
3ee401e
 
f0d7cfb
3ee401e
a90e972
7154b6b
f0d7cfb
7154b6b
f0d7cfb
 
 
 
 
 
a90e972
 
 
 
f0d7cfb
 
 
a90e972
 
 
 
 
 
 
 
 
 
 
f0d7cfb
 
 
a90e972
 
 
 
 
 
 
 
 
 
 
f0d7cfb
 
 
 
 
 
 
3ee401e
f0d7cfb
 
3ee401e
 
f0d7cfb
7154b6b
 
 
 
 
 
 
 
a90e972
 
3ee401e
 
f0d7cfb
3ee401e
a90e972
7154b6b
f0d7cfb
7154b6b
f0d7cfb
 
 
 
 
a90e972
 
 
 
 
 
 
f0d7cfb
7154b6b
 
 
 
a90e972
 
 
f0d7cfb
 
 
 
 
3ee401e
a90e972
 
3ee401e
 
a90e972
7154b6b
 
 
 
 
 
 
a90e972
 
 
 
3ee401e
a90e972
7154b6b
a90e972
7154b6b
a90e972
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f0d7cfb
 
 
3ee401e
a90e972
 
3ee401e
 
a90e972
7154b6b
 
 
 
 
 
a90e972
 
 
 
3ee401e
a90e972
7154b6b
a90e972
7154b6b
a90e972
7154b6b
 
a90e972
 
 
 
 
 
3ee401e
a90e972
7154b6b
a90e972
3ee401e
a90e972
 
 
 
 
 
 
 
 
 
 
 
 
3ee401e
a90e972
 
3ee401e
a90e972
 
 
 
3ee401e
a90e972
7154b6b
a90e972
af64a0f
d07300c
 
7154b6b
 
 
 
 
 
 
 
 
 
 
 
 
 
d07300c
7154b6b
d07300c
 
7154b6b
 
 
 
 
 
d07300c
 
 
 
 
7154b6b
 
d07300c
7154b6b
d07300c
 
af64a0f
f0d7cfb
 
af64a0f
 
a90e972
d07300c
a90e972
 
 
 
 
 
 
 
 
 
af64a0f
7154b6b
a90e972
 
 
 
 
 
3ee401e
 
 
 
 
a90e972
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d07300c
 
 
 
 
 
 
 
 
 
7154b6b
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
import streamlit as st
import json
import zipfile
import io
import time
import os
import requests
from PIL import Image
import base64
import textwrap
from dotenv import load_dotenv
from openai import OpenAI  # Updated OpenAI client
from elevenlabs import ElevenLabs  # Official ElevenLabs SDK import

# Load configuration (API keys) from a local .env file, if one exists.
load_dotenv()

# Clients stay None when their key is missing; the agents below detect
# this and fall back to canned mock output instead of calling the APIs.
_openai_key = os.getenv("OPENAI_API_KEY")
openai_client = OpenAI(api_key=_openai_key) if _openai_key else None

ELEVENLABS_API_KEY = os.getenv("ELEVENLABS_API_KEY")
eleven_client = ElevenLabs(api_key=ELEVENLABS_API_KEY) if ELEVENLABS_API_KEY else None

# =============================
# UPDATED AGENT IMPLEMENTATION (OpenAI v1.x compatible)
# =============================

class TopicAgent:
    """Agent that produces a structured workshop outline for a given topic.

    Uses GPT-4 Turbo when an OpenAI client is configured; otherwise (or on
    any API/parse failure) returns a canned mock outline.
    """

    def generate_outline(self, topic, duration, difficulty):
        """Return an outline dict with keys: topic, duration, difficulty, goals, modules."""
        if openai_client is None:
            return self._mock_outline(topic, duration, difficulty)
        try:
            # json_object response format guarantees parseable JSON content.
            reply = openai_client.chat.completions.create(
                model="gpt-4-turbo",
                messages=[
                    {"role": "system", "content": "You're an expert corporate trainer creating comprehensive AI workshop outlines."},
                    {"role": "user", "content": (
                        f"Create a detailed {duration}-hour {difficulty} workshop outline on {topic}. "
                        "Include: 4-6 modules with specific learning objectives, hands-on exercises, "
                        "and real-world case studies. Format as JSON with keys: "
                        "{'topic', 'duration', 'difficulty', 'goals', 'modules': ["
                        "{'title', 'duration', 'learning_objectives', 'case_study', 'exercises'}]}"
                    )},
                ],
                temperature=0.3,
                max_tokens=1500,
                response_format={"type": "json_object"},
            )
            return json.loads(reply.choices[0].message.content)
        except Exception as err:
            # Any API or JSON failure degrades gracefully to the mock outline.
            st.error(f"Outline generation error: {err}")
            return self._mock_outline(topic, duration, difficulty)

    def _mock_outline(self, topic, duration, difficulty):
        """Canned two-module outline used when no OpenAI client is available."""
        foundations = {
            "title": "Foundations of Prompt Engineering",
            "duration": "90 min",
            "learning_objectives": [
                "Understand prompt components and structure",
                "Learn prompt patterns and anti-patterns",
                "Master zero-shot and few-shot prompting",
            ],
            "case_study": "How Anthropic improved customer support with prompt engineering",
            "exercises": [
                "Craft effective prompts for different scenarios",
                "Optimize prompts for specific AI models",
            ],
        }
        advanced = {
            "title": "Advanced Techniques & Strategies",
            "duration": "120 min",
            "learning_objectives": [
                "Implement chain-of-thought prompting",
                "Use meta-prompts for complex tasks",
                "Apply self-consistency methods",
            ],
            "case_study": "OpenAI's approach to prompt engineering in GPT-4",
            "exercises": [
                "Design prompts for multi-step reasoning",
                "Create self-correcting prompt systems",
            ],
        }
        return {
            "topic": topic,
            "duration": f"{duration} hours",
            "difficulty": difficulty,
            "goals": [
                "Master core concepts and advanced techniques",
                "Develop practical implementation skills",
                "Learn industry best practices and case studies",
                "Build confidence in real-world applications",
            ],
            "modules": [foundations, advanced],
        }

class ContentAgent:
    """Agent that expands an outline into full workshop content (scripts, notes, quizzes)."""

    def generate_content(self, outline):
        """Return a content dict with keys: workshop_title, modules; mock data without a client."""
        if openai_client is None:
            return self._mock_content(outline)
        try:
            reply = openai_client.chat.completions.create(
                model="gpt-4-turbo",
                messages=[
                    {"role": "system", "content": "You're a corporate training content developer creating detailed workshop materials."},
                    {"role": "user", "content": (
                        f"Expand this workshop outline into comprehensive content: {json.dumps(outline)}. "
                        "For each module, include: detailed script (3-5 paragraphs), speaker notes (bullet points), "
                        "3 quiz questions with explanations, and exercise instructions. Format as JSON with keys: "
                        "{'workshop_title', 'modules': [{'title', 'script', 'speaker_notes', 'quiz': ["
                        "{'question', 'options', 'answer', 'explanation'}], 'exercise_instructions'}]}"
                    )},
                ],
                temperature=0.4,
                max_tokens=2000,
                response_format={"type": "json_object"},
            )
            return json.loads(reply.choices[0].message.content)
        except Exception as err:
            # API or JSON failures fall back to the canned module below.
            st.error(f"Content generation error: {err}")
            return self._mock_content(outline)

    def _mock_content(self, outline):
        """Canned single-module content used when the OpenAI client is unavailable."""
        quiz_items = [
            {"question": "What's the most important element of a good prompt?",
             "options": ["Length", "Specificity", "Complexity", "Creativity"],
             "answer": "Specificity",
             "explanation": "Specific prompts yield more accurate and relevant responses"}
        ]
        foundations = {
            "title": "Foundations of Prompt Engineering",
            "script": "This module introduces the core concepts of effective prompt engineering...",
            "speaker_notes": [
                "Emphasize the importance of clear instructions",
                "Show examples of good vs bad prompts",
                "Discuss token limitations and their impact",
            ],
            "quiz": quiz_items,
            "exercise_instructions": "Create a prompt that extracts key insights from a financial report...",
        }
        return {
            "workshop_title": f"Mastering {outline['topic']}",
            "modules": [foundations],
        }

class SlideAgent:
    """Agent that renders workshop content as a Marp Markdown slide deck."""

    def generate_slides(self, content):
        """Return Marp Markdown text for *content*; static mock deck without a client."""
        if openai_client is None:
            return self._mock_slides(content)
        try:
            reply = openai_client.chat.completions.create(
                model="gpt-4-turbo",
                messages=[
                    {"role": "system", "content": "You create professional slide decks in Markdown format using Marp syntax."},
                    {"role": "user", "content": (
                        f"Create a slide deck for this workshop content: {json.dumps(content)}. "
                        "Use Marp Markdown format with themes and visual elements. "
                        "Include: title slide, module slides with key points, case studies, "
                        "exercise instructions, and summary slides. Make it visually appealing."
                    )},
                ],
                temperature=0.2,
                max_tokens=2500,
            )
            return reply.choices[0].message.content
        except Exception as err:
            st.error(f"Slide generation error: {err}")
            return self._mock_slides(content)

    def _mock_slides(self, content):
        """Static Marp deck fallback; only the workshop title is substituted in."""
        deck_title = content['workshop_title']
        return f"""---
marp: true
theme: gaia
backgroundColor: #fff
backgroundImage: url('https://marp.app/assets/hero-background.svg')
---

# {deck_title}
## Comprehensive Corporate Training Program

---

## Module 1: Foundations of Prompt Engineering
![w:250](https://images.unsplash.com/photo-1677442135722-5fcdbdf1b7e6)

- Core concepts and principles
- Patterns and anti-patterns
- Practical implementation techniques

---

## Case Study
### Anthropic's Customer Support Implementation
- 40% faster resolution times
- 25% reduction in training costs
- 92% customer satisfaction

---

## Exercises
1. Craft effective prompts for different scenarios
2. Optimize prompts for specific AI models
3. Analyze and refine prompt performance
"""

class CodeAgent:
    """Agent that produces code-lab material (exercise code) for a workshop."""

    def generate_code(self, content):
        """Return code-lab text for *content*; falls back to mock labs on error or no client."""
        if not openai_client:
            return self._mock_code(content)
        try:
            response = openai_client.chat.completions.create(
                model="gpt-4-turbo",
                messages=[
                    {"role": "system", "content": "You create practical code labs for technical workshops."},
                    {"role": "user", "content": (
                        f"Create a Jupyter notebook with code exercises for this workshop: {json.dumps(content)}. "
                        "Include: setup instructions, practical exercises with solutions, "
                        "and real-world implementation examples. Use Python with popular AI libraries."
                    )}
                ],
                temperature=0.3,
                max_tokens=2000
            )
            return response.choices[0].message.content
        except Exception as e:
            st.error(f"Code generation error: {e}")
            return self._mock_code(content)

    def _mock_code(self, content):
        """Canned code-lab text used when the OpenAI client is unavailable.

        BUGFIX: the embedded sample's dict braces are doubled ({{ }}) so the
        f-string emits them literally; previously {"role": ...} was parsed as
        a replacement field and raised ValueError at runtime.
        """
        return f"""
# {content['workshop_title']} - Code Labs

import openai
import pandas as pd

## Exercise 1: Basic Prompt Engineering
def generate_response(prompt):
    response = openai.chat.completions.create(
        model="gpt-4",
        messages=[{{"role": "user", "content": prompt}}]
    )
    return response.choices[0].message.content

print(generate_response("Explain quantum computing in simple terms"))

## Exercise 2: Advanced Prompt Patterns
# TODO: Implement chain-of-thought prompting
# TODO: Create meta-prompts for complex tasks

## Real-World Implementation
# TODO: Build a customer support question classifier
"""

class DesignAgent:
    """Agent that requests an AI-generated slide background image."""

    def generate_design(self, slide_content):
        """Return the URL of a generated background image, or None on failure/no client."""
        if openai_client is None:
            return None
        try:
            # Only the first 500 characters of the slide text seed the image prompt.
            result = openai_client.images.generate(
                prompt=f"Create a professional slide background for a corporate AI workshop about: {slide_content[:500]}",
                n=1,
                size="1024x1024",
            )
            return result.data[0].url
        except Exception as err:
            st.error(f"Design generation error: {err}")
            return None

class VoiceoverAgent:
    """Agent that narrates workshop text through the ElevenLabs TTS API."""

    def __init__(self):
        # Module-level shared client; None when ELEVENLABS_API_KEY is unset.
        self.client = eleven_client
        self.default_voice_id = "9BWtsMINqrJLrRacOk9x"  # Default fallback voice
        self.model_id = "eleven_monolingual_v1"

    def get_voices(self):
        """Return available voices as [{'voice_id', 'name'}, ...]; [] without a client or on error."""
        if self.client is None:
            return []
        try:
            # NOTE(review): assumes the SDK exposes voices.list() returning an
            # iterable of voice objects — confirm against the installed
            # elevenlabs version (some releases use voices.get_all()).
            available = self.client.voices.list()
            return [{"voice_id": voice.voice_id, "name": voice.name} for voice in available]
        except Exception as err:
            st.error(f"Voice loading error: {err}")
            return []

    def generate_voiceover(self, text, voice_id=None):
        """Synthesize *text* with the given (or default) voice; None without a client or on error."""
        if self.client is None:
            return None
        chosen_voice = voice_id or self.default_voice_id
        try:
            return self.client.text_to_speech.convert(
                text=text,
                voice_id=chosen_voice,
                model_id=self.model_id,
                voice_settings={
                    "stability": 0.7,
                    "similarity_boost": 0.8,
                    "style": 0.5,
                    "use_speaker_boost": True,
                },
            )
        except Exception as err:
            st.error(f"Voiceover generation error: {err}")
            return None

# Initialize agents
# One module-level instance of each pipeline stage. Streamlit re-executes the
# whole script on every user interaction, so these are rebuilt per rerun;
# construction is cheap (only VoiceoverAgent.__init__ does any work, and it
# just captures the shared module-level client).
topic_agent = TopicAgent()
content_agent = ContentAgent()
slide_agent = SlideAgent()
code_agent = CodeAgent()
design_agent = DesignAgent()
voiceover_agent = VoiceoverAgent()

# =====================
# STREAMLIT APPLICATION
# =====================

# Page-level configuration; Streamlit requires this to be the first
# st.* call in the script, before any widgets or markdown are rendered.
st.set_page_config(
    page_title="Workshop in a Box Pro",
    layout="wide",
    initial_sidebar_state="expanded"
)

# Custom CSS
st.markdown("""
<style>
    .stApp {
        background: linear-gradient(135deg, #6a11cb 0%, #2575fc 100%);
        color: #fff;
    }
    .stTextInput>div>div>input {
        color: #333 !important;
        background-color: #fff !important;
    }
    .stSlider>div>div>div>div {
        background-color: rgba(255,255,255,0.1) !important;
        color: white !important;
    }
    .stButton>button {
        background: linear-gradient(to right, #00b09b, #96c93d) !important;
        color: white !important;
        border: none;
        border-radius: 30px;
        padding: 10px 25px;
        font-size: 16px;
        font-weight: bold;
    }
    .stDownloadButton>button {
        background: linear-gradient(to right, #ff5e62, #ff9966) !important;
    }
    .stExpander {
        background-color: rgba(0,0,0,0.2) !important;
        border-radius: 10px;
        padding: 15px;
    }
    .audio-player {
        margin: 15px 0;
        border-radius: 10px;
        background: rgba(255,255,255,0.1);
        padding: 15px;
    }
    .voice-option {
        display: flex;
        align-items: center;
        margin: 5px 0;
        padding