#!/usr/bin/env python3
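"""EduAI: an interactive Streamlit tutoring app.

Generates a lesson, a comprehension question, and feedback using Groq or
Hugging Face LLMs, with optional lesson visuals (a Stable Diffusion 3.5
Space) and narrated slideshow videos (a multilingual TTS Space + moviepy).
"""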
import streamlit as st
from gradio_client import Client
from huggingface_hub import InferenceClient
from groq import Groq
from PIL import Image
import moviepy.editor as mp
import os
from dotenv import load_dotenv
import json
# Load environment variables
load_dotenv()
# Constants
HF_TOKEN = os.getenv("HF_TOKEN")
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
IMAGE_GENERATION_SPACE_NAME = "stabilityai/stable-diffusion-3.5-large-turbo"
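# HF_TOKEN authenticates Hugging Face Space and Inference API calls;
# GROQ_API_KEY is used for Groq chat completions. Both come from .env.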
# Initialize Groq client with minimal parameters
try:
groq_client = Groq(api_key=GROQ_API_KEY)
except Exception as e:
st.error(f"Failed to initialize Groq client: {e}")
groq_client = None
# LLM Models (free options)
LLM_MODELS = {
"Mixtral 8x7B (Groq)": "mixtral-8x7b-32768",
"Mistral 7B (HF)": "mistralai/Mixtral-7B-Instruct-v0.1",
"LLaMA 13B (HF)": "meta-llama/Llama-13b-hf" # Note: May require approval; replace if needed
}
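# Parse a model response that should be JSON. Assumption: some models wrap
# their JSON in ```json fences, so strip any code fences before json.loads.
def parse_json_response(text):
    text = text.strip()
    if text.startswith("```"):
        # Drop the opening fence line (e.g. ```json) and the closing ```
        text = text.split("\n", 1)[1] if "\n" in text else ""
        text = text.rsplit("```", 1)[0]
    return json.loads(text)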
# Utility Functions
def generate_tutor_output(subject, difficulty, student_input, model):
prompt = f"""
You are an expert tutor in {subject} at the {difficulty} level.
The student has provided the following input: "{student_input}"
Please generate:
1. A brief, engaging lesson on the topic (2-3 paragraphs)
2. A thought-provoking question to check understanding
3. Constructive feedback on the student's input
Format your response as a JSON object with keys: "lesson", "question", "feedback"
"""
if model.startswith("mixtral") and groq_client: # Groq model
try:
            completion = groq_client.chat.completions.create(
                messages=[{
                    "role": "system",
                    "content": f"You are the world's best AI tutor for {subject}, renowned for clear, engaging explanations."
                }, {
                    "role": "user",
                    "content": prompt
                }],
                model=model,
                max_tokens=1000,
                # JSON mode keeps the reply machine-parseable for the parse below
                response_format={"type": "json_object"}
            )
            return parse_json_response(completion.choices[0].message.content)
except Exception as e:
st.error(f"Groq error: {e}")
return {"lesson": "Error generating lesson", "question": "N/A", "feedback": "N/A"}
    else:  # Hugging Face models via the serverless Inference API
        try:
            # gradio_client only talks to Gradio apps; the Inference API needs
            # huggingface_hub.InferenceClient instead
            client = InferenceClient(model=model, token=HF_TOKEN)
            response = client.text_generation(prompt, max_new_tokens=1000)
            return parse_json_response(response)
        except Exception as e:
            st.warning(f"HF model {model} failed ({e}), falling back to Mixtral.")
            if groq_client:
                return generate_tutor_output(subject, difficulty, student_input, "mixtral-8x7b-32768")
            return {"lesson": "Error generating lesson", "question": "N/A", "feedback": "N/A"}
def generate_image(prompt, path='temp_image.png'):
try:
client = Client(IMAGE_GENERATION_SPACE_NAME, hf_token=HF_TOKEN)
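        # Assumption: the Space exposes a /predict endpoint that returns an
        # image filepath; if this errors, inspect the real endpoint names and
        # parameters with client.view_api() and adjust the call below.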
result = client.predict(
prompt=prompt,
width=512,
height=512,
api_name="/predict"
)
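        # Some Spaces return a (filepath, seed) tuple rather than a bare path
        if isinstance(result, (list, tuple)):
            result = result[0]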
image = Image.open(result)
image.save(path)
return path
except Exception as e:
st.error(f"Error generating image: {e}")
return None
def generate_video(images, audio_text, language, speaker, path='temp_video.mp4'):
try:
audio_client = Client("habib926653/Multilingual-TTS")
audio_result = audio_client.predict(
text=audio_text,
language_code=language,
speaker=speaker,
api_name="/text_to_speech_edge"
)
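        # Assumption: /text_to_speech_edge returns (text, audio_filepath); copy
        # the audio to a stable local path so moviepy can read it reliably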
audio_file = audio_result[1]
with open(audio_file, 'rb') as f:
audio_bytes = f.read()
audio_path = "temp_audio.mp3"
with open(audio_path, 'wb') as f:
f.write(audio_bytes)
audio_clip = mp.AudioFileClip(audio_path)
        # Count only successfully generated images so per-image timing is correct
        valid_images = [img for img in images if img]
        if not valid_images:
            raise ValueError("No images available for the video")
        duration_per_image = audio_clip.duration / len(valid_images)
        image_clips = [mp.ImageClip(img).set_duration(duration_per_image) for img in valid_images]
video = mp.concatenate_videoclips(image_clips, method="compose").set_audio(audio_clip)
video.write_videofile(path, fps=24, codec='libx264')
return path
except Exception as e:
st.error(f"Error generating video: {e}")
return None
# Streamlit App
def main():
st.markdown("<h1 style='text-align: center;'>EduAI: Your Interactive Tutor</h1>", unsafe_allow_html=True)
st.markdown("<p style='text-align: center;'>Learn, Ask, Visualize! ❤️</p>", unsafe_allow_html=True)
subject = st.selectbox("Choose Subject:", ["Math", "Science", "History", "Literature", "Code", "AI"])
difficulty = st.selectbox("Difficulty Level:", ["Beginner", "Intermediate", "Advanced"])
model = st.selectbox("Choose LLM Model:", list(LLM_MODELS.keys()))
student_input = st.text_area("Your Question/Input (max 1500 chars):", max_chars=1500)
if 'tutor_response' not in st.session_state:
st.session_state.tutor_response = None
if st.button("Generate Answer & Question"):
if student_input:
with st.spinner("Generating your lesson..."):
response = generate_tutor_output(subject, difficulty, student_input, LLM_MODELS[model])
st.session_state.tutor_response = response
else:
st.warning("Please provide an input!")
if st.session_state.tutor_response:
st.markdown("### Lesson")
st.write(st.session_state.tutor_response["lesson"])
st.markdown("### Comprehension Question")
st.write(st.session_state.tutor_response["question"])
st.markdown("### Feedback")
st.write(st.session_state.tutor_response["feedback"])
col1, col2 = st.columns(2)
with col1:
if st.button("Generate Image"):
with st.spinner("Creating image..."):
image_path = generate_image(st.session_state.tutor_response["lesson"])
if image_path:
st.image(image_path, caption="Visual of your lesson")
with col2:
if st.button("Generate Video"):
with st.spinner("Creating video..."):
audio_client = Client("habib926653/Multilingual-TTS")
speakers_response = audio_client.predict(language="English", api_name="/get_speakers")
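                    # Assumption: /get_speakers returns a Gradio dropdown-update
                    # dict whose "choices" is a list of (label, value) pairs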
speaker = speakers_response["choices"][0][0]
images = [generate_image(st.session_state.tutor_response["lesson"])]
video_path = generate_video(images, st.session_state.tutor_response["lesson"], "English", speaker)
if video_path:
st.video(video_path)
st.markdown("---")
st.markdown("<p style='text-align: center;'>Built for learning, powered by AI!</p>", unsafe_allow_html=True)
if __name__ == "__main__":
    main()