Update app.py
app.py
CHANGED
@@ -22,7 +22,7 @@ except Exception as e:
     logger.error("Failed to initialize Groq client: %s", str(e))
     raise

-# Load Text-to-Image Models (placeholders; adjust
+# Load Text-to-Image Models (placeholders; adjust if models are unavailable)
 model1 = gr.load("models/prithivMLmods/SD3.5-Turbo-Realism-2.0-LoRA", fallback=None)
 model2 = gr.load("models/Purz/face-projection", fallback=None)

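The diff keeps startup resilient by passing fallback=None to gr.load; whether that keyword is accepted depends on the Gradio version in the Space. A minimal sketch of an equivalent guard that works with plain gr.load (load_model_or_none is a hypothetical helper, not part of the file):

    import logging
    import gradio as gr

    logger = logging.getLogger(__name__)

    # Sketch only: wrap gr.load so an unreachable hosted model does not
    # crash app startup; callers must handle a None model.
    def load_model_or_none(repo_id):
        try:
            return gr.load(repo_id)
        except Exception as exc:
            logger.warning("Could not load %s: %s", repo_id, exc)
            return None

    model1 = load_model_or_none("models/prithivMLmods/SD3.5-Turbo-Realism-2.0-LoRA")
    model2 = load_model_or_none("models/Purz/face-projection")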
@@ -31,8 +31,8 @@ stop_event = threading.Event()

 # Function to generate tutor output (lesson, question, feedback)
 def generate_tutor_output(subject, difficulty, student_input):
-    if not subject
-    return '{"lesson": "Please
+    if not all([subject, difficulty, student_input]):
+        return '{"lesson": "Please fill in all fields.", "question": "", "feedback": ""}'

     prompt = f"""
     You are an expert tutor in {subject} at the {difficulty} level.
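The early return hands back a hand-written JSON string that process_output_text parses later. A small sketch of the same guard built with the standard-library json module, so quoting can never break the downstream parse (empty_tutor_payload is a hypothetical helper):

    import json

    def empty_tutor_payload(message):
        # Sketch: serialize the placeholder response instead of hand-writing JSON,
        # so escaping can never produce an unparseable string.
        return json.dumps({"lesson": message, "question": "", "feedback": ""})

    # e.g. inside generate_tutor_output:
    # if not all([subject, difficulty, student_input]):
    #     return empty_tutor_payload("Please fill in all fields.")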
@@ -96,7 +96,7 @@ def generate_images(text, selected_model):
 def generate_text_to_video(text):
     if not text:
         return "No text provided for video generation."
-
+
     try:
         # Generate narration using Groq (text-to-speech simulation)
         narration_prompt = f"Convert this text to a natural-sounding narration: {text}"
@@ -148,26 +148,29 @@ def generate_text_to_video(text):
         logger.error("Error generating video: %s", str(e))
         return f"Error generating video: {str(e)}"

-# Gradio interface
+# Set up the Gradio interface
 with gr.Blocks(title="AI Tutor with Visuals") as demo:
-    gr.Markdown("# 🎓 Your AI Tutor with Visuals &
+    gr.Markdown("# 🎓 Your AI Tutor with Visuals & Images")

-    # Text-based output
+    # Section for generating Text-based output
     with gr.Row():
         with gr.Column(scale=2):
             subject = gr.Dropdown(
                 ["Math", "Science", "History", "Literature", "Code", "AI"],
                 label="Subject",
+                info="Choose the subject of your lesson",
                 value="Math"
             )
             difficulty = gr.Radio(
                 ["Beginner", "Intermediate", "Advanced"],
                 label="Difficulty Level",
+                info="Select your proficiency level",
                 value="Beginner"
             )
             student_input = gr.Textbox(
                 placeholder="Type your query here...",
-                label="Your Input"
+                label="Your Input",
+                info="Enter the topic you want to learn"
             )
             submit_button_text = gr.Button("Generate Lesson & Question", variant="primary")

@@ -176,7 +179,7 @@ with gr.Blocks(title="AI Tutor with Visuals") as demo:
             question_output = gr.Markdown(label="Comprehension Question")
             feedback_output = gr.Markdown(label="Feedback")

-    # Visual output
+    # Section for generating Visual output
     with gr.Row():
         with gr.Column(scale=2):
             model_selector = gr.Radio(
@@ -195,27 +198,36 @@ with gr.Blocks(title="AI Tutor with Visuals") as demo:

     gr.Markdown("""
     ### How to Use
-    1. **Text Section**: Select a subject and difficulty, type your query, and click 'Generate Lesson & Question'.
-    2. **Visual Section**:
-    3.
+    1. **Text Section**: Select a subject and difficulty, type your query, and click 'Generate Lesson & Question' to get your personalized lesson, comprehension question, and feedback.
+    2. **Visual Section**: Select the model for image generation, then click 'Generate Visuals' to receive 3 variations of an image based on your topic. Click 'Generate Video with Voice' to create a video with narration.
+    3. Review the AI-generated content to enhance your learning experience!
     """)

     # Processing functions
     def process_output_text(subject, difficulty, student_input):
-        tutor_output = generate_tutor_output(subject, difficulty, student_input)
         try:
-
+            tutor_output = generate_tutor_output(subject, difficulty, student_input)
+            parsed = eval(tutor_output)  # Use json.loads in production for safety
             return parsed["lesson"], parsed["question"], parsed["feedback"]
         except Exception as e:
             logger.error("Error parsing tutor output: %s", str(e))
             return "Error parsing output", "No question available", "No feedback available"

     def process_output_visual(text, selected_model):
-
-
+        try:
+            images = generate_images(text, selected_model)
+            return images[0], images[1], images[2]
+        except Exception as e:
+            logger.error("Error in process_output_visual: %s", str(e))
+            return None, None, None

     def process_output_video(text):
-
+        try:
+            video_path = generate_text_to_video(text)
+            return video_path
+        except Exception as e:
+            logger.error("Error in process_output_video: %s", str(e))
+            return None

     # Button click handlers
     submit_button_text.click(
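The inline comment above already flags eval as a stopgap; a minimal sketch of the json.loads variant it suggests (parse_tutor_output is a hypothetical helper using only the standard library, with the same field names as the diff):

    import json

    def parse_tutor_output(tutor_output):
        # Sketch: parse the tutor's JSON string instead of eval().
        # Malformed output raises json.JSONDecodeError, which the caller's
        # existing except block would catch.
        parsed = json.loads(tutor_output)
        return parsed["lesson"], parsed["question"], parsed["feedback"]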
@@ -235,5 +247,4 @@ with gr.Blocks(title="AI Tutor with Visuals") as demo:
     )

 if __name__ == "__main__":
-    # Launch Gradio app
     demo.launch(server_name="0.0.0.0", server_port=7860, debug=True)
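The button click handlers themselves sit outside the changed hunks, so only their closing parenthesis appears above. For reference, a typical Gradio wiring for the text button, given the components defined in this diff, would look roughly like this (lesson_output is assumed; only question_output and feedback_output are visible in the hunks):

    # Sketch: connect the text button to its processing function.
    submit_button_text.click(
        fn=process_output_text,
        inputs=[subject, difficulty, student_input],
        outputs=[lesson_output, question_output, feedback_output],
    )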