# CodeOpt / app.py — Gradio app for generating STEM lessons, examples, and code snippets.
import gradio as gr
from huggingface_hub import InferenceClient
# Initialize the Hugging Face Inference Client
# NOTE(review): no model or token is passed here, so this relies on ambient
# HF credentials (e.g. the HF_TOKEN environment variable) and per-call model
# selection — confirm against the deployment configuration.
client = InferenceClient()
# Function to stream content for Math, STEM, and Code Generation
def generate_stream(selected_topic, subtopic, input_text, examples_count):
    """
    Stream dynamically generated lessons, solutions, or code snippets.

    Args:
        selected_topic (str): The selected subject (e.g., Math, STEM, or Code Generation).
        subtopic (str): Specific subtopic or category for more focused output.
        input_text (str): Additional input for contextual content generation;
            when blank, a beginner-level prompt is used instead.
        examples_count (int): Number of examples to generate.

    Yields:
        str: The cumulative generated content so far. The full text is
        re-yielded on each chunk so the UI textbox always shows the
        complete answer-in-progress.
    """
    # Build a context-aware prompt when the user supplied extra input,
    # otherwise fall back to a beginner-level prompt.
    if input_text.strip():
        prompt = (
            f"Generate {examples_count} detailed {selected_topic.lower()} examples, lessons, or problems "
            f"focused on {subtopic}. Input context: {input_text}"
        )
    else:
        prompt = f"Generate {examples_count} beginner-level {selected_topic.lower()} lessons or examples on {subtopic}."
    messages = [{"role": "user", "content": prompt}]
    try:
        # Create a stream for generating content
        stream = client.chat.completions.create(
            model="Qwen/Qwen2.5-Coder-32B-Instruct",  # Streaming model
            messages=messages,
            temperature=0.5,
            max_tokens=1024,
            top_p=0.7,
            stream=True,
        )
        # Stream the generated content incrementally.
        generated_content = ""
        for chunk in stream:
            # BUGFIX: the final streamed chunk (and some keep-alive chunks)
            # carry delta.content = None; concatenating None to a str would
            # raise TypeError and abort the stream mid-answer.
            piece = chunk.choices[0].delta.content
            if piece:
                generated_content += piece
                yield generated_content  # Yield content incrementally
    except Exception as e:
        # Surface API/network failures in the output box instead of
        # crashing the Gradio event handler.
        yield f"Error: {e}"
# Create the Gradio interface
with gr.Blocks() as app:
    # App Title and Instructions
    gr.Markdown("## 🎓 Enhanced STEM Learning and Code Generator")
    gr.Markdown(
        "Generate tailored lessons, problem-solving examples, or code snippets for Math, STEM, "
        "or Computer Science. Select a topic, subtopic, and customize your experience!"
    )
    with gr.Row():
        # Input Section
        with gr.Column():
            selected_topic = gr.Radio(
                choices=["Math", "STEM", "Computer Science (Code Generation)"],
                label="Select a Topic",
                value="Math"  # Default selection
            )
            subtopic = gr.Textbox(
                lines=1,
                label="Subtopic",
                placeholder="Specify a subtopic (e.g., Algebra, Physics, Data Structures)."
            )
            input_text = gr.Textbox(
                lines=2,
                label="Context or Additional Input",
                placeholder="Provide additional context (e.g., 'Explain calculus basics' or 'Generate Python code for sorting')."
            )
            examples_count = gr.Slider(
                minimum=1,
                maximum=5,
                value=1,
                step=1,
                label="Number of Examples"
            )
            generate_button = gr.Button("Generate Content")
        # Output Section
        with gr.Column():
            gr.Markdown("### Generated Content")
            output_stream = gr.Textbox(
                lines=20,
                label="Output",
                interactive=False
            )
            export_button = gr.Button("Export Code (if applicable)")
            # Dedicated status line for the export action. BUGFIX: the
            # original wired the export result back into `output_stream`,
            # which overwrote the generated content with the success
            # message — destroying the very text the user just exported.
            export_status = gr.Textbox(
                label="Export Status",
                interactive=False
            )
    # Link the generate button to the streaming function
    generate_button.click(
        fn=generate_stream,
        inputs=[selected_topic, subtopic, input_text, examples_count],
        outputs=output_stream
    )

    # Export functionality for code snippets
    def export_code(content):
        """Write the generated content to generated_code.py and return a status message."""
        # BUGFIX: explicit UTF-8 — the platform default encoding (e.g.
        # cp1252 on Windows) can fail on non-ASCII model output.
        with open("generated_code.py", "w", encoding="utf-8") as file:
            file.write(content)
        return "Code exported successfully to generated_code.py!"

    export_button.click(
        fn=export_code,
        inputs=[output_stream],
        outputs=[export_status]  # keep the generated content intact
    )

# Launch the Gradio app only when run as a script (e.g. by HF Spaces),
# not when this module is imported.
if __name__ == "__main__":
    app.launch()