CodeOpt / app.py
import gradio as gr
from huggingface_hub import InferenceClient
import tempfile
# Initialize the Hugging Face Inference Client
client = InferenceClient()
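# NOTE: InferenceClient reads credentials from the HF_TOKEN environment
# variable or a cached `huggingface-cli login` token, so no key is passed here.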

# Function to generate dynamic lessons, examples, or code
def generate_content(selected_topic, subtopic, complexity, input_text, examples_count, output_type):
    """
    Generate content dynamically based on user input, with support for LaTeX
    rendering and file downloads.

    Args:
        selected_topic (str): Topic selected by the user.
        subtopic (str): Specific subtopic for generation.
        complexity (str): User expertise level.
        input_text (str): Additional input context.
        examples_count (int): Number of examples to generate.
        output_type (str): Desired output format (Plain Text, LaTeX, or Downloadable).

    Returns:
        dict or str: Generated content in the desired format.
    """
    # Build the prompt dynamically, folding in extra context when provided
    if input_text.strip():
        prompt = (
            f"Generate {examples_count} {complexity.lower()}-level {selected_topic.lower()} "
            f"examples, lessons, or problems related to {subtopic}. Context: {input_text}"
        )
    else:
        prompt = (
            f"Generate {examples_count} {complexity.lower()}-level {selected_topic.lower()} "
            f"lessons or problems focused on {subtopic}."
        )
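    # With the defaults (topic "Math", level "Beginner", one example, no extra
    # context), the second branch yields, e.g.:
    #   "Generate 1 beginner-level math lessons or problems focused on Algebra."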
    messages = [{"role": "user", "content": prompt}]

    try:
        # Generate the content using the model
        response = client.chat.completions.create(
            model="Qwen/Qwen2.5-Coder-32B-Instruct",
            messages=messages,
            temperature=0.5,
            max_tokens=1024,
            top_p=0.7
        )
        content = response.choices[0].message.content

        # Adjust the output based on the selected type
        if output_type == "LaTeX":
            # Wrap content in LaTeX delimiters; gr.Markdown treats $$...$$
            # as display math by default, so no extra configuration is needed
            latex_content = f"$$\n{content}\n$$"
            return {"content": latex_content, "latex": True}
        elif output_type == "Downloadable":
            # Write the text to a named temp file; delete=False keeps it on
            # disk so Gradio can serve it for download
            with tempfile.NamedTemporaryFile(
                mode="w", delete=False, suffix=".txt", encoding="utf-8"
            ) as temp_file:
                temp_file.write(content)
            return {"file": temp_file.name}
        else:
            return content
    except Exception as e:
        return f"Error: {e}"

# Create the Gradio interface
with gr.Blocks() as app:
    # App Title and Instructions
    gr.Markdown("## 🌟 Advanced STEM and Code Generator with LaTeX Rendering and Downloads")

    with gr.Row():
        # Input Section
        with gr.Column():
            selected_topic = gr.Radio(
                choices=["Math", "STEM", "Code Generation"],
                label="Select a Topic",
                value="Math"
            )
            subtopic = gr.Textbox(
                label="Subtopic",
                placeholder="E.g., Algebra, Physics, Sorting Algorithms"
            )
            complexity = gr.Radio(
                choices=["Beginner", "Intermediate", "Advanced"],
                label="Expertise Level",
                value="Beginner"
            )
            input_text = gr.Textbox(
                label="Additional Context",
                placeholder="E.g., 'Explain integration basics' or 'Generate Python code for searching.'",
                lines=3
            )
            examples_count = gr.Slider(
                minimum=1,
                maximum=5,
                step=1,
                label="Number of Examples",
                value=1
            )
            output_type = gr.Radio(
                choices=["Plain Text", "LaTeX", "Downloadable"],
                label="Output Format",
                value="Plain Text"
            )
            generate_button = gr.Button("Generate Content")

        # Output Section
        with gr.Column():
            gr.Markdown("### 📝 Generated Output (Supports LaTeX)")
            output_text = gr.Markdown(label="Generated Content")
            download_button = gr.File(label="Download File (if applicable)")

    # Connect the generate function to the button.
    # Note: .click() only accepts boolean preprocess/postprocess flags, not a
    # callable, so the mapping from generate_content's result to the two
    # output components is done in a wrapper function instead.
    def update_output(selected_topic, subtopic, complexity, input_text, examples_count, output_type):
        result = generate_content(
            selected_topic, subtopic, complexity, input_text, examples_count, output_type
        )
        if isinstance(result, dict):
            if result.get("latex"):
                return result["content"], None
            if "file" in result:
                return "File ready for download.", result["file"]
        return result, None

    generate_button.click(
        fn=update_output,
        inputs=[selected_topic, subtopic, complexity, input_text, examples_count, output_type],
        outputs=[output_text, download_button]
    )

    # Feedback Section
    feedback_label = gr.Label(value="Was this content helpful?")
    feedback_rating = gr.Radio(
        choices=["Yes", "No"],
        label="Feedback",
        value="Yes"
    )
    feedback_button = gr.Button("Submit Feedback")

    def collect_feedback(feedback):
        return f"Thank you for your feedback: {feedback}"

    feedback_button.click(
        fn=collect_feedback,
        inputs=[feedback_rating],
        outputs=[feedback_label]
    )

# Launch the Gradio app
if __name__ == "__main__":
    app.launch()
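
# When testing from a notebook or remote machine, Gradio can also serve a
# temporary public URL: app.launch(share=True)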