import tempfile

import gradio as gr
from huggingface_hub import InferenceClient

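# Assumes Hugging Face credentials are available in the environment (for example via
# the HF_TOKEN variable or a prior `huggingface-cli login`).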
client = InferenceClient()


def generate_content(selected_topic, subtopic, complexity, input_text, examples_count, output_type):
    """
    Generate content dynamically based on user input with support for LaTeX and file downloads.

    Args:
        selected_topic (str): The selected topic (e.g., Math, STEM, Code Generation).
        subtopic (str): A specific subtopic for content generation.
        complexity (str): Expertise level (Beginner, Intermediate, Advanced).
        input_text (str): Additional context or problem to solve.
        examples_count (int): Number of examples or outputs to generate.
        output_type (str): Desired output format (Plain Text, LaTeX, Downloadable).

    Returns:
        tuple: Generated content and file path (if applicable).
    """
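    # Build the prompt, folding in the user's extra context only when it is non-empty.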
    prompt = (
        f"Generate {examples_count} {complexity.lower()}-level {selected_topic.lower()} examples, lessons, "
        f"or problems related to {subtopic}. Context: {input_text}"
        if input_text.strip()
        else f"Generate {examples_count} {complexity.lower()}-level {selected_topic.lower()} lessons "
        f"or problems focused on {subtopic}."
    )

    try:
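        # Send the prompt to the hosted model through the chat-completion endpoint.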
        messages = [{"role": "user", "content": prompt}]
        response = client.chat.completions.create(
            model="Qwen/Qwen2.5-Coder-32B-Instruct",
            messages=messages,
            temperature=0.5,
            max_tokens=1024,
            top_p=0.7
        )
        content = response.choices[0].message.content if response.choices else "No content generated."

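        # Shape the result according to the requested output format.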
        if output_type == "LaTeX":
            # Wrap the output in display-math delimiters so gr.Markdown renders it as LaTeX.
            latex_content = f"$$\n{content.strip()}\n$$"
            return latex_content, None
        elif output_type == "Downloadable":
            # Write the content to a temporary .txt file whose path feeds the gr.File component.
            with tempfile.NamedTemporaryFile(delete=False, suffix=".txt", mode="w", encoding="utf-8") as file:
                file.write(content)
            return "File generated successfully. Use the download button.", file.name
        else:
            return content, None
    except Exception as e:
        return f"Error during content generation: {e}", None


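# Build the Gradio interface: inputs on the left, generated output and download on the right.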
with gr.Blocks() as app:
    gr.Markdown("## π Advanced STEM and Code Generator with LaTeX and File Downloads")

    with gr.Row():
        with gr.Column():
            selected_topic = gr.Radio(
                choices=["Math", "STEM", "Code Generation"],
                label="Select a Topic",
                value="Math"
            )
            subtopic = gr.Textbox(
                label="Subtopic",
                placeholder="E.g., Algebra, Physics, Sorting Algorithms"
            )
            complexity = gr.Radio(
                choices=["Beginner", "Intermediate", "Advanced"],
                label="Expertise Level",
                value="Beginner"
            )
            input_text = gr.Textbox(
                label="Additional Context",
                placeholder="E.g., 'Explain integration basics' or 'Generate Python code for searching.'",
                lines=3
            )
            examples_count = gr.Slider(
                minimum=1,
                maximum=5,
                step=1,
                label="Number of Examples",
                value=1
            )
            output_type = gr.Radio(
                choices=["Plain Text", "LaTeX", "Downloadable"],
                label="Output Format",
                value="Plain Text"
            )
            generate_button = gr.Button("Generate Content")

        with gr.Column():
            gr.Markdown("### π Generated Output (Supports LaTeX)")
            output_text = gr.Markdown(label="Generated Content")
            download_button = gr.File(label="Download File (if applicable)")

    # generate_content already returns a (content, file_path) tuple that maps directly onto
    # the two output components, so no extra pre/post-processing hook is needed here.
    generate_button.click(
        fn=generate_content,
        inputs=[selected_topic, subtopic, complexity, input_text, examples_count, output_type],
        outputs=[output_text, download_button]
    )

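    # Lightweight feedback controls; the submitted rating is simply echoed back to the user.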
    feedback_label = gr.Label(value="Was this content helpful?")
    feedback_rating = gr.Radio(
        choices=["Yes", "No"],
        label="Feedback",
        value="Yes"
    )
    feedback_button = gr.Button("Submit Feedback")

    def collect_feedback(feedback):
        return f"Thank you for your feedback: {feedback}"

    feedback_button.click(
        fn=collect_feedback,
        inputs=[feedback_rating],
        outputs=[feedback_label]
    )

app.launch()