import tempfile

import gradio as gr
from huggingface_hub import InferenceClient

# Initialize the Hugging Face Inference Client (credentials, if any, are
# picked up from the environment / local HF cache).
client = InferenceClient()


def generate_content(selected_topic, subtopic, complexity, input_text, examples_count, output_type):
    """
    Generate content dynamically based on user input with support for LaTeX
    and file downloads.

    Args:
        selected_topic (str): The selected topic (e.g., Math, STEM, Code Generation).
        subtopic (str): A specific subtopic for content generation.
        complexity (str): Expertise level (Beginner, Intermediate, Advanced).
        input_text (str): Additional context or problem to solve.
        examples_count (int): Number of examples or outputs to generate.
        output_type (str): Desired output format (Plain Text, LaTeX, Downloadable).

    Returns:
        tuple: (generated content or status/error message,
                file path when output_type == "Downloadable", else None).
    """
    # Build the prompt; include the extra context only when the user supplied any.
    if input_text.strip():
        prompt = (
            f"Generate {examples_count} {complexity.lower()}-level {selected_topic.lower()} "
            f"examples, lessons, or problems related to {subtopic}. Context: {input_text}"
        )
    else:
        prompt = (
            f"Generate {examples_count} {complexity.lower()}-level {selected_topic.lower()} "
            f"lessons or problems focused on {subtopic}."
        )

    try:
        # Generate content using the hosted model.
        messages = [{"role": "user", "content": prompt}]
        response = client.chat.completions.create(
            model="Qwen/Qwen2.5-Coder-32B-Instruct",
            messages=messages,
            temperature=0.5,
            max_tokens=1024,
            top_p=0.7,
        )

        # Extract content from the response.
        content = response.choices[0].message.content if response.choices else "No content generated."

        if output_type == "LaTeX":
            # Wrap in display-math delimiters for the Markdown output component.
            # NOTE(review): gr.Markdown renders LaTeX only when latex_delimiters
            # is configured on the component -- confirm the front-end setup.
            return f"$$\n{content.strip()}\n$$", None

        if output_type == "Downloadable":
            # FIX: the original created a NamedTemporaryFile and then opened the
            # same path a second time with open(), leaking the first handle.
            # Write through the tempfile handle directly and close it
            # deterministically; delete=False keeps the file for gr.File.
            with tempfile.NamedTemporaryFile(
                mode="w", suffix=".txt", delete=False, encoding="utf-8"
            ) as tmp:
                tmp.write(content)
                file_path = tmp.name
            return "File generated successfully. Use the download button.", file_path

        # Default: plain text.
        return content, None
    except Exception as e:
        # Surface API/IO failures to the UI instead of crashing the app.
        return f"Error during content generation: {e}", None


# Create the Gradio interface.
with gr.Blocks() as app:
    # App title and description.
    gr.Markdown("## 🌟 Advanced STEM and Code Generator with LaTeX and File Downloads")

    with gr.Row():
        # Input section.
        with gr.Column():
            selected_topic = gr.Radio(
                choices=["Math", "STEM", "Code Generation"],
                label="Select a Topic",
                value="Math",
            )
            subtopic = gr.Textbox(
                label="Subtopic",
                placeholder="E.g., Algebra, Physics, Sorting Algorithms",
            )
            complexity = gr.Radio(
                choices=["Beginner", "Intermediate", "Advanced"],
                label="Expertise Level",
                value="Beginner",
            )
            input_text = gr.Textbox(
                label="Additional Context",
                placeholder="E.g., 'Explain integration basics' or 'Generate Python code for searching.'",
                lines=3,
            )
            examples_count = gr.Slider(
                minimum=1,
                maximum=5,
                step=1,
                label="Number of Examples",
                value=1,
            )
            output_type = gr.Radio(
                choices=["Plain Text", "LaTeX", "Downloadable"],
                label="Output Format",
                value="Plain Text",
            )
            generate_button = gr.Button("Generate Content")

        # Output section.
        with gr.Column():
            gr.Markdown("### 📝 Generated Output (Supports LaTeX)")
            output_text = gr.Markdown(label="Generated Content")
            download_button = gr.File(label="Download File (if applicable)")

    # FIX: the original passed preprocess=False and a *callable* as
    # `postprocess` to .click(); in Gradio both are booleans that control
    # payload (de)serialization, so the callable was invalid and
    # preprocess=False would hand raw payloads to generate_content.
    # generate_content already returns (text, file_path) matching the two
    # outputs, so no adapter function is needed.
    generate_button.click(
        fn=generate_content,
        inputs=[selected_topic, subtopic, complexity, input_text, examples_count, output_type],
        outputs=[output_text, download_button],
    )

    # Feedback section.
    feedback_label = gr.Label(value="Was this content helpful?")
    feedback_rating = gr.Radio(
        choices=["Yes", "No"],
        label="Feedback",
        value="Yes",
    )
    feedback_button = gr.Button("Submit Feedback")

    def collect_feedback(feedback):
        """Return an acknowledgement string for the submitted feedback."""
        return f"Thank you for your feedback: {feedback}"

    feedback_button.click(
        fn=collect_feedback,
        inputs=[feedback_rating],
        outputs=[feedback_label],
    )


# FIX: guard the launch so importing this module (e.g. for tests) does not
# start the web server.
if __name__ == "__main__":
    app.launch()