import gradio as gr
from huggingface_hub import InferenceClient
import tempfile
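# Requires the gradio and huggingface_hub packages: pip install gradio huggingface_hub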

# Initialize the Hugging Face Inference Client
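# The client picks up an API token from the HF_TOKEN environment variable or the
# cached `huggingface-cli login` credentials, if one is configured.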
client = InferenceClient()

# Function to generate dynamic lessons, examples, or code
def generate_content(selected_topic, subtopic, complexity, input_text, examples_count, output_type):
    """
    Generate content dynamically based on user input, with support for LaTeX rendering and file downloads.

    Args:
        selected_topic (str): Topic selected by the user.
        subtopic (str): Specific subtopic for generation.
        complexity (str): User expertise level.
        input_text (str): Additional input context.
        examples_count (int): Number of examples to generate.
        output_type (str): Desired output format (Plain Text, LaTeX, or Downloadable).

    Returns:
        dict or str: Generated content in the desired format.
    """
    # Build the prompt dynamically, appending the extra context only when it is provided
    if input_text.strip():
        prompt = (
            f"Generate {examples_count} {complexity.lower()}-level {selected_topic.lower()} examples, lessons, "
            f"or problems related to {subtopic}. Context: {input_text}"
        )
    else:
        prompt = (
            f"Generate {examples_count} {complexity.lower()}-level {selected_topic.lower()} lessons "
            f"or problems focused on {subtopic}."
        )
    messages = [{"role": "user", "content": prompt}]

    try:
        # Generate the content using the model
        response = client.chat.completions.create(
            model="Qwen/Qwen2.5-Coder-32B-Instruct",
            messages=messages,
            temperature=0.5,
            max_tokens=1024,
            top_p=0.7
        )
        content = response.choices[0].message.content

        # Adjust the output based on the selected type
        if output_type == "LaTeX":
            # Wrap content in LaTeX delimiters for rendering
            latex_content = f"$$\n{content}\n$$"
            return {"content": latex_content, "latex": True}
        elif output_type == "Downloadable":
            # Write the content to a temporary .txt file and return its path for download
            with tempfile.NamedTemporaryFile(delete=False, suffix=".txt", mode="w", encoding="utf-8") as temp_file:
                temp_file.write(content)
            return {"file": temp_file.name}
        else:
            return content
    except Exception as e:
        return f"Error: {e}"

# Create the Gradio interface
with gr.Blocks() as app:
    # App Title and Instructions
    gr.Markdown("## 🌟 Advanced STEM and Code Generator with LaTeX Rendering and Downloads")

    with gr.Row():
        # Input Section
        with gr.Column():
            selected_topic = gr.Radio(
                choices=["Math", "STEM", "Code Generation"],
                label="Select a Topic",
                value="Math"
            )
            subtopic = gr.Textbox(
                label="Subtopic",
                placeholder="E.g., Algebra, Physics, Sorting Algorithms"
            )
            complexity = gr.Radio(
                choices=["Beginner", "Intermediate", "Advanced"],
                label="Expertise Level",
                value="Beginner"
            )
            input_text = gr.Textbox(
                label="Additional Context",
                placeholder="E.g., 'Explain integration basics' or 'Generate Python code for searching.'",
                lines=3
            )
            examples_count = gr.Slider(
                minimum=1,
                maximum=5,
                step=1,
                label="Number of Examples",
                value=1
            )
            output_type = gr.Radio(
                choices=["Plain Text", "LaTeX", "Downloadable"],
                label="Output Format",
                value="Plain Text"
            )
            generate_button = gr.Button("Generate Content")

        # Output Section
        with gr.Column():
            gr.Markdown("### πŸ“ Generated Output (Supports LaTeX)")
            output_text = gr.Markdown(label="Generated Content")
            download_button = gr.File(label="Download File (if applicable)")

    # Connect the generate function to the button
    def update_output(result):
        """Map the generator's result onto the two output components (text, file)."""
        if isinstance(result, dict):
            if result.get("latex"):
                return result["content"], None
            if "file" in result:
                return "File ready for download.", result["file"]
        return result, None

    def generate_and_format(selected_topic, subtopic, complexity, input_text, examples_count, output_type):
        """Run generation, then format the result for both outputs (Button.click has no postprocess callback)."""
        result = generate_content(selected_topic, subtopic, complexity, input_text, examples_count, output_type)
        return update_output(result)

    generate_button.click(
        fn=generate_and_format,
        inputs=[selected_topic, subtopic, complexity, input_text, examples_count, output_type],
        outputs=[output_text, download_button],
    )

    # Feedback Section
    feedback_label = gr.Label(value="Was this content helpful?")
    feedback_rating = gr.Radio(
        choices=["Yes", "No"],
        label="Feedback",
        value="Yes"
    )
    feedback_button = gr.Button("Submit Feedback")

    def collect_feedback(feedback):
        return f"Thank you for your feedback: {feedback}"

    feedback_button.click(
        fn=collect_feedback,
        inputs=[feedback_rating],
        outputs=[feedback_label]
    )

# Launch the Gradio app
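# (pass share=True to launch() if a temporary public link is needed)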
app.launch()