File size: 4,144 Bytes
a14fe7d
473cdbb
a14fe7d
4febf46
473cdbb
a14fe7d
65151e2
3212e03
a14fe7d
65151e2
 
 
 
3212e03
65151e2
3212e03
65151e2
 
 
473cdbb
65151e2
4febf46
3212e03
 
65151e2
3212e03
473cdbb
4febf46
a14fe7d
4febf46
65151e2
4febf46
 
 
 
 
 
65151e2
4febf46
 
 
 
 
 
65151e2
4febf46
65151e2
4febf46
65151e2
a14fe7d
65151e2
3212e03
4febf46
3212e03
 
a14fe7d
4febf46
a14fe7d
4febf46
 
 
65151e2
4febf46
65151e2
4febf46
3212e03
 
 
 
 
4febf46
 
3212e03
65151e2
4febf46
3212e03
 
 
 
 
 
 
65151e2
a14fe7d
4febf46
 
 
3212e03
 
65151e2
 
 
3212e03
a14fe7d
65151e2
4febf46
 
3212e03
65151e2
4febf46
a14fe7d
3212e03
 
 
 
 
 
 
 
 
 
 
 
4febf46
a14fe7d
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
import gradio as gr
from huggingface_hub import InferenceClient

# Initialize the Hugging Face Inference Client.
# No model/token arguments are passed here, so the client relies on the
# environment's default authentication (e.g. HF_TOKEN) and the model is
# selected per-request in generate_stream below.
client = InferenceClient()

# Function to stream content for Math, STEM, and Code Generation
def generate_stream(selected_topic, subtopic, input_text, examples_count):
    """
    Generate dynamic lessons, solutions, or code snippets for the selected topic,
    streaming the output incrementally.

    Args:
        selected_topic (str): The selected subject (e.g., Math, STEM, or Code Generation).
        subtopic (str): Specific subtopic or category for more focused output.
        input_text (str): Additional input for contextual content generation.
        examples_count (int): Number of examples to generate.

    Yields:
        str: The accumulated content so far, once per streamed chunk.
    """
    # Build a topic-specific prompt; fall back to a beginner-level prompt
    # when no additional context was provided.
    prompt = (
        f"Generate {examples_count} detailed {selected_topic.lower()} examples, lessons, or problems "
        f"focused on {subtopic}. Input context: {input_text}"
        if input_text.strip() else
        f"Generate {examples_count} beginner-level {selected_topic.lower()} lessons or examples on {subtopic}."
    )
    messages = [{"role": "user", "content": prompt}]

    try:
        # Open a streaming chat-completion request.
        stream = client.chat.completions.create(
            model="Qwen/Qwen2.5-Coder-32B-Instruct",  # Streaming model
            messages=messages,
            temperature=0.5,
            max_tokens=1024,
            top_p=0.7,
            stream=True
        )

        # Accumulate and re-yield the full transcript after each chunk.
        generated_content = ""
        for chunk in stream:
            # Some streamed chunks (e.g. the final one) carry no choices or a
            # None delta.content; concatenating None would raise TypeError and
            # abort the stream, so skip those chunks explicitly.
            if not chunk.choices:
                continue
            delta = chunk.choices[0].delta.content
            if delta:
                generated_content += delta
                yield generated_content  # Yield content incrementally
    except Exception as e:
        yield f"Error: {e}"  # Display error if any issues occur

# Create the Gradio interface.
# Layout: a two-column Blocks app — inputs (topic, subtopic, context, example
# count) on the left, streamed output and an export button on the right.
with gr.Blocks() as app:
    # App Title and Instructions
    gr.Markdown("## 🎓 Enhanced STEM Learning and Code Generator")
    gr.Markdown(
        "Generate tailored lessons, problem-solving examples, or code snippets for Math, STEM, "
        "or Computer Science. Select a topic, subtopic, and customize your experience!"
    )

    with gr.Row():
        # Input Section
        with gr.Column():
            selected_topic = gr.Radio(
                choices=["Math", "STEM", "Computer Science (Code Generation)"],
                label="Select a Topic",
                value="Math"  # Default selection
            )
            subtopic = gr.Textbox(
                lines=1,
                label="Subtopic",
                placeholder="Specify a subtopic (e.g., Algebra, Physics, Data Structures)."
            )
            input_text = gr.Textbox(
                lines=2,
                label="Context or Additional Input",
                placeholder="Provide additional context (e.g., 'Explain calculus basics' or 'Generate Python code for sorting')."
            )
            examples_count = gr.Slider(
                minimum=1,
                maximum=5,
                value=1,
                step=1,
                label="Number of Examples"
            )
            generate_button = gr.Button("Generate Content")

        # Output Section
        with gr.Column():
            gr.Markdown("### Generated Content")
            output_stream = gr.Textbox(
                lines=20,
                label="Output",
                interactive=False
            )
            export_button = gr.Button("Export Code (if applicable)")

    # Link the generate button to the streaming function; generate_stream is a
    # generator, so the textbox updates incrementally as chunks arrive.
    generate_button.click(
        fn=generate_stream,
        inputs=[selected_topic, subtopic, input_text, examples_count],
        outputs=output_stream
    )

    # Export functionality for code snippets
    def export_code(content):
        """Write the current output to generated_code.py and report success.

        NOTE: this replaces the output textbox content with the status
        message, since the button targets output_stream as its output.
        """
        # Explicit UTF-8: the platform default encoding (e.g. cp1252 on
        # Windows) can raise UnicodeEncodeError on non-ASCII model output.
        with open("generated_code.py", "w", encoding="utf-8") as file:
            file.write(content)
        return "Code exported successfully to generated_code.py!"

    export_button.click(
        fn=export_code,
        inputs=[output_stream],
        outputs=[output_stream]
    )

# Launch the Gradio app
app.launch()