import gradio as gr
from huggingface_hub import InferenceClient

# Initialize the Hugging Face Inference Client
client = InferenceClient()
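# Note: with no arguments, InferenceClient picks up the locally cached
# Hugging Face token or the HF_TOKEN environment variable; pass
# token="hf_..." explicitly if neither is configured on the host.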

# Function to stream content for Math, STEM, and Code Generation
def generate_stream(selected_topic, input_text):
    """
    Generates dynamic lessons, solutions, or code snippets based on the selected topic.

    Args:
        selected_topic (str): The selected subject (e.g., Math, STEM, or Code Generation).
        input_text (str): Additional input for contextual content generation.

    Yields:
        str: Incremental output content.
    """
    # Create a topic-specific prompt
    prompt = (
        f"Generate a {selected_topic.lower()} lesson, problem, or example based on the following input: {input_text}"
        if input_text.strip() else
        f"Generate a beginner-level {selected_topic.lower()} lesson with examples."
    )
    messages = [{"role": "user", "content": prompt}]

    try:
        # Create a stream for generating content
        stream = client.chat.completions.create(
            model="Qwen/Qwen2.5-Coder-32B-Instruct",  # instruct-tuned model served via the HF Inference API
            messages=messages,
            temperature=0.5,  # moderate randomness: varied but focused output
            max_tokens=1024,  # upper bound on generated tokens
            top_p=0.7,        # nucleus sampling cutoff
            stream=True       # request incremental chunks rather than one full response
        )

        # Stream the generated content incrementally
        generated_content = ""
        for chunk in stream:
            delta = chunk.choices[0].delta.content
            if delta:  # the final chunk may carry no text (delta.content is None)
                generated_content += delta
                yield generated_content  # yield the accumulated content so far
    except Exception as e:
        yield f"Error: {e}"  # Display error if any issues occur

# Create the Gradio interface
with gr.Blocks() as app:
    # App Title and Instructions
    gr.Markdown("## πŸŽ“ STEM Learning and Code Generator")
    gr.Markdown(
        "Get dynamic lessons, problem-solving examples, or code snippets for Math, STEM, "
        "or Computer Science. Select a topic and get started!"
    )

    with gr.Row():
        # Input Section
        with gr.Column():
            selected_topic = gr.Radio(
                choices=["Math", "STEM", "Computer Science (Code Generation)"],
                label="Select a Topic",
                value="Math"  # Default selection
            )
            input_text = gr.Textbox(
                lines=2,
                label="Optional Input",
                placeholder="Provide additional context (e.g., 'Explain calculus basics' or 'Generate Python code for sorting')."
            )
            generate_button = gr.Button("Generate Content")

        # Output Section
        with gr.Column():
            gr.Markdown("### Generated Content")
            output_stream = gr.Textbox(
                lines=15,
                label="Output",
                interactive=False
            )

    # Link the generate button to the streaming function
    generate_button.click(
        fn=generate_stream,
        inputs=[selected_topic, input_text],
        outputs=output_stream
    )

# Launch the Gradio app
app.launch()
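
# Optional (standard Gradio parameters, not used here by default):
# app.launch(share=True) exposes a temporary public URL, and
# app.launch(server_port=7860) pins the local port when running outside Spaces.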