mgbam committed on
Commit
4febf46
·
verified ·
1 Parent(s): 473cdbb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +70 -49
app.py CHANGED
@@ -1,66 +1,87 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
4
- # Initialize Hugging Face Inference Client
5
  client = InferenceClient()
6
 
7
# Function to use a model for generating content dynamically
def generate_content(selected_path):
    """
    Generate a short lesson or problem for the selected learning path.

    Args:
        selected_path (str): The learning-path topic chosen in the UI.

    Returns:
        str: The model-generated lesson text, or an error message if the
        inference call fails.
    """
    prompt = f"Create a short lesson or problem for the topic: {selected_path}."
    try:
        # InferenceClient.text_generation takes the prompt as its first
        # positional argument (there is no `inputs=` keyword) and returns the
        # generated text as a plain string — not a dict — so the previous
        # `inputs=prompt` call and `response["generated_text"]` subscript
        # both raised at runtime.
        return client.text_generation(
            prompt,
            model="gpt2",  # Replace with a more suitable Hugging Face model for educational tasks
            max_new_tokens=150,
            temperature=0.7,
        )
    except Exception as e:
        # Surface inference failures in the UI instead of crashing the app.
        return f"Error: {e}"
20
 
21
# Function to evaluate user input (for example, math problem answers)
def evaluate_answer(user_input, selected_path):
    """
    Evaluate a user's answer with the model and return feedback.

    Args:
        user_input (str): The answer text entered by the user.
        selected_path (str): The learning path the answer belongs to.

    Returns:
        str: Model-generated feedback, or an error message if the
        inference call fails.
    """
    prompt = f"Evaluate the following response for {selected_path}:\n\nUser Answer: {user_input}\nProvide feedback."
    try:
        # Same fix as generate_content: text_generation takes the prompt
        # positionally and returns a str, so no `inputs=` keyword and no
        # `["generated_text"]` subscript.
        return client.text_generation(
            prompt,
            model="gpt2",  # Replace with a more appropriate evaluation model
            max_new_tokens=100,
            temperature=0.5,
        )
    except Exception as e:
        # Surface inference failures in the UI instead of crashing the app.
        return f"Error: {e}"
34
 
35
# Build the learning-platform UI and wire the buttons to the model helpers.
with gr.Blocks() as app:
    # Header text.
    gr.Markdown("## AI-Powered Learning Platform")
    gr.Markdown("Choose a topic to start learning:")

    # Static HTML progress indicator shown at the top of the page.
    gr.HTML(
        '<div style="width: 100%; background-color: lightgray; height: 5px; border-radius: 5px;">'
        '<div style="width: 25%; background-color: green; height: 100%;"></div></div>'
    )

    # Learning-path selector (defaults to Math).
    topic_selector = gr.Radio(
        choices=[
            "Math",
            "Science & Engineering",
            "Computer Science & Programming",
            "Data Science & Data Analysis",
        ],
        label="Select a Learning Path",
        value="Math",
    )

    # Action buttons side by side.
    with gr.Row():
        btn_generate = gr.Button("Generate Content")
        btn_submit = gr.Button("Submit Answer")

    # Content display, answer entry, and feedback display.
    content_box = gr.Textbox(lines=8, interactive=False, label="Generated Content")
    answer_box = gr.Textbox(lines=2, label="Your Answer")
    feedback_box = gr.Textbox(interactive=False, label="Feedback")

    # Hook each button up to its backing function.
    btn_generate.click(fn=generate_content, inputs=topic_selector, outputs=content_box)
    btn_submit.click(fn=evaluate_answer, inputs=[answer_box, topic_selector], outputs=feedback_box)

# Start the Gradio server.
app.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
4
+ # Initialize the Hugging Face Inference Client
5
  client = InferenceClient()
6
 
7
# Function to stream content for Math, STEM, and Code Generation
def generate_stream(selected_topic, input_text):
    """
    Generate dynamic lessons, solutions, or code snippets for the topic.

    Args:
        selected_topic (str): The selected subject (e.g., Math, STEM, or
            Code Generation).
        input_text (str): Additional input for contextual content generation;
            may be empty (or None, depending on the Gradio version).

    Yields:
        str: The accumulated output content, emitted incrementally.
    """
    # Build a topic-specific prompt, falling back to a beginner-level lesson
    # when no extra context was provided. Guard against input_text being
    # None, which would make .strip() raise AttributeError.
    prompt = (
        f"Generate a {selected_topic.lower()} lesson, problem, or example based on the following input: {input_text}"
        if input_text and input_text.strip() else
        f"Generate a beginner-level {selected_topic.lower()} lesson with examples."
    )
    messages = [{"role": "user", "content": prompt}]

    try:
        # Create a stream for generating content
        stream = client.chat.completions.create(
            model="Qwen/Qwen2.5-Coder-32B-Instruct",  # Streaming model
            messages=messages,
            temperature=0.5,
            max_tokens=1024,
            top_p=0.7,
            stream=True
        )

        # Stream the generated content incrementally.
        generated_content = ""
        for chunk in stream:
            # Role-only and end-of-stream chunks can carry
            # delta.content == None; concatenating that raised a TypeError
            # and aborted the stream mid-output. Only append real text.
            delta = chunk.choices[0].delta.content if chunk.choices else None
            if delta:
                generated_content += delta
                yield generated_content  # Yield content incrementally
    except Exception as e:
        yield f"Error: {e}"  # Display error if any issues occur
45
+
46
# Assemble the two-column Gradio interface and connect it to the streamer.
with gr.Blocks() as app:
    # Title and usage instructions.
    gr.Markdown("## 🎓 STEM Learning and Code Generator")
    gr.Markdown(
        "Get dynamic lessons, problem-solving examples, or code snippets for Math, STEM, "
        "or Computer Science. Select a topic and get started!"
    )

    with gr.Row():
        # Left column: topic choice, optional context, and the run button.
        with gr.Column():
            topic_choice = gr.Radio(
                choices=["Math", "STEM", "Computer Science (Code Generation)"],
                label="Select a Topic",
                value="Math",  # Math is pre-selected
            )
            context_box = gr.Textbox(
                lines=2,
                label="Optional Input",
                placeholder="Provide additional context (e.g., 'Explain calculus basics' or 'Generate Python code for sorting').",
            )
            run_button = gr.Button("Generate Content")

        # Right column: streamed output display.
        with gr.Column():
            gr.Markdown("### Generated Content")
            result_box = gr.Textbox(lines=15, label="Output", interactive=False)

    # Clicking the button streams generate_stream's output into the textbox.
    run_button.click(
        fn=generate_stream,
        inputs=[topic_choice, context_box],
        outputs=result_box,
    )

# Start the Gradio server.
app.launch()