mgbam commited on
Commit
43dcd71
·
verified ·
1 Parent(s): 3212e03

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +78 -59
app.py CHANGED
@@ -1,114 +1,133 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
3
 
4
  # Initialize the Hugging Face Inference Client
5
  client = InferenceClient()
6
 
7
# Function to stream content for Math, STEM, and Code Generation
def generate_stream(selected_topic, subtopic, input_text, examples_count):
    """
    Generates dynamic lessons, solutions, or code snippets based on the selected topic.

    Args:
        selected_topic (str): The selected subject (e.g., Math, STEM, or Code Generation).
        subtopic (str): Specific subtopic or category for more focused output.
        input_text (str): Additional input for contextual content generation.
        examples_count (int): Number of examples to generate.

    Yields:
        str: Incremental output content (the full text accumulated so far).
    """
    # Create a topic-specific prompt; fall back to a beginner-level prompt
    # when the user supplied no extra context.
    prompt = (
        f"Generate {examples_count} detailed {selected_topic.lower()} examples, lessons, or problems "
        f"focused on {subtopic}. Input context: {input_text}"
        if input_text.strip() else
        f"Generate {examples_count} beginner-level {selected_topic.lower()} lessons or examples on {subtopic}."
    )
    messages = [{"role": "user", "content": prompt}]

    try:
        # Create a stream for generating content
        stream = client.chat.completions.create(
            model="Qwen/Qwen2.5-Coder-32B-Instruct",  # Streaming model
            messages=messages,
            temperature=0.5,
            max_tokens=1024,
            top_p=0.7,
            stream=True
        )

        # Stream the generated content incrementally.
        generated_content = ""
        for chunk in stream:
            # BUGFIX: role-only / final chunks carry delta.content == None,
            # which previously crashed the `+=` with a TypeError.
            piece = chunk.choices[0].delta.content
            if piece:
                generated_content += piece
                yield generated_content  # Yield accumulated content incrementally
    except Exception as e:
        yield f"Error: {e}"  # Display error if any issues occur
48
 
49
# Create the Gradio interface
with gr.Blocks() as app:
    # App Title and Instructions (emoji was mojibake "πŸŽ“"; fixed encoding)
    gr.Markdown("## 🎓 Enhanced STEM Learning and Code Generator")
    gr.Markdown(
        "Generate tailored lessons, problem-solving examples, or code snippets for Math, STEM, "
        "or Computer Science. Select a topic, subtopic, and customize your experience!"
    )

    with gr.Row():
        # Input Section
        with gr.Column():
            selected_topic = gr.Radio(
                choices=["Math", "STEM", "Computer Science (Code Generation)"],
                label="Select a Topic",
                value="Math"  # Default selection
            )
            subtopic = gr.Textbox(
                lines=1,
                label="Subtopic",
                placeholder="Specify a subtopic (e.g., Algebra, Physics, Data Structures)."
            )
            input_text = gr.Textbox(
                lines=2,
                label="Context or Additional Input",
                placeholder="Provide additional context (e.g., 'Explain calculus basics' or 'Generate Python code for sorting')."
            )
            examples_count = gr.Slider(
                minimum=1,
                maximum=5,
                value=1,
                step=1,
                label="Number of Examples"
            )
            generate_button = gr.Button("Generate Content")

        # Output Section
        with gr.Column():
            gr.Markdown("### Generated Content")
            output_stream = gr.Textbox(
                lines=20,
                label="Output",
                interactive=False
            )
            export_button = gr.Button("Export Code (if applicable)")

    # Link the generate button to the streaming function
    generate_button.click(
        fn=generate_stream,
        inputs=[selected_topic, subtopic, input_text, examples_count],
        outputs=output_stream
    )

    # Export functionality for code snippets.
    # NOTE(review): the success message replaces the generated content in the
    # output box (it is wired as both input and output) — confirm intended.
    def export_code(content):
        # BUGFIX: explicit UTF-8 so non-ASCII content doesn't crash on
        # platforms with a non-UTF-8 default encoding.
        with open("generated_code.py", "w", encoding="utf-8") as file:
            file.write(content)
        return "Code exported successfully to generated_code.py!"

    export_button.click(
        fn=export_code,
        inputs=[output_stream],
        outputs=[output_stream]
    )

# Launch the Gradio app
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
+ import tempfile
4
 
5
  # Initialize the Hugging Face Inference Client
6
  client = InferenceClient()
7
 
8
# Function to generate dynamic lessons, examples, or code
def generate_content(selected_topic, subtopic, complexity, input_text, examples_count, output_type):
    """
    Generate content dynamically based on user input.

    Args:
        selected_topic (str): Topic selected by the user.
        subtopic (str): Specific subtopic for generation.
        complexity (str): User expertise level ("Beginner"/"Intermediate"/"Advanced").
        input_text (str): Additional input context (may be empty).
        examples_count (int): Number of examples to generate.
        output_type (str): "Plain Text", "LaTeX", or "Downloadable".

    Returns:
        tuple: (text for the output textbox, path of a downloadable file or
        None) — one value per Gradio output component wired to this callback.
        Previously a single string/dict was returned, which did not match the
        two outputs ([output, download_button]) in generate_button.click.
    """
    # Build the prompt dynamically; fall back to a context-free prompt when
    # the user supplied no additional input.
    if input_text.strip():
        prompt = (
            f"Generate {examples_count} {complexity.lower()}-level {selected_topic.lower()} examples, lessons, "
            f"or problems related to {subtopic}. Context: {input_text}"
        )
    else:
        prompt = (
            f"Generate {examples_count} {complexity.lower()}-level {selected_topic.lower()} lessons "
            f"or problems focused on {subtopic}."
        )
    messages = [{"role": "user", "content": prompt}]

    try:
        # Generate the content using the model (non-streaming call).
        response = client.chat.completions.create(
            model="Qwen/Qwen2.5-Coder-32B-Instruct",
            messages=messages,
            temperature=0.5,
            max_tokens=1024,
            top_p=0.7
        )
        content = response.choices[0].message.content

        # Adjust the output based on the selected type.
        if output_type == "Downloadable":
            # BUGFIX: write through the temp-file handle (UTF-8) instead of
            # leaking the open descriptor and reopening the file by name with
            # the platform-default encoding. delete=False keeps the file on
            # disk so Gradio can serve it.
            with tempfile.NamedTemporaryFile(
                mode="w", encoding="utf-8", suffix=".txt", delete=False
            ) as tmp:
                tmp.write(content)
            return content, tmp.name
        # "LaTeX" and "Plain Text" both render as text in the textbox; the
        # old {"content": ..., "latex": True} dict would only have shown its
        # repr in a Textbox. No file is produced for these formats.
        return content, None
    except Exception as e:
        return f"Error: {e}", None
56
 
57
# Create the Gradio interface
with gr.Blocks() as app:
    # App Title (emoji was mojibake "🌟"; fixed encoding)
    gr.Markdown("## 🌟 Advanced STEM and Code Generator with Interactive Features")

    with gr.Row():
        # Input Panel
        with gr.Column():
            selected_topic = gr.Radio(
                choices=["Math", "STEM", "Code Generation"],
                label="Select a Topic",
                value="Math"
            )
            subtopic = gr.Textbox(
                label="Subtopic",
                placeholder="E.g., Algebra, Physics, Sorting Algorithms"
            )
            complexity = gr.Radio(
                choices=["Beginner", "Intermediate", "Advanced"],
                label="Expertise Level",
                value="Beginner"
            )
            input_text = gr.Textbox(
                label="Additional Context",
                placeholder="E.g., 'Explain integration basics' or 'Generate Python code for searching.'",
                lines=3
            )
            examples_count = gr.Slider(
                minimum=1,
                maximum=5,
                step=1,
                label="Number of Examples",
                value=1
            )
            output_type = gr.Radio(
                choices=["Plain Text", "LaTeX", "Downloadable"],
                label="Output Format",
                value="Plain Text"
            )
            generate_button = gr.Button("Generate Content")

        # Output Panel (emoji was mojibake "πŸ“"; fixed encoding)
        with gr.Column():
            gr.Markdown("### 📝 Output")
            output = gr.Textbox(
                label="Generated Output",
                lines=15,
                interactive=False
            )
            download_button = gr.File(label="Download File (if applicable)")

    # Link the generation function to the button. Two outputs, so the
    # callback must return a (text, file) pair.
    generate_button.click(
        fn=generate_content,
        inputs=[selected_topic, subtopic, complexity, input_text, examples_count, output_type],
        outputs=[output, download_button]
    )

    # Feedback Mechanism — the Label doubles as the thank-you banner.
    feedback_label = gr.Label(value="Was this content helpful?")
    feedback_rating = gr.Radio(
        choices=["Yes", "No"],
        label="Feedback",
        value="Yes"
    )
    feedback_button = gr.Button("Submit Feedback")

    # Echo the selected rating back into the label.
    def collect_feedback(feedback):
        return f"Thank you for your feedback: {feedback}"

    feedback_button.click(
        fn=collect_feedback,
        inputs=[feedback_rating],
        outputs=[feedback_label]
    )

# Launch the Gradio app