SUMANTH-CH committed on
Commit
7bd289d
Β·
verified Β·
1 Parent(s): acb1a09

Upload 4 files

Browse files
Files changed (4) hide show
  1. .gitignore +7 -0
  2. README.md +29 -13
  3. app.py +304 -64
  4. requirements.txt +5 -1
.gitignore ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ __pycache__/
2
+ *.pyc
3
+ *.pyo
4
+ *.pyd
5
+ .Python
6
+ env/
7
+ venv/
README.md CHANGED
@@ -1,13 +1,29 @@
1
- ---
2
- title: EDUTUTOR AI
3
- emoji: πŸ’¬
4
- colorFrom: yellow
5
- colorTo: purple
6
- sdk: gradio
7
- sdk_version: 5.0.1
8
- app_file: app.py
9
- pinned: false
10
- short_description: Learn New, Gain The Knowledge.
11
- ---
12
-
13
- An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: EDUTUTOR AI
3
+ emoji: πŸŽ“
4
+ colorFrom: blue
5
+ colorTo: purple
6
+ sdk: gradio
7
+ sdk_version: 4.8.0
8
+ app_file: app.py
9
+ pinned: false
10
+ license: apache-2.0
11
+ ---
12
+
13
+ # EDUTUTOR AI πŸŽ“
14
+
15
+ An intelligent AI tutor powered by IBM Granite that provides personalized educational explanations across multiple subjects and difficulty levels.
16
+
17
+ ## Features
18
+ - Subject-specific tutoring (Math, Physics, Computer Science, etc.)
19
+ - Adaptive difficulty levels (Beginner, Intermediate, Advanced)
20
+ - Interactive learning sessions
21
+ - Conversation history tracking
22
+
23
+ ## Usage
24
+ 1. Click "Initialize EDUTUTOR AI" to load the model
25
+ 2. Select your subject and difficulty level
26
+ 3. Ask your educational questions
27
+ 4. Get personalized, clear explanations
28
+
29
+ Built with ❀️ using Gradio and Hugging Face Transformers.
app.py CHANGED
@@ -1,64 +1,304 @@
1
- import gradio as gr
2
- from huggingface_hub import InferenceClient
3
-
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
-
9
-
10
- def respond(
11
- message,
12
- history: list[tuple[str, str]],
13
- system_message,
14
- max_tokens,
15
- temperature,
16
- top_p,
17
- ):
18
- messages = [{"role": "system", "content": system_message}]
19
-
20
- for val in history:
21
- if val[0]:
22
- messages.append({"role": "user", "content": val[0]})
23
- if val[1]:
24
- messages.append({"role": "assistant", "content": val[1]})
25
-
26
- messages.append({"role": "user", "content": message})
27
-
28
- response = ""
29
-
30
- for message in client.chat_completion(
31
- messages,
32
- max_tokens=max_tokens,
33
- stream=True,
34
- temperature=temperature,
35
- top_p=top_p,
36
- ):
37
- token = message.choices[0].delta.content
38
-
39
- response += token
40
- yield response
41
-
42
-
43
- """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
- """
46
- demo = gr.ChatInterface(
47
- respond,
48
- additional_inputs=[
49
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
50
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
51
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
52
- gr.Slider(
53
- minimum=0.1,
54
- maximum=1.0,
55
- value=0.95,
56
- step=0.05,
57
- label="Top-p (nucleus sampling)",
58
- ),
59
- ],
60
- )
61
-
62
-
63
- if __name__ == "__main__":
64
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # EDUTUTOR AI - Complete app.py for Hugging Face Spaces
2
+ # An intelligent AI tutor powered by IBM Granite that provides personalized educational explanations across multiple subjects and difficulty levels.
3
+
4
+ import gradio as gr
5
+ import torch
6
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
7
+ import warnings
8
+ warnings.filterwarnings("ignore")
9
+
10
class EduTutorAI:
    """Educational tutor wrapping the IBM Granite instruct model.

    Holds the lazily-loaded tokenizer/model/pipeline plus an in-memory
    conversation history of question/subject/difficulty/response entries.
    """

    def __init__(self):
        # Model weights are loaded lazily via load_model() so the Gradio UI
        # can start immediately; pipe stays None until then.
        self.model_name = "ibm-granite/granite-3.3-2b-instruct"
        self.tokenizer = None
        self.model = None
        self.pipe = None
        self.conversation_history = []

    def load_model(self):
        """Load the Granite tokenizer/model and build a text-generation pipeline.

        Returns:
            bool: True on success, False on any failure (the error is printed,
            not raised, so the UI can show a friendly status message).
        """
        try:
            print("Loading EDUTUTOR AI model...")

            # Load tokenizer
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.model_name,
                trust_remote_code=True,
            )

            # float16 + device_map="auto" only when a GPU is present;
            # low_cpu_mem_usage keeps peak RAM down while loading weights.
            use_cuda = torch.cuda.is_available()
            dtype = torch.float16 if use_cuda else torch.float32
            self.model = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                torch_dtype=dtype,
                device_map="auto" if use_cuda else None,
                trust_remote_code=True,
                low_cpu_mem_usage=True,
            )

            # Create pipeline
            self.pipe = pipeline(
                "text-generation",
                model=self.model,
                tokenizer=self.tokenizer,
                torch_dtype=dtype,
                device_map="auto" if use_cuda else None,
            )

            print("βœ… Model loaded successfully!")
            return True

        except Exception as e:
            print(f"❌ Error loading model: {str(e)}")
            return False

    def create_educational_prompt(self, user_question, subject="General", difficulty="Intermediate"):
        """Build the tutoring prompt embedding subject, difficulty and question."""
        system_prompt = f"""You are EDUTUTOR AI, an expert educational tutor specializing in {subject}.
Your role is to:
1. Provide clear, accurate explanations at {difficulty} level
2. Break down complex concepts into digestible parts
3. Use examples and analogies when helpful
4. Encourage learning through questions
5. Be patient and supportive

Student Question: {user_question}

Please provide a comprehensive yet accessible explanation:"""

        return system_prompt

    def generate_response(self, question, subject, difficulty, max_length=512):
        """Generate an educational response for the given question.

        Args:
            question: The student's question.
            subject: Subject area used to specialize the prompt.
            difficulty: Target explanation level.
            max_length: Token budget for the *generated* answer (matches the
                UI slider labelled "Response Length (tokens)").

        Returns:
            str: The model's answer, or a "❌ ..." error string on failure.
        """
        if not self.pipe:
            return "❌ Model not loaded. Please wait for initialization."

        try:
            # Create educational prompt
            prompt = self.create_educational_prompt(question, subject, difficulty)

            # FIX: use max_new_tokens instead of max_length. max_length counts
            # the prompt tokens too, so a long prompt could leave little or no
            # room for the actual answer; max_new_tokens budgets only the
            # generated continuation, which is what the slider promises.
            response = self.pipe(
                prompt,
                max_new_tokens=max_length,
                num_return_sequences=1,
                temperature=0.7,
                do_sample=True,
                pad_token_id=self.tokenizer.eos_token_id,
                truncation=True,
            )

            # Extract the generated text
            full_response = response[0]['generated_text']

            # FIX: strip the prompt by position rather than str.replace(),
            # which would also delete any prompt text echoed inside the answer.
            if full_response.startswith(prompt):
                ai_response = full_response[len(prompt):].strip()
            else:
                ai_response = full_response.strip()

            # Store in conversation history
            self.conversation_history.append({
                "question": question,
                "subject": subject,
                "difficulty": difficulty,
                "response": ai_response,
            })

            return ai_response

        except Exception as e:
            return f"❌ Error generating response: {str(e)}"

    def get_conversation_history(self):
        """Return the last 5 sessions formatted as a Markdown summary."""
        if not self.conversation_history:
            return "No conversation history yet."

        history = "πŸ“š **EDUTUTOR AI - Learning Session History**\n\n"
        for i, conv in enumerate(self.conversation_history[-5:], 1):  # Show last 5 conversations
            history += f"**Session {i}:**\n"
            history += f"🎯 Subject: {conv['subject']} | Level: {conv['difficulty']}\n"
            history += f"❓ Question: {conv['question']}\n"
            # Responses are truncated to 200 chars to keep the panel readable.
            history += f"πŸ’‘ Response: {conv['response'][:200]}...\n\n"

        return history

    def clear_history(self):
        """Clear conversation history and return a confirmation message."""
        self.conversation_history = []
        return "πŸ—‘οΈ Conversation history cleared!"
127
+
128
# Initialize the EduTutor AI
# Single shared instance used by every Gradio callback below; the model
# itself is loaded later, when the user clicks "Initialize EDUTUTOR AI".
edututor = EduTutorAI()
130
+
131
+ # Load model function for Gradio
132
def initialize_model():
    """Load the shared tutor's model and return a user-facing status line."""
    # Guard-style return: success path first, failure message as fallback.
    if edututor.load_model():
        return "βœ… EDUTUTOR AI is ready! You can now start asking questions."
    return "❌ Failed to load model. Please try again."
139
+
140
+ # Main chat function
141
def chat_with_edututor(question, subject, difficulty, max_length):
    """Route a question from the UI to the shared tutor instance.

    Returns a prompt-for-input message when the question is blank, otherwise
    the tutor's generated response (or its error string).
    """
    if not question.strip():
        return "Please enter a question to get started!"
    return edututor.generate_response(question, subject, difficulty, max_length)
148
+
149
+ # Create Gradio interface
150
def create_interface():
    """Build and return the EDUTUTOR AI Gradio Blocks interface.

    Wires the widgets to the module-level ``edututor`` instance via
    ``initialize_model`` and ``chat_with_edututor``.
    """

    with gr.Blocks(
        title="πŸŽ“ EDUTUTOR AI - Your Personal Learning Assistant",
        theme=gr.themes.Soft(),
        # Custom CSS: page font plus the gradient banner styling used by the
        # header HTML block below.
        css="""
        .gradio-container {
            font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
        }
        .main-header {
            text-align: center;
            background: linear-gradient(45deg, #667eea 0%, #764ba2 100%);
            color: white;
            padding: 20px;
            border-radius: 10px;
            margin-bottom: 20px;
        }
        """
    ) as interface:

        # Header
        gr.HTML("""
        <div class="main-header">
            <h1>πŸŽ“ EDUTUTOR AI</h1>
            <p>Your Intelligent Educational Tutor powered by IBM Granite 3.3-2B</p>
            <p><em>Ask questions, learn concepts, and expand your knowledge!</em></p>
        </div>
        """)

        # Model initialization section — loading is user-triggered (not done
        # at import time) so the Space UI appears immediately.
        with gr.Row():
            with gr.Column():
                init_button = gr.Button("πŸš€ Initialize EDUTUTOR AI", variant="primary", size="lg")
                init_status = gr.Textbox(
                    label="Initialization Status",
                    value="Click 'Initialize EDUTUTOR AI' to start",
                    interactive=False
                )

        # Main interface
        with gr.Row():
            with gr.Column(scale=2):
                # Input section
                with gr.Group():
                    gr.Markdown("### πŸ“ Ask Your Question")
                    question_input = gr.Textbox(
                        label="Your Question",
                        placeholder="e.g., Explain quantum physics, How does photosynthesis work?, What is machine learning?",
                        lines=3
                    )

                    with gr.Row():
                        subject_dropdown = gr.Dropdown(
                            choices=[
                                "General", "Mathematics", "Physics", "Chemistry",
                                "Biology", "Computer Science", "History", "Literature",
                                "Geography", "Economics", "Philosophy"
                            ],
                            value="General",
                            label="Subject Area"
                        )

                        difficulty_dropdown = gr.Dropdown(
                            choices=["Beginner", "Intermediate", "Advanced"],
                            value="Intermediate",
                            label="Difficulty Level"
                        )

                    # Passed straight through to generate_response as its
                    # token budget (see chat_with_edututor).
                    max_length_slider = gr.Slider(
                        minimum=100,
                        maximum=1000,
                        value=512,
                        step=50,
                        label="Response Length (tokens)"
                    )

                    ask_button = gr.Button("πŸ€” Ask EDUTUTOR AI", variant="primary")

            with gr.Column(scale=1):
                # Quick actions
                with gr.Group():
                    gr.Markdown("### ⚑ Quick Actions")
                    history_button = gr.Button("πŸ“š View Learning History")
                    clear_button = gr.Button("πŸ—‘οΈ Clear History")

                    gr.Markdown("### πŸ’‘ Tips")
                    gr.Markdown("""
                    - Be specific with your questions
                    - Select appropriate subject and difficulty
                    - Use follow-up questions for deeper understanding
                    - Experiment with different difficulty levels
                    """)

        # Response section
        with gr.Row():
            response_output = gr.Textbox(
                label="πŸŽ“ EDUTUTOR AI Response",
                lines=15,
                max_lines=20,
                interactive=False
            )

        # History section — hidden until the user requests it via the
        # history button handler below.
        with gr.Row():
            history_output = gr.Textbox(
                label="πŸ“š Learning Session History",
                lines=10,
                interactive=False,
                visible=False
            )

        # Event handlers
        init_button.click(
            fn=initialize_model,
            outputs=init_status
        )

        ask_button.click(
            fn=chat_with_edututor,
            inputs=[question_input, subject_dropdown, difficulty_dropdown, max_length_slider],
            outputs=response_output
        )

        # Pressing Enter in the question box behaves like the Ask button.
        question_input.submit(
            fn=chat_with_edututor,
            inputs=[question_input, subject_dropdown, difficulty_dropdown, max_length_slider],
            outputs=response_output
        )

        # Fill the history textbox, then reveal it in a chained step.
        history_button.click(
            fn=edututor.get_conversation_history,
            outputs=history_output
        ).then(
            fn=lambda: gr.update(visible=True),
            outputs=history_output
        )

        # NOTE(review): the clear confirmation is written into init_status,
        # not the history panel — presumably intentional reuse of the status
        # box; confirm this is the desired UX.
        clear_button.click(
            fn=edututor.clear_history,
            outputs=init_status
        )

    return interface
294
+
295
+ # Launch the application
296
# Launch the application
if __name__ == "__main__":
    # Startup banner for the Space logs.
    print("πŸŽ“ Starting EDUTUTOR AI...")
    print("=" * 50)

    # Build the UI and hand control to Gradio; default launch() arguments
    # are what Hugging Face Spaces expects.
    app = create_interface()
    app.launch()
requirements.txt CHANGED
@@ -1 +1,5 @@
1
- huggingface_hub==0.25.2
 
 
 
 
 
1
+ transformers>=4.35.0
2
+ torch>=2.0.0
3
+ gradio>=4.0.0
4
+ accelerate>=0.20.0
5
+ bitsandbytes>=0.41.0