Ajey95 committed
Commit 1f1f2eb · 1 Parent(s): d90deef
Fix: chat_history addition
Files changed:
- agents/academic_agent.py +73 -33
- agents/agent_helpers.py +15 -0
- agents/drug_info_agent.py +21 -34
- agents/mnemonic_agent.py +15 -8
- agents/quiz_agent.py +17 -12
agents/academic_agent.py
CHANGED

@@ -264,7 +264,7 @@ Now returns a standardized dictionary instead of a raw string.
 import json
 import os
 import re
-
+from .agent_helpers import format_history_for_prompt
 class AcademicAgent:
     def __init__(self, gemini_model=None):
         self.model = gemini_model

@@ -384,40 +384,80 @@ class AcademicAgent:
         return "🙏 **Namaste!** I'm here to help..."

     # --- THIS IS THE ONLY METHOD THAT CHANGES ---
-    def process_query(self, query: str, file_context: str = "",chat_history: list = None):
+    def process_query(self, query: str, file_context: str = "", chat_history: list = None):
         """
+        Processes a general academic query using the Gemini model.
+
+        Args:
+            query (str): The user's full query.
+            file_context (str): Context from any uploaded files.
+            chat_history (list): The history of the conversation.
+
+        Returns:
+            dict: A dictionary containing the response message and agent metadata.
         """
+        if not self.model:
+            return {'message': "📚 My knowledge circuits are offline! The Gemini API key is missing.", 'agent_used': 'academic', 'status': 'error_no_api_key'}
+
+        history_for_prompt = format_history_for_prompt(chat_history)
+
+        context_section = ""
+        if file_context:
+            context_section = f"---\nCONTEXT FROM UPLOADED FILE:\n{file_context}\n---"
+
+        prompt = f"""You are a helpful and knowledgeable AI pharmacy tutor for a student in India.
+
+Your reasoning process must be:
+1. First, analyze the CONVERSATION HISTORY to understand the immediate context of the CURRENT QUESTION. This is especially important to understand what "this," "that," or "it" refers to.
+2. Once you understand the user's real question, check if the UPLOADED FILE context is relevant to the topic.
+3. Formulate your answer based on this reasoning, keeping an encouraging and professional tone.
+
+CONVERSATION HISTORY:
+{history_for_prompt}
+{context_section}
+CURRENT QUESTION:
+User: {query}
+"""
         try:
+            response = self.model.generate_content(prompt)
+            return {'message': response.text, 'agent_used': 'academic', 'status': 'success'}
+        except Exception as e:
+            print(f"Academic Agent Error: {e}")
+            return {'message': f"Sorry, I encountered a problem: {e}", 'agent_used': 'academic', 'status': 'error_api_call'}
+
+    # def process_query(self, query: str, file_context: str = "",chat_history: list = None):
+    #     """
+    #     Main method to process academic queries.
+    #     It now returns a standardized dictionary.
+    #     """
+    #     response_message = ""
+    #     try:
+    #         # Priority 1: Use AI for a comprehensive response if available.
+    #         if self.model:
+    #             ai_response = self.process_with_ai(query, file_context,chat_history)
+    #             if ai_response:
+    #                 response_message = f"🤖 **AI-Powered Response**\n\n{ai_response}"
+
+    #         # Priority 2 (Fallback): Use the local knowledge base if AI fails or is unavailable.
+    #         if not response_message:
+    #             key_terms = self.extract_key_terms(query)
+    #             if not key_terms:
+    #                 response_message = self.generate_general_response(query, file_context)
+    #             else:
+    #                 best_topic = self.find_best_match(key_terms)
+    #                 if best_topic:
+    #                     content = self.knowledge_base[best_topic]
+    #                     response_message = self.format_response(best_topic, content)
+    #                 else:
+    #                     response_message = self.generate_general_response(query, file_context)
+
+    #     except Exception as e:
+    #         print(f"An unexpected error occurred in AcademicAgent.process_query: {e}")
+    #         response_message = f"माफ करें (Sorry), I encountered an error. Please try again."
+
+    #     # **THE FIX**: Always wrap the final message in the standard dictionary format.
+    #     return {
+    #         'message': response_message,
+    #         'agent_used': 'academic',
+    #         'status': 'success'
+    #     }
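For reference, a minimal sketch of how the updated process_query might be driven end to end. The Gemini client wiring (the configure call, the model name) and the history entries are assumptions based on this diff, not part of the commit:

# sketch.py -- hypothetical caller; API setup and model name are assumptions
import google.generativeai as genai
from agents.academic_agent import AcademicAgent

genai.configure(api_key="YOUR_API_KEY")            # assumed key handling
model = genai.GenerativeModel("gemini-1.5-flash")  # assumed model name

agent = AcademicAgent(gemini_model=model)
history = [
    {'role': 'user', 'parts': ["What is first-pass metabolism?"]},
    {'role': 'model', 'parts': ["First-pass metabolism is the..."]},
]
# "it" in the follow-up is resolvable only because chat_history is passed in.
result = agent.process_query("Which organ mainly performs it?", chat_history=history)
print(result['status'], result['message'])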
agents/agent_helpers.py
ADDED

@@ -0,0 +1,15 @@
+# agents/agent_helpers.py
+
+def format_history_for_prompt(chat_history: list = None) -> str:
+    """Formats the chat history list into a string for the AI prompt."""
+    if not chat_history:
+        return ""
+
+    history_for_prompt = ""
+    for turn in chat_history:
+        # Ensure 'parts' is a list and not empty before accessing
+        if turn.get('parts') and isinstance(turn.get('parts'), list):
+            role = "User" if turn['role'] == 'user' else "AI"
+            history_for_prompt += f"{role}: {turn['parts'][0]}\n"
+
+    return history_for_prompt
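The helper is a pure function, so its behavior is easy to check in isolation. A small illustrative run (the history entries here are made up):

from agents.agent_helpers import format_history_for_prompt

history = [
    {'role': 'user', 'parts': ["Define bioavailability"]},
    {'role': 'model', 'parts': ["Bioavailability is the fraction of a dose..."]},
    {'role': 'user', 'parts': []},  # empty 'parts' is skipped by the guard
]
print(format_history_for_prompt(history))
# User: Define bioavailability
# AI: Bioavailability is the fraction of a dose...

Note that only parts[0] of each turn is used, so multi-part turns are truncated to their first part in the prompt.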
agents/drug_info_agent.py
CHANGED

@@ -3,7 +3,7 @@
 Drug Information Agent - Handles drug-related queries using Generative AI.
 """
 import re
-
+from .agent_helpers import format_history_for_prompt
 class DrugInfoAgent:
     def __init__(self, gemini_model=None):
         """

@@ -14,49 +14,37 @@ class DrugInfoAgent:
         """
         self.model = gemini_model

-        """
-        for p in patterns:
-            drug_name = re.sub(p, "", drug_name)
-
-        # Clean up any extra whitespace
-        return drug_name.strip().title()  # Capitalize for better recognition
-    def process_query(self, query: str, file_context: str = "", chat_history: list = None):
+    def process_query(self, query: str, file_context: str = "", chat_history: list = None):
+        """
+        Processes a query to retrieve information about a specific drug.
+
+        Args:
+            query (str): The user's full query (e.g., "Tell me about Metformin").
+            file_context (str): Optional context from uploaded files.
+            chat_history (list): The history of the conversation.
+
+        Returns:
+            dict: A dictionary containing the response message and agent metadata.
+        """
         if not self.model:
-            return {
-                'message': "💊 The pharmacy database is offline! The Gemini API key is missing.",
-                'agent_used': 'drug_info', 'status': 'error_no_api_key'
-            }
+            return {'message': "💊 The pharmacy database is offline! The Gemini API key is missing.", 'agent_used': 'drug_info', 'status': 'error_no_api_key'}

-        if chat_history:
-            for turn in chat_history:
-                role = "User" if turn['role'] == 'user' else "AI"
-                if turn.get('parts'):
-                    history_for_prompt += f"{role}: {turn['parts'][0]}\n"
+        history_for_prompt = format_history_for_prompt(chat_history)

-        prompt = f"""You are a cautious AI Pharmacist Tutor providing educational information for a B.Pharmacy student.
+        prompt = f"""You are a cautious AI Pharmacist Tutor providing educational information.

 **CRITICAL SAFETY INSTRUCTION:** START EVERY RESPONSE with this disclaimer: "⚠️ **Disclaimer:** This information is for educational purposes ONLY and is not a substitute for professional medical advice."

+Your reasoning process must be:
+1. Analyze the CONVERSATION HISTORY and the CURRENT QUESTION to identify the drug being discussed.
+2. Provide a structured summary for that drug. If the user asks a follow-up (e.g., "what about its side effects?"), answer that specific question in the context of the drug already being discussed.
+
 CONVERSATION HISTORY:
 {history_for_prompt}
 CURRENT QUESTION:
 User: {query}

+Provide a structured summary including: Therapeutic Class, MOA, Indications, Side Effects, and Warnings. DO NOT provide specific dosages.
 """
         try:
             response = self.model.generate_content(prompt)

@@ -65,7 +53,6 @@ Based on the CURRENT QUESTION and conversation history, provide a structured sum
             print(f"Drug Info Agent Error: {e}")
             return {'message': f"Sorry, I couldn't access the drug database. Error: {e}", 'agent_used': 'drug_info', 'status': 'error_api_call'}

-
     # def process_query(self, query: str, file_context: str = "", chat_history: list = None):
     #     """
     #     Processes a query to retrieve information about a specific drug.
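The agent itself stores no state, so the caller is responsible for appending each turn to chat_history. A hedged sketch of that loop, with a stub standing in for the Gemini client (the stub and the append logic are illustrative, not from this repo):

from agents.drug_info_agent import DrugInfoAgent

class StubModel:  # stand-in client; only generate_content(prompt).text is needed
    def generate_content(self, prompt):
        return type("Resp", (), {"text": "...stubbed answer..."})()

agent = DrugInfoAgent(gemini_model=StubModel())
history = []

first = agent.process_query("Tell me about Metformin", chat_history=history)
history.append({'role': 'user', 'parts': ["Tell me about Metformin"]})
history.append({'role': 'model', 'parts': [first['message']]})

# The follow-up never names the drug; the prompt's history section carries it.
followup = agent.process_query("What about its side effects?", chat_history=history)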
agents/mnemonic_agent.py
CHANGED

@@ -3,7 +3,7 @@
 Mnemonic Creation Agent - Creates memory aids and tricks using Generative AI.
 """
 import re
-
+from .agent_helpers import format_history_for_prompt
 class MnemonicAgent:
     def __init__(self, gemini_model=None):
         """

@@ -31,15 +31,22 @@ class MnemonicAgent:
         # Clean up any extra whitespace
         return topic.strip()
     def process_query(self, query: str, file_context: str = "", chat_history: list = None):
+        """
+        Processes a query to generate a mnemonic.
+
+        Args:
+            query (str): The user's full query (e.g., "Give me a mnemonic for glycolysis steps").
+            file_context (str): Optional context from uploaded files.
+            chat_history (list): The history of the conversation.
+
+        Returns:
+            dict: A dictionary containing the response message and agent metadata.
+        """
         if not self.model:
             return {'message': "🧠 My creative circuits are offline! The Gemini API key is missing.", 'agent_used': 'mnemonic_creation', 'status': 'error_no_api_key'}

-        history_for_prompt = ""
-        for turn in chat_history:
-            role = "User" if turn['role'] == 'user' else "AI"
-            if turn.get('parts'):
-                history_for_prompt += f"{role}: {turn['parts'][0]}\n"
+        history_for_prompt = format_history_for_prompt(chat_history)
+        topic = self._extract_topic(query)

         prompt = f"""You are "Mnemonic Master," a creative AI that creates memorable mnemonics for B.Pharmacy students.

@@ -48,7 +55,7 @@ CONVERSATION HISTORY:
 CURRENT TASK:
 User: {query}

-Based on the CURRENT TASK and conversation history, generate a clever
+Based on the CURRENT TASK and conversation history, generate a clever mnemonic (acronym, rhyme, or story). If the user is asking for a modification of a previous mnemonic, adjust it accordingly. Explain how the mnemonic works. Be encouraging and fun!
 """
         try:
             response = self.model.generate_content(prompt)
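The error_no_api_key branch makes the standardized return contract easy to verify without any API key. A quick illustrative check:

from agents.mnemonic_agent import MnemonicAgent

# With no model wired in, process_query short-circuits to the error dict.
result = MnemonicAgent(gemini_model=None).process_query("Mnemonic for cranial nerves")
assert result['status'] == 'error_no_api_key'
assert result['agent_used'] == 'mnemonic_creation'
assert set(result) == {'message', 'agent_used', 'status'}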
agents/quiz_agent.py
CHANGED

@@ -3,7 +3,7 @@
 Quiz Generation Agent - Creates quizzes and flashcards using Generative AI.
 """
 import re
-
+from .agent_helpers import format_history_for_prompt
 class QuizAgent:
     def __init__(self, gemini_model=None):
         """

@@ -32,21 +32,25 @@ class QuizAgent:
         # Clean up any extra whitespace
         return topic.strip()
     def process_query(self, query: str, file_context: str = "", chat_history: list = None):
+        """
+        Processes a query to generate a quiz.
+
+        Args:
+            query (str): The user's full query (e.g., "Make a quiz on analgesics").
+            file_context (str): Optional text content from an uploaded file.
+            chat_history (list): The history of the conversation.
+
+        Returns:
+            dict: A dictionary containing the quiz and agent metadata.
+        """
         if not self.model:
             return {'message': "❓ The question bank is locked! The Gemini API key is missing.", 'agent_used': 'quiz_generation', 'status': 'error_no_api_key'}

+        history_for_prompt = format_history_for_prompt(chat_history)
         topic = self._extract_topic(query)
-
-        history_for_prompt = ""
-        if chat_history:
-            for turn in chat_history:
-                role = "User" if turn['role'] == 'user' else "AI"
-                if turn.get('parts'):
-                    history_for_prompt += f"{role}: {turn['parts'][0]}\n"
-
         task_description = f"Generate a short quiz (3-5 questions) on the topic: **{topic.title()}**."
         if file_context:
-            task_description += f"\nIf relevant, use
+            task_description += f"\nIf relevant, use text from the student's notes for context:\n---\n{file_context}\n---"

         prompt = f"""You are "Quiz Master," an AI that creates educational quizzes.

@@ -55,7 +59,9 @@ CONVERSATION HISTORY:
 CURRENT TASK:
 {task_description}

-Based on the CURRENT TASK and conversation history, create a quiz.
+Based on the CURRENT TASK and conversation history, create a quiz. If the user is asking for a change to a previous quiz (e.g., "make it harder"), do that.
+Include a mix of MCQs, True/False, and Fill-in-the-Blank questions.
+CRITICAL: Provide a clearly separated "Answer Key" section with answers and brief explanations.
 """
         try:
             response = self.model.generate_content(prompt)

@@ -64,7 +70,6 @@ Based on the CURRENT TASK and conversation history, create a quiz. Include a mix
             print(f"Quiz Agent Error: {e}")
             return {'message': f"My question book seems to be stuck. Error: {e}", 'agent_used': 'quiz_generation', 'status': 'error_api_call'}

-
     # def process_query(self, query: str, file_context: str = "",chat_history: list = None):
     #     """
    #     Processes a query to generate a quiz. The agent prioritizes file_context if provided.
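One way to see exactly what the model receives after this change is an echo stub that returns the assembled prompt verbatim. This is a debugging sketch, not part of the commit:

from agents.quiz_agent import QuizAgent

class EchoModel:  # returns the assembled prompt so it can be inspected
    def generate_content(self, prompt):
        return type("Resp", (), {"text": prompt})()

agent = QuizAgent(gemini_model=EchoModel())
history = [
    {'role': 'user', 'parts': ["Make a quiz on analgesics"]},
    {'role': 'model', 'parts': ["Q1) ...\nAnswer Key: ..."]},
]
out = agent.process_query("make it harder", chat_history=history)
print(out['message'])  # full prompt: history, task description, answer-key rules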