# """ | |
# Academic Agent - Handles general academic questions | |
# Now with Gemini API integration and file context support! | |
# """ | |
# import json | |
# import os | |
# import re | |
# class AcademicAgent: | |
# def __init__(self, gemini_model=None): | |
# """ | |
# Initializes the agent. | |
# Args: | |
# gemini_model: An instance of a Gemini model client for AI-powered responses. | |
# If None, the agent will operate in offline/fallback mode. | |
# """ | |
# self.model = gemini_model | |
# self.knowledge_base = self.load_knowledge_base() | |
# def load_knowledge_base(self): | |
# """Load pre-built academic knowledge base from a JSON file.""" | |
# knowledge_file = 'data/academic_knowledge.json' | |
# # Create a default knowledge base if the file doesn't exist | |
# if not os.path.exists(knowledge_file): | |
# default_knowledge = { | |
# "pharmacology": { | |
# "definition": "Pharmacology is the branch of medicine concerned with the uses, effects, and modes of action of drugs.", | |
# "branches": ["Pharmacokinetics", "Pharmacodynamics", "Toxicology", "Clinical Pharmacology"], | |
# "importance": "Essential for understanding drug therapy and patient safety" | |
# }, | |
# "pharmacokinetics": { | |
# "definition": "The study of how the body affects a drug (ADME: Absorption, Distribution, Metabolism, Excretion)", | |
# "processes": ["Absorption", "Distribution", "Metabolism", "Excretion"], | |
# "factors": ["Age", "Gender", "Disease state", "Genetic factors"] | |
# }, | |
# "pharmacodynamics": { | |
# "definition": "The study of what a drug does to the body - drug actions and effects", | |
# "concepts": ["Receptor theory", "Dose-response relationship", "Therapeutic index"], | |
# "mechanisms": ["Agonism", "Antagonism", "Enzyme inhibition"] | |
# }, | |
# "krebs_cycle": { | |
# "definition": "A series of enzymatic reactions that generate energy (ATP) from carbohydrates, fats, and proteins", | |
# "location": "Mitochondrial matrix", | |
# "steps": 8, | |
# "importance": "Central metabolic pathway for energy production" | |
# }, | |
# "drug_metabolism": { | |
# "definition": "The biochemical modification of drugs by living organisms", | |
# "phases": ["Phase I (oxidation, reduction, hydrolysis)", "Phase II (conjugation reactions)"], | |
# "location": "Primarily liver, also kidneys, lungs, intestines", | |
# "enzymes": "Cytochrome P450 family" | |
# } | |
# } | |
# os.makedirs('data', exist_ok=True) | |
# with open(knowledge_file, 'w') as f: | |
# json.dump(default_knowledge, f, indent=2) | |
# return default_knowledge | |
# try: | |
# with open(knowledge_file, 'r') as f: | |
# return json.load(f) | |
# except json.JSONDecodeError: | |
# print("Error: Could not decode JSON from knowledge base file.") | |
# return {} | |
# except Exception as e: | |
# print(f"Error loading knowledge base: {e}") | |
# return {} | |
# def process_with_ai(self, query, file_context=""): | |
# """Use Gemini AI to provide comprehensive, context-aware answers.""" | |
# if not self.model: | |
# return None # Fallback to local knowledge if no AI model is provided | |
# try: | |
# # Construct a context-aware prompt for the AI | |
# context_section = "" | |
# if file_context: | |
# context_section = f""" | |
# UPLOADED FILE CONTEXT: | |
# --- | |
# {file_context} | |
# --- | |
# Please reference the uploaded content when relevant to answer the question. | |
# """ | |
# prompt = f""" | |
# You are an expert pharmacy educator and AI tutor specializing in pharmaceutical sciences. | |
# Your role is to help B.Pharmacy students learn complex concepts in an engaging, culturally-sensitive way. | |
# STUDENT QUESTION: {query} | |
# {context_section} | |
# Please provide a comprehensive answer that includes: | |
# 1. A clear explanation suitable for a pharmacy student. | |
# 2. Key concepts and terminology. | |
# 3. Real-world applications or examples in medicine. | |
# 4. Any important safety considerations (if the topic is drug-related). | |
# 5. Use some Hindi terms naturally where appropriate (like आयुर्वेद, औषधि, etc.) to create a relatable tone. | |
# Format your response to be educational, encouraging, and include relevant emojis. | |
# If the question relates to uploaded file content, please reference it specifically in your answer. | |
# Remember: You're helping an Indian pharmacy student, so cultural context and an encouraging tone matter! | |
# """ | |
# response = self.model.generate_content(prompt) | |
# return response.text | |
# except Exception as e: | |
# print(f"Gemini API error in Academic Agent: {e}") | |
# return None # Return None to trigger fallback to local knowledge | |
# def extract_key_terms(self, query): | |
# """Extract key terms from the query to search the local knowledge base.""" | |
# common_words = {'what', 'is', 'the', 'define', 'explain', 'how', 'does', 'work', 'tell', 'me', 'about'} | |
# words = re.findall(r'\b\w+\b', query.lower()) | |
# key_terms = [word for word in words if word not in common_words and len(word) > 2] | |
# return key_terms | |
# def find_best_match(self, key_terms): | |
# """Find the best matching topic in the local knowledge base using a scoring system.""" | |
# best_match = None | |
# max_score = 0 | |
# for topic, content in self.knowledge_base.items(): | |
# score = 0 | |
# topic_words = topic.lower().split('_') | |
# # Check for matches in topic keywords and content | |
# for term in key_terms: | |
# if term in topic_words: | |
# score += 3 | |
# elif term in topic.lower(): | |
# score += 2 | |
# if isinstance(content, dict): | |
# content_str = str(content).lower() | |
# if term in content_str: | |
# score += 1 | |
# if score > max_score: | |
# max_score = score | |
# best_match = topic | |
# return best_match if max_score > 0 else None | |
# def format_response(self, topic, content): | |
# """Format the local knowledge base content in a user-friendly way with Hindi terms.""" | |
# if not isinstance(content, dict): | |
# return f"📚 **{topic.replace('_', ' ').title()}**\n\n{content}" | |
# response_parts = [f"📚 **{topic.replace('_', ' ').title()}**\n"] | |
# key_map = { | |
# 'definition': 'परिभाषा (Definition)', | |
# 'importance': 'महत्व (Importance)', | |
# 'processes': 'प्रक्रियाएं (Processes)', | |
# 'branches': 'शाखाएं (Branches)', | |
# 'concepts': 'मुख्य अवधारणाएं (Key Concepts)', | |
# 'steps': 'चरण (Steps)', | |
# 'location': 'स्थान (Location)', | |
# 'phases': 'चरण (Phases)', | |
# 'enzymes': 'एंजाइम (Enzymes)' | |
# } | |
# for key, title in key_map.items(): | |
# if key in content: | |
# value = content[key] | |
# if isinstance(value, list): | |
# value = ', '.join(value) | |
# response_parts.append(f"**{title}:** {value}\n") | |
# response_parts.append("💡 *Would you like me to create a quiz or mnemonic for this topic?*") | |
# return "\n".join(response_parts) | |
# def generate_general_response(self, query, file_context=""): | |
# """Generate a general helpful response when no specific match is found.""" | |
# file_mention = " I can also answer questions about any files you've uploaded!" if file_context else "" | |
# # More specific greeting if the query mentions pharmacy | |
# if any(word in query.lower() for word in ['pharmacy', 'pharmaceutical', 'drug']): | |
# return f"""📚 **Pharmacy & Pharmaceutical Sciences** | |
# Pharmacy is a fascinating field that bridges chemistry, biology, and medicine! Here are the main areas: | |
# 🔬 **Core Subjects:** | |
# • Pharmacology (औषधि विज्ञान - drug actions) | |
# • Pharmacokinetics (drug movement in body) | |
# • Medicinal Chemistry (drug design) | |
# • Pharmaceutics (drug formulation) | |
# • Pharmacognosy (natural drugs) | |
# 💊 **Career Paths:** | |
# • Clinical Pharmacist | |
# • Industrial Pharmacist | |
# • Research & Development | |
# • Regulatory Affairs | |
# • Hospital Pharmacy | |
# ✨ *"विद्या ददाति विनयं" - Knowledge gives humility* | |
# What specific topic would you like to explore?{file_mention}""" | |
# return f"""🙏 **Namaste!** I'm here to help with your pharmacy studies! I can assist with: | |
# 📚 **Academic Topics:** Pharmacology, Chemistry, Biology concepts | |
# 💊 **Drug Information:** Mechanisms, side effects, interactions | |
# ❓ **Quiz Generation:** Practice questions and flashcards | |
# 🧠 **Mnemonics:** Memory tricks and acronyms | |
# 🗣️ **Viva Practice:** Mock interview sessions | |
# 📄 **File Analysis:** Answer questions about uploaded documents{file_mention} | |
# *Please ask me about a specific topic, or try:* | |
# - "Explain pharmacokinetics" | |
# - "Make a quiz on analgesics" | |
# - "Give me a mnemonic for drug classifications" | |
# **आपका अध्ययन साथी (Your Study Companion)** 📖✨""" | |
# def process_query(self, query, file_context=""): | |
# """ | |
# Main method to process academic queries. | |
# It first tries the Gemini AI model and falls back to the local knowledge base. | |
# """ | |
# try: | |
# # Priority 1: Use AI for a comprehensive response if available. | |
# if self.model: | |
# ai_response = self.process_with_ai(query, file_context) | |
# if ai_response: | |
# return f"🤖 **AI-Powered Response**\n\n{ai_response}" | |
# # Priority 2 (Fallback): Use the local knowledge base. | |
# key_terms = self.extract_key_terms(query) | |
# if not key_terms: | |
# return self.generate_general_response(query, file_context) | |
# best_topic = self.find_best_match(key_terms) | |
# if best_topic: | |
# content = self.knowledge_base[best_topic] | |
# response = self.format_response(best_topic, content) | |
# if file_context: | |
# response += f"\n\n📄 *Note: I see you have uploaded files. Feel free to ask specific questions about their content!*" | |
# return response | |
# else: | |
# # No specific match found, provide general guidance. | |
# return self.generate_general_response(query, file_context) | |
# except Exception as e: | |
# # This is the completed part: a graceful error handler. | |
# print(f"An unexpected error occurred in AcademicAgent.process_query: {e}") | |
# return f"माफ करें (Sorry), I encountered an unexpected error while processing your request. Please try rephrasing your question or try again later." | |
# agents/academic_agent.py | |
""" | |
Academic Agent - Handles general academic questions. | |
Now returns a standardized dictionary instead of a raw string. | |
""" | |
import json | |
import os | |
import re | |
from .agent_helpers import format_history_for_prompt | |
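# Note: `format_history_for_prompt` is defined in agents/agent_helpers.py (not shown in this
# file). It is assumed to flatten a Gemini-style history list such as
#   [{'role': 'user', 'parts': ['What is ADME?']}, {'role': 'model', 'parts': ['ADME stands for ...']}]
# into a plain-text transcript ("User: ...\nAI: ...\n"), mirroring the inline loop used in
# process_with_ai below.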
class AcademicAgent: | |
def __init__(self, gemini_model=None): | |
self.model = gemini_model | |
# The knowledge base logic remains the same | |
self.knowledge_base = self.load_knowledge_base() | |
    # The load_knowledge_base, extract_key_terms, find_best_match, format_response, and
    # generate_general_response methods keep the same logic as the earlier version above
    # (some bodies are abbreviated here); process_with_ai and process_query are updated below.
def load_knowledge_base(self): | |
"""Load pre-built academic knowledge base from a JSON file.""" | |
knowledge_file = 'data/academic_knowledge.json' | |
if not os.path.exists(knowledge_file): | |
# (Content of this method is unchanged) | |
default_knowledge = { "pharmacology": { "definition": "..." } } # (abbreviated for clarity) | |
os.makedirs('data', exist_ok=True) | |
with open(knowledge_file, 'w') as f: | |
json.dump(default_knowledge, f, indent=2) | |
return default_knowledge | |
        try:
            with open(knowledge_file, 'r') as f:
                return json.load(f)
        except (OSError, json.JSONDecodeError) as e:
            print(f"Error loading knowledge base: {e}")
            return {}
# def process_with_ai(self, query, file_context=""): | |
# """Use Gemini AI to provide comprehensive, context-aware answers.""" | |
# if not self.model: return None | |
# try: | |
# # (Content of this method is unchanged) | |
# context_section = f"UPLOADED FILE CONTEXT:\n{file_context}" if file_context else "" | |
# prompt = f"You are an expert pharmacy educator... STUDENT QUESTION: {query}\n{context_section} ..." | |
# response = self.model.generate_content(prompt) | |
# return response.text | |
# except Exception as e: | |
# print(f"Gemini API error in Academic Agent: {e}") | |
# return None | |
# In agents/academic_agent.py -> class AcademicAgent | |
def process_with_ai(self, query, file_context="", chat_history=None): | |
"""Use Gemini AI with conversation history and file context.""" | |
if not self.model: | |
return None | |
# --- NEW HISTORY AND PROMPT LOGIC --- | |
# Format the past conversation for the prompt | |
history_for_prompt = "" | |
if chat_history: | |
for turn in chat_history: | |
# Ensure 'parts' is a list and not empty before accessing | |
if turn.get('parts') and isinstance(turn.get('parts'), list): | |
role = "User" if turn['role'] == 'user' else "AI" | |
history_for_prompt += f"{role}: {turn['parts'][0]}\n" | |
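        # For example, a history like
        #   [{'role': 'user', 'parts': ['What is ADME?']},
        #    {'role': 'model', 'parts': ['ADME stands for Absorption, Distribution, Metabolism, Excretion.']}]
        # is flattened here into:
        #   "User: What is ADME?\nAI: ADME stands for Absorption, Distribution, Metabolism, Excretion.\n"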
# Format the file context | |
context_section = "" | |
if file_context: | |
context_section = f""" | |
--- | |
CONTEXT FROM UPLOADED FILE: | |
{file_context} | |
--- | |
Use the context from the uploaded file above to answer the user's current question if it is relevant. | |
""" | |
# The new prompt structure | |
prompt = f"""You are a helpful and knowledgeable AI pharmacy tutor for a student in India. | |
CONVERSATION HISTORY: | |
{history_for_prompt} | |
{context_section} | |
CURRENT QUESTION: | |
User: {query} | |
Please provide a helpful and accurate answer to the user's CURRENT QUESTION. | |
- If the question is a follow-up, use the CONVERSATION HISTORY to understand the context. | |
- If the question relates to the UPLOADED FILE, prioritize information from that context. | |
- Keep the tone encouraging and professional, in the spirit of Acharya Sushruta.
- Also ask the user if they have any doubts or need further clarification. | |
""" | |
try: | |
# This is a more direct and robust way to send the complete context | |
response = self.model.generate_content(prompt) | |
return response.text | |
except Exception as e: | |
print(f"Gemini API error in Academic Agent: {e}") | |
return None | |
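    # Note: process_with_ai serves the older, string-returning flow (see the commented-out
    # process_query kept at the bottom of this file); the current process_query below builds
    # its prompt and calls self.model.generate_content directly.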
def extract_key_terms(self, query): | |
"""Extract key terms from the query.""" | |
# (Content of this method is unchanged) | |
common_words = {'what', 'is', 'the', 'define', 'explain'} | |
words = re.findall(r'\b\w+\b', query.lower()) | |
return [word for word in words if word not in common_words] | |
    def find_best_match(self, key_terms):
        """Find the best matching topic in the local knowledge base."""
        best_match, max_score = None, 0
        for topic, content in self.knowledge_base.items():
            score = 0
            topic_words = topic.lower().split('_')
            for term in key_terms:
                if term in topic_words:
                    score += 3  # strong: term matches a word in the topic key
                elif term in topic.lower():
                    score += 2  # medium: term appears inside the topic name
                if isinstance(content, dict) and term in str(content).lower():
                    score += 1  # weak: term mentioned anywhere in the topic's content
            if score > max_score:
                max_score, best_match = score, topic
        return best_match
def format_response(self, topic, content): | |
"""Format the local knowledge base content in a user-friendly way.""" | |
# (Content of this method is unchanged) | |
response = f"📚 **{topic.replace('_', ' ').title()}**\n\n" | |
# ... formatting logic ... | |
return response + "💡 *Would you like me to create a quiz or mnemonic?*" | |
def generate_general_response(self, query, file_context=""): | |
"""Generate a general helpful response.""" | |
# (Content of this method is unchanged) | |
return "🙏 **Namaste!** I'm here to help..." | |
# --- THIS IS THE ONLY METHOD THAT CHANGES --- | |
def process_query(self, query: str, file_context: str = "", chat_history: list = None): | |
""" | |
Processes a general academic query using the Gemini model. | |
Args: | |
query (str): The user's full query. | |
file_context (str): Context from any uploaded files. | |
chat_history (list): The history of the conversation. | |
Returns: | |
dict: A dictionary containing the response message and agent metadata. | |
""" | |
if not self.model: | |
return {'message': "📚 My knowledge circuits are offline! The Gemini API key is missing.", 'agent_used': 'academic', 'status': 'error_no_api_key'} | |
history_for_prompt = format_history_for_prompt(chat_history) | |
context_section = f"---\nCONTEXT FROM KNOWLEDGE BASE:\n{file_context}\n---" if file_context else "" | |
# if file_context: | |
# context_section = f"---\nCONTEXT FROM UPLOADED FILE:\n{file_context}\n---" | |
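        # Assumption: for the citation instruction in the prompt below to work, `file_context`
        # should already carry source markers (e.g. filename and page number) added by the
        # retrieval layer; this agent does not attach them itself.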
prompt = f"""You are a helpful and knowledgeable AI pharmacy tutor for a student in India. | |
**CRITICAL INSTRUCTION FOR CITATIONS:** When you use information from the KNOWLEDGE BASE CONTEXT, you MUST cite the source at the end of the relevant sentence using the format `[Source: filename, Page: page_number]`. | |
Your reasoning process must be: | |
1. First, analyze the CONVERSATION HISTORY to understand the immediate context of the CURRENT QUESTION. This is especially important to understand what "this," "that," or "it" refers to. | |
2. Once you understand the user's real question, check whether the KNOWLEDGE BASE CONTEXT is relevant to the topic.
3. Formulate your answer based on this reasoning, keeping an encouraging and professional tone. | |
CONVERSATION HISTORY: | |
{history_for_prompt} | |
{context_section} | |
CURRENT QUESTION: | |
User: {query} | |
""" | |
try: | |
response = self.model.generate_content(prompt) | |
return {'message': response.text, 'agent_used': 'academic', 'status': 'success'} | |
except Exception as e: | |
print(f"Academic Agent Error: {e}") | |
return {'message': f"Sorry, I encountered a problem: {e}", 'agent_used': 'academic', 'status': 'error_api_call'} | |
# def process_query(self, query: str, file_context: str = "",chat_history: list = None): | |
# """ | |
# Main method to process academic queries. | |
# It now returns a standardized dictionary. | |
# """ | |
# response_message = "" | |
# try: | |
# # Priority 1: Use AI for a comprehensive response if available. | |
# if self.model: | |
# ai_response = self.process_with_ai(query, file_context,chat_history) | |
# if ai_response: | |
# response_message = f"🤖 **AI-Powered Response**\n\n{ai_response}" | |
# # Priority 2 (Fallback): Use the local knowledge base if AI fails or is unavailable. | |
# if not response_message: | |
# key_terms = self.extract_key_terms(query) | |
# if not key_terms: | |
# response_message = self.generate_general_response(query, file_context) | |
# else: | |
# best_topic = self.find_best_match(key_terms) | |
# if best_topic: | |
# content = self.knowledge_base[best_topic] | |
# response_message = self.format_response(best_topic, content) | |
# else: | |
# response_message = self.generate_general_response(query, file_context) | |
# except Exception as e: | |
# print(f"An unexpected error occurred in AcademicAgent.process_query: {e}") | |
# response_message = f"माफ करें (Sorry), I encountered an error. Please try again." | |
# # **THE FIX**: Always wrap the final message in the standard dictionary format. | |
# return { | |
# 'message': response_message, | |
# 'agent_used': 'academic', | |
# 'status': 'success' | |
# } |
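
# --- Minimal usage sketch ---
# A hedged example, not part of the agent itself. Assumptions: the `google-generativeai`
# package is installed, GEMINI_API_KEY is set in the environment, and "gemini-1.5-flash" is
# only an example model name. Because of the relative import at the top of this file, run it
# as a module from the project root: `python -m agents.academic_agent`.
if __name__ == "__main__":
    try:
        import google.generativeai as genai
        genai.configure(api_key=os.environ["GEMINI_API_KEY"])
        model = genai.GenerativeModel("gemini-1.5-flash")
    except (ImportError, KeyError) as err:
        print(f"Gemini unavailable ({err}); process_query will report the missing key.")
        model = None

    agent = AcademicAgent(gemini_model=model)
    history = [
        {'role': 'user', 'parts': ['What is pharmacokinetics?']},
        {'role': 'model', 'parts': ['Pharmacokinetics is the study of ADME ...']},
    ]
    result = agent.process_query(
        "How does first-pass metabolism affect oral bioavailability?",
        file_context="",
        chat_history=history,
    )
    print(result['status'], '|', result['agent_used'])
    print(result['message'])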