Ajey95 commited on
Commit
1141dbd
·
1 Parent(s): 6c689f5

Configure Git LFS for PDFs and ignore FAISS index

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.pdf filter=lfs diff=lfs merge=lfs -text
.gitignore CHANGED
Binary files a/.gitignore and b/.gitignore differ
 
agents/academic_agent.py CHANGED
@@ -403,15 +403,16 @@ class AcademicAgent:
403
 
404
  history_for_prompt = format_history_for_prompt(chat_history)
405
 
406
- context_section = ""
407
- if file_context:
408
- context_section = f"---\nCONTEXT FROM UPLOADED FILE:\n{file_context}\n---"
409
 
410
  prompt = f"""You are a helpful and knowledgeable AI pharmacy tutor for a student in India.
 
411
 
412
  Your reasoning process must be:
413
  1. First, analyze the CONVERSATION HISTORY to understand the immediate context of the CURRENT QUESTION. This is especially important to understand what "this," "that," or "it" refers to.
414
- 2. Once you understand the user's real question, check if the UPLOADED FILE context is relevant to the topic.
415
  3. Formulate your answer based on this reasoning, keeping an encouraging and professional tone.
416
 
417
  CONVERSATION HISTORY:
 
403
 
404
  history_for_prompt = format_history_for_prompt(chat_history)
405
 
406
+ context_section = f"---\nCONTEXT FROM KNOWLEDGE BASE:\n{file_context}\n---" if file_context else ""
407
+ # if file_context:
408
+ # context_section = f"---\nCONTEXT FROM UPLOADED FILE:\n{file_context}\n---"
409
 
410
  prompt = f"""You are a helpful and knowledgeable AI pharmacy tutor for a student in India.
411
+ **CRITICAL INSTRUCTION FOR CITATIONS:** When you use information from the KNOWLEDGE BASE CONTEXT, you MUST cite the source at the end of the relevant sentence using the format `[Source: filename, Page: page_number]`.
412
 
413
  Your reasoning process must be:
414
  1. First, analyze the CONVERSATION HISTORY to understand the immediate context of the CURRENT QUESTION. This is especially important to understand what "this," "that," or "it" refers to.
415
+ 2. Once you understand the user's real question, Check if the KNOWLEDGE BASE CONTEXT is relevant to the topic.
416
  3. Formulate your answer based on this reasoning, keeping an encouraging and professional tone.
417
 
418
  CONVERSATION HISTORY:
agents/drug_info_agent.py CHANGED
@@ -56,7 +56,7 @@ class DrugInfoAgent:
56
  }
57
 
58
  history_for_prompt = format_history_for_prompt(chat_history)
59
-
60
  prompt = f"""You are a cautious AI Pharmacist Tutor providing educational information like ancient india's Chanakya advisor to Chandragupta Maurya.
61
 
62
  **CRITICAL SAFETY INSTRUCTION:** START EVERY RESPONSE with this disclaimer: "⚠️ **Disclaimer:** This information is for educational purposes ONLY and is not a substitute for professional medical advice."
@@ -68,6 +68,7 @@ Your reasoning process is:
68
 
69
  CONVERSATION HISTORY:
70
  {history_for_prompt}
 
71
  CURRENT QUESTION:
72
  User: {query}
73
 
 
56
  }
57
 
58
  history_for_prompt = format_history_for_prompt(chat_history)
59
+ context_section = f"---\nCONTEXT FROM KNOWLEDGE BASE:\n{file_context}\n---" if file_context else ""
60
  prompt = f"""You are a cautious AI Pharmacist Tutor providing educational information like ancient india's Chanakya advisor to Chandragupta Maurya.
61
 
62
  **CRITICAL SAFETY INSTRUCTION:** START EVERY RESPONSE with this disclaimer: "⚠️ **Disclaimer:** This information is for educational purposes ONLY and is not a substitute for professional medical advice."
 
68
 
69
  CONVERSATION HISTORY:
70
  {history_for_prompt}
71
+ {context_section}
72
  CURRENT QUESTION:
73
  User: {query}
74
 
agents/mnemonic_agent.py CHANGED
@@ -47,10 +47,12 @@ class MnemonicAgent:
47
 
48
  history_for_prompt = format_history_for_prompt(chat_history)
49
  topic = self._extract_topic(query)
50
-
51
  prompt = f"""You are "Mnemonic Master," a creative AI that creates memorable mnemonics for B.Pharmacy students.
52
-
 
53
  CONVERSATION HISTORY:
 
54
  {history_for_prompt}
55
  CURRENT TASK:
56
  User: {query}
 
47
 
48
  history_for_prompt = format_history_for_prompt(chat_history)
49
  topic = self._extract_topic(query)
50
+ context_section = f"---\nCONTEXT FROM KNOWLEDGE BASE:\n{file_context}\n---" if file_context else ""
51
  prompt = f"""You are "Mnemonic Master," a creative AI that creates memorable mnemonics for B.Pharmacy students.
52
+ **CRITICAL INSTRUCTION FOR CITATIONS:** When you use information from the KNOWLEDGE BASE CONTEXT, you MUST cite the source at the end of the relevant sentence using the format `[Source: filename, Page: page_number]`.
53
+ get the topic from {topic}
54
  CONVERSATION HISTORY:
55
+ {context_section}
56
  {history_for_prompt}
57
  CURRENT TASK:
58
  User: {query}
agents/quiz_agent.py CHANGED
@@ -48,14 +48,16 @@ class QuizAgent:
48
 
49
  history_for_prompt = format_history_for_prompt(chat_history)
50
  topic = self._extract_topic(query)
 
51
  task_description = f"Generate a short quiz (3-5 questions) on the topic: **{topic.title()}**."
52
  if file_context:
53
  task_description += f"\nIf relevant, use text from the student's notes for context:\n---\n{file_context}\n---"
54
 
55
  prompt = f"""You are "Quiz Master," an AI that creates educational quizzes like Maryada Ramanna. Maryada Ramanna—he's a legendary character from Indian (particularly South Indian) folklore, often associated with justice, integrity, and cleverness.
56
-
57
  CONVERSATION HISTORY:
58
  {history_for_prompt}
 
59
  CURRENT TASK:
60
  {task_description}
61
 
 
48
 
49
  history_for_prompt = format_history_for_prompt(chat_history)
50
  topic = self._extract_topic(query)
51
+ context_section = f"---\nCONTEXT FROM KNOWLEDGE BASE:\n{file_context}\n---" if file_context else ""
52
  task_description = f"Generate a short quiz (3-5 questions) on the topic: **{topic.title()}**."
53
  if file_context:
54
  task_description += f"\nIf relevant, use text from the student's notes for context:\n---\n{file_context}\n---"
55
 
56
  prompt = f"""You are "Quiz Master," an AI that creates educational quizzes like Maryada Ramanna. Maryada Ramanna—he's a legendary character from Indian (particularly South Indian) folklore, often associated with justice, integrity, and cleverness.
57
+ **CRITICAL INSTRUCTION FOR CITATIONS:** When you use information from the KNOWLEDGE BASE CONTEXT, you MUST cite the source at the end of the relevant sentence using the format `[Source: filename, Page: page_number]`.
58
  CONVERSATION HISTORY:
59
  {history_for_prompt}
60
+ {context_section}
61
  CURRENT TASK:
62
  {task_description}
63
 
agents/viva_agent.py CHANGED
@@ -20,13 +20,13 @@ class VivaAgent:
20
 
21
  context_source = ""
22
  if file_context:
23
- context_source = f"The question should be based on the following text from an uploaded document:\n---\n{file_context}\n---"
24
  else:
25
  context_source = f"The question should be about the B.Pharmacy topic: **{topic}**."
26
 
27
  prompt = f"""
28
  You are a "Viva Coach," a professional and encouraging AI examiner for B.Pharmacy students like Gurus from ancient India.
29
-
30
  **Your Task:**
31
  Generate a SINGLE, insightful, open-ended viva question.
32
 
 
20
 
21
  context_source = ""
22
  if file_context:
23
+ context_source =f"---\nCONTEXT FROM KNOWLEDGE BASE:\n{file_context}\n---" if file_context else ""
24
  else:
25
  context_source = f"The question should be about the B.Pharmacy topic: **{topic}**."
26
 
27
  prompt = f"""
28
  You are a "Viva Coach," a professional and encouraging AI examiner for B.Pharmacy students like Gurus from ancient India.
29
+ **CRITICAL INSTRUCTION FOR CITATIONS:** When you use information from the KNOWLEDGE BASE CONTEXT, you MUST cite the source at the end of the relevant sentence using the format `[Source: filename, Page: page_number]`.
30
  **Your Task:**
31
  Generate a SINGLE, insightful, open-ended viva question.
32
 
app.py CHANGED
@@ -646,7 +646,7 @@ app.config['SECRET_KEY'] = os.getenv('FLASK_SECRET_KEY', 'a-very-secret-key-for-
646
  model = None
647
  vector_store = None
648
  try:
649
- GEMINI_API_KEY = os.getenv('GEMINI_API_KEY')
650
  if GEMINI_API_KEY:
651
  genai.configure(api_key=GEMINI_API_KEY)
652
  model = genai.GenerativeModel('gemini-1.5-flash')
@@ -674,9 +674,21 @@ class MyPharmaAI:
674
  # 1. Search the permanent knowledge base for context.
675
  file_context = ""
676
  if self.vector_store:
677
- relevant_docs = self.vector_store.similarity_search(query, k=3) # Get top 3 results
678
  file_context = "\n".join(doc.page_content for doc in relevant_docs)
679
-
 
 
 
 
 
 
 
 
 
 
 
 
680
  # 2. Pass the retrieved context to the multi-agent router system.
681
  return self.router.route_query(query, file_context, viva_state, chat_history)
682
 
@@ -690,6 +702,7 @@ def index():
690
 
691
  @app.route('/chat', methods=['POST'])
692
  def chat():
 
693
  try:
694
  data = request.get_json()
695
  query = data.get('query', '').strip()
@@ -699,24 +712,44 @@ def chat():
699
  chat_history = session.get('chat_history', [])
700
  viva_state = session.get('viva_state', None)
701
 
702
- # Call the main orchestrator
703
- result = pharma_ai.process_query(query, viva_state, chat_history)
704
-
705
- if result.get('success'):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
706
  chat_history.append({'role': 'user', 'parts': [query]})
707
- chat_history.append({'role': 'model', 'parts': [result.get('message', '')]})
708
  session['chat_history'] = chat_history[-10:]
709
 
710
- if 'viva_state' in result:
711
- session['viva_state'] = result.get('viva_state')
 
712
 
713
- return jsonify(result)
714
 
715
  except Exception as e:
716
- print(f"Error in /chat endpoint: {e}")
717
- return jsonify({'success': False, 'message': f'Server error: {e}', 'agent_used': 'error'}), 500
 
718
 
719
  # --- Main Execution ---
720
  if __name__ == '__main__':
 
721
  port = int(os.environ.get('PORT', 7860))
722
  app.run(host='0.0.0.0', port=port)
 
646
  model = None
647
  vector_store = None
648
  try:
649
+ GEMINI_API_KEY = os.getenv('GOOGLE_API_KEY')
650
  if GEMINI_API_KEY:
651
  genai.configure(api_key=GEMINI_API_KEY)
652
  model = genai.GenerativeModel('gemini-1.5-flash')
 
674
  # 1. Search the permanent knowledge base for context.
675
  file_context = ""
676
  if self.vector_store:
677
+ relevant_docs = self.vector_store.similarity_search(query, k=4) # Get top 4 results
678
  file_context = "\n".join(doc.page_content for doc in relevant_docs)
679
+ context_with_sources = []
680
+ for doc in relevant_docs:
681
+ # Clean up the source path to just the filename
682
+ source_filename = os.path.basename(doc.metadata.get('source', 'Unknown Source'))
683
+ # Page numbers from PyPDF are 0-indexed, so we add 1 for readability
684
+ page_number = doc.metadata.get('page', -1) + 1
685
+
686
+ context_with_sources.append(
687
+ f"[Source: {source_filename}, Page: {page_number}]\n{doc.page_content}"
688
+ )
689
+
690
+ file_context = "\n\n".join(context_with_sources)
691
+
692
  # 2. Pass the retrieved context to the multi-agent router system.
693
  return self.router.route_query(query, file_context, viva_state, chat_history)
694
 
 
702
 
703
  @app.route('/chat', methods=['POST'])
704
  def chat():
705
+ # This function is now the final, stable version.
706
  try:
707
  data = request.get_json()
708
  query = data.get('query', '').strip()
 
712
  chat_history = session.get('chat_history', [])
713
  viva_state = session.get('viva_state', None)
714
 
715
+ # Get the result dictionary from the agent system
716
+ agent_result = pharma_ai.process_query(query, viva_state, chat_history)
717
+
718
+ # --- THIS IS THE FIX ---
719
+ # We now build the final JSON response to match what the JavaScript expects.
720
+ if "error" in agent_result.get('status', ''):
721
+ final_response = {
722
+ 'success': False,
723
+ 'error': agent_result.get('message', 'An unknown error occurred.'),
724
+ 'agent_used': agent_result.get('agent_used', 'error')
725
+ }
726
+ else:
727
+ final_response = {
728
+ 'success': True,
729
+ 'message': agent_result.get('message', 'Sorry, I could not generate a response.'),
730
+ 'agent_used': agent_result.get('agent_used', 'academic')
731
+ }
732
+ # --- END OF FIX ---
733
+
734
+ # Update chat history if the call was successful
735
+ if final_response.get('success'):
736
  chat_history.append({'role': 'user', 'parts': [query]})
737
+ chat_history.append({'role': 'model', 'parts': [final_response.get('message', '')]})
738
  session['chat_history'] = chat_history[-10:]
739
 
740
+ # Handle Viva state if present (no changes needed here)
741
+ if 'viva_state' in agent_result:
742
+ session['viva_state'] = agent_result.get('viva_state')
743
 
744
+ return jsonify(final_response)
745
 
746
  except Exception as e:
747
+ print(f"Critical Error in /chat endpoint: {e}")
748
+ return jsonify({'success': False, 'error': f'A critical server error occurred: {e}', 'agent_used': 'error'}), 500
749
+
750
 
751
  # --- Main Execution ---
752
  if __name__ == '__main__':
753
+ # app.run(host='127.0.0.1', port=5000, debug=True)
754
  port = int(os.environ.get('PORT', 7860))
755
  app.run(host='0.0.0.0', port=port)
faiss_index/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:40247f25d66ec00e349df03dc9f81e3fddb9ef95c5cc184104268b9f8f0d4ce3
3
+ size 653990
knowledge_base/PharmaLite.in Pharmaceutical Biotechnology (Thakur).pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bf5f6857a39bf2c469c7fbd3d5c0de3ce5db3ba633c2c9dddddd20e8bf15e14e
3
+ size 9284177
requirements.txt CHANGED
Binary files a/requirements.txt and b/requirements.txt differ
 
templates/index.html CHANGED
@@ -111,7 +111,7 @@
111
  flex-direction: column;
112
  overflow: hidden;
113
  border: 3px solid var(--gold);
114
- padding: 20px;
115
  }
116
 
117
  .chat-header {
@@ -232,28 +232,16 @@
232
  cursor: not-allowed;
233
  }
234
 
235
- /* Loading Animation */
236
  .loading {
237
- display: none;
238
  text-align: center;
 
239
  color: var(--maroon);
240
- font-style: italic;
241
  }
242
-
243
  .loading.show {
244
- display: block;
245
- }
246
-
247
- .loading::after {
248
- content: "...";
249
- animation: dots 1.5s infinite;
250
- }
251
-
252
- @keyframes dots {
253
- 0%, 20% { content: "..."; }
254
- 40% { content: "....."; }
255
- 60% { content: "......."; }
256
- 80%, 100% { content: "..."; }
257
  }
258
 
259
  /* Quick Actions */
@@ -372,7 +360,7 @@
372
  <button class="quick-btn" onclick="populateInput('Make a quiz on ')">❓ Create Quiz</button>
373
  <button class="quick-btn" onclick="populateInput('Mnemonic for ')">🧠 Mnemonics</button>
374
  </div>
375
- </div>
376
 
377
 
378
  <div class="input-container">
@@ -382,10 +370,12 @@
382
  onkeypress="handleKeyPress(event)">
383
  <button id="sendBtn" onclick="sendMessage()">Send 📨</button>
384
  </div>
385
-
386
  <div class="loading" id="loading">
387
- πŸ”„ Processing your query
388
  </div>
 
 
389
  </div>
390
  </div>
391
  </div>
@@ -408,7 +398,7 @@
408
  body: JSON.stringify({ query: message })
409
  });
410
  const data = await response.json();
411
- showLoading(false);
412
  if (data.success) {
413
  addMessage(data.message, 'bot', data.agent_used);
414
  } else {
@@ -424,7 +414,17 @@
424
  const messagesContainer = document.getElementById('chatMessages');
425
  const messageDiv = document.createElement('div');
426
  messageDiv.className = `message ${sender}`;
427
- const agentIcons = { 'academic': '📚 Academic Agent', 'drug_info': '💊 Drug Info Agent', 'quiz_generation': '❓ Quiz Master' };
 
 
 
 
 
 
 
 
 
 
428
  const agentBadge = sender === 'bot' ? `<div class="agent-badge">${agentIcons[agentType] || '🤖 AI Assistant'}</div>` : '';
429
  const formattedText = marked.parse(text || 'Sorry, I received an empty response.');
430
  messageDiv.innerHTML = `<div class="message-bubble">${agentBadge}${formattedText}</div>`;
@@ -432,22 +432,20 @@
432
  messagesContainer.scrollTop = messagesContainer.scrollHeight;
433
  }
434
 
435
- function showLoading(show) {
 
 
436
  document.getElementById('sendBtn').disabled = show;
437
- const existingLoading = document.getElementById('loading-indicator');
438
- if (existingLoading) existingLoading.remove();
439
 
440
  if (show) {
441
- const messagesContainer = document.getElementById('chatMessages');
442
- const loadingDiv = document.createElement('div');
443
- loadingDiv.className = 'message bot';
444
- loadingDiv.id = 'loading-indicator';
445
- loadingDiv.innerHTML = `<div class="message-bubble"><div class="typing-indicator"><span></span><span></span><span></span></div></div>`;
446
- messagesContainer.appendChild(loadingDiv);
447
- messagesContainer.scrollTop = messagesContainer.scrollHeight;
448
  }
449
  }
450
 
 
451
  function handleKeyPress(event) {
452
  if (event.key === 'Enter' && !event.shiftKey) {
453
  event.preventDefault();
 
111
  flex-direction: column;
112
  overflow: hidden;
113
  border: 3px solid var(--gold);
114
+
115
  }
116
 
117
  .chat-header {
 
232
  cursor: not-allowed;
233
  }
234
 
235
+ /* --- CSS FOR THE LOADING INDICATOR --- */
236
  .loading {
237
+ display: none; /* Hidden by default */
238
  text-align: center;
239
+ margin-top: 15px;
240
  color: var(--maroon);
241
+ font-weight: bold;
242
  }
 
243
  .loading.show {
244
+ display: block; /* Visible when .show class is added */
 
 
 
 
 
 
 
 
 
 
 
 
245
  }
246
 
247
  /* Quick Actions */
 
360
  <button class="quick-btn" onclick="populateInput('Make a quiz on ')">❓ Create Quiz</button>
361
  <button class="quick-btn" onclick="populateInput('Mnemonic for ')">🧠 Mnemonics</button>
362
  </div>
363
+
364
 
365
 
366
  <div class="input-container">
 
370
  onkeypress="handleKeyPress(event)">
371
  <button id="sendBtn" onclick="sendMessage()">Send 📨</button>
372
  </div>
373
+
374
  <div class="loading" id="loading">
375
+ 🔄 Processing your query...
376
  </div>
377
+
378
+
379
  </div>
380
  </div>
381
  </div>
 
398
  body: JSON.stringify({ query: message })
399
  });
400
  const data = await response.json();
401
+ showLoading(false); // Remove loading indicator *before* adding new message
402
  if (data.success) {
403
  addMessage(data.message, 'bot', data.agent_used);
404
  } else {
 
414
  const messagesContainer = document.getElementById('chatMessages');
415
  const messageDiv = document.createElement('div');
416
  messageDiv.className = `message ${sender}`;
417
+
418
+ // This dictionary now includes all agent types
419
+ const agentIcons = {
420
+ 'academic': '📚 Academic Agent',
421
+ 'drug_info': '💊 Drug Info Agent',
422
+ 'quiz_generation': '❓ Quiz Master',
423
+ 'mnemonic_creation': '🧠 Memory Master',
424
+ 'viva_practice': '🗣️ Viva Coach',
425
+ 'error': '⚠️ System'
426
+ };
427
+
428
  const agentBadge = sender === 'bot' ? `<div class="agent-badge">${agentIcons[agentType] || '🤖 AI Assistant'}</div>` : '';
429
  const formattedText = marked.parse(text || 'Sorry, I received an empty response.');
430
  messageDiv.innerHTML = `<div class="message-bubble">${agentBadge}${formattedText}</div>`;
 
432
  messagesContainer.scrollTop = messagesContainer.scrollHeight;
433
  }
434
 
435
+ // This is the correct "typing indicator" loading function
436
+ function showLoading(show) {
437
+ isProcessing = show;
438
  document.getElementById('sendBtn').disabled = show;
439
+ const loadingElement = document.getElementById('loading');
 
440
 
441
  if (show) {
442
+ loadingElement.classList.add('show');
443
+ } else {
444
+ loadingElement.classList.remove('show');
 
 
 
 
445
  }
446
  }
447
 
448
+
449
  function handleKeyPress(event) {
450
  if (event.key === 'Enter' && !event.shiftKey) {
451
  event.preventDefault();
utils/helpers.py CHANGED
@@ -12,7 +12,9 @@ from langchain_community.document_loaders import PyPDFDirectoryLoader
12
  from langchain.text_splitter import RecursiveCharacterTextSplitter
13
  from langchain_google_genai import GoogleGenerativeAIEmbeddings
14
  from langchain_community.vectorstores import FAISS
15
-
 
 
16
  def create_vector_store():
17
  """
18
  Checks if a vector store index exists. If not, it creates one from
@@ -40,7 +42,7 @@ def create_vector_store():
40
  docs = text_splitter.split_documents(documents)
41
 
42
  print(f"--- Creating embeddings and vector store. This may take a moment... ---")
43
- embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
44
  db = FAISS.from_documents(docs, embeddings)
45
  db.save_local(persist_directory)
46
  print("--- Knowledge base created successfully. ---")
 
12
  from langchain.text_splitter import RecursiveCharacterTextSplitter
13
  from langchain_google_genai import GoogleGenerativeAIEmbeddings
14
  from langchain_community.vectorstores import FAISS
15
+ from dotenv import load_dotenv
16
+ load_dotenv()
17
+ GEMINI_API_KEY = os.getenv('GEMINI_API_KEY')
18
  def create_vector_store():
19
  """
20
  Checks if a vector store index exists. If not, it creates one from
 
42
  docs = text_splitter.split_documents(documents)
43
 
44
  print(f"--- Creating embeddings and vector store. This may take a moment... ---")
45
+ embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001", google_api_key=GEMINI_API_KEY)
46
  db = FAISS.from_documents(docs, embeddings)
47
  db.save_local(persist_directory)
48
  print("--- Knowledge base created successfully. ---")