# agents/quiz_agent.py
"""
Quiz Generation Agent - Creates quizzes and flashcards using Generative AI.
"""
import re
from .agent_helpers import format_history_for_prompt
class QuizAgent:
    """Generates short study quizzes (and flashcards) using a Generative AI model."""

    # Phrases users prepend to quiz requests. Compiled once at class level;
    # longer alternatives come first so e.g. "make a quiz on" is consumed
    # before the shorter "quiz on". Word boundaries prevent mid-word matches
    # (e.g. "quiz onward" no longer loses "on").
    _REQUEST_PHRASES = re.compile(
        r"\b(?:make a quiz on|create a quiz on|give me a quiz on"
        r"|quiz on|quiz about|test me on)\b"
    )

    def __init__(self, gemini_model=None):
        """
        Initializes the agent with the Gemini model.

        Args:
            gemini_model: An instance of the Gemini model client, or None when
                the API key is not configured (quiz generation is then disabled).
        """
        self.model = gemini_model

    def _extract_topic(self, query: str) -> str:
        """A simple helper to extract the core topic from the user's query."""
        # Lowercase, strip the request boilerplate, then trim whitespace.
        topic = self._REQUEST_PHRASES.sub("", query.lower())
        return topic.strip()

    def process_query(self, query: str, file_context: str = "", chat_history: list = None):
        """
        Processes a query to generate a quiz.

        Args:
            query (str): The user's full query (e.g., "Make a quiz on analgesics").
            file_context (str): Optional text content from an uploaded file.
            chat_history (list): The history of the conversation (may be None).

        Returns:
            dict: Keys 'message', 'agent_used', and 'status'.
        """
        # Fail fast when no model client was supplied (missing API key).
        if not self.model:
            return {
                'message': "❌ The question bank is locked! The Gemini API key is missing.",
                'agent_used': 'quiz_generation',
                'status': 'error_no_api_key',
            }

        history_for_prompt = format_history_for_prompt(chat_history)
        topic = self._extract_topic(query)

        # The uploaded-file context goes into the prompt exactly once, via
        # context_section. (Previously it was duplicated: embedded both here
        # and appended again to the task description.)
        context_section = (
            f"---\nCONTEXT FROM KNOWLEDGE BASE:\n{file_context}\n---"
            if file_context else ""
        )
        task_description = f"Generate a short quiz (3-5 questions) on the topic: **{topic.title()}**."

        prompt = f"""You are "Quiz Master," an AI that creates educational quizzes like Maryada Ramanna. Maryada Ramanna—he's a legendary character from Indian (particularly South Indian) folklore, often associated with justice, integrity, and cleverness.
**CRITICAL INSTRUCTION FOR CITATIONS:** When you use information from the KNOWLEDGE BASE CONTEXT, you MUST cite the source at the end of the relevant sentence using the format `[Source: filename, Page: page_number]`.
CONVERSATION HISTORY:
{history_for_prompt}
{context_section}
CURRENT TASK:
{task_description}
Based on the CURRENT TASK and conversation history, create a quiz. If the user is asking for a change to a previous quiz (e.g., "make it harder"), do that.
Include a mix of MCQs, True/False, and Fill-in-the-Blank questions.
CRITICAL: Provide a clearly separated "Answer Key" section with answers and brief explanations.
"""
        try:
            response = self.model.generate_content(prompt)
            return {'message': response.text, 'agent_used': 'quiz_generation', 'status': 'success'}
        except Exception as e:
            # Report the failure to the caller rather than raising.
            print(f"Quiz Agent Error: {e}")
            return {
                'message': f"My question book seems to be stuck. Error: {e}",
                'agent_used': 'quiz_generation',
                'status': 'error_api_call',
            }
# NOTE: A commented-out legacy version of process_query was removed here.
# It was superseded by the implementation above (it used inconsistent
# 'agent_type'/'agent_used' keys and an invalid generate_content signature);
# recover it from version control if needed.