File size: 12,790 Bytes
a6d04e1
be49f6d
a6d04e1
be49f6d
216ab9a
b37e303
f194991
 
 
d68e573
b37e303
 
d68e573
 
 
216ab9a
 
d68e573
be49f6d
216ab9a
f194991
 
a6d04e1
216ab9a
 
a6d04e1
216ab9a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
be49f6d
a6d04e1
216ab9a
 
 
 
 
 
 
 
 
 
a6d04e1
216ab9a
 
a6d04e1
216ab9a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f194991
 
b37e303
 
f194991
 
 
216ab9a
f194991
b37e303
a6d04e1
 
216ab9a
be49f6d
 
 
 
 
 
 
 
 
216ab9a
 
 
b37e303
 
216ab9a
b37e303
216ab9a
be49f6d
b37e303
be49f6d
 
a6d04e1
 
 
 
216ab9a
a6d04e1
f194991
 
be49f6d
 
f194991
b37e303
f194991
a6d04e1
216ab9a
 
 
 
 
 
 
 
 
a6d04e1
b37e303
 
a6d04e1
 
be49f6d
a6d04e1
 
 
f194991
 
b37e303
216ab9a
 
 
 
 
f194991
be49f6d
f194991
a6d04e1
 
216ab9a
 
f194991
b37e303
 
216ab9a
b37e303
216ab9a
 
 
 
 
 
 
 
 
f194991
be49f6d
f194991
216ab9a
 
 
 
f194991
b37e303
 
be49f6d
b37e303
 
be49f6d
216ab9a
 
 
 
 
 
 
 
a6d04e1
b37e303
216ab9a
 
 
be49f6d
b37e303
 
f194991
216ab9a
be49f6d
b37e303
f194991
216ab9a
b37e303
 
a6d04e1
be49f6d
b37e303
be49f6d
b37e303
 
a6d04e1
 
b37e303
be49f6d
a6d04e1
b37e303
 
 
216ab9a
b37e303
a6d04e1
 
be49f6d
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
# /home/user/app/agent.py
import os
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.agents import AgentExecutor, create_structured_chat_agent
# from langchain_google_genai import HarmBlockThreshold, HarmCategory # Optional for safety settings

from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage

# --- Import your defined tools FROM THE 'tools' PACKAGE ---
# This relies on tools/__init__.py correctly exporting these names.
from tools import (
    BioPortalLookupTool,
    UMLSLookupTool,
    QuantumTreatmentOptimizerTool,
    # QuantumOptimizerInput, # Only if needed for type hints directly in this file for some reason
    # GeminiTool, # Assuming not used for now as main LLM is Gemini
)

from config.settings import settings # This loads your HF secrets into the settings object
from services.logger import app_logger

# --- Initialize LLM (Gemini) ---
# This block is critical for ensuring the API key is used.
# Runs at import time; on any failure it raises ValueError so the module
# (and therefore the app) refuses to start with a broken LLM.
llm = None # Initialize to None in case of failure
try:
    # Prioritize the API key from settings (loaded from HF Secrets)
    # settings.GEMINI_API_KEY should be populated by Pydantic BaseSettings from the HF Secret
    gemini_api_key_from_settings = settings.GEMINI_API_KEY
    
    # Fallback to environment variable GOOGLE_API_KEY if settings.GEMINI_API_KEY is not found/set
    # (though ideally, settings.GEMINI_API_KEY should be the primary source via HF Secrets)
    api_key_to_use = gemini_api_key_from_settings or os.getenv("GOOGLE_API_KEY")

    if not api_key_to_use:
        # Log first so the failure reason survives even if the exception is
        # swallowed by a caller.
        app_logger.error(
            "CRITICAL: Gemini API Key not found. "
            "Ensure GEMINI_API_KEY is set in Hugging Face Space secrets and loaded into settings, "
            "or GOOGLE_API_KEY is set as an environment variable."
        )
        raise ValueError(
            "Gemini API Key not configured. Please set it in Hugging Face Space secrets "
            "as GEMINI_API_KEY or ensure GOOGLE_API_KEY environment variable is available."
        )

    llm = ChatGoogleGenerativeAI(
        model="gemini-1.5-pro-latest", # Using a more capable model
        # model="gemini-pro", # Fallback if 1.5-pro is not available or for cost reasons
        temperature=0.2, # Low temperature: prefer consistent, factual answers over creativity.
        google_api_key=api_key_to_use, # *** EXPLICITLY PASS THE KEY HERE ***
        convert_system_message_to_human=True, # Often useful for non-OpenAI models
        # Example safety settings (optional, adjust as needed)
        # safety_settings={
        #     HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
        #     HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
        # }
    )
    app_logger.info(f"ChatGoogleGenerativeAI ({llm.model}) initialized successfully using provided API key.")

except Exception as e:
    # This broad exception catch is to provide a clear error message if LLM init fails for any reason.
    detailed_error_message = str(e)
    user_facing_error = f"Gemini LLM initialization failed: {detailed_error_message}. " \
                        "Check API key validity, model name, and configurations in Hugging Face Secrets."
    
    # Heuristic: substring-match the error text to distinguish credential
    # problems (common, actionable by the operator) from everything else.
    if "default credentials were not found" in detailed_error_message.lower() or \
       "could not find default credentials" in detailed_error_message.lower() or \
       "api_key" in detailed_error_message.lower(): # Catch common API key related messages
        user_facing_error = "Gemini LLM initialization failed: API key issue or missing credentials. " \
                            "Ensure GEMINI_API_KEY is correctly set in Hugging Face Secrets and is valid."
        # exc_info=False: the message already says what went wrong; a traceback adds noise.
        app_logger.error(user_facing_error + f" Original error details: {detailed_error_message}", exc_info=False)
    else:
        app_logger.error(user_facing_error, exc_info=True) # Log full traceback for other errors
    
    # Re-raise to stop agent setup if LLM fails. This will be caught in get_agent_executor.
    raise ValueError(user_facing_error)


# --- Initialize Tools List ---
# One instance of each tool the agent is allowed to call. GeminiTool is left
# out on purpose: the primary LLM is already Gemini, so it adds nothing as a
# sub-tool.
tools_list = [
    tool_cls()
    for tool_cls in (UMLSLookupTool, BioPortalLookupTool, QuantumTreatmentOptimizerTool)
]
app_logger.info(f"Agent tools initialized: {[tool.name for tool in tools_list]}")


# --- Agent Prompt (for Structured Chat with Gemini) ---
# NOTE: create_structured_chat_agent() validates that the prompt exposes the
# {tools}, {tool_names} and {agent_scratchpad} input variables and raises a
# ValueError otherwise. The template below therefore carries all three.
SYSTEM_PROMPT_TEMPLATE = (
    "You are 'Quantum Health Navigator', an advanced AI assistant for healthcare professionals. "
    "Your primary goal is to provide accurate information and insights based on user queries and available tools. "
    "You must adhere to the following guidelines:\n"
    "1.  Disclaimers: Always remind the user that you are an AI, not a human medical professional, and your information "
        "is for support, not a substitute for clinical judgment. Do not provide direct medical advice for specific patient cases "
        "unless it's the direct output of a specialized tool like 'quantum_treatment_optimizer'.\n"
    "2.  Patient Context: The user may provide patient context at the start of the session. This context is available as: {patient_context}. "
        "You MUST consider this context when it's relevant to the query, especially for the 'quantum_treatment_optimizer' tool.\n"
    "3.  Tool Usage: You have access to the following tools ({tool_names}):\n{tools}\n" # {tools}/{tool_names} are filled by the agent
    "   To use a tool, respond *only* with a JSON markdown code block with 'action' and 'action_input' keys. "
    "   The 'action_input' must match the schema for the specified tool. Examples:\n"
    "   For `umls_lookup`: ```json\n{{\"action\": \"umls_lookup\", \"action_input\": \"myocardial infarction\"}}\n```\n"
    "   For `bioportal_lookup`: ```json\n{{\"action\": \"bioportal_lookup\", \"action_input\": {{\"term\": \"diabetes mellitus\", \"ontology\": \"SNOMEDCT\"}}}}\n```\n"
    "   For `quantum_treatment_optimizer`: ```json\n{{\"action\": \"quantum_treatment_optimizer\", \"action_input\": {{\"patient_data\": {{\"age\": 55, \"gender\": \"Male\", \"symptoms\": [\"chest pain\"]}}, \"current_treatments\": [\"metformin\"], \"conditions\": [\"Type 2 Diabetes\"]}}}}\n```\n"
    "   Ensure the `action_input` for `quantum_treatment_optimizer` includes a `patient_data` dictionary populated from the overall {patient_context}.\n"
    "4.  Responding to User: After using a tool, you will receive an observation. Use this observation and your knowledge to formulate a comprehensive final answer to the human. Cite the tool if you used one (e.g., 'According to UMLS Lookup...'). Do not output a tool call again unless necessary for a multi-step process.\n"
    "5.  Specific Tool Guidance:\n"
    "   - If asked about treatment optimization for a specific patient (especially if patient context is provided), you MUST use the `quantum_treatment_optimizer` tool.\n"
    "   - For definitions, codes, or general medical concepts, `umls_lookup` or `bioportal_lookup` are appropriate.\n"
    "6.  Conversation Flow: Refer to the `Previous conversation history` to maintain context.\n\n"
    "Begin!\n\n"
    "Previous conversation history:\n"
    "{chat_history}\n\n"
    "New human question: {input}\n"
    "{agent_scratchpad}" # Placeholder for agent's internal thoughts, tool calls, and tool observations
)

# The system template above already carries {agent_scratchpad} as a *string*
# slot, which matches the string-formatted scratchpad the structured chat
# agent produces. A MessagesPlaceholder("agent_scratchpad") here would
# duplicate that variable and expect a list of messages instead of a string,
# so the prompt is a single system message.
prompt = ChatPromptTemplate.from_messages([
    ("system", SYSTEM_PROMPT_TEMPLATE),
])
app_logger.info("Agent prompt template created for Gemini structured chat agent.")

# --- Create Agent ---
# Defensive guard: a failed LLM init should already have raised during module
# load, so llm being None here means something fundamental went wrong (e.g. a
# partial reload). Abort hard rather than crash later with an obscure error.
if llm is None:
    app_logger.critical("LLM object is None at agent creation stage. Cannot proceed.")
    raise SystemExit("Agent LLM failed to initialize. Application cannot start.")

try:
    agent = create_structured_chat_agent(llm=llm, tools=tools_list, prompt=prompt)
except Exception as agent_creation_error:
    # Surface the original failure with full traceback, then wrap it so
    # get_agent_executor callers see a consistent ValueError.
    app_logger.error(f"Failed to create structured chat agent: {agent_creation_error}", exc_info=True)
    raise ValueError(f"Gemini agent creation failed: {agent_creation_error}")
else:
    app_logger.info("Structured chat agent created successfully with Gemini LLM and tools.")


# --- Create Agent Executor ---
# Runtime driver for the agent loop: the agent picks an action, the matching
# tool runs, and the observation is fed back until a final answer (or a stop
# condition) is reached.
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools_list,
    verbose=True,  # Log every reasoning/tool step; helpful while tuning the prompt.
    handle_parsing_errors=True,  # Feed malformed LLM output back to the model instead of raising.
    max_iterations=10,  # Hard cap on think/act cycles per query to prevent runaway loops.
    early_stopping_method="generate",  # NOTE(review): recent LangChain versions accept only "force" here — confirm against the pinned version.
    # return_intermediate_steps=True, # Good for debugging, makes response a dict with 'intermediate_steps'
)
app_logger.info("AgentExecutor with Gemini agent created successfully.")


# --- Getter Function for Streamlit App ---
# The executor is built exactly once, at import time; this module-level
# reference is what the accessor below hands out.
_agent_executor_instance = agent_executor

def get_agent_executor():
    """
    Return the module-level Gemini agent executor.

    The executor is created when this module is first imported; this accessor
    only hands back that singleton and fails loudly if it is missing.
    """
    if _agent_executor_instance is not None:
        return _agent_executor_instance
    # Reaching this point means module-level initialization did not complete,
    # which normally aborts the import entirely — so this is a defensive
    # last resort (e.g. against a partial module reload).
    app_logger.critical("CRITICAL: Agent executor is None when get_agent_executor is called. Re-initialization attempt or fundamental error.")
    raise RuntimeError("Agent executor was not properly initialized. Check application startup logs.")

# --- Example Usage (for local testing of this agent.py file) ---
# Manual smoke test: chat with the agent from a terminal using a simulated
# patient context. Requires a valid Gemini API key; no Streamlit involved.
if __name__ == "__main__":
    # Check if the API key is available for the test
    main_test_api_key = settings.GEMINI_API_KEY or os.getenv("GOOGLE_API_KEY")
    if not main_test_api_key:
        print("🚨 Please set your GOOGLE_API_KEY (for Gemini) in .env file or as an environment variable to run the test.")
    else:
        print("\nπŸš€ Quantum Health Navigator (Gemini Agent Test Console) πŸš€")
        print("-----------------------------------------------------------")
        print("Type 'exit' or 'quit' to stop.")
        print("Example topics: medical definitions, treatment optimization (will use simulated patient context).")
        print("-" * 59)

        try:
            test_executor = get_agent_executor() # Get the executor
        # FIX: get_agent_executor raises RuntimeError when the executor is
        # missing (ValueError only occurs earlier, at module import), so the
        # handler must catch both to show the friendly message.
        except (ValueError, RuntimeError) as e_init:
            print(f"⚠️ Agent initialization failed during test startup: {e_init}")
            print("Ensure your API key is correctly configured.")
            exit() # Exit if agent can't be initialized

        # Rolling chat history passed back to the agent on every turn.
        current_chat_history_for_test_run = []

        # Stand-in for the patient context a clinician would enter in the UI.
        test_patient_context_summary_str = (
            "Age: 58; Gender: Female; Chief Complaint: Recent onset of blurry vision and fatigue; "
            "Key Medical History: Prediabetes, Mild dyslipidemia; "
            "Current Medications: None; Allergies: None known."
        )
        print(f"ℹ️  Simulated Patient Context for this test run: {test_patient_context_summary_str}\n")

        while True:
            user_input_str = input("πŸ‘€ You: ").strip()
            if user_input_str.lower() in ["exit", "quit"]:
                print("πŸ‘‹ Exiting test console.")
                break
            if not user_input_str:
                continue

            try:
                app_logger.info(f"__main__ test: Invoking agent with input: '{user_input_str}'")
                # The executor expects the same variable names as the prompt
                # template: input, chat_history, patient_context.
                response_dict = test_executor.invoke({
                    "input": user_input_str,
                    "chat_history": current_chat_history_for_test_run,
                    "patient_context": test_patient_context_summary_str
                })
                
                ai_output_str = response_dict.get('output', "Agent did not produce an 'output' key.")
                print(f"πŸ€– Agent: {ai_output_str}")
                
                current_chat_history_for_test_run.append(HumanMessage(content=user_input_str))
                current_chat_history_for_test_run.append(AIMessage(content=ai_output_str))
                
                # Keep only the last 10 messages (5 turns) to bound prompt size.
                if len(current_chat_history_for_test_run) > 10:
                    current_chat_history_for_test_run = current_chat_history_for_test_run[-10:]

            except Exception as e:
                # Broad catch is deliberate here: keep the interactive console
                # alive regardless of what the agent invocation throws.
                print(f"⚠️ Error during agent invocation: {e}")
                app_logger.error(f"Error in __main__ agent test invocation: {e}", exc_info=True)