# -*- coding: utf-8 -*-
import os
import gradio as gr
# Import the main module under an alias; request/response message classes are
# accessed via genai.protos, and safety enums via genai.types.
import google.generativeai as genai

import requests
import markdownify
from urllib.robotparser import RobotFileParser
from urllib.parse import urlparse
import traceback
import json  # Not used directly below, but handy when debugging tool-call arguments

# --- Browser/Web Tool Functions ---

def can_crawl_url(url: str, user_agent: str = "PythonGoogleGenAIAgent/1.0") -> bool:
    """Check robots.txt permissions for a URL"""
    if not url:
        print("No URL provided to can_crawl_url")
        return False
    try:
        parsed_url = urlparse(url)
        if not parsed_url.scheme or not parsed_url.netloc:
            print(f"Invalid URL format for robots.txt check: {url}")
            return False
        robots_url = f"{parsed_url.scheme}://{parsed_url.netloc}/robots.txt"
        print(f"Checking robots.txt at: {robots_url} for URL: {url}")
        rp = RobotFileParser()
        rp.set_url(robots_url)
        rp.read()
        can_fetch = rp.can_fetch(user_agent, url)
        print(f"Can fetch {url} with agent '{user_agent}': {can_fetch}")
        return can_fetch
    except Exception as e:
        print(f"Error checking robots.txt for {url}: {e}")
        return False
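
# Illustrative note (hypothetical values): treat the result as advisory, and note
# that any exception above maps to False, so an unreachable robots.txt reads as
# "not crawlable" here.
#
#   can_crawl_url("https://www.example.com/some/page")  # True when robots.txt allows it
#   can_crawl_url("not-a-url")                          # False (invalid URL format)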

def load_page(url: str) -> str:
    """
    Load webpage content as markdown. Designed to be used as a Gemini Function.
    Args:
        url: The URL of the webpage to load.
    Returns:
        Markdown content of the page or an error message.
    """
    print(f"Attempting to load page: {url}")
    if not url:
        return "Error: No URL provided."
    if not url.startswith(('http://', 'https://')):
        return f"Error: Invalid URL scheme. Please provide an http or https URL. Got: {url}"

    USER_AGENT = "PythonGoogleGenAIAgent/1.0 (Function Calling)"
    if not can_crawl_url(url, user_agent=USER_AGENT):
        print(f"URL {url} failed robots.txt check for agent {USER_AGENT}")
        return f"Error: Access denied by robots.txt for URL {url}"
    try:
        headers = {'User-Agent': USER_AGENT}
        response = requests.get(url, timeout=15, headers=headers, allow_redirects=True)
        response.raise_for_status()
        content_type = response.headers.get('content-type', '').lower()
        if 'html' not in content_type:
            print(f"Non-HTML content type '{content_type}' at {url}. Returning summary.")
            return f"Content at {url} is of type '{content_type}'. Size: {len(response.content)} bytes. Cannot convert to Markdown."

        MAX_CONTENT_SIZE = 1_000_000
        if len(response.content) > MAX_CONTENT_SIZE:
            print(f"Content size {len(response.content)} exceeds limit {MAX_CONTENT_SIZE}. Truncating.")
            try:
                html_content = response.content[:MAX_CONTENT_SIZE].decode(response.apparent_encoding or 'utf-8', errors='ignore')
            except Exception as decode_err:
                print(f"Decoding error after truncation: {decode_err}. Falling back to utf-8 ignore.")
                html_content = response.content[:MAX_CONTENT_SIZE].decode('utf-8', errors='ignore')
            truncated_msg = "\n\n[Content truncated due to size limit]"
        else:
            html_content = response.text
            truncated_msg = ""

        markdown_content = markdownify.markdownify(html_content, heading_style="ATX", strip=['script', 'style'], escape_underscores=False)
        markdown_content = '\n'.join([line.strip() for line in markdown_content.splitlines() if line.strip()])
        print(f"Successfully loaded and converted {url} to markdown.")
        return f"Content from {url}:\n\n" + markdown_content + truncated_msg

    except requests.exceptions.Timeout:
        print(f"Timeout error loading page: {url}")
        return f"Error: Timeout while trying to load {url}"
    except requests.exceptions.RequestException as e:
        print(f"Request error loading page {url}: {str(e)}")
        return f"Error loading page {url}: {str(e)}"
    except Exception as e:
        print(f"General error loading page {url}: {str(e)}")
        traceback.print_exc()
        return f"Error loading page {url}: An unexpected error occurred ({type(e).__name__})."
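
# Minimal usage sketch for load_page (the URL is illustrative). Failures come
# back as strings with an "Error:" prefix, which is exactly how they are
# surfaced to the model as tool output.
#
#   text = load_page("https://www.example.com/")
#   if text.startswith("Error:"):
#       ...  # pass the error string through unchanged
#   else:
#       ...  # text begins with "Content from <url>:" followed by markdown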


# --- Gemini Client Initialization and Configuration ---
MODEL_NAME = "gemini-2.5-pro-exp-03-25"  # Defined before the try block so the UI header below can always reference it

try:
    api_key = os.environ.get("GEMINI_API_KEY")
    if not api_key:
        raise ValueError("GEMINI_API_KEY environment variable not set.")
    genai.configure(api_key=api_key)

    print(f"Attempting to use EXPERIMENTAL model: {MODEL_NAME}")

    # Define the tools. The request/response message classes live in
    # genai.protos (genai.types does not expose Schema or Type).
    browse_tool = genai.protos.Tool(
        function_declarations=[
            genai.protos.FunctionDeclaration(
                name='load_page',
                description='Fetches the content of a specific web page URL as Markdown text. Use this when the user asks for information from a specific URL they provide, or when you need to look up live information mentioned alongside a specific source URL.',
                parameters=genai.protos.Schema(
                    type=genai.protos.Type.OBJECT,
                    properties={
                        'url': genai.protos.Schema(type=genai.protos.Type.STRING, description="The *full* URL of the webpage to load (must start with http:// or https://).")
                    },
                    required=['url']
                )
            )
        ]
    )
    # Enable the built-in code execution tool.
    code_execution_tool = genai.protos.Tool(code_execution=genai.protos.CodeExecution())

    tools = [browse_tool, code_execution_tool]

    # Create the model instance using genai alias
    model = genai.GenerativeModel(
        model_name=MODEL_NAME,
        tools=tools,
        safety_settings={
             # Access HarmCategory and HarmBlockThreshold via genai.types
             genai.types.HarmCategory.HARM_CATEGORY_HARASSMENT: genai.types.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
             genai.types.HarmCategory.HARM_CATEGORY_HATE_SPEECH: genai.types.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
             genai.types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: genai.types.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
             genai.types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: genai.types.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
        },
        system_instruction="You are a helpful AI assistant called Gemini-Toolkit. You can browse specific web pages provided by the user via the 'load_page' tool. You can also execute Python code using the 'code_execution' tool to perform calculations, analyze data, or demonstrate programming concepts. Explain your reasoning and the steps you take. If asked to browse, confirm the URL you are accessing. If providing code, explain what it does.",
    )
    print(f"Gemini client initialized with model: {MODEL_NAME} and tools.")

except Exception as e:
    print(f"CRITICAL ERROR: Error initializing Gemini client: {e}")
    traceback.print_exc()
    model = None
    tools = []


# --- Gradio App Logic ---

def handle_function_call(function_call):
    """Executes the function call requested by the model."""
    function_name = function_call.name
    args = function_call.args # Dict-like object

    print(f"Executing Function Call: {function_name} with args: {dict(args)}")

    try:
        if function_name == 'load_page':
            url = args.get('url')
            if url:
                function_response_content = load_page(url=url)
                MAX_RESPONSE_LEN = 50000
                if len(function_response_content) > MAX_RESPONSE_LEN:
                    print(f"Tool Response truncated from {len(function_response_content)} to {MAX_RESPONSE_LEN} chars.")
                    function_response_content = function_response_content[:MAX_RESPONSE_LEN] + "\n\n[... Tool Response Truncated Due to Size Limit ...]"
            else:
                function_response_content = "Error: URL parameter was missing in the function call. Please ensure the 'url' argument is provided."
        else:
            print(f"Error: Received call for unknown function '{function_name}'")
            function_response_content = f"Error: Unknown function '{function_name}' called by the model."

        # Wrap the tool output in a FunctionResponse part (proto message classes).
        function_response_part = genai.protos.Part(
            function_response=genai.protos.FunctionResponse(
                name=function_name,
                response={'content': function_response_content}
            )
        )
        print(f"Function Response generated for {function_name}")
        return function_response_part

    except Exception as e:
        print(f"Error during execution of function '{function_name}': {e}")
        traceback.print_exc()
        # Report the failure back to the model in the same FunctionResponse shape.
        return genai.protos.Part(
            function_response=genai.protos.FunctionResponse(
                name=function_name,
                response={'error': f"Failed to execute function {function_name}: {str(e)}"}
            )
        )
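
# Round-trip sketch for handle_function_call, assuming the model requested
# load_page (the arguments are hypothetical):
#
#   call = genai.protos.FunctionCall(name="load_page", args={"url": "https://www.example.com/"})
#   part = handle_function_call(call)
#   # part.function_response.name == "load_page"
#   # part.function_response.response["content"] holds the markdown or an error string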

def generate_response_with_tools(user_input, history_state):
    """Handles user input, interacts with Gemini (incl. tools), and manages history."""
    if not model:
        return [[None, "Error: The AI model (Gemini) could not be initialized. Please check the logs or API key configuration."]], history_state or []

    if not user_input.strip():
        return [[None, "Please enter a valid query."]], history_state or []

    # --- History Management ---
    conversation_history = history_state if isinstance(history_state, list) else []
    # History entries are genai.protos.Content objects.
    conversation_history.append(genai.protos.Content(role="user", parts=[genai.protos.Part(text=user_input)]))
    print(f"\n--- Sending to Gemini (History length: {len(conversation_history)}) ---")

    MAX_HISTORY_TURNS = 10
    max_history_items = MAX_HISTORY_TURNS * 2 + (1 if conversation_history and conversation_history[0].role == "system" else 0)
    if len(conversation_history) > max_history_items:
        print(f"Trimming conversation history from {len(conversation_history)} items to ~{max_history_items}")
        if conversation_history[0].role == "system":
            conversation_history = [conversation_history[0]] + conversation_history[-(max_history_items - 1):]
        else:
            conversation_history = conversation_history[-max_history_items:]
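    # Worked example of the cap: with MAX_HISTORY_TURNS = 10 this keeps at most
    # 20 Content items (10 user/model pairs), or 21 with a pinned system turn
    # (system instructions are passed separately here, so that branch is defensive).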

    # --- Interaction Loop ---
    MAX_TOOL_LOOPS = 5
    loop_count = 0
    current_history_for_api = list(conversation_history)
    final_bot_message = "" # Initialize variable to hold the final message text

    try:
        while loop_count < MAX_TOOL_LOOPS:
            loop_count += 1
            print(f"Generation loop {loop_count}/{MAX_TOOL_LOOPS}...")

            response = model.generate_content(
                current_history_for_api,
                request_options={"timeout": 120},
            )

            if not response.candidates:
                print("Warning: No candidates received from Gemini.")
                final_bot_message = "[No response generated by the model.]"
                current_history_for_api.append(genai.protos.Content(role="model", parts=[genai.protos.Part(text=final_bot_message)]))
                break

            candidate = response.candidates[0]
            finish_reason = candidate.finish_reason

            # Append model's turn to history *before* potentially executing tools
            # This includes text parts and potential function_call parts
            if candidate.content:
                current_history_for_api.append(candidate.content)
            else:
                # An empty model turn falls through: with no function_call parts
                # it is handled as a final (empty) response below.
                print("Warning: Candidate content is empty.")

            # Check for safety blocks or other abnormal stops first. In this SDK a
            # function call still finishes with FinishReason.STOP; there is no
            # separate TOOL_CALL reason, so tool use is detected from the parts below.
            if finish_reason != genai.protos.Candidate.FinishReason.STOP:
                print(f"Warning: Generation stopped unexpectedly. Reason: {finish_reason.name}")
                stop_reason_msg = f"[Model stopped generating. Reason: {finish_reason.name}]"
                # Extract any partial text response
                partial_text = ""
                if candidate.content and candidate.content.parts:
                    partial_text = "".join([p.text for p in candidate.content.parts if p.text])
                final_bot_message = (partial_text + "\n" if partial_text else "") + stop_reason_msg
                # The partial model turn was already appended to history above.
                break  # Exit loop

            # Detect a tool call: the model includes function_call parts in its
            # reply. Proto parts always expose the attribute, so test for a
            # populated name rather than using hasattr().
            function_calls = []
            if candidate.content and candidate.content.parts:
                function_calls = [part.function_call for part in candidate.content.parts
                                  if part.function_call.name]

            if function_calls:
                print("Tool call requested by model.")

                tool_responses = []
                for func_call in function_calls:
                    tool_responses.append(handle_function_call(func_call))

                if not tool_responses:
                    print("Warning: No valid tool responses generated despite a tool call.")
                    final_bot_message = "[Failed to process tool call request.]"
                    break

                # Add the tool execution results to history. The API documents only
                # 'user' and 'model' roles, so function responses go back in a
                # user-role turn.
                current_history_for_api.append(genai.protos.Content(role="user", parts=tool_responses))
                print("Added tool response(s) to history. Continuing loop...")
                continue # Go back to the start of the while loop

            else:  # STOP with no tool call: this is the final response.
                print("No tool call requested. Final response received.")
                # Extract final text and any code suggestions/results
                final_bot_message = ""
                code_parts_display = []
                if candidate.content and candidate.content.parts:
                    for part in candidate.content.parts:
                        # Test populated field values; hasattr() is always True on protos.
                        if part.text:
                            final_bot_message += part.text
                        if part.executable_code.code:
                            lang = part.executable_code.language.name.lower()
                            display_lang = 'python' if lang == 'language_unspecified' else lang
                            code_parts_display.append(f"Suggested Code ({display_lang}):\n```{display_lang}\n{part.executable_code.code}\n```")
                        elif part.code_execution_result.outcome:
                            outcome_ok = genai.protos.CodeExecutionResult.Outcome.OUTCOME_OK
                            outcome_str = "Success" if part.code_execution_result.outcome == outcome_ok else "Failure"
                            code_parts_display.append(f"Code Execution Result ({outcome_str}):\n```\n{part.code_execution_result.output}\n```")

                if code_parts_display:
                    final_bot_message += "\n\n" + "\n\n".join(code_parts_display)

                if not final_bot_message.strip():
                    final_bot_message = "[Assistant completed its turn without generating text output.]"
                    # The empty model turn is already in history.

                break # Exit the while loop

        # End of while loop
        if loop_count >= MAX_TOOL_LOOPS:
            print(f"Warning: Reached maximum tool execution loops ({MAX_TOOL_LOOPS}).")
            final_bot_message = (final_bot_message + "\n\n" if final_bot_message else "") + f"[Warning: Reached maximum tool execution loops ({MAX_TOOL_LOOPS}). The final response might be incomplete.]"
            # Make sure the warning also lands in the stored history.
            if current_history_for_api and current_history_for_api[-1].role == "model":
                current_history_for_api[-1].parts.append(genai.protos.Part(text="\n[Warning: Max loops reached]"))
            else:
                current_history_for_api.append(genai.protos.Content(role="model", parts=[genai.protos.Part(text=final_bot_message)]))


        print("--- Response Generation Complete ---")
        # --- Format final output for Gradio Chatbot ---
        chatbot_display_list = []
        user_msg_buffer = None # To hold user message until bot reply comes
        for i, content in enumerate(current_history_for_api):
            if content.role == "system": continue # Skip system prompt in display

            # Combine parts into a single message string for display
            display_text = ""
            if content.parts:
                for part in content.parts:
                    # Test populated field values rather than hasattr(), which is
                    # always True on proto parts.
                    if part.text:
                        display_text += part.text + "\n"
                    elif part.executable_code.code:
                        lang = part.executable_code.language.name.lower()
                        display_lang = 'python' if lang == 'language_unspecified' else lang
                        display_text += f"\nSuggested Code ({display_lang}):\n```{display_lang}\n{part.executable_code.code}\n```\n"
                    elif part.code_execution_result.outcome:
                        outcome_ok = genai.protos.CodeExecutionResult.Outcome.OUTCOME_OK
                        outcome_str = "Success" if part.code_execution_result.outcome == outcome_ok else "Failure"
                        display_text += f"\nCode Execution Result ({outcome_str}):\n```\n{part.code_execution_result.output}\n```\n"
                    # Optional: Display indications of tool use (can make chat noisy)
                    # elif hasattr(part, 'function_call') and part.function_call: display_text += f"[Requesting tool: {part.function_call.name}...]\n"
                    # elif hasattr(part, 'function_response') and part.function_response: display_text += f"[Tool '{part.function_response.name}' response processed.]\n"

            display_text = display_text.strip()

            if not display_text: continue  # Skip turns with nothing to display (e.g., function-response turns)

            if content.role == "user":
                user_msg_buffer = display_text # Store user message
                # Don't append to display list yet, wait for model response
            elif content.role == "model":
                if user_msg_buffer is not None:
                    # We have a user message and now the model's response
                    chatbot_display_list.append([user_msg_buffer, display_text])
                    user_msg_buffer = None # Clear buffer
                else:
                    # Model message without preceding user message (e.g., initial greeting or consecutive model turns)
                    chatbot_display_list.append([None, display_text])
            # Function-response turns carry no display text and are skipped above.

        # If the loop ended with a user message still buffered (e.g., an error
        # occurred before the model replied), show it without a bot reply.
        if user_msg_buffer is not None:
            chatbot_display_list.append([user_msg_buffer, None])


        return chatbot_display_list, current_history_for_api

    except Exception as e:
        print(f"ERROR during Gemini generation or tool processing: {str(e)}")
        traceback.print_exc()
        error_message = f"An error occurred: {str(e)}"
        # Return error in chatbot format, maintain previous history state
        # Build display history from existing state + error
        error_display_list = []
        if isinstance(history_state, list):
            # Simplified history-to-display conversion for the error case
            temp_user_msg = None
            for content in history_state:
                if content.role == "user":
                    temp_user_msg = "".join(p.text for p in content.parts)
                elif content.role == "model" and temp_user_msg:
                    model_text = "".join(p.text for p in content.parts)
                    error_display_list.append([temp_user_msg, model_text])
                    temp_user_msg = None
            if temp_user_msg:
                error_display_list.append([temp_user_msg, None])  # Dangling user message

        error_display_list.append([None, error_message]) # Add the error message

        # Return the state *before* the error occurred
        return error_display_list, conversation_history[:-1] # Exclude the failed user turn
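
# Shape sketch of the function's contract (illustrative values): the first
# return value feeds gr.Chatbot as [user, bot] string pairs, the second is the
# raw genai.protos.Content history kept in gr.State.
#
#   display, state = generate_response_with_tools("What is 2+2?", [])
#   # display -> [["What is 2+2?", "4"]]
#   # state   -> [Content(role="user", ...), Content(role="model", ...)]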


# --- Gradio Interface ---

with gr.Blocks(title="Gemini AI Assistant w/ Tools", theme=gr.themes.Soft()) as demo:
    gr.Markdown(f"# 🚀 Gemini AI Assistant ({MODEL_NAME})")
    gr.Markdown("Ask questions, request info from specific URLs, or ask for code/calculations. Uses function calling and code execution.")

    chatbot_display = gr.Chatbot(
        label="Conversation",
        bubble_full_width=False,
        height=600,
        show_copy_button=True,
        render_markdown=True
    )

    with gr.Row(): # Arrange input and buttons horizontally
        msg_input = gr.Textbox(
            label="Your Query",
            placeholder="Ask anything...",
            lines=3,
            scale=4 # Input takes more space
        )
        with gr.Column(scale=1, min_width=150): # Column for buttons
            send_btn = gr.Button("➡️ Send", variant="primary")
            clear_btn = gr.ClearButton(value="🗑️ Clear Chat")

    # Hidden state storing the raw conversation history (a list of genai.protos.Content)
    chat_history_state = gr.State([])

    def user_message_update(user_message, history_display_list):
        """Appends the user's message to the display list and clears the input."""
        if not user_message.strip():
            return gr.update(value=""), history_display_list
        return gr.update(value=""), history_display_list + [[user_message, None]]  # None is the placeholder for the bot reply

    def bot_response_update(history_display_list, history_state):
        """Calls the backend Gemini function and updates display/state."""
        if not history_display_list or history_display_list[-1][1] is not None:
            # Only proceed if there is a pending user message (placeholder is None)
            print("Bot update called without pending user message.")
            # Should return current state if called incorrectly
            return history_display_list, history_state

        user_message = history_display_list[-1][0]
        print(f"User message being sent to backend: {user_message}")

        # Call the main Gemini interaction function
        updated_display_list, updated_history_state = generate_response_with_tools(user_message, history_state)

        return updated_display_list, updated_history_state

    # --- Event Listeners ---
    msg_input.submit(
        user_message_update,
        [msg_input, chatbot_display],
        [msg_input, chatbot_display],
        queue=False,
    ).then(
        bot_response_update,
        [chatbot_display, chat_history_state],
        [chatbot_display, chat_history_state] # Update display and state
    )

    send_btn.click(
        user_message_update,
        [msg_input, chatbot_display],
        [msg_input, chatbot_display],
        queue=False,
    ).then(
        bot_response_update,
        [chatbot_display, chat_history_state],
        [chatbot_display, chat_history_state]
    )
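
    # Both triggers share a two-step pattern: user_message_update echoes the
    # user's message immediately (queue=False, no network work), then
    # bot_response_update runs through the queue, where the potentially slow
    # Gemini and tool calls happen.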

    # A custom clear function is used instead of ClearButton.add(), which may not
    # reset gr.State; this also empties the hidden history state.
    def clear_all():
        return ["", None, []]  # Clears the Textbox, the Chatbot display, and the State

    clear_btn.click(clear_all, [], [msg_input, chatbot_display, chat_history_state], queue=False)


if __name__ == "__main__":
    print("Starting Gradio App...")
    demo.queue().launch(server_name="0.0.0.0", server_port=7860)
    print("Gradio App Stopped.")