dlaima commited on
Commit
cc5313c
·
verified ·
1 Parent(s): c3bd339

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -9
app.py CHANGED
@@ -7,6 +7,7 @@ import pandas as pd
7
 
8
  import google.generativeai as genai
9
  from smolagents import CodeAgent, DuckDuckGoSearchTool
 
10
 
11
  # System prompt used by the agent
12
  SYSTEM_PROMPT = """You are a general AI assistant. I will ask you a question.
@@ -17,13 +18,14 @@ If you're asked for a string, don’t use articles or abbreviations (e.g. for ci
17
 
18
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
19
 
20
# Generation result wrapper to match smolagents expectations
class GenerationResult:
    """Lightweight container mimicking the response object smolagents reads.

    Exposes ``content``, ``token_usage``, ``input_tokens`` and
    ``output_tokens`` attributes, which is all the agent framework needs.
    """

    def __init__(self, content, token_usage=None, input_tokens=0, output_tokens=0):
        # A falsy token_usage (None or empty) collapses to a fresh empty dict.
        self.token_usage = token_usage if token_usage else {}
        self.output_tokens = output_tokens
        self.input_tokens = input_tokens
        self.content = content
 
27
 
28
  # Gemini model wrapper
29
  class GeminiFlashModel:
@@ -49,14 +51,12 @@ class GeminiFlashModel:
49
  response = self.model.generate_content(prompt)
50
  return GenerationResult(
51
  content=response.text.strip(),
52
- token_usage={},
53
  input_tokens=0,
54
  output_tokens=0
55
  )
56
  except Exception as e:
57
  return GenerationResult(
58
  content=f"GENERATION ERROR: {e}",
59
- token_usage={},
60
  input_tokens=0,
61
  output_tokens=0
62
  )
 
7
 
8
  import google.generativeai as genai
9
  from smolagents import CodeAgent, DuckDuckGoSearchTool
10
+ from types import SimpleNamespace
11
 
12
  # System prompt used by the agent
13
  SYSTEM_PROMPT = """You are a general AI assistant. I will ask you a question.
 
18
 
19
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
20
 
21
# Gemini-compatible result wrapper that mimics smolagents' expected structure
def GenerationResult(content, token_usage=None, input_tokens=0, output_tokens=0):
    """Return a SimpleNamespace with the attributes smolagents expects.

    Provides ``content``, ``token_usage`` (empty dict when not given),
    ``input_tokens`` and ``output_tokens`` as plain attributes.
    """
    fields = {
        "content": content,
        "token_usage": token_usage or {},
        "input_tokens": input_tokens,
        "output_tokens": output_tokens,
    }
    return SimpleNamespace(**fields)
29
 
30
  # Gemini model wrapper
31
  class GeminiFlashModel:
 
51
  response = self.model.generate_content(prompt)
52
  return GenerationResult(
53
  content=response.text.strip(),
 
54
  input_tokens=0,
55
  output_tokens=0
56
  )
57
  except Exception as e:
58
  return GenerationResult(
59
  content=f"GENERATION ERROR: {e}",
 
60
  input_tokens=0,
61
  output_tokens=0
62
  )