LamiaYT committed on
Commit 3ac106d · 1 Parent(s): ceb787d
Files changed (1)
  1. app.py +28 -87
app.py CHANGED
@@ -35,95 +35,36 @@ except Exception as e:
 
 # --- Core Tools ---
 
-def web_search(query: str) -> str:
-    """Web search with fallbacks"""
-    try:
-        time.sleep(random.uniform(0.5, 1.5))
-
-        # Try Serper API if available
-        serper_key = os.getenv("SERPER_API_KEY")
-        if serper_key:
-            try:
-                url = "https://google.serper.dev/search"
-                payload = json.dumps({"q": query, "num": 5})
-                headers = {
-                    'X-API-KEY': serper_key,
-                    'Content-Type': 'application/json'
-                }
-                response = requests.post(url, headers=headers, data=payload, timeout=10)
-
-                if response.status_code == 200:
-                    data = response.json()
-                    results = []
-
-                    # Get direct answer if available
-                    if 'answerBox' in data:
-                        answer = data['answerBox'].get('answer', '')
-                        if answer:
-                            results.append(answer)
-
-                    # Get knowledge graph info
-                    if 'knowledgeGraph' in data:
-                        kg = data['knowledgeGraph']
-                        title = kg.get('title', '')
-                        desc = kg.get('description', '')
-                        if title and desc:
-                            results.append(f"{title}: {desc}")
-
-                    # Get organic results
-                    if 'organic' in data:
-                        for item in data['organic'][:2]:
-                            title = item.get('title', '')
-                            snippet = item.get('snippet', '')
-                            if title and snippet:
-                                results.append(f"{title} | {snippet}")
-
-                    if results:
-                        return " | ".join(results[:2])  # Return top 2 most relevant
-
-            except Exception as e:
-                print(f"Serper API failed: {e}")
-
-        # Fallback to Wikipedia
-        return wikipedia_search(query)
-
-    except Exception as e:
-        return f"Search error: {str(e)}"
-
+@tool
 def wikipedia_search(query: str) -> str:
-    """Wikipedia search"""
-    try:
-        clean_query = re.sub(r'[^a-zA-Z0-9 ]', '', query)[:100]
-
-        params = {
-            'action': 'query',
-            'format': 'json',
-            'list': 'search',
-            'srsearch': clean_query,
-            'srlimit': 3,
-            'srprop': 'snippet'
-        }
-
-        response = requests.get(
-            "https://en.wikipedia.org/w/api.php",
-            params=params,
-            timeout=8,
-            headers={'User-Agent': 'GAIA-Agent/1.0'}
-        )
-
-        if response.status_code == 200:
-            data = response.json()
-
-            for item in data.get('query', {}).get('search', []):
-                title = item.get('title', '')
-                snippet = re.sub(r'<[^>]+>', '', item.get('snippet', ''))
-                if title and snippet:
-                    return f"{title}: {snippet}"
-
-        return f"No Wikipedia results for: {clean_query}"
-
-    except Exception as e:
-        return f"Wikipedia error: {str(e)}"
+    """Search Wikipedia for a query and return maximum 2 results.
+
+    Args:
+        query: The search query."""
+    search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
+    formatted_search_docs = "\n\n---\n\n".join(
+        [
+            f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
+            for doc in search_docs
+        ])
+    return {"wiki_results": formatted_search_docs}
+
+@tool
+def web_search(query: str) -> str:
+    """Search Tavily for a query and return maximum 3 results.
+
+    Args:
+        query: The search query."""
+    search_docs = TavilySearchResults(max_results=3).invoke(query=query)
+    formatted_search_docs = "\n\n---\n\n".join(
+        [
+            f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
+            for doc in search_docs
+        ])
+    return {"web_results": formatted_search_docs}
+
+
 
 def extract_youtube_info(url: str) -> str:
     """Extract YouTube video information"""