# tools.py

import os
import re
import time
from pathlib import Path

import openai
import pandas as pd
import requests
from duckduckgo_search import DDGS
from langchain.schema import SystemMessage, HumanMessage
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI

DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"


def _download_file_for_task(task_id: str, ext: str) -> str:
    """
    Helper: attempt to GET the remote file for a given task_id.
    Saves under ./hf_files/{task_id}.{ext}. Returns the local path on success,
    or an empty string if no file exists or the download failed.
    """
    print("reached _download_file_for_task")
    os.makedirs("hf_files", exist_ok=True)
    local_path = os.path.join("hf_files", f"{task_id}.{ext}")
    url = f"{DEFAULT_API_URL}/files/{task_id}"

    try:
        resp = requests.get(url, timeout=10)
        if resp.status_code == 200 and resp.content:
            print(f"Downloaded file from {url} to {local_path}")
            with open(local_path, "wb") as f:
                f.write(resp.content)
            return local_path
    except Exception as e:
        print(f"Error downloading file from {url}: {e}")

    # If we get here, the server returned a non-200 status or the request failed
    return ""

@tool
def image_tool(task_id: str) -> str:
    """
    Expects: task_id is a string.
    Returns: OCR text plus a brief image caption, or an error message.
    """
    print("reached image_tool")
    # 1) Try the common image extensions until a download succeeds
    local_img = ""
    for ext in ("png", "jpg", "jpeg"):
        candidate = _download_file_for_task(task_id, ext)
        if candidate:
            local_img = candidate
            break

    if not local_img or not os.path.exists(local_img):
        return "Error: No image file found (download failed for png/jpg/jpeg)."

    # 2) Read raw bytes
    try:
        with open(local_img, "rb") as f:
            image_bytes = f.read()
    except Exception as e:
        return f"Error reading image file: {e}"

    # 3) Prepare HF Inference headers
    hf_token = os.getenv("HF_TOKEN")
    if not hf_token:
        return "Error: HF_TOKEN not set in environment."

    headers = {"Authorization": f"Bearer {hf_token}"}

    # 4) Call the hosted OCR model to extract text. The HF Inference API takes
    # the raw image bytes as the request body (`data=`), not a multipart form.
    ocr_text = ""
    try:
        ocr_resp = requests.post(
            "https://api-inference.huggingface.co/models/google/vit-ocr",
            headers=headers,
            data=image_bytes,
            timeout=30,
        )
        ocr_resp.raise_for_status()
        ocr_json = ocr_resp.json()

        # The JSON has "pages" -> list of blocks -> "lines" -> each line has "text"
        lines = []
        for page in ocr_json.get("pages", []):
            for line in page.get("lines", []):
                lines.append(line.get("text", "").strip())
        ocr_text = "\n".join(lines).strip() or "(no visible text)"
    except Exception as e:
        ocr_text = f"Error during HF OCR: {e}"

    # 5) Call HF's image-captioning model to get a brief description
    caption = ""
    try:
        cap_resp = requests.post(
            "https://api-inference.huggingface.co/models/Salesforce/blip-image-captioning-base",
            headers=headers,
            data=image_bytes,
            timeout=30,
        )
        cap_resp.raise_for_status()
        cap_json = cap_resp.json()
        # The response is a list like: [{"generated_text": "...caption..."}]
        if isinstance(cap_json, list) and cap_json:
            caption = cap_json[0].get("generated_text", "").strip()
        else:
            caption = str(cap_json.get("generated_text", "")).strip()
        if not caption:
            caption = "(no caption returned)"
    except Exception as e:
        caption = f"Error during HF captioning: {e}"

    # 6) Combine OCR + caption
    combined = f"OCR text:\n{ocr_text}\n\nImage caption:\n{caption}"
    print("combined: ")
    return combined
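
# Direct-call sketch (the task id below is made up). A single-input @tool can
# be invoked with just the string:
#
#   print(image_tool.invoke("fake-task-id"))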

@tool
def excel_tool(task_id: str) -> str:
    """
    Downloads <task_id>.xlsx (if any) and returns a stringified list of
    records from the specified sheet. No fallback to user-supplied tables.
    Expects:
        • task_id – required (used to download the file)
    Returns: stringified list of records, or an error message.
    """
    print("reached excel_tool")
    sheet = "Sheet1"

    local_xlsx = _download_file_for_task(task_id, "xlsx")
    if not local_xlsx or not os.path.exists(local_xlsx):
        return "Error: Excel file not found for this task."

    try:
        xls = pd.ExcelFile(local_xlsx)
        df  = pd.read_excel(
            xls,
            sheet_name=sheet if sheet and sheet in xls.sheet_names else xls.sheet_names[0]
        )
        print(f"Excel file read successfully: {str(df.to_dict(orient='records'))}")
        return str(df.to_dict(orient="records"))
    except Exception as e:
        return f"Error reading Excel file: {e}"
    

@tool
def audio_transcriber_tool(task_id: str) -> str:
    """
    Transcribes audio via OpenAI's Whisper API.
    Expects: task_id is a string.
    Returns: the transcript text, or an error message.
    Always attempts to download the file for the given task ID.
    """
    print("reached audio_transcriber_tool")


    # Always attempt to download the file, regardless of local existence
    local_audio = ""
    for ext in ("mp3", "wav", "m4a"):
        candidate = _download_file_for_task(task_id, ext)
        if candidate:
            local_audio = candidate
            break

    if not local_audio or not os.path.exists(local_audio):
        return "Error: No audio file found (download failed)."
        

    # Send to OpenAI Whisper
    try:
        openai.api_key = os.getenv("OPENAI_API_KEY")
        if not openai.api_key:
            raise RuntimeError("OPENAI_API_KEY is not set in environment.")

        with open(local_audio, "rb") as audio_file:
            print("reached openai.audio.transcriptions.create")
            response = openai.audio.transcriptions.create(
                model="whisper-1", 
                file=audio_file,
            )
            print("reached response")
        text = response.text.strip()
    except Exception as e:
        text = f"Error during transcription: {e}"
    print(f"Transcripted as transcript: {text}")
    return text
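
# Direct-call sketch (hypothetical id). whisper-1 accepts mp3, wav and m4a
# among other formats, which is why those extensions are probed above:
#
#   transcript = audio_transcriber_tool.invoke("fake-task-id")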

@tool
def wikipedia_search_tool(wiki_query: str) -> str:
    """
    Searches Wikipedia.
    Expects: wiki_query is a non-empty string.
    Returns: a text summary of the first matching page, or an error message.
    """
    print("reached wikipedia search tool")
    query = wiki_query
    if not query:
        return "Error: empty Wikipedia query."

    try:
        # 1) Use the MediaWiki API to search for page titles matching the query
        search_params = {
            "action": "query",
            "list": "search",
            "srsearch": query,
            "format": "json",
            "utf8": 1
        }
        search_resp = requests.get("https://en.wikipedia.org/w/api.php", params=search_params, timeout=10)
        search_resp.raise_for_status()
        search_data = search_resp.json()

        search_results = search_data.get("query", {}).get("search", [])
        # print("wikipedia: search_results",search_results)
        if not search_results:
            print(f"No Wikipedia page found for '{query}'.")
            return f"No Wikipedia page found for '{query}'."

        # 2) Take the first search result's title
        first_title = search_results[0].get("title", "")
        if not first_title:
            print("Unexpected format from Wikipedia search.")
            return "Unexpected format from Wikipedia search."

        # 3) Fetch the page summary for that title via the REST summary endpoint
        title_for_url = requests.utils.requote_uri(first_title)
        summary_url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{title_for_url}"
        summary_resp = requests.get(summary_url, timeout=10)
        summary_resp.raise_for_status()
        summary_data = summary_resp.json()

        # 4) Extract either the "extract" field or a fallback message
        summary_text = summary_data.get("extract")
        if not summary_text:
            summary_text = summary_data.get("description", "No summary available.")
        print(f"Title: {first_title}\n\n{summary_text}")
        return f"Title: {first_title}\n\n{summary_text}"
        

    except requests.exceptions.RequestException as e:
        return f"Wikipedia search error: {e}"
    except Exception as e:
        return f"Unexpected error in wikipedia_search_tool: {e}"


LLM = ChatOpenAI(model_name="gpt-4.1-mini", temperature=0.2)

@tool
def analyze_code_tool(task_id: str) -> str:
    """
    Downloads <task_id>.py, reads the code (max 400 lines / 10 kB) and asks
    the LLM for:
        • a plain-language summary
        • a list of key functions/classes
        • obvious bugs or style smells
    Returns that analysis as a string.
    """
    print("reached analyze_code_tool")
    if not task_id:
        return "Error: no task_id provided."
    path = _download_file_for_task(task_id, "py")
    if not path:
        return "Error: .py file not found for this task."
    code_txt = Path(path).read_text(encoding="utf-8", errors="ignore")

    # Truncate for safety
    lines = code_txt.splitlines()[:400]
    code_sample = "\n".join(lines)[:10_000]

    prompt = [
        SystemMessage(content="You are a senior Python code reviewer."),
        HumanMessage(content=(
            "Please analyse the following code. "
            "Summarise what it does, list key functions/classes, "
            "and point out any obvious bugs, performance issues or style problems. "
            "If you can determine the code's output, include it as well.\n\n"
            f"```python\n{code_sample}\n```"
        ))
    ]
    return LLM.invoke(prompt).content.strip()
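
# Usage sketch (hypothetical id). The 400-line / 10 kB truncation above keeps
# the prompt comfortably inside the model's context window:
#
#   review = analyze_code_tool.invoke("fake-task-id")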


# def web_search_tool(state: AgentState) -> AgentState:
#     """
#     Expects: state["web_search_query"] is a non‐empty string.
#     Returns: {"web_search_query": None, "web_search_result": <string>}.
#     Retries up to 5 times on either a DuckDuckGo “202 Ratelimit” response or any exception (e.g. timeout).
#     """
#     print("reached web_search_tool")
#     query = state.get("web_search_query", "")
#     if not query:
#         return {}  # nothing to do

#     ddg = DDGS()
#     max_retries = 5
#     result_text = ""

#     for attempt in range(1, max_retries + 1):
#         try:
#             result_text = str(ddg.text(query, max_results=5))
#         except Exception as e:
#             # Network error or timeout—retry up to max_retries
#             if attempt < max_retries:
#                 print(f"web_search_tool: exception '{e}', retrying in 4 seconds ({attempt}/{max_retries})")
#                 time.sleep(4)
#                 continue
#             else:
#                 # Final attempt failed
#                 return {
#                     "web_search_query": None,
#                     "web_search_result": f"Error during DuckDuckGo search: {e}"
#                 }

#         # Check for DuckDuckGo rate‐limit indicator
#         if "202 Ratelimit" in result_text:
#             if attempt < max_retries:
#                 print(f"web_search_tool: received '202 Ratelimit', retrying in 4 seconds ({attempt}/{max_retries})")
#                 time.sleep(4)
#                 continue
#             else:
#                 # Final attempt still rate‐limited
#                 break

#         # Successful response (no exception and no rate‐limit text)
#         break

#     return {
#         "web_search_query": None,
#         "web_search_result": result_text
#     }
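
# Active re-implementation sketch of the commented-out node above, rewritten
# as a @tool that takes the query string directly instead of an AgentState
# dict. It keeps the same retry-on-"202 Ratelimit" logic and the same
# DDGS.text(query, max_results=...) call used in the commented version.
@tool
def web_search_tool(web_search_query: str) -> str:
    """
    Expects: web_search_query is a non-empty string.
    Returns: stringified DuckDuckGo results, or an error message.
    Retries up to 5 times on a "202 Ratelimit" response or any exception.
    """
    print("reached web_search_tool")
    if not web_search_query:
        return "Error: empty web search query."

    ddg = DDGS()
    max_retries = 5
    result_text = ""

    for attempt in range(1, max_retries + 1):
        try:
            result_text = str(ddg.text(web_search_query, max_results=5))
        except Exception as e:
            # Network error or timeout — retry up to max_retries
            if attempt < max_retries:
                print(f"web_search_tool: exception '{e}', retrying in 4 s ({attempt}/{max_retries})")
                time.sleep(4)
                continue
            return f"Error during DuckDuckGo search: {e}"

        # Check for DuckDuckGo's rate-limit indicator
        if "202 Ratelimit" in result_text and attempt < max_retries:
            print(f"web_search_tool: received '202 Ratelimit', retrying in 4 s ({attempt}/{max_retries})")
            time.sleep(4)
            continue

        # Successful response (no exception and no rate-limit text)
        break

    return result_text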