wt002 committed on
Commit
ee6b900
·
verified ·
1 Parent(s): 5a5c64e

Update agent.py

Browse files
Files changed (1) hide show
  1. agent.py +29 -17
agent.py CHANGED
@@ -54,7 +54,6 @@ from langchain.agents import initialize_agent, Tool, AgentType
54
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
55
  from huggingface_hub import login
56
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, BitsAndBytesConfig
57
-
58
  from langchain_huggingface import HuggingFaceEndpoint
59
 
60
  load_dotenv()
@@ -329,22 +328,35 @@ tools = [tool_map[name] for name in enabled_tool_names]
329
  # Step 2: Load the JSON file or tasks (Replace this part if you're loading tasks dynamically)
330
  # -------------------------------
331
  # Here we assume the tasks are already fetched from a URL or file.
332
- # For now, using an example JSON array directly. Replace this with the actual loading logic.
333
-
334
- tasks = [
335
- {
336
- "task_id": "8e867cd7-cff9-4e6c-867a-ff5ddc2550be",
337
- "question": "How many studio albums were published by Mercedes Sosa between 2000 and 2009 (included)? You can use the latest 2022 version of English Wikipedia.",
338
- "Level": "1",
339
- "file_name": ""
340
- },
341
- {
342
- "task_id": "a1e91b78-d3d8-4675-bb8d-62741b4b68a6",
343
- "question": "In the video https://www.youtube.com/watch?v=L1vXCYZAYYM, what is the highest number of bird species to be on camera simultaneously?",
344
- "Level": "1",
345
- "file_name": ""
346
- }
347
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
348
 
349
  # -------------------------------
350
  # Step 3: Create Documents from Each JSON Object
 
54
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
55
  from huggingface_hub import login
56
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, BitsAndBytesConfig
 
57
  from langchain_huggingface import HuggingFaceEndpoint
58
 
59
  load_dotenv()
 
328
  # Step 2: Load the JSON file or tasks (Replace this part if you're loading tasks dynamically)
329
  # -------------------------------
330
  # Here we assume the tasks are already fetched from a URL or file.
331
+
332
+
333
+ # Replace this with your actual URL
334
+ json_url = "https://huggingface.co/spaces/wt002/Final_Assignment_Project/blob/main/questions.json"
335
+
336
+ try:
337
+ # Fetch the JSON content from the URL
338
+ response = requests.get(json_url)
339
+ response.raise_for_status() # Raise error if the request failed
340
+
341
+ tasks = response.json() # Parse JSON content
342
+ print(f" Loaded {len(tasks)} tasks from URL")
343
+
344
+ # Convert each task to a LangChain Document
345
+ docs = []
346
+ for task in tasks:
347
+ question = task.get("question", "").strip()
348
+ if not question:
349
+ print(f"⚠️ Skipping task with empty question: {task}")
350
+ continue
351
+
352
+ task["id"] = str(uuid.uuid4())
353
+ docs.append(Document(page_content=question, metadata=task))
354
+
355
+ except requests.RequestException as e:
356
+ print(f"❌ Failed to fetch JSON from URL: {e}")
357
+ except ValueError as e:
358
+ print(f"❌ Invalid JSON format: {e}")
359
+
360
 
361
  # -------------------------------
362
  # Step 3: Create Documents from Each JSON Object