wt002 committed on
Commit
0b65c57
·
verified ·
1 Parent(s): 8ea5b18

Update agent.py

Browse files
Files changed (1) hide show
  1. agent.py +40 -37
agent.py CHANGED
@@ -401,6 +401,16 @@ def create_documents(data_source: str, data: List[dict]) -> List[Document]:
401
  return docs
402
 
403
  # 4. Vector Store Integration
 
 
 
 
 
 
 
 
 
 
404
  class MyVector_Store:
405
  def __init__(self, index: faiss.Index):
406
  self.index = index
@@ -413,40 +423,41 @@ class MyVector_Store:
413
  index = faiss.read_index(path)
414
  return cls(index)
415
 
 
 
 
 
 
416
 
417
- # Process JSON data
418
- with open("questions.json", "r", encoding="utf-8") as f:
419
- json_data = json.load(f)
420
-
421
- # Create documents from JSON
422
- docs = create_documents("json", json_data)
423
- texts = [doc.page_content for doc in docs]
424
-
425
- # Initialize embedding model
426
- embedding_model = HuggingFaceEmbeddings(
427
- model_name="sentence-transformers/all-MiniLM-L6-v2"
428
- )
429
 
430
- # Create FAISS index
431
- vector_store = FAISS.from_documents(
432
- documents=docs,
433
- embedding=embedding_model
434
- )
435
-
436
- # Save
437
- vector_store.save_local("/home/wendy/Downloads/faiss_index.index")
438
-
439
- # Load
440
- loaded_store = vector_store.load_local("/home/wendy/Downloads/faiss_index.index")
441
 
 
 
 
 
 
 
 
442
 
 
443
 
 
 
 
 
444
 
445
  # -----------------------------
446
  # Create LangChain Retriever Tool
447
  # -----------------------------
448
-
449
- retriever = FAISS.loaded_store("/home/wendy/Downloads/faiss_index.index", embedding_model).as_retriever()
450
 
451
  question_retriever_tool = create_retriever_tool(
452
  retriever=retriever,
@@ -454,17 +465,9 @@ question_retriever_tool = create_retriever_tool(
454
  description="A tool to retrieve documents related to a user's question."
455
  )
456
 
457
-
458
-
459
- # Define the LLM before using it
460
- #llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo") # or "gpt-3.5-turbo" "gpt-4"
461
- #llm = ChatMistralAI(model="mistral-7b-instruct-v0.1")
462
-
463
-
464
- # Get the Hugging Face API token from the environment variable
465
- #hf_token = os.getenv("HF_TOKEN")
466
-
467
-
468
  llm = HuggingFaceEndpoint(
469
  repo_id="HuggingFaceH4/zephyr-7b-beta",
470
  task="text-generation",
@@ -473,7 +476,7 @@ llm = HuggingFaceEndpoint(
473
  max_new_tokens=512
474
  )
475
 
476
-
477
 
478
  # -------------------------------
479
  # Step 8: Use the Planner, Classifier, and Decision Logic
 
401
  return docs
402
 
403
  # 4. Vector Store Integration
404
+ import os
405
+ import json
406
+ import faiss
407
+ from langchain.vectorstores import FAISS
408
+ from langchain.embeddings import HuggingFaceEmbeddings
409
+ from langchain.tools import create_retriever_tool
410
+ from langchain.llms import HuggingFaceEndpoint
411
+ from your_module import create_documents # Replace with your actual import
412
+
413
+ # Custom FAISS wrapper (optional, if you still want it)
414
  class MyVector_Store:
415
  def __init__(self, index: faiss.Index):
416
  self.index = index
 
423
  index = faiss.read_index(path)
424
  return cls(index)
425
 
426
+ # -----------------------------
427
+ # Process JSON data and create documents
428
+ # -----------------------------
429
+ with open("questions.json", "r", encoding="utf-8") as f:
430
+ json_data = json.load(f)
431
 
432
+ docs = create_documents("json", json_data)
433
+ texts = [doc.page_content for doc in docs]
 
 
 
 
 
 
 
 
 
 
434
 
435
+ # -----------------------------
436
+ # Initialize embedding model
437
+ # -----------------------------
438
+ embedding_model = HuggingFaceEmbeddings(
439
+ model_name="sentence-transformers/all-MiniLM-L6-v2"
440
+ )
 
 
 
 
 
441
 
442
+ # -----------------------------
443
+ # Create FAISS index and save it
444
+ # -----------------------------
445
+ vector_store = FAISS.from_documents(
446
+ documents=docs,
447
+ embedding=embedding_model
448
+ )
449
 
450
+ vector_store.save_local("/home/wendy/Downloads/faiss_index")
451
 
452
+ # -----------------------------
453
+ # Load FAISS index properly
454
+ # -----------------------------
455
+ loaded_store = FAISS.load_local("/home/wendy/Downloads/faiss_index", embedding_model)
456
 
457
  # -----------------------------
458
  # Create LangChain Retriever Tool
459
  # -----------------------------
460
+ retriever = loaded_store.as_retriever()
 
461
 
462
  question_retriever_tool = create_retriever_tool(
463
  retriever=retriever,
 
465
  description="A tool to retrieve documents related to a user's question."
466
  )
467
 
468
+ # -----------------------------
469
+ # Load HuggingFace LLM
470
+ # -----------------------------
 
 
 
 
 
 
 
 
471
  llm = HuggingFaceEndpoint(
472
  repo_id="HuggingFaceH4/zephyr-7b-beta",
473
  task="text-generation",
 
476
  max_new_tokens=512
477
  )
478
 
479
+
480
 
481
  # -------------------------------
482
  # Step 8: Use the Planner, Classifier, and Decision Logic