Samuel Thomas committed on
Commit
3127c31
·
1 Parent(s): c1c55d4

back to mistral model

Browse files
Files changed (2) hide show
  1. app.py +3 -0
  2. tools.py +2 -2
app.py CHANGED
@@ -4,12 +4,15 @@ import requests
4
  import inspect
5
  import pandas as pd
6
  import traceback
 
7
  from tools import create_memory_safe_workflow, get_file_type, write_bytes_to_temp_dir, AgentState, extract_final_answer, run_agent
8
 
9
  # (Keep Constants as is)
10
  # --- Constants ---
11
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
12
 
 
 
13
  # --- Basic Agent Definition ---
14
  # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
15
  #class BasicAgent:
 
4
  import inspect
5
  import pandas as pd
6
  import traceback
7
+ from huggingface_hub import login
8
  from tools import create_memory_safe_workflow, get_file_type, write_bytes_to_temp_dir, AgentState, extract_final_answer, run_agent
9
 
10
  # (Keep Constants as is)
11
  # --- Constants ---
12
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
13
 
14
+ # login(token=os.environ["HF_TOKEN"])
15
+
16
  # --- Basic Agent Definition ---
17
  # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
18
  #class BasicAgent:
tools.py CHANGED
@@ -66,10 +66,10 @@ logger = logging.getLogger(__name__)
66
 
67
  # --- Model Configuration ---
68
  def create_llm_pipeline():
69
- model_id = "meta-llama/Llama-2-13b-chat-hf"
70
  #model_id = "meta-llama/Llama-3.3-70B-Instruct"
71
  #model_id = "mistralai/Mistral-Small-24B-Base-2501"
72
- #model_id = "mistralai/Mistral-7B-Instruct-v0.3"
73
  #model_id = "Qwen/Qwen2-7B-Instruct"
74
  return pipeline(
75
  "text-generation",
 
66
 
67
  # --- Model Configuration ---
68
  def create_llm_pipeline():
69
+ #model_id = "meta-llama/Llama-2-13b-chat-hf"
70
  #model_id = "meta-llama/Llama-3.3-70B-Instruct"
71
  #model_id = "mistralai/Mistral-Small-24B-Base-2501"
72
+ model_id = "mistralai/Mistral-7B-Instruct-v0.3"
73
  #model_id = "Qwen/Qwen2-7B-Instruct"
74
  return pipeline(
75
  "text-generation",