Trial and error.
app.py CHANGED
@@ -17,25 +17,12 @@ class BasicAgent:
     def __init__(self):
         print("BasicAgent initialized.")

-        print("Loading
-        self.model_name = "
-
-
-
-
-        self.model = AutoModelForCausalLM.from_pretrained(
-            self.model_name,
-            torch_dtype="auto",
-            device_map="auto"
-        )
-        print(f"Successfully loaded {self.model_name}")
-        except Exception as e:
-            print(f"Error loading model: {e}")
-            # Fallback to HuggingFace Inference API if local loading fails
-            print("Falling back to InferenceClient")
-            self.client = InferenceClient(model=self.model_name)
-            self.tokenizer = None
-            self.model = None
+        print("Loading Mistral-Nemo-Instruct-2407 model...")
+        self.model_name = "mistralai/Mistral-Nemo-Instruct-2407"
+        self.client = InferenceClient(model=self.model_name)
+        self.tokenizer = None
+        self.model = None
+

     def __call__(self, question: str) -> str:
         print(f"Agent received question (first 50 chars): {question[:50]}...")
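This hunk swaps local weight loading (AutoModelForCausalLM inside a try/except) for a plain Hugging Face Inference API client. A minimal sketch of the resulting setup, assuming huggingface_hub is installed; the HF_TOKEN handling is an assumption for gated endpoints, not part of this commit:

# Minimal post-commit setup sketch; token handling is assumed, not from the diff.
import os

from huggingface_hub import InferenceClient

model_name = "mistralai/Mistral-Nemo-Instruct-2407"
client = InferenceClient(model=model_name, token=os.environ.get("HF_TOKEN"))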
@@ -43,7 +30,7 @@
         try:
             # Create messages for the model
             messages = [
-                {"role": "system", "content": "You are
+                {"role": "system", "content": "You are Mistral, a helpful AI assistant. Answer the user's questions accurately and helpfully."},
                 {"role": "user", "content": question}
             ]

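The system prompt is now spelled out in full. For the local-model branch that survives below (see the batch_decode context line), messages in this shape would normally be rendered through the tokenizer's chat template before generation. A self-contained sketch using standard transformers calls; only the model name and message shape come from app.py, the rest is illustrative:

# Illustration of the usual local-generation path for chat messages;
# this mirrors, but is not, the code in app.py.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "mistralai/Mistral-Nemo-Instruct-2407"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name, torch_dtype="auto", device_map="auto"
)

messages = [
    {"role": "system", "content": "You are Mistral, a helpful AI assistant."},
    {"role": "user", "content": "What is the capital of France?"},
]
# Render the chat template, generate, and decode the reply.
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
generated_ids = model.generate(input_ids, max_new_tokens=256)
answer = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]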
@@ -74,7 +61,7 @@
                 answer = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
             else:
                 # Fallback to Inference API
-                answer = self.client.chat(messages
+                answer = self.client.chat(messages)

             print(f"Agent generated response (first 50 chars): {answer[:50]}...")
             return answer
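One caveat on the fixed line: current huggingface_hub releases expose chat through InferenceClient.chat_completion() (or the OpenAI-style client.chat.completions.create()), not a callable client.chat(messages), so this fallback may still fail at runtime. A hedged equivalent, assuming huggingface_hub >= 0.22 and the client/messages defined above; max_tokens is illustrative:

# Assumed-correct form of the Inference API fallback.
response = client.chat_completion(messages=messages, max_tokens=512)
answer = response.choices[0].message.content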
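Taken together, a condensed sketch of how the agent behaves after this commit; the now-dead local-model branch is dropped, and chat_completion stands in for the client.chat call as noted above:

# Condensed, runnable sketch of the post-commit agent (not the literal file).
from huggingface_hub import InferenceClient


class BasicAgent:
    def __init__(self):
        print("BasicAgent initialized.")
        self.model_name = "mistralai/Mistral-Nemo-Instruct-2407"
        self.client = InferenceClient(model=self.model_name)

    def __call__(self, question: str) -> str:
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        messages = [
            {"role": "system", "content": "You are Mistral, a helpful AI assistant. Answer the user's questions accurately and helpfully."},
            {"role": "user", "content": question},
        ]
        # chat_completion is the documented chat entry point; an assumption here.
        response = self.client.chat_completion(messages=messages, max_tokens=512)
        answer = response.choices[0].message.content
        print(f"Agent generated response (first 50 chars): {answer[:50]}...")
        return answer


if __name__ == "__main__":
    agent = BasicAgent()
    print(agent("What is the capital of France?"))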