Facelook committed on
Commit
3273c0a
·
1 Parent(s): c9c5699

Trial and error.

Browse files
Files changed (1) hide show
  1. app.py +3 -20
app.py CHANGED
@@ -19,24 +19,6 @@ class BasicAgent:
19
 
20
  print("Loading Qwen2.5-7B-Instruct model...")
21
  self.model_name = "Qwen/Qwen2.5-7B-Instruct"
22
-
23
- # Load model and tokenizer
24
- #try:
25
- # self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
26
- # self.model = AutoModelForCausalLM.from_pretrained(
27
- # self.model_name,
28
- # torch_dtype="auto",
29
- # device_map="auto"
30
- # )
31
- # print(f"Successfully loaded {self.model_name}")
32
- #except Exception as e:
33
- # print(f"Error loading model: {e}")
34
- # # Fallback to HuggingFace Inference API if local loading fails
35
- # print("Falling back to InferenceClient")
36
- # self.client = InferenceClient(model=self.model_name)
37
- # self.tokenizer = None
38
- # self.model = None
39
- print("Falling back to InferenceClient")
40
  self.client = InferenceClient(model=self.model_name)
41
  self.tokenizer = None
42
  self.model = None
@@ -72,8 +54,9 @@ class BasicAgent:
72
 
73
  answer = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
74
  else:
75
- # Fallback to Inference API
76
- answer = self.client.chat(messages=messages)
 
77
 
78
  print(f"Agent generated response (first 50 chars): {answer[:50]}...")
79
  return answer
 
19
 
20
  print("Loading Qwen2.5-7B-Instruct model...")
21
  self.model_name = "Qwen/Qwen2.5-7B-Instruct"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
  self.client = InferenceClient(model=self.model_name)
23
  self.tokenizer = None
24
  self.model = None
 
54
 
55
  answer = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
56
  else:
57
+ # Fallback to Inference API - using the correct method call for chat completion
58
+ response = self.client.chat_completion(messages)
59
+ answer = response.choices[0].message.content
60
 
61
  print(f"Agent generated response (first 50 chars): {answer[:50]}...")
62
  return answer