Facelook committed on
Commit
bcc6dcf
·
1 Parent(s): 321fb3b

Trial and error.

Browse files
Files changed (1) hide show
  1. app.py +7 -39
app.py CHANGED
@@ -16,55 +16,21 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
16
  class BasicAgent:
17
  def __init__(self):
18
  print("BasicAgent initialized.")
19
- # Try multiple models in order of preference with better error handling
20
- #self.model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
21
- self.model_name = "https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud"
22
  self.hf_token = os.getenv("HF_TOKEN") # Get token from environment if available
23
 
24
- # List of fallback models (free models that don't require authentication)
25
- self.fallback_models = []
26
-
27
  try:
28
- print(f"Attempting to initialize with model: {self.model_name}")
29
  self.hf_client = InferenceClient(
30
  model=self.model_name,
31
  token=self.hf_token
32
  )
33
- # Test the client with a simple prompt to verify it works
34
- test_response = self.hf_client.text_generation(
35
- prompt="Hello, this is a test.",
36
- max_new_tokens=10
37
- )
38
  print(f"Model initialized successfully: {self.model_name}")
39
-
40
  except Exception as e:
41
- print(f"Error initializing primary model ({self.model_name}): {e}")
42
  self.hf_client = None
43
-
44
- # Try fallback models
45
- for fallback_model in self.fallback_models:
46
- try:
47
- print(f"Attempting fallback model: {fallback_model}")
48
- self.hf_client = InferenceClient(
49
- model=fallback_model,
50
- token=self.hf_token
51
- )
52
- # Quick test to verify the model works
53
- test_response = self.hf_client.text_generation(
54
- prompt="Hello, this is a test.",
55
- max_new_tokens=5
56
- )
57
- print(f"Successfully initialized fallback model: {fallback_model}")
58
- self.model_name = fallback_model
59
- break
60
- except Exception as fallback_error:
61
- print(f"Fallback model failed ({fallback_model}): {fallback_error}")
62
- self.hf_client = None
63
-
64
- # If all models fail, we'll use a rule-based response generator
65
- if self.hf_client is None:
66
- print("WARNING: All models failed. Using rule-based fallback for responses.")
67
- self.model_name = "rule-based-fallback"
68
 
69
  def break_down_question(self, question: str) -> list:
70
  """
@@ -265,6 +231,8 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
265
  agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
266
  print(agent_code)
267
 
 
 
268
  # 2. Fetch Questions
269
  print(f"Fetching questions from: {questions_url}")
270
  try:
 
16
  class BasicAgent:
17
  def __init__(self):
18
  print("BasicAgent initialized.")
19
+ # Use Qwen2.5-7B-Instruct model
20
+ self.model_name = "Qwen/Qwen2.5-7B-Instruct"
 
21
  self.hf_token = os.getenv("HF_TOKEN") # Get token from environment if available
22
 
 
 
 
23
  try:
24
+ print(f"Initializing model: {self.model_name}")
25
  self.hf_client = InferenceClient(
26
  model=self.model_name,
27
  token=self.hf_token
28
  )
 
 
 
 
 
29
  print(f"Model initialized successfully: {self.model_name}")
 
30
  except Exception as e:
31
+ print(f"Error initializing model ({self.model_name}): {e}")
32
  self.hf_client = None
33
+ print("WARNING: Model initialization failed. Agent may not function properly.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
 
35
  def break_down_question(self, question: str) -> list:
36
  """
 
231
  agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
232
  print(agent_code)
233
 
234
+ return
235
+
236
  # 2. Fetch Questions
237
  print(f"Fetching questions from: {questions_url}")
238
  try: