Chandima Prabhath committed
Commit 7cda32d · 1 Parent(s): 52dde05

Refactor error handling in generate_llm to raise LLMBadRequestError on HTTP 400; add clear_history function to reset conversation history on bad requests.

Files changed (2):
  1. app.py +15 -6
  2. polLLM.py +14 -7
app.py CHANGED
@@ -14,7 +14,7 @@ from pydantic import BaseModel, Field, ValidationError
 
 from FLUX import generate_image
 from VoiceReply import generate_voice_reply
-from polLLM import generate_llm
+from polLLM import generate_llm, LLMBadRequestError  # <- assume this exception is raised on 400
 
 # --- Logging Setup ---------------------------------------------------------
 
@@ -64,6 +64,9 @@ def record_bot_message(chat_id, sender, message):
 def get_history_text(chat_id, sender):
     return "\n".join(history[(chat_id, sender)])
 
+def clear_history(chat_id, sender):
+    history[(chat_id, sender)].clear()
+
 # --- Bot Config & Client --------------------------------------------------
 
 class BotConfig:
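
The new clear_history helper empties the stored conversation for one (chat_id, sender) pair in place. A minimal usage sketch, assuming history is a defaultdict(list) as the get_history_text lookup suggests (that type is an assumption, not shown in the diff):

# Sketch only: `history` as a defaultdict(list) is an assumed detail.
from collections import defaultdict

history = defaultdict(list)
history[("chat1", "alice")].extend(["alice: hi", "Eve: hello!"])

def clear_history(chat_id, sender):
    history[(chat_id, sender)].clear()  # empties in place; the key stays usable

clear_history("chat1", "alice")
assert "\n".join(history[("chat1", "alice")]) == ""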
@@ -259,7 +262,6 @@ def _fn_poll_end(mid, cid):
 def _fn_generate_images(message_id: str, chat_id: str, prompt: str,
                         count: int = 1, width: Optional[int] = None,
                         height: Optional[int] = None, **_):
-    # send accept message
     _fn_send_accept(message_id, chat_id, f"✨ Generating {count} image(s)…")
     for i in range(1, count+1):
         try:
@@ -365,7 +367,7 @@ ACTION_HANDLERS = {
     "send_text": lambda mid,cid,**i: _fn_send_text(mid,cid,i["message"]),
 }
 
-# --- Intent Routing with Fallback ------------------------------------------
+# --- Intent Routing with Fallback & History-Reset on 400 -------------------
 
 def route_intent(user_input: str, chat_id: str, sender: str):
     history_text = get_history_text(chat_id, sender)
@@ -388,7 +390,13 @@ def route_intent(user_input: str, chat_id: str, sender: str):
         "Otherwise, use send_text to reply with plain chat.\n"
     )
     prompt = f"{sys_prompt}\nConversation so far:\n{history_text}\n\nUser: {user_input}"
-    raw = generate_llm(prompt)
+
+    try:
+        raw = generate_llm(prompt)
+    except LLMBadRequestError:
+        # Clear history on HTTP 400 from the LLM
+        clear_history(chat_id, sender)
+        return SendTextIntent(action="send_text", message="Oops, I lost my train of thought—let’s start fresh!")
 
     # 1) Strict: try each Pydantic model
     try:
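
A quick way to exercise this fallback is to stub generate_llm so it raises. The sketch below is a hypothetical pytest case, not part of the commit; the module name app and the direct use of its history mapping are assumptions:

# Hypothetical test for the 400 fallback (pytest's monkeypatch assumed).
import app
from app import LLMBadRequestError

def test_route_intent_resets_history_on_400(monkeypatch):
    # generate_llm was imported into app's namespace, so patch it there
    def fake_llm(prompt):
        raise LLMBadRequestError("LLM returned HTTP 400")
    monkeypatch.setattr(app, "generate_llm", fake_llm)

    app.history[("chat1", "alice")].append("alice: hi")
    intent = app.route_intent("hi", "chat1", "alice")

    assert intent.action == "send_text"                   # apology reply
    assert app.get_history_text("chat1", "alice") == ""   # history wiped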
@@ -432,9 +440,10 @@ def route_intent(user_input: str, chat_id: str, sender: str):
     elif action == "poll_vote":
         kwargs["voter"] = sender
         kwargs["choice"] = int(data.get("choice",0))
-    # parse into Pydantic for uniformity
     try:
-        return next(M for M in INTENT_MODELS if getattr(M, "__fields__", {}).get("action").default == action).parse_obj({"action":action,**kwargs})
+        # coerce into Pydantic for uniform interface
+        model = next(m for m in INTENT_MODELS if getattr(m, "__fields__", {}).get("action").default == action)
+        return model.parse_obj({"action":action, **kwargs})
     except Exception:
         return SendTextIntent(action="send_text", message=raw)
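
The next(...) lookup above depends on a convention: every model in INTENT_MODELS is a Pydantic v1 class whose action field default names the intent. A minimal sketch of that convention (SendTextIntent and the poll_vote field names match the diff; everything else is illustrative):

# Pydantic v1 API (__fields__, parse_obj), matching the code above.
from pydantic import BaseModel

class SendTextIntent(BaseModel):
    action: str = "send_text"
    message: str

class PollVoteIntent(BaseModel):  # field names mirror the kwargs above
    action: str = "poll_vote"
    voter: str
    choice: int

INTENT_MODELS = [SendTextIntent, PollVoteIntent]

def model_for(action: str):
    # Each model's "action" default doubles as its registry key,
    # which is exactly what the next(...) expression exploits.
    return next(m for m in INTENT_MODELS
                if m.__fields__["action"].default == action)

intent = model_for("poll_vote").parse_obj(
    {"action": "poll_vote", "voter": "alice", "choice": 2})
assert isinstance(intent, PollVoteIntent)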
 
polLLM.py CHANGED
@@ -19,14 +19,19 @@ handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(message)s"
 logger.addHandler(handler)
 
 # --- LLM settings from config.yaml ---
-_DEFAULT_MODEL = "openai-large" #_config.get("model", "openai-large")
+_DEFAULT_MODEL = "openai-large"  # _config.get("model", "openai-large")
 _SYSTEM_TEMPLATE = _config.get("system_prompt", "")
 _CHAR = _config.get("char", "Eve")
 
+# --- Custom exception ---
+class LLMBadRequestError(Exception):
+    """Raised when the LLM returns HTTP 400 (Bad Request)."""
+    pass
+
 # --- OpenAI client init ---
 client = OpenAI(
-    base_url = "https://text.pollinations.ai/openai",
-    api_key = "OPENAI_API_KEY"
+    base_url="https://text.pollinations.ai/openai",
+    api_key="OPENAI_API_KEY"
 )
 
 def _build_system_prompt() -> str:
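
Note that api_key here is the literal placeholder string "OPENAI_API_KEY", not an environment lookup; the pollinations endpoint evidently does not validate it. If a real key is ever required, the usual pattern is the sketch below (reading the env var is a suggestion, not what the commit does):

import os
from openai import OpenAI

client = OpenAI(
    base_url="https://text.pollinations.ai/openai",
    # fall back to the placeholder so behavior against pollinations is unchanged
    api_key=os.getenv("OPENAI_API_KEY", "OPENAI_API_KEY"),
)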
@@ -63,10 +68,9 @@ def generate_llm(
         logger.debug("LLM response received")
         return text
     except Exception as e:
-        # Check if the error is a 400 Bad Request
-        if hasattr(e, "status_code") and e.status_code == 400:
+        if getattr(e, "status_code", None) == 400:
             logger.error("LLM error 400 (Bad Request): Not retrying.")
-            return "Error: Bad Request (400)"
+            raise LLMBadRequestError("LLM returned HTTP 400")
         logger.error(f"LLM error on attempt {attempt}: {e}")
         if attempt < 5:
             time.sleep(backoff)
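
Pieced together, the retry loop now raises on 400 instead of returning an error string, and still retries other failures up to five times. The sketch below fills the gaps with assumptions: the signature, the messages payload, the doubling backoff, and the final RuntimeError are guesses; the 400 short-circuit, the attempt cap, and the log lines come from the diff.

# Sketch of generate_llm after this commit; relies on polLLM.py's existing
# client, logger, time, _DEFAULT_MODEL, and _build_system_prompt.
def generate_llm(prompt: str, model: str = _DEFAULT_MODEL) -> str:
    backoff = 1.0  # assumed starting delay
    for attempt in range(1, 6):
        try:
            resp = client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "system", "content": _build_system_prompt()},
                    {"role": "user", "content": prompt},
                ],
            )
            text = resp.choices[0].message.content
            logger.debug("LLM response received")
            return text
        except Exception as e:
            # OpenAI SDK status errors expose .status_code (400 for bad requests)
            if getattr(e, "status_code", None) == 400:
                logger.error("LLM error 400 (Bad Request): Not retrying.")
                raise LLMBadRequestError("LLM returned HTTP 400")
            logger.error(f"LLM error on attempt {attempt}: {e}")
            if attempt < 5:
                time.sleep(backoff)
                backoff *= 2  # assumed exponential growth
    raise RuntimeError("generate_llm: all retries failed")  # assumed terminal path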
@@ -78,4 +82,7 @@
 # Example local test
 if __name__ == "__main__":
     logger.info("Testing generate_llm() with a sample prompt")
-    print(generate_llm("Say hello in a poetic style."))
+    try:
+        print(generate_llm("Say hello in a poetic style."))
+    except LLMBadRequestError as e:
+        logger.warning(f"Test failed with bad request: {e}")