Spaces:
Runtime error
Runtime error
Better error handling (#25)
Browse files

* Log errors
* return an error message when slack encounters an issue
* patch openai[embeddings] for now
- app.py +31 -20
- buster/chatbot.py +2 -1
- requirements.txt +10 -3
app.py
CHANGED
|
@@ -1,15 +1,20 @@
|
|
|
|
|
| 1 |
import os
|
| 2 |
|
| 3 |
from slack_bolt import App
|
| 4 |
|
| 5 |
from buster.chatbot import Chatbot, ChatbotConfig
|
| 6 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 7 |
MILA_CLUSTER_CHANNEL = "C04LR4H9KQA"
|
| 8 |
ORION_CHANNEL = "C04LYHGUYB0"
|
| 9 |
PYTORCH_CHANNEL = "C04MEK6N882"
|
| 10 |
HF_TRANSFORMERS_CHANNEL = "C04NJNCJWHE"
|
| 11 |
|
| 12 |
-
|
| 13 |
documents_file="buster/data/document_embeddings.csv",
|
| 14 |
unknown_prompt="This doesn't seem to be related to cluster usage.",
|
| 15 |
embedding_model="text-embedding-ada-002",
|
|
@@ -43,7 +48,7 @@ buster_cfg = ChatbotConfig(
|
|
| 43 |
Now answer the following question:
|
| 44 |
""",
|
| 45 |
)
|
| 46 |
-
|
| 47 |
|
| 48 |
orion_cfg = ChatbotConfig(
|
| 49 |
documents_file="buster/data/document_embeddings_orion.csv",
|
|
@@ -144,32 +149,38 @@ hf_transformers_cfg = ChatbotConfig(
|
|
| 144 |
)
|
| 145 |
hf_transformers_chatbot = Chatbot(hf_transformers_cfg)
|
| 146 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 147 |
app = App(token=os.environ.get("SLACK_BOT_TOKEN"), signing_secret=os.environ.get("SLACK_SIGNING_SECRET"))
|
| 148 |
|
| 149 |
|
| 150 |
@app.event("app_mention")
|
| 151 |
def respond_to_question(event, say):
|
| 152 |
-
|
| 153 |
|
| 154 |
# user's text
|
| 155 |
text = event["text"]
|
| 156 |
-
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
|
| 168 |
-
|
| 169 |
-
answer =
|
| 170 |
-
else:
|
| 171 |
-
print(f"invalid channel: {channel}")
|
| 172 |
-
answer = "I was not yet implemented to support this channel."
|
| 173 |
|
| 174 |
# responds to the message in the thread
|
| 175 |
thread_ts = event["event_ts"]
|
|
|
|
| 1 |
+
import logging
|
| 2 |
import os
|
| 3 |
|
| 4 |
from slack_bolt import App
|
| 5 |
|
| 6 |
from buster.chatbot import Chatbot, ChatbotConfig
|
| 7 |
|
| 8 |
+
logger = logging.getLogger(__name__)
|
| 9 |
+
logging.basicConfig(level=logging.INFO)
|
| 10 |
+
|
| 11 |
+
# Set Slack channel IDs
|
| 12 |
MILA_CLUSTER_CHANNEL = "C04LR4H9KQA"
|
| 13 |
ORION_CHANNEL = "C04LYHGUYB0"
|
| 14 |
PYTORCH_CHANNEL = "C04MEK6N882"
|
| 15 |
HF_TRANSFORMERS_CHANNEL = "C04NJNCJWHE"
|
| 16 |
|
| 17 |
+
mila_doc_cfg = ChatbotConfig(
|
| 18 |
documents_file="buster/data/document_embeddings.csv",
|
| 19 |
unknown_prompt="This doesn't seem to be related to cluster usage.",
|
| 20 |
embedding_model="text-embedding-ada-002",
|
|
|
|
| 48 |
Now answer the following question:
|
| 49 |
""",
|
| 50 |
)
|
| 51 |
+
mila_doc_chatbot = Chatbot(mila_doc_cfg)
|
| 52 |
|
| 53 |
orion_cfg = ChatbotConfig(
|
| 54 |
documents_file="buster/data/document_embeddings_orion.csv",
|
|
|
|
| 149 |
)
|
| 150 |
hf_transformers_chatbot = Chatbot(hf_transformers_cfg)
|
| 151 |
|
| 152 |
+
# TODO: eventually move this to a factory of sorts
|
| 153 |
+
# Put all the bots in a dict by channel
|
| 154 |
+
channel_id_to_bot = {
|
| 155 |
+
MILA_CLUSTER_CHANNEL: mila_doc_chatbot,
|
| 156 |
+
ORION_CHANNEL: orion_chatbot,
|
| 157 |
+
PYTORCH_CHANNEL: pytorch_chatbot,
|
| 158 |
+
HF_TRANSFORMERS_CHANNEL: hf_transformers_chatbot,
|
| 159 |
+
}
|
| 160 |
+
|
| 161 |
app = App(token=os.environ.get("SLACK_BOT_TOKEN"), signing_secret=os.environ.get("SLACK_SIGNING_SECRET"))
|
| 162 |
|
| 163 |
|
| 164 |
@app.event("app_mention")
|
| 165 |
def respond_to_question(event, say):
|
| 166 |
+
logger.info(event)
|
| 167 |
|
| 168 |
# user's text
|
| 169 |
text = event["text"]
|
| 170 |
+
channel_id = event["channel"]
|
| 171 |
+
|
| 172 |
+
try:
|
| 173 |
+
chatbot = channel_id_to_bot.get(channel_id)
|
| 174 |
+
if chatbot:
|
| 175 |
+
answer = chatbot.process_input(text)
|
| 176 |
+
else:
|
| 177 |
+
answer = "I was not yet implemented to support this channel."
|
| 178 |
+
except ValueError as e:
|
| 179 |
+
# log the error and return a generic response instead.
|
| 180 |
+
import traceback
|
| 181 |
+
|
| 182 |
+
logging.error(traceback.format_exc())
|
| 183 |
+
answer = "Oops, something went wrong. Try again later!"
|
|
|
|
|
|
|
|
|
|
| 184 |
|
| 185 |
# responds to the message in the thread
|
| 186 |
thread_ts = event["event_ts"]
|
buster/chatbot.py
CHANGED
|
@@ -112,8 +112,9 @@ class Chatbot:
|
|
| 112 |
# log the error and return a generic response instead.
|
| 113 |
import traceback
|
| 114 |
|
|
|
|
| 115 |
logging.error(traceback.format_exc())
|
| 116 |
-
response_text = "
|
| 117 |
return response_text
|
| 118 |
|
| 119 |
def add_sources(self, response: str, matched_documents: pd.DataFrame):
|
|
|
|
| 112 |
# log the error and return a generic response instead.
|
| 113 |
import traceback
|
| 114 |
|
| 115 |
+
logger.error("Error connecting to OpenAI API")
|
| 116 |
logging.error(traceback.format_exc())
|
| 117 |
+
response_text = "Hmm, we're having trouble connecting to OpenAI right now... Try again soon!"
|
| 118 |
return response_text
|
| 119 |
|
| 120 |
def add_sources(self, response: str, matched_documents: pd.DataFrame):
|
requirements.txt
CHANGED
|
@@ -2,9 +2,16 @@ bs4
|
|
| 2 |
matplotlib
|
| 3 |
numpy
|
| 4 |
pandas
|
| 5 |
-
plotly
|
| 6 |
-
scikit-learn
|
| 7 |
tabulate
|
| 8 |
tenacity
|
| 9 |
tiktoken
|
| 10 |
-
openai
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
matplotlib
|
| 3 |
numpy
|
| 4 |
pandas
|
|
|
|
|
|
|
| 5 |
tabulate
|
| 6 |
tenacity
|
| 7 |
tiktoken
|
| 8 |
+
openai
|
| 9 |
+
|
| 10 |
+
# all openai[embeddings] deps, their list breaks our CI, see: https://github.com/openai/openai-python/issues/210
|
| 11 |
+
|
| 12 |
+
scikit-learn >= 1.0.2 # Needed for embedding utils, versions >= 1.1 require python 3.8
|
| 13 |
+
tenacity >= 8.0.1
|
| 14 |
+
matplotlib
|
| 15 |
+
plotly
|
| 16 |
+
pandas-stubs >= 1.1.0.11 # Needed for type hints for mypy
|
| 17 |
+
openpyxl >= 3.0.7 # Needed for CLI fine-tuning data preparation tool xlsx format
|