Commit af9c758 · Parent(s): b72e9aa

update app.py

app.py CHANGED
@@ -71,6 +71,8 @@ vectorstore, all_chunks, all_texts, metadatas = initialize_resources()
 # LLMs
 repharser_llm = ChatNVIDIA(model="mistralai/mistral-7b-instruct-v0.3") | StrOutputParser()
 relevance_llm = ChatNVIDIA(model="meta/llama3-70b-instruct") | StrOutputParser()
+if not os.environ.get("OPENAI_API_KEY"):
+    raise RuntimeError("OPENAI_API_KEY not found in environment!")
 answer_llm = ChatOpenAI(
     model="gpt-4-1106-preview",
     temperature=0.3,
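The new guard fails fast when the OpenAI key is missing, instead of letting the first request to answer_llm error out at runtime. A minimal sketch of an equivalent variant that reads the key once and hands it to the constructor explicitly, assuming ChatOpenAI is imported from langchain_openai (the import itself is not shown in this diff):

# Sketch only: equivalent fail-fast check with an explicit api_key,
# assuming the langchain_openai package provides ChatOpenAI here.
import os

from langchain_openai import ChatOpenAI

api_key = os.environ.get("OPENAI_API_KEY")
if not api_key:
    raise RuntimeError("OPENAI_API_KEY not found in environment!")

answer_llm = ChatOpenAI(
    model="gpt-4-1106-preview",
    temperature=0.3,
    api_key=api_key,  # explicit, rather than an implicit env lookup inside the client
)

Either form works; the explicit argument just makes the dependency visible at the call site.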
@@ -324,10 +326,14 @@ full_pipeline = (
 import gradio as gr

 def chat_interface(message, history):
-    …
-    …
+    # Handle different input formats
+    if isinstance(message, list) and len(message) > 0:
+        if isinstance(message[-1], dict):
+            user_input = message[-1].get("content", "")
+        else:
+            user_input = message[-1]
     else:
-        user_input = message
+        user_input = str(message)

     inputs = {
         "query": user_input,
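With type="messages" (set in the last hunk), Gradio usually passes message as a plain string, but the new branches also tolerate a list of message dicts or bare strings. The normalization is easiest to check factored out; a minimal sketch, where normalize is a hypothetical helper mirroring the inlined branches:

# Sketch only: the same normalization as chat_interface, factored into a
# hypothetical helper so each input shape can be exercised directly.
def normalize(message):
    if isinstance(message, list) and len(message) > 0:
        last = message[-1]
        if isinstance(last, dict):
            return last.get("content", "")
        return last
    return str(message)

assert normalize("hi") == "hi"                                 # plain string
assert normalize([{"role": "user", "content": "hi"}]) == "hi"  # messages-style dicts
assert normalize(["hi"]) == "hi"                               # bare list of strings
assert normalize([]) == "[]"                                   # empty list falls through to str()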
@@ -338,13 +344,13 @@ def chat_interface(message, history):
         "vectorstore": vectorstore,
         "full_document": "",
     }
+
     response = ""
     for chunk in full_pipeline.stream(inputs):
         if isinstance(chunk, str):
             response += chunk
         elif isinstance(chunk, dict) and "answer" in chunk:
             response += chunk["answer"]
-
     yield [{"role": "assistant", "content": response}]

 with gr.Blocks(css="""
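The streaming loop accepts two chunk shapes from full_pipeline.stream (raw strings, and dicts carrying an "answer" key) and silently drops anything else; yielding the growing response on every iteration is what lets the chat UI render partial answers. A self-contained sketch of that accumulation, with fake_stream standing in for the pipeline:

# Sketch only: mixed-shape chunk accumulation as in chat_interface;
# fake_stream is a stand-in for full_pipeline.stream(inputs).
def fake_stream():
    yield "Hel"
    yield {"answer": "lo"}
    yield {"sources": ["ignored"]}  # no "answer" key, so it is skipped

response = ""
for chunk in fake_stream():
    if isinstance(chunk, str):
        response += chunk
    elif isinstance(chunk, dict) and "answer" in chunk:
        response += chunk["answer"]

assert response == "Hello"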
@@ -390,11 +396,11 @@ with gr.Blocks(css="""
     chatbot=chatbot,
     textbox=textbox,
     examples=[
-        …
-        …
-        …
+        "What are Krishna's research interests?",
+        "Where did Krishna work?",
+        "What did he study at Virginia Tech?",
     ],
-    type=…
+    type="messages"
 )

-demo.launch()
+demo.launch(cache_examples=False)
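type="messages" switches ChatInterface to OpenAI-style role/content dicts, matching the [{"role": "assistant", ...}] payloads yielded above, and turning off example caching stops the Space from running the full (paid) pipeline over the three examples at startup. A stripped-down sketch of the wiring; note that current Gradio documentation puts cache_examples on the ChatInterface constructor, so whether launch() forwards it may depend on the Gradio version the Space pins:

# Sketch only: minimal ChatInterface wiring with the same options. The real
# app also passes custom chatbot/textbox components built inside gr.Blocks.
import gradio as gr

def echo(message, history):
    # Generator like chat_interface: yields the assistant message list.
    yield [{"role": "assistant", "content": f"Echo: {message}"}]

demo = gr.ChatInterface(
    fn=echo,
    examples=["What are Krishna's research interests?"],
    type="messages",
    cache_examples=False,  # constructor form of the option
)

if __name__ == "__main__":
    demo.launch()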