Update main.py

main.py CHANGED
@@ -1,4 +1,5 @@
 from fastapi import FastAPI, HTTPException
+from fastapi.responses import StreamingResponse
 from pydantic import BaseModel
 from hugchat import hugchat
 from hugchat.login import Login
@@ -53,34 +54,34 @@ async def startup_event():
 @app.post("/generate")
 async def generate_response(request: QuestionRequest):
     """
-    Generates a response from the AI based on the provided question.
+    Generates a streaming response from the AI based on the provided question.

     Args:
         request (QuestionRequest): JSON body containing the question.

     Returns:
-        (removed docstring line not captured in this diff view)
+        StreamingResponse: A streaming response with the AI's reply.
     """
     global chatbot
     if chatbot is None:
         raise HTTPException(status_code=500, detail="Chatbot not initialized. Please try again later.")

-    (previous response-generation code: 18 removed lines not captured in this diff view)
+    async def generate():
+        try:
+            # Use streaming to match client expectations
+            for token in chatbot.chat(request.question, stream=True):
+                # Handle hugchat response (token may be dict or string)
+                if isinstance(token, dict):
+                    # Extract token from dictionary (e.g., {"type": "stream", "token": "text"})
+                    token_text = token.get("token", "")
+                else:
+                    token_text = str(token)
+
+                # Encode and yield the token
+                yield token_text.encode('utf-8')
+        except Exception as e:
+            error_message = f"Error: Failed to generate response: {str(e)}"
+            yield error_message.encode('utf-8')
+
+    return StreamingResponse(generate(), media_type="text/plain")
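For reference, a minimal client-side sketch of how the new streaming endpoint could be consumed. This is not part of the commit: the base URL, port, and the use of the requests package are assumptions for illustration. The JSON body follows the QuestionRequest model, which (judging from the request.question usage above) carries a single question field.

import requests

# Post a question and read the reply incrementally instead of waiting for the full response.
response = requests.post(
    "http://localhost:8000/generate",       # assumed host/port for a local run, not part of the commit
    json={"question": "What is FastAPI?"},  # JSON body parsed into QuestionRequest
    stream=True,                            # keep the connection open and read chunks as they arrive
)
response.raise_for_status()

response.encoding = "utf-8"  # the endpoint yields UTF-8-encoded bytes with media type text/plain
for chunk in response.iter_content(chunk_size=None, decode_unicode=True):
    if chunk:
        print(chunk, end="", flush=True)
print()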