from fastapi import FastAPI, HTTPException
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from hugchat import hugchat
from hugchat.login import Login
import os
app = FastAPI()
# Pydantic model for request body
class QuestionRequest(BaseModel):
    question: str
# Global variable to store the chatbot instance
chatbot = None
def setup_chatbot(email, password, cookie_path, assistant_id):
    """
    Sets up the Hugging Face chatbot with login and conversation.

    Args:
        email (str): User email for login
        password (str): User password for login
        cookie_path (str): Directory to store cookies
        assistant_id (str): ID of the assistant to use

    Returns:
        hugchat.ChatBot: Configured chatbot instance
    """
    try:
        # Create cookie directory if it doesn't exist
        os.makedirs(cookie_path, exist_ok=True)
        sign = Login(email, password)
        cookies = sign.login(cookie_dir_path=cookie_path, save_cookies=True)
        chatbot = hugchat.ChatBot(cookies=cookies.get_dict())
        chatbot.new_conversation(assistant=assistant_id, switch_to=True)
        return chatbot
    except Exception as e:
        raise Exception(f"Failed to set up chatbot: {e}")
# Initialize chatbot at startup
@app.on_event("startup")
async def startup_event():
    global chatbot
    # Credentials and configuration
    EMAIL = os.getenv("EMAIL")
    PASSWD = os.getenv("PASSWD")
    COOKIE_PATH_DIR = "./cookies/"
    ASSISTANT_ID = "682e0c1f5f0c3d952a27498e"  # Replace with your actual assistant ID
    chatbot = setup_chatbot(EMAIL, PASSWD, COOKIE_PATH_DIR, ASSISTANT_ID)
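# Environment setup sketch (variable names taken from the os.getenv calls
# above; the values shown are placeholders):
#
#     export EMAIL="you@example.com"
#     export PASSWD="your-hugging-face-password"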
@app.post("/generate")
async def generate_response(request: QuestionRequest):
"""
Generates a streaming response from the AI based on the provided question.
Args:
request (QuestionRequest): JSON body containing the question.
Returns:
StreamingResponse: A streaming response with the AI's reply.
"""
global chatbot
if chatbot is None:
raise HTTPException(status_code=500, detail="Chatbot not initialized. Please try again later.")
async def generate():
try:
# Use streaming to match client expectations
for token in chatbot.chat(request.question, stream=True):
# Handle hugchat response (token may be dict or string)
if isinstance(token, dict):
# Extract token from dictionary (e.g., {"type": "stream", "token": "text"})
token_text = token.get("token", "")
else:
token_text = str(token)
# Encode and yield the token
yield token_text.encode('utf-8')
except Exception as e:
error_message = f"Error: Failed to generate response: {str(e)}"
yield error_message.encode('utf-8')
return StreamingResponse(generate(), media_type="text/plain")
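# How this app might be run and exercised (a sketch; assumes the file is saved
# as app.py, that uvicorn is installed, and that the server listens on port
# 7860, the port Hugging Face Spaces commonly expects):
#
#     uvicorn app:app --host 0.0.0.0 --port 7860
#
#     curl -N -X POST http://localhost:7860/generate \
#          -H "Content-Type: application/json" \
#          -d '{"question": "Hello, who are you?"}'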