# hostserver3 / main.py
# Source: Hugging Face Space by abdullahalioo (commit f294c26, "Update main.py", 3.01 kB).
from fastapi import FastAPI, HTTPException
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from hugchat import hugchat
from hugchat.login import Login
import os
app = FastAPI()  # ASGI application instance served by this module
# Pydantic model for request body
class QuestionRequest(BaseModel):
    """Request body for POST /generate: a single free-text question."""
    question: str
# Global variable to store the chatbot instance
# (populated by the startup event; None until initialization succeeds)
chatbot = None
def setup_chatbot(email, password, cookie_path, assistant_id):
    """
    Set up the Hugging Face chatbot: log in, then open a conversation.

    Args:
        email (str): User email for login.
        password (str): User password for login.
        cookie_path (str): Directory in which login cookies are stored.
        assistant_id (str): ID of the assistant to attach the conversation to.

    Returns:
        hugchat.ChatBot: Configured chatbot instance with an active conversation.

    Raises:
        RuntimeError: If login or conversation setup fails; the original
            exception is chained as the cause.
    """
    try:
        # Create cookie directory if it doesn't exist
        os.makedirs(cookie_path, exist_ok=True)
        sign = Login(email, password)
        cookies = sign.login(cookie_dir_path=cookie_path, save_cookies=True)
        chatbot = hugchat.ChatBot(cookies=cookies.get_dict())
        chatbot.new_conversation(assistant=assistant_id, switch_to=True)
        return chatbot
    except Exception as e:
        # Raise a concrete exception type and chain the cause instead of a
        # bare Exception, so callers can see the real root of the failure.
        raise RuntimeError(f"Failed to set up chatbot: {e}") from e
# Initialize chatbot at startup
# NOTE(review): @app.on_event is deprecated in recent FastAPI in favor of
# lifespan handlers; kept here to preserve the existing structure.
@app.on_event("startup")
async def startup_event():
    """Initialize the global chatbot once when the server starts.

    Reads credentials from the EMAIL and PASSWD environment variables and
    fails fast with a clear error if either is missing, instead of letting
    the login step fail with an opaque error on None credentials.
    """
    global chatbot
    # Credentials and configuration
    EMAIL = os.getenv("EMAIL")
    PASSWD = os.getenv("PASSWD")
    if not EMAIL or not PASSWD:
        raise RuntimeError("EMAIL and PASSWD environment variables must be set.")
    COOKIE_PATH_DIR = "./cookies/"
    ASSISTANT_ID = "682e0c1f5f0c3d952a27498e"  # Replace with your actual assistant ID
    chatbot = setup_chatbot(EMAIL, PASSWD, COOKIE_PATH_DIR, ASSISTANT_ID)
@app.post("/generate")
async def generate_response(request: QuestionRequest):
    """
    Generates a streaming response from the AI based on the provided question.

    Args:
        request (QuestionRequest): JSON body containing the question.

    Returns:
        StreamingResponse: A plain-text streaming response with the AI's reply.

    Raises:
        HTTPException: 500 if the chatbot has not been initialized yet.
    """
    global chatbot
    if chatbot is None:
        raise HTTPException(status_code=500, detail="Chatbot not initialized. Please try again later.")

    async def generate():
        try:
            # NOTE(review): chatbot.chat() is a blocking (synchronous) generator,
            # so each token fetch blocks the event loop; consider offloading to a
            # thread if concurrent requests matter — confirm under load.
            # Use streaming to match client expectations
            for token in chatbot.chat(request.question, stream=True):
                # Skip None tokens (hugchat streams can yield None, presumably
                # as a terminator — TODO confirm); str(None) would otherwise
                # send the literal text "None" to the client.
                if token is None:
                    continue
                # Handle hugchat response (token may be dict or string)
                if isinstance(token, dict):
                    # Extract token from dictionary (e.g., {"type": "stream", "token": "text"})
                    token_text = token.get("token", "")
                else:
                    token_text = str(token)
                # Encode and yield the token
                yield token_text.encode('utf-8')
        except Exception as e:
            # Headers are already sent by now, so report the failure in-band
            # as part of the streamed body rather than an HTTP error.
            error_message = f"Error: Failed to generate response: {str(e)}"
            yield error_message.encode('utf-8')
    return StreamingResponse(generate(), media_type="text/plain")