File size: 1,638 Bytes
0218c20
03991d8
 
 
d7f32ed
 
6a547e4
d7f32ed
 
 
 
 
29331bd
d7f32ed
 
 
62af7fd
d7f32ed
 
 
29331bd
d7f32ed
 
 
0218c20
d7f32ed
 
eb96984
d7f32ed
 
 
03991d8
d7f32ed
03991d8
 
2ba12d8
03991d8
 
0218c20
03991d8
 
 
 
 
d7f32ed
03991d8
 
 
d7f32ed
 
 
 
 
 
 
03991d8
d7f32ed
03991d8
 
eb96984
d7f32ed
eb96984
d7f32ed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
from fastapi import FastAPI
from pydantic import BaseModel
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from hugchat import hugchat
from hugchat.login import Login
import asyncio
import os
from dotenv import load_dotenv

# Load environment variables from .env file
load_dotenv()

# Read HugChat credentials from environment variables (.env or shell).
EMAIL = os.getenv("EMAIL")
PASSWD = os.getenv("PASSWD")
# Fail fast with a clear message instead of an opaque login error later.
if not EMAIL or not PASSWD:
    raise RuntimeError("EMAIL and PASSWD must be set in the environment or .env file")

# Cookie storage: persist the HugChat session so repeat runs reuse it.
cookie_path_dir = "./cookies/"
os.makedirs(cookie_path_dir, exist_ok=True)

# HugChat login.
# Fix: the original file called `sign.login(...)` once BEFORE `sign` was
# defined (a stray duplicate line), which raised NameError at import time.
sign = Login(EMAIL, PASSWD)
cookies = sign.login(cookie_dir_path=cookie_path_dir, save_cookies=True)

# Create chatbot instance bound to the saved session cookies
chatbot = hugchat.ChatBot(cookies=cookies.get_dict())

# Optional: Use assistant ID
ASSISTANT_ID = "66017fca58d60bd7d5c5c26c"  # Replace if needed
chatbot.new_conversation(assistant=ASSISTANT_ID, switch_to=True)

# FastAPI setup
app = FastAPI()

# Enable CORS.
# NOTE(review): browsers reject allow_origins=["*"] combined with
# allow_credentials=True; acceptable for local development only.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Request model
class Question(BaseModel):
    """Request body for /ask: the user's question as plain text."""

    question: str

# Token stream function
async def generate_response_stream(prompt: str):
    """Yield the chatbot's reply to *prompt* token by token.

    Empty tokens from the stream are skipped; a short sleep between
    tokens yields control to the event loop while streaming.
    """
    stream = chatbot.chat(prompt, stream=True)
    for piece in stream:
        text = piece.get("token", "")
        if not text:
            continue
        yield text
        await asyncio.sleep(0.02)

# Endpoint
@app.post("/ask")
async def ask(question: Question):
    """Stream the assistant's answer to the posted question as plain text."""
    token_stream = generate_response_stream(question.question)
    return StreamingResponse(token_stream, media_type="text/plain")