# NOTE(review): the three lines below are extraction artifacts (file size,
# VCS blame hashes, and a line-number gutter), not Python — commented out so
# the module parses. Preserved verbatim for provenance:
# File size: 4,730 Bytes
# b50abc0 332aafb b50abc0 332aafb b50abc0 332aafb b50abc0 332aafb b50abc0 332aafb a649779 b50abc0 332aafb b50abc0 a649779 b50abc0 332aafb b50abc0 332aafb b50abc0 8fbb228 b50abc0 332aafb b50abc0 332aafb b50abc0 332aafb |
# 1 2 3 ... 133 |
import json
import logging
import os
import uuid
from datetime import datetime
from typing import Any, Dict, List, Optional
import httpx
import uvicorn
from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException, Depends
from pydantic import BaseModel
from starlette.responses import StreamingResponse
# Timestamped log format so request handling can be traced in service logs.
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)
load_dotenv()  # load APP_SECRET / SD_ACCESS_TOKEN from a local .env file, if present
app = FastAPI()
# NOTE(review): APP_SECRET is read but never used in this chunk — confirm it is
# checked elsewhere (e.g. a Depends-based auth guard) or remove it.
APP_SECRET = os.getenv("APP_SECRET", "666")
ACCESS_TOKEN = os.getenv("SD_ACCESS_TOKEN", "")  # bearer token for the sider.ai API
# Headers sent on every upstream request; a browser user-agent is presented to
# the sider.ai endpoint alongside the bearer token.
headers = {
    'authorization': f'Bearer {ACCESS_TOKEN}',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0',
}
# Models this proxy accepts; any other model name is rejected with HTTP 400.
ALLOWED_MODELS = [
    "claude-3.5-sonnet", "sider", "gpt-4o-mini", "claude-3-haiku", "claude-3.5-haiku",
    "gemini-1.5-flash", "llama-3", "gpt-4o", "gemini-1.5-pro", "llama-3.1-405b"
]
class Message(BaseModel):
    """One chat turn in an OpenAI-style messages array."""

    # Role of the speaker; the prompt builder treats "user" specially and
    # renders every other role (assistant, system, ...) as "Assistant".
    role: str
    # The message text.
    content: str
class ChatRequest(BaseModel):
    """Request body for POST /hf/v1/chat/completions (OpenAI chat shape)."""

    # Must be one of ALLOWED_MODELS or the endpoint returns HTTP 400.
    model: str
    # Full conversation history; flattened into a single prompt string upstream.
    messages: List[Message]
    # True -> SSE streaming response; False/None -> one aggregated JSON response.
    stream: Optional[bool] = False
def create_chat_completion_data(content: str, model: str, finish_reason: Optional[str] = None) -> Dict[str, Any]:
    """Build a single OpenAI-compatible ``chat.completion.chunk`` payload.

    Args:
        content: Incremental text to place in the chunk's delta.
        model: Model name echoed back to the client.
        finish_reason: ``None`` for intermediate chunks, ``"stop"`` for the
            final chunk of a stream.

    Returns:
        A dict shaped like one OpenAI streaming chunk, with a fresh
        ``chatcmpl-<uuid>`` id and the current epoch timestamp.
    """
    delta = {"content": content, "role": "assistant"}
    choice = {
        "index": 0,
        "delta": delta,
        "finish_reason": finish_reason,
    }
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": int(datetime.now().timestamp()),
        "model": model,
        "choices": [choice],
        "usage": None,
    }
@app.post("/hf/v1/chat/completions")
async def chat_completions(request: ChatRequest):
    """Proxy an OpenAI-style chat completion request to the sider.ai API.

    Validates the requested model, flattens the message list into a single
    plain-text prompt, and streams the upstream SSE response. Returns either
    a ``text/event-stream`` of OpenAI-compatible chunks (``stream=True``) or
    one aggregated ``chat.completion`` JSON object.

    Raises:
        HTTPException: 400 for a disallowed model; the upstream status code
            on an upstream HTTP error; 500 on a transport failure.
    """
    logger.info(f"Received chat completion request for model: {request.model}")
    if request.model not in ALLOWED_MODELS:
        logger.error(f"Model {request.model} is not allowed.")
        raise HTTPException(
            status_code=400,
            detail=f"Model {request.model} is not allowed. Allowed models are: {', '.join(ALLOWED_MODELS)}",
        )
    # Upstream expects one prompt string, not an OpenAI messages array, so the
    # history is flattened to "User:/Assistant:" lines (non-user roles,
    # including "system", are rendered as Assistant).
    json_data = {
        'prompt': "\n".join(
            [f"{'User' if msg.role == 'user' else 'Assistant'}: {msg.content}" for msg in request.messages]
        ),
        'stream': True,
        'model': request.model,
    }
    logger.info(f"Sending request to external API with data: {json_data}")

    async def generate():
        """Yield OpenAI-style SSE chunks translated from the upstream stream."""
        async with httpx.AsyncClient() as client:
            try:
                async with client.stream('POST', 'https://sider.ai/api/v3/completion/text', headers=headers, json=json_data, timeout=120.0) as response:
                    response.raise_for_status()
                    async for line in response.aiter_lines():
                        # Only "data:<json>" payload lines carry content; skip
                        # blanks, the [DONE] sentinel, and SSE comment/event
                        # lines (previously any such line crashed the stream
                        # with a JSONDecodeError).
                        if not line or "[DONE]" in line or not line.startswith("data:"):
                            continue
                        try:
                            payload = json.loads(line[5:])
                        except json.JSONDecodeError:
                            logger.warning(f"Skipping malformed upstream line: {line!r}")
                            continue
                        # .get() instead of ["data"] so a payload without a
                        # "data" key does not abort the whole response.
                        content = payload.get("data") or {}
                        yield f"data: {json.dumps(create_chat_completion_data(content.get('text', ''), request.model))}\n\n"
                    # Terminate the client stream with a stop chunk + [DONE].
                    yield f"data: {json.dumps(create_chat_completion_data('', request.model, 'stop'))}\n\n"
                    yield "data: [DONE]\n\n"
            except httpx.HTTPStatusError as e:
                logger.error(f"HTTP error occurred: {e}")
                raise HTTPException(status_code=e.response.status_code, detail=str(e))
            except httpx.RequestError as e:
                logger.error(f"An error occurred while requesting: {e}")
                raise HTTPException(status_code=500, detail=str(e))

    if request.stream:
        logger.info("Streaming response")
        return StreamingResponse(generate(), media_type="text/event-stream")
    else:
        logger.info("Non-streaming response")
        # Aggregate the same chunk stream into one completion object.
        full_response = ""
        async for chunk in generate():
            if chunk.startswith("data: ") and not chunk[6:].startswith("[DONE]"):
                data = json.loads(chunk[6:])
                if data["choices"][0]["delta"].get("content"):
                    full_response += data["choices"][0]["delta"]["content"]
        logger.info(f"Full response generated: {full_response}")
        return {
            "id": f"chatcmpl-{uuid.uuid4()}",
            "object": "chat.completion",
            "created": int(datetime.now().timestamp()),
            "model": request.model,
            "choices": [
                {
                    "index": 0,
                    "message": {"role": "assistant", "content": full_response},
                    "finish_reason": "stop",
                }
            ],
            "usage": None,
        }
# Run the ASGI app directly when executed as a script; port 7860 is
# presumably chosen for Hugging Face Spaces (the "/hf" route prefix
# suggests this) — confirm against the deployment config.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)
|