File size: 4,881 Bytes
0637793 3f3c9af 0637793 3f3c9af 0637793 7c9f89e 3f3c9af 7c9f89e 3f3c9af 7c9f89e f516184 3f3c9af f516184 0f34634 3f3c9af f516184 3f3c9af f516184 0f34634 f516184 3f3c9af f516184 7c9f89e 3f3c9af 7c9f89e 3f3c9af 7c9f89e 3f3c9af 7c9f89e 3f3c9af |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 |
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse, JSONResponse
from pydantic import BaseModel
from typing import List, Optional
import time
import json
import httpx
from models import AVAILABLE_MODELS, MODEL_ALIASES
# ASGI application: an OpenAI-compatible facade (see /v1/models and
# /v1/chat/completions below) proxying the chatwithmono.xyz chat API.
app = FastAPI()
def unix_id():
    """Return the current Unix time in milliseconds as a decimal string."""
    millis = int(time.time() * 1000)
    return f"{millis}"
class Message(BaseModel):
    """One entry of the OpenAI-style ``messages`` array."""
    role: str  # speaker role, e.g. "user" / "assistant" — not validated here
    content: str  # message text; forwarded verbatim to the upstream API
class ChatRequest(BaseModel):
    """Request body for POST /v1/chat/completions (OpenAI-compatible)."""
    messages: List[Message]  # full conversation to forward upstream
    model: str  # model name or alias; resolved via MODEL_ALIASES
    stream: Optional[bool] = False  # True -> SSE chunks; False -> one JSON body
@app.get("/v1/models")
async def list_models():
return {"object": "list", "data": AVAILABLE_MODELS}
@app.post("/v1/chat/completions")
async def chat_completion(request: ChatRequest):
model_id = MODEL_ALIASES.get(request.model, request.model)
headers = {
'accept': 'text/event-stream',
'content-type': 'application/json',
'origin': 'https://www.chatwithmono.xyz',
'referer': 'https://www.chatwithmono.xyz/',
'user-agent': 'Mozilla/5.0',
}
payload = {
"messages": [{"role": msg.role, "content": msg.content} for msg in request.messages],
"model": model_id
}
if request.stream:
async def event_stream():
chat_id = f"chatcmpl-{unix_id()}"
created = int(time.time())
sent_done = False
async with httpx.AsyncClient(timeout=120) as client:
async with client.stream("POST", "https://www.chatwithmono.xyz/api/chat", headers=headers, json=payload) as response:
async for line in response.aiter_lines():
if line.startswith("0:"):
try:
content_piece = json.loads(line[2:])
chunk_data = {
"id": chat_id,
"object": "chat.completion.chunk",
"created": created,
"model": model_id,
"choices": [{
"delta": {"content": content_piece},
"index": 0,
"finish_reason": None
}]
}
yield f"data: {json.dumps(chunk_data)}\n\n"
except:
continue
elif line.startswith(("e:", "d:")) and not sent_done:
sent_done = True
done_chunk = {
"id": chat_id,
"object": "chat.completion.chunk",
"created": created,
"model": model_id,
"choices": [{
"delta": {},
"index": 0,
"finish_reason": "stop"
}]
}
yield f"data: {json.dumps(done_chunk)}\n\ndata: [DONE]\n\n"
return StreamingResponse(event_stream(), media_type="text/event-stream")
else:
assistant_response = ""
usage_info = {}
async with httpx.AsyncClient(timeout=120) as client:
async with client.stream("POST", "https://www.chatwithmono.xyz/api/chat", headers=headers, json=payload) as response:
async for chunk in response.aiter_lines():
if chunk.startswith("0:"):
try:
piece = json.loads(chunk[2:])
assistant_response += piece
except:
continue
elif chunk.startswith(("e:", "d:")):
try:
data = json.loads(chunk[2:])
usage_info = data.get("usage", {})
except:
continue
return JSONResponse(content={
"id": f"chatcmpl-{unix_id()}",
"object": "chat.completion",
"created": int(time.time()),
"model": model_id,
"choices": [{
"index": 0,
"message": {
"role": "assistant",
"content": assistant_response
},
"finish_reason": "stop"
}],
"usage": {
"prompt_tokens": usage_info.get("promptTokens", 0),
"completion_tokens": usage_info.get("completionTokens", 0),
"total_tokens": usage_info.get("promptTokens", 0) + usage_info.get("completionTokens", 0),
}
})
|