import os
import re
import time
import json

import grpc
from typing import List, Optional, Union

from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse, StreamingResponse
from pydantic import BaseModel
from dotenv import load_dotenv
from grpc_tools import protoc

load_dotenv()


class Config:
    def __init__(self):
        # Strip any trailing slash so routes don't end up as '//v1/...'.
        self.API_PREFIX = os.getenv('API_PREFIX', '').rstrip('/')
        self.API_KEY = os.getenv('API_KEY', '')
        self.MAX_RETRY_COUNT = int(os.getenv('MAX_RETRY_COUNT', '3'))
        self.RETRY_DELAY = int(os.getenv('RETRY_DELAY', '5000'))
        self.COMMON_GRPC = 'runtime-native-io-vertex-inference-grpc-service-lmuw6mcn3q-ul.a.run.app'
        self.COMMON_PROTO = 'protos/VertexInferenceService.proto'
        self.GPT_GRPC = 'runtime-native-io-gpt-inference-grpc-service-lmuw6mcn3q-ul.a.run.app'
        self.GPT_PROTO = 'protos/GPTInferenceService.proto'
        self.PORT = int(os.getenv('PORT', '8787'))
        self.SUPPORTED_MODELS = [
            "gpt-4o-mini", "gpt-4o", "gpt-4-turbo", "gpt-4", "gpt-3.5-turbo",
            "claude-3-sonnet@20240229", "claude-3-opus@20240229", "claude-3-haiku@20240307",
            "claude-3-5-sonnet@20240620", "gemini-1.5-flash", "gemini-1.5-pro",
            "chat-bison", "codechat-bison"
        ]

    def is_valid_model(self, model):
        # Normalize OpenAI-style Claude names such as "claude-3-5-sonnet-20240620"
        # to the "@"-separated form used in SUPPORTED_MODELS.
        regex_input = r'^(claude-3-(5-sonnet|haiku|sonnet|opus))-(\d{8})$'
        match_input = re.match(regex_input, model)
        normalized_model = f"{match_input.group(1)}@{match_input.group(3)}" if match_input else model
        return normalized_model in self.SUPPORTED_MODELS


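# A minimal illustrative .env for the variables the Config class above reads;
# the values here are placeholders, not defaults shipped with this project:
#
#   API_PREFIX=/api
#   API_KEY=your-key-here
#   MAX_RETRY_COUNT=3
#   RETRY_DELAY=5000
#   PORT=8787

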
class GRPCHandler:
    def __init__(self, proto_file):
        self.proto_file = proto_file
        self._compile_proto()
        self._load_proto()

    def _compile_proto(self):
        # Generate <name>_pb2.py and <name>_pb2_grpc.py in the working directory.
        proto_dir = os.path.dirname(self.proto_file)
        proto_file = os.path.basename(self.proto_file)
        protoc.main((
            '',
            f'-I{proto_dir}',
            '--python_out=.',
            '--grpc_python_out=.',
            os.path.join(proto_dir, proto_file)
        ))

    def _load_proto(self):
        base_name = os.path.splitext(os.path.basename(self.proto_file))[0]
        # Service stubs live in <name>_pb2_grpc, message classes in <name>_pb2.
        stub_module = __import__(base_name + '_pb2_grpc')
        message_module = __import__(base_name + '_pb2')
        self.stub_class = getattr(stub_module, f"{base_name}Stub")
        # Assumes the proto defines a top-level message named "Request".
        self.request_class = getattr(message_module, 'Request')

    async def grpc_to_pieces(self, model, content, rules, temperature, top_p):
        channel = grpc.aio.secure_channel(
            config.COMMON_GRPC if not model.startswith('gpt') else config.GPT_GRPC,
            grpc.ssl_channel_credentials()
        )
        stub = self.stub_class(channel)

        try:
            request = self._build_request(model, content, rules, temperature, top_p)
            response = await stub.Predict(request)
            return self._process_response(response, model)
        except grpc.RpcError as e:
            print(f"RPC failed: {e}")
            return {"error": str(e)}
        finally:
            await channel.close()

    async def grpc_to_pieces_stream(self, model, content, rules, temperature, top_p):
        channel = grpc.aio.secure_channel(
            config.COMMON_GRPC if not model.startswith('gpt') else config.GPT_GRPC,
            grpc.ssl_channel_credentials()
        )
        stub = self.stub_class(channel)

        try:
            request = self._build_request(model, content, rules, temperature, top_p)
            async for response in stub.PredictWithStream(request):
                result = self._process_stream_response(response, model)
                if result:
                    yield f"data: {json.dumps(result)}\n\n"
            # Signal end-of-stream to OpenAI-compatible clients.
            yield "data: [DONE]\n\n"
        except grpc.RpcError as e:
            print(f"Stream RPC failed: {e}")
            yield f"data: {json.dumps({'error': str(e)})}\n\n"
        finally:
            await channel.close()

    def _build_request(self, model, content, rules, temperature, top_p):
        if model.startswith('gpt'):
            return self.request_class(
                models=model,
                messages=[
                    {"role": 0, "message": rules},
                    {"role": 1, "message": content}
                ],
                temperature=temperature or 0.1,
                top_p=top_p or 1.0
            )
        else:
            return self.request_class(
                models=model,
                args={
                    "messages": {
                        "unknown": 1,
                        "message": content
                    },
                    "rules": rules
                }
            )

    def _process_response(self, response, model):
        if response.response_code == 200:
            if model.startswith('gpt'):
                message = response.body.message_warpper.message.message
            else:
                message = response.args.args.args.message
            return chat_completion_with_model(message, model)
        return {"error": f"Invalid response code: {response.response_code}"}

    def _process_stream_response(self, response, model):
        if response.response_code == 204:
            # 204 carries no payload; emit nothing.
            return None
        elif response.response_code == 200:
            if model.startswith('gpt'):
                message = response.body.message_warpper.message.message
            else:
                message = response.args.args.args.message
            return chat_completion_stream_with_model(message, model)
        else:
            return {"error": f"Invalid response code: {response.response_code}"}


def messages_process(messages):
    rules = ''
    message = ''

    for msg in messages:
        role = msg.role
        content = msg.content

        # OpenAI-style content can be a list of parts; keep only the text parts.
        if isinstance(content, list):
            content = ''.join([item.get('text', '') for item in content if item.get('text')])

        if role == 'system':
            rules += f"system:{content};\r\n"
        elif role in ['user', 'assistant']:
            message += f"{role}:{content};\r\n"

    return rules, message


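# Illustrative example of the flattening performed by messages_process: a system
# message "Be brief." followed by a user message "Hi" yields
#   rules   == "system:Be brief.;\r\n"
#   message == "user:Hi;\r\n"

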
def chat_completion_with_model(message: str, model: str):
    return {
        "id": "Chat-Nekohy",
        "object": "chat.completion",
        "created": int(time.time()),
        "model": model,
        "usage": {
            "prompt_tokens": 0,
            "completion_tokens": 0,
            "total_tokens": 0,
        },
        "choices": [
            {
                "message": {
                    "content": message,
                    "role": "assistant",
                },
                "index": 0,
                "finish_reason": "stop",
            },
        ],
    }


def chat_completion_stream_with_model(text: str, model: str):
    return {
        "id": "chatcmpl-Nekohy",
        "object": "chat.completion.chunk",
        "created": int(time.time()),
        "model": model,
        "choices": [
            {
                "index": 0,
                "delta": {
                    "content": text,
                },
                "finish_reason": None,
            },
        ],
    }


config = Config()

app = FastAPI()


class ChatMessage(BaseModel):
    role: str
    # Content may be a plain string or an OpenAI-style list of content parts.
    content: Union[str, List[dict]]


class ChatCompletionRequest(BaseModel):
    model: str
    messages: List[ChatMessage]
    stream: Optional[bool] = False
    temperature: Optional[float] = None
    top_p: Optional[float] = None


@app.get("/") |
|
async def root(): |
|
return {"message": "API 服务运行中~"} |
|
|
|
@app.get("/ping") |
|
async def ping(): |
|
return {"message": "pong"} |
|
|
|
@app.get(config.API_PREFIX + "/v1/models")
async def list_models():
    with open('cloud_model.json', 'r') as f:
        cloud_models = json.load(f)

    models = [
        {"id": model["unique"], "object": "model", "owned_by": "pieces-os"}
        for model in cloud_models["iterable"]
    ]

    return JSONResponse({
        "object": "list",
        "data": models
    })


@app.post(config.API_PREFIX + "/v1/chat/completions")
async def chat_completions(request: ChatCompletionRequest):
    if not config.is_valid_model(request.model):
        raise HTTPException(status_code=404, detail=f"Model '{request.model}' does not exist")

    rules, content = messages_process(request.messages)

    grpc_handler = GRPCHandler(config.COMMON_PROTO if not request.model.startswith('gpt') else config.GPT_PROTO)

    if request.stream:
        return StreamingResponse(
            grpc_handler.grpc_to_pieces_stream(
                request.model, content, rules, request.temperature, request.top_p
            ),
            media_type="text/event-stream"
        )
    else:
        response = await grpc_handler.grpc_to_pieces(
            request.model, content, rules, request.temperature, request.top_p
        )
        return JSONResponse(content=response)


if __name__ == "__main__": |
|
import uvicorn |
|
uvicorn.run(app, host="0.0.0.0", port=config.PORT) |
|
|
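
# Example request once the server is running (a sketch that assumes the default
# port 8787 and an empty API_PREFIX):
#
#   curl http://localhost:8787/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"model": "gpt-4o", "messages": [{"role": "user", "content": "Hello"}], "stream": false}'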