import time
import uuid
from typing import AsyncGenerator, Dict, List, Optional

from aiohttp import ClientSession
from fastapi import HTTPException

from api.models import ChatRequest
from api.helper import format_prompt
from api.logger import logger
from api.config import MODEL_MAPPING, EDITEA_API_ENDPOINT, EDITEA_HEADERS


class Editee:
    label = "Editee"
    url = "https://editee.com"
    api_endpoint = EDITEA_API_ENDPOINT
    working = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = 'claude'
    models = ['claude', 'gpt4', 'gemini', 'mistrallarge']
    model_aliases = {
        "claude-3.5-sonnet": "claude",
        "gpt-4o": "gpt4",
        "gemini-pro": "gemini",
        "mistral-large": "mistrallarge",
    }
    @classmethod
    def get_model(cls, model: str) -> str:
        """Resolve a requested model name to one of this provider's model ids."""
        if model in cls.models:
            return model
        if model in cls.model_aliases:
            return cls.model_aliases[model]
        return cls.default_model
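    # Illustrative resolution examples (not part of the original module):
    #   Editee.get_model("gpt-4o")   -> "gpt4"    (alias hit)
    #   Editee.get_model("gemini")   -> "gemini"  (direct hit)
    #   Editee.get_model("unknown")  -> "claude"  (falls back to default_model)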
    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: List[Dict[str, str]],
        proxy: Optional[str] = None,
        **kwargs
    ) -> AsyncGenerator[str, None]:
        model = cls.get_model(model)
        headers = EDITEA_HEADERS
        async with ClientSession(headers=headers) as session:
            prompt = format_prompt(messages)
            data = {
                "user_input": prompt,
                "context": " ",
                "template_id": "",
                "selected_model": model,
            }
            try:
                async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                    response.raise_for_status()
                    # Use a substring check: the header may carry a charset
                    # suffix such as "application/json; charset=utf-8".
                    content_type = response.headers.get('Content-Type', '')
                    if 'application/json' in content_type:
                        response_data = await response.json()
                        yield response_data['text']
                    else:
                        # Stream the response line by line
                        async for line in response.content:
                            yield line.decode('utf-8')
            except Exception as e:
                logger.error(f"Error in Editee API call: {e}")
                raise HTTPException(status_code=500, detail="Error in Editee API call")
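

# Illustrative direct use of the provider (a sketch; `msgs` is assumed to be a
# list of {"role": ..., "content": ...} dicts built by the caller):
#
#     async for chunk in Editee.create_async_generator("claude", msgs):
#         print(chunk, end="")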


# Function to process a non-streaming response
async def process_response(request: ChatRequest):
    try:
        model = MODEL_MAPPING.get(request.model, request.model)
        messages = [
            {"role": message.role, "content": message.content}
            for message in request.messages
        ]
        generator = Editee.create_async_generator(
            model=model,
            messages=messages,
            proxy=None  # Add proxy if needed
        )
        # Drain the generator into a single completion string.
        full_response = ""
        async for chunk in generator:
            full_response += chunk
        return {
            "id": f"chatcmpl-{uuid.uuid4()}",
            "object": "chat.completion",
            "created": int(time.time()),  # Unix timestamp in seconds
            "model": model,
            "choices": [
                {
                    "index": 0,
                    "message": {"role": "assistant", "content": full_response},
                    "finish_reason": "stop",
                }
            ],
            "usage": None,
        }
    except Exception as e:
        logger.error(f"Error processing response: {e}")
        raise HTTPException(status_code=500, detail=str(e))


# Function to process a streaming response (server-sent events)
async def process_response_stream(request: ChatRequest):
    try:
        model = MODEL_MAPPING.get(request.model, request.model)
        messages = [
            {"role": message.role, "content": message.content}
            for message in request.messages
        ]
        generator = Editee.create_async_generator(
            model=model,
            messages=messages,
            proxy=None  # Add proxy if needed
        )

        async def event_generator():
            try:
                # Frame each chunk as an SSE "data:" event; this assumes a
                # chunk does not itself contain bare newlines.
                async for chunk in generator:
                    yield f"data: {chunk}\n\n"
                yield "data: [DONE]\n\n"
            except Exception as e:
                logger.error(f"Error in streaming response: {e}")
                yield f"data: [ERROR] {str(e)}\n\n"

        return event_generator()
    except Exception as e:
        logger.error(f"Error processing streaming response: {e}")
        raise HTTPException(status_code=500, detail=str(e))
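

# A minimal wiring sketch (assumptions: the FastAPI `app` object, the route
# path, and a `stream` flag on ChatRequest are not part of this module;
# StreamingResponse comes from fastapi.responses):
#
#     from fastapi import FastAPI
#     from fastapi.responses import StreamingResponse
#
#     app = FastAPI()
#
#     @app.post("/v1/chat/completions")
#     async def chat_completions(request: ChatRequest):
#         if getattr(request, "stream", False):
#             # process_response_stream returns an async generator of SSE
#             # lines, which the caller wraps in a StreamingResponse.
#             return StreamingResponse(
#                 await process_response_stream(request),
#                 media_type="text/event-stream",
#             )
#         return await process_response(request)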