from datetime import datetime import json import uuid import asyncio import random import string from typing import Any, Dict, Optional import httpx from fastapi import HTTPException from api.config import ( MODEL_MAPPING, BASE_URL, AGENT_MODE, TRENDING_AGENT_MODE, ) from api.models import ChatRequest from api.logger import setup_logger logger = setup_logger(__name__) # Helper function to create a random alphanumeric chat ID def generate_chat_id(length: int = 7) -> str: characters = string.ascii_letters + string.digits return ''.join(random.choices(characters, k=length)) # Helper function to create chat completion data def create_chat_completion_data( content: str, model: str, timestamp: int, finish_reason: Optional[str] = None ) -> Dict[str, Any]: return { "id": f"chatcmpl-{uuid.uuid4()}", "object": "chat.completion.chunk", "created": timestamp, "model": model, "choices": [ { "index": 0, "delta": {"content": content, "role": "assistant"}, "finish_reason": finish_reason, } ], "usage": None, } # Function to convert message to dictionary format, ensuring base64 data def message_to_dict(message): if isinstance(message.content, str): content = message.content elif isinstance(message.content, list) and len(message.content) > 0: content = message.content[0].get("text", "") else: content = "" if isinstance(message.content, list) and len(message.content) == 2 and "image_url" in message.content[1]: # Ensure base64 images are always included for all models return { "role": message.role, "content": content, "data": { "imageBase64": message.content[1]["image_url"]["url"], "fileText": "", "title": "snapshot", }, } return {"role": message.role, "content": content} # Function to strip model prefix from content if present def strip_model_prefix(content: str) -> str: """Function retained but no model_prefix is used.""" # Since model_prefix is removed, this function can simply return content return content # Function to get the correct referer URL for logging def get_referer_url(chat_id: str, model: str) -> str: """Generate the referer URL based on specific models listed in MODEL_REFERERS.""" # MODEL_REFERERS has been removed; referer_url defaults to BASE_URL return BASE_URL # Process streaming response without model prefixes and referers async def process_streaming_response(request: ChatRequest): chat_id = generate_chat_id() referer_url = get_referer_url(chat_id, request.model) logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}") agent_mode = AGENT_MODE.get(request.model, {}) trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {}) headers_api_chat = { 'accept': '*/*', 'accept-language': 'en-US,en;q=0.9', 'cache-control': 'no-cache', 'origin': BASE_URL, 'pragma': 'no-cache', 'priority': 'u=1, i', 'sec-ch-ua': '"Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99"', 'sec-ch-ua-mobile': '?0', 'sec-ch-ua-platform': '"Windows"', 'sec-fetch-dest': 'empty', 'sec-fetch-mode': 'cors', 'sec-fetch-site': 'same-origin', 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) ' 'AppleWebKit/537.36 (KHTML, like Gecko) ' 'Chrome/130.0.0.0 Safari/537.36', 'Content-Type': 'application/json', 'Referer': referer_url, # Retain referer if necessary } if request.model == 'o1-preview': delay_seconds = random.randint(1, 60) logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})") await asyncio.sleep(delay_seconds) json_data = { "agentMode": agent_mode, "clickedAnswer2": False, "clickedAnswer3": False, "clickedForceWebSearch": False, "codeModelMode": True, "githubToken": None, "id": chat_id, "isChromeExt": False, "isMicMode": False, "maxTokens": request.max_tokens, "messages": [message_to_dict(msg) for msg in request.messages], "mobileClient": False, "playgroundTemperature": request.temperature, "playgroundTopP": request.top_p, "previewToken": None, "trendingAgentMode": trending_agent_mode, "userId": None, "userSelectedModel": MODEL_MAPPING.get(request.model, request.model), "userSystemPrompt": None, "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc", "visitFromDelta": False, } async with httpx.AsyncClient() as client: try: async with client.stream( "POST", f"{BASE_URL}/api/chat", headers=headers_api_chat, json=json_data, timeout=100, ) as response: response.raise_for_status() async for line in response.aiter_lines(): timestamp = int(datetime.now().timestamp()) if line: content = line if content.startswith("$@$v=undefined-rv1$@$"): content = content[21:] cleaned_content = strip_model_prefix(content) yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n" yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n" yield "data: [DONE]\n\n" except httpx.HTTPStatusError as e: logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}") raise HTTPException(status_code=e.response.status_code, detail=str(e)) except httpx.RequestError as e: logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}") raise HTTPException(status_code=500, detail=str(e)) # Process non-streaming response without model prefixes and referers async def process_non_streaming_response(request: ChatRequest): chat_id = generate_chat_id() referer_url = get_referer_url(chat_id, request.model) logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}") agent_mode = AGENT_MODE.get(request.model, {}) trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {}) headers_api_chat = { 'accept': '*/*', 'accept-language': 'en-US,en;q=0.9', 'cache-control': 'no-cache', 'origin': BASE_URL, 'pragma': 'no-cache', 'priority': 'u=1, i', 'sec-ch-ua': '"Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99"', 'sec-ch-ua-mobile': '?0', 'sec-ch-ua-platform': '"Windows"', 'sec-fetch-dest': 'empty', 'sec-fetch-mode': 'cors', 'sec-fetch-site': 'same-origin', 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) ' 'AppleWebKit/537.36 (KHTML, like Gecko) ' 'Chrome/130.0.0.0 Safari/537.36', 'Content-Type': 'application/json', 'Referer': referer_url, # Retain referer if necessary } headers_chat = { 'accept': '*/*', 'accept-language': 'en-US,en;q=0.9', 'cache-control': 'no-cache', 'content-type': 'text/plain;charset=UTF-8', 'origin': BASE_URL, 'pragma': 'no-cache', 'priority': 'u=1, i', 'Referer': referer_url, 'next-action': str(uuid.uuid4()), 'next-router-state-tree': json.dumps([""]), 'next-url': '/', } if request.model == 'o1-preview': delay_seconds = random.randint(20, 60) logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})") await asyncio.sleep(delay_seconds) json_data = { "agentMode": agent_mode, "clickedAnswer2": False, "clickedAnswer3": False, "clickedForceWebSearch": False, "codeModelMode": True, "githubToken": None, "id": chat_id, "isChromeExt": False, "isMicMode": False, "maxTokens": request.max_tokens, "messages": [message_to_dict(msg) for msg in request.messages], "mobileClient": False, "playgroundTemperature": request.temperature, "playgroundTopP": request.top_p, "previewToken": None, "trendingAgentMode": trending_agent_mode, "userId": None, "userSelectedModel": MODEL_MAPPING.get(request.model, request.model), "userSystemPrompt": None, "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc", "visitFromDelta": False, } full_response = "" async with httpx.AsyncClient() as client: try: async with client.stream( method="POST", url=f"{BASE_URL}/api/chat", headers=headers_api_chat, json=json_data ) as response: response.raise_for_status() async for chunk in response.aiter_text(): full_response += chunk except httpx.HTTPStatusError as e: logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}") raise HTTPException(status_code=e.response.status_code, detail=str(e)) except httpx.RequestError as e: logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}") raise HTTPException(status_code=500, detail=str(e)) if full_response.startswith("$@$v=undefined-rv1$@$"): full_response = full_response[21:] cleaned_full_response = strip_model_prefix(full_response) return { "id": f"chatcmpl-{uuid.uuid4()}", "object": "chat.completion", "created": int(datetime.now().timestamp()), "model": request.model, "choices": [ { "index": 0, "message": {"role": "assistant", "content": cleaned_full_response}, "finish_reason": "stop", } ], "usage": None, }