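"""OpenAI-compatible chat-completions proxy.

Exposes POST /hf/v1/chat/completions and forwards each request to the
chatpro.ai-pro.org ask API (openAI or claude, chosen by model name),
translating the upstream SSE stream into OpenAI-style chat.completion
chunks. (This docstring summarizes the code below; it is descriptive,
not part of the original file.)
"""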
import json
import random

import aiohttp
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse, StreamingResponse

app = FastAPI()

def generate_random_ip():
    """Return a random IPv4 address for the X-Forwarded-For header."""
    return f"{random.randint(1, 255)}.{random.randint(0, 255)}.{random.randint(0, 255)}.{random.randint(0, 255)}"

def generate_user_agent():
    """Return a plausible User-Agent string for a random OS/browser pair."""
    os_list = ['Windows NT 10.0', 'Windows NT 6.1', 'Mac OS X 10_15_7', 'Ubuntu', 'Linux x86_64']
    browser_list = ['Chrome', 'Firefox', 'Safari', 'Edge']
    chrome_version = f"{random.randint(70, 126)}.0.{random.randint(1000, 9999)}.{random.randint(100, 999)}"
    firefox_version = f"{random.randint(70, 100)}.0"
    safari_version = f"{random.randint(600, 615)}.{random.randint(1, 9)}.{random.randint(1, 9)}"
    edge_version = f"{random.randint(80, 100)}.0.{random.randint(1000, 9999)}.{random.randint(100, 999)}"
    os_name = random.choice(os_list)  # renamed from `os` to avoid shadowing the module name
    browser = random.choice(browser_list)
    if browser == 'Chrome':
        return f"Mozilla/5.0 ({os_name}) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/{chrome_version} Safari/537.36"
    elif browser == 'Firefox':
        return f"Mozilla/5.0 ({os_name}; rv:{firefox_version}) Gecko/20100101 Firefox/{firefox_version}"
    elif browser == 'Safari':
        return f"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/{safari_version} (KHTML, like Gecko) Version/{safari_version.split('.')[0]}.1.2 Safari/{safari_version}"
    else:  # Edge; was a trailing elif, which could fall through and return None
        return f"Mozilla/5.0 ({os_name}) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/{edge_version} Safari/537.36 Edg/{edge_version}"

def format_openai_response(content, finish_reason=None):
    """Build a single OpenAI-style chat.completion.chunk payload."""
    return {
        "id": "chatcmpl-123",
        "object": "chat.completion.chunk",
        "created": 1677652288,
        "model": "gpt-4o",
        "choices": [{
            # Content chunks carry a delta; the closing chunk carries an
            # empty delta plus a choice-level finish_reason, matching the
            # OpenAI streaming format (finish_reason was previously placed
            # inside the delta, which clients do not expect).
            "delta": {"content": content} if content else {},
            "index": 0,
            "finish_reason": finish_reason
        }]
    }
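
# For reference, a content chunk produced above serializes to, e.g.:
#   {"id": "chatcmpl-123", "object": "chat.completion.chunk",
#    "created": 1677652288, "model": "gpt-4o",
#    "choices": [{"delta": {"content": "Hello"}, "index": 0,
#                 "finish_reason": null}]}
# and the closing chunk has an empty delta with "finish_reason": "stop".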

@app.post('/hf/v1/chat/completions')
async def chat_completions(request: Request):
    data = await request.json()
    messages = data.get('messages', [])
    stream = data.get('stream', False)
    if not messages:
        # Returning a (body, status) tuple does not set the status code in
        # FastAPI; an explicit JSONResponse is required.
        return JSONResponse(status_code=400, content={"error": "No messages provided"})
    model = data.get('model', 'gpt-4o')
    # Route the request to the matching upstream endpoint by model prefix.
    if model.startswith('gpt'):
        endpoint = "openAI"
        original_api_url = 'https://chatpro.ai-pro.org/api/ask/openAI'
    elif model.startswith('claude'):
        endpoint = "claude"
        original_api_url = 'https://chatpro.ai-pro.org/api/ask/claude'
    else:
        return JSONResponse(status_code=400, content={"error": "Unsupported model"})
    # Spoofed client headers for the upstream request.
    headers = {
        'content-type': 'application/json',
        'X-Forwarded-For': generate_random_ip(),
        'origin': 'https://chatpro.ai-pro.org',
        'user-agent': generate_user_agent()
    }

    async def generate():
        full_response = ""
        while True:
            # Flatten the conversation into a single prompt for the upstream API.
            conversation = "\n".join(f"{msg['role']}: {msg['content']}" for msg in messages)
            conversation += "\nPlease follow and reply to the user's recent messages and avoid answers that summarize the conversation history."
            payload = {
                "text": conversation,
                "endpoint": endpoint,
                "model": model
            }
            got_final = False
            # aiosseclient only issues GET requests, so the SSE stream is
            # fetched with an aiohttp POST and parsed line by line instead.
            async with aiohttp.ClientSession() as session:
                async with session.post(original_api_url, headers=headers, json=payload) as resp:
                    async for raw_line in resp.content:
                        line = raw_line.decode('utf-8', errors='ignore').strip()
                        if not line.startswith('data:'):
                            continue
                        event_data = line[len('data:'):].strip()
                        if event_data.startswith('{"text":'):
                            chunk = json.loads(event_data)
                            # The upstream resends the full text each event;
                            # forward only the newly appended suffix.
                            new_content = chunk['text'][len(full_response):]
                            full_response = chunk['text']
                            if new_content:
                                yield f"data: {json.dumps(format_openai_response(new_content))}\n\n"
                        elif '"final":true' in event_data:
                            got_final = True
                            final_data = json.loads(event_data)
                            response_message = final_data.get('responseMessage', {})
                            finish_reason = response_message.get('finish_reason', 'stop')
                            if finish_reason == 'length':
                                # Output was truncated: append a continuation
                                # prompt and start another upstream request.
                                messages.append({"role": "assistant", "content": full_response})
                                messages.append({"role": "user", "content": "Please continue your output and do not repeat the previous content"})
                                break
                            # Flush any trailing text the final event carries.
                            last_content = response_message.get('text', '')
                            if last_content and last_content != full_response:
                                yield f"data: {json.dumps(format_openai_response(last_content[len(full_response):]))}\n\n"
                            yield f"data: {json.dumps(format_openai_response('', finish_reason))}\n\n"
                            yield "data: [DONE]\n\n"
                            return
            if not got_final:
                # The stream ended without a final event; close it out as a
                # normal stop so the client is not left hanging. (This branch
                # was unreachable in the original while True loop.)
                yield f"data: {json.dumps(format_openai_response('', 'stop'))}\n\n"
                yield "data: [DONE]\n\n"
                return

    if stream:
        return StreamingResponse(generate(), media_type='text/event-stream')
    else:
        # Drain the stream internally and aggregate the deltas into a single
        # non-streaming chat.completion response.
        full_response = ""
        finish_reason = "stop"
        async for chunk in generate():
            if chunk.startswith("data: ") and chunk.strip() != "data: [DONE]":
                response_data = json.loads(chunk[6:])
                if response_data.get('choices'):
                    choice = response_data['choices'][0]
                    delta = choice.get('delta', {})
                    if 'content' in delta:
                        full_response += delta['content']
                    # finish_reason lives at the choice level, not in delta.
                    if choice.get('finish_reason'):
                        finish_reason = choice['finish_reason']
        return {
            "id": "chatcmpl-123",
            "object": "chat.completion",
            "created": 1677652288,
            "model": model,
            "choices": [{
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": full_response
                },
                "finish_reason": finish_reason
            }],
            "usage": {
                # The upstream API does not report token usage.
                "prompt_tokens": 0,
                "completion_tokens": 0,
                "total_tokens": 0
            }
        }
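
# --- Usage sketch (illustrative; the filename and port are assumptions) ---
# Assuming this file is saved as main.py, the server can be started with:
#
#   uvicorn main:app --port 8000
#
# A minimal non-streaming client call would then look like:
#
#   import requests
#   resp = requests.post(
#       "http://localhost:8000/hf/v1/chat/completions",
#       json={
#           "model": "gpt-4o",
#           "messages": [{"role": "user", "content": "Hello"}],
#           "stream": False,
#       },
#   )
#   print(resp.json()["choices"][0]["message"]["content"])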