"""OpenAI-compatible chat proxy.

Exposes POST /hf/v1/chat/completions and forwards requests to the
chatpro.ai-pro.org "ask" API, re-emitting its SSE stream as OpenAI-style
chat.completion / chat.completion.chunk payloads.

Requires Flask with async view support (pip install "flask[async]") and
aiohttp.
"""
import asyncio
import json
import random

import aiohttp
from flask import Flask, request, Response, stream_with_context

app = Flask(__name__)


def generate_random_ip():
    """Return a random dotted-quad IPv4 string for the X-Forwarded-For header."""
    return f"{random.randint(1, 255)}.{random.randint(0, 255)}.{random.randint(0, 255)}.{random.randint(0, 255)}"


def generate_user_agent():
    """Build a randomized, plausible User-Agent string for one of four browsers."""
    os_list = ['Windows NT 10.0', 'Windows NT 6.1', 'Mac OS X 10_15_7', 'Ubuntu', 'Linux x86_64']
    browser_list = ['Chrome', 'Firefox', 'Safari', 'Edge']
    chrome_version = f"{random.randint(70, 126)}.0.{random.randint(1000, 9999)}.{random.randint(100, 999)}"
    firefox_version = f"{random.randint(70, 100)}.0"
    safari_version = f"{random.randint(600, 615)}.{random.randint(1, 9)}.{random.randint(1, 9)}"
    edge_version = f"{random.randint(80, 100)}.0.{random.randint(1000, 9999)}.{random.randint(100, 999)}"

    os_name = random.choice(os_list)  # not `os`, to avoid shadowing the stdlib module name
    browser = random.choice(browser_list)

    if browser == 'Chrome':
        return f"Mozilla/5.0 ({os_name}) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/{chrome_version} Safari/537.36"
    elif browser == 'Firefox':
        return f"Mozilla/5.0 ({os_name}; rv:{firefox_version}) Gecko/20100101 Firefox/{firefox_version}"
    elif browser == 'Safari':
        # Safari UAs are only plausible on macOS, so pin the platform token.
        return f"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/{safari_version} (KHTML, like Gecko) Version/{safari_version.split('.')[0]}.1.2 Safari/{safari_version}"
    else:  # Edge; a final else guarantees every branch returns a string
        return f"Mozilla/5.0 ({os_name}) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/{edge_version} Safari/537.36 Edg/{edge_version}"


def format_openai_response(content, finish_reason=None, model="gpt-4o"):
    """Wrap a text delta in an OpenAI-style chat.completion.chunk envelope."""
    return {
        "id": "chatcmpl-123",
        "object": "chat.completion.chunk",
        "created": 1677652288,
        "model": model,
        "choices": [{
            # The final chunk carries an empty delta; finish_reason sits on
            # the choice itself, matching the OpenAI streaming format.
            "delta": {"content": content} if content else {},
            "index": 0,
            "finish_reason": finish_reason
        }]
    }
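
# For illustration only: one streamed line produced from the envelope above
# (placeholder content, emitted as a single line on the wire):
#   data: {"id": "chatcmpl-123", "object": "chat.completion.chunk",
#          "created": 1677652288, "model": "gpt-4o",
#          "choices": [{"delta": {"content": "Hi"}, "index": 0, "finish_reason": null}]}

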
@app.route('/hf/v1/chat/completions', methods=['POST'])
async def chat_completions():
    data = request.get_json(silent=True) or {}
    messages = data.get('messages', [])
    stream = data.get('stream', False)

    if not messages:
        return {"error": "No messages provided"}, 400

    # Route to the upstream endpoint matching the requested model family.
    model = data.get('model', 'gpt-4o')
    if model.startswith('gpt'):
        endpoint = "openAI"
        original_api_url = 'https://chatpro.ai-pro.org/api/ask/openAI'
    elif model.startswith('claude'):
        endpoint = "claude"
        original_api_url = 'https://chatpro.ai-pro.org/api/ask/claude'
    else:
        return {"error": "Unsupported model"}, 400

    # Randomize per-request client fingerprints so the upstream sees each
    # request as coming from a different client.
    headers = {
        'content-type': 'application/json',
        'X-Forwarded-For': generate_random_ip(),
        'origin': 'https://chatpro.ai-pro.org',
        'user-agent': generate_user_agent()
    }

    async def generate():
        # Flatten the chat history into one plain-text prompt; the upstream
        # API takes a single "text" field rather than structured messages.
        conversation = "\n".join(f"{msg['role']}: {msg['content']}" for msg in messages)
        conversation += "\nPlease follow and reply to the user’s recent messages and avoid answers that summarize the conversation history."

        payload = {
            "text": conversation,
            "endpoint": endpoint,
            "model": model
        }

        full_response = ""
        # Re-request until the upstream emits a final event; full_response
        # records what has already been yielded, so a reconnect does not
        # re-send text the client has already received.
        while True:
            async with aiohttp.ClientSession() as session:
                async with session.post(original_api_url, headers=headers, json=payload) as response:
                    async for line in response.content:
                        if not line.startswith(b'data: '):
                            continue
                        # str.lstrip() strips a character *set*, not a prefix,
                        # so slice off "data: " instead.
                        event_data = line.decode('utf-8')[len('data: '):].strip()
                        if not event_data:
                            continue
                        event = json.loads(event_data)
                        if event.get("final"):
                            yield f"data: {json.dumps(format_openai_response('', 'stop', model))}\n\n"
                            return
                        # The upstream sends cumulative text; forward only the
                        # suffix the client has not seen yet.
                        new_content = event.get('text', '')[len(full_response):]
                        if new_content:
                            full_response = event.get('text', '')
                            yield f"data: {json.dumps(format_openai_response(new_content, model=model))}\n\n"

    if stream:
        # stream_with_context needs a plain (sync) iterator, so bridge the
        # async generator onto a dedicated event loop.
        def sync_stream():
            loop = asyncio.new_event_loop()
            agen = generate()
            try:
                while True:
                    yield loop.run_until_complete(agen.__anext__())
            except StopAsyncIteration:
                pass
            finally:
                loop.close()

        return Response(stream_with_context(sync_stream()), content_type='text/event-stream')
    else:
        # Non-streaming mode: drain the stream and aggregate the deltas into
        # a single chat.completion response.
        full_response = ""
        finish_reason = "stop"
        async for chunk in generate():
            response_data = json.loads(chunk[len('data: '):])
            if 'choices' in response_data and response_data['choices']:
                choice = response_data['choices'][0]
                delta = choice.get('delta', {})
                if 'content' in delta:
                    full_response += delta['content']
                if choice.get('finish_reason'):
                    finish_reason = choice['finish_reason']

        return {
            "id": "chatcmpl-123",
            "object": "chat.completion",
            "created": 1677652288,
            "model": model,
            "choices": [{
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": full_response
                },
                "finish_reason": finish_reason
            }],
            # Token counts are not exposed by the upstream API, so usage is
            # reported as zeros.
            "usage": {
                "prompt_tokens": 0,
                "completion_tokens": 0,
                "total_tokens": 0
            }
        }


if __name__ == '__main__':
    # Debug mode and the fixed port are intended for local development only.
    app.run(debug=True, port=5000)
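

# Example request against a local instance (host/port assumed from the
# app.run() call above; the model name must start with "gpt" or "claude"):
#
#   curl http://127.0.0.1:5000/hf/v1/chat/completions \
#     -H 'Content-Type: application/json' \
#     -d '{"model": "gpt-4o", "stream": false,
#          "messages": [{"role": "user", "content": "Hello"}]}'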
|