Niansuh committed on
Commit e3cd2c8 · verified · 1 Parent(s): 4b940df

Update app.py

Files changed (1)
    app.py  +46 -33
app.py CHANGED
@@ -1,10 +1,10 @@
 import json
-import httpx
+import sseclient
+import requests
+from flask import Flask, request, Response, stream_with_context
 import random
-from fastapi import FastAPI, Request, HTTPException
-from sse_starlette.sse import EventSourceResponse
 
-app = FastAPI()
+app = Flask(__name__)
 
 def generate_random_ip():
     return f"{random.randint(1, 255)}.{random.randint(0, 255)}.{random.randint(0, 255)}.{random.randint(0, 255)}"
@@ -42,14 +42,14 @@ def format_openai_response(content, finish_reason=None):
         }]
     }
 
-@app.post("/hf/v1/chat/completions")
-async def chat_completions(request: Request):
-    data = await request.json()
+@app.route('/hf/v1/chat/completions', methods=['POST'])
+def chat_completions():
+    data = request.json
     messages = data.get('messages', [])
     stream = data.get('stream', False)
 
     if not messages:
-        raise HTTPException(status_code=400, detail="No messages provided")
+        return {"error": "No messages provided"}, 400
 
     model = data.get('model', 'gpt-4o')
     if model.startswith('gpt'):
@@ -59,7 +59,7 @@ async def chat_completions(request: Request):
         endpoint = "claude"
         original_api_url = 'https://chatpro.ai-pro.org/api/ask/claude'
     else:
-        raise HTTPException(status_code=400, detail="Unsupported model")
+        return {"error": "Unsupported model"}, 400
 
     headers = {
         'content-type': 'application/json',
@@ -68,7 +68,7 @@ async def chat_completions(request: Request):
         'user-agent': generate_user_agent()
     }
 
-    async def generate():
+    def generate():
         full_response = ""
         while True:
             conversation = "\n".join([f"{msg['role']}: {msg['content']}" for msg in messages])
@@ -80,35 +80,45 @@ async def chat_completions(request: Request):
                 "model": model
             }
 
-            async with httpx.AsyncClient() as client:
-                async with client.stream("POST", original_api_url, headers=headers, json=payload) as response:
-                    async for line in response.aiter_lines():
-                        if line.startswith('{"text":'):
-                            data = json.loads(line)
-                            new_content = data['text'][len(full_response):]
-                            full_response = data['text']
-
-                            if new_content:
-                                yield f"data: {json.dumps(format_openai_response(new_content))}\n\n"
-
-                        elif '"final":true' in line:
-                            final_data = json.loads(line)
-                            response_message = final_data.get('responseMessage', {})
-                            finish_reason = response_message.get('finish_reason', 'stop')
-                            last_content = response_message.get('text', '')
-                            if last_content and last_content != full_response:
-                                yield f"data: {json.dumps(format_openai_response(last_content[len(full_response):]))}\n\n"
-
-                            yield f"data: {json.dumps(format_openai_response('', finish_reason))}\n\n"
-                            yield "data: [DONE]\n\n"
-                            return
+            response = requests.post(original_api_url, headers=headers, json=payload, stream=True)
+            client = sseclient.SSEClient(response)
+
+            for event in client.events():
+                if event.data.startswith('{"text":'):
+                    data = json.loads(event.data)
+                    new_content = data['text'][len(full_response):]
+                    full_response = data['text']
+
+                    if new_content:
+                        yield f"data: {json.dumps(format_openai_response(new_content))}\n\n"
+
+                elif '"final":true' in event.data:
+                    final_data = json.loads(event.data)
+                    response_message = final_data.get('responseMessage', {})
+                    finish_reason = response_message.get('finish_reason', 'stop')
+
+                    if finish_reason == 'length':
+                        messages.append({"role": "assistant", "content": full_response})
+                        messages.append({"role": "user", "content": "Please continue your output and do not repeat the previous content"})
+                        break  # Jump out of the current loop and continue with the next request
+                    else:
+                        last_content = response_message.get('text', '')
+                        if last_content and last_content != full_response:
+                            yield f"data: {json.dumps(format_openai_response(last_content[len(full_response):]))}\n\n"
+
+                        yield f"data: {json.dumps(format_openai_response('', finish_reason))}\n\n"
+                        yield "data: [DONE]\n\n"
+                        return
+
+            yield f"data: {json.dumps(format_openai_response('', 'stop'))}\n\n"
+            yield "data: [DONE]\n\n"
 
     if stream:
-        return EventSourceResponse(generate())
+        return Response(stream_with_context(generate()), content_type='text/event-stream')
    else:
         full_response = ""
         finish_reason = "stop"
-        async for chunk in generate():
+        for chunk in generate():
             if chunk.startswith("data: ") and not chunk.strip() == "data: [DONE]":
                 response_data = json.loads(chunk[6:])
                 if 'choices' in response_data and response_data['choices']:
@@ -137,3 +147,6 @@ async def chat_completions(request: Request):
                 "total_tokens": 0
             }
         }
+
+if __name__ == '__main__':
+    app.run(debug=True, port=5000)
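
For reference, a minimal client sketch for the endpoint this commit defines. It assumes the app is running locally via "python app.py" (port 5000, per the __main__ block above) and that format_openai_response emits OpenAI-style streaming chunks with the text delta under choices[0].delta.content; that key path is an assumption, since the helper's body is outside this diff.

    import json
    import requests

    # Local URL is an assumption; the port comes from app.run(debug=True, port=5000).
    url = "http://127.0.0.1:5000/hf/v1/chat/completions"
    payload = {
        "model": "gpt-4o",  # the handler's default model
        "stream": True,
        "messages": [{"role": "user", "content": "Hello!"}],
    }

    with requests.post(url, json=payload, stream=True) as resp:
        for line in resp.iter_lines(decode_unicode=True):
            # The server yields SSE-style lines: 'data: {json}' and 'data: [DONE]'.
            if not line or not line.startswith("data: "):
                continue
            if line == "data: [DONE]":
                break
            chunk = json.loads(line[len("data: "):])
            # Assumed OpenAI-style chunk shape: choices[0].delta.content holds the delta.
            delta = chunk["choices"][0].get("delta", {}).get("content", "")
            print(delta, end="", flush=True)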