Niansuh committed
Commit 8dab3bb · verified · Parent: 3b1884f

Update app.py
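
Summary (not part of the commit message): this change migrates app.py from a synchronous Flask + requests/sseclient implementation to async FastAPI with aiosseclient. The /hf/v1/chat/completions route, the SSE relay generator, and the streaming response are converted to async equivalents, and the Flask `if __name__ == '__main__'` dev-server block is dropped.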

Files changed (1): app.py (+38 -42)
app.py CHANGED

@@ -1,10 +1,12 @@
 import json
-import sseclient
-import requests
-from flask import Flask, request, Response, stream_with_context
 import random
+import asyncio
+import aiohttp
+from fastapi import FastAPI, Request, Response
+from fastapi.responses import StreamingResponse
+from aiosseclient import aiosseclient

-app = Flask(__name__)
+app = FastAPI()

 def generate_random_ip():
     return f"{random.randint(1, 255)}.{random.randint(0, 255)}.{random.randint(0, 255)}.{random.randint(0, 255)}"

@@ -42,9 +44,9 @@ def format_openai_response(content, finish_reason=None):
         }]
     }

-@app.route('/hf/v1/chat/completions', methods=['POST'])
-def chat_completions():
-    data = request.json
+@app.post('/hf/v1/chat/completions')
+async def chat_completions(request: Request):
+    data = await request.json()
     messages = data.get('messages', [])
     stream = data.get('stream', False)

@@ -68,7 +70,7 @@ def chat_completions():
         'user-agent': generate_user_agent()
     }

-    def generate():
+    async def generate():
         full_response = ""
         while True:
             conversation = "\n".join([f"{msg['role']}: {msg['content']}" for msg in messages])

@@ -79,46 +81,43 @@ def chat_completions():
                 "endpoint": endpoint,
                 "model": model
             }
-
-            response = requests.post(original_api_url, headers=headers, json=payload, stream=True)
-            client = sseclient.SSEClient(response)
-
-            for event in client.events():
-                if event.data.startswith('{"text":'):
-                    data = json.loads(event.data)
-                    new_content = data['text'][len(full_response):]
-                    full_response = data['text']
-
-                    if new_content:
-                        yield f"data: {json.dumps(format_openai_response(new_content))}\n\n"
-
-                elif '"final":true' in event.data:
-                    final_data = json.loads(event.data)
-                    response_message = final_data.get('responseMessage', {})
-                    finish_reason = response_message.get('finish_reason', 'stop')
-
-                    if finish_reason == 'length':
-                        messages.append({"role": "assistant", "content": full_response})
-                        messages.append({"role": "user", "content": "Please continue your output and do not repeat the previous content"})
-                        break  # Jump out of the current loop and continue with the next request
-                    else:
-                        last_content = response_message.get('text', '')
-                        if last_content and last_content != full_response:
-                            yield f"data: {json.dumps(format_openai_response(last_content[len(full_response):]))}\n\n"
+
+            async with aiosseclient(original_api_url, method='POST', headers=headers, json=payload) as client:
+                async for event in client:
+                    if event.data.startswith('{"text":'):
+                        data = json.loads(event.data)
+                        new_content = data['text'][len(full_response):]
+                        full_response = data['text']
+
+                        if new_content:
+                            yield f"data: {json.dumps(format_openai_response(new_content))}\n\n"
+                    elif '"final":true' in event.data:
+                        final_data = json.loads(event.data)
+                        response_message = final_data.get('responseMessage', {})
+                        finish_reason = response_message.get('finish_reason', 'stop')
+
+                        if finish_reason == 'length':
+                            messages.append({"role": "assistant", "content": full_response})
+                            messages.append({"role": "user", "content": "Please continue your output and do not repeat the previous content"})
+                            break  # Continue with the next request
+                        else:
+                            last_content = response_message.get('text', '')
+                            if last_content and last_content != full_response:
+                                yield f"data: {json.dumps(format_openai_response(last_content[len(full_response):]))}\n\n"

-                    yield f"data: {json.dumps(format_openai_response('', finish_reason))}\n\n"
-                    yield "data: [DONE]\n\n"
-                    return
+                        yield f"data: {json.dumps(format_openai_response('', finish_reason))}\n\n"
+                        yield "data: [DONE]\n\n"
+                        return

         yield f"data: {json.dumps(format_openai_response('', 'stop'))}\n\n"
         yield "data: [DONE]\n\n"

     if stream:
-        return Response(stream_with_context(generate()), content_type='text/event-stream')
+        return StreamingResponse(generate(), media_type='text/event-stream')
     else:
         full_response = ""
         finish_reason = "stop"
-        for chunk in generate():
+        async for chunk in generate():
             if chunk.startswith("data: ") and not chunk.strip() == "data: [DONE]":
                 response_data = json.loads(chunk[6:])
                 if 'choices' in response_data and response_data['choices']:

@@ -147,6 +146,3 @@ def chat_completions():
             "total_tokens": 0
         }
     }
-
-if __name__ == '__main__':
-    app.run(debug=True, port=5000)
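
A caveat on the new streaming call: the aiosseclient package on PyPI is normally consumed as a plain async generator over a GET request (async for event in aiosseclient(url, headers=...)); async with usage and method=/json= keyword arguments are not part of its documented interface, so the async with aiosseclient(...) line above may fail at runtime. A minimal fallback sketch that reads the same SSE stream with aiohttp directly; sse_post_events is a hypothetical helper, and original_api_url, headers, and payload are the names already used in the diff:

# Hedged fallback, not part of the commit: stream SSE over an aiohttp POST
# and yield each `data:` payload as a string (the role of event.data above).
import aiohttp

async def sse_post_events(original_api_url, headers, payload):
    async with aiohttp.ClientSession() as session:
        async with session.post(original_api_url, headers=headers, json=payload) as resp:
            async for raw_line in resp.content:
                line = raw_line.decode('utf-8', errors='replace').strip()
                if line.startswith('data: '):
                    yield line[len('data: '):]

Each yielded string plays the role of event.data in the committed loop, so the '{"text":' / '"final":true' branching can be kept as is.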
 
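With the Flask app.run block removed, the app now needs an ASGI server. A minimal sketch of an equivalent entry point, assuming the module stays named app.py and the old port 5000 is kept (both are assumptions; the commit pins neither):

# Not part of the commit: possible replacement for the removed Flask entry point.
import uvicorn

if __name__ == '__main__':
    uvicorn.run('app:app', host='127.0.0.1', port=5000)

Once running, a quick non-streaming smoke test of the migrated route (same host/port assumptions):

# Hypothetical client-side check; not part of app.py.
import requests

resp = requests.post(
    'http://127.0.0.1:5000/hf/v1/chat/completions',
    json={'messages': [{'role': 'user', 'content': 'Hello'}], 'stream': False},
)
print(resp.json())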