Niansuh committed on
Commit
2934ea9
·
verified ·
1 Parent(s): 5bbbdf6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +59 -30
app.py CHANGED
@@ -4,7 +4,6 @@ import asyncio
4
  import aiohttp
5
  from fastapi import FastAPI, Request, Response
6
  from fastapi.responses import StreamingResponse
7
- from aiosseclient import aiosseclient
8
 
9
  app = FastAPI()
10
 
@@ -44,6 +43,20 @@ def format_openai_response(content, finish_reason=None):
44
  }]
45
  }
46
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
  @app.post('/hf/v1/chat/completions')
48
  async def chat_completions(request: Request):
49
  data = await request.json()
@@ -82,35 +95,51 @@ async def chat_completions(request: Request):
82
  "model": model
83
  }
84
 
85
- async with aiosseclient(original_api_url, method='POST', headers=headers, json=payload) as client:
86
- async for event in client:
87
- if event.data.startswith('{"text":'):
88
- data = json.loads(event.data)
89
- new_content = data['text'][len(full_response):]
90
- full_response = data['text']
91
-
92
- if new_content:
93
- yield f"data: {json.dumps(format_openai_response(new_content))}\n\n"
94
- elif '"final":true' in event.data:
95
- final_data = json.loads(event.data)
96
- response_message = final_data.get('responseMessage', {})
97
- finish_reason = response_message.get('finish_reason', 'stop')
98
-
99
- if finish_reason == 'length':
100
- messages.append({"role": "assistant", "content": full_response})
101
- messages.append({"role": "user", "content": "Please continue your output and do not repeat the previous content"})
102
- break # Continue with the next request
103
- else:
104
- last_content = response_message.get('text', '')
105
- if last_content and last_content != full_response:
106
- yield f"data: {json.dumps(format_openai_response(last_content[len(full_response):]))}\n\n"
107
-
108
- yield f"data: {json.dumps(format_openai_response('', finish_reason))}\n\n"
109
- yield "data: [DONE]\n\n"
110
- return
111
-
112
- yield f"data: {json.dumps(format_openai_response('', 'stop'))}\n\n"
113
- yield "data: [DONE]\n\n"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
114
 
115
  if stream:
116
  return StreamingResponse(generate(), media_type='text/event-stream')
 
4
  import aiohttp
5
  from fastapi import FastAPI, Request, Response
6
  from fastapi.responses import StreamingResponse
 
7
 
8
  app = FastAPI()
9
 
 
43
  }]
44
  }
45
 
46
def sse_parser():
    """Coroutine-style parser for Server-Sent Events (SSE) lines.

    Protocol (after priming with ``next(parser)``):
      * ``parser.send(raw_line)`` feeds one stripped line; returns ``None``
        while the line is merely accumulated.
      * ``parser.send(None)`` or sending a blank line (``''``) flushes and
        returns the pending event payload string, or ``None`` if no data
        has accumulated.

    A blank line terminates an event per the SSE format; the ``None``
    "query" send exists because the caller filters blank lines out of the
    stream before feeding the parser, so it must be able to retrieve the
    pending payload explicitly. (The previous implementation crashed with
    ``AttributeError`` on ``send(None)`` — ``None.startswith`` — and its
    mid-loop ``yield data`` desynchronized the send/receive protocol.)

    Lines for other SSE fields (``event:``, ``id:``, ``retry:``) and
    comments are ignored.
    """
    data = ''
    event = None
    while True:
        # Yield the event completed by the PREVIOUS send (None otherwise),
        # then receive the next input.
        line = yield event
        event = None
        if line is None or line == '':
            # Flush: blank line ends an event; None is an explicit query.
            if data:
                event, data = data, ''
        elif line.startswith('data:'):
            # Strip the "data:" prefix and accumulate. Concatenation
            # without a separator matches the original parser's behavior
            # (upstream sends one complete JSON object per data line).
            data += line[5:].strip()
60
  @app.post('/hf/v1/chat/completions')
61
  async def chat_completions(request: Request):
62
  data = await request.json()
 
95
  "model": model
96
  }
97
 
98
+ async with aiohttp.ClientSession() as session:
99
+ async with session.post(original_api_url, headers=headers, json=payload) as resp:
100
+ if resp.status != 200:
101
+ yield f"data: {json.dumps({'error': 'Failed to connect to upstream server'})}\n\n"
102
+ return
103
+
104
+ parser = sse_parser()
105
+ next(parser) # Initialize the generator
106
+
107
+ async for line in resp.content:
108
+ line = line.decode('utf-8').strip()
109
+ if line == '':
110
+ continue
111
+
112
+ parser.send(line)
113
+ try:
114
+ event_data = parser.send(None)
115
+ if event_data:
116
+ # Process the SSE event
117
+ event_json = json.loads(event_data)
118
+ if 'text' in event_json:
119
+ new_content = event_json['text'][len(full_response):]
120
+ full_response = event_json['text']
121
+ if new_content:
122
+ yield f"data: {json.dumps(format_openai_response(new_content))}\n\n"
123
+ elif '"final":true' in event_data:
124
+ final_data = event_json
125
+ response_message = final_data.get('responseMessage', {})
126
+ finish_reason = response_message.get('finish_reason', 'stop')
127
+ if finish_reason == 'length':
128
+ messages.append({"role": "assistant", "content": full_response})
129
+ messages.append({"role": "user", "content": "Please continue your output and do not repeat the previous content"})
130
+ break # Continue with the next request
131
+ else:
132
+ last_content = response_message.get('text', '')
133
+ if last_content and last_content != full_response:
134
+ yield f"data: {json.dumps(format_openai_response(last_content[len(full_response):]))}\n\n"
135
+ yield f"data: {json.dumps(format_openai_response('', finish_reason))}\n\n"
136
+ yield "data: [DONE]\n\n"
137
+ return
138
+ except StopIteration:
139
+ pass # No complete event yet
140
+
141
+ yield f"data: {json.dumps(format_openai_response('', 'stop'))}\n\n"
142
+ yield "data: [DONE]\n\n"
143
 
144
  if stream:
145
  return StreamingResponse(generate(), media_type='text/event-stream')