Update app.py
app.py CHANGED
@@ -367,28 +367,15 @@ def count_message_tokens(messages, model="gpt-3.5-turbo-0301"):
 
 def stream_notdiamond_response(response, model):
     """Stream the notdiamond API response."""
-
-    content_pattern = re.compile(r'data: (.*?)\n\n', re.DOTALL)
-    full_content = ""
-
+    last_sent = ""
     for chunk in response.iter_content(1024):
         if chunk:
-            for match in content_pattern.findall(chunk.decode('utf-8')):
-                try:
-                    data = json.loads(match)
-                    current_content = data['choices'][0]['delta'].get('content', '')
-                    full_content += current_content  # accumulate the full content
-
-                    if current_content:
-                        yield create_openai_chunk(current_content, model)
-                except json.JSONDecodeError:
-                    logger.error(f"Failed to decode JSON: {match}")
-                    continue
+            new_content = chunk.decode('utf-8')
+            to_send = new_content[len(last_sent):]
+            if to_send:
+                yield create_openai_chunk(to_send, model)
+            last_sent = new_content
 
-    # generate the final chunk containing the full content
     yield create_openai_chunk('', model, 'stop')
 
 def handle_non_stream_response(response, model, prompt_tokens):
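The rewrite drops the regex-over-SSE parsing (content_pattern plus json.loads per match) in favor of treating each upstream chunk as a cumulative snapshot of the text so far and forwarding only the unseen suffix. A minimal, self-contained sketch of that suffix logic; create_openai_chunk below is a stand-in with an assumed field layout, not app.py's actual helper:

import json
import time
import uuid

def create_openai_chunk(content, model, finish_reason=None):
    # Stand-in for app.py's helper; the exact fields are assumed here.
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": int(time.time()),
        "model": model,
        "choices": [{
            "index": 0,
            "delta": {"content": content} if content else {},
            "finish_reason": finish_reason,
        }],
    }

def stream_suffixes(chunks, model):
    # Mirrors the patched loop: each chunk is assumed to be the
    # cumulative text so far, so only the new suffix is yielded.
    last_sent = ""
    for chunk in chunks:
        new_content = chunk.decode('utf-8')
        to_send = new_content[len(last_sent):]
        if to_send:
            yield create_openai_chunk(to_send, model)
        last_sent = new_content
    yield create_openai_chunk('', model, 'stop')

# Cumulative payloads b"Hel" -> b"Hello" yield deltas "Hel", then "lo".
for c in stream_suffixes([b"Hel", b"Hello"], "gpt-4o"):
    print(json.dumps(c["choices"][0]["delta"]))

Note the premise: slicing with len(last_sent) only yields clean deltas if every chunk really is the full text so far; if the upstream ever sends plain increments instead, the slice would silently drop content.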
@@ -428,12 +415,9 @@ def handle_non_stream_response(response, model, prompt_tokens):
 def generate_stream_response(response, model, prompt_tokens):
     """Generate the streaming HTTP response."""
     total_completion_tokens = 0
-    full_content = ""
 
     for chunk in stream_notdiamond_response(response, model):
         content = chunk['choices'][0]['delta'].get('content', '')
-        full_content += content  # accumulate the full content
-
         total_completion_tokens += count_tokens(content, model)
 
         chunk['usage'] = {
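This hunk keeps the per-delta accounting: count_tokens runs on every streamed delta and accumulates into total_completion_tokens. count_tokens itself is outside this diff; a hypothetical sketch, assuming a tiktoken-based implementation (the encoding fallback is an assumption, not confirmed from app.py):

import tiktoken

def count_tokens(text, model="gpt-3.5-turbo-0301"):
    # Hypothetical stand-in for app.py's count_tokens helper.
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        # Unknown model names fall back to a common encoding (assumption).
        encoding = tiktoken.get_encoding("cl100k_base")
    return len(encoding.encode(text))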
@@ -444,15 +428,6 @@ def generate_stream_response(response, model, prompt_tokens):
 
         yield f"data: {json.dumps(chunk)}\n\n"
 
-    # generate the final chunk containing the full content
-    final_chunk = create_openai_chunk('', model, 'stop')
-    final_chunk['usage'] = {
-        "prompt_tokens": prompt_tokens,
-        "completion_tokens": total_completion_tokens,
-        "total_tokens": prompt_tokens + total_completion_tokens
-    }
-    yield f"data: {json.dumps(final_chunk)}\n\n"
-
     yield "data: [DONE]\n\n"
 
 def get_auth_credentials():
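With the separate usage-bearing final chunk removed, token usage now travels on every streamed chunk, and the stream ends with the 'stop' chunk followed by the [DONE] sentinel. A client-side sketch of consuming that shape; consume_sse and the sample payload are illustrative, not part of app.py:

import json

def consume_sse(lines):
    # Parse "data: ..." lines as emitted by generate_stream_response,
    # keeping the last usage dict seen on any chunk.
    text, usage = [], None
    for line in lines:
        if not line.startswith("data: "):
            continue
        payload = line[len("data: "):].strip()
        if payload == "[DONE]":
            break
        chunk = json.loads(payload)
        text.append(chunk["choices"][0]["delta"].get("content", ""))
        usage = chunk.get("usage", usage)  # usage now rides on every chunk
    return "".join(text), usage

# Two hand-written lines in the shape the diff produces.
stream = [
    'data: {"choices":[{"index":0,"delta":{"content":"Hi"}}],'
    '"usage":{"prompt_tokens":3,"completion_tokens":1,"total_tokens":4}}',
    "data: [DONE]",
]
print(consume_sse(stream))  # -> ('Hi', {'prompt_tokens': 3, ...})

Clients that previously waited for the final usage chunk must now read usage from the last regular chunk before [DONE].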
@@ -700,4 +675,4 @@ if __name__ == "__main__":
     health_check_thread.start()
 
     port = int(os.environ.get("PORT", 3000))
-    app.run(debug=False, host='0.0.0.0', port=port, threaded=True)
+    app.run(debug=False, host='0.0.0.0', port=port, threaded=True)