Update app.py
app.py
CHANGED
@@ -368,8 +368,8 @@ def count_message_tokens(messages, model="gpt-3.5-turbo-0301"):
 def stream_notdiamond_response(response, model):
     """Process the notdiamond API response as a stream."""
     buffer = ""
-    previous_content = ""
     content_pattern = re.compile(r'data: (.*?)\n\n', re.DOTALL)
+    full_content = ""
 
     for chunk in response.iter_content(1024):
         if chunk:
@@ -380,15 +380,15 @@ def stream_notdiamond_response(response, model):
                 try:
                     data = json.loads(match)
                     current_content = data['choices'][0]['delta'].get('content', '')
-
-                    previous_content = current_content  # update the already-processed content
+                    full_content += current_content  # accumulate the full content
 
-                    if …
-                        yield create_openai_chunk(…
+                    if current_content:
+                        yield create_openai_chunk(current_content, model)
                 except json.JSONDecodeError:
                     logger.error(f"Failed to decode JSON: {match}")
                     continue
 
+    # generate the final chunk closing out the complete content
     yield create_openai_chunk('', model, 'stop')
 
 def handle_non_stream_response(response, model, prompt_tokens):
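Both yields above go through create_openai_chunk, a helper defined elsewhere in app.py and not shown in this diff. A minimal sketch of what it plausibly returns, assuming the standard OpenAI chat.completion.chunk shape that the calling code indexes into (the id scheme and created timestamp are assumptions, not taken from the source):

import time
import uuid

def create_openai_chunk(content, model, finish_reason=None):
    # Assumed shape: an OpenAI-style chat.completion.chunk dict, so that
    # chunk['choices'][0]['delta'].get('content', '') works downstream.
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",  # hypothetical id scheme
        "object": "chat.completion.chunk",
        "created": int(time.time()),
        "model": model,
        "choices": [{
            "index": 0,
            "delta": {"content": content} if content else {},
            "finish_reason": finish_reason,  # 'stop' on the terminating chunk
        }],
    }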
@@ -428,14 +428,13 @@ def handle_non_stream_response(response, model, prompt_tokens):
 def generate_stream_response(response, model, prompt_tokens):
     """Generate a streaming HTTP response."""
     total_completion_tokens = 0
-
+    full_content = ""
 
     for chunk in stream_notdiamond_response(response, model):
         content = chunk['choices'][0]['delta'].get('content', '')
-
-        previous_content = content  # update the already-processed content
+        full_content += content  # accumulate the full content
 
-        total_completion_tokens += count_tokens(…
+        total_completion_tokens += count_tokens(content, model)
 
         chunk['usage'] = {
             "prompt_tokens": prompt_tokens,
@@ -445,6 +444,15 @@ def generate_stream_response(response, model, prompt_tokens):
 
         yield f"data: {json.dumps(chunk)}\n\n"
 
+    # generate the final chunk carrying the complete usage totals
+    final_chunk = create_openai_chunk('', model, 'stop')
+    final_chunk['usage'] = {
+        "prompt_tokens": prompt_tokens,
+        "completion_tokens": total_completion_tokens,
+        "total_tokens": prompt_tokens + total_completion_tokens
+    }
+    yield f"data: {json.dumps(final_chunk)}\n\n"
+
     yield "data: [DONE]\n\n"
 
 def get_auth_credentials():
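The new count_tokens(content, model) call is likewise defined outside this diff. A sketch of a typical implementation, assuming it counts with tiktoken the way the count_message_tokens helper named in the first hunk header presumably does; the cl100k_base fallback is an assumption:

import tiktoken

def count_tokens(text, model="gpt-3.5-turbo-0301"):
    # Encode with the model's tokenizer; fall back to cl100k_base for
    # model names tiktoken does not recognize (assumption, not from the diff).
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        encoding = tiktoken.get_encoding("cl100k_base")
    return len(encoding.encode(text))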
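For context on how the rewritten generator is consumed: the web framework is not visible in this diff, but a proxy like this app.py is typically a Flask app that returns the generator as a server-sent-event stream. A sketch under that assumption, where the route, UPSTREAM_URL, and payload handling are all hypothetical:

from flask import Flask, Response, request
import requests

app = Flask(__name__)
UPSTREAM_URL = "https://example.invalid/stream"  # hypothetical upstream endpoint

@app.route("/v1/chat/completions", methods=["POST"])
def chat_completions():
    payload = request.get_json()
    model = payload.get("model", "gpt-3.5-turbo")
    prompt_tokens = count_message_tokens(payload.get("messages", []), model)
    upstream = requests.post(UPSTREAM_URL, json=payload, stream=True)
    return Response(
        generate_stream_response(upstream, model, prompt_tokens),
        content_type="text/event-stream",
    )

With the final usage-bearing chunk added in this commit, a client consuming the stream now receives prompt_tokens, completion_tokens, and total_tokens immediately before the data: [DONE] sentinel.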