Update app.py
app.py CHANGED
@@ -621,80 +621,129 @@ def handsome_chat_completions():
             "https://api.siliconflow.cn/v1/images/generations",
             headers=headers,
             json=siliconflow_data,
-            timeout=120
+            timeout=120,
+            stream=data.get("stream", False)
         )

         if response.status_code == 429:
             return jsonify(response.json()), 429

[removed: old lines 630-697, the previous response-handling code; its contents were not captured in this view]
+        if data.get("stream", False):
+            def generate():
+                first_chunk_time = None
+                full_response_content = ""
+                for chunk in response.iter_content(chunk_size=1024):
+                    if chunk:
+                        if first_chunk_time is None:
+                            first_chunk_time = time.time()
+                        try:
+                            chunk_json = json.loads(chunk.decode("utf-8"))
+                            if "images" in chunk_json and chunk_json["images"]:
+                                image_url = chunk_json["images"][0].get("url", "")
+                                if image_url:
+                                    full_response_content += json.dumps({"url":image_url})
+                                    yield f"data: {json.dumps({'url': image_url})}\n\n".encode('utf-8')
+                                else:
+                                    full_response_content += json.dumps({"content":"Failed to generate image"})
+                                    yield f"data: {json.dumps({'content': 'Failed to generate image'})}\n\n".encode('utf-8')
+                            else:
+                                full_response_content += json.dumps({"content":"Failed to generate image"})
+                                yield f"data: {json.dumps({'content': 'Failed to generate image'})}\n\n".encode('utf-8')
+                        except json.JSONDecodeError:
+                            logging.error(f"Failed to decode chunk JSON: {chunk.decode('utf-8')}")
+                            full_response_content += json.dumps({"content":"Failed to generate image"})
+                            yield f"data: {json.dumps({'content': 'Failed to generate image'})}\n\n".encode('utf-8')
+
+                end_time = time.time()
+                first_token_time = (
+                    first_chunk_time - start_time
+                    if first_chunk_time else 0
+                )
+                total_time = end_time - start_time
+
+
+                logging.info(
+                    f"使用的key: {api_key}, "
+                    f"首字用时: {first_token_time:.4f}秒, "
+                    f"总共用时: {total_time:.4f}秒, "
+                    f"使用的模型: {model_name}"
+                )
+
+                with data_lock:
+                    request_timestamps.append(time.time())
+                    token_counts.append(0) # Image generation doesn't use tokens
+
+                yield "data: [DONE]\n\n".encode('utf-8')
+            return Response(stream_with_context(generate()), content_type='text/event-stream')
+        else:
+            response.raise_for_status()
+            end_time = time.time()
+            response_json = response.json()
+            total_time = end_time - start_time

+            try:
+                images = response_json.get("images", [])
+
+                # Extract the first URL if available
+                image_url = ""
+                if images and isinstance(images[0], dict) and "url" in images[0]:
+                    image_url = images[0]["url"]
+                    logging.info(f"Extracted image URL: {image_url}")
+                elif images and isinstance(images[0], str):
+                    image_url = images[0]
+                    logging.info(f"Extracted image URL: {image_url}")
+
+                # Construct the expected JSON output - Mimicking OpenAI
+                response_data = {
+                    "id": f"chatcmpl-{uuid.uuid4()}",
+                    "object": "chat.completion",
+                    "created": int(time.time()),
+                    "model": model_name,
+                    "choices": [
+                        {
+                            "index": 0,
+                            "message": {
+                                "role": "assistant",
+                                "content": image_url if image_url else "Failed to generate image", # Directly return the URL in content
+                            },
+                            "finish_reason": "stop",
+                        }
+                    ],
+                }

+            except (KeyError, ValueError, IndexError) as e:
+                logging.error(
+                    f"解析响应 JSON 失败: {e}, "
+                    f"完整内容: {response_json}"
+                )
+                response_data = {
+                    "id": f"chatcmpl-{uuid.uuid4()}",
+                    "object": "chat.completion",
+                    "created": int(time.time()),
+                    "model": model_name,
+                    "choices": [
+                        {
+                            "index": 0,
+                            "message": {
+                                "role": "assistant",
+                                "content": "Failed to process image data",
+                            },
+                            "finish_reason": "stop",
+                        }
+                    ],
+                }

+            logging.info(
+                f"使用的key: {api_key}, "
+                f"总共用时: {total_time:.4f}秒, "
+                f"使用的模型: {model_name}"
+            )

+            with data_lock:
+                request_timestamps.append(time.time())
+                token_counts.append(0) # Image generation doesn't use tokens

+            return jsonify(response_data)
     except requests.exceptions.RequestException as e:
         logging.error(f"请求转发异常: {e}")
         return jsonify({"error": str(e)}), 500
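For reference, a minimal client-side sketch of how the new streaming branch could be consumed. It is only an illustration: the base URL http://localhost:7860, the route /handsome/v1/chat/completions, and the example model name are assumptions, not taken from this diff; only the "stream" flag, the "data: {...}" chunks, and the "data: [DONE]" terminator come from the code above.

# Consumer sketch for the streaming image-generation path added in this commit.
# Assumed: the proxy listens on http://localhost:7860 and handsome_chat_completions
# is routed at /handsome/v1/chat/completions; adjust both to your deployment.
import json
import requests

payload = {
    "model": "black-forest-labs/FLUX.1-schnell",  # assumed model name, for illustration only
    "messages": [{"role": "user", "content": "a watercolor fox"}],
    "stream": True,  # triggers the new SSE branch in the diff above
}

with requests.post(
    "http://localhost:7860/handsome/v1/chat/completions",  # assumed route
    json=payload,
    stream=True,
    timeout=120,
) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data: "):
            continue  # skip keep-alive blanks between SSE events
        body = line[len("data: "):]
        if body == "[DONE]":  # the server ends the stream with data: [DONE]
            break
        event = json.loads(body)
        # The server emits either {"url": ...} or {"content": "Failed to generate image"}
        print(event.get("url") or event.get("content"))

When "stream" is false or omitted, the same request instead returns a single OpenAI-style chat.completion object in which choices[0].message.content carries the generated image URL, or "Failed to generate image" / "Failed to process image data" when the upstream response cannot be parsed.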