import json
import time
from app.utils.logging import log
def openAI_from_text(model="gemini",content=None,finish_reason=None,total_token_count=0,stream=True):
    """
    Build an OpenAI-style chat response (or SSE chunk) from plain values.

    Args:
        model: Model name reported in the response.
        content: Assistant text; when falsy the delta/message is an empty dict.
        finish_reason: Completion reason; when set, a usage block is attached.
        total_token_count: Total token count reported in usage.
        stream: True -> SSE "data: ..." chunk string; False -> plain dict.

    Returns:
        An SSE line (str) when stream is True, otherwise the response dict.
    """
    timestamp = int(time.time())
    # Empty payload (keep-alive style chunk) when there is no content.
    message_payload = {"role": "assistant", "content": content} if content else {}
    response = {
        "id": f"chatcmpl-{timestamp}",
        "created": timestamp,
        "model": model,
        "choices": [{"index": 0, "finish_reason": finish_reason}],
    }
    if finish_reason:
        response["usage"] = {"total_tokens": total_token_count}
    if not stream:
        response["choices"][0]["message"] = message_payload
        response["object"] = "chat.completion"
        return response
    response["choices"][0]["delta"] = message_payload
    response["object"] = "chat.completion.chunk"
    return f"data: {json.dumps(response, ensure_ascii=False)}\n\n"
def gemini_from_text(content=None, finish_reason=None, total_token_count=0, stream=True):
    """
    Build a Gemini-style response block (GenerateContentResponse shape) from plain values.

    NOTE(review): "candidates" is emitted as a single dict here, while the
    official GenerateContentResponse schema uses a *list* of candidates —
    confirm how downstream consumers index it before changing the shape.

    Args:
        content: Text to place in the candidate's parts; when falsy, parts stays empty.
        finish_reason: When set, a usageMetadata block is attached.
        total_token_count: Token count reported in usageMetadata.
        stream: True -> SSE "data: ..." chunk string; False -> plain dict.

    Returns:
        An SSE line (str) when stream is True, otherwise the response dict.
    """
    parts = [{"text": content}] if content else []
    payload = {
        "candidates": {
            "index": 0,
            "content": {"parts": parts, "role": "model"},
        }
    }
    if finish_reason:
        payload["usageMetadata"] = {"totalTokenCount": total_token_count}
    if not stream:
        return payload
    return f"data: {json.dumps(payload, ensure_ascii=False)}\n\n"
def openAI_from_Gemini(response,stream=True):
    """
    Convert a GeminiResponseWrapper into an OpenAI-style chat response.

    Args:
        response: Wrapper exposing .model, .finish_reason, .text,
            .function_call (falsy, or an iterable of dicts with "name"/"args")
            and optional prompt/candidates/total token-count attributes.
        stream: True -> SSE "data: ..." chunk string; False -> plain dict.

    Returns:
        An SSE line (str) when stream is True, otherwise the response dict.
    """
    created = int(time.time())

    def _count(attr):
        # Token-count attributes may be missing or None on the wrapper.
        raw = getattr(response, attr, None)
        return int(raw) if raw else 0

    payload = {
        "id": f"chatcmpl-{created}",  # timestamp-derived id
        "created": created,
        "model": response.model,
        "choices": [{"index": 0, "finish_reason": response.finish_reason}],
    }
    usage = {
        "prompt_tokens": _count("prompt_token_count"),
        "completion_tokens": _count("candidates_token_count"),
        "total_tokens": _count("total_token_count"),
    }
    if response.function_call:
        body = {
            "role": "assistant",
            "content": None,  # OpenAI sets content to null for tool calls
            "tool_calls": [
                {
                    "id": f"call_{part.get('name')}",  # encode the function name into the id
                    "type": "function",
                    "function": {
                        "name": part.get("name"),
                        # Gemini args are a dict; OpenAI expects a JSON string.
                        "arguments": json.dumps(part.get("args", {}), ensure_ascii=False),
                    },
                }
                for part in response.function_call
            ],
        }
    elif response.text:
        body = {"role": "assistant", "content": response.text}
    else:
        body = {}
    if stream:
        payload["choices"][0]["delta"] = body
        payload["object"] = "chat.completion.chunk"
        # Usage is attached only to the final chunk of the stream.
        if response.finish_reason:
            payload["usage"] = usage
        return f"data: {json.dumps(payload, ensure_ascii=False)}\n\n"
    payload["choices"][0]["message"] = body
    payload["object"] = "chat.completion"
    # Non-streaming responses always carry usage to satisfy response_model validation.
    payload["usage"] = usage
    return payload