malt666 committed
Commit c4144fb · verified · 1 Parent(s): f32de00

Upload 3 files

Files changed (1):
  app.py  +18 -7
app.py CHANGED
@@ -1124,11 +1124,15 @@ def index():
 
 def num_tokens_from_string(string, model=""):
     try:
-        print(f"\nStarting token count: model={model}")
+        print("\n===================== Starting token count =====================")
+        print(f"Model: {model}")
+        print(f"Input length: {len(string)} characters")
+
         request_data = {
             "model": model,
             "messages": [{"role": "user", "content": string}]
         }
+        print(f"Sending request to tokenizer service: {TOKENIZER_SERVICE_URL}")
         print(f"Request data: {json.dumps(request_data, ensure_ascii=False)}")
 
         response = requests.post(
@@ -1137,23 +1141,30 @@ def num_tokens_from_string(string, model=""):
             timeout=10
         )
 
-        print(f"Tokenizer response status code: {response.status_code}")
+        print(f"\nTokenizer response status code: {response.status_code}")
         print(f"Tokenizer response body: {response.text}")
 
         if response.status_code == 200:
             result = response.json()
             input_tokens = result.get("input_tokens", 0)
-            print(f"Got token count: {input_tokens}, calculation method: exact")
+            print(f"\nGot token count: {input_tokens}")
+            print(f"Calculation method: exact")
+            print("===================== Token count finished =====================\n")
             return input_tokens, "精确"
         else:
             estimated_tokens = len(string) // 4
-            print(f"Tokenizer service error: {response.status_code} - {response.text}")
-            print(f"Using estimated token count: {estimated_tokens}, calculation method: estimated")
+            print(f"\nTokenizer service error: {response.status_code}")
+            print(f"Error response: {response.text}")
+            print(f"Using estimated token count: {estimated_tokens}")
+            print(f"Calculation method: estimated")
+            print("===================== Token count finished =====================\n")
             return estimated_tokens, "估算"
     except Exception as e:
         estimated_tokens = len(string) // 4
-        print(f"Token count error: {e}")
-        print(f"Using estimated token count: {estimated_tokens}, calculation method: estimated")
+        print(f"\nError while counting tokens: {str(e)}")
+        print(f"Using estimated token count: {estimated_tokens}")
+        print(f"Calculation method: estimated")
+        print("===================== Token count finished =====================\n")
         return estimated_tokens, "估算"
 
 
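For reference, num_tokens_from_string returns a (token_count, method) pair: the method label is "精确" ("exact") when the tokenizer service answered, or "估算" ("estimated") when the len(string) // 4 fallback kicked in. A minimal usage sketch; the caller, prompt, and model name below are illustrative and not taken from app.py:

    # Illustrative caller - the prompt and model name are made up.
    prompt = "Hello, how many tokens is this?"
    tokens, method = num_tokens_from_string(prompt, model="example-model")

    # method is "精确" (exact) if the tokenizer service responded,
    # otherwise "估算" (estimated) from the len(string) // 4 heuristic.
    print(f"prompt_tokens={tokens} ({method})")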
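The service contract the function depends on can be read off the diff: it POSTs {"model": ..., "messages": [{"role": "user", "content": ...}]} to TOKENIZER_SERVICE_URL and expects a JSON reply containing input_tokens. Below is a minimal stand-in for that service, useful for local testing; Flask, the /count path, and the port are assumptions for illustration, and the len // 4 count inside is a placeholder, not a real tokenizer:

    # Hypothetical stand-in for the tokenizer service; endpoint path,
    # port, and the len // 4 "tokenizer" are illustrative only.
    from flask import Flask, jsonify, request

    mock = Flask(__name__)

    @mock.route("/count", methods=["POST"])
    def count_tokens():
        data = request.get_json(force=True)
        text = "".join(m.get("content", "") for m in data.get("messages", []))
        return jsonify({"input_tokens": len(text) // 4})

    if __name__ == "__main__":
        # Point TOKENIZER_SERVICE_URL at http://127.0.0.1:8080/count to use it.
        mock.run(port=8080)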
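Since the commit only adds print diagnostics, the same trace could also be emitted through Python's standard logging module, which lets the verbosity be dialed down without editing the function again. A sketch of that alternative, assuming a module-level logger; this is not what the commit does:

    import logging

    logging.basicConfig(level=logging.DEBUG,
                        format="%(asctime)s %(levelname)s %(message)s")
    logger = logging.getLogger("tokenizer")

    def log_token_count_start(model, string):
        # Same information as the added prints, but it can be silenced
        # globally by raising the level to INFO.
        logger.debug("===================== Starting token count =====================")
        logger.debug("Model: %s", model)
        logger.debug("Input length: %d characters", len(string))

    log_token_count_start("example-model", "Hello, world")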