Create app纯单模型.py
app纯单模型.py  +121 -0  (ADDED)
@@ -0,0 +1,121 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Flask, request, make_response
import hashlib
import time
import xml.etree.ElementTree as ET
import os
from openai import OpenAI
from dotenv import load_dotenv

# Load environment variables
load_dotenv()

app = Flask(__name__)

# Configuration
TOKEN = os.getenv('TOKEN')
API_KEY = os.getenv("API_KEY")
BASE_URL = os.getenv("OPENAI_BASE_URL")
client = OpenAI(api_key=API_KEY, base_url=BASE_URL)

# Per-user session store: conversation history plus any reply parts still waiting to be sent
user_sessions = {}

def verify_wechat(request):
    # Read the parameters sent by the WeChat server
    data = request.args
    signature = data.get('signature')
    timestamp = data.get('timestamp')
    nonce = data.get('nonce')
    echostr = data.get('echostr')

    # Sort the values lexicographically and concatenate them
    temp = [timestamp, nonce, TOKEN]
    temp.sort()
    temp = ''.join(temp)

    # Hash and compare against the signature
    if hashlib.sha1(temp.encode('utf8')).hexdigest() == signature:
        return echostr
    else:
        return 'error', 403

def getUserMessageContentFromXML(xml_content):
    # Parse the XML string (assumes a text message; other message types carry no <Content>)
    root = ET.fromstring(xml_content)
    # Extract the fields
    content = root.find('Content').text
    from_user_name = root.find('FromUserName').text
    to_user_name = root.find('ToUserName').text
    return content, from_user_name, to_user_name

def generate_response_xml(from_user_name, to_user_name, output_content):
    output_xml = '''
    <xml>
        <ToUserName><![CDATA[%s]]></ToUserName>
        <FromUserName><![CDATA[%s]]></FromUserName>
        <CreateTime>%s</CreateTime>
        <MsgType><![CDATA[text]]></MsgType>
        <Content><![CDATA[%s]]></Content>
    </xml>'''

    response = make_response(output_xml % (from_user_name, to_user_name, str(int(time.time())), output_content))
    response.content_type = 'application/xml'
    return response

def get_openai_response(messages):
    try:
        response = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=messages
        )
        return response.choices[0].message.content
    except Exception as e:
        print(f"Error while calling the OpenAI API: {str(e)}")
        # "Sorry, I ran into a problem and cannot answer your question."
        return "抱歉,我遇到了一些问题,无法回答您的问题。"

def split_message(message, max_length=500):
    # Split long replies into chunks that fit within WeChat's text-message size limit
    return [message[i:i+max_length] for i in range(0, len(message), max_length)]

@app.route('/api/wx', methods=['GET', 'POST'])
def wechatai():
    if request.method == 'GET':
        return verify_wechat(request)
    else:
        # Handle POST requests (incoming messages)
        print("user request data: ", request.data)
        user_message_content, from_user_name, to_user_name = getUserMessageContentFromXML(request.data)
        print("user message content: ", user_message_content)

        if user_message_content.lower() == '继续':  # "继续" = "continue": send the next pending chunk
            if from_user_name in user_sessions and user_sessions[from_user_name]['pending_response']:
                response_content = user_sessions[from_user_name]['pending_response'].pop(0)
                if user_sessions[from_user_name]['pending_response']:
                    response_content += '\n\n回复"继续"获取下一部分。'  # Reply "继续" for the next part.
                else:
                    response_content += '\n\n回复结束。'  # End of reply.
            else:
                response_content = "没有待发送的消息。"  # No pending messages.
        else:
            if from_user_name not in user_sessions:
                user_sessions[from_user_name] = {'messages': [], 'pending_response': []}

            session = user_sessions[from_user_name]
            session['messages'].append({"role": "user", "content": user_message_content})

            gpt_response = get_openai_response(session['messages'])
            session['messages'].append({"role": "assistant", "content": gpt_response})

            response_parts = split_message(gpt_response)

            if len(response_parts) > 1:
                response_content = response_parts[0] + '\n\n回复"继续"获取下一部分。'  # Reply "继续" for the next part.
                session['pending_response'] = response_parts[1:]
            else:
                response_content = response_parts[0]

        return generate_response_xml(from_user_name, to_user_name, response_content)

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=7860, debug=True)
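A quick way to check the GET verification path is to reproduce WeChat's signature scheme locally: sha1 over the sorted [token, timestamp, nonce] triple, compared against the signature query parameter. The sketch below is a hypothetical smoke test, not part of this commit; it assumes the app is running at http://127.0.0.1:7860, that the requests package is installed, and that the TOKEN in your environment matches the one the app loaded from .env.

# Hypothetical local smoke test for the GET verification handshake (not part of the app).
import hashlib
import os
import time

import requests  # assumption: requests is installed

URL = "http://127.0.0.1:7860/api/wx"           # assumption: app running locally on port 7860
TOKEN = os.getenv("TOKEN", "my-test-token")    # must match the TOKEN the app loaded

timestamp = str(int(time.time()))
nonce = "123456"
echostr = "hello-wechat"

# Same construction verify_wechat() performs on the server side
signature = hashlib.sha1("".join(sorted([timestamp, nonce, TOKEN])).encode("utf8")).hexdigest()

resp = requests.get(URL, params={
    "signature": signature,
    "timestamp": timestamp,
    "nonce": nonce,
    "echostr": echostr,
})
print(resp.status_code, resp.text)  # expected: 200 hello-wechat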
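The POST path can be exercised the same way by sending a text-message XML of the shape getUserMessageContentFromXML expects; the handler only reads Content, FromUserName and ToUserName, and the reply comes back as the XML built by generate_response_xml. Again a hypothetical local sketch under the same assumptions (local server on port 7860, requests installed, valid OpenAI credentials in .env); the account and user IDs below are placeholders.

# Hypothetical local smoke test for the POST message path (not part of the app).
import requests  # assumption: requests is installed

URL = "http://127.0.0.1:7860/api/wx"  # assumption: app running locally on port 7860

payload = """<xml>
  <ToUserName><![CDATA[gh_test_account]]></ToUserName>
  <FromUserName><![CDATA[test_openid]]></FromUserName>
  <CreateTime>1700000000</CreateTime>
  <MsgType><![CDATA[text]]></MsgType>
  <Content><![CDATA[你好,请介绍一下你自己]]></Content>
</xml>"""

resp = requests.post(URL, data=payload.encode("utf-8"),
                     headers={"Content-Type": "application/xml"})
print(resp.text)  # the reply XML produced by generate_response_xml()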