Tidy up the main code (主要代码规整化)

Changed files:
- check_proxy.py        +11  -7
- config.py              +6  -5
- core_functional.py     +3  -2
- crazy_functional.py    +4  -6
- theme.py              +14 -10
- toolbox.py           +102 -47

check_proxy.py  (CHANGED, +11 -7)

@@ -3,7 +3,8 @@ def check_proxy(proxies):
     import requests
     proxies_https = proxies['https'] if proxies is not None else '无'
     try:
-        response = requests.get("https://ipapi.co/json/",
+        response = requests.get("https://ipapi.co/json/",
+                                proxies=proxies, timeout=4)
         data = response.json()
         print(f'查询代理的地理位置,返回的结果是{data}')
         if 'country_name' in data:
@@ -21,9 +22,11 @@ def check_proxy(proxies):
 
 def auto_update():
     from toolbox import get_conf
-    import requests
+    import requests
+    import time
+    import json
     proxies, = get_conf('proxies')
-    response = requests.get("https://raw.githubusercontent.com/binary-husky/chatgpt_academic/master/version",
+    response = requests.get("https://raw.githubusercontent.com/binary-husky/chatgpt_academic/master/version",
                             proxies=proxies, timeout=1)
     remote_json_data = json.loads(response.text)
     remote_version = remote_json_data['version']
@@ -31,11 +34,12 @@ def auto_update():
         new_feature = "新功能:" + remote_json_data["new_feature"]
     else:
         new_feature = ""
-    with open('./version', 'r', encoding='utf8') as f:
+    with open('./version', 'r', encoding='utf8') as f:
         current_version = f.read()
     current_version = json.loads(current_version)['version']
     if (remote_version - current_version) >= 0.05:
-        print(
+        print(
+            f'\n新版本可用。新版本:{remote_version},当前版本:{current_version}。{new_feature}')
         print('Github更新地址:\nhttps://github.com/binary-husky/chatgpt_academic\n')
         time.sleep(3)
         return
@@ -44,8 +48,8 @@ def auto_update():
 
 
 if __name__ == '__main__':
-    import os
+    import os
+    os.environ['no_proxy'] = '*'  # 避免代理网络产生意外污染
     from toolbox import get_conf
     proxies, = get_conf('proxies')
     check_proxy(proxies)
-

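For reference, the pattern introduced above (geo-locate the configured proxy via ipapi.co with a short timeout, while keeping local traffic off the proxy with no_proxy) can be exercised on its own roughly as follows. This is a simplified sketch, not the repo's exact check_proxy; the function name probe_proxy is hypothetical, and a socks5h proxy additionally needs the optional PySocks dependency (pip install requests[socks]).

import os
import requests

os.environ['no_proxy'] = '*'  # same guard as the new __main__ block: keep local traffic off the proxy

def probe_proxy(proxies):
    # Ask ipapi.co where the outbound request appears to come from, with a short timeout.
    try:
        response = requests.get("https://ipapi.co/json/", proxies=proxies, timeout=4)
        data = response.json()
        return f"proxy exit location: {data.get('country_name', 'unknown')}"
    except Exception:
        return "proxy check failed (timeout or unreachable)"

if __name__ == '__main__':
    print(probe_proxy({"http": "socks5h://localhost:11284",
                       "https": "socks5h://localhost:11284"}))
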
config.py  (CHANGED, +6 -5)

@@ -11,10 +11,10 @@ if USE_PROXY:
     # [端口] 在代理软件的设置里找。虽然不同的代理软件界面不一样,但端口号都应该在最显眼的位置上
 
     # 代理网络的地址,打开你的科学上网软件查看代理的协议(socks5/http)、地址(localhost)和端口(11284)
-    proxies = {
+    proxies = {
         # [协议]:// [地址] :[端口]
-        "http": "socks5h://localhost:11284",
-        "https": "socks5h://localhost:11284",
+        "http": "socks5h://localhost:11284",
+        "https": "socks5h://localhost:11284",
     }
 else:
     proxies = None
@@ -25,7 +25,7 @@ else:
 CHATBOT_HEIGHT = 1115
 
 # 窗口布局
-LAYOUT = "LEFT-RIGHT"
+LAYOUT = "LEFT-RIGHT"  # "LEFT-RIGHT"(左右布局) # "TOP-DOWN"(上下布局)
 
 # 发送请求到OpenAI后,等待多久判定为超时
 TIMEOUT_SECONDS = 25
@@ -46,4 +46,5 @@ API_URL = "https://api.openai.com/v1/chat/completions"
 CONCURRENT_COUNT = 100
 
 # 设置用户名和密码(相关功能不稳定,与gradio版本和网络都相关,如果本地使用不建议加这个)
-
+# [("username", "password"), ("username2", "password2"), ...]
+AUTHENTICATION = []

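A rough sketch of how these settings are consumed downstream. The variable names match config.py, but the driver code itself is hypothetical and not from the repo; it assumes a recent gradio version where launch(auth=...) accepts a list of (username, password) tuples, and socks5h proxies again require requests[socks].

import gradio as gr

proxies = {"http": "socks5h://localhost:11284",
           "https": "socks5h://localhost:11284"}   # later passed to requests via proxies=proxies
LAYOUT = "LEFT-RIGHT"                              # or "TOP-DOWN"
AUTHENTICATION = [("username", "password")]        # same shape as the new config entry

with gr.Blocks() as demo:
    if LAYOUT == "LEFT-RIGHT":
        with gr.Row():                             # left-right layout: chat and input side by side
            gr.Chatbot()
            gr.Textbox()
    else:                                          # top-down layout: stacked vertically
        gr.Chatbot()
        gr.Textbox()

demo.launch(auth=AUTHENTICATION if AUTHENTICATION else None)
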
core_functional.py  (CHANGED, +3 -2)

@@ -4,6 +4,7 @@
 # 默认按钮颜色是 secondary
 from toolbox import clear_line_break
 
+
 def get_core_functions():
     return {
         "英语学术润色": {
@@ -11,12 +12,12 @@ def get_core_functions():
             "Prefix": r"Below is a paragraph from an academic paper. Polish the writing to meet the academic style, " +
                       r"improve the spelling, grammar, clarity, concision and overall readability. When necessary, rewrite the whole sentence. " +
                       r"Furthermore, list all modification and explain the reasons to do so in markdown table." + "\n\n",
-            # 后语
+            # 后语
             "Suffix": r"",
             "Color": r"secondary",  # 按钮颜色
         },
         "中文学术润色": {
-            "Prefix": r"作为一名中文学术论文写作改进助理,你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性," +
+            "Prefix": r"作为一名中文学术论文写作改进助理,你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性," +
                       r"同时分解长句,减少重复,并提供改进建议。请只提供文本的更正版本,避免包括解释。请编辑以下文本" + "\n\n",
             "Suffix": r"",
         },

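To make the dictionary shape above concrete: each core function is just a Prefix/Suffix pair wrapped around the user's text before it is sent to the model, plus an optional Color hint for the button. The driver below is an illustration only (apply_core_function and the inlined, abbreviated dict are not from the repo), but the key names mirror the diff.

core_functions = {
    "英语学术润色": {
        "Prefix": "Below is a paragraph from an academic paper. Polish the writing ...\n\n",
        "Suffix": "",
        "Color": "secondary",   # button colour hint consumed by the UI
    },
}

def apply_core_function(name, user_text):
    fn = core_functions[name]
    return fn["Prefix"] + user_text + fn.get("Suffix", "")

print(apply_core_function("英语学术润色", "We proposes a new method for ..."))
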
crazy_functional.py  (CHANGED, +4 -6)

@@ -1,4 +1,5 @@
-from toolbox import HotReload
+from toolbox import HotReload  # HotReload 的意思是热更新,修改函数插件后,不需要重启程序,代码直接生效
+
 
 def get_crazy_functions():
     ###################### 第一组插件 ###########################
@@ -81,7 +82,8 @@ def get_crazy_functions():
         "[仅供开发调试] 批量总结PDF文档": {
             "Color": "stop",
             "AsButton": False,  # 加入下拉菜单中
-
+            # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
+            "Function": HotReload(批量总结PDF文档)
         },
         "[仅供开发调试] 批量总结PDF文档pdfminer": {
             "Color": "stop",
@@ -109,9 +111,5 @@ def get_crazy_functions():
     except Exception as err:
         print(f'[下载arxiv论文并翻译摘要] 插件导入失败 {str(err)}')
 
-
-
     ###################### 第n组插件 ###########################
     return function_plugins
-
-

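The HotReload wrapper referenced in the new comments is, in essence, a decorator that re-imports the plugin's module on every call, so edited plugin code takes effect without restarting the app. A minimal sketch of the idea follows (simplified; toolbox.HotReload in the repo may differ in detail).

from functools import wraps
import importlib

def HotReload(f):
    @wraps(f)
    def decorated(*args, **kwargs):
        # Re-import the module that defines the plugin, then fetch the freshly loaded function.
        module = importlib.reload(importlib.import_module(f.__module__))
        f_hot_reload = getattr(module, f.__name__)
        # Function plugins are generators (they stream UI updates), hence yield from.
        yield from f_hot_reload(*args, **kwargs)
    return decorated
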
theme.py  (CHANGED, +14 -10)

@@ -1,4 +1,4 @@
-import gradio as gr
+import gradio as gr
 
 # gradio可用颜色列表
 # gr.themes.utils.colors.slate (石板色)
@@ -24,14 +24,16 @@ import gradio as gr
 # gr.themes.utils.colors.pink (粉红色)
 # gr.themes.utils.colors.rose (玫瑰色)
 
+
 def adjust_theme():
-    try:
+    try:
         color_er = gr.themes.utils.colors.fuchsia
-        set_theme = gr.themes.Default(
-
-
-
-
+        set_theme = gr.themes.Default(
+            primary_hue=gr.themes.utils.colors.orange,
+            neutral_hue=gr.themes.utils.colors.gray,
+            font=["sans-serif", "Microsoft YaHei", "ui-sans-serif", "system-ui",
+                  "sans-serif", gr.themes.utils.fonts.GoogleFont("Source Sans Pro")],
+            font_mono=["ui-monospace", "Consolas", "monospace", gr.themes.utils.fonts.GoogleFont("IBM Plex Mono")])
         set_theme.set(
             # Colors
             input_background_fill_dark="*neutral_800",
@@ -77,10 +79,12 @@ def adjust_theme():
            button_cancel_text_color=color_er.c600,
            button_cancel_text_color_dark="white",
        )
-    except:
-        set_theme = None
+    except:
+        set_theme = None
+        print('gradio版本较旧, 不能自定义字体和颜色')
     return set_theme
 
+
 advanced_css = """
 /* 设置表格的外边距为1em,内部单元格之间边框合并,空单元格显示. */
 .markdown-body table {
@@ -149,4 +153,4 @@ advanced_css = """
     padding: 1em;
     margin: 1em 2em 1em 0.5em;
 }
-"""
+"""

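For context, adjust_theme() and advanced_css are meant to be handed to the gradio UI roughly as below. This is a sketch under the assumption of a gradio version whose gr.Blocks accepts theme= and css=; it is not taken verbatim from the repo's main script.

import gradio as gr
from theme import adjust_theme, advanced_css

set_theme = adjust_theme()  # returns None on old gradio versions, per the except branch above

with gr.Blocks(theme=set_theme, css=advanced_css) as demo:
    gr.Chatbot()
    gr.Textbox()

demo.launch()
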
toolbox.py  (CHANGED, +102 -47)

@@ -1,14 +1,23 @@
-import markdown
+import markdown
+import mdtex2html
+import threading
+import importlib
+import traceback
+import importlib
+import inspect
+import re
 from show_math import convert as convert_math
 from functools import wraps, lru_cache
 
+
 def ArgsGeneralWrapper(f):
     """
     装饰器函数,用于重组输入参数,改变输入参数的顺序与结构。
     """
     def decorated(txt, txt2, *args, **kwargs):
         txt_passon = txt
-        if txt == "" and txt2 != "":
+        if txt == "" and txt2 != "":
+            txt_passon = txt2
         yield from f(txt_passon, *args, **kwargs)
     return decorated
 
@@ -18,7 +27,7 @@ def get_reduce_token_percent(text):
         # text = "maximum context length is 4097 tokens. However, your messages resulted in 4870 tokens"
         pattern = r"(\d+)\s+tokens\b"
         match = re.findall(pattern, text)
-        EXCEED_ALLO = 500
+        EXCEED_ALLO = 500  # 稍微留一点余地,否则在回复时会因余量太少出问题
         max_limit = float(match[0]) - EXCEED_ALLO
         current_tokens = float(match[1])
         ratio = max_limit/current_tokens
@@ -27,6 +36,7 @@ def get_reduce_token_percent(text):
     except:
         return 0.5, '不详'
 
+
 def predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[], sys_prompt='', long_connection=True):
     """
     调用简单的predict_no_ui接口,但是依然保留了些许界面心跳功能,当对话太长时,会自动采用二分法截断
@@ -46,21 +56,26 @@ def predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temp
     # list就是最简单的mutable结构,我们第一个位置放gpt输出,第二个位置传递报错信息
     mutable = [None, '']
     # multi-threading worker
+
     def mt(i_say, history):
         while True:
             try:
                 if long_connection:
-                    mutable[0] = predict_no_ui_long_connection(
+                    mutable[0] = predict_no_ui_long_connection(
+                        inputs=i_say, top_p=top_p, temperature=temperature, history=history, sys_prompt=sys_prompt)
                 else:
-                    mutable[0] = predict_no_ui(
+                    mutable[0] = predict_no_ui(
+                        inputs=i_say, top_p=top_p, temperature=temperature, history=history, sys_prompt=sys_prompt)
                 break
             except ConnectionAbortedError as token_exceeded_error:
                 # 尝试计算比例,尽可能多地保留文本
-                p_ratio, n_exceed = get_reduce_token_percent(
+                p_ratio, n_exceed = get_reduce_token_percent(
+                    str(token_exceeded_error))
                 if len(history) > 0:
-                    history = [his[
+                    history = [his[int(len(his) * p_ratio):]
+                               for his in history if his is not None]
                 else:
-                    i_say = i_say[: int(len(i_say)
+                    i_say = i_say[: int(len(i_say) * p_ratio)]
                 mutable[1] = f'警告,文本过长将进行截断,Token溢出数:{n_exceed},截断比例:{(1-p_ratio):.0%}。'
             except TimeoutError as e:
                 mutable[0] = '[Local Message] 请求超时。'
@@ -69,42 +84,51 @@ def predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temp
                 mutable[0] = f'[Local Message] 异常:{str(e)}.'
                 raise RuntimeError(f'[Local Message] 异常:{str(e)}.')
     # 创建新线程发出http请求
-    thread_name = threading.Thread(target=mt, args=(i_say, history))
+    thread_name = threading.Thread(target=mt, args=(i_say, history))
+    thread_name.start()
     # 原来的线程则负责持续更新UI,实现一个超时倒计时,并等待新线程的任务完成
     cnt = 0
     while thread_name.is_alive():
         cnt += 1
-        chatbot[-1] = (i_say_show_user,
+        chatbot[-1] = (i_say_show_user,
+                       f"[Local Message] {mutable[1]}waiting gpt response {cnt}/{TIMEOUT_SECONDS*2*(MAX_RETRY+1)}"+''.join(['.']*(cnt % 4)))
         yield chatbot, history, '正常'
         time.sleep(1)
     # 把gpt的输出从mutable中取出来
     gpt_say = mutable[0]
-    if gpt_say=='[Local Message] Failed with timeout.':
+    if gpt_say == '[Local Message] Failed with timeout.':
+        raise TimeoutError
     return gpt_say
 
+
 def write_results_to_file(history, file_name=None):
     """
     将对话记录history以Markdown格式写入文件中。如果没有指定文件名,则使用当前时间生成文件名。
     """
-    import os
+    import os
+    import time
     if file_name is None:
         # file_name = time.strftime("chatGPT分析报告%Y-%m-%d-%H-%M-%S", time.localtime()) + '.md'
-        file_name = 'chatGPT分析报告' +
+        file_name = 'chatGPT分析报告' + \
+            time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.md'
     os.makedirs('./gpt_log/', exist_ok=True)
-    with open(f'./gpt_log/{file_name}', 'w', encoding
+    with open(f'./gpt_log/{file_name}', 'w', encoding='utf8') as f:
         f.write('# chatGPT 分析报告\n')
         for i, content in enumerate(history):
             try:  # 这个bug没找到触发条件,暂时先这样顶一下
-                if type(content) != str:
+                if type(content) != str:
+                    content = str(content)
             except:
                 continue
-            if i%2==0:
+            if i % 2 == 0:
+                f.write('## ')
            f.write(content)
            f.write('\n\n')
     res = '以上材料已经被写入' + os.path.abspath(f'./gpt_log/{file_name}')
     print(res)
     return res
 
+
 def regular_txt_to_markdown(text):
     """
     将普通文本转换为Markdown格式的文本。
@@ -114,6 +138,7 @@ def regular_txt_to_markdown(text):
     text = text.replace('\n\n\n', '\n\n')
     return text
 
+
 def CatchException(f):
     """
     装饰器函数,捕捉函数f中的异常并封装到一个生成器中返回,并显示到聊天当中。
@@ -127,11 +152,14 @@ def CatchException(f):
             from toolbox import get_conf
             proxies, = get_conf('proxies')
             tb_str = '```\n' + traceback.format_exc() + '```'
-            if chatbot is None or len(chatbot) == 0:
-
+            if chatbot is None or len(chatbot) == 0:
+                chatbot = [["插件调度异常", "异常原因"]]
+            chatbot[-1] = (chatbot[-1][0],
+                           f"[Local Message] 实验性函数调用出错: \n\n{tb_str} \n\n当前代理可用性: \n\n{check_proxy(proxies)}")
             yield chatbot, history, f'异常 {e}'
     return decorated
 
+
 def HotReload(f):
     """
     装饰器函数,实现函数插件热更新
@@ -143,12 +171,15 @@ def HotReload(f):
         yield from f_hot_reload(*args, **kwargs)
     return decorated
 
+
 def report_execption(chatbot, history, a, b):
     """
     向chatbot中添加错误信息
     """
     chatbot.append((a, b))
-    history.append(a)
+    history.append(a)
+    history.append(b)
+
 
 def text_divide_paragraph(text):
     """
@@ -165,6 +196,7 @@ def text_divide_paragraph(text):
     text = "</br>".join(lines)
     return text
 
+
 def markdown_convertion(txt):
     """
     将Markdown格式的文本转换为HTML格式。如果包含数学公式,则先将公式转换为HTML格式。
@@ -172,16 +204,19 @@ def markdown_convertion(txt):
     pre = '<div class="markdown-body">'
     suf = '</div>'
     if ('$' in txt) and ('```' not in txt):
-        return pre + markdown.markdown(txt,extensions=['fenced_code','tables']) + '<br><br>' + markdown.markdown(convert_math(txt, splitParagraphs=False),extensions=['fenced_code','tables']) + suf
+        return pre + markdown.markdown(txt, extensions=['fenced_code', 'tables']) + '<br><br>' + markdown.markdown(convert_math(txt, splitParagraphs=False), extensions=['fenced_code', 'tables']) + suf
     else:
-        return pre + markdown.markdown(txt,extensions=['fenced_code','tables']) + suf
+        return pre + markdown.markdown(txt, extensions=['fenced_code', 'tables']) + suf
+
 
 def close_up_code_segment_during_stream(gpt_reply):
     """
     在gpt输出代码的中途(输出了前面的```,但还没输出完后面的```),补上后面的```
     """
-    if '```' not in gpt_reply:
-
+    if '```' not in gpt_reply:
+        return gpt_reply
+    if gpt_reply.endswith('```'):
+        return gpt_reply
 
     # 排除了以上两个情况,我们
     segments = gpt_reply.split('```')
@@ -191,19 +226,21 @@ def close_up_code_segment_during_stream(gpt_reply):
         return gpt_reply+'\n```'
     else:
         return gpt_reply
-
 
 
 def format_io(self, y):
     """
     将输入和输出解析为HTML格式。将y中最后一项的输入部分段落化,并将输出部分的Markdown和数学公式转换为HTML格式。
     """
-    if y is None or y == []:
+    if y is None or y == []:
+        return []
     i_ask, gpt_reply = y[-1]
-    i_ask = text_divide_paragraph(i_ask)
-    gpt_reply = close_up_code_segment_during_stream(
+    i_ask = text_divide_paragraph(i_ask)  # 输入部分太自由,预处理一波
+    gpt_reply = close_up_code_segment_during_stream(
+        gpt_reply)  # 当代码输出半截的时候,试着补上后个```
     y[-1] = (
-        None if i_ask is None else markdown.markdown(
+        None if i_ask is None else markdown.markdown(
+            i_ask, extensions=['fenced_code', 'tables']),
         None if gpt_reply is None else markdown_convertion(gpt_reply)
     )
     return y
@@ -265,6 +302,7 @@ def extract_archive(file_path, dest_dir):
             return ''
     return ''
 
+
 def find_recent_files(directory):
     """
     me: find files that is created with in one minutes under a directory with python, write a function
@@ -278,21 +316,29 @@ def find_recent_files(directory):
 
     for filename in os.listdir(directory):
         file_path = os.path.join(directory, filename)
-        if file_path.endswith('.log'):
+        if file_path.endswith('.log'):
+            continue
         created_time = os.path.getmtime(file_path)
         if created_time >= one_minute_ago:
-            if os.path.isdir(file_path):
+            if os.path.isdir(file_path):
+                continue
            recent_files.append(file_path)
 
     return recent_files
 
 
 def on_file_uploaded(files, chatbot, txt):
-    if len(files) == 0:
-
+    if len(files) == 0:
+        return chatbot, txt
+    import shutil
+    import os
+    import time
+    import glob
     from toolbox import extract_archive
-    try:
-
+    try:
+        shutil.rmtree('./private_upload/')
+    except:
+        pass
     time_tag = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
     os.makedirs(f'private_upload/{time_tag}', exist_ok=True)
     err_msg = ''
@@ -300,13 +346,14 @@ def on_file_uploaded(files, chatbot, txt):
         file_origin_name = os.path.basename(file.orig_name)
         shutil.copy(file.name, f'private_upload/{time_tag}/{file_origin_name}')
         err_msg += extract_archive(f'private_upload/{time_tag}/{file_origin_name}',
-
-    moved_files = [fp for fp in glob.glob(
+                                   dest_dir=f'private_upload/{time_tag}/{file_origin_name}.extract')
+    moved_files = [fp for fp in glob.glob(
+        'private_upload/**/*', recursive=True)]
     txt = f'private_upload/{time_tag}'
     moved_files_str = '\t\n\n'.join(moved_files)
     chatbot.append(['我上传了文件,请查收',
-                    f'[Local Message] 收到以下文件: \n\n{moved_files_str}'+
-                    f'\n\n调用路径参数已自动修正到: \n\n{txt}'+
+                    f'[Local Message] 收到以下文件: \n\n{moved_files_str}' +
+                    f'\n\n调用路径参数已自动修正到: \n\n{txt}' +
                     f'\n\n现在您点击任意实验功能时,以上文件将被作为输入参数'+err_msg])
     return chatbot, txt
 
@@ -314,32 +361,37 @@ def on_file_uploaded(files, chatbot, txt):
 def on_report_generated(files, chatbot):
     from toolbox import find_recent_files
     report_files = find_recent_files('gpt_log')
-    if len(report_files) == 0:
+    if len(report_files) == 0:
+        return None, chatbot
     # files.extend(report_files)
     chatbot.append(['汇总报告如何远程获取?', '汇总报告已经添加到右侧“文件上传区”(可能处于折叠状态),请查收。'])
     return report_files, chatbot
 
+
 @lru_cache(maxsize=128)
 def read_single_conf_with_lru_cache(arg):
-    try:
-
+    try:
+        r = getattr(importlib.import_module('config_private'), arg)
+    except:
+        r = getattr(importlib.import_module('config'), arg)
     # 在读取API_KEY时,检查一下是不是忘了改config
-    if arg=='API_KEY':
+    if arg == 'API_KEY':
         # 正确的 API_KEY 是 "sk-" + 48 位大小写字母数字的组合
         API_MATCH = re.match(r"sk-[a-zA-Z0-9]{48}$", r)
         if API_MATCH:
             print(f"[API_KEY] 您的 API_KEY 是: {r[:15]}*** API_KEY 导入成功")
         else:
             assert False, "正确的 API_KEY 是 'sk-' + '48 位大小写字母数字' 的组合,请在config文件中修改API密钥, 添加海外代理之后再运行。" + \
-
+                "(如果您刚更新过代码,请确保旧版config_private文件中没有遗留任何新增键值)"
-    if arg=='proxies':
-        if r is None:
+    if arg == 'proxies':
+        if r is None:
             print('[PROXY] 网络代理状态:未配置。无代理状态下很可能无法访问。建议:检查USE_PROXY选项是否修改。')
-        else:
+        else:
             print('[PROXY] 网络代理状态:已配置。配置信息如下:', r)
         assert isinstance(r, dict), 'proxies格式错误,请注意proxies选项的格式,不要遗漏括号。'
     return r
 
+
 def get_conf(*args):
     # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
     res = []
@@ -348,14 +400,17 @@ def get_conf(*args):
         res.append(r)
     return res
 
+
 def clear_line_break(txt):
     txt = txt.replace('\n', ' ')
     txt = txt.replace('  ', ' ')
     txt = txt.replace('  ', ' ')
     return txt
 
+
 class DummyWith():
     def __enter__(self):
         return self
+
     def __exit__(self, exc_type, exc_value, traceback):
-        return
+        return

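The core trick in predict_no_ui_but_counting_down above is to run the blocking model request in a worker thread, let it write its result into a shared mutable list, and keep the calling generator alive so the UI can show a heartbeat until the worker finishes. A stripped-down, dependency-free sketch of that pattern follows (hypothetical names; no gradio or OpenAI calls).

import threading
import time

def slow_request():
    # stand-in for predict_no_ui(...): any blocking call that eventually returns a string
    time.sleep(3)
    return "model reply"

def counting_down(timeout=10):
    mutable = [None, '']  # slot 0: result, slot 1: warning text (mirrors the diff)

    def worker():
        mutable[0] = slow_request()

    thread = threading.Thread(target=worker, daemon=True)
    thread.start()
    cnt = 0
    while thread.is_alive() and cnt < timeout:
        cnt += 1
        # heartbeat yielded to the caller once per second while the worker is busy
        yield f"{mutable[1]}waiting for response {cnt}/{timeout}" + '.' * (cnt % 4)
        time.sleep(1)
    yield mutable[0] if mutable[0] is not None else "[Local Message] Failed with timeout."

for status in counting_down():
    print(status)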