import requests
import json
import gradio as gr
import pdfplumber
import pandas as pd
import time
from cnocr import CnOcr
from sentence_transformers import SentenceTransformer, models, util
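
# PDF-chat Gradio app: upload one or more PDFs, parse them (plain text, OCR for embedded
# images, and tables), embed every parsed line with a Chinese SBERT model, then answer
# questions by retrieving the most similar lines and sending them, together with the
# recent chat history, to a ChatGPT proxy endpoint.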
word_embedding_model = models.Transformer('uer/sbert-base-chinese-nli', do_lower_case=True)  # BERT model
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), pooling_mode='cls')  # take the CLS vector as the sentence embedding
embedder = SentenceTransformer(modules=[word_embedding_model, pooling_model])  # define the sentence-embedding model
ocr = CnOcr()  # initialize the OCR model
chat_url = 'https://souljoy-my-api.hf.space/chatgpt'  # your ChatGPT proxy URL
headers = {
'Content-Type': 'application/json',
}  # your request headers
history_max_len = 500  # maximum length of chat history the bot remembers
all_max_len = 3000  # maximum total input length
def doc_emb(doc):  # embed the parsed document
    texts = doc.split('\n')  # split by line
    emb_list = embedder.encode(texts)  # encode each line into a sentence vector
print('\n'.join(texts))
return texts, emb_list, gr.Textbox.update(visible=True), gr.Button.update(visible=True), gr.Markdown.update(
value="""操作说明 step 3:PDF解析提交成功! 🙋 可以开始对话啦~"""), gr.Chatbot.update(visible=True)
def get_response(open_ai_key, msg, bot, doc_text_list, doc_embeddings):
now_len = len(msg)
his_bg = -1
for i in range(len(bot) - 1, -1, -1):
if now_len + len(bot[i][0]) + len(bot[i][1]) > history_max_len:
break
now_len += len(bot[i][0]) + len(bot[i][1])
his_bg = i
history = [] if his_bg == -1 else bot[his_bg:]
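    # rank every document line by cosine similarity to the user message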
query_embedding = embedder.encode([msg])
cos_scores = util.cos_sim(query_embedding, doc_embeddings)[0]
    score_index = [[score, index] for index, score in enumerate(cos_scores)]
score_index.sort(key=lambda x: x[0], reverse=True)
print('score_index:\n', score_index)
index_set, sub_doc_list = set(), []
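    # greedily add the highest-scoring lines (and their neighbours) until all_max_len is reached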
for s_i in score_index:
doc = doc_text_list[s_i[1]]
if now_len + len(doc) > all_max_len:
break
index_set.add(s_i[1])
now_len += len(doc)
        # paragraphs may have been split incorrectly, so also pull in the neighbouring lines
if s_i[1] > 0 and s_i[1] - 1 not in index_set:
doc = doc_text_list[s_i[1] - 1]
if now_len + len(doc) > all_max_len:
break
index_set.add(s_i[1] - 1)
now_len += len(doc)
if s_i[1] + 1 < len(doc_text_list) and s_i[1] + 1 not in index_set:
doc = doc_text_list[s_i[1] + 1]
if now_len + len(doc) > all_max_len:
break
index_set.add(s_i[1] + 1)
now_len += len(doc)
index_list = list(index_set)
index_list.sort()
for i in index_list:
sub_doc_list.append(doc_text_list[i])
document = '' if len(sub_doc_list) == 0 else '\n'.join(sub_doc_list)
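    # assemble the chat messages: system prompt, retrieved document, chat history, current message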
messages = [{
"role": "system",
"content": "你是一个有用的助手,可以使用文章内容准确地回答问题。使用提供的文章来生成你的答案,但避免逐字复制文章。尽可能使用自己的话。准确、有用、简洁、清晰。"
}, {"role": "system", "content": "文章内容:\n" + document}]
for his in history:
messages.append({"role": "user", "content": his[0]})
messages.append({"role": "assistant", "content": his[1]})
messages.append({"role": "user", "content": msg})
req_json = {'messages': messages, 'key': open_ai_key, 'model': "gpt-3.5-turbo"}
data = {"content": json.dumps(req_json)}
print('data:\n', req_json)
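    # POST the request to the ChatGPT proxy endpoint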
result = requests.post(url=chat_url,
data=json.dumps(data),
headers=headers
)
res = result.json()['content']
bot.append([msg, res])
return bot[max(0, len(bot) - 3):]
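
# Parse the uploaded PDFs: plain text per page, OCR on embedded images, and tables
# flattened to one JSON record per row.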
def up_file(files):
doc_text_list = []
for idx, file in enumerate(files):
print(file.name)
with pdfplumber.open(file.name) as pdf:
for i in range(len(pdf.pages)):
                # read page i+1 of the PDF
page = pdf.pages[i]
                res_list = (page.extract_text() or '').split('\n')[:-1]  # guard against pages with no extractable text
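                # OCR every embedded image on the page and append the recognized text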
for j in range(len(page.images)):
                    # get the raw binary stream of the image
img = page.images[j]
file_name = '{}-{}-{}.png'.format(str(time.time()), str(i), str(j))
with open(file_name, mode='wb') as f:
f.write(img['stream'].get_data())
try:
res = ocr.ocr(file_name)
except Exception as e:
res = []
if len(res) > 0:
                        res_list.append(' '.join([item['text'] for item in res]))
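                # extract tables and flatten each row into a JSON record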
tables = page.extract_tables()
for table in tables:
                    # treat the first row as the table header
df = pd.DataFrame(table[1:], columns=table[0])
try:
records = json.loads(df.to_json(orient="records", force_ascii=False))
for rec in records:
res_list.append(json.dumps(rec, ensure_ascii=False))
except Exception as e:
res_list.append(str(df))
doc_text_list += res_list
doc_text_list = [str(text).strip() for text in doc_text_list if len(str(text).strip()) > 0]
print(doc_text_list)
return gr.Textbox.update(value='\n'.join(doc_text_list), visible=True), gr.Button.update(
visible=True), gr.Markdown.update(
value="操作说明 step 2:确认PDF解析结果(可修正),点击“提交解析结果”,随后进行对话")
with gr.Blocks() as demo:
with gr.Row():
with gr.Column():
            open_ai_key = gr.Textbox(label='OpenAI API Key', placeholder='输入你的OpenAI API Key')  # your OpenAI API Key
            file = gr.File(file_types=['.pdf'], label='点击上传PDF,进行解析(支持多文档、表格、OCR)',
                           file_count='multiple')  # supports multiple documents, tables and OCR
            txt = gr.Textbox(label='PDF解析结果', visible=False)  # PDF parsing result
            doc_bu = gr.Button(value='提交解析结果', visible=False)  # submit the parsing result
            doc_text_state = gr.State([])  # stores the parsed document lines
            doc_emb_state = gr.State([])  # stores the embeddings of the parsed lines
with gr.Column():
            md = gr.Markdown("""操作说明 step 1:点击左侧区域,上传PDF,进行解析""")  # usage instructions
            chat_bot = gr.Chatbot(visible=False)  # chatbot display
            msg_txt = gr.Textbox(label='消息框', placeholder='输入消息,点击发送', visible=False)  # message input box
with gr.Row():
chat_bu = gr.Button(value='发送', visible=False)
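    # wire up the events: upload triggers parsing, the submit button triggers embedding,
    # and the send button triggers a chat response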
file.change(up_file, [file], [txt, doc_bu, md])
doc_bu.click(doc_emb, [txt], [doc_text_state, doc_emb_state, msg_txt, chat_bu, md, chat_bot])
chat_bu.click(get_response, [open_ai_key, msg_txt, chat_bot, doc_text_state, doc_emb_state], [chat_bot])
if __name__ == "__main__":
demo.queue().launch() |