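"""Gradio chat app backed by g4f providers.

Supports a plain chat mode (the raw history is sent to g4f) and an
instruction mode that routes the conversation through a LangChain
LLMChain with windowed conversation memory.
"""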
import g4f
import gradio as gr
from g4f.Provider import (
    Ails,
    You,
    Bing,
    Yqcloud,
    Theb,
    Aichat,
    Bard,
    Vercel,
    Forefront,
    Lockchat,
    Liaobots,
    H2o,
    ChatgptLogin,
    DeepAi,
    GetGpt
)
import os
import json
import pandas as pd
from models_for_langchain.model import CustomLLM
from langchain.memory import ConversationBufferWindowMemory
from langchain import LLMChain, PromptTemplate
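
# Map the provider names offered in the UI to their g4f Provider classes.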
provider_dict = {
    'Ails': Ails,
    'You': You,
    'Bing': Bing,
    'Yqcloud': Yqcloud,
    'Theb': Theb,
    'Aichat': Aichat,
    'Bard': Bard,
    'Vercel': Vercel,
    'Forefront': Forefront,
    'Lockchat': Lockchat,
    'Liaobots': Liaobots,
    'H2o': H2o,
    'ChatgptLogin': ChatgptLogin,
    'DeepAi': DeepAi,
    'GetGpt': GetGpt
}
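
# Load prompt collections from prompt_set/: CSV files need 'act' and
# 'prompt' columns; JSON files, a list of {"act": ..., "prompt": ...} objects.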
prompt_set_list = {}
for prompt_file in os.listdir("prompt_set"):
    key = prompt_file
    if key.endswith('.csv'):
        df = pd.read_csv(os.path.join("prompt_set", prompt_file))
        prompt_dict = dict(zip(df['act'], df['prompt']))
    else:
        with open(os.path.join("prompt_set", prompt_file), encoding='utf-8') as f:
            ds = json.load(f)
        prompt_dict = {item["act"]: item["prompt"] for item in ds}
    prompt_set_list[key] = prompt_dict
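
# Build the Gradio UI; widgets and callbacks live inside this Blocks scope.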
with gr.Blocks() as demo:
    llm = CustomLLM()

    # {system_instruction} is filled in via str.format() below; the doubled
    # braces survive formatting, leaving {chat_history} and {human_input}
    # as input variables for the LangChain PromptTemplate.
    template = """
Chat with human based on following instructions:
```
{system_instruction}
```
The following is a conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
{{chat_history}}
Human: {{human_input}}
Chatbot:"""

    memory = ConversationBufferWindowMemory(k=10, memory_key="chat_history")
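
    # Layout: chat window, input box, chat controls, and model/provider pickers.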
    chatbot = gr.Chatbot([], label='AI')
    msg = gr.Textbox(value="", label='Input:')
    with gr.Row():
        clear = gr.Button("Clear chat", scale=2)
        chat_mode = gr.Checkbox(value=True, label='Chat mode', interactive=True, scale=1)
    system_msg = gr.Textbox(value="You are an assistant and can answer questions.", label='System prompt')
    with gr.Row():
        default_prompt_set = "1 中文提示词.json"  # filename under prompt_set/, kept as-is
        prompt_set_name = gr.Dropdown(prompt_set_list.keys(), value=default_prompt_set, label='Prompt set')
        prompt_name = gr.Dropdown(prompt_set_list[default_prompt_set].keys(), label='Prompt', min_width=20)
    with gr.Row():
        model_name = gr.Dropdown(['gpt-3.5-turbo', 'gpt-4'], value='gpt-3.5-turbo', label='Model')
        provider_name = gr.Dropdown(provider_dict.keys(), value='GetGpt', label='Provider', min_width=20)
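
    # Callbacks for the widgets above.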
    def change_prompt_set(prompt_set_name):
        return gr.update(choices=list(prompt_set_list[prompt_set_name].keys()))

    def change_prompt(prompt_set_name, prompt_name):
        return gr.update(value=prompt_set_list[prompt_set_name][prompt_name])

    def user(user_message, history=None):
        # Append the user's turn and lock the textbox until the bot finishes.
        history = history or []
        return gr.update(value="", interactive=False), history + [[user_message, None]]
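
    # bot() is a generator: each yield pushes the partially built reply
    # into the Chatbot, which is how Gradio renders streaming output.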
    def bot(history, model_name, provider_name, system_msg, chat_mode):
        history[-1][1] = ''
        # Bound overlong system prompts: keep the head and the tail.
        if len(system_msg) > 3000:
            system_msg = system_msg[:2000] + system_msg[-1000:]
        if not chat_mode:
            # Instruction mode: route the turn through a LangChain LLMChain
            # so the windowed memory carries the conversation.
            llm.model_name = model_name
            llm.provider_name = provider_name
            prompt = PromptTemplate(
                input_variables=["chat_history", "human_input"],
                template=template.format(system_instruction=system_msg)
            )
            llm_chain = LLMChain(
                llm=llm,
                prompt=prompt,
                verbose=False,
                memory=memory,
            )
            bot_msg = llm_chain.run(history[-1][0])
            # The chain returns the full answer; emit it character by
            # character so the UI still appears to stream.
            for c in bot_msg:
                history[-1][1] += c
                yield history
        else:
            # Chat mode: send the history straight to g4f. The latest user
            # turn is wrapped in a template that carries the system prompt.
            prompt = """
Please read the following instructions carefully, then respond to the user's message.
Instructions:
```
{}
```
The user's latest message:
```
{}
```
Please respond:
"""
            messages = []
            for user_message, assistant_message in history[:-1]:
                messages.append({"role": "user", "content": user_message})
                messages.append({"role": "assistant", "content": assistant_message})
            messages.append({"role": "user", "content": prompt.format(system_msg, history[-1][0])})
            bot_msg = g4f.ChatCompletion.create(
                model=model_name,
                provider=provider_dict[provider_name],
                messages=messages,
                stream=True)
            for c in bot_msg:
                history[-1][1] += c
                yield history
    def empty_chat():
        # Clear the visible history and reset the LangChain memory with it.
        global memory
        memory = ConversationBufferWindowMemory(k=10, memory_key="chat_history")
        return None
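
    # Wiring: a submitted message appends the user turn, then streams the reply.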
    response = msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, [chatbot, model_name, provider_name, system_msg, chat_mode], chatbot
    )
    prompt_set_name.select(change_prompt_set, prompt_set_name, prompt_name)
    prompt_name.select(change_prompt, [prompt_set_name, prompt_name], system_msg)
    # Re-enable the textbox once the reply has finished streaming.
    response.then(lambda: gr.update(interactive=True), None, [msg], queue=False)
    clear.click(empty_chat, None, [chatbot], queue=False)

demo.title = "AI Chat"
demo.queue()
demo.launch()
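
# Run with `python app.py`; Gradio serves on http://127.0.0.1:7860 by default.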