File size: 3,332 Bytes
bb9156b
 
 
 
 
6a7a9fa
bb9156b
e9954ba
 
 
 
 
 
55136e7
c266e5d
 
bb9156b
1a78461
bb9156b
1a78461
84fcd6b
bb9156b
 
 
 
 
 
 
6a7a9fa
 
 
 
 
 
 
 
 
 
bb9156b
6a7a9fa
 
 
 
 
 
 
 
 
 
 
 
 
 
9c2d0ad
6a7a9fa
 
bb9156b
 
 
a4140e9
8a37bd4
e9954ba
31e1e01
 
bb9156b
 
 
 
 
 
 
e9954ba
bb9156b
e9954ba
 
bb9156b
f67d90c
f74b6db
f67d90c
bb9156b
 
2410a17
bb9156b
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
import gradio as gr
import json
import asyncio
import os
from EdgeGPT import Chatbot, ConversationStyle
import re

"""
参考资料:
https://www.bilibili.com/video/BV1KV4y197bX/?spm_id_from=333.1007.tianma.2-2-5.click&vd_source=9dc7baa799a4531df364244d2e87ee01
https://blog.csdn.net/xmh_free/article/details/127210992
"""

# read cookie from local file
# with open('./cookies.json', 'r') as f:
#     cookies = json.load(f)

async def get_model_reply(prompt, style, cookies, context=None):
    """Send *prompt* to Bing Chat via EdgeGPT and return the chat history.

    Args:
        prompt: The user's question text.
        style: Conversation style name ("creative" / "balanced" / "precise"),
            passed straight through to ``bot.ask``.
        cookies: bing.com cookies as a JSON-encoded string (parsed here).
        context: Previous Gradio state. It is intentionally discarded: a
            fresh ``Chatbot`` is created per call, so server-side history
            does not persist between turns anyway.

    Returns:
        A ``(responses, context)`` tuple where ``responses`` is a list of
        ``(user, bot)`` pairs for the ``gr.Chatbot`` widget and ``context``
        is the flat message list kept in ``gr.State``.
    """
    # Each call starts a fresh conversation; only this turn is displayed.
    context = [prompt]
    cookies = json.loads(cookies)
    bot = Chatbot(cookies=cookies)
    try:
        raw_data = await bot.ask(prompt, conversation_style=style)
    finally:
        # Always release the connection, even if ask() raises.
        await bot.close()

    try:
        messages = raw_data["item"]["messages"]
        try:
            response = messages[1]["text"]
        except (KeyError, IndexError):
            # Some replies only carry the text inside an adaptive card.
            response = messages[1]["adaptiveCards"][0]["body"][0]["text"]
        # Strip citation carets and trailing whitespace from the reply.
        response = re.sub(r'\^', '', response).rstrip()
        context += [response]
    except (KeyError, IndexError, TypeError):
        # No normal message in the payload — diagnose why.
        item = raw_data.get("item", {}) if isinstance(raw_data, dict) else {}
        throttling = item.get("throttling", {})
        used = throttling.get("numUserMessagesInConversation", 0)
        limit = throttling.get("maxNumUserMessagesInConversation")
        if limit is not None and used > limit:
            # Per-conversation turn limit reached.
            response = ">>>请重新开启新的对话。"
            print(response)
            context += ["0"]
        elif item.get("result", {}).get("value") == "Throttled":
            # Daily request quota exhausted.
            response = "> **错误: 我们很抱歉,但你已经达到了你在24小时内可以向Bing发送消息的最大数量。请稍后再查看!**"
            print(response)
            context += ["1"]
        else:
            # Unknown payload shape: surface an error instead of silently
            # returning None (which would crash the Gradio output binding).
            context += ["> 错误: 无法解析接口返回内容,请重试。"]

    responses = [(u, b) for u, b in zip(context[::2], context[1::2])]
    return responses, context


# Build the Gradio UI: one tab for pasting bing.com cookies, one tab for chat.
with gr.Blocks() as dialog_app:

    with gr.Tab("Cookies"):
        # Raw JSON cookie text; parsed by get_model_reply on every request.
        cookies = gr.Textbox(lines=2, label="输入bing.com中的cookies(JSON格式)")
    with gr.Tab("New Bing Chat GPT4"):
        gr.Markdown("# 连接 new-bing 接口,用的是GPT4的接口")
        chatbot = gr.Chatbot()
        # Flat message list threaded through get_model_reply between turns.
        state = gr.State([])
        markdown = gr.Markdown(label="Output")

        with gr.Row():
            inputs = gr.Textbox(
                label="输入问题",
                placeholder="请输入你的文本,确保已经正确填入bing.com中的cookies"
            )
            style = gr.Dropdown(label="回答倾向模式选择", choices=["creative", "balanced", "precise"], multiselect=False,
                                value="precise", type="value")

        # Both Enter-to-submit and the button route through get_model_reply.
        inputs.submit(get_model_reply, [inputs, style, cookies,state ], [chatbot, state])
        send = gr.Button("发送请求.....")
        send.click(get_model_reply, [inputs, style, cookies, state], [chatbot, state],api_name="xiaolvgpt")

# launches the app in a new local port
dialog_app.launch(show_error=True)
# Set a password for the site to prevent abuse:
# dialog_app.launch(auth=("admin", "pass1234"))