vumichien committed on
Commit
107665c
·
1 Parent(s): 4b2acd6

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +141 -0
app.py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import os
3
+ import openai
4
+ import json
5
+ import tiktoken
6
+ import pandas as pd
7
+
8
# Fail fast at startup if no server-side key is configured
# (os.environ[...] raises KeyError when the variable is unset).
openai.api_key = os.environ["OPENAI_API_KEY"]
# Name -> system-prompt text; "Default ChatGPT" maps to "" (no system prompt).
# Extended from prompts.csv by download_prompt_templates() when the app loads.
prompt_templates = {"Default ChatGPT": ""}
10
+
11
def num_tokens_from_messages(messages, model="gpt-3.5-turbo"):
    """Return the number of tokens a chat request will consume.

    Mirrors OpenAI's reference accounting for gpt-3.5-turbo: each message
    carries a fixed 4-token envelope (<im_start>{role/name}\n{content}<im_end>\n),
    a "name" field replaces the role token (net -1), and the reply is primed
    with <im_start>assistant (+2 tokens). Raises NotImplementedError for any
    other model.
    """
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        # Unknown model name: fall back to the cl100k_base encoding.
        encoding = tiktoken.get_encoding("cl100k_base")
    if model != "gpt-3.5-turbo":
        raise NotImplementedError(f"""num_tokens_from_messages() is not presently implemented for model {model}.
See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""")
    total = 2  # every reply is primed with <im_start>assistant
    for message in messages:
        total += 4  # per-message envelope: <im_start>{role/name}\n{content}<im_end>\n
        for field, text in message.items():
            total += len(encoding.encode(text))
            if field == "name":  # a name supersedes the role token
                total -= 1
    return total
30
+
31
+
32
def get_empty_state():
    """Fresh per-session conversation state: no history, no tokens spent,
    trim counter at zero."""
    return dict(total_tokens=0, messages=[], threshold=0)
34
+
35
def download_prompt_templates():
    """Load act -> prompt pairs from prompts.csv into the global registry
    and return a gr.update that repopulates the template dropdown."""
    table = pd.read_csv('prompts.csv', encoding='unicode_escape')
    # dict.update accepts an iterable of (key, value) pairs directly.
    prompt_templates.update(zip(table['act'], table['prompt']))
    names = list(prompt_templates.keys())
    return gr.update(value=names[0], choices=names)
40
+
41
def on_token_change(user_token):
    """Switch the active OpenAI key to the user-supplied one, falling back
    to the server's OPENAI_API_KEY when the field is blank."""
    if user_token:
        openai.api_key = user_token
    else:
        openai.api_key = os.environ.get("OPENAI_API_KEY")
43
+
44
def on_prompt_template_change(prompt_template):
    """Return the preview text for the selected prompt template.

    Ignores non-string change events (Gradio can emit these while the
    dropdown is being repopulated). Fix vs. original: an unknown template
    name now previews as "" via dict.get instead of raising KeyError
    (possible when prompts.csv has not loaded yet).
    """
    if not isinstance(prompt_template, str):
        return  # spurious event; leave the preview unchanged (None output)
    return prompt_templates.get(prompt_template, "")
47
+
48
def _chat_pairs(history):
    """Fold the flat [user, assistant, user, assistant, ...] message list
    into (user_text, assistant_text) tuples for gr.Chatbot."""
    return [(history[i]['content'], history[i + 1]['content'])
            for i in range(0, len(history) - 1, 2)]


def submit_message(prompt, prompt_template, temperature, max_tokens, state):
    """Handle one chat turn.

    Trims the oldest history pairs until the request fits the 4090-token
    budget, calls the ChatCompletion API, and returns
    (input-box update, chat pairs, token-usage markdown, state).

    Fixes vs. original: the stray debug print of the template is removed,
    and an unknown template name degrades to "" instead of raising KeyError.
    """
    history = state['messages']

    # Empty submission: re-render the current state unchanged.
    if not prompt:
        return (gr.update(value='', visible=state['total_tokens'] < 1_000),
                _chat_pairs(history),
                f"Total tokens used: {state['total_tokens']} / 4090",
                state)

    template_text = prompt_templates.get(prompt_template, "")
    system_prompt = []
    if template_text:
        system_prompt = [{"role": "system", "content": template_text}]

    prompt_msg = {"role": "user", "content": prompt}

    # Drop the oldest user/assistant pair (2 messages) at a time until the
    # request fits the 4090-token budget; count each trim toward the
    # 3-strikes conversation limit enforced below.
    messages = system_prompt + history + [prompt_msg]
    history_id = 2
    while num_tokens_from_messages(messages) >= 4090:
        messages = system_prompt + history[history_id:] + [prompt_msg]
        history_id += 2
        state['threshold'] += 1
        if history_id > len(history):
            break

    try:
        completion = openai.ChatCompletion.create(
            model="gpt-3.5-turbo", messages=messages,
            temperature=temperature, max_tokens=max_tokens)
        history.append(prompt_msg)
        history.append(completion.choices[0].message.to_dict())
        state['total_tokens'] += completion['usage']['total_tokens']
    except Exception as e:
        # Surface API failures inline in the transcript instead of crashing.
        history.append(prompt_msg)
        history.append({
            "role": "system",
            "content": f"Error: {e}"
        })

    total_tokens_used_msg = f"Total tokens used: {state['total_tokens']} / 4090. "
    chat_messages = _chat_pairs(history)

    if state['threshold'] >= 3:
        # Too many trims: hide the input box and tell the user to restart.
        input_visibility = False
        total_tokens_used_msg += "Reach the limit of this conversation. Start the new one"
    else:
        input_visibility = True

    return gr.update(value='', visible=input_visibility), chat_messages, total_tokens_used_msg, state
96
+
97
def clear_conversation():
    """Reset the UI: blank (and re-show) the input box, wipe the chat log
    and token counter, and hand back a fresh session state."""
    cleared_input = gr.update(value=None, visible=True)
    return cleared_input, None, "", get_empty_state()
99
+
100
# Layout tweaks for the Gradio Blocks UI below.
css = """
#col-container {max-width: 80%; margin-left: auto; margin-right: auto;}
#chatbox {min-height: 400px;}
#header {text-align: center;}
#prompt_template_preview {padding: 1em; border-width: 1px; border-style: solid; border-color: #e0e0e0; border-radius: 4px;}
#total_tokens_str {text-align: right; font-size: 0.8em; color: #666; height: 1em;}
#label {font-size: 0.8em; padding: 0.5em; margin: 0;}
"""

with gr.Blocks(css=css) as demo:

    # Per-session conversation state; each browser tab gets its own copy.
    state = gr.State(get_empty_state())

    with gr.Column(elem_id="col-container"):
        # User-facing typo fixed: "insruction" -> "instruction".
        gr.Markdown("""## OpenAI ChatGPT with awesome prompts
Current limit is 4090 tokens per conversation<br>
Input your text with a custom instruction (If need).""",
                    elem_id="header")

        with gr.Row():
            with gr.Column():
                chatbot = gr.Chatbot(elem_id="chatbox")
                input_message = gr.Textbox(show_label=False, placeholder="Enter text and press enter", visible=True).style(container=False)
                total_tokens_str = gr.Markdown(elem_id="total_tokens_str")
                btn_clear_conversation = gr.Button("🔃 Start New Conversation")
            with gr.Column():
                prompt_template = gr.Dropdown(label="Set a custom instruction for the chatbot:", choices=list(prompt_templates.keys()))
                prompt_template_preview = gr.Markdown(elem_id="prompt_template_preview")

                with gr.Accordion("Advanced parameters", open=False):
                    temperature = gr.Slider(minimum=0, maximum=2.0, value=0.7, step=0.1, interactive=True, label="Temperature (higher = more creative/chaotic)")
                    max_tokens = gr.Slider(minimum=100, maximum=4096, value=1000, step=1, interactive=True, label="Max tokens per response")

    input_message.submit(submit_message, [input_message, prompt_template, temperature, max_tokens, state], [input_message, chatbot, total_tokens_str, state])
    btn_clear_conversation.click(clear_conversation, [], [input_message, chatbot, total_tokens_str, state])
    prompt_template.change(on_prompt_template_change, inputs=[prompt_template], outputs=[prompt_template_preview])

    demo.load(download_prompt_templates, inputs=None, outputs=[prompt_template])


# SECURITY: credentials were hard-coded in source ("admin"/"dtm1234"). Keep
# those values as defaults for backward compatibility, but allow operators
# to override them via environment variables instead of editing the code.
auth_user = os.environ.get("APP_AUTH_USER", "admin")
auth_pass = os.environ.get("APP_AUTH_PASS", "dtm1234")
demo.launch(debug=True, height='800px', share=True, auth=(auth_user, auth_pass))