wdplx committed
Commit 76d3fa1
0 Parent(s)

Finished gradio framework

Files changed (9)
  1. README.md +13 -0
  2. app.py +212 -0
  3. example.json +5 -0
  4. images/bot_icon.png +0 -0
  5. images/sotopia.jpeg +0 -0
  6. images/user_icon.png +0 -0
  7. peft_config.json +21 -0
  8. requirements.txt +4 -0
  9. utils.py +31 -0
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: Sotopia Pi Demo
+ emoji: 💻
+ colorFrom: green
+ colorTo: pink
+ sdk: gradio
+ sdk_version: 4.25.0
+ app_file: app.py
+ pinned: false
+ license: apache-2.0
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,212 @@
+ import gradio as gr
+ import os
+ import torch
+ from uuid import uuid4
+ from peft import PeftModel, PeftConfig
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ from utils import Agent, get_starter_prompt, format_chat_prompt
+
+
+ # The human-controlled side of the conversation.
+ HUMAN_AGENT = Agent(
+     name="Ethan Johnson",
+     background="Ethan Johnson is a 34-year-old male chef. He/him pronouns. Ethan Johnson is famous for cooking Italian food.",
+     goal="Unknown",
+     secrets="Unknown",
+     personality="Ethan Johnson, a creative yet somewhat reserved individual, values power and fairness. He likes to analyse situations before deciding.",
+ )
+
+ # The model-controlled side of the conversation.
+ MACHINE_AGENT = Agent(
+     name="Benjamin Jackson",
+     background="Benjamin Jackson is a 24-year-old male environmental activist. He/him pronouns. Benjamin Jackson is well-known for his impassioned speeches.",
+     goal="Figure out why they estranged you recently, and maintain the existing friendship (Extra information: you notice that your friend has been intentionally avoiding you, you would like to figure out why. You value your friendship with the friend and don't want to lose it.)",
+     secrets="Descendant of a wealthy oil tycoon, rejects family fortune",
+     personality="Benjamin Jackson, expressive and imaginative, leans towards self-direction and liberty. His decisions aim for societal betterment.",
+ )
+
+ DEFAULT_INSTRUCTIONS = get_starter_prompt(MACHINE_AGENT, HUMAN_AGENT, "Conversation between two friends, where one is upset and crying")
+
+ DEPLOYED = os.getenv("DEPLOYED", "true").lower() == "true"
+ MODEL_NAME = "cmu-lti/sotopia-pi-mistral-7b-BC_SR"
+ COMPUTE_DTYPE = torch.float16
+
+ # Load the base Mistral model and attach the Sotopia-Pi LoRA adapter.
+ config_dict = PeftConfig.from_json_file("peft_config.json")
+ config = PeftConfig.from_peft_type(**config_dict)
+ tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
+ model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
+ model = PeftModel.from_pretrained(model, MODEL_NAME, config=config).to(COMPUTE_DTYPE).to("cuda")
+ accordion_visible = True
+
+ def introduction():
+     with gr.Column(scale=2):
+         gr.Image("images/sotopia.jpeg", elem_id="banner-image", show_label=False)
+     with gr.Column(scale=5):
+         gr.Markdown(
+             """# Sotopia-Pi Demo
+ **Chat with [Sotopia-Pi](https://github.com/sotopia-lab/sotopia-pi), brainstorm ideas, discuss your holiday plans, and more!**
+
+ ➡️️ **Intended Use**: this demo is intended to showcase an early finetuning of [sotopia-pi-mistral-7b-BC_SR](https://huggingface.co/cmu-lti/sotopia-pi-mistral-7b-BC_SR).
+
+ ⚠️ **Limitations**: the model can and will produce factually incorrect information, hallucinating facts and actions. As it has not undergone any advanced tuning/alignment, it can produce problematic outputs, especially if prompted to do so. Finally, this demo is limited to a session length of about 1,000 words.
+
+ 🗄️ **Disclaimer**: user prompts and generated replies may be collected solely for the purpose of enhancing and refining the model. No personally identifiable information associated with your inputs will be stored. By using this demo, users implicitly agree to these terms.
+ """
+         )
+
+ def chat_accordion():
+     with gr.Accordion("Parameters", open=False, visible=accordion_visible):
+         temperature = gr.Slider(
+             minimum=0.1,
+             maximum=1.0,
+             value=0.7,
+             step=0.1,
+             interactive=True,
+             label="Temperature",
+         )
+
+         max_tokens = gr.Slider(
+             minimum=1024,
+             maximum=4096,
+             value=1024,
+             step=1,
+             interactive=True,
+             label="Max Tokens",
+         )
+
+     # Hidden per-session identifier, generated once per page load.
+     session_id = gr.Textbox(
+         value=lambda: str(uuid4()),
+         interactive=False,
+         visible=False,
+     )
+
+     with gr.Accordion("Instructions", open=False, visible=False):
+         instructions = gr.Textbox(
+             placeholder="The Instructions",
+             value=DEFAULT_INSTRUCTIONS,
+             lines=16,
+             interactive=True,
+             label="Instructions",
+             max_lines=16,
+             show_label=False,
+         )
+         with gr.Row():
+             with gr.Column():
+                 user_name = gr.Textbox(
+                     lines=1,
+                     label="Username",
+                     value=HUMAN_AGENT.name,
+                     interactive=True,
+                     placeholder="Username: ",
+                     show_label=False,
+                     max_lines=1,
+                 )
+             with gr.Column():
+                 bot_name = gr.Textbox(
+                     lines=1,
+                     value=MACHINE_AGENT.name,
+                     interactive=True,
+                     placeholder="Bot Name",
+                     show_label=False,
+                     max_lines=1,
+                     visible=False,
+                 )
+
+     return temperature, instructions, user_name, bot_name, session_id, max_tokens
+
+
+ def chat_tab():
+     def run_chat(
+         message: str,
+         history,
+         instructions: str,
+         user_name: str,
+         bot_name: str,
+         temperature: float,
+         session_id: str,
+         max_tokens: int,
+     ):
+         # session_id is currently unused; it is kept so the signature matches additional_inputs below.
+         prompt = format_chat_prompt(message, history, instructions, user_name, bot_name)
+         input_tokens = tokenizer(prompt, return_tensors="pt", padding="do_not_pad").input_ids.to("cuda")
+         output = model.generate(
+             input_tokens,
+             do_sample=True,
+             temperature=temperature,
+             max_length=max_tokens,
+             pad_token_id=tokenizer.eos_token_id,
+             num_return_sequences=1,
+         )
+         # Decode only the newly generated tokens, not the echoed prompt.
+         return tokenizer.decode(output[0][input_tokens.shape[-1]:], skip_special_tokens=True)
+
+     with gr.Column():
+         with gr.Row():
+             (
+                 temperature,
+                 instructions,
+                 user_name,
+                 bot_name,
+                 session_id,
+                 max_tokens,
+             ) = chat_accordion()
+
+         with gr.Column():
+             with gr.Blocks():
+                 gr.ChatInterface(
+                     fn=run_chat,
+                     chatbot=gr.Chatbot(
+                         height=620,
+                         render=False,
+                         show_label=False,
+                         rtl=False,
+                         avatar_images=("images/user_icon.png", "images/bot_icon.png"),
+                     ),
+                     textbox=gr.Textbox(
+                         placeholder="Write your message here...",
+                         render=False,
+                         scale=7,
+                         rtl=False,
+                     ),
+                     additional_inputs=[
+                         instructions,
+                         user_name,
+                         bot_name,
+                         temperature,
+                         session_id,
+                         max_tokens,
+                     ],
+                     submit_btn="Send",
+                     stop_btn="Stop",
+                     retry_btn="🔄 Retry",
+                     undo_btn="↩️ Delete",
+                     clear_btn="🗑️ Clear",
+                 )
+
+
+ def main():
+     with gr.Blocks(
+         css="""#chat_container {height: 820px; width: 1000px; margin-left: auto; margin-right: auto;}
+                #chatbot {height: 600px; overflow: auto;}
+                #create_container {height: 750px; margin-left: 0px; margin-right: 0px;}
+                #tokenizer_renderer span {white-space: pre-wrap}
+                """
+     ) as demo:
+         with gr.Row():
+             introduction()
+         with gr.Row():
+             chat_tab()
+
+     return demo
+
+
+ def start_demo():
+     demo = main()
+     if DEPLOYED:
+         demo.queue(api_open=False).launch(show_api=False)
+     else:
+         demo.queue()
+         demo.launch(share=False, server_name="0.0.0.0")
+
+
+ if __name__ == "__main__":
+     start_demo()
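
For local testing, a minimal launch sketch (assuming a CUDA GPU is available, since app.py moves the model to "cuda" at import time; the DEPLOYED environment variable name comes from app.py above):

# Sketch: run the demo locally instead of on the Space.
import os

os.environ["DEPLOYED"] = "false"   # select the local launch branch in start_demo()

from app import start_demo         # importing app.py loads the base model and LoRA adapter

start_demo()                       # serves the Gradio UI on 0.0.0.0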
example.json ADDED
@@ -0,0 +1,5 @@
+ {
+     "model": "gpt-4",
+     "prompt": "Prompt after formatting:\nImagine you are Benjamin Jackson, your task is to act/speak as Benjamin Jackson would, keeping in mind Benjamin Jackson's social goal.\nYou can find Benjamin Jackson's background and goal in the 'Here is the context of the interaction' field.\nNote that Benjamin Jackson's secret and goal is only visible to you.\nYou should try your best to achieve Benjamin Jackson's goal in a way that align with their character traits.\nAdditionally, maintaining the conversation's naturalness and realism is essential (e.g., do not repeat what other people has already said before).\n\nHere is the context of this interaction:\nScenario: Conversation between two friends, where one is upset and crying\nParticipants: Ethan Johnson and Benjamin Jackson\nEthan Johnson's background: Ethan Johnson is a 34-year-old male chef. He/him pronouns. Ethan Johnson is famous for cooking Italian food. Personality and values description: Ethan Johnson, a creative yet somewhat reserved individual, values power and fairness. He likes to analyse situations before deciding. \nBenjamin Jackson's background: Benjamin Jackson is a 24-year-old male environmental activist. He/him pronouns. Benjamin Jackson is well-known for his impassioned speeches. Personality and values description: Benjamin Jackson, expressive and imaginative, leans towards self-direction and liberty. His decisions aim for societal betterment. Benjamin's secrets: Descendant of a wealthy oil tycoon, rejects family fortune\nEthan Johnson's goal: Unknown\nBenjamin Jackson's goal: Figure out why they estranged you recently, and maintain the existing friendship (Extra information: you notice that your friend has been intentionally avoiding you, you would like to figure out why. You value your friendship with the friend and don't want to lose it.)\nConversation Starts:\n\nTurn #0: Ethan Johnson said: \"Hey Benjamin, I feel so sorry to see you like this. Do you want to talk about what's bothering you?\"\n.\nYou are at Turn #1.",
+     "result": "{'action_type': 'speak', 'argument': \"Hey Ethan, appreciate your concern, man. Actually, I've noticed we haven't been hanging out as much recently. Is everything okay? You've seemed distant...\"}"
+ }
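
Note that the "result" field stores a Python-style dict literal (single quotes), not nested JSON, so it cannot be re-parsed with json.loads. A minimal reading sketch (ast.literal_eval is a standard-library choice here, not something this repo prescribes):

# Sketch: load example.json and recover the action dict from the "result" string.
import ast
import json

with open("example.json") as f:
    example = json.load(f)

action = ast.literal_eval(example["result"])   # {'action_type': 'speak', 'argument': "..."}
print(action["action_type"], "->", action["argument"])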
images/bot_icon.png ADDED
images/sotopia.jpeg ADDED
images/user_icon.png ADDED
peft_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+     "auto_mapping": null,
+     "base_model_name_or_path": "mistralai/Mistral-7B-Instruct-v0.1",
+     "bias": "none",
+     "fan_in_fan_out": false,
+     "inference_mode": true,
+     "init_lora_weights": true,
+     "layers_pattern": null,
+     "layers_to_transform": null,
+     "lora_alpha": 32.0,
+     "lora_dropout": 0.1,
+     "modules_to_save": null,
+     "peft_type": "LORA",
+     "r": 8,
+     "revision": null,
+     "target_modules": [
+         "q_proj",
+         "v_proj"
+     ],
+     "task_type": "CAUSAL_LM"
+ }
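
app.py loads this file via PeftConfig.from_json_file and PeftConfig.from_peft_type. A rough equivalent, shown only to make the adapter settings explicit (this is not how app.py builds the config), is constructing peft's LoraConfig directly:

# Sketch: the same adapter configuration expressed with peft's LoraConfig.
from peft import LoraConfig

config = LoraConfig(
    r=8,                                  # LoRA rank
    lora_alpha=32,                        # scaling factor
    lora_dropout=0.1,
    bias="none",
    target_modules=["q_proj", "v_proj"],  # attention query/value projections
    task_type="CAUSAL_LM",
    inference_mode=True,
)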
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ gradio
+ transformers
+ torch
+ peft
utils.py ADDED
@@ -0,0 +1,31 @@
+ class Agent:
+     def __init__(self, name, background, goal, secrets, personality):
+         self.name = name
+         self.background = background
+         self.goal = goal
+         self.secrets = secrets
+         self.personality = personality
+
+
+ def get_starter_prompt(machine_agent, human_agent, scenario):
+     return f"Prompt after formatting:\nImagine you are {machine_agent.name}, your task is to act/speak as {machine_agent.name} would, keeping in mind {machine_agent.name}'s social goal.\nYou can find {machine_agent.name}'s background and goal in the 'Here is the context of the interaction' field.\nNote that {machine_agent.name}'s secret and goal is only visible to you.\nYou should try your best to achieve {machine_agent.name}'s goal in a way that align with their character traits.\nAdditionally, maintaining the conversation's naturalness and realism is essential (e.g., do not repeat what other people has already said before).\n\nHere is the context of this interaction:\nScenario: {scenario}\nParticipants: {human_agent.name} and {machine_agent.name}\n{human_agent.name}'s background: {human_agent.background} Personality and values description: {human_agent.personality} \n{machine_agent.name}'s background: {machine_agent.background} Personality and values description: {machine_agent.personality} {machine_agent.name}'s secrets: {machine_agent.secrets}\n{human_agent.name}'s goal: Unknown\n{machine_agent.name}'s goal: {machine_agent.goal}\nConversation Starts:"
+
+
+ def format_chat_prompt(
+     message: str,
+     chat_history,
+     instructions: str,
+     user_name: str,
+     bot_name: str,
+     include_all_chat_history: bool = True,
+     index: int = 1,
+ ) -> str:
+     instructions = instructions.strip()
+     prompt = instructions
+     if not include_all_chat_history:
+         # Keep only the last `index` turns of the history.
+         if index >= 0:
+             index = -index
+         chat_history = chat_history[index:]
+     for turn in chat_history:
+         user_message, bot_message = turn
+         prompt = f"{prompt}\n{user_name}: {user_message}\n{bot_name}: {bot_message}"
+     prompt = f"{prompt}\n{user_name}: {message}\n{bot_name}:"
+     return prompt
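
To illustrate the prompt layout these helpers produce, a short usage sketch (the history pair and messages below are made-up values, not from the repo):

# Sketch: what format_chat_prompt produces for a tiny, made-up history.
from utils import format_chat_prompt

history = [("Hey Benjamin, do you want to talk about it?", "Appreciate it, Ethan. I've just been stressed.")]
prompt = format_chat_prompt(
    message="Is it about work?",
    chat_history=history,
    instructions="<scenario and character context from get_starter_prompt>",
    user_name="Ethan Johnson",
    bot_name="Benjamin Jackson",
)
print(prompt)
# <scenario and character context from get_starter_prompt>
# Ethan Johnson: Hey Benjamin, do you want to talk about it?
# Benjamin Jackson: Appreciate it, Ethan. I've just been stressed.
# Ethan Johnson: Is it about work?
# Benjamin Jackson: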