JimmyTarbender ToniDan committed on
Commit
09b06e9
·
0 Parent(s):

Duplicate from ToniDan/DanToniGPT2FormalInformal

Browse files

Co-authored-by: Tony Dan <[email protected]>

Files changed (4) hide show
  1. .gitattributes +34 -0
  2. README.md +13 -0
  3. app.py +275 -0
  4. requirements.txt +3 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: DanToniGPT2FormalInformal
3
+ emoji: 💻
4
+ colorFrom: indigo
5
+ colorTo: red
6
+ sdk: streamlit
7
+ sdk_version: 1.10.0
8
+ app_file: app.py
9
+ pinned: false
10
+ duplicated_from: ToniDan/DanToniGPT2FormalInformal
11
+ ---
12
+
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,275 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import numpy as np
3
+ import pandas as pd
4
+ import os
5
+ import torch
6
+ import torch.nn as nn
7
+ from transformers.activations import get_activation
8
+ from transformers import AutoTokenizer, AutoModelForCausalLM
9
+
10
+
11
# Page title, linking to the model card that documents all prompt outlines.
# BUG FIX: the original URL repeated the "BigSalmon/" namespace twice
# ("huggingface.co/BigSalmon/BigSalmon/..."), producing a broken link.
st.title('GPT2: To see all prompt outlines: https://huggingface.co/BigSalmon/InformalToFormalLincoln91Paraphrase')

# Prefer GPU when available; used by the Submit handler below.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
14
+
15
@st.cache(allow_output_mutation=True)
def get_model():
    """Load and cache the paraphrase checkpoint.

    Cached with st.cache (Streamlit 1.10 API, per sdk_version in README)
    so the large model download and instantiation happen only once per
    server process rather than on every script rerun.

    Returns:
        tuple: (model, tokenizer) for
        "BigSalmon/InformalToFormalLincoln92Paraphrase".

    NOTE(review): the original kept ~25 alternative checkpoints as
    commented-out pairs; removed as dead code. Other candidates live under
    the "BigSalmon/" namespace on the Hugging Face Hub if a swap is needed.
    The model is deliberately left on CPU here — the helper functions below
    feed it CPU tensors.
    """
    tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln92Paraphrase")
    model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln92Paraphrase")
    return model, tokenizer
99
+
100
# Load the (cached) model and tokenizer once at module import.
model, tokenizer = get_model()

# Few-shot prompt template: pairs of "informal english" sentences and their
# "Style of Abraham Lincoln" rewrites, ending with an open
# "informal english: " slot for the user to complete.  Used as the default
# value of the text area in the form below.
g = """informal english: garage band has made people who know nothing about music good at creating music.
Translated into the Style of Abraham Lincoln: garage band ( offers the uninitiated in music the ability to produce professional-quality compositions / catapults those for whom music is an uncharted art the ability the realize masterpieces / stimulates music novice's competency to yield sublime arrangements / begets individuals of rudimentary musical talent the proficiency to fashion elaborate suites ).
informal english: chrome extensions can make doing regular tasks much easier to get done.
Translated into the Style of Abraham Lincoln: chrome extensions ( yield the boon of time-saving convenience / ( expedite the ability to / unlock the means to more readily ) accomplish everyday tasks / turbocharges the velocity with which one can conduct their obligations ).
informal english: broadband is finally expanding to rural areas, a great development that will thrust them into modern life.
Translated into the Style of Abraham Lincoln: broadband is ( ( finally / at last / after years of delay ) arriving in remote locations / springing to life in far-flung outposts / inching into even the most backwater corners of the nation ) that will leap-frog them into the twenty-first century.
informal english: google translate has made talking to people who do not share your language easier.
Translated into the Style of Abraham Lincoln: google translate ( imparts communicability to individuals whose native tongue differs / mitigates the trials of communication across linguistic barriers / hastens the bridging of semantic boundaries / mollifies the complexity of multilingual communication / avails itself to the internationalization of discussion / flexes its muscles to abet intercultural conversation / calms the tides of linguistic divergence ).
informal english: corn fields are all across illinois, visible once you leave chicago.
Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago.
informal english: """

# Sidebar controls:
#  - number_of_outputs (5-100): how many top next-token candidates
#    BestProbs5 ("Get Top") expands.
#  - log_nums (50-600): how many top next-token candidates the
#    "Submit" handler lists.
number_of_outputs = st.sidebar.slider("Number of Outputs", 5, 100)
log_nums = st.sidebar.slider("How Many Log Outputs?", 50, 600)
116
+
117
def BestProbs(prompt):
    """Show the top-10 next tokens for `prompt`, each with its follow-ups.

    For every candidate token, writes it to the page (wrapped in `$...$`)
    and calls BestProbs2 on the extended prompt to show the 20 tokens most
    likely to follow it.

    Returns:
        str: the formatted `$word $\\n` string for the last candidate shown.

    NOTE(review): the scraped source made the indentation of `return f`
    ambiguous (inside vs. after the loop); it is placed after the loop here
    so all 10 candidates are processed — confirm against upstream.
    """
    prompt = prompt.strip()
    token_ids = tokenizer.encode(prompt)
    model_input = torch.tensor([token_ids])
    logits, _ = model(model_input, past_key_values=None, return_dict=False)
    logits = logits[0, -1]  # next-token logits for the final position
    # (the original also computed a softmax here; the result was unused,
    # and the dim-less call is deprecated — removed.)
    best_logits, best_indices = logits.topk(10)
    best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
    formatted = ""
    for word in best_words[0:10]:
        print("_______")
        st.write(f"${word} $\n")
        formatted = f"${word} $\n"
        BestProbs2(prompt + word)
    return formatted
134
+
135
def BestProbs2(prompt):
    """Print and display the 20 most likely next tokens after `prompt`.

    Helper for BestProbs: one line per candidate, written both to stdout
    and to the Streamlit page.
    """
    prompt = prompt.strip()
    token_ids = tokenizer.encode(prompt)
    model_input = torch.tensor([token_ids])
    logits, _ = model(model_input, past_key_values=None, return_dict=False)
    logits = logits[0, -1]  # next-token logits for the final position
    # (unused softmax and the no-op `myinput = myinput` from the original
    # were removed.)
    best_logits, best_indices = logits.topk(20)
    best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
    for word in best_words[0:20]:
        print(word)
        st.write(word)
148
+
149
def LogProbs(prompt):
    """Build a 10x20 table of two-step next-token candidates.

    For each of the 10 most likely next tokens after `prompt`, computes the
    20 tokens most likely to follow *that* token, and returns the result as
    a DataFrame whose columns are the first-level candidates and whose rows
    are their second-level continuations.  The frame is printed, written to
    the page, and returned.

    BUG FIX: the original reassigned `prompt` inside the loop, so each
    later column was conditioned on the prompt plus *all previously tried*
    candidate words concatenated, not just its own candidate.  Each column
    now conditions on `prompt + candidate` only.

    NOTE(review): if two candidates decode to the same string they collapse
    into one dict key (the original 200-element dict literal had the same
    property); pandas would then raise on unequal column lengths — confirm
    whether deduplication is needed.

    Returns:
        pandas.DataFrame: 20 rows x 10 columns of candidate follow-up tokens.
    """
    base = prompt.strip()
    token_ids = tokenizer.encode(base)
    model_input = torch.tensor([token_ids])
    logits, _ = model(model_input, past_key_values=None, return_dict=False)
    logits = logits[0, -1]
    best_logits, best_indices = logits.topk(10)
    first_level = [tokenizer.decode([idx.item()]) for idx in best_indices]

    second_level = []
    for word in first_level[0:10]:
        print("_______")
        extended = (base + word).strip()
        ext_ids = tokenizer.encode(extended)
        ext_input = torch.tensor([ext_ids])
        ext_logits, _ = model(ext_input, past_key_values=None, return_dict=False)
        ext_logits = ext_logits[0, -1]
        _, follow_indices = ext_logits.topk(20)
        second_level.extend(tokenizer.decode([idx.item()]) for idx in follow_indices)

    # One column of 20 follow-ups per first-level candidate.  Replaces the
    # original hand-written 200-element dict literal.
    d = {first_level[j]: second_level[j * 20:(j + 1) * 20] for j in range(10)}
    df = pd.DataFrame(data=d)
    print(df)
    st.write(df)
    return df
195
+
196
def BestProbs5(prompt):
    """Expand the top next tokens and sample short continuations of each.

    For each of the top `number_of_outputs` (sidebar slider) next-token
    candidates: append it to the prompt, display the extended prompt, then
    display 3 sampled five-token continuations from run_generate.

    NOTE(review): the original stored the extended prompt in a local named
    `g`, shadowing the module-level prompt template — renamed here.
    """
    prompt = prompt.strip()
    token_ids = tokenizer.encode(prompt)
    model_input = torch.tensor([token_ids])
    logits, _ = model(model_input, past_key_values=None, return_dict=False)
    logits = logits[0, -1]  # next-token logits (unused softmax removed)
    best_logits, best_indices = logits.topk(number_of_outputs)
    best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
    for word in best_words[0:number_of_outputs]:
        print("\n")
        extended = prompt + word
        st.write(extended)
        samples = run_generate(extended, "hey")
        st.write(samples)
213
+
214
def run_generate(text, bad_words):
    """Sample 3 continuations of exactly 5 new tokens after `text`.

    Args:
        text: the prompt to continue.
        bad_words: whitespace-separated words to ban during sampling
            (each is tokenized with a leading space before banning).

    Returns:
        list[str]: 3 decoded continuations with the prompt text stripped.
    """
    input_ids = tokenizer.encode(text, return_tensors='pt')
    # Hoisted: the original re-encoded `text` a second time just to count
    # tokens; the prompt length is already the width of `input_ids`.
    prompt_len = input_ids.shape[1]
    # Two hard-coded banned token ids carried over from the original;
    # their meaning is not documented — TODO confirm what they decode to.
    bad_word_ids = [[7829], [40940]]
    for bad_word in bad_words.split():
        # Leading space so the ban matches the mid-sentence token form.
        bad_word_ids.append(tokenizer(" " + bad_word).input_ids)
    sample_outputs = model.generate(
        input_ids,
        do_sample=True,
        max_length=prompt_len + 5,
        min_length=prompt_len + 5,
        top_k=50,
        temperature=1.0,
        num_return_sequences=3,
        bad_words_ids=bad_word_ids,
    )
    continuations = []
    for sequence in sample_outputs:
        decoded = tokenizer.decode(sequence)
        # Strip the echoed prompt so only the new tokens remain.
        continuations.append(decoded.replace(text, ""))
    print(continuations)
    return continuations
240
+
241
# --- Streamlit UI ----------------------------------------------------------
with st.form(key='my_form'):
    prompt = st.text_area(label='Enter sentence', value=g, height=500)
    submit_button = st.form_submit_button(label='Submit')
    submit_button2 = st.form_submit_button(label='Fast Forward')
    submit_button3 = st.form_submit_button(label='Fast Forward 2.0')
    submit_button4 = st.form_submit_button(label='Get Top')

if submit_button:
    # List the top `log_nums` (sidebar slider) candidate next tokens.
    with torch.no_grad():
        token_ids = tokenizer.encode(prompt)
        # NOTE(review): the input is moved to `device`, but the model is
        # never moved off CPU anywhere in this file — on a CUDA machine
        # this would raise a device-mismatch error; confirm intent.
        model_input = torch.tensor([token_ids]).to(device)
        logits, _ = model(model_input, past_key_values=None, return_dict=False)
        logits = logits[0, -1]
        best_logits, best_indices = logits.topk(log_nums)
        best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
        # (unused `words`, `best_probabilities`, and the dead
        # `text.append(...)` mutation from the original were removed.)
        st.write(best_words)

if submit_button2:
    # Two-level candidate table (see LogProbs).
    print("----")
    st.write("___")
    m = LogProbs(prompt)
    st.write("___")
    st.write(m)
    st.write("___")

if submit_button3:
    print("----")
    st.write("___")
    # BUG FIX: the original passed the function object itself to st.write
    # (`st.write(BestProbs)`), rendering nothing useful; call it on the
    # prompt instead.
    st.write(BestProbs(prompt))

if submit_button4:
    BestProbs5(prompt)
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ git+https://github.com/huggingface/transformers.git
2
+ torch
3
+ accelerate