Update app.py
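This commit completes two generate() arguments that had been left without values in predict_gpt (max_length and length_penalty, a syntax error as written), gives the function an actual return value in place of a bare return, and fills in the same missing max_length in predict_t5.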
app.py CHANGED
@@ -28,8 +28,8 @@ def predict_gpt(text, model, tokenizer, temperature=1.0):
         num_beams=4,
         temperature= temperature,
         top_p=0.75,
-        max_length=
-        length_penalty =
+        max_length=356,
+        length_penalty = 1.5,
         eos_token_id = tokenizer.eos_token_id,
         pad_token_id = tokenizer.pad_token_id,
         num_return_sequences = 1,
@@ -38,7 +38,7 @@ def predict_gpt(text, model, tokenizer, temperature=1.0):
     )
     decode = lambda x : tokenizer.decode(x, skip_special_tokens=True)
     generated_text = list(map(decode, out['sequences']))[0].replace(text,'')
-    return
+    return generated_text
 
 def predict_t5(text, model, tokenizer, temperature=1.2):
     input_ids = tokenizer.encode(text, return_tensors="pt")
@@ -48,7 +48,7 @@ def predict_t5(text, model, tokenizer, temperature=1.2):
         num_beams=4,
         temperature=temperature,
         top_p=0.35,
-        max_length=
+        max_length=356,
         length_penalty = -1.0,
         output_attentions = True,
         return_dict_in_generate=True,
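For context, here is a sketch of what the patched predict_gpt plausibly looks like as a whole. The diff only shows lines 28-44, so everything outside the visible hunks is an assumption: the encode line mirrors the one shown in predict_t5, the opening of the model.generate(...) call is inferred, and return_dict_in_generate=True is inferred from the out['sequences'] access below it.

def predict_gpt(text, model, tokenizer, temperature=1.0):
    # Assumed: this line is not in the diff; it mirrors the one visible in predict_t5.
    input_ids = tokenizer.encode(text, return_tensors="pt")
    # Assumed: the opening of the generate(...) call sits above the shown hunk.
    out = model.generate(
        input_ids,
        num_beams=4,
        temperature=temperature,
        top_p=0.75,
        max_length=356,      # filled in by this commit
        length_penalty=1.5,  # filled in by this commit
        eos_token_id=tokenizer.eos_token_id,
        pad_token_id=tokenizer.pad_token_id,
        num_return_sequences=1,
        return_dict_in_generate=True,  # assumed: out['sequences'] is indexed below
    )
    decode = lambda x: tokenizer.decode(x, skip_special_tokens=True)
    # Strip the prompt so only the continuation is returned.
    generated_text = list(map(decode, out['sequences']))[0].replace(text, '')
    return generated_text  # added by this commit; the function previously returned None

# Example usage (assumed checkpoint; the Space's actual model isn't shown in the diff):
#   from transformers import GPT2LMHeadModel, GPT2Tokenizer
#   tok = GPT2Tokenizer.from_pretrained("gpt2")
#   mdl = GPT2LMHeadModel.from_pretrained("gpt2")
#   print(predict_gpt("Once upon a time", mdl, tok))

One caveat on the visible parameters: with num_beams=4 and no do_sample=True, transformers treats temperature and top_p as unused sampling flags under pure beam search, and the hunks don't show whether the app enables sampling elsewhere in the call. Note also that predict_t5 keeps length_penalty = -1.0, which under beam search encourages shorter outputs, while the new max_length=356 matches the cap chosen for predict_gpt.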