mipatov committed
Commit b42b839 · 1 Parent(s): b17f4f4

Update app.py

Files changed (1): app.py +4 -4
app.py CHANGED
@@ -28,8 +28,8 @@ def predict_gpt(text, model, tokenizer, temperature=1.0):
         num_beams=4,
         temperature= temperature,
         top_p=0.75,
-        max_length=512,
-        length_penalty = 5.5,
+        max_length=356,
+        length_penalty = 1.5,
         eos_token_id = tokenizer.eos_token_id,
         pad_token_id = tokenizer.pad_token_id,
         num_return_sequences = 1,
@@ -38,7 +38,7 @@ def predict_gpt(text, model, tokenizer, temperature=1.0):
     )
     decode = lambda x : tokenizer.decode(x, skip_special_tokens=True)
     generated_text = list(map(decode, out['sequences']))[0].replace(text,'')
-    return "Описание : "+generated_text
+    return generated_text
 
 def predict_t5(text, model, tokenizer, temperature=1.2):
     input_ids = tokenizer.encode(text, return_tensors="pt")
@@ -48,7 +48,7 @@ def predict_t5(text, model, tokenizer, temperature=1.2):
         num_beams=4,
         temperature=temperature,
         top_p=0.35,
-        max_length=512,
+        max_length=356,
         length_penalty = -1.0,
         output_attentions = True,
         return_dict_in_generate=True,
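
For context, below is a minimal sketch of what predict_gpt could look like after this commit, reconstructed only from the visible hunks. The encoding line, the do_sample flag, and return_dict_in_generate for the GPT call are assumptions (they sit outside the diff context); everything else mirrors the changed lines above.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

def predict_gpt(text, model, tokenizer, temperature=1.0):
    # Encoding step is not shown in the diff; assumed to mirror predict_t5.
    input_ids = tokenizer.encode(text, return_tensors="pt")
    out = model.generate(
        input_ids,
        do_sample=True,                 # assumption: temperature/top_p only take effect when sampling
        num_beams=4,
        temperature=temperature,
        top_p=0.75,
        max_length=356,                 # lowered from 512 in this commit
        length_penalty=1.5,             # lowered from 5.5 in this commit
        eos_token_id=tokenizer.eos_token_id,
        pad_token_id=tokenizer.pad_token_id,
        num_return_sequences=1,
        return_dict_in_generate=True,   # assumption: the code reads out['sequences'] as a dict
    )
    decode = lambda x: tokenizer.decode(x, skip_special_tokens=True)
    # Strip the prompt from the decoded output; the hard-coded "Описание : "
    # prefix is dropped by this commit.
    generated_text = list(map(decode, out['sequences']))[0].replace(text, '')
    return generated_text

The parameter changes presumably aim at shorter outputs: max_length=356 caps generation length, and reducing length_penalty from 5.5 to 1.5 weakens the beam-search bias toward long sequences.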