nugentc committed
Commit 691bbb0 · 1 Parent(s): 482fbde

use the correct tokenizer
Files changed (1): app.py (+1 -1)
app.py CHANGED
@@ -38,7 +38,7 @@ def feedback(text):
     num_return_sequences=1
     batch = grammar_tokenizer([text],truncation=True,padding='max_length',max_length=64, return_tensors="pt")
     corrections= grammar_model.generate(**batch,max_length=64,num_beams=2, num_return_sequences=num_return_sequences, temperature=1.5)
-    corrected_text = tokenizer.decode(corrections[0], clean_up_tokenization_spaces=True, skip_special_tokens=True)
+    corrected_text = grammar_tokenizer.decode(corrections[0], clean_up_tokenization_spaces=True, skip_special_tokens=True)
     print("The corrections are: ", corrections)
     if corrected_text == text:
         feedback = f'Looks good! Keep up the good work'
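
For context, a minimal sketch of the surrounding app.py that this commit assumes is shown below. The checkpoint name, the imports, and the return value are illustrative assumptions, not taken from the diff; only the feedback body mirrors the changed hunk. The point of the fix is that the output ids must be decoded with grammar_tokenizer, the same tokenizer that encoded the input for grammar_model, rather than an unrelated tokenizer variable.

# Sketch of the assumed surrounding context (not part of the commit).
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

checkpoint = "path/to/grammar-correction-model"  # placeholder, not from the diff
grammar_tokenizer = AutoTokenizer.from_pretrained(checkpoint)
grammar_model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

def feedback(text):
    num_return_sequences = 1
    # Encode the input with the grammar model's own tokenizer.
    batch = grammar_tokenizer([text], truncation=True, padding="max_length",
                              max_length=64, return_tensors="pt")
    corrections = grammar_model.generate(**batch, max_length=64, num_beams=2,
                                         num_return_sequences=num_return_sequences)
    # Decode with the same tokenizer that encoded the input; this is the line
    # the commit fixes (it previously called a mismatched `tokenizer`).
    corrected_text = grammar_tokenizer.decode(corrections[0],
                                              clean_up_tokenization_spaces=True,
                                              skip_special_tokens=True)
    if corrected_text == text:
        return "Looks good! Keep up the good work"
    return corrected_text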