nugentc committed
Commit 70fb29f · 1 Parent(s): ecae342

fix input text issue

Files changed (1):
  app.py +1 -1
app.py CHANGED
@@ -25,7 +25,7 @@ def feedback(text):
     # tokenized_phrases = grammar_tokenizer([text], return_tensors='pt', padding=True)
     # corrections = grammar_model.generate(**tokenized_phrases)
     # corrections = grammar_tokenizer.batch_decode(corrections, skip_special_tokens=True)
-    batch = grammar_tokenizer([input_text],truncation=True,padding='max_length',max_length=64, return_tensors="pt").to(torch_device)
+    batch = grammar_tokenizer([text],truncation=True,padding='max_length',max_length=64, return_tensors="pt").to(torch_device)
     corrections= grammar_model.generate(**batch,max_length=64,num_beams=2, num_return_sequences=num_return_sequences, temperature=1.5)
     print("The corrections are: ", corrections)
     if len(corrections) == 0:
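For context, a minimal, self-contained sketch of the call chain this diff touches, with the fix applied: the function parameter is `text`, so tokenizing the undefined `input_text` raised a NameError. This is not the actual app.py; the checkpoint name is a placeholder, and `torch_device` and `num_return_sequences` are assumed to be defined elsewhere in the app.

import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Assumed setup; the real checkpoint and globals live elsewhere in app.py.
MODEL_NAME = "prithivida/grammar_error_correcter_v1"  # placeholder grammar-correction checkpoint
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
grammar_tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
grammar_model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME).to(torch_device)
num_return_sequences = 1

def feedback(text):
    # Tokenize the argument `text` (the bug was tokenizing an undefined `input_text`).
    batch = grammar_tokenizer([text], truncation=True, padding='max_length',
                              max_length=64, return_tensors="pt").to(torch_device)
    # Beam-search for candidate corrections; temperature has no effect unless do_sample=True.
    corrections = grammar_model.generate(**batch, max_length=64, num_beams=2,
                                         num_return_sequences=num_return_sequences)
    # Decode token ids back into corrected strings.
    return grammar_tokenizer.batch_decode(corrections, skip_special_tokens=True)

A call such as feedback("she go to school yesterday") would then return the model's corrected candidate sentences.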