MrVicente committed on
Commit
a7c96e0
·
1 Parent(s): 6e18097

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -7
app.py CHANGED
@@ -4,12 +4,6 @@ from transformers import (
4
  BartTokenizer
5
  )
6
 
7
- model_name = 'unlisboa/bart_qa_assistant'
8
- tokenizer = BartTokenizer.from_pretrained(model_name)
9
- device = get_device()
10
- model = BartForConditionalGeneration.from_pretrained(model_name).to(device)
11
- model.eval()
12
-
13
  def get_device():
14
  # If there's a GPU available...
15
  if torch.cuda.is_available():
@@ -24,12 +18,18 @@ def get_device():
24
  device = torch.device("cpu")
25
  return device
26
 
 
 
 
 
 
 
27
  def run_bart(question, censor):
28
  print(question, censor)
29
 
30
  model_input = tokenizer(question_input, truncation=True, padding=True, return_tensors="pt")
31
  generated_answers_encoded = model.generate(input_ids=model_input["input_ids"].to(device),
32
- attention_mask=model_input["attention_mask"].to(device),
33
  #bad_words_ids=bad_words_ids,
34
  force_words_ids=None,
35
  min_length=1,
 
4
  BartTokenizer
5
  )
6
 
 
 
 
 
 
 
7
  def get_device():
8
  # If there's a GPU available...
9
  if torch.cuda.is_available():
 
18
  device = torch.device("cpu")
19
  return device
20
 
21
+ model_name = 'unlisboa/bart_qa_assistant'
22
+ tokenizer = BartTokenizer.from_pretrained(model_name)
23
+ device = get_device()
24
+ model = BartForConditionalGeneration.from_pretrained(model_name).to(device)
25
+ model.eval()
26
+
27
  def run_bart(question, censor):
28
  print(question, censor)
29
 
30
  model_input = tokenizer(question_input, truncation=True, padding=True, return_tensors="pt")
31
  generated_answers_encoded = model.generate(input_ids=model_input["input_ids"].to(device),
32
+ attention_mask=model_input["attention_mask"].to(device),
33
  #bad_words_ids=bad_words_ids,
34
  force_words_ids=None,
35
  min_length=1,