ballatraore committed
Commit 589ac10 · verified · Parent: 86e1f23

Update app.py

Files changed (1): app.py (+3, -3)
app.py CHANGED
@@ -9,10 +9,10 @@ import joblib
 # tokenizer = BartTokenizer.from_pretrained(model_name)
 # model = BartForConditionalGeneration.from_pretrained(model_name)
 model__ = joblib.load("model.joblib")
-tokenizer_ = joblib.load("tokenizer.joblib")
+tokenizer__ = joblib.load("tokenizer.joblib")
 
 def blagueur(prompt):
-    inputs = tokenizer_(prompt, return_tensors="pt", truncation=True, padding="max_length", max_length=128)
+    inputs = tokenizer__(prompt, return_tensors="pt", truncation=True, padding="max_length", max_length=128)
     outputs = model__.generate(
         input_ids=inputs["input_ids"],
         attention_mask=inputs["attention_mask"],
@@ -20,7 +20,7 @@ def blagueur(prompt):
         num_beams=5,
         do_sample=True,
         temperature=0.9)
-    return tokenizer_.decode(outputs[0], skip_special_tokens=True)
+    return tokenizer__.decode(outputs[0], skip_special_tokens=True)
 # Configuration de l'interface Gradio
 demo = gr.Interface(
     fn=blagueur,
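
For readers skimming the diff, here is a minimal sketch of app.py as it plausibly stands after this commit. Only the lines visible in the two hunks above are confirmed; the import of gradio as gr, the generate argument hidden at file line 19, the gr.Interface arguments after fn=blagueur (inputs, outputs, title), and the final demo.launch() call are assumptions added for illustration, not part of the commit.

    import gradio as gr  # assumed: gr.Interface is used below
    import joblib

    # tokenizer = BartTokenizer.from_pretrained(model_name)
    # model = BartForConditionalGeneration.from_pretrained(model_name)
    model__ = joblib.load("model.joblib")
    tokenizer__ = joblib.load("tokenizer.joblib")

    def blagueur(prompt):
        # Tokenize the prompt into fixed-length tensors, generate with beam
        # search plus sampling, and decode the best sequence back to text.
        inputs = tokenizer__(prompt, return_tensors="pt", truncation=True,
                             padding="max_length", max_length=128)
        outputs = model__.generate(
            input_ids=inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            # file line 19 (another generate argument) is not visible in this diff
            num_beams=5,
            do_sample=True,
            temperature=0.9)
        return tokenizer__.decode(outputs[0], skip_special_tokens=True)

    # Gradio interface configuration (inputs/outputs below are assumed, not shown in the diff)
    demo = gr.Interface(
        fn=blagueur,
        inputs=gr.Textbox(label="Prompt"),
        outputs=gr.Textbox(label="Blague"),
    )

    if __name__ == "__main__":
        demo.launch()

Under these assumptions, calling blagueur("Raconte-moi une blague") tokenizes the prompt, runs beam search with sampling (num_beams=5, do_sample=True, temperature=0.9), and returns the decoded top sequence. The commit itself only renames tokenizer_ to tokenizer__ so that the load, the tokenization call, and the decode call all use one consistent name.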