ballatraore committed
Commit 8b03fbf · verified · 1 Parent(s): 5fc99ce

Update app.py

Files changed (1)
  1. app.py +6 -6
app.py CHANGED
@@ -1,14 +1,14 @@
 # Use a pipeline as a high-level helper
 
-
+from transformers import pipeline
 import gradio as gr
 # import joblib
 import joblib
-model__=joblib.load('model.joblib')
-tokenizer__=joblib.load("tokenizer.joblib")
+model=joblib.load('model.joblib')
+tokenizer=joblib.load("tokenizer.joblib")
 def blagueur(prompt):
-    inputs = tokenizer__(prompt, return_tensors="pt", truncation=True, padding="max_length", max_length=128)
-    outputs = model__.generate(
+    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, padding="max_length", max_length=128)
+    outputs = model.generate(
         input_ids=inputs["input_ids"],
         attention_mask=inputs["attention_mask"],
         max_length=64,
@@ -16,7 +16,7 @@ def blagueur(prompt):
         do_sample=True,
         temperature=0.9
     )
-    return tokenizer__.decode(outputs[0], skip_special_tokens=True)
+    return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
 
 # Gradio interface configuration
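
The diff is truncated at the comment that opens the Gradio interface configuration, so that part of app.py is not shown in this commit. For orientation only, below is a minimal sketch of how the updated blagueur function could be exposed through Gradio, written as a continuation of the code above. The component choices, labels, title, and the demo variable name are assumptions, not the Space's actual configuration.

# Hypothetical continuation of app.py: expose blagueur through a simple
# text-to-text Gradio interface. Labels and title are illustrative only.
demo = gr.Interface(
    fn=blagueur,                                 # generation function defined above
    inputs=gr.Textbox(label="Prompt"),           # user prompt for the joke
    outputs=gr.Textbox(label="Generated joke"),  # decoded model output
    title="Blagueur",
)

if __name__ == "__main__":
    demo.launch()  # start the Gradio app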