Merlintxu committed
Commit c5f3404 · 1 Parent(s): d49c9f2

Update app.py

Files changed (1): app.py (+5 -2)
app.py CHANGED
@@ -16,8 +16,11 @@ MODELS = {
 def generate_and_analyze(model_name, input_text):
     model = MODELS[model_name]
     tokenizer = AutoTokenizer.from_pretrained(model)
+    if model == "OpenAssistant":
+        tokenizer = AutoTokenizer.from_pretrained("OpenAssistant")
+        model = AutoModelForCausalLM.from_pretrained("OpenAssistant")
+
     TOKENIZERS[model_name] = tokenizer
-
     text_generator = pipeline('text-generation', model=model, tokenizer=tokenizer, device=0)  # Use GPU if available
     result = text_generator(input_text, max_length=250, do_sample=True)[0]
     return result['generated_text']
@@ -26,7 +29,7 @@ def generate_and_analyze(model_name, input_text):
 iface = gr.Interface(
     fn=generate_and_analyze,
     inputs=[
-        gr.inputs.Dropdown(choices=list(MODELS.keys()) + ["bloom"], label="Model"),
+        gr.inputs.Dropdown(choices=list(MODELS.keys()), label="Model"),
         gr.inputs.Textbox(lines=2, label="Input Text")
     ],
     outputs="text"
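
For reference, a minimal sketch of how app.py reads once this commit is applied. The imports, the MODELS/TOKENIZERS dictionaries, and the final launch() call are not visible in the diff, so they are assumptions here (the "gpt2" entry is only a placeholder); the bare "OpenAssistant" repo id and device=0 are kept as committed, and gr.inputs.Dropdown / gr.inputs.Textbox are the legacy Gradio 2.x input classes.

# Sketch of app.py after commit c5f3404 -- not the verbatim file.
# Assumptions (not shown in the diff): the imports, the MODELS/TOKENIZERS
# dictionaries (placeholder entries below), and the final launch() call.
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

MODELS = {"gpt2": "gpt2"}   # placeholder; the real model map is not in the diff
TOKENIZERS = {}

def generate_and_analyze(model_name, input_text):
    model = MODELS[model_name]
    tokenizer = AutoTokenizer.from_pretrained(model)
    if model == "OpenAssistant":
        # Special case introduced by this commit; the bare "OpenAssistant"
        # repo id is kept exactly as committed.
        tokenizer = AutoTokenizer.from_pretrained("OpenAssistant")
        model = AutoModelForCausalLM.from_pretrained("OpenAssistant")

    TOKENIZERS[model_name] = tokenizer
    text_generator = pipeline('text-generation', model=model, tokenizer=tokenizer, device=0)  # Use GPU if available
    result = text_generator(input_text, max_length=250, do_sample=True)[0]
    return result['generated_text']

iface = gr.Interface(
    fn=generate_and_analyze,
    inputs=[
        gr.inputs.Dropdown(choices=list(MODELS.keys()), label="Model"),  # legacy Gradio 2.x API
        gr.inputs.Textbox(lines=2, label="Input Text")
    ],
    outputs="text"
)

iface.launch()  # assumed entry point; not part of this diff

Note that current Gradio releases have removed the gr.inputs namespace; the equivalent components there are gr.Dropdown and gr.Textbox.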