Merlintxu committed on
Commit
4d5764e
·
1 Parent(s): e795121

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -21
app.py CHANGED
@@ -10,29 +10,11 @@ MODELS = {
10
  "GPT2": "datificate/gpt2-small-spanish",
11
  }
12
 
13
- TOKENIZERS = {
14
- "T5": None,
15
- "LSpanishGPT2": None,
16
- "GPT2": None,
17
- }
18
-
19
- # Load Bloom model separately with memory optimizations
20
- model_bloom = AutoModelForCausalLM.from_pretrained("facebook/xglm-1.7B", low_cpu_mem_usage=True)
21
- tokenizer_bloom = AutoTokenizer.from_pretrained("facebook/xglm-1.7B")
22
-
23
  # Define your function
24
  def generate_and_analyze(model_name, input_text):
25
- # Load the model from the dictionary using the selected model name
26
- if model_name == "bloom":
27
- #model = model_bloom
28
- tokenizer = "" #tokenizer_bloom
29
-
30
- else:
31
- model = MODELS[model_name]
32
- tokenizer = TOKENIZERS[model_name]
33
- if tokenizer is None: # Load tokenizer if not already done
34
- tokenizer = AutoTokenizer.from_pretrained(model)
35
- TOKENIZERS[model_name] = tokenizer
36
 
37
  text_generator = pipeline('text-generation', model=model, tokenizer=tokenizer, device=0) # Use GPU if available
38
  result = text_generator(input_text, max_length=250, do_sample=True)[0]
 
10
  "GPT2": "datificate/gpt2-small-spanish",
11
  }
12
 
13
+ ´
 
 
 
 
 
 
 
 
 
14
  # Define your function
15
  def generate_and_analyze(model_name, input_text):
16
+ tokenizer = AutoTokenizer.from_pretrained(model)
17
+ TOKENIZERS[model_name] = tokenizer
 
 
 
 
 
 
 
 
 
18
 
19
  text_generator = pipeline('text-generation', model=model, tokenizer=tokenizer, device=0) # Use GPU if available
20
  result = text_generator(input_text, max_length=250, do_sample=True)[0]