Update app.py
Browse files
app.py
CHANGED
@@ -6,17 +6,18 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
|
|
6 |
# Load the small Spanish spaCy pipeline once at import time; reused for all requests.
# NOTE(review): requires `es_core_news_sm` to be installed (e.g. via
# `python -m spacy download es_core_news_sm`) — otherwise this raises at import.
nlp = spacy.load('es_core_news_sm')
|
7 |
|
8 |
# Eagerly instantiate every supported causal-LM checkpoint at import time,
# keyed by its Hugging Face Hub repo id ("owner/name").
models = {
    repo_id: AutoModelForCausalLM.from_pretrained(repo_id)
    for repo_id in (
        "CRD716/ggml-LLaMa-65B-quantized",
        "RedXeol/bertin-gpt-j-6B-alpaca-4bit-128g",
        "bertin-project/bertin-alpaca-lora-7b",
    )
}
|
13 |
|
14 |
# Matching tokenizer for each checkpoint in `models`, keyed by the same
# Hugging Face Hub repo id so lookups by model name stay consistent.
tokenizers = {
    repo_id: AutoTokenizer.from_pretrained(repo_id)
    for repo_id in (
        "CRD716/ggml-LLaMa-65B-quantized",
        "RedXeol/bertin-gpt-j-6B-alpaca-4bit-128g",
        "bertin-project/bertin-alpaca-lora-7b",
    )
}
|
19 |
|
|
|
20 |
def generate(model_name: str):
|
21 |
model = models[model_name]
|
22 |
tokenizer = tokenizers[model_name]
|
|
|
6 |
# Load the small Spanish spaCy pipeline once at import time; reused for all requests.
# NOTE(review): requires `es_core_news_sm` to be installed (e.g. via
# `python -m spacy download es_core_news_sm`) — otherwise this raises at import.
nlp = spacy.load('es_core_news_sm')
|
7 |
|
8 |
models = {
|
9 |
+
"https://huggingface.co/CRD716/ggml-LLaMa-65B-quantized": AutoModelForCausalLM.from_pretrained("https://huggingface.co/CRD716/ggml-LLaMa-65B-quantized"),
|
10 |
+
"https://huggingface.co/RedXeol/bertin-gpt-j-6B-alpaca-4bit-128g": AutoModelForCausalLM.from_pretrained("https://huggingface.co/RedXeol/bertin-gpt-j-6B-alpaca-4bit-128g"),
|
11 |
+
"https://huggingface.co/bertin-project/bertin-alpaca-lora-7b": AutoModelForCausalLM.from_pretrained("https://huggingface.co/bertin-project/bertin-alpaca-lora-7b")
|
12 |
}
|
13 |
|
14 |
tokenizers = {
|
15 |
+
"https://huggingface.co/CRD716/ggml-LLaMa-65B-quantized": AutoTokenizer.from_pretrained("https://huggingface.co/CRD716/ggml-LLaMa-65B-quantized"),
|
16 |
+
"https://huggingface.co/RedXeol/bertin-gpt-j-6B-alpaca-4bit-128g": AutoTokenizer.from_pretrained("https://huggingface.co/RedXeol/bertin-gpt-j-6B-alpaca-4bit-128g"),
|
17 |
+
"https://huggingface.co/bertin-project/bertin-alpaca-lora-7b": AutoTokenizer.from_pretrained("https://huggingface.co/bertin-project/bertin-alpaca-lora-7b")
|
18 |
}
|
19 |
|
20 |
+
|
21 |
def generate(model_name: str):
|
22 |
model = models[model_name]
|
23 |
tokenizer = tokenizers[model_name]
|