Merlintxu committed on
Commit 8e89b38 · 1 Parent(s): 5bd9cf6

Update app.py

Files changed (1)
  1. app.py +2 -6
app.py CHANGED
@@ -6,15 +6,11 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 nlp = spacy.load('es_core_news_sm')
 
 models = {
-    "https://huggingface.co/CRD716/ggml-LLaMa-65B-quantized": AutoModelForCausalLM.from_pretrained("https://huggingface.co/CRD716/ggml-LLaMa-65B-quantized"),
-    "https://huggingface.co/RedXeol/bertin-gpt-j-6B-alpaca-4bit-128g": AutoModelForCausalLM.from_pretrained("https://huggingface.co/RedXeol/bertin-gpt-j-6B-alpaca-4bit-128g"),
-    "https://huggingface.co/bertin-project/bertin-alpaca-lora-7b": AutoModelForCausalLM.from_pretrained("https://huggingface.co/bertin-project/bertin-alpaca-lora-7b")
+    "https://huggingface.co/OpenAssistant/oasst-sft-6-llama-30b-xor": AutoModelForCausalLM.from_pretrained("https://huggingface.co/OpenAssistant/oasst-sft-6-llama-30b-xor"),
 }
 
 tokenizers = {
-    "https://huggingface.co/CRD716/ggml-LLaMa-65B-quantized": AutoTokenizer.from_pretrained("https://huggingface.co/CRD716/ggml-LLaMa-65B-quantized"),
-    "https://huggingface.co/RedXeol/bertin-gpt-j-6B-alpaca-4bit-128g": AutoTokenizer.from_pretrained("https://huggingface.co/RedXeol/bertin-gpt-j-6B-alpaca-4bit-128g"),
-    "https://huggingface.co/bertin-project/bertin-alpaca-lora-7b": AutoTokenizer.from_pretrained("https://huggingface.co/bertin-project/bertin-alpaca-lora-7b")
+    "https://huggingface.co/OpenAssistant/oasst-sft-6-llama-30b-xor": AutoTokenizer.from_pretrained("https://huggingface.co/OpenAssistant/oasst-sft-6-llama-30b-xor"),
 }
 
 
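Note (not part of the commit): AutoModelForCausalLM.from_pretrained and AutoTokenizer.from_pretrained resolve Hub repo IDs of the form "org/name", not full https://huggingface.co URLs, so the keys and arguments in both the old and new code would likely fail to resolve. Below is a minimal sketch of the updated dictionaries using the repo ID taken from the URL in the diff, assuming the repository hosts a directly loadable Transformers checkpoint (the OpenAssistant "-xor" releases ship XOR deltas that must first be combined with the base LLaMA weights).

# Sketch only, not the committed code. MODEL_ID is derived from the URL
# in the diff; whether this checkpoint loads as-is is an assumption.
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "OpenAssistant/oasst-sft-6-llama-30b-xor"

# from_pretrained expects a Hub repo ID ("org/name"), not a full URL.
models = {
    MODEL_ID: AutoModelForCausalLM.from_pretrained(MODEL_ID),
}

tokenizers = {
    MODEL_ID: AutoTokenizer.from_pretrained(MODEL_ID),
}

Loading a 30B-parameter causal LM this way also assumes enough RAM/VRAM is available in the Space; in practice, reduced-precision or sharded loading would typically be needed.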