Update app.py
app.py CHANGED
@@ -1,18 +1,16 @@
 from fastapi import FastAPI
 import uvicorn
+from transformers import AutoTokenizer, AutoModelForCausalLM
 
 model_name = "DavidAU/Llama-3.2-4X3B-MOE-Hell-California-Uncensored-10B-GGUF"
 
-from transformers import AutoModel, AutoTokenizer, TextStreamer
-import torch
-
 # Load model and tokenizer
-
-
-
-
-
-)
+
+model_id = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF"
+filename = "tinyllama-1.1b-chat-v1.0.Q6_K.gguf"
+
+tokenizer = AutoTokenizer.from_pretrained(model_id, gguf_file=filename)
+model = AutoModelForCausalLM.from_pretrained(model_id, gguf_file=filename)
 
 def llama2_chat(prompt):
     inputs = tokenizer(input_text, return_tensors="pt", padding=True, truncation=True)
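For context, the new loading path relies on transformers' GGUF support: passing gguf_file to from_pretrained dequantizes the single-file checkpoint back into regular torch tensors (this needs the gguf package installed). The commit also leaves two pre-existing issues untouched: the now-unused model_name still points at the old MoE GGUF repo, and llama2_chat tokenizes an undefined input_text instead of its prompt argument, so the app would still fail at request time. Below is a minimal sketch of what a corrected app.py could look like; the /chat endpoint, generation settings, pad-token fallback, and server block are illustrative assumptions, not part of the commit.

from fastapi import FastAPI
import uvicorn
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF"
filename = "tinyllama-1.1b-chat-v1.0.Q6_K.gguf"

# Load model and tokenizer; transformers dequantizes the GGUF weights on load
tokenizer = AutoTokenizer.from_pretrained(model_id, gguf_file=filename)
model = AutoModelForCausalLM.from_pretrained(model_id, gguf_file=filename)

# Tokenizers rebuilt from GGUF metadata may lack a pad token, and padding=True
# raises without one; falling back to EOS is an assumption, not in the commit
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

app = FastAPI()

def llama2_chat(prompt):
    # Fix: tokenize the `prompt` argument, not the undefined `input_text`
    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
    outputs = model.generate(**inputs, max_new_tokens=128)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Hypothetical endpoint and the usual Spaces port; the diff does not show these
@app.get("/chat")
def chat(prompt: str):
    return {"response": llama2_chat(prompt)}

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)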