Spaces:
Build error
Update app.py
app.py
CHANGED
@@ -1,19 +1,30 @@
-import gradio as gr
-from transformers import AutoTokenizer, AutoModelForCausalLM

-tokenizer = AutoTokenizer.from_pretrained("nomic-ai/gpt4all-13b-snoozy")

-model = AutoModelForCausalLM.from_pretrained("nomic-ai/gpt4all-13b-snoozy")

-def generate_text(prompt):
-    inputs = tokenizer.encode(prompt, return_tensors="pt", max_length=1024, truncation=True)
-    outputs = model.generate(inputs, max_length=1024, num_return_sequences=1)
-    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    return generated_text

-iface = gr.Interface(fn=generate_text, inputs="text", outputs="text", title="GPT-4 Snoozy")
-iface.launch()

#gr.Interface.load("models/nomic-ai/gpt4all-j").launch()
#gr.Interface.load("models/nomic-ai/gpt4all-lora").launch()
+#import gradio as gr
+#from transformers import AutoTokenizer, AutoModelForCausalLM

+#tokenizer = AutoTokenizer.from_pretrained("nomic-ai/gpt4all-13b-snoozy")

+#model = AutoModelForCausalLM.from_pretrained("nomic-ai/gpt4all-13b-snoozy")

+#def generate_text(prompt):
+#    inputs = tokenizer.encode(prompt, return_tensors="pt", max_length=1024, truncation=True)
+#    outputs = model.generate(inputs, max_length=1024, num_return_sequences=1)
+#    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+#    return generated_text

+#iface = gr.Interface(fn=generate_text, inputs="text", outputs="text", title="GPT-4 Snoozy")
+#iface.launch()
+from transformers import LlamaForCausalLM, LlamaTokenizer

+tokenizer = LlamaTokenizer.from_pretrained("/output/path")
+model = LlamaForCausalLM.from_pretrained("/output/path")
+
+prompt = "Hey, are you conscious? Can you talk to me?"
+inputs = tokenizer(prompt, return_tensors="pt")
+
+# Generate
+generate_ids = model.generate(inputs.input_ids, max_length=30)
+output = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+print(output)
#gr.Interface.load("models/nomic-ai/gpt4all-j").launch()
#gr.Interface.load("models/nomic-ai/gpt4all-lora").launch()
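As committed, the new app.py only generates a continuation of one hard-coded prompt and then exits; it no longer launches a Gradio interface, which a Gradio-SDK Space expects in order to serve anything. Below is a minimal sketch, not part of the commit, of how the new Llama snippet could be wrapped back into the gr.Interface pattern that was commented out. It assumes converted LLaMA weights actually exist at the "/output/path" placeholder used in the commit, and the title string and max_length value are illustrative.

# Sketch only: restore a Gradio UI around the Llama model from the commit.
import gradio as gr
from transformers import LlamaForCausalLM, LlamaTokenizer

# "/output/path" is kept as the commit's placeholder for the directory of converted weights.
tokenizer = LlamaTokenizer.from_pretrained("/output/path")
model = LlamaForCausalLM.from_pretrained("/output/path")

def generate_text(prompt):
    # Tokenize the prompt and generate a continuation.
    inputs = tokenizer(prompt, return_tensors="pt")
    generate_ids = model.generate(inputs.input_ids, max_length=256)
    return tokenizer.batch_decode(
        generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )[0]

iface = gr.Interface(fn=generate_text, inputs="text", outputs="text", title="Llama demo")
iface.launch()

With an interface launched like this, the Space should serve a text-in/text-out demo instead of stopping after print(output).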