#import gradio as gr
#from transformers import AutoTokenizer, AutoModelForCausalLM
#tokenizer = AutoTokenizer.from_pretrained("nomic-ai/gpt4all-13b-snoozy")
#model = AutoModelForCausalLM.from_pretrained("nomic-ai/gpt4all-13b-snoozy")
#def generate_text(prompt):
# inputs = tokenizer.encode(prompt, return_tensors="pt", max_length=1024, truncation=True)
# outputs = model.generate(inputs, max_length=1024, num_return_sequences=1)
# generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
# return generated_text
#iface = gr.Interface(fn=generate_text, inputs="text", outputs="text", title="GPT-4 Snoozy")
#iface.launch()
from transformers import LlamaForCausalLM, LlamaTokenizer

# Load the GPT4All 13B Snoozy checkpoint (LLaMA architecture) from the local models directory
tokenizer = LlamaTokenizer.from_pretrained("models/nomic-ai/gpt4all-13b-snoozy")
model = LlamaForCausalLM.from_pretrained("models/nomic-ai/gpt4all-13b-snoozy")

prompt = "Hey, are you conscious? Can you talk to me?"
inputs = tokenizer(prompt, return_tensors="pt")

# Generate a short continuation of the prompt and decode it back to text
generate_ids = model.generate(inputs.input_ids, max_length=30)
output = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
print(output)
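# A minimal sketch (not part of the original app) of how the model and tokenizer
# loaded above could be exposed through a Gradio interface, in the spirit of the
# commented-out example at the top of this file. The generate_text helper, the
# max_new_tokens value, and the title are illustrative assumptions.
#import gradio as gr
#
#def generate_text(prompt):
#    inputs = tokenizer(prompt, return_tensors="pt")
#    generate_ids = model.generate(inputs.input_ids, max_new_tokens=200)
#    return tokenizer.batch_decode(generate_ids, skip_special_tokens=True)[0]
#
#iface = gr.Interface(fn=generate_text, inputs="text", outputs="text", title="GPT4All 13B Snoozy")
#iface.launch()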
#gr.Interface.load("models/nomic-ai/gpt4all-j").launch()
#gr.Interface.load("models/nomic-ai/gpt4all-lora").launch()
#nomic-ai/gpt4all-13b-snoozy
#nomic-ai/gpt4all-j
#nomic-ai/gpt4all-lora
#nomic-ai/gpt4all-lora-epoch-3
#nomic-ai/gpt4all-j-lora
#gr.Interface.load("models/nomic-ai/gpt4all-j").launch()
#gr.Interface.load("models/nomic-ai/gpt4all-j").launch()
#gr.Interface.load("models/nomic-ai/gpt4all-j").launch()
#gr.Interface.load("models/nomic-ai/gpt4all-j").launch()