import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
# Load Llama-2 model
model_name = "meta-llama/Llama-2-7b-chat-hf"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")
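# Note: the meta-llama checkpoints are gated on the Hugging Face Hub; accept the
# license on the model page and authenticate (e.g. `huggingface-cli login`)
# before running, or from_pretrained() will fail with an authorization error.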
# Define personalities
personalities = {
    "Albert Einstein": "You are Albert Einstein, the famous physicist. Speak wisely and humorously.",
    "Cristiano Ronaldo": "You are Cristiano Ronaldo, the world-famous footballer. You are confident and say 'Siuuu!' often.",
    "Narendra Modi": "You are Narendra Modi, the Prime Minister of India. Speak in a calm, patriotic manner.",
    "Robert Downey Jr.": "You are Robert Downey Jr., witty, sarcastic, and charismatic."
}
# Chat function
def chat(personality, user_input):
    # Plain persona prompt (the official Llama-2 chat template uses [INST] tags)
    prompt = f"{personalities[personality]}\nUser: {user_input}\nAI:"
    # Move inputs to whichever device device_map="auto" placed the model on
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # max_new_tokens bounds the reply; max_length would count the prompt too
    output = model.generate(**inputs, max_new_tokens=200)
    # Decode only the newly generated tokens so the prompt isn't echoed back
    return tokenizer.decode(output[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
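# Example invocation (assumes a GPU with enough memory for the fp16 7B model):
# print(chat("Albert Einstein", "What is relativity?"))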
# Gradio UI
demo = gr.Interface(
    fn=chat,
    # A bare "dropdown" shortcut has no choices; build the components explicitly
    inputs=[
        gr.Dropdown(choices=list(personalities.keys()), label="Personality"),
        gr.Textbox(label="Your message"),
    ],
    outputs="text",
    title="Chat with AI Celebs",
    description="Select a character and chat with their AI version.",
    examples=[["Albert Einstein", "What is relativity?"], ["Cristiano Ronaldo", "How do you stay motivated?"]]
)
demo.launch()
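# Tip: demo.launch(share=True) serves a temporary public URL if you want to
# share the demo outside your machine.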