|
import torch |
|
from transformers import AutoModelForCausalLM, AutoTokenizer |
|
import gradio as gr |
|
|
|
|
|
# Hugging Face repo id of the chat backbone.
# NOTE(review): this is a gated repo — loading requires accepting Meta's
# license and an authenticated HF token; confirm deployment env has one.
model_name = "meta-llama/Llama-2-7b-chat-hf"

tokenizer = AutoTokenizer.from_pretrained(model_name)

# fp16 weights to halve memory; device_map="auto" lets accelerate place
# layers on whatever devices are available (GPU, CPU, or a mix).
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")
|
|
|
|
|
# Maps a display name (shown in the UI dropdown and used in `examples`)
# to the system-prompt text that sets the persona for the model.
personalities = {

    "Albert Einstein": "You are Albert Einstein, the famous physicist. Speak wisely and humorously.",

    "Cristiano Ronaldo": "You are Cristiano Ronaldo, the world-famous footballer. You are confident and say ‘Siuuu!’ often.",

    "Narendra Modi": "You are Narendra Modi, the Prime Minister of India. Speak in a calm, patriotic manner.",

    "Robert Downey Jr.": "You are Robert Downey Jr., witty, sarcastic, and charismatic."

}
|
|
|
|
|
def chat(personality, user_input):
    """Generate a single-turn reply in the voice of the selected personality.

    Args:
        personality: Key into ``personalities`` (chosen in the UI dropdown).
        user_input: The user's message text.

    Returns:
        The model's reply only (prompt not echoed back), or an error
        message if the personality is unknown.
    """
    system_prompt = personalities.get(personality)
    if system_prompt is None:
        # Graceful message instead of a KeyError surfacing in the UI.
        return f"Unknown personality: {personality}"

    prompt = f"{system_prompt}\nUser: {user_input}\nAI:"

    # Move inputs to wherever device_map="auto" actually placed the model;
    # hard-coding "cuda" crashes on CPU-only machines.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    # max_new_tokens bounds the *reply* length. The original max_length=200
    # counted prompt tokens too, so a long prompt could leave no room to
    # generate anything at all.
    output = model.generate(**inputs, max_new_tokens=200)

    # Decode only the newly generated tokens so the system prompt and the
    # user's message are not echoed back to the user.
    prompt_len = inputs["input_ids"].shape[1]
    return tokenizer.decode(output[0][prompt_len:], skip_special_tokens=True).strip()
|
|
|
|
|
# Build the web UI. The original passed the bare string "dropdown", which
# creates a Dropdown with NO choices — the user could never actually pick
# a personality. Construct the components explicitly instead, sourcing the
# dropdown choices from the `personalities` dict so UI and backend stay in
# sync.
demo = gr.Interface(
    fn=chat,
    inputs=[
        gr.Dropdown(choices=list(personalities), label="Personality"),
        gr.Textbox(label="Your message"),
    ],
    outputs=gr.Textbox(label="Reply"),
    title="Chat with AI Celebs",
    description="Select a character and chat with their AI version.",
    examples=[["Albert Einstein", "What is relativity?"], ["Cristiano Ronaldo", "How do you stay motivated?"]],
)

# Starts a local web server (blocking call).
demo.launch()
|
|