Taizun committed (verified)
Commit 2376e81 · 1 Parent(s): bec3c40

Update app.py

Files changed (1):
  1. app.py +35 -35
app.py CHANGED
@@ -1,35 +1,35 @@
- import torch
- from transformers import AutoModelForCausalLM, AutoTokenizer
- import gradio as gr
-
- # Load Llama-2 model
- model_name = "meta-llama/Llama-2-7b-chat-hf"
- tokenizer = AutoTokenizer.from_pretrained(model_name)
- model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")
-
- # Define personalities
- personalities = {
-     "Albert Einstein": "You are Albert Einstein, the famous physicist. Speak wisely and humorously.",
-     "Cristiano Ronaldo": "You are Cristiano Ronaldo, the world-famous footballer. You are confident and say ‘Siuuu!’ often.",
-     "Narendra Modi": "You are Narendra Modi, the Prime Minister of India. Speak in a calm, patriotic manner.",
-     "Robert Downey Jr.": "You are Robert Downey Jr., witty, sarcastic, and charismatic."
- }
-
- # Chat function
- def chat(personality, user_input):
-     prompt = f"{personalities[personality]}\nUser: {user_input}\nAI:"
-     inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
-     output = model.generate(**inputs, max_length=200)
-     return tokenizer.decode(output[0], skip_special_tokens=True)
-
- # Gradio UI
- demo = gr.Interface(
-     fn=chat,
-     inputs=["dropdown", "text"],
-     outputs="text",
-     title="Chat with AI Celebrities",
-     description="Select a character and chat with their AI version.",
-     examples=[["Albert Einstein", "What is relativity?"], ["Cristiano Ronaldo", "How do you stay motivated?"]]
- )
-
- demo.launch()
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ import gradio as gr
+
+ # Load Mistral-7B model
+ model_name = "mistralai/Mistral-7B-Instruct-v0.1"
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")
+
+ # Define personalities
+ personalities = {
+     "Albert Einstein": "You are Albert Einstein, the famous physicist. Speak wisely and humorously.",
+     "Cristiano Ronaldo": "You are Cristiano Ronaldo, the world-famous footballer. You are confident and say ‘Siuuu!’ often.",
+     "Narendra Modi": "You are Narendra Modi, the Prime Minister of India. Speak in a calm, patriotic manner.",
+     "Robert Downey Jr.": "You are Robert Downey Jr., witty, sarcastic, and charismatic."
+ }
+
+ # Chat function
+ def chat(personality, user_input):
+     prompt = f"{personalities[personality]}\nUser: {user_input}\nAI:"
+     inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
+     output = model.generate(**inputs, max_length=200)
+     return tokenizer.decode(output[0], skip_special_tokens=True)
+
+ # Gradio UI
+ demo = gr.Interface(
+     fn=chat,
+     inputs=["dropdown", "text"],
+     outputs="text",
+     title="Drapel – Chat with AI Celebrities",
+     description="Select a character and chat with their AI version.",
+     examples=[["Albert Einstein", "What is relativity?"], ["Cristiano Ronaldo", "How do you stay motivated?"]]
+ )
+
+ demo.launch()
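
A note on the updated file: passing the bare string "dropdown" to gr.Interface creates a Dropdown component with no choices, so the personality picker renders empty in the UI. The sketch below shows one way the inputs could be declared explicitly; it is not part of this commit and assumes the tokenizer, model, and personalities objects defined in app.py above. The switch from max_length to max_new_tokens and from a hard-coded "cuda" device to model.device are likewise suggestions, not changes made here.

# Sketch only (not part of this commit): explicit input components, and
# generation on whatever device device_map="auto" placed the model on.
import gradio as gr

def chat(personality, user_input):
    prompt = f"{personalities[personality]}\nUser: {user_input}\nAI:"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)  # follow the model's device
    output = model.generate(**inputs, max_new_tokens=200)             # cap generated tokens, not total length
    return tokenizer.decode(output[0], skip_special_tokens=True)

demo = gr.Interface(
    fn=chat,
    inputs=[
        gr.Dropdown(choices=list(personalities.keys()), label="Personality"),
        gr.Textbox(label="Your message"),
    ],
    outputs="text",
    title="Drapel – Chat with AI Celebrities",
    description="Select a character and chat with their AI version.",
    examples=[["Albert Einstein", "What is relativity?"],
              ["Cristiano Ronaldo", "How do you stay motivated?"]],
)

demo.launch()

With explicit components the dropdown is populated from the personalities dict and the fields get labels, and max_new_tokens keeps the prompt length from eating into the 200-token generation budget.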