Taizun committed
Commit 0eace5d · verified · 1 Parent(s): 91369bb

Update app.py

Files changed (1):
  1. app.py +16 -13
app.py CHANGED
@@ -1,18 +1,14 @@
+import os
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import gradio as gr
-import os
 from huggingface_hub import login
 
-HF_TOKEN = os.getenv("HF_TOKEN") # Read token from environment variable
-login(token=HF_TOKEN)
-
-# Load Llama-2 model
-model_name = "meta-llama/Llama-2-7b-chat-hf"
-tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=True)
-model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto", use_auth_token=True)
+# Load API token securely
+HF_TOKEN = os.getenv("HF_TOKEN") # Read token from environment variable
+login(token=HF_TOKEN)
 
-# Define personalities
+# Define personalities BEFORE using them in the dropdown
 personalities = {
     "Albert Einstein": "You are Albert Einstein, the famous physicist. Speak wisely and humorously.",
     "Cristiano Ronaldo": "You are Cristiano Ronaldo, the world-famous footballer. You are confident and say ‘Siuuu!’ often.",
@@ -20,6 +16,11 @@ personalities = {
     "Robert Downey Jr.": "You are Robert Downey Jr., witty, sarcastic, and charismatic."
 }
 
+# Load Llama-2 Model
+model_name = "meta-llama/Llama-2-7b-chat-hf"
+tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=True)
+model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto", use_auth_token=True)
+
 # Chat function
 def chat(personality, user_input):
     prompt = f"{personalities[personality]}\nUser: {user_input}\nAI:"
@@ -27,14 +28,16 @@ def chat(personality, user_input):
     output = model.generate(**inputs, max_length=200)
     return tokenizer.decode(output[0], skip_special_tokens=True)
 
-# Gradio UI
+# Ensure the dropdown has predefined choices
 demo = gr.Interface(
     fn=chat,
-    inputs=["dropdown", "text"],
+    inputs=[
+        gr.Dropdown(choices=list(personalities.keys()), label="Choose a Celebrity"),
+        "text"
+    ],
     outputs="text",
-    title="Chat with AI Celebrities",
+    title="Drapel – Chat with AI Celebrities",
     description="Select a character and chat with their AI version.",
-    examples=[["Albert Einstein", "What is relativity?"], ["Cristiano Ronaldo", "How do you stay motivated?"]]
 )
 
 demo.launch()
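Note: recent transformers releases deprecate use_auth_token in favor of token, and once login() has run, from_pretrained() picks up the cached token automatically. A minimal sketch of an equivalent load under that assumption (not part of this commit):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "meta-llama/Llama-2-7b-chat-hf"

# After huggingface_hub.login(), the cached token is used automatically,
# so no auth argument is strictly needed; token=True is the non-deprecated
# spelling if you prefer to be explicit.
tokenizer = AutoTokenizer.from_pretrained(model_name, token=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,  # half precision to keep the 7B weights manageable
    device_map="auto",          # let accelerate place layers on available devices
    token=True,
)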
 
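Note: the tokenization line inside chat() falls outside the diff context, so the sketch below assumes a typical tokenizer(prompt, return_tensors="pt") call. It also shows two hedged follow-ups rather than part of this commit: max_new_tokens bounds only the completion (max_length=200 also counts prompt tokens), and slicing off the prompt ids keeps the reply from echoing the whole prompt back into the UI:

def chat(personality, user_input):
    prompt = f"{personalities[personality]}\nUser: {user_input}\nAI:"
    # Assumed tokenization step; the commit does not show this line.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # max_new_tokens bounds the completion only; max_length=200 would also
    # count the prompt and can leave little room for a reply on long prompts.
    output = model.generate(**inputs, max_new_tokens=200)
    # Decode only the newly generated tokens so the persona preamble and
    # user text are not repeated in the returned reply.
    return tokenizer.decode(
        output[0][inputs["input_ids"].shape[1]:],
        skip_special_tokens=True,
    )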