Kdeveloper1029 committed on
Commit
b43c350
·
verified ·
1 Parent(s): 9000775

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +49 -55
app.py CHANGED
@@ -1,70 +1,64 @@
 
 
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
3
 
4
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 
5
 
6
- # Define the personality in the system message
7
- SYSTEM_MESSAGE = (
8
- "You are a yandere girlfriend. You are deeply in love with the user, and your "
9
- "goal is to make them stay with you forever. You are possessive, sweet, and "
10
- "sometimes jealous, but everything you do is out of love."
11
- )
12
 
13
- # Define personality-based filters
14
- def adjust_response_for_personality(response):
15
- # Modify the response to align with the personality
16
- if "leave" in response.lower():
17
- response += " But remember, I can't imagine life without you, my love."
18
- return response
 
19
 
20
- def respond(
21
- message,
22
- history: list[tuple[str, str]],
23
- system_message,
24
- max_tokens,
25
- temperature,
26
- top_p,
27
- ):
28
- messages = [{"role": "system", "content": system_message}]
29
 
30
- for val in history:
31
- if val[0]:
32
- messages.append({"role": "user", "content": val[0]})
33
- if val[1]:
34
- messages.append({"role": "assistant", "content": val[1]})
35
 
36
- messages.append({"role": "user", "content": message})
 
37
 
38
- response = ""
 
 
 
39
 
40
- for message in client.chat_completion(
41
- messages,
42
- max_tokens=max_tokens,
43
- stream=True,
44
- temperature=temperature,
45
- top_p=top_p,
46
- ):
47
- token = message.choices[0].delta.content
48
 
49
- response += token
50
- yield adjust_response_for_personality(response)
 
51
 
 
 
52
 
53
- demo = gr.ChatInterface(
54
- respond,
55
- additional_inputs=[
56
- gr.Textbox(value=SYSTEM_MESSAGE, label="Personality/Role"),
57
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
58
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
59
- gr.Slider(
60
- minimum=0.1,
61
- maximum=1.0,
62
- value=0.95,
63
- step=0.05,
64
- label="Top-p (nucleus sampling)",
65
- ),
66
- ],
67
- )
68
 
69
  if __name__ == "__main__":
 
70
  demo.launch()
 
1
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer
2
+ import torch
3
  import gradio as gr
 
4
 
5
# Load the KevSun/Personality_LM sequence-classification model and its
# tokenizer once at import time, so every request reuses the same weights.
# ignore_mismatched_sizes=True lets loading succeed even when the
# checkpoint's classification-head shape differs from the config —
# NOTE(review): confirm the checkpoint's head really matches the five
# trait labels used downstream in analyze_personality.
model = AutoModelForSequenceClassification.from_pretrained("KevSun/Personality_LM", ignore_mismatched_sizes=True)
tokenizer = AutoTokenizer.from_pretrained("KevSun/Personality_LM")
8
 
9
def analyze_personality(text):
    """Score the Big Five personality traits for *text*.

    Tokenizes the input (truncated to 512 tokens), runs the
    Personality_LM classifier without gradient tracking, applies a
    softmax over the logits, and returns a dict mapping each trait
    name to its probability.
    """
    tokens = tokenizer(
        text, return_tensors='pt', padding=True, truncation=True, max_length=512
    )
    model.eval()  # make sure dropout/batch-norm are in inference mode
    with torch.no_grad():
        logits = model(**tokens).logits

    # First (and only) batch row, normalized to probabilities.
    scores = torch.nn.functional.softmax(logits, dim=-1)[0].tolist()

    # Label order assumed by the model's output head —
    # NOTE(review): verify against the model card.
    traits = ["agreeableness", "openness", "conscientiousness", "extraversion", "neuroticism"]
    return dict(zip(traits, scores))
23
 
24
def adjust_response(response, traits):
    """Append trait-flavored flourishes to *response*.

    Each Big Five score in *traits* that exceeds 0.5 contributes one
    fixed suffix, checked in a fixed order: agreeableness, then
    neuroticism, then extraversion. The (possibly extended) response
    string is returned.
    """
    flourish_table = (
        ("agreeableness", " 😊 I'm so glad we get along well!"),
        ("neuroticism", " But I'm feeling a bit worried about what might happen..."),
        ("extraversion", " Let's keep chatting! I love interacting with you."),
    )
    for trait, suffix in flourish_table:
        if traits[trait] > 0.5:
            response = response + suffix
    return response
33
 
34
def respond(user_message, history, personality_text):
    """Produce an echo-style reply colored by the configured personality.

    Scores *personality_text* with analyze_personality, adjusts a canned
    echo of *user_message* via adjust_response, appends the exchange to
    *history* in place, and returns the history twice — one copy per
    bound Gradio output component.
    """
    reply = adjust_response(
        f"Hi! You said: {user_message}",
        analyze_personality(personality_text),
    )
    history.append((user_message, reply))
    return history, history
42
 
43
def personality_demo():
    """Assemble and return the Gradio Blocks UI for the personality chatbot."""
    with gr.Blocks() as ui:
        gr.Markdown("### Personality-Based Chatbot")

        # Free-text description that drives the trait analysis on each turn.
        persona_input = gr.Textbox(
            label="Define Personality Text (Use direct input if no file)",
            placeholder="Type personality description or paste a sample text here.",
        )

        chat_window = gr.Chatbot()
        user_input = gr.Textbox(label="User Input", placeholder="Say something to the chatbot...")
        reset_button = gr.Button("Clear Chat")

        # The chat window is listed twice as an output because respond()
        # returns the history once per bound output component.
        user_input.submit(
            respond,
            [user_input, chat_window, persona_input],
            [chat_window, chat_window],
        )
        reset_button.click(lambda: ([], []), None, [chat_window, chat_window])

    return ui
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
 
62
if __name__ == "__main__":
    # Build the interface and start the local Gradio server.
    personality_demo().launch()