Spaces:
Running
Running
File size: 6,735 Bytes
b43c350 afc213f b43c350 dff4d95 b43c350 afc213f 7ecbdda dff4d95 b43c350 afc213f b43c350 afc213f 9000775 afc213f b43c350 afc213f b43c350 afc213f b43c350 dff4d95 afc213f 3edc984 afc213f 3edc984 afc213f d496cc4 afc213f 7ecbdda afc213f 7ecbdda afc213f b43c350 dff4d95 d3d154f b43c350 afc213f d3d154f afc213f b43c350 afc213f dff4d95 afc213f b43c350 afc213f b43c350 d3d154f dff4d95 b43c350 afc213f b43c350 afc213f b43c350 dff4d95 afc213f b43c350 dff4d95 afc213f dff4d95 afc213f b43c350 afc213f b43c350 dff4d95 b43c350 dff4d95 afc213f b43c350 dff4d95 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 |
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from huggingface_hub import InferenceClient
import torch
import gradio as gr
# Load the KevSun/Personality_LM model and tokenizer.
# The model is pre-trained to score Big Five personality traits from text.
# ignore_mismatched_sizes=True tolerates a classifier-head size mismatch
# between checkpoint and config — NOTE(review): confirm the head weights load.
personality_model = AutoModelForSequenceClassification.from_pretrained("KevSun/Personality_LM", ignore_mismatched_sizes=True)
personality_tokenizer = AutoTokenizer.from_pretrained("KevSun/Personality_LM")
# Initialize the hosted-inference LLM client (Qwen/Qwen2.5-72B-Instruct).
llm_client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")
def analyze_personality(text):
    """
    Analyze personality traits from input text using the Personality_LM model.

    Args:
        text (str): The input text used for personality analysis.

    Returns:
        dict: A dictionary mapping each Big Five trait name to its score.
    """
    # Tokenize the input; cap at 512 tokens, the model's maximum length.
    encoded = personality_tokenizer(
        text, return_tensors='pt', padding=True, truncation=True, max_length=512
    )
    # Inference mode: disable dropout and skip gradient bookkeeping.
    personality_model.eval()
    with torch.no_grad():
        logits = personality_model(**encoded).logits
    # Softmax converts the logits into a probability distribution over traits.
    scores = torch.nn.functional.softmax(logits, dim=-1)[0].tolist()
    # Trait order corresponding to the model's output indices —
    # NOTE(review): assumed to match the model card; confirm.
    trait_names = ("agreeableness", "openness", "conscientiousness",
                   "extraversion", "neuroticism")
    return dict(zip(trait_names, scores))
def generate_response(user_message, traits):
    """
    Generate a chatbot response using the LLM and personality traits.

    Args:
        user_message (str): The user's input message.
        traits (dict): Big Five trait names mapped to scores, as returned
            by analyze_personality().

    Returns:
        str: The chatbot response text accumulated from the stream.
    """
    # Embed the measured trait scores in the system prompt to steer the LLM.
    system_message = (
        "You are a chatbot with the following personality traits: "
        f"Agreeableness: {traits['agreeableness']:.2f}, "
        f"Openness: {traits['openness']:.2f}, "
        f"Conscientiousness: {traits['conscientiousness']:.2f}, "
        f"Extraversion: {traits['extraversion']:.2f}, "
        f"Neuroticism: {traits['neuroticism']:.2f}."
        " Respond to the user's message in a way that reflects a possessive and protective personality."
    )
    messages = [
        {"role": "system", "content": system_message},
        {"role": "user", "content": user_message}
    ]
    # Stream the completion and accumulate token deltas into one string.
    response = ""
    for message in llm_client.chat_completion(
        messages,
        max_tokens=100,
        stream=True,
        temperature=0.9,
        top_p=0.95
    ):
        token = message.choices[0].delta.content
        # Fix: the final/role-only stream chunks carry content=None;
        # skip them instead of raising TypeError on `str + None`.
        if token:
            response += token
    return response
##def respond(user_message, history, personality_text):
## """
## Generate a chatbot response based on user input and personality traits.
## Args:
## user_message (str): The user's input message.
## history (list): A list of message-response tuples to maintain conversation history.
## personality_text (str): The text defining the chatbot's personality.
## Returns:
## tuple: Updated conversation history.
## """
## # Analyze personality traits from the provided text
## traits = analyze_personality(personality_text)
##
## # Generate a response using the LLM and personality traits
## final_response = generate_response(user_message, traits)
##
## # Append the new interaction to the conversation history
## history.append((user_message, final_response))
##
## return history, history
def respond(user_message, history, personality_text):
    """
    Generate a chatbot response based on user input and personality traits.

    Args:
        user_message (str): The user's input message.
        history (list): A list of (message, response) tuples holding the
            conversation history.
        personality_text (str): The text defining the chatbot's personality.

    Returns:
        tuple: (history, history) — the updated history twice, matching the
            two Gradio outputs wired to this callback.
    """
    # Sanitize both inputs before analysis. NOTE(review): despite its name,
    # disable_content_filters APPLIES a word filter (replaces listed words
    # with "[filtered]"); it does not disable anything.
    user_message = disable_content_filters(user_message)
    personality_text = disable_content_filters(personality_text)
    # Analyze personality traits from the provided text
    traits = analyze_personality(personality_text)
    # Generate a response using the LLM and personality traits
    final_response = generate_response(user_message, traits)
    # Append the new interaction to the conversation history
    history.append((user_message, final_response))
    return history, history
def disable_content_filters(text):
    """
    Replace a hard-coded list of flagged words with a "[filtered]" placeholder.

    Despite the name (kept for caller compatibility), this does NOT disable
    any filtering — it applies a simple word-replacement filter. Extend the
    word list, or plug in an external moderation API, for real coverage.

    Args:
        text (str): Raw input text.

    Returns:
        str: The text with every occurrence of a flagged word replaced.
    """
    # Plain substring replacement: also matches inside larger words.
    filtered_words = ["badword1", "badword2", "profanity"]
    for word in filtered_words:
        text = text.replace(word, "[filtered]")
    return text
def personality_demo():
    """
    Build the Gradio interface for the personality-based chatbot.

    Returns:
        gr.Blocks: The assembled Gradio interface object.
    """
    with gr.Blocks() as demo:
        # Page header.
        gr.Markdown("### Personality-Based Chatbot")

        # Free-text description used to derive the bot's personality traits.
        personality_textbox = gr.Textbox(
            label="Define Personality Text (Use direct input if no file)",
            placeholder="Type personality description or paste a sample text here."
        )

        # Chat display, user input box, and a clear-history button.
        chat_display = gr.Chatbot()
        user_input = gr.Textbox(
            label="User Input",
            placeholder="Say something to the chatbot..."
        )
        clear_button = gr.Button("Clear Chat")

        # Submitting the textbox triggers a personality-aware reply.
        user_input.submit(
            respond,
            [user_input, chat_display, personality_textbox],
            [chat_display, chat_display]
        )
        # Clearing resets both wired chatbot outputs to empty histories.
        clear_button.click(lambda: ([], []), None, [chat_display, chat_display])
    return demo
if __name__ == "__main__":
    # Build the Gradio interface and start serving it.
    personality_demo().launch()
|