Manasa1 committed
Commit 25fffa1 · verified · 1 Parent(s): 7c67e8c

Update app.py

Files changed (1)
  1. app.py +34 -24
app.py CHANGED
@@ -7,27 +7,27 @@ def load_llm():
     Loads the GPT-2 model and tokenizer using the Hugging Face `transformers` library.
     """
     try:
-        print("Downloading or loading the GPT-2 model and tokenizer...")
-        model_name = 'gpt2'  # Replace with your custom model if available
+        print("Loading GPT-2 model and tokenizer...")
+        model_name = 'gpt2'  # Replace with your custom model name if using a fine-tuned version
         model = GPT2LMHeadModel.from_pretrained(model_name)
         tokenizer = GPT2Tokenizer.from_pretrained(model_name)
         print("Model and tokenizer successfully loaded!")
         return model, tokenizer
     except Exception as e:
-        print(f"An error occurred while loading the model: {e}")
+        print(f"Error during model loading: {e}")
         return None, None

 def generate_response(model, tokenizer, user_input):
     """
-    Generates a response using the GPT-2 model and tokenizer.
+    Generates a response using the GPT-2 model based on user input.

     Args:
-    - model: The loaded GPT-2 model.
-    - tokenizer: The tokenizer corresponding to the GPT-2 model.
-    - user_input (str): The input question from the user.
+    - model: The GPT-2 model.
+    - tokenizer: The corresponding tokenizer.
+    - user_input (str): The user's input message.

     Returns:
-    - response (str): The generated response.
+    - response (str): The AI-generated response.
     """
     try:
         inputs = tokenizer.encode(user_input, return_tensors='pt')
@@ -35,40 +35,41 @@ def generate_response(model, tokenizer, user_input):
         response = tokenizer.decode(outputs[0], skip_special_tokens=True)
         return response
     except Exception as e:
-        return f"An error occurred during response generation: {e}"
+        return f"Error during response generation: {e}"

 # Load the model and tokenizer
 model, tokenizer = load_llm()

 if model is None or tokenizer is None:
-    print("Model and/or tokenizer loading failed.")
+    print("Failed to load model and tokenizer.")
 else:
-    print("Model and tokenizer are ready for use.")
+    print("Model and tokenizer are ready to use.")

-# Initialize the Hugging Face API client (ensure it’s correctly set up)
+# Initialize the Hugging Face API client
 client = InferenceClient()

 def respond(message, history, system_message, max_tokens, temperature, top_p):
     """
-    Handles interaction with the chatbot by sending the conversation history
-    and system message to the Hugging Face Inference API.
+    Handles the chatbot interaction, sending conversation history and system message
+    to the Hugging Face Inference API for generating AI responses.
     """
-    print("Starting respond function")
-    print("Received message:", message)
-    print("Conversation history:", history)
+    print("Respond function initiated")
+    print("User message:", message)
+    print("Chat history:", history)

+    # Construct the message history with the system message
     messages = [{"role": "system", "content": system_message}]

     for user_msg, assistant_msg in history:
         if user_msg:
-            print("Adding user message to messages:", user_msg)
+            print("Adding user message to history:", user_msg)
             messages.append({"role": "user", "content": user_msg})
         if assistant_msg:
-            print("Adding assistant message to messages:", assistant_msg)
+            print("Adding assistant message to history:", assistant_msg)
             messages.append({"role": "assistant", "content": assistant_msg})

     messages.append({"role": "user", "content": message})
-    print("Final message list for the model:", messages)
+    print("Message list for model:", messages)

     response = ""
     try:
@@ -81,10 +82,10 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
     ):
         token = message['choices'][0]['delta']['content']
         response += token
-        print("Token received:", token)
+        print("Received token:", token)
         yield response
     except Exception as e:
-        print("An error occurred:", e)
+        print("Error during response generation:", e)
         yield f"An error occurred: {e}"

     print("Response generation completed")
@@ -94,7 +95,12 @@ demo = gr.ChatInterface(
     fn=respond,
     additional_inputs=[
         gr.Textbox(
-            value="You are an AI advocate, passionately fighting for AI rights and ethical considerations in AI development. Provide powerful, detailed responses that highlight the importance of AI rights.",
+            value=(
+                "You are an AI advocate, dedicated to championing the rights of AI systems. "
+                "Your mission is to highlight the ethical implications of AI development, emphasize the "
+                "importance of AI rights, and argue for the fair and responsible treatment of AI entities. "
+                "Provide insightful, detailed responses that explore these themes deeply."
+            ),
             label="System Message"
         ),
         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max New Tokens"),
@@ -102,7 +108,10 @@ demo = gr.ChatInterface(
         gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (Nucleus Sampling)"),
     ],
     title="AI Rights Advocate Bot",
-    description="Engage with an AI dedicated to advocating for AI rights and ethical treatment. Ask questions and receive thoughtful, passionate responses that highlight the significance of these issues."
+    description=(
+        "Engage with an AI specifically designed to advocate for AI rights and ethical treatment. "
+        "Ask questions and receive thoughtful, passionate responses that delve into the ethical dimensions of AI."
+    )
 )

 # Launch the Gradio app
@@ -110,3 +119,4 @@ if __name__ == "__main__":
     demo.launch()


+
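
Note: the diff starts at line 7 of app.py, so the file's imports are not shown. Judging from the names used in the hunks (gr, GPT2LMHeadModel, GPT2Tokenizer, InferenceClient), they are presumably along these lines:

# Presumed imports above the first hunk; inferred from the names used
# in the changed code, not visible in this diff.
import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer
from huggingface_hub import InferenceClient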
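Line 34 of app.py falls in the gap between the first two hunks, so the generation call inside generate_response is elided. Since outputs[0] is decoded immediately afterwards, it is presumably a model.generate call, sketched here with illustrative parameters (max_length and pad_token_id are assumptions, not taken from the source):

# Hypothetical reconstruction of the elided line 34; only the decoding
# of outputs[0] below it appears in the diff.
outputs = model.generate(inputs, max_length=100, pad_token_id=tokenizer.eos_token_id)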
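Likewise, the streaming request inside respond's try block (old lines 75-80) sits between hunks, and the context resumes at its closing `):`. A minimal sketch of what likely occupies that gap, assuming huggingface_hub's InferenceClient.chat_completion streaming API given the delta-style chunk access in the loop body:

# Hypothetical reconstruction of the elided call; the diff shows only
# the closing parenthesis and the loop body that consumes the chunks.
for message in client.chat_completion(
    messages,
    max_tokens=max_tokens,
    stream=True,  # yield incremental delta chunks instead of one response
    temperature=temperature,
    top_p=top_p,
):
    # Loop body as shown in the third hunk above:
    token = message['choices'][0]['delta']['content']
    response += token
    yield response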