CCockrum committed
Commit 3efbe63 · verified · 1 Parent(s): f77b42d

Update app.py

Files changed (1): app.py +26 −9
app.py CHANGED
@@ -7,6 +7,7 @@ from langchain_huggingface import HuggingFaceEndpoint
 from langchain_core.prompts import PromptTemplate
 from langchain_core.output_parsers import StrOutputParser
 from transformers import pipeline
+from langdetect import detect
 
 # Use environment variables for keys
 HF_TOKEN = os.getenv("HF_TOKEN")
@@ -97,6 +98,18 @@ def generate_follow_up(user_text):
     cleaned = ["Would you like to explore this topic further?"]
     return random.choice(cleaned)
 
+from langdetect import detect
+
+def ensure_english(text):
+    """Check if the model accidentally generated a non-English response."""
+    try:
+        detected_lang = detect(text)
+        if detected_lang != "en":
+            return "⚠️ Sorry, I only respond in English. Can you rephrase your question?"
+    except:
+        return "⚠️ Language detection failed. Please ask your question again."
+    return text
+
 def get_response(system_message, chat_history, user_text, max_new_tokens=512):
     """
     Generates HAL's answer with depth and a follow-up question.
@@ -135,21 +148,25 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=512):
 
     # Instruct the model to generate a detailed, in-depth answer.
     prompt = PromptTemplate.from_template(
-        (
-            "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
-            "User: {user_text}.\n [/INST]\n"
-            "AI: Please provide a detailed explanation in depth. "
-            "Ensure your response covers the topic thoroughly and is written in a friendly, conversational style, "
-            "starting with a phrase like 'Certainly!', 'Of course!', or 'Great question!'."
-            "Answer exclusively in English, and do not include extra commentary." + style_clause +
-            "\nHAL:"
-        )
+        (
+            "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
+            "User: {user_text}.\n [/INST]\n"
+            "AI: Please provide a detailed explanation in depth. "
+            "Ensure your response covers the topic thoroughly and is written in a friendly, conversational style, "
+            "starting with a phrase like 'Certainly!', 'Of course!', or 'Great question!'."
+            "🚨 IMPORTANT: Answer exclusively in **English only**. Do not generate responses in any other language."
+            "\nHAL:"
+        )
     )
 
     chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')
     response = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=filtered_history))
     response = response.split("HAL:")[-1].strip()
 
+    # 🚨 Ensure the response is in English
+    response = ensure_english(response)
+
+
     # Fallback in case the generated answer is empty
     if not response:
         response = "Certainly, here is an in-depth explanation: [Fallback explanation]."