techindia2025 commited on
Commit
645c015
·
verified ·
1 Parent(s): 8b29c0d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +46 -7
app.py CHANGED
@@ -49,11 +49,12 @@ except Exception as e:
49
  # Fallback to a smaller model or provide an error message
50
  raise
51
 
52
- # LangChain prompt template
53
  prompt = ChatPromptTemplate.from_messages([
54
  ("system", SYSTEM_PROMPT),
55
  MessagesPlaceholder(variable_name="history"),
56
- ("human", "{input}")
 
57
  ])
58
 
59
  # Memory store to maintain conversation history
@@ -65,7 +66,45 @@ def get_session_history(session_id: str) -> ChatMessageHistory:
65
  store[session_id] = ChatMessageHistory()
66
  return store[session_id]
67
 
68
- # Create a chain with memory
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
  chain = prompt | llm
70
  chain_with_history = RunnableWithMessageHistory(
71
  chain,
@@ -91,13 +130,13 @@ def gradio_chat(user_message, history):
91
  # Extract the text from the response
92
  response_text = response.content if hasattr(response, "content") else str(response)
93
 
94
- # Format as "Virtual doctor: " response to match the expected format
95
- formatted_response = f"Virtual doctor: {response_text}"
96
 
97
- return formatted_response
98
  except Exception as e:
99
  print(f"Error processing message: {e}")
100
- return "Virtual doctor: I apologize, but I'm experiencing technical difficulties. Please try again."
101
 
102
  # Customize the CSS for better appearance
103
  css = """
 
49
  # Fallback to a smaller model or provide an error message
50
  raise
51
 
52
+ # Modify the prompt template with a clearer structure to prevent system prompt leakage
53
  prompt = ChatPromptTemplate.from_messages([
54
  ("system", SYSTEM_PROMPT),
55
  MessagesPlaceholder(variable_name="history"),
56
+ ("human", "{input}"),
57
+ ("system", "Remember to respond as Virtual Doctor without including system instructions in your reply.")
58
  ])
59
 
60
  # Memory store to maintain conversation history
 
66
  store[session_id] = ChatMessageHistory()
67
  return store[session_id]
68
 
69
+ # Create a more robust filtering chain that will intercept the model's responses
70
+ def filter_response(response_text):
71
+ """Filter out system prompts and format the response correctly"""
72
+ # Remove any system prompt references
73
+ if "system" in response_text.lower() and ("your goal is" in response_text.lower() or "professional virtual doctor" in response_text.lower()):
74
+ # Find the actual doctor response after any system text
75
+ for marker in ["Virtual Doctor:", "Virtual doctor:", "Human:"]:
76
+ if marker in response_text:
77
+ parts = response_text.split(marker)
78
+ if len(parts) > 1:
79
+ # Get the last part after any system prompts
80
+ response_text = parts[-1].strip()
81
+ break
82
+
83
+ # Remove any remaining system prompt text or instructions
84
+ filtered_text = []
85
+ skip_line = False
86
+ for line in response_text.split('\n'):
87
+ lower_line = line.lower()
88
+ if any(phrase in lower_line for phrase in [
89
+ "system:", "your goal is", "start by greeting", "wait for the user",
90
+ "do not make a final diagnosis", "be structured", "ask only 1 or 2"
91
+ ]):
92
+ skip_line = True
93
+ elif any(marker in line for marker in ["Virtual Doctor:", "Virtual doctor:", "Hello", "Thank you"]):
94
+ skip_line = False
95
+
96
+ if not skip_line:
97
+ filtered_text.append(line)
98
+
99
+ clean_text = '\n'.join(filtered_text).strip()
100
+
101
+ # Ensure proper formatting with "Virtual Doctor:" prefix
102
+ if not clean_text.startswith("Virtual Doctor:") and not clean_text.startswith("Virtual doctor:"):
103
+ clean_text = f"Virtual Doctor: {clean_text}"
104
+
105
+ return clean_text
106
+
107
+ # Chain with memory
108
  chain = prompt | llm
109
  chain_with_history = RunnableWithMessageHistory(
110
  chain,
 
130
  # Extract the text from the response
131
  response_text = response.content if hasattr(response, "content") else str(response)
132
 
133
+ # Apply our filtering function to clean up the response
134
+ clean_response = filter_response(response_text)
135
 
136
+ return clean_response
137
  except Exception as e:
138
  print(f"Error processing message: {e}")
139
+ return "Virtual Doctor: I apologize, but I'm experiencing technical difficulties. Please try again."
140
 
141
  # Customize the CSS for better appearance
142
  css = """