Manasa1 committed on
Commit 688cf9e · verified · 1 Parent(s): c62f98f

Update app.py

Files changed (1): app.py +60 -14
app.py CHANGED
@@ -7,6 +7,10 @@ from langchain_community.embeddings import HuggingFaceEmbeddings
 from langchain_groq import ChatGroq
 from langchain.memory import ConversationBufferMemory
 from langchain.chains import ConversationalRetrievalChain
+from nltk import word_tokenize, FreqDist, sent_tokenize
+from textblob import TextBlob
+from sklearn.feature_extraction.text import TfidfVectorizer
+from sklearn.decomposition import NMF
 import os
 import nltk
 nltk.download('punkt')
@@ -31,11 +35,56 @@ def setup_vectorstore(documents):
     vectorstores = FAISS.from_documents(doc_chunks, embeddings)
     return vectorstores
 
-def create_chain(vectorstores):
+# Function to extract mentality traits (viewpoints, reasoning patterns, etc.)
+def extract_mentality_traits(documents):
+    text = "\n".join(documents)
+    blob = TextBlob(text)
+
+    # Analyze tone
+    sentiment = blob.sentiment.polarity
+    tone = "neutral"
+    if sentiment > 0.2:
+        tone = "positive"
+    elif sentiment < -0.2:
+        tone = "negative"
+
+    # Common phrases and reasoning patterns
+    words = word_tokenize(text.lower())
+    fdist = FreqDist(words)
+    common_phrases = [word for word, count in fdist.most_common(10) if len(word) > 3]
+
+    # Topic Modeling to identify key viewpoints
+    vectorizer = TfidfVectorizer(stop_words='english')
+    doc_term_matrix = vectorizer.fit_transform([text])
+    nmf = NMF(n_components=2, random_state=1)
+    nmf.fit(doc_term_matrix)
+    topics = []
+    for topic in nmf.components_:
+        topic_words = [vectorizer.get_feature_names_out()[i] for i in topic.argsort()[-5:]]
+        topics.append(", ".join(topic_words))
+
+    mentality_traits = {
+        "tone": tone,
+        "common_phrases": common_phrases,
+        "reasoning_pattern": "deductive" if sentiment > 0 else "inductive",
+        "key_viewpoints": topics
+    }
+    return mentality_traits
+
+def create_chain(vectorstores, mentality_traits):
+    # Create style prompt dynamically based on mentality traits
+    style_prompt = (
+        f"Think and respond in the same mindset as the person from the uploaded document. "
+        f"The author's tone is generally {mentality_traits['tone']}. "
+        f"Their reasoning style is mostly {mentality_traits['reasoning_pattern']} and they often discuss themes such as {', '.join(mentality_traits['key_viewpoints'])}. "
+        f"Use phrases like {', '.join(mentality_traits['common_phrases'])}. "
+        f"Respond as if you are the author, mirroring their values, reasoning style, and mentality in all replies."
+    )
+
     llm = ChatGroq(
         api_key=secret,
         model="llama-3.1-70b-versatile",
-        temperature=0.7 # Slightly higher for more natural, expressive language
+        temperature=0.7
     )
     retriever = vectorstores.as_retriever()
     memory = ConversationBufferMemory(
@@ -48,6 +97,7 @@ def create_chain(vectorstores):
         llm=llm,
         retriever=retriever,
         memory=memory,
+        style_prompt=style_prompt,
         verbose=True
     )
     return chain
@@ -72,12 +122,14 @@ if uploaded_file:
         f.write(uploaded_file.getbuffer())
 
     # Load and process the PDF document
+    documents = load_documents(file_path)
     if "vectorstores" not in st.session_state:
-        st.session_state.vectorstores = setup_vectorstore(load_documents(file_path))
+        st.session_state.vectorstores = setup_vectorstore(documents)
 
-    # Create the conversational chain with style adaptation
+    # Extract mentality traits and create the conversational chain with mentality adaptation
+    mentality_traits = extract_mentality_traits(documents)
    if "conversation_chain" not in st.session_state:
-        st.session_state.conversation_chain = create_chain(st.session_state.vectorstores)
+        st.session_state.conversation_chain = create_chain(st.session_state.vectorstores, mentality_traits)
 
 for message in st.session_state.chat_history:
     with st.chat_message(message["role"]):
@@ -90,17 +142,11 @@ if user_input:
     with st.chat_message("user"):
         st.markdown(user_input)
 
-    # Add prompt for style adaptation
-    style_prompt = (
-        f"Respond in the same style and with the same mentality as the person in the uploaded document. "
-        f"Analyze the PDF to understand the author’s way of thinking, attitudes, personality, and values. "
-        f"Replicate this mentality by capturing their perspective, reasoning style, and tone in your response. "
-        f"Answer the question below as if you were the author, preserving their personality, mindset, and outlook:\n\n{user_input}"
-    )
-
-    response = st.session_state.conversation_chain({"question": style_prompt})
+    # Use the conversation chain to generate the response
+    response = st.session_state.conversation_chain({"question": user_input})
 
     assistant_response = response["answer"]
     st.session_state.chat_history.append({"role": "assistant", "content": assistant_response})
     with st.chat_message("assistant"):
         st.markdown(assistant_response)
+
 
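A few reviewer-style notes on this change follow; the sketches below are hedged suggestions, not part of the commit.

The retained nltk.download('punkt') call is version-sensitive: NLTK 3.9 and later resolve word_tokenize and sent_tokenize through the 'punkt_tab' resource rather than 'punkt'. A version-agnostic sketch (an assumption, not committed code):

    import nltk

    # 'punkt' covers NLTK <= 3.8; 'punkt_tab' covers NLTK >= 3.9.
    # nltk.download returns False for an unknown resource instead of
    # raising, so requesting both is safe on either version.
    for resource in ("punkt", "punkt_tab"):
        nltk.download(resource, quiet=True)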
 
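In extract_mentality_traits, text = "\n".join(documents) assumes load_documents yields plain strings. If it wraps a LangChain PDF loader, as the rest of app.py suggests, it yields Document objects and str.join raises a TypeError. A minimal sketch of the safer join, assuming Document inputs:

    # LangChain Document objects expose their text via .page_content;
    # join that rather than the objects themselves.
    text = "\n".join(doc.page_content for doc in documents)
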
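vectorizer.fit_transform([text]) builds a single-row matrix, and a two-component NMF fitted to one sample cannot separate distinct viewpoints, since both topics are factored from the same row. One option, sketched under the assumption that per-sentence granularity is acceptable, is to reuse the sent_tokenize import this commit already adds:

    from nltk import sent_tokenize
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.decomposition import NMF

    # One row per sentence gives NMF multiple samples to factorize.
    sentences = sent_tokenize(text)
    vectorizer = TfidfVectorizer(stop_words='english')
    doc_term_matrix = vectorizer.fit_transform(sentences)
    nmf = NMF(n_components=2, random_state=1)
    nmf.fit(doc_term_matrix)
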
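ConversationalRetrievalChain defines no style_prompt field, so passing style_prompt=style_prompt to from_llm will fail validation when the chain is constructed. The supported route for injecting the persona text is the question-answering prompt via combine_docs_chain_kwargs; a minimal sketch, assuming the surrounding create_chain variables:

    from langchain.prompts import PromptTemplate
    from langchain.chains import ConversationalRetrievalChain

    # Fold the persona instructions into the QA prompt; {context} and
    # {question} are the variables the default "stuff" chain expects.
    qa_prompt = PromptTemplate(
        input_variables=["context", "question"],
        template=style_prompt + "\n\nContext:\n{context}\n\nQuestion: {question}\nAnswer:",
    )

    chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=retriever,
        memory=memory,
        combine_docs_chain_kwargs={"prompt": qa_prompt},
        verbose=True,
    )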
 
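The new mentality_traits = extract_mentality_traits(documents) line runs on every Streamlit rerun, i.e. on every chat message, repeating the TextBlob and NMF work even though the resulting chain is cached. Mirroring the file's existing session-state pattern would make it once per session; a sketch:

    # Cache trait extraction the same way the vectorstore is cached,
    # so it runs once per upload rather than on every rerun.
    if "mentality_traits" not in st.session_state:
        st.session_state.mentality_traits = extract_mentality_traits(documents)

    if "conversation_chain" not in st.session_state:
        st.session_state.conversation_chain = create_chain(
            st.session_state.vectorstores, st.session_state.mentality_traits
        )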
 
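Lastly, calling the chain directly, conversation_chain({"question": ...}), is the style recent LangChain releases deprecate in favour of invoke; the return shape is unchanged, so response["answer"] still works:

    # Non-deprecated equivalent of the __call__ style used above.
    response = st.session_state.conversation_chain.invoke({"question": user_input})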