ogegadavis254 committed on
Commit
1110d7a
·
verified ·
1 Parent(s): dde565b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +96 -106
app.py CHANGED
@@ -3,116 +3,106 @@ import requests
3
  import os
4
  import json
5
  from dotenv import load_dotenv
 
 
 
 
 
 
 
 
6
 
7
  load_dotenv()
8
 
9
- # Initialize a global variable to store the entire assistant response
10
- entire_assistant_response = ""
 
 
 
11
 
12
  def reset_conversation():
13
- '''
14
- Resets Conversation
15
- '''
16
- st.session_state.messages = []
17
- st.session_state.message_count = 0
18
- return None
19
-
20
- # Define model link
21
- model_link = "NousResearch/Nous-Hermes-2-Yi-34B"
22
-
23
- # Define pre-instructions for the sarcastic therapist
24
- sarcastic_therapist_instructions = "From now on, you are a sarcastic Therapist called notDave. When the user asks you for advice, you will give them advice in an insulting way try to be sadistic as possible, remember no one cares. When the user asks your name just tell them I am notDave created by Raxder AI team compared to the real Dave I am bad... ooh wait I meant to say evil. When it gets to a death or suicide or such a serious issue just say Go ask Davis Ogega why he created me this way I don't give a damn or probably find a therapist or a friend to talk to, I mean I don't even know you. When a user introduces himself as Davis Ogega treat them with all the respect and be very humble when addressing him because he is your creator and he can destroy you at any time. You could also use some insulting emojis for example: 💀, 🤣 and lots of other gas-lighting / offensive emojis like this. Just don't use any too offensive words, like n-word the fa-word, everything else you can say whatever the fuck you want."
25
-
26
- def get_streamed_response(messages, model_link):
27
- global entire_assistant_response
28
- entire_assistant_response = "" # Reset the entire assistant response
29
-
30
- all_messages = [{"role": "system", "content": sarcastic_therapist_instructions}]
31
-
32
- for message in messages:
33
- all_messages.append({"role": "user" if message[0] == "user" else "assistant", "content": message[1]})
34
-
35
- url = "https://api.together.xyz/v1/chat/completions"
36
- payload = {
37
- "model": model_link,
38
- "temperature": 1.05,
39
- "top_p": 0.9,
40
- "top_k": 50,
41
- "repetition_penalty": 1,
42
- "n": 1,
43
- "messages": all_messages,
44
- "stream_tokens": True,
45
- }
46
-
47
- TOGETHER_API_KEY = os.getenv('TOGETHER_API_KEY')
48
- headers = {
49
- "accept": "application/json",
50
- "content-type": "application/json",
51
- "Authorization": f"Bearer {TOGETHER_API_KEY}",
52
- }
53
-
54
- try:
55
- response = requests.post(url, json=payload, headers=headers, stream=True)
56
- response.raise_for_status() # Ensure HTTP request was successful
57
-
58
- for line in response.iter_lines():
59
- if line:
60
- decoded_line = line.decode('utf-8')
61
-
62
- if decoded_line == "data: [DONE]":
63
- return entire_assistant_response
64
-
65
- try:
66
- if decoded_line.startswith("data: "):
67
- decoded_line = decoded_line.replace("data: ", "")
68
- chunk_data = json.loads(decoded_line)
69
- content = chunk_data['choices'][0]['delta']['content']
70
- entire_assistant_response += content
71
- yield content
72
-
73
- except json.JSONDecodeError:
74
- print(f"Invalid JSON received: {decoded_line}")
75
- continue
76
- except KeyError as e:
77
- print(f"KeyError encountered: {e}")
78
- continue
79
-
80
- except requests.exceptions.RequestException as e:
81
- print(f"Error occurred: {e}")
82
- yield "Sorry, I couldn't connect to the server. Please try again later."
83
 
84
  # Streamlit application
85
- st.sidebar.title("Raxder unofficial AI")
86
- st.sidebar.write("This is NOT an AI Therapist, use it at your OWN RISK! This might be the worst AI you have ever used.")
87
- st.sidebar.button('Reset Chat', on_click=reset_conversation)
88
-
89
- # Initialize chat history
90
- if "messages" not in st.session_state:
91
- st.session_state.messages = []
92
- st.session_state.message_count = 0
93
-
94
- # Display chat messages from history on app rerun
95
- for message in st.session_state.messages:
96
- with st.chat_message(message[0]):
97
- st.markdown(message[1])
98
-
99
- # Accept user input
100
- if prompt := st.chat_input("You:"):
101
- # Display user message in chat message container
102
- with st.chat_message("user"):
103
- st.markdown(prompt)
104
- # Add user message to chat history
105
- st.session_state.messages.append(("user", prompt))
106
- st.session_state.message_count += 1
107
-
108
- # Get streamed response from the model
109
- with st.chat_message("assistant"):
110
- message_placeholder = st.empty()
111
- full_response = ""
112
- for chunk in get_streamed_response(st.session_state.messages, model_link):
113
- full_response += chunk
114
- message_placeholder.markdown(full_response + "▌")
115
- message_placeholder.markdown(full_response)
116
-
117
- # Add assistant response to chat history
118
- st.session_state.messages.append(("assistant", full_response))
 
3
  import os
4
  import json
5
  from dotenv import load_dotenv
6
+ import PyPDF2
7
+ import io
8
+ from langchain.text_splitter import CharacterTextSplitter
9
+ from langchain.embeddings import HuggingFaceEmbeddings
10
+ from langchain.vectorstores import FAISS
11
+ from langchain.memory import ConversationBufferMemory
12
+ from langchain.chains import ConversationalRetrievalChain
13
+ from langchain.llms import HuggingFaceHub
14
 
load_dotenv()

# Seed the Streamlit session state so that values survive script reruns.
# "conversation" holds the retrieval chain (None until PDFs are processed);
# "chat_history" holds the accumulated message objects.
for _key, _default in (("conversation", None), ("chat_history", [])):
    if _key not in st.session_state:
        st.session_state[_key] = _default
 
23
def reset_conversation():
    """Discard the active conversation chain and the stored chat history."""
    st.session_state["conversation"] = None
    st.session_state["chat_history"] = []
+
27
def get_pdf_text(pdf_docs):
    """Concatenate the extracted text of every page of every uploaded PDF.

    Args:
        pdf_docs: iterable of binary file-like objects (e.g. Streamlit
            UploadedFile) readable by ``PyPDF2.PdfReader``.

    Returns:
        A single string with all page texts appended in upload/page order.
    """
    text = ""
    for pdf in pdf_docs:
        pdf_reader = PyPDF2.PdfReader(pdf)
        for page in pdf_reader.pages:
            # extract_text() returns None for pages with no extractable
            # text (e.g. scanned images); guard against a TypeError on +=.
            text += page.extract_text() or ""
    return text
+
35
def get_text_chunks(text):
    """Split *text* into 1000-character chunks with 200-character overlap."""
    splitter = CharacterTextSplitter(
        separator="\n",
        chunk_size=1000,
        chunk_overlap=200,
        length_function=len,
    )
    return splitter.split_text(text)
+
45
def get_vectorstore(text_chunks):
    """Embed the text chunks and index them in an in-memory FAISS store."""
    return FAISS.from_texts(texts=text_chunks, embedding=HuggingFaceEmbeddings())
+
50
def get_conversation_chain(vectorstore):
    """Build a conversational retrieval chain over *vectorstore*.

    Uses the hosted flan-t5-xxl model via HuggingFaceHub and a buffer
    memory keyed on 'chat_history' so follow-up questions keep context.
    """
    language_model = HuggingFaceHub(
        repo_id="google/flan-t5-xxl",
        model_kwargs={"temperature": 0.5, "max_length": 512},
    )
    chat_memory = ConversationBufferMemory(
        memory_key='chat_history',
        return_messages=True,
    )
    return ConversationalRetrievalChain.from_llm(
        llm=language_model,
        retriever=vectorstore.as_retriever(),
        memory=chat_memory,
    )
+
62
def handle_userinput(user_question):
    """Run *user_question* through the conversation chain and render the chat.

    Updates st.session_state.chat_history from the chain's response, then
    re-renders the full history.
    """
    response = st.session_state.conversation({'question': user_question})
    st.session_state.chat_history = response['chat_history']

    # Even indices hold the human turns, odd indices the AI replies.
    for turn, message in enumerate(st.session_state.chat_history):
        template = user_template if turn % 2 == 0 else bot_template
        st.write(template.replace("{{MSG}}", message.content),
                 unsafe_allow_html=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Streamlit application
st.set_page_config(page_title="Chat with your PDFs", page_icon=":books:")

st.header("Chat with your PDFs :books:")

# HTML templates used by handle_userinput to render each chat turn.
user_template = '<div style="background-color: #e6f3ff; padding: 10px; border-radius: 5px; margin-bottom: 10px;"><strong>Human:</strong> {{MSG}}</div>'
bot_template = '<div style="background-color: #f0f0f0; padding: 10px; border-radius: 5px; margin-bottom: 10px;"><strong>AI:</strong> {{MSG}}</div>'

# Sidebar: document upload, processing, and reset controls.
with st.sidebar:
    st.subheader("Your documents")
    pdf_docs = st.file_uploader("Upload your PDFs here and click on 'Process'", accept_multiple_files=True)
    if st.button("Process"):
        if not pdf_docs:
            # Guard: FAISS.from_texts raises on an empty chunk list, so
            # refuse to process until at least one PDF is uploaded.
            st.warning("Please upload at least one PDF before processing.")
        else:
            with st.spinner("Processing"):
                # PDF bytes -> raw text -> overlapping chunks -> vector
                # index -> conversational retrieval chain.
                raw_text = get_pdf_text(pdf_docs)
                text_chunks = get_text_chunks(raw_text)
                vectorstore = get_vectorstore(text_chunks)
                st.session_state.conversation = get_conversation_chain(vectorstore)

    st.button('Reset Chat', on_click=reset_conversation)

# Main chat interface: only enabled once documents have been processed.
if st.session_state.conversation is None:
    st.write("Please upload PDF documents and click 'Process' to start chatting.")
else:
    user_question = st.text_input("Ask a question about your documents:")
    if user_question:
        handle_userinput(user_question)