awacke1 commited on
Commit
885834e
·
1 Parent(s): d94cec6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -26
app.py CHANGED
@@ -16,7 +16,6 @@ from bs4 import BeautifulSoup
16
  from collections import deque
17
  from audio_recorder_streamlit import audio_recorder
18
 
19
-
20
  def generate_filename(prompt, file_type):
21
  central = pytz.timezone('US/Central')
22
  safe_date_time = datetime.now(central).strftime("%m%d_%I%M")
@@ -135,23 +134,6 @@ def read_file_content(file,max_length):
135
  return ""
136
 
137
 
138
- # Sidebar and global
139
- openai.api_key = os.getenv('OPENAI_KEY')
140
- st.set_page_config(page_title="GPT Streamlit Document Reasoner",layout="wide")
141
- menu = ["htm", "txt", "xlsx", "csv", "md", "py"] #619
142
- choice = st.sidebar.selectbox("Output File Type:", menu)
143
- model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
144
-
145
- # Audio, transcribe, GPT:
146
- filename = save_and_play_audio(audio_recorder)
147
- if filename is not None:
148
- transcription = transcribe_audio(openai.api_key, filename, "whisper-1")
149
- st.write(transcription)
150
- gptOutput = chat_with_model(transcription, '') # *************************************
151
- filename = generate_filename(transcription, choice)
152
- create_file(filename, transcription, gptOutput)
153
- st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
154
-
155
 
156
  def chat_with_file_contents(prompt, file_content):
157
  conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
@@ -160,6 +142,7 @@ def chat_with_file_contents(prompt, file_content):
160
  conversation.append({'role': 'assistant', 'content': file_content})
161
  response = openai.ChatCompletion.create(model=model_choice, messages=conversation)
162
  return response['choices'][0]['message']['content']
 
163
 
164
  # Sidebar and global
165
  openai.api_key = os.getenv('OPENAI_KEY')
@@ -173,16 +156,18 @@ filename = save_and_play_audio(audio_recorder)
173
  if filename is not None:
174
  transcription = transcribe_audio(openai.api_key, filename, "whisper-1")
175
  st.write(transcription)
176
- gptOutput = chat_with_model(transcription, '')
177
  filename = generate_filename(transcription, choice)
178
  create_file(filename, transcription, gptOutput)
179
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
180
 
 
181
  def main():
182
  user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100)
183
 
184
  collength, colupload = st.columns([2,3]) # adjust the ratio as needed
185
  with collength:
 
186
  max_length = st.slider("File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000)
187
  with colupload:
188
  uploaded_file = st.file_uploader("Add a file for context:", type=["xml", "json", "xlsx","csv","html", "htm", "md", "txt"])
@@ -208,17 +193,27 @@ def main():
208
  else:
209
  if st.button(f"Chat about Section {i+1}"):
210
  st.write('Reasoning with your inputs...')
211
- response = chat_with_model(user_prompt, section)
212
  st.write('Response:')
213
  st.write(response)
214
  document_responses[i] = response
215
- filename = generate_filename(f"{user_prompt}_Section_{i+1}", choice)
216
  create_file(filename, user_prompt, response)
217
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
218
 
219
- # ... document_responses logic remains the same
220
-
221
- all_files = [file for file in glob.glob("*.{}".format(choice))]
 
 
 
 
 
 
 
 
 
 
222
 
223
  for file in all_files:
224
  col1, col2, col3 = st.sidebar.columns([5,1,1]) # adjust the ratio as needed
@@ -234,6 +229,7 @@ def main():
234
  response = chat_with_file_contents(user_prompt, file_contents)
235
  st.write('Response:')
236
  st.write(response)
 
237
  filename = generate_filename(user_prompt, choice)
238
  create_file(filename, user_prompt, response)
239
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
@@ -241,6 +237,6 @@ def main():
241
  if st.button("🗑", key="delete_"+file):
242
  os.remove(file)
243
  st.experimental_rerun()
244
-
245
  if __name__ == "__main__":
246
- main()
 
16
  from collections import deque
17
  from audio_recorder_streamlit import audio_recorder
18
 
 
19
  def generate_filename(prompt, file_type):
20
  central = pytz.timezone('US/Central')
21
  safe_date_time = datetime.now(central).strftime("%m%d_%I%M")
 
134
  return ""
135
 
136
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
137
 
138
  def chat_with_file_contents(prompt, file_content):
139
  conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
 
142
  conversation.append({'role': 'assistant', 'content': file_content})
143
  response = openai.ChatCompletion.create(model=model_choice, messages=conversation)
144
  return response['choices'][0]['message']['content']
145
+
146
 
147
  # Sidebar and global
148
  openai.api_key = os.getenv('OPENAI_KEY')
 
156
  if filename is not None:
157
  transcription = transcribe_audio(openai.api_key, filename, "whisper-1")
158
  st.write(transcription)
159
+ gptOutput = chat_with_model(transcription, '') # *************************************
160
  filename = generate_filename(transcription, choice)
161
  create_file(filename, transcription, gptOutput)
162
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
163
 
164
+
165
  def main():
166
  user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100)
167
 
168
  collength, colupload = st.columns([2,3]) # adjust the ratio as needed
169
  with collength:
170
+ #max_length = 12000 - optimal for gpt35 turbo. 2x=24000 for gpt4. 8x=96000 for gpt4-32k.
171
  max_length = st.slider("File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000)
172
  with colupload:
173
  uploaded_file = st.file_uploader("Add a file for context:", type=["xml", "json", "xlsx","csv","html", "htm", "md", "txt"])
 
193
  else:
194
  if st.button(f"Chat about Section {i+1}"):
195
  st.write('Reasoning with your inputs...')
196
+ response = chat_with_model(user_prompt, section) # *************************************
197
  st.write('Response:')
198
  st.write(response)
199
  document_responses[i] = response
200
+ filename = generate_filename(f"{user_prompt}_section_{i+1}", choice)
201
  create_file(filename, user_prompt, response)
202
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
203
 
204
+ if st.button('💬 Chat'):
205
+ st.write('Reasoning with your inputs...')
206
+ response = chat_with_model(user_prompt, ''.join(list(document_sections))) # *************************************
207
+ st.write('Response:')
208
+ st.write(response)
209
+
210
+ filename = generate_filename(user_prompt, choice)
211
+ create_file(filename, user_prompt, response)
212
+ st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
213
+
214
+ all_files = glob.glob("*.*")
215
+ all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 20] # exclude files with short names
216
+ all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True) # sort by file type and file name in descending order
217
 
218
  for file in all_files:
219
  col1, col2, col3 = st.sidebar.columns([5,1,1]) # adjust the ratio as needed
 
229
  response = chat_with_file_contents(user_prompt, file_contents)
230
  st.write('Response:')
231
  st.write(response)
232
+
233
  filename = generate_filename(user_prompt, choice)
234
  create_file(filename, user_prompt, response)
235
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
 
237
  if st.button("🗑", key="delete_"+file):
238
  os.remove(file)
239
  st.experimental_rerun()
240
+
241
  if __name__ == "__main__":
242
+ main()