awacke1 committed on
Commit
1caa57f
·
1 Parent(s): df1dc7d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -16
app.py CHANGED
@@ -229,6 +229,8 @@ def main():
229
  # Sidebar and global
230
  openai.api_key = os.getenv('OPENAI_API_KEY')
231
  st.set_page_config(page_title="GPT Streamlit Document Reasoner",layout="wide")
 
 
232
  menu = ["htm", "txt", "xlsx", "csv", "md", "py"] #619
233
  choice = st.sidebar.selectbox("Output File Type:", menu)
234
  model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
@@ -237,35 +239,29 @@ def main():
237
  filename = save_and_play_audio(audio_recorder)
238
  if filename is not None:
239
  transcription = transcribe_audio(openai.api_key, filename, "whisper-1")
240
- #gptOutput = chat_with_model(transcription, '', model_choice) # *************************************
241
- #filename = generate_filename(transcription, choice)
242
- #create_file(filename, transcription, gptOutput)
243
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
244
 
245
-
246
  user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100)
247
 
 
248
  collength, colupload = st.columns([2,3]) # adjust the ratio as needed
249
  with collength:
250
- #max_length = 12000 - optimal for gpt35 turbo. 2x=24000 for gpt4. 8x=96000 for gpt4-32k.
251
  max_length = st.slider("File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000)
252
  with colupload:
253
  uploaded_file = st.file_uploader("Add a file for context:", type=["xml", "json", "xlsx","csv","html", "htm", "md", "txt"])
254
-
 
255
  document_sections = deque()
256
  document_responses = {}
257
-
258
  if uploaded_file is not None:
259
  file_content = read_file_content(uploaded_file, max_length)
260
  document_sections.extend(divide_document(file_content, max_length))
261
-
262
  if len(document_sections) > 0:
263
-
264
  if st.button("👁️ View Upload"):
265
  st.markdown("**Sections of the uploaded file:**")
266
  for i, section in enumerate(list(document_sections)):
267
  st.markdown(f"**Section {i+1}**\n{section}")
268
-
269
  st.markdown("**Chat with the model:**")
270
  for i, section in enumerate(list(document_sections)):
271
  if i in document_responses:
@@ -330,13 +326,9 @@ def main():
330
  if next_action=='search':
331
  file_content_area = st.text_area("File Contents:", file_contents, height=500)
332
  st.write('Reasoning with your inputs...')
333
- #response = chat_with_file_contents(user_prompt, file_contents)
334
  response = chat_with_model(user_prompt, file_contents, model_choice)
335
- #st.write('Response:')
336
- #st.write(response)
337
-
338
- filename = generate_filename(file_content_area, choice)
339
- create_file(filename, file_content_area, response)
340
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
341
 
342
  if __name__ == "__main__":
 
229
  # Sidebar and global
230
  openai.api_key = os.getenv('OPENAI_API_KEY')
231
  st.set_page_config(page_title="GPT Streamlit Document Reasoner",layout="wide")
232
+
233
+ # File type for output, model choice
234
  menu = ["htm", "txt", "xlsx", "csv", "md", "py"] #619
235
  choice = st.sidebar.selectbox("Output File Type:", menu)
236
  model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
 
239
  filename = save_and_play_audio(audio_recorder)
240
  if filename is not None:
241
  transcription = transcribe_audio(openai.api_key, filename, "whisper-1")
 
 
 
242
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
243
 
244
+ # prompt interfaces
245
  user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100)
246
 
247
+ # file section interface for prompts against large documents as context
248
  collength, colupload = st.columns([2,3]) # adjust the ratio as needed
249
  with collength:
 
250
  max_length = st.slider("File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000)
251
  with colupload:
252
  uploaded_file = st.file_uploader("Add a file for context:", type=["xml", "json", "xlsx","csv","html", "htm", "md", "txt"])
253
+
254
+ # Document section chat
255
  document_sections = deque()
256
  document_responses = {}
 
257
  if uploaded_file is not None:
258
  file_content = read_file_content(uploaded_file, max_length)
259
  document_sections.extend(divide_document(file_content, max_length))
 
260
  if len(document_sections) > 0:
 
261
  if st.button("👁️ View Upload"):
262
  st.markdown("**Sections of the uploaded file:**")
263
  for i, section in enumerate(list(document_sections)):
264
  st.markdown(f"**Section {i+1}**\n{section}")
 
265
  st.markdown("**Chat with the model:**")
266
  for i, section in enumerate(list(document_sections)):
267
  if i in document_responses:
 
326
  if next_action=='search':
327
  file_content_area = st.text_area("File Contents:", file_contents, height=500)
328
  st.write('Reasoning with your inputs...')
 
329
  response = chat_with_model(user_prompt, file_contents, model_choice)
330
+ filename = generate_filename(file_contents, choice)
331
+ create_file(filename, file_contents, response)
 
 
 
332
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
333
 
334
  if __name__ == "__main__":