awacke1 committed on
Commit
29a52bc
Β·
1 Parent(s): bba25a8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +126 -42
app.py CHANGED
@@ -7,60 +7,75 @@ import json
7
  import mistune
8
  import pytz
9
  import math
 
 
10
  from datetime import datetime
11
  from openai import ChatCompletion
12
  from xml.etree import ElementTree as ET
13
  from bs4 import BeautifulSoup
14
  from collections import deque
 
15
 
16
- openai.api_key = os.getenv('OPENAI_KEY')
17
- st.set_page_config(
18
- page_title="GPT Streamlit Document Reasoner",
19
- layout="wide")
20
-
21
- menu = ["txt", "htm", "md", "py"]
22
- choice = st.sidebar.selectbox("Output file type:", menu)
23
- choicePrefix = "Output file type is "
24
-
25
- if choice == "txt":
26
- st.sidebar.write(choicePrefix + "Text File.")
27
- elif choice == "htm":
28
- st.sidebar.write(choicePrefix + "HTML5.")
29
- elif choice == "md":
30
- st.sidebar.write(choicePrefix + "Markdown.")
31
- elif choice == "py":
32
- st.sidebar.write(choicePrefix + "Python Code.")
33
-
34
- model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
35
 
36
  def chat_with_model(prompt, document_section):
37
  model = model_choice
38
  conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
39
  conversation.append({'role': 'user', 'content': prompt})
40
- conversation.append({'role': 'assistant', 'content': document_section})
 
41
  response = openai.ChatCompletion.create(model=model, messages=conversation)
 
42
  return response['choices'][0]['message']['content']
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
 
44
- def generate_filename(prompt, file_type):
45
- central = pytz.timezone('US/Central')
46
- safe_date_time = datetime.now(central).strftime("%m%d_%I%M")
47
- safe_prompt = "".join(x for x in prompt if x.isalnum())[:45]
48
- return f"{safe_date_time}_{safe_prompt}.{file_type}"
 
 
 
 
49
 
50
  def create_file(filename, prompt, response):
51
  if filename.endswith(".txt"):
52
  with open(filename, 'w') as file:
53
- file.write(f"Prompt:\n{prompt}\nResponse:\n{response}")
54
  elif filename.endswith(".htm"):
55
  with open(filename, 'w') as file:
56
- file.write(f"<h1>Prompt:</h1> <p>{prompt}</p> <h1>Response:</h1> <p>{response}</p>")
57
  elif filename.endswith(".md"):
58
  with open(filename, 'w') as file:
59
- file.write(f"# Prompt:\n{prompt}\n# Response:\n{response}")
60
-
61
  def truncate_document(document, length):
62
  return document[:length]
63
-
64
  def divide_document(document, max_length):
65
  return [document[i:i+max_length] for i in range(0, len(document), max_length)]
66
 
@@ -72,6 +87,12 @@ def get_table_download_link(file_path):
72
  ext = os.path.splitext(file_name)[1] # get the file extension
73
  if ext == '.txt':
74
  mime_type = 'text/plain'
 
 
 
 
 
 
75
  elif ext == '.htm':
76
  mime_type = 'text/html'
77
  elif ext == '.md':
@@ -109,15 +130,44 @@ def read_file_content(file,max_length):
109
  else:
110
  return ""
111
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
112
  def main():
113
- user_prompt = st.text_area("Your question:", '', height=120)
114
 
115
  collength, colupload = st.columns([2,3]) # adjust the ratio as needed
116
  with collength:
117
  #max_length = 12000 - optimal for gpt35 turbo. 2x=24000 for gpt4. 8x=96000 for gpt4-32k.
118
- max_length = st.slider("Context Section Length", min_value=1000, max_value=128000, value=12000, step=1000)
119
  with colupload:
120
- uploaded_file = st.file_uploader("Choose a file", type=["xml", "json", "html", "htm", "md", "txt"])
121
 
122
  document_sections = deque()
123
  document_responses = {}
@@ -139,8 +189,8 @@ def main():
139
  st.markdown(f"**Section {i+1}**\n{document_responses[i]}")
140
  else:
141
  if st.button(f"Chat about Section {i+1}"):
142
- st.write('Thinking and Reasoning with your inputs...')
143
- response = chat_with_model(user_prompt, section)
144
  st.write('Response:')
145
  st.write(response)
146
  document_responses[i] = response
@@ -149,26 +199,60 @@ def main():
149
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
150
 
151
  if st.button('πŸ’¬ Chat'):
152
- st.write('Thinking and Reasoning with your inputs...')
153
- response = chat_with_model(user_prompt, ''.join(list(document_sections)))
154
  st.write('Response:')
155
  st.write(response)
156
 
157
  filename = generate_filename(user_prompt, choice)
158
  create_file(filename, user_prompt, response)
159
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
160
-
161
  all_files = glob.glob("*.*")
162
  all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 20] # exclude files with short names
163
- all_files = sorted(all_files, key=lambda x: (os.path.splitext(x)[1], x)) # sort by file type and file name
 
 
 
 
164
  for file in all_files:
165
- col1, col3 = st.sidebar.columns([5,1]) # adjust the ratio as needed
166
  with col1:
 
 
 
 
 
167
  st.markdown(get_table_download_link(file), unsafe_allow_html=True)
168
  with col3:
 
 
 
 
 
 
 
 
 
 
169
  if st.button("πŸ—‘", key="delete_"+file):
170
  os.remove(file)
171
  st.experimental_rerun()
172
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
173
  if __name__ == "__main__":
174
- main()
 
7
  import mistune
8
  import pytz
9
  import math
10
+ import requests
11
+
12
  from datetime import datetime
13
  from openai import ChatCompletion
14
  from xml.etree import ElementTree as ET
15
  from bs4 import BeautifulSoup
16
  from collections import deque
17
+ from audio_recorder_streamlit import audio_recorder
18
 
19
def generate_filename(prompt, file_type):
    """Build a timestamped, filesystem-safe filename from a prompt.

    The timestamp is US/Central time as MMDD_HHMM so files group by day;
    the prompt is reduced to its first 45 alphanumeric characters.
    """
    now_central = datetime.now(pytz.timezone('US/Central'))
    stamp = now_central.strftime("%m%d_%I%M")
    cleaned = "".join(ch for ch in prompt if ch.isalnum())[:45]
    return f"{stamp}_{cleaned}.{file_type}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
 
25
def chat_with_model(prompt, document_section):
    """Send a prompt (plus an optional document section) to the chat model.

    Builds a minimal conversation: a system message, the user's prompt, and —
    only when a non-empty section is supplied — the section as an assistant
    message so the model can ground its answer in it.

    Returns the assistant's reply text. Relies on the module-level
    ``model_choice`` selected in the sidebar.
    """
    conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
    conversation.append({'role': 'user', 'content': prompt})
    if document_section:  # truthiness instead of len(...) > 0; skips '' cleanly
        conversation.append({'role': 'assistant', 'content': document_section})
    response = openai.ChatCompletion.create(model=model_choice, messages=conversation)
    return response['choices'][0]['message']['content']
34
+
35
def transcribe_audio(openai_key, file_path, model):
    """Transcribe an audio file via the OpenAI transcription HTTP endpoint.

    Posts the file to the Whisper API, echoes the raw JSON to the page,
    then feeds the transcript to ``chat_with_model`` and displays the reply.

    Returns the transcript text on success, or ``None`` on an API error.
    """
    OPENAI_API_URL = "https://api.openai.com/v1/audio/transcriptions"
    headers = {
        "Authorization": f"Bearer {openai_key}",
    }
    with open(file_path, 'rb') as f:
        data = {'file': f}
        response = requests.post(OPENAI_API_URL, headers=headers, files=data, data={'model': model})
    if response.status_code == 200:
        payload = response.json()  # parse once instead of twice
        st.write(payload)
        transcript = payload.get('text')
        response2 = chat_with_model(transcript, '')
        st.write('Responses:')
        st.write(response2)
        return transcript
    # Error path: the body may not be JSON (e.g. an HTML error page), so
    # guard the parse to avoid masking the real failure with a ValueError.
    try:
        st.write(response.json())
    except ValueError:
        st.write(response.text)
    st.error("Error in API call.")
    return None
55
 
56
def save_and_play_audio(audio_recorder):
    """Capture audio from the recorder widget; persist and play it back.

    When the recorder returns captured bytes, they are written to a
    timestamped ``.wav`` file and played in the page. Returns the saved
    filename, or ``None`` when nothing was recorded.
    """
    captured = audio_recorder()
    if not captured:
        return None
    wav_name = generate_filename("Recording", "wav")
    with open(wav_name, 'wb') as out:
        out.write(captured)
    st.audio(captured, format="audio/wav")
    return wav_name
65
 
66
def create_file(filename, prompt, response):
    """Write the prompt and model response to *filename*.

    The extension picks the layout: ``.txt`` and ``.md`` separate the two
    parts with newlines, ``.htm`` joins them with a space. Other extensions
    offered by the output-type menu (``.py``, ``.xlsx``, ``.csv``) previously
    fell through silently — no file was created even though the caller then
    renders a download link to it — so they now get the plain-text layout.
    """
    if filename.endswith(".txt"):
        with open(filename, 'w') as file:
            file.write(f"{prompt}\n{response}")
    elif filename.endswith(".htm"):
        with open(filename, 'w') as file:
            file.write(f"{prompt} {response}")
    elif filename.endswith(".md"):
        with open(filename, 'w') as file:
            file.write(f"{prompt}\n\n{response}")
    else:
        # Fallback for menu choices with no dedicated layout (py/xlsx/csv).
        with open(filename, 'w') as file:
            file.write(f"{prompt}\n{response}")
76
+
77
def truncate_document(document, length):
    """Return at most the first *length* characters of *document*."""
    return document[:length] if length < len(document) else document
 
79
def divide_document(document, max_length):
    """Split *document* into consecutive chunks of at most *max_length* chars."""
    offsets = range(0, len(document), max_length)
    return [document[off:off + max_length] for off in offsets]
81
 
 
87
  ext = os.path.splitext(file_name)[1] # get the file extension
88
  if ext == '.txt':
89
  mime_type = 'text/plain'
90
+ elif ext == '.py':
91
+ mime_type = 'text/plain'
92
+ elif ext == '.xlsx':
93
+ mime_type = 'text/plain'
94
+ elif ext == '.csv':
95
+ mime_type = 'text/plain'
96
  elif ext == '.htm':
97
  mime_type = 'text/html'
98
  elif ext == '.md':
 
130
  else:
131
  return ""
132
 
133
+
134
+
135
def chat_with_file_contents(prompt, file_content=''):
    """Ask the chat model about previously loaded file contents.

    Mirrors ``chat_with_model``: system message, user prompt, and — when
    non-empty — the file contents as an assistant message. ``file_content``
    defaults to ``''`` because a caller in ``main`` invokes this with the
    prompt only, which would otherwise raise a TypeError.

    Returns the assistant's reply text.
    """
    conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
    conversation.append({'role': 'user', 'content': prompt})
    if file_content:  # truthiness instead of len(...) > 0
        conversation.append({'role': 'assistant', 'content': file_content})
    response = openai.ChatCompletion.create(model=model_choice, messages=conversation)
    return response['choices'][0]['message']['content']
142
+
143
+
144
# Sidebar and global configuration.
openai.api_key = os.getenv('OPENAI_KEY')
st.set_page_config(page_title="GPT Streamlit Document Reasoner", layout="wide")
menu = ["htm", "txt", "xlsx", "csv", "md", "py"]  # output file types offered
choice = st.sidebar.selectbox("Output File Type:", menu)
model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))

# Audio capture -> Whisper transcription -> GPT response -> downloadable file.
filename = save_and_play_audio(audio_recorder)
if filename is not None:
    transcription = transcribe_audio(openai.api_key, filename, "whisper-1")
    # transcribe_audio returns None on an API error; guard so None is never
    # fed into chat_with_model / generate_filename (which would crash).
    if transcription:
        st.write(transcription)
        gptOutput = chat_with_model(transcription, '')
        filename = generate_filename(transcription, choice)
        create_file(filename, transcription, gptOutput)
        st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
160
+
161
+
162
  def main():
163
+ user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100)
164
 
165
  collength, colupload = st.columns([2,3]) # adjust the ratio as needed
166
  with collength:
167
  #max_length = 12000 - optimal for gpt35 turbo. 2x=24000 for gpt4. 8x=96000 for gpt4-32k.
168
+ max_length = st.slider("File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000)
169
  with colupload:
170
+ uploaded_file = st.file_uploader("Add a file for context:", type=["xml", "json", "xlsx","csv","html", "htm", "md", "txt"])
171
 
172
  document_sections = deque()
173
  document_responses = {}
 
189
  st.markdown(f"**Section {i+1}**\n{document_responses[i]}")
190
  else:
191
  if st.button(f"Chat about Section {i+1}"):
192
+ st.write('Reasoning with your inputs...')
193
+ response = chat_with_model(user_prompt, section) # *************************************
194
  st.write('Response:')
195
  st.write(response)
196
  document_responses[i] = response
 
199
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
200
 
201
  if st.button('πŸ’¬ Chat'):
202
+ st.write('Reasoning with your inputs...')
203
+ response = chat_with_model(user_prompt, ''.join(list(document_sections))) # *************************************
204
  st.write('Response:')
205
  st.write(response)
206
 
207
  filename = generate_filename(user_prompt, choice)
208
  create_file(filename, user_prompt, response)
209
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
210
+
211
  all_files = glob.glob("*.*")
212
  all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 20] # exclude files with short names
213
+ all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True) # sort by file type and file name in descending order
214
+
215
+ # sidebar of files
216
+ file_contents=''
217
+ next_action=''
218
  for file in all_files:
219
+ col1, col2, col3, col4, col5 = st.sidebar.columns([1,6,1,1,1]) # adjust the ratio as needed
220
  with col1:
221
+ if st.button("🌐", key="md_"+file): # md emoji button
222
+ with open(file, 'r') as f:
223
+ file_contents = f.read()
224
+ next_action='md'
225
+ with col2:
226
  st.markdown(get_table_download_link(file), unsafe_allow_html=True)
227
  with col3:
228
+ if st.button("πŸ“‚", key="open_"+file): # open emoji button
229
+ with open(file, 'r') as f:
230
+ file_contents = f.read()
231
+ next_action='open'
232
+ with col4:
233
+ if st.button("πŸ”", key="read_"+file): # search emoji button
234
+ with open(file, 'r') as f:
235
+ file_contents = f.read()
236
+ next_action='search'
237
+ with col5:
238
  if st.button("πŸ—‘", key="delete_"+file):
239
  os.remove(file)
240
  st.experimental_rerun()
241
+
242
+ if len(file_contents) > 0:
243
+ if next_action=='open':
244
+ file_content_area = st.text_area("File Contents:", file_contents, height=500)
245
+ if next_action=='md':
246
+ st.markdown(file_contents)
247
+ if next_action=='search':
248
+ file_content_area = st.text_area("File Contents:", file_contents, height=500)
249
+ st.write('Reasoning with your inputs...')
250
+ response = chat_with_file_contents(file_contents)
251
+ st.write('Response:')
252
+ st.write(response)
253
+ filename = generate_filename(file_content_area, choice)
254
+ create_file(filename, file_content_area, response)
255
+ st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
256
+
257
  if __name__ == "__main__":
258
+ main()