awacke1 committed on
Commit
b1ab1de
·
1 Parent(s): 0da1c62

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +80 -43
app.py CHANGED
@@ -7,37 +7,75 @@ import json
7
  import mistune
8
  import pytz
9
  import math
 
 
10
  from datetime import datetime
11
  from openai import ChatCompletion
12
  from xml.etree import ElementTree as ET
13
  from bs4 import BeautifulSoup
14
  from collections import deque
 
15
 
16
  openai.api_key = os.getenv('OPENAI_KEY')
17
- st.set_page_config(
18
- page_title="GPT Streamlit Document Reasoner",
19
- layout="wide")
20
 
21
  menu = ["txt", "htm", "md", "py"]
22
- choice = st.sidebar.selectbox("Output file type:", menu)
23
- choicePrefix = "Output file type is "
24
- if choice == "txt":
25
- st.sidebar.write(choicePrefix + "Text File.")
26
- elif choice == "htm":
27
- st.sidebar.write(choicePrefix + "HTML5.")
28
- elif choice == "md":
29
- st.sidebar.write(choicePrefix + "Markdown.")
30
- elif choice == "py":
31
- st.sidebar.write(choicePrefix + "Python Code.")
32
-
33
- max_length = st.sidebar.slider("Max document length", min_value=1000, max_value=32000, value=2000, step=1000)
34
 
35
def generate_filename(prompt, file_type):
    """Build a timestamped filename from the prompt text.

    The result is ``<MMDD_HHMM>_<sanitized prompt>.<file_type>``, stamped
    in US/Central local time, keeping only the first 28 alphanumeric
    characters of the prompt.
    """
    now_central = datetime.now(pytz.timezone('US/Central'))
    stamp = now_central.strftime("%m%d_%I%M")
    cleaned = "".join(ch for ch in prompt if ch.isalnum())[:28]
    return f"{stamp}_{cleaned}.{file_type}"
40
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
  def create_file(filename, prompt, response):
42
  if filename.endswith(".txt"):
43
  with open(filename, 'w') as file:
@@ -55,15 +93,6 @@ def truncate_document(document, length):
55
def divide_document(document, max_length):
    """Split *document* into consecutive chunks of at most *max_length* characters.

    Returns a list of substrings in original order; an empty document
    yields an empty list.
    """
    chunks = []
    start = 0
    while start < len(document):
        chunks.append(document[start:start + max_length])
        start += max_length
    return chunks
57
 
58
def chat_with_model(prompt, document_section):
    """Send *prompt* plus a document section to gpt-3.5-turbo and return the reply text.

    The conversation is a system message, the user prompt, and the
    document section supplied as assistant context.
    """
    messages = [
        {'role': 'system', 'content': 'You are a helpful assistant.'},
        {'role': 'user', 'content': prompt},
        {'role': 'assistant', 'content': document_section},
    ]
    result = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
    return result['choices'][0]['message']['content']
65
-
66
-
67
  def get_table_download_link(file_path):
68
  with open(file_path, 'r') as file:
69
  data = file.read()
@@ -81,7 +110,6 @@ def get_table_download_link(file_path):
81
  href = f'<a href="data:{mime_type};base64,{b64}" target="_blank" download="{file_name}">{file_name}</a>'
82
  return href
83
 
84
-
85
  def CompressXML(xml_text):
86
  root = ET.fromstring(xml_text)
87
  for elem in list(root.iter()):
@@ -111,10 +139,15 @@ def read_file_content(file,max_length):
111
  return ""
112
 
113
  def main():
114
- user_prompt = st.text_area("Your question:", '', height=120)
115
- uploaded_file = st.file_uploader("Choose a file", type=["xml", "json", "html", "htm", "md", "txt"])
116
- max_length = 4000
117
-
 
 
 
 
 
118
  document_sections = deque()
119
  document_responses = {}
120
 
@@ -123,17 +156,19 @@ def main():
123
  document_sections.extend(divide_document(file_content, max_length))
124
 
125
  if len(document_sections) > 0:
126
- st.markdown("**Sections of the uploaded file:**")
127
- for i, section in enumerate(list(document_sections)):
128
- st.markdown(f"**Section {i+1}**\n{section}")
129
-
 
 
130
  st.markdown("**Chat with the model:**")
131
  for i, section in enumerate(list(document_sections)):
132
  if i in document_responses:
133
  st.markdown(f"**Section {i+1}**\n{document_responses[i]}")
134
  else:
135
  if st.button(f"Chat about Section {i+1}"):
136
- st.write('Thinking and Reasoning with your inputs...')
137
  response = chat_with_model(user_prompt, section)
138
  st.write('Response:')
139
  st.write(response)
@@ -142,9 +177,8 @@ def main():
142
  create_file(filename, user_prompt, response)
143
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
144
 
145
-
146
  if st.button('πŸ’¬ Chat'):
147
- st.write('Thinking and Reasoning with your inputs...')
148
  response = chat_with_model(user_prompt, ''.join(list(document_sections)))
149
  st.write('Response:')
150
  st.write(response)
@@ -153,15 +187,18 @@ def main():
153
  create_file(filename, user_prompt, response)
154
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
155
 
156
- all_files = glob.glob("*.txt") + glob.glob("*.htm") + glob.glob("*.md")
 
 
 
157
  for file in all_files:
158
- col1, col2 = st.sidebar.columns([4,1]) # adjust the ratio as needed
159
  with col1:
160
  st.markdown(get_table_download_link(file), unsafe_allow_html=True)
161
- with col2:
162
- if st.button("πŸ—‘", key=file):
163
  os.remove(file)
164
  st.experimental_rerun()
165
-
166
  if __name__ == "__main__":
167
- main()
 
7
  import mistune
8
  import pytz
9
  import math
10
+ import requests
11
+
12
  from datetime import datetime
13
  from openai import ChatCompletion
14
  from xml.etree import ElementTree as ET
15
  from bs4 import BeautifulSoup
16
  from collections import deque
17
+ from audio_recorder_streamlit import audio_recorder
18
 
19
  openai.api_key = os.getenv('OPENAI_KEY')
20
+ st.set_page_config(page_title="GPT Streamlit Document Reasoner",layout="wide")
 
 
21
 
22
  menu = ["txt", "htm", "md", "py"]
23
+ choice = st.sidebar.selectbox("Output File Type:", menu)
24
+ model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
 
 
 
 
 
 
 
 
 
 
25
 
26
def generate_filename(prompt, file_type):
    """Build a timestamped filename from the prompt text.

    The result is ``<MMDD_HHMM>_<sanitized prompt>.<file_type>``, stamped
    in US/Central local time, keeping only the first 45 alphanumeric
    characters of the prompt.
    """
    now_central = datetime.now(pytz.timezone('US/Central'))
    stamp = now_central.strftime("%m%d_%I%M")
    cleaned = "".join(ch for ch in prompt if ch.isalnum())[:45]
    return f"{stamp}_{cleaned}.{file_type}"
31
 
32
def chat_with_model(prompt, document_section):
    """Chat with the sidebar-selected OpenAI model and return the reply text.

    Builds a conversation from a system message and the user *prompt*;
    when *document_section* is non-empty it is appended as assistant
    context. Relies on the module-level ``model_choice`` (sidebar radio)
    and the already-configured ``openai.api_key``.

    Parameters:
        prompt: user question/instruction text.
        document_section: optional document chunk to ground the answer;
            may be '' (or None) when there is no context.

    Returns:
        The content string of the model's first choice.
    """
    conversation = [
        {'role': 'system', 'content': 'You are a helpful assistant.'},
        {'role': 'user', 'content': prompt},
    ]
    # Truthiness check instead of len(...) > 0: also tolerates
    # document_section=None (len(None) would raise TypeError).
    if document_section:
        conversation.append({'role': 'assistant', 'content': document_section})
    response = openai.ChatCompletion.create(model=model_choice, messages=conversation)
    return response['choices'][0]['message']['content']
40
+
41
def transcribe_audio(openai_key, file_path, model):
    """POST an audio file to OpenAI's transcription endpoint and display results.

    On success: writes the raw JSON response to the page, feeds the
    transcript through chat_with_model, shows that reply, and returns
    the transcript text. On any non-200 status: shows the error payload
    and returns None.

    Parameters:
        openai_key: bearer token for the OpenAI API.
        file_path: path of the audio file to upload.
        model: transcription model name (e.g. "whisper-1").
    """
    url = "https://api.openai.com/v1/audio/transcriptions"
    auth_header = {"Authorization": f"Bearer {openai_key}"}
    with open(file_path, 'rb') as audio_file:
        response = requests.post(
            url,
            headers=auth_header,
            files={'file': audio_file},
            data={'model': model},
        )
    if response.status_code != 200:
        st.write(response.json())
        st.error("Error in API call.")
        return None
    payload = response.json()
    st.write(payload)
    transcript = payload.get('text')
    reply = chat_with_model(transcript, '')
    st.write('Responses:')
    st.write(reply)
    return transcript
60
+
61
def save_and_play_audio(audio_recorder):
    """Capture audio from the recorder widget, persist it, and play it back.

    Parameters:
        audio_recorder: zero-argument callable returning recorded bytes
            (or a falsy value when nothing was recorded).

    Returns:
        The generated ``.wav`` filename, or None when no audio was captured.
    """
    recorded = audio_recorder()
    if not recorded:
        return None
    wav_name = generate_filename("Recording", "wav")
    with open(wav_name, 'wb') as out:
        out.write(recorded)
    st.audio(recorded, format="audio/wav")
    return wav_name
70
+
71
# Record speech, transcribe it with Whisper, and push the transcript
# through the chat model as a prompt.
filename = save_and_play_audio(audio_recorder)
if filename is not None:
    transcription = transcribe_audio(openai.api_key, filename, "whisper-1")
    st.write(transcription)
    chat_with_model(transcription, '')  # push transcript through as prompt
79
  def create_file(filename, prompt, response):
80
  if filename.endswith(".txt"):
81
  with open(filename, 'w') as file:
 
93
  def divide_document(document, max_length):
94
  return [document[i:i+max_length] for i in range(0, len(document), max_length)]
95
 
 
 
 
 
 
 
 
 
 
96
  def get_table_download_link(file_path):
97
  with open(file_path, 'r') as file:
98
  data = file.read()
 
110
  href = f'<a href="data:{mime_type};base64,{b64}" target="_blank" download="{file_name}">{file_name}</a>'
111
  return href
112
 
 
113
  def CompressXML(xml_text):
114
  root = ET.fromstring(xml_text)
115
  for elem in list(root.iter()):
 
139
  return ""
140
 
141
  def main():
142
+ user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100)
143
+
144
+ collength, colupload = st.columns([2,3]) # adjust the ratio as needed
145
+ with collength:
146
+ #max_length = 12000 - optimal for gpt35 turbo. 2x=24000 for gpt4. 8x=96000 for gpt4-32k.
147
+ max_length = st.slider("File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000)
148
+ with colupload:
149
+ uploaded_file = st.file_uploader("Add a file for context:", type=["xml", "json", "html", "htm", "md", "txt"])
150
+
151
  document_sections = deque()
152
  document_responses = {}
153
 
 
156
  document_sections.extend(divide_document(file_content, max_length))
157
 
158
  if len(document_sections) > 0:
159
+
160
+ if st.button("πŸ‘οΈ View Upload"):
161
+ st.markdown("**Sections of the uploaded file:**")
162
+ for i, section in enumerate(list(document_sections)):
163
+ st.markdown(f"**Section {i+1}**\n{section}")
164
+
165
  st.markdown("**Chat with the model:**")
166
  for i, section in enumerate(list(document_sections)):
167
  if i in document_responses:
168
  st.markdown(f"**Section {i+1}**\n{document_responses[i]}")
169
  else:
170
  if st.button(f"Chat about Section {i+1}"):
171
+ st.write('Reasoning with your inputs...')
172
  response = chat_with_model(user_prompt, section)
173
  st.write('Response:')
174
  st.write(response)
 
177
  create_file(filename, user_prompt, response)
178
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
179
 
 
180
  if st.button('πŸ’¬ Chat'):
181
+ st.write('Reasoning with your inputs...')
182
  response = chat_with_model(user_prompt, ''.join(list(document_sections)))
183
  st.write('Response:')
184
  st.write(response)
 
187
  create_file(filename, user_prompt, response)
188
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
189
 
190
+ all_files = glob.glob("*.*")
191
+ all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 20] # exclude files with short names
192
+ all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True) # sort by file type and file name in descending order
193
+
194
  for file in all_files:
195
+ col1, col3 = st.sidebar.columns([5,1]) # adjust the ratio as needed
196
  with col1:
197
  st.markdown(get_table_download_link(file), unsafe_allow_html=True)
198
+ with col3:
199
+ if st.button("πŸ—‘", key="delete_"+file):
200
  os.remove(file)
201
  st.experimental_rerun()
202
+
203
  if __name__ == "__main__":
204
+ main()