awacke1 commited on
Commit
63102ca
·
1 Parent(s): 354ca3a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -31
app.py CHANGED
@@ -23,29 +23,6 @@ menu = ["txt", "htm", "md", "py"]
23
  choice = st.sidebar.selectbox("Output File Type:", menu)
24
  model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
25
 
26
- audio_enabled = st.sidebar.checkbox("Audio", value=False)
27
-
28
- if audio_enabled:
29
- def save_and_play_audio(audio_recorder):
30
- audio_bytes = audio_recorder()
31
- if audio_bytes:
32
- filename = generate_filename("Recording", "wav")
33
- with open(filename, 'wb') as f:
34
- f.write(audio_bytes)
35
- st.sidebar.audio(audio_bytes, format="audio/wav")
36
- return filename
37
- return None
38
-
39
- # Updated to call direct from transcription to chat inference.
40
- filename = save_and_play_audio(audio_recorder)
41
- if filename is not None:
42
- transcription = transcribe_audio(openai.api_key, filename, "whisper-1")
43
- st.sidebar.markdown('### Transcription:')
44
- st.sidebar.write(transcription)
45
-
46
- # max_length moved to the sidebar
47
- max_length = st.sidebar.slider("File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000)
48
-
49
  def generate_filename(prompt, file_type):
50
  central = pytz.timezone('US/Central')
51
  safe_date_time = datetime.now(central).strftime("%m%d_%I%M")
@@ -53,6 +30,20 @@ def generate_filename(prompt, file_type):
53
  return f"{safe_date_time}_{safe_prompt}.{file_type}"
54
 
55
  TEMPERATURE = st.sidebar.slider("Adjust Creativity:", min_value=0.1, max_value=1.0, value=0.5, step=0.1)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
56
  def chat_with_model(prompt, document_section):
57
  model = model_choice
58
  conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
@@ -60,7 +51,7 @@ def chat_with_model(prompt, document_section):
60
  conversation.append({'role': 'assistant', 'content': document_section})
61
  response = openai.ChatCompletion.create(model=model, messages=conversation, temperature=TEMPERATURE)
62
  return response['choices'][0]['message']['content']
63
-
64
  def create_file(filename, prompt, response):
65
  if filename.endswith(".txt"):
66
  with open(filename, 'w') as file:
@@ -71,7 +62,7 @@ def create_file(filename, prompt, response):
71
  elif filename.endswith(".md"):
72
  with open(filename, 'w') as file:
73
  file.write(f"# Prompt:\n{prompt}\n# Response:\n{response}")
74
-
75
  # Updated to auto process transcript to chatgpt in AI pipeline from Whisper to ChatGPT
76
  def transcribe_audio(openai_key, file_path, model):
77
  OPENAI_API_URL = "https://api.openai.com/v1/audio/transcriptions"
@@ -93,7 +84,7 @@ def transcribe_audio(openai_key, file_path, model):
93
  st.write(response.json())
94
  st.error("Error in API call.")
95
  return None
96
-
97
  def save_and_play_audio(audio_recorder):
98
  audio_bytes = audio_recorder()
99
  if audio_bytes:
@@ -105,7 +96,7 @@ def save_and_play_audio(audio_recorder):
105
  return None
106
 
107
  # Updated to call direct from transcription to chat inference.
108
- filename = save_and_play_audio(audio_recorder)
109
  if filename is not None:
110
  #if st.button("Transcribe"):
111
  transcription = transcribe_audio(openai.api_key, filename, "whisper-1")
@@ -143,7 +134,7 @@ def CompressXML(xml_text):
143
  if isinstance(elem.tag, str) and 'Comment' in elem.tag:
144
  elem.parent.remove(elem)
145
  return ET.tostring(root, encoding='unicode', method="xml")
146
-
147
  def read_file_content(file,max_length):
148
  if file.type == "application/json":
149
  content = json.load(file)
@@ -174,7 +165,7 @@ def main():
174
  max_length = st.slider("File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000)
175
  with colupload:
176
  uploaded_file = st.file_uploader("Add a file for context:", type=["xml", "json", "html", "htm", "md", "txt"])
177
-
178
  document_sections = deque()
179
  document_responses = {}
180
 
@@ -183,12 +174,12 @@ def main():
183
  document_sections.extend(divide_document(file_content, max_length))
184
 
185
  if len(document_sections) > 0:
186
-
187
-            if st.button("👁️ View Upload"):
188
  st.markdown("**Sections of the uploaded file:**")
189
  for i, section in enumerate(list(document_sections)):
190
  st.markdown(f"**Section {i+1}**\n{section}")
191
-
192
  st.markdown("**Chat with the model:**")
193
  for i, section in enumerate(list(document_sections)):
194
  if i in document_responses:
 
23
  choice = st.sidebar.selectbox("Output File Type:", menu)
24
  model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
25
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
  def generate_filename(prompt, file_type):
27
  central = pytz.timezone('US/Central')
28
  safe_date_time = datetime.now(central).strftime("%m%d_%I%M")
 
30
  return f"{safe_date_time}_{safe_prompt}.{file_type}"
31
 
32
  TEMPERATURE = st.sidebar.slider("Adjust Creativity:", min_value=0.1, max_value=1.0, value=0.5, step=0.1)
33
+
34
+ audio_checkbox = st.sidebar.checkbox("Audio", value=False)
35
+
36
+ if audio_checkbox:
37
+ record_audio = st.sidebar.button("Record Audio")
38
+ if record_audio:
39
+ filename = save_and_play_audio(audio_recorder)
40
+ if filename is not None:
41
+ transcription = transcribe_audio(openai.api_key, filename, "whisper-1")
42
+ st.markdown('### Transcription:')
43
+ st.write(transcription)
44
+ else:
45
+ record_audio = False
46
+
47
  def chat_with_model(prompt, document_section):
48
  model = model_choice
49
  conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
 
51
  conversation.append({'role': 'assistant', 'content': document_section})
52
  response = openai.ChatCompletion.create(model=model, messages=conversation, temperature=TEMPERATURE)
53
  return response['choices'][0]['message']['content']
54
+
55
  def create_file(filename, prompt, response):
56
  if filename.endswith(".txt"):
57
  with open(filename, 'w') as file:
 
62
  elif filename.endswith(".md"):
63
  with open(filename, 'w') as file:
64
  file.write(f"# Prompt:\n{prompt}\n# Response:\n{response}")
65
+
66
  # Updated to auto process transcript to chatgpt in AI pipeline from Whisper to ChatGPT
67
  def transcribe_audio(openai_key, file_path, model):
68
  OPENAI_API_URL = "https://api.openai.com/v1/audio/transcriptions"
 
84
  st.write(response.json())
85
  st.error("Error in API call.")
86
  return None
87
+
88
  def save_and_play_audio(audio_recorder):
89
  audio_bytes = audio_recorder()
90
  if audio_bytes:
 
96
  return None
97
 
98
  # Updated to call direct from transcription to chat inference.
99
+ filename = save_and_play_audio(audio_recorder) if record_audio else None
100
  if filename is not None:
101
  #if st.button("Transcribe"):
102
  transcription = transcribe_audio(openai.api_key, filename, "whisper-1")
 
134
  if isinstance(elem.tag, str) and 'Comment' in elem.tag:
135
  elem.parent.remove(elem)
136
  return ET.tostring(root, encoding='unicode', method="xml")
137
+
138
  def read_file_content(file,max_length):
139
  if file.type == "application/json":
140
  content = json.load(file)
 
165
  max_length = st.slider("File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000)
166
  with colupload:
167
  uploaded_file = st.file_uploader("Add a file for context:", type=["xml", "json", "html", "htm", "md", "txt"])
168
+
169
  document_sections = deque()
170
  document_responses = {}
171
 
 
174
  document_sections.extend(divide_document(file_content, max_length))
175
 
176
  if len(document_sections) > 0:
177
+
178
             if st.button("👁️ View Upload"):
179
  st.markdown("**Sections of the uploaded file:**")
180
  for i, section in enumerate(list(document_sections)):
181
  st.markdown(f"**Section {i+1}**\n{section}")
182
+
183
  st.markdown("**Chat with the model:**")
184
  for i, section in enumerate(list(document_sections)):
185
  if i in document_responses: