awacke1 committed on
Commit
d3c2aa8
·
1 Parent(s): 63102ca

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -26
app.py CHANGED
@@ -29,29 +29,14 @@ def generate_filename(prompt, file_type):
29
  safe_prompt = "".join(x for x in prompt if x.isalnum())[:45]
30
  return f"{safe_date_time}_{safe_prompt}.{file_type}"
31
 
32
- TEMPERATURE = st.sidebar.slider("Adjust Creativity:", min_value=0.1, max_value=1.0, value=0.5, step=0.1)
33
-
34
- audio_checkbox = st.sidebar.checkbox("Audio", value=False)
35
-
36
- if audio_checkbox:
37
- record_audio = st.sidebar.button("Record Audio")
38
- if record_audio:
39
- filename = save_and_play_audio(audio_recorder)
40
- if filename is not None:
41
- transcription = transcribe_audio(openai.api_key, filename, "whisper-1")
42
- st.markdown('### Transcription:')
43
- st.write(transcription)
44
- else:
45
- record_audio = False
46
-
47
 # NOTE(review): pre-change version of chat_with_model, shown as diff context.
 # It sends a 3-message conversation (system prompt, user prompt, and the
 # uploaded document section as an 'assistant' turn) to the OpenAI chat API.
 def chat_with_model(prompt, document_section):
48
  model = model_choice
49
  conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
50
  conversation.append({'role': 'user', 'content': prompt})
51
  conversation.append({'role': 'assistant', 'content': document_section})
52
 # Removed line: this version passed the sidebar TEMPERATURE slider value;
 # the replacement (see the '+' hunk later in the diff) drops the
 # temperature argument, falling back to the API default.
- response = openai.ChatCompletion.create(model=model, messages=conversation, temperature=TEMPERATURE)
53
  return response['choices'][0]['message']['content']
54
-
55
  def create_file(filename, prompt, response):
56
  if filename.endswith(".txt"):
57
  with open(filename, 'w') as file:
@@ -62,7 +47,7 @@ def create_file(filename, prompt, response):
62
  elif filename.endswith(".md"):
63
  with open(filename, 'w') as file:
64
  file.write(f"# Prompt:\n{prompt}\n# Response:\n{response}")
65
-
66
  # Updated to auto process transcript to chatgpt in AI pipeline from Whisper to ChatGPT
67
  def transcribe_audio(openai_key, file_path, model):
68
  OPENAI_API_URL = "https://api.openai.com/v1/audio/transcriptions"
@@ -84,7 +69,7 @@ def transcribe_audio(openai_key, file_path, model):
84
  st.write(response.json())
85
  st.error("Error in API call.")
86
  return None
87
-
88
  def save_and_play_audio(audio_recorder):
89
  audio_bytes = audio_recorder()
90
  if audio_bytes:
@@ -96,7 +81,7 @@ def save_and_play_audio(audio_recorder):
96
  return None
97
 
98
  # Updated to call direct from transcription to chat inference.
99
- filename = save_and_play_audio(audio_recorder) if record_audio else None
100
  if filename is not None:
101
  #if st.button("Transcribe"):
102
  transcription = transcribe_audio(openai.api_key, filename, "whisper-1")
@@ -134,7 +119,7 @@ def CompressXML(xml_text):
134
  if isinstance(elem.tag, str) and 'Comment' in elem.tag:
135
  elem.parent.remove(elem)
136
  return ET.tostring(root, encoding='unicode', method="xml")
137
-
138
  def read_file_content(file,max_length):
139
  if file.type == "application/json":
140
  content = json.load(file)
@@ -165,7 +150,7 @@ def main():
165
  max_length = st.slider("File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000)
166
  with colupload:
167
  uploaded_file = st.file_uploader("Add a file for context:", type=["xml", "json", "html", "htm", "md", "txt"])
168
-
169
  document_sections = deque()
170
  document_responses = {}
171
 
@@ -174,12 +159,12 @@ def main():
174
  document_sections.extend(divide_document(file_content, max_length))
175
 
176
  if len(document_sections) > 0:
177
-
178
  if st.button("๐Ÿ‘๏ธ View Upload"):
179
  st.markdown("**Sections of the uploaded file:**")
180
  for i, section in enumerate(list(document_sections)):
181
  st.markdown(f"**Section {i+1}**\n{section}")
182
-
183
  st.markdown("**Chat with the model:**")
184
  for i, section in enumerate(list(document_sections)):
185
  if i in document_responses:
@@ -222,5 +207,4 @@ def main():
222
  st.experimental_rerun()
223
 
224
  if __name__ == "__main__":
225
- main()
226
-
 
29
  safe_prompt = "".join(x for x in prompt if x.isalnum())[:45]
30
  return f"{safe_date_time}_{safe_prompt}.{file_type}"
31
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32
 # NOTE(review): post-change version of chat_with_model.
 # Builds a 3-message conversation (system role, user prompt, and the
 # document section supplied as an 'assistant' turn) and returns the
 # model's reply text. `model_choice` and `openai` are defined elsewhere
 # in app.py (not visible in this diff) — presumably a Streamlit selectbox
 # and the configured OpenAI client; verify against the full file.
  def chat_with_model(prompt, document_section):
33
  model = model_choice
34
  conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
35
  conversation.append({'role': 'user', 'content': prompt})
36
  conversation.append({'role': 'assistant', 'content': document_section})
37
 # Added line: temperature argument removed relative to the old version,
 # so the API's default sampling temperature is used.
+ response = openai.ChatCompletion.create(model=model, messages=conversation)
38
  return response['choices'][0]['message']['content']
39
+
40
  def create_file(filename, prompt, response):
41
  if filename.endswith(".txt"):
42
  with open(filename, 'w') as file:
 
47
  elif filename.endswith(".md"):
48
  with open(filename, 'w') as file:
49
  file.write(f"# Prompt:\n{prompt}\n# Response:\n{response}")
50
+
51
  # Updated to auto process transcript to chatgpt in AI pipeline from Whisper to ChatGPT
52
  def transcribe_audio(openai_key, file_path, model):
53
  OPENAI_API_URL = "https://api.openai.com/v1/audio/transcriptions"
 
69
  st.write(response.json())
70
  st.error("Error in API call.")
71
  return None
72
+
73
  def save_and_play_audio(audio_recorder):
74
  audio_bytes = audio_recorder()
75
  if audio_bytes:
 
81
  return None
82
 
83
  # Updated to call direct from transcription to chat inference.
84
+ filename = save_and_play_audio(audio_recorder)
85
  if filename is not None:
86
  #if st.button("Transcribe"):
87
  transcription = transcribe_audio(openai.api_key, filename, "whisper-1")
 
119
  if isinstance(elem.tag, str) and 'Comment' in elem.tag:
120
  elem.parent.remove(elem)
121
  return ET.tostring(root, encoding='unicode', method="xml")
122
+
123
  def read_file_content(file,max_length):
124
  if file.type == "application/json":
125
  content = json.load(file)
 
150
  max_length = st.slider("File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000)
151
  with colupload:
152
  uploaded_file = st.file_uploader("Add a file for context:", type=["xml", "json", "html", "htm", "md", "txt"])
153
+
154
  document_sections = deque()
155
  document_responses = {}
156
 
 
159
  document_sections.extend(divide_document(file_content, max_length))
160
 
161
  if len(document_sections) > 0:
162
+
163
  if st.button("๐Ÿ‘๏ธ View Upload"):
164
  st.markdown("**Sections of the uploaded file:**")
165
  for i, section in enumerate(list(document_sections)):
166
  st.markdown(f"**Section {i+1}**\n{section}")
167
+
168
  st.markdown("**Chat with the model:**")
169
  for i, section in enumerate(list(document_sections)):
170
  if i in document_responses:
 
207
  st.experimental_rerun()
208
 
209
  if __name__ == "__main__":
210
+ main()