Update backupapp.py
backupapp.py CHANGED: +40 -31
@@ -23,32 +23,53 @@ menu = ["txt", "htm", "md", "py"]
 choice = st.sidebar.selectbox("Output File Type:", menu)
 model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
 
-
 def generate_filename(prompt, file_type):
     central = pytz.timezone('US/Central')
     safe_date_time = datetime.now(central).strftime("%m%d_%I%M")
     safe_prompt = "".join(x for x in prompt if x.isalnum())[:45]
     return f"{safe_date_time}_{safe_prompt}.{file_type}"
 
+def chat_with_model(prompt, document_section):
+    model = model_choice
+    conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
+    conversation.append({'role': 'user', 'content': prompt})
+    conversation.append({'role': 'assistant', 'content': document_section})
+    response = openai.ChatCompletion.create(model=model, messages=conversation)
+    return response['choices'][0]['message']['content']
+
+def create_file(filename, prompt, response):
+    if filename.endswith(".txt"):
+        with open(filename, 'w') as file:
+            file.write(f"Prompt:\n{prompt}\nResponse:\n{response}")
+    elif filename.endswith(".htm"):
+        with open(filename, 'w') as file:
+            file.write(f"<h1>Prompt:</h1> <p>{prompt}</p> <h1>Response:</h1> <p>{response}</p>")
+    elif filename.endswith(".md"):
+        with open(filename, 'w') as file:
+            file.write(f"# Prompt:\n{prompt}\n# Response:\n{response}")
+
+# Updated to auto process transcript to chatgpt in AI pipeline from Whisper to ChatGPT
 def transcribe_audio(openai_key, file_path, model):
     OPENAI_API_URL = "https://api.openai.com/v1/audio/transcriptions"
     headers = {
         "Authorization": f"Bearer {openai_key}",
     }
-
     with open(file_path, 'rb') as f:
         data = {'file': f}
         response = requests.post(OPENAI_API_URL, headers=headers, files=data, data={'model': model})
-
     if response.status_code == 200:
-        st.write(
-
+        st.write('Reasoning with your transcription..')
+        transcript=response.json().get('text')
+        st.write(transcript)
+        gptResponse = chat_with_model(transcript, '') # send transcript to ChatGPT
+        filename = generate_filename(transcript, choice) # auto name file with date and prompt per output file type
+        create_file(filename, transcript, gptResponse) # write output file
+        return gptResponse
     else:
         st.write(response.json())
         st.error("Error in API call.")
     return None
-
-
+
 def save_and_play_audio(audio_recorder):
     audio_bytes = audio_recorder()
     if audio_bytes:
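The success branch of transcribe_audio now handles the whole Whisper-to-ChatGPT hop itself: it pulls the 'text' field from the response, sends it to chat_with_model, names an output file, and writes it. The raw request it builds can be checked outside Streamlit; a minimal sketch, assuming the key sits in an OPENAI_API_KEY environment variable and a local sample.wav exists (neither is part of this commit):

import os
import requests

OPENAI_API_URL = "https://api.openai.com/v1/audio/transcriptions"

def whisper_text(file_path, model="whisper-1"):
    # Assumed: key is read from the OPENAI_API_KEY environment variable.
    headers = {"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"}
    with open(file_path, "rb") as f:
        # Multipart form: the audio goes under 'file', the model name as a plain form field.
        resp = requests.post(OPENAI_API_URL, headers=headers,
                             files={"file": f}, data={"model": model})
    resp.raise_for_status()
    return resp.json().get("text")

print(whisper_text("sample.wav"))  # 'sample.wav' is illustrative only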
@@ -59,30 +80,13 @@ def save_and_play_audio(audio_recorder):
         return filename
     return None
 
+# Updated to call direct from transcription to chat inference.
 filename = save_and_play_audio(audio_recorder)
 if filename is not None:
-    if st.button("Transcribe"):
-        transcription = transcribe_audio(openai.api_key, filename, "whisper-1")
-        st.markdown('### Transcription:')
-        st.write(transcription)
-def chat_with_model(prompt, document_section):
-    model = model_choice
-    conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
-    conversation.append({'role': 'user', 'content': prompt})
-    conversation.append({'role': 'assistant', 'content': document_section})
-    response = openai.ChatCompletion.create(model=model, messages=conversation)
-    return response['choices'][0]['message']['content']
-
-def create_file(filename, prompt, response):
-    if filename.endswith(".txt"):
-        with open(filename, 'w') as file:
-            file.write(f"Prompt:\n{prompt}\nResponse:\n{response}")
-    elif filename.endswith(".htm"):
-        with open(filename, 'w') as file:
-            file.write(f"<h1>Prompt:</h1> <p>{prompt}</p> <h1>Response:</h1> <p>{response}</p>")
-    elif filename.endswith(".md"):
-        with open(filename, 'w') as file:
-            file.write(f"# Prompt:\n{prompt}\n# Response:\n{response}")
+    #if st.button("Transcribe"):
+    transcription = transcribe_audio(openai.api_key, filename, "whisper-1")
+    st.markdown('### Transcription:')
+    st.write(transcription)
 
 def truncate_document(document, length):
     return document[:length]
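save_and_play_audio is only partially visible across these two hunks: it pulls bytes from the recorder, and by the time this hunk starts it is returning a filename or None. A rough sketch of the pattern those visible lines imply; the playback call and the filename scheme below are assumptions, not lines from this file:

import streamlit as st
from datetime import datetime

def save_and_play_audio_sketch(audio_recorder):
    audio_bytes = audio_recorder()  # the recorder component returns raw WAV bytes (or nothing)
    if audio_bytes:
        st.audio(audio_bytes, format="audio/wav")                 # playback (assumed)
        filename = datetime.now().strftime("%m%d_%I%M") + ".wav"  # illustrative naming
        with open(filename, "wb") as f:
            f.write(audio_bytes)
        return filename
    return None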
@@ -98,6 +102,8 @@ def get_table_download_link(file_path):
     ext = os.path.splitext(file_name)[1] # get the file extension
     if ext == '.txt':
         mime_type = 'text/plain'
+    elif ext == '.wav':
+        mime_type = 'audio/x-wav'
     elif ext == '.htm':
         mime_type = 'text/html'
     elif ext == '.md':
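Only the MIME-type chain of get_table_download_link appears in this hunk; the new '.wav' branch lets recorded audio be served through the same sidebar links as the text outputs. A rough sketch of the base64 data-URI link such a helper typically returns (the body below, including the '.md' MIME value, is an assumption rather than the file's actual code):

import base64
import os

def download_link_sketch(file_path):
    with open(file_path, "rb") as f:
        b64 = base64.b64encode(f.read()).decode()
    file_name = os.path.basename(file_path)
    ext = os.path.splitext(file_name)[1]
    mime_type = {
        ".txt": "text/plain",
        ".wav": "audio/x-wav",
        ".htm": "text/html",
        ".md": "text/markdown",  # the .md value is not visible in the hunk; a guess
    }.get(ext, "application/octet-stream")
    # Rendered in the sidebar with st.markdown(..., unsafe_allow_html=True).
    return f'<a href="data:{mime_type};base64,{b64}" download="{file_name}">{file_name}</a>'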
@@ -191,11 +197,14 @@ def main():
     for file in all_files:
         col1, col3 = st.sidebar.columns([5,1]) # adjust the ratio as needed
         with col1:
-            st.markdown(get_table_download_link(file), unsafe_allow_html=True)
+            try:
+                st.markdown(get_table_download_link(file), unsafe_allow_html=True)
+            except Exception as e:
+                st.error(f"Error occurred while processing file {file}: {str(e)}")
         with col3:
             if st.button("π", key="delete_"+file):
                 os.remove(file)
                 st.experimental_rerun()
-
+
 if __name__ == "__main__":
     main()
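all_files is used by this loop but defined elsewhere in main(). A plausible sketch of how it could be gathered, assuming a glob over the extensions the app writes; the patterns and the ordering are guesses, not part of this commit:

import glob
import os

# Assumed patterns: the text outputs the app writes plus recorded wav files.
all_files = []
for pattern in ("*.txt", "*.htm", "*.md", "*.wav"):
    all_files.extend(glob.glob(pattern))
all_files.sort(key=os.path.getmtime, reverse=True)  # newest first; ordering is a guess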