Update app.py
app.py CHANGED
@@ -24,59 +24,48 @@ choice = st.sidebar.selectbox("Output File Type:", menu)
 model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))


-
-audio_bytes = audio_recorder()
-if audio_bytes:
-    now = datetime.datetime.now()
-    filename = f"audio_{now.strftime('%Y%m%d_%H%M%S')}.wav"
-    with open(filename, 'wb') as f:
-        f.write(audio_bytes)
-    st.audio(audio_bytes, format="audio/wav")
-    return filename
-return None
-
-filename = save_and_play_audio(audio_recorder, st)
-if filename:
-    st.write(f"Audio file has been saved as {filename}")
-else:
-    st.write("No audio data was recorded")
-
-
+# Audio record and transcribe:
 def transcribe_audio_ui(openai_key, file_path):
     OPENAI_API_KEY = openai_key
     OPENAI_API_URL = "https://api.openai.com/v1/audio/transcriptions"
-
     headers = {
         "Authorization": f"Bearer {OPENAI_API_KEY}",
         "Content-Type": "multipart/form-data",
     }
-
-    # Function to call the API
     def transcribe_audio(file_path, model):
         data = {
             'file': open(file_path, 'rb'),
             'model': model,
         }
-
         response = requests.post(OPENAI_API_URL, headers=headers, files=data)
-
         if response.status_code == 200:
             return response.json().get('text')
         else:
             st.error("Error in API call.")
             return None
-
-    # Streamlit UI
-    st.title("Audio Transcription Service")
-
-    audio_file = st.file_uploader("Upload your audio file", type=['mp3'])
-
-    if audio_file is not None:
+    if file_path is not None:
         if st.button("Transcribe"):
-            transcription = transcribe_audio(
+            transcription = transcribe_audio(file_path, "whisper-1")
             st.write(transcription)

-
+def save_and_play_audio(audio_recorder):
+    audio_bytes = audio_recorder()
+    if audio_bytes:
+        now = datetime.datetime.now()
+        filename = f"audio_{now.strftime('%Y%m%d_%H%M%S')}.wav"
+        with open(filename, 'wb') as f:
+            f.write(audio_bytes)
+        st.audio(audio_bytes, format="audio/wav")
+        return filename
+    return None
+
+filename = save_and_play_audio(audio_recorder)
+if filename:
+    st.write(f"Audio file has been saved as {filename}")
+    transcribe_audio_ui(openai.api_key, filename)
+else:
+    st.write("No audio data was recorded")
+

 def chat_with_model(prompt, document_section):
     model = model_choice