Update app.py
app.py CHANGED
@@ -53,17 +53,19 @@ def process_image(image_input):
 
 def process_audio(audio_input):
     if audio_input:
-        transcription =
+        transcription = client.audio.transcriptions.create(
             model="whisper-1",
             file=audio_input,
         )
-        response =
+        response = client.chat.completions.create(
             model=MODEL,
-
-
-
+            messages=[
+                {"role": "system", "content":"""You are generating a transcript summary. Create a summary of the provided transcription. Respond in Markdown."""},
+                {"role": "user", "content": [{"type": "text", "text": f"The audio transcription is: {transcription.text}"}],}
+            ],
+            temperature=0,
         )
-        st.markdown(response.choices[0].
+        st.markdown(response.choices[0].message.content)
 
 def process_video(video_input):
     if video_input:
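For context, here is a minimal sketch of how the updated `process_audio` function could be wired into the Streamlit app. The OpenAI client construction, the `MODEL` value, and the `st.file_uploader` call are illustrative assumptions; they are defined elsewhere in app.py and are not part of this commit.

```python
# Sketch only: assumes OPENAI_API_KEY is set in the environment and that
# MODEL is defined elsewhere in app.py (a placeholder value is used here).
import streamlit as st
from openai import OpenAI

client = OpenAI()   # assumption: client created elsewhere in app.py
MODEL = "gpt-4o"    # assumption: placeholder for the app's actual model name

def process_audio(audio_input):
    if audio_input:
        # Transcribe the uploaded audio with Whisper ...
        transcription = client.audio.transcriptions.create(
            model="whisper-1",
            file=audio_input,
        )
        # ... then ask the chat model to summarize the transcript as Markdown.
        response = client.chat.completions.create(
            model=MODEL,
            messages=[
                {"role": "system", "content": """You are generating a transcript summary. Create a summary of the provided transcription. Respond in Markdown."""},
                {"role": "user", "content": [{"type": "text", "text": f"The audio transcription is: {transcription.text}"}],}
            ],
            temperature=0,
        )
        st.markdown(response.choices[0].message.content)

# Assumed wiring: the uploaded file object is passed straight to the function.
audio_input = st.file_uploader("Upload an audio file", type=["mp3", "wav", "m4a"])
process_audio(audio_input)
```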