Update app.py
app.py
CHANGED
@@ -4,7 +4,7 @@ from langdetect import detect
 from transformers import pipeline
 import os

-openai.api_key = os.getenv("OPENAI_API_KEY") # Set this
+openai.api_key = os.getenv("OPENAI_API_KEY") # Set this in HF Space secrets

 summarizer = pipeline("summarization", model="facebook/bart-large-cnn")

@@ -12,16 +12,15 @@ def process_audio(audio_path):
     if not audio_path or not isinstance(audio_path, str):
         return "No audio file provided.", "", "", ""
     try:
-        # Send audio to OpenAI Whisper API
         with open(audio_path, "rb") as audio_file:
-
+            transcript = openai.audio.transcriptions.create(
                 model="whisper-1",
                 file=audio_file,
                 response_format="text"
             )
-        transcript =
+        transcript = str(transcript).strip() # Force to string
     except Exception as e:
-        return f"Error in transcription: {e}", "", "", ""
+        return f"Error in transcription: {str(e)}", "", "", ""
     try:
         detected_lang = detect(transcript)
     except Exception:
@@ -31,22 +30,22 @@ def process_audio(audio_path):
     transcript_en = transcript
     if detected_lang != "en":
         try:
-            # Re-send with task=translate for translation to English
             with open(audio_path, "rb") as audio_file:
-
+                transcript_en = openai.audio.translations.create(
                     model="whisper-1",
                     file=audio_file,
                     response_format="text"
                 )
-            transcript_en =
+            transcript_en = str(transcript_en).strip()
         except Exception as e:
-            transcript_en = f"Error translating: {e}"
+            transcript_en = f"Error translating: {str(e)}"
     try:
         summary = summarizer(transcript_en, max_length=100, min_length=30, do_sample=False)
         summary_text = summary[0]["summary_text"]
+        summary_text = str(summary_text)
     except Exception as e:
-        summary_text = f"Error summarizing: {e}"
-    return lang_text, transcript, transcript_en, summary_text
+        summary_text = f"Error summarizing: {str(e)}"
+    return str(lang_text), str(transcript), str(transcript_en), str(summary_text)

 iface = gr.Interface(
     fn=process_audio,
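
For reference, a minimal standalone sketch of the Whisper calls the updated app.py now makes, assuming the openai>=1.0 Python SDK (module-level client) and an OPENAI_API_KEY environment variable; the helper names and the sample.wav path are illustrative only, not part of the Space.

import os

import openai

# Assumption: openai>=1.0 SDK; the module-level client reads the key set here.
openai.api_key = os.getenv("OPENAI_API_KEY")

def transcribe(audio_path):
    # response_format="text" makes the endpoint return the transcript as a plain string.
    with open(audio_path, "rb") as audio_file:
        transcript = openai.audio.transcriptions.create(
            model="whisper-1",
            file=audio_file,
            response_format="text",
        )
    # str(...).strip() mirrors the defensive coercion added in this commit.
    return str(transcript).strip()

def translate_to_english(audio_path):
    # The translations endpoint re-reads the same file and returns English text.
    with open(audio_path, "rb") as audio_file:
        translated = openai.audio.translations.create(
            model="whisper-1",
            file=audio_file,
            response_format="text",
        )
    return str(translated).strip()

if __name__ == "__main__":
    print(transcribe("sample.wav"))  # hypothetical local file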
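
The diff cuts off after fn=process_audio, so the rest of the gr.Interface wiring is not shown. Below is a hedged sketch of how it might look, given that process_audio takes one audio filepath and returns four strings; the stub function, component choices, and labels are assumptions, not taken from the Space.

import gradio as gr

def process_audio(audio_path):
    # Stand-in with the same signature and return shape as the real function in app.py.
    return "en", "transcript", "transcript (en)", "summary"

iface = gr.Interface(
    fn=process_audio,
    inputs=gr.Audio(type="filepath"),  # the real function expects a filepath string
    outputs=[
        gr.Textbox(label="Detected language"),
        gr.Textbox(label="Original transcript"),
        gr.Textbox(label="English transcript"),
        gr.Textbox(label="Summary"),
    ],
)

if __name__ == "__main__":
    iface.launch()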