Update app.py
app.py CHANGED
@@ -3,10 +3,14 @@ import tempfile
 import os
 import yt_dlp
 from moviepy.editor import VideoFileClip
-
+
+from openai import OpenAI
+client = OpenAI()
+
+
 
 # Set your OpenAI API key (make sure it's set in Hugging Face Spaces secrets)
-openai.api_key = os.getenv("OPENAI_API_KEY")
+#openai.api_key = os.getenv("OPENAI_API_KEY")
 
 # ---------------------------
 # Helper Functions
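The only functional change in this hunk is swapping the module-level openai.api_key = os.getenv("OPENAI_API_KEY") setup for a client = OpenAI() instance. A minimal sketch of the equivalent key handling with the 1.x SDK, assuming the key is stored as the OPENAI_API_KEY secret of the Space:

import os
from openai import OpenAI

# OpenAI() already reads OPENAI_API_KEY from the environment by default,
# so this explicit form is equivalent to the one-liner in the commit; it
# just makes the dependency on the Space secret visible.
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

If the secret is not set, the 1.x client raises as soon as it is constructed, so a missing key surfaces as a startup failure for the Space rather than on the first API call.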
@@ -43,16 +47,31 @@ def extract_audio(video_path: str) -> str:
         st.error(f"Error extracting audio: {e}")
         return None
 
+# audio_file = open("/path/to/file/speech.mp3", "rb")
+# transcription = client.audio.transcriptions.create(
+#     model="whisper-1",
+#     file=audio_file,
+#     response_format="text"
+# )
+
+# print(transcription.text)
+
 def transcribe_audio(audio_path: str) -> str:
     """Transcribe the audio to text using OpenAI's Whisper API."""
     try:
         with open(audio_path, "rb") as audio_file:
-            transcript =
-
+            transcript = client.audio.transcriptions.create(
+                model="whisper-1",
+                file=audio_file,
+                response_format="text"
+            )
+            return transcript.text
     except Exception as e:
         st.error(f"Error transcribing audio: {e}")
         return ""
 
+
+
 def generate_summary(transcript_text: str) -> str:
     """Generate a concise summary of the transcript using OpenAI."""
     prompt = f"Summarize the following document in a concise manner, highlighting the key points that a student should know:\n\n{transcript_text}"
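The new transcribe_audio body (and the commented scratch block above it) use the 1.x audio API. A standalone sketch of the same call with a placeholder file path; note that, at least in the 1.x SDK versions I have used, response_format="text" makes create() return the transcript as a plain string rather than an object, so the transcript.text access in the committed code is worth double-checking against the installed SDK:

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# "sample.mp3" is a placeholder path used only for this sketch.
with open("sample.mp3", "rb") as audio_file:
    transcript = client.audio.transcriptions.create(
        model="whisper-1",
        file=audio_file,
        response_format="text",  # the 1.x SDK then returns the transcript as a str
    )

print(transcript)  # with the default JSON format, this would be transcript.text instead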
@@ -60,7 +79,7 @@ def generate_summary(transcript_text: str) -> str:
         {"role": "system", "content": "You are an educational assistant."},
         {"role": "user", "content": prompt}
     ]
-    completion =
+    completion = client.chat.completions.create(model="gpt-4o-mini", messages=messages)
     return completion.choices[0].message.content.strip()
 
 def get_chat_response(transcript_text: str, conversation_history: list, user_query: str) -> str:
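In generate_summary, the replacement is the single completion call added here. A minimal standalone sketch of that call with the same system prompt and model; temperature is optional and shown only for illustration (the commit does not set it):

from openai import OpenAI

client = OpenAI()

transcript_text = "..."  # placeholder for the Whisper transcript
prompt = (
    "Summarize the following document in a concise manner, highlighting the key points "
    f"that a student should know:\n\n{transcript_text}"
)
messages = [
    {"role": "system", "content": "You are an educational assistant."},
    {"role": "user", "content": prompt},
]
completion = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=messages,
    temperature=0.3,  # optional knob, not part of the commit
)
print(completion.choices[0].message.content.strip())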
@@ -68,7 +87,7 @@ def get_chat_response(transcript_text: str, conversation_history: list, user_query: str) -> str:
     messages = conversation_history + [
         {"role": "user", "content": f"Based on the following document:\n\n{transcript_text}\n\nQuestion: {user_query}"}
     ]
-    completion =
+    completion = client.chat.completions.create(model="gpt-4o-mini", messages=messages)
     return completion.choices[0].message.content.strip()
 
 # ---------------------------
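get_chat_response gets the same one-line change. A sketch of how its messages compose, under the assumption (consistent with the code above) that conversation_history is a list of role/content dicts; the example turns and query are invented for illustration:

from openai import OpenAI

client = OpenAI()

transcript_text = "..."  # placeholder for the Whisper transcript
conversation_history = [  # prior turns, invented for this sketch
    {"role": "system", "content": "You are an educational assistant."},
    {"role": "user", "content": "What is this lecture about?"},
    {"role": "assistant", "content": "It introduces the topic and its key terms."},
]
user_query = "Which key points should I revise before the exam?"

# Same composition as get_chat_response: prior history plus a transcript-grounded question.
messages = conversation_history + [
    {"role": "user", "content": f"Based on the following document:\n\n{transcript_text}\n\nQuestion: {user_query}"}
]
completion = client.chat.completions.create(model="gpt-4o-mini", messages=messages)
print(completion.choices[0].message.content.strip())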