bluenevus committed on
Commit da7b836 · verified · 1 Parent(s): 54c226c

Update app.py

Files changed (1): app.py (+3 -114)
app.py CHANGED
@@ -29,118 +29,7 @@ qwen_model_name = "Qwen/Qwen2.5-3B-Instruct"
 qwen_tokenizer = AutoTokenizer.from_pretrained(qwen_model_name, trust_remote_code=True)
 qwen_model = AutoModelForCausalLM.from_pretrained(qwen_model_name, trust_remote_code=True).to(device)
 
-def download_audio_from_url(url):
-    try:
-        if "youtube.com" in url or "youtu.be" in url:
-            print("Processing YouTube URL...")
-            yt = YouTube(url)
-            audio_stream = yt.streams.filter(only_audio=True).first()
-            with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as temp_file:
-                audio_stream.download(output_path=temp_file.name)
-                audio_bytes = open(temp_file.name, "rb").read()
-            os.unlink(temp_file.name)
-        elif "share" in url:
-            print("Processing shareable link...")
-            response = requests.get(url)
-            soup = BeautifulSoup(response.content, 'html.parser')
-            video_tag = soup.find('video')
-            if video_tag and 'src' in video_tag.attrs:
-                video_url = video_tag['src']
-                print(f"Extracted video URL: {video_url}")
-            else:
-                raise ValueError("Direct video URL not found in the shareable link.")
-            response = requests.get(video_url)
-            audio_bytes = response.content
-        else:
-            print(f"Downloading video from URL: {url}")
-            response = requests.get(url)
-            audio_bytes = response.content
-
-        print(f"Successfully downloaded {len(audio_bytes)} bytes of data")
-        return audio_bytes
-    except Exception as e:
-        print(f"Error in download_audio_from_url: {str(e)}")
-        raise
-
-def transcribe_audio(audio_file):
-    try:
-        print("Loading audio file...")
-        audio = AudioSegment.from_file(audio_file)
-        audio = audio.set_channels(1).set_frame_rate(16000)
-        audio_array = torch.tensor(audio.get_array_of_samples()).float()
-
-        print(f"Audio duration: {len(audio) / 1000:.2f} seconds")
-        print("Starting transcription...")
-        input_features = whisper_processor(audio_array, sampling_rate=16000, return_tensors="pt").input_features.to(device)
-
-        # Create attention mask
-        attention_mask = torch.ones_like(input_features)
-
-        # Generate with specific parameters
-        predicted_ids = whisper_model.generate(
-            input_features,
-            attention_mask=attention_mask,
-            language='en',
-            task='translate'
-        )
-        transcription = whisper_processor.batch_decode(predicted_ids, skip_special_tokens=True)
-
-        print(f"Transcription complete. Length: {len(transcription[0])} characters")
-        if len(transcription[0]) < 10:
-            raise ValueError(f"Transcription too short: {transcription[0]}")
-        return transcription[0]
-    except Exception as e:
-        print(f"Error in transcribe_audio: {str(e)}")
-        raise
-
-def separate_speakers(transcription):
-    print("Starting speaker separation...")
-    prompt = f"""Analyze the following transcribed text and separate it into different speakers. Identify potential speaker changes based on context, content shifts, or dialogue patterns. Format the output as follows:
-
-1. Label speakers as "Speaker 1", "Speaker 2", etc.
-2. Start each speaker's text on a new line beginning with their label.
-3. Separate different speakers' contributions with a blank line.
-4. If the same speaker continues, do not insert a blank line or repeat the speaker label.
-
-Now, please process the following transcribed text:
-
-{transcription}
-"""
-
-    inputs = qwen_tokenizer(prompt, return_tensors="pt").to(device)
-    with torch.no_grad():
-        outputs = qwen_model.generate(**inputs, max_new_tokens=4000)
-    result = qwen_tokenizer.decode(outputs[0], skip_special_tokens=True)
-
-    # Extract the processed text (remove the instruction part)
-    processed_text = result.split("Now, please process the following transcribed text:")[-1].strip()
-
-    print("Speaker separation complete.")
-    return processed_text
-
-def transcribe_video(url):
-    try:
-        print(f"Attempting to download audio from URL: {url}")
-        audio_bytes = download_audio_from_url(url)
-        print(f"Successfully downloaded {len(audio_bytes)} bytes of audio data")
-
-        with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as temp_audio:
-            AudioSegment.from_file(io.BytesIO(audio_bytes)).export(temp_audio.name, format="wav")
-            transcript = transcribe_audio(temp_audio.name)
-
-        os.unlink(temp_audio.name)
-
-        if len(transcript) < 10:
-            raise ValueError("Transcription too short, possibly failed")
-
-        print("Separating speakers...")
-        separated_transcript = separate_speakers(transcript)
-
-        return separated_transcript
-    except Exception as e:
-        error_message = f"An error occurred: {str(e)}"
-        print(error_message)
-        return error_message
+# ... (keep all the existing functions as they are)
 
 app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
 
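A note on the removed separate_speakers: decoder-only models such as Qwen echo the prompt in their output, so the code recovers the generated part by string-splitting on the prompt's final marker line. Slicing by prompt token count is a marker-independent variant. A minimal sketch under the same assumptions (the helper name generate_continuation is hypothetical; model and tokenizer stand in for qwen_model and qwen_tokenizer):

import torch

def generate_continuation(model, tokenizer, prompt, max_new_tokens=4000):
    # Tokenize and generate exactly as separate_speakers does.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=max_new_tokens)
    # outputs[0] holds the prompt tokens followed by the new tokens; slice off
    # the prompt by length instead of splitting the decoded string on a marker.
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

This avoids breaking if the model paraphrases or drops the marker phrase when echoing the prompt.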
@@ -190,7 +79,7 @@ def update_transcription(n_clicks, url):
     if thread.is_alive():
         return "Transcription timed out after 10 minutes", {'display': 'none'}
 
-    transcript = thread.result if hasattr(thread, 'result') else "Transcription failed"
+    transcript = getattr(thread, 'result', "Transcription failed")
 
     if transcript and not transcript.startswith("An error occurred"):
         return dbc.Card([
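The getattr form in this hunk is equivalent to the hasattr conditional it replaces: the worker thread stashes its return value as an attribute on the thread object (plain threading.Thread has no built-in result channel), and getattr supplies the fallback when the worker died before setting it. A standalone sketch of the pattern, with a stand-in task in place of the app's transcription call (run_with_timeout is hypothetical):

import threading

def run_with_timeout(work, timeout_seconds):
    # The lambda closes over `thread` and stashes the return value on it.
    thread = threading.Thread(target=lambda: setattr(thread, "result", work()), daemon=True)
    thread.start()
    thread.join(timeout_seconds)
    if thread.is_alive():
        return "Transcription timed out"
    # Same effect as: thread.result if hasattr(thread, 'result') else "Transcription failed"
    return getattr(thread, "result", "Transcription failed")

print(run_with_timeout(lambda: "done", timeout_seconds=5))  # -> done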
@@ -217,5 +106,5 @@ def download_transcript(n_clicks, transcription_output):
 
 if __name__ == '__main__':
     print("Starting the Dash application...")
-    app.run(debug=True, host='0.0.0.0', port=7860)
+    app.run_server(debug=True, host='0.0.0.0', port=7860)
     print("Dash application has finished running.")
 