from __future__ import annotations

import io
import os
import re
import subprocess
import textwrap
import time
import uuid
import wave

import emoji
import gradio as gr
import langid
import nltk
import numpy as np
import noisereduce as nr
from huggingface_hub import HfApi

# Download the 'punkt' tokenizer for the NLTK library
nltk.download("punkt")

# We use the HF Hub API to restart the Space on an unrecoverable CUDA error
HF_TOKEN = os.environ.get("HF_TOKEN")
REPO_ID = os.environ.get("REPO_ID")
api = HfApi(token=HF_TOKEN)

latent_map = {}

def get_latents(chatbot_voice, xtts_model, voice_cleanup=False):
    global latent_map
    if chatbot_voice not in latent_map:
        speaker_wav = f"examples/{chatbot_voice}.wav"
        if voice_cleanup:
            try:
                cleanup_filter = "lowpass=8000,highpass=75,areverse,silenceremove=start_periods=1:start_silence=0:start_threshold=0.02,areverse,silenceremove=start_periods=1:start_silence=0:start_threshold=0.02"
                resample_filter = "-ac 1 -ar 22050"
                out_filename = speaker_wav + str(uuid.uuid4()) + ".wav"  # .wav suffix so ffmpeg knows the output format
                # A newer ffmpeg is preferred, since it ships the afftdn denoise filter
                shell_command = f"ffmpeg -y -i {speaker_wav} -af {cleanup_filter} {resample_filter} {out_filename}".split(" ")
                subprocess.run(shell_command, capture_output=False, text=True, check=True)
                speaker_wav = out_filename
                print("Filtered microphone input")
            except subprocess.CalledProcessError:
                # ffmpeg exited with a non-zero code; fall back to the original recording
                print("Error: failed filtering, using original microphone input")
        # get_conditioning_latents returns a (gpt_cond_latent, speaker_embedding) tuple
        latent_map[chatbot_voice] = xtts_model.get_conditioning_latents(audio_path=speaker_wav)
    return latent_map[chatbot_voice]
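
# The cache above means each voice pays the conditioning cost only once. A minimal
# sketch of pre-warming it at startup (hypothetical helper, not called anywhere in
# this file; assumes the voice names match wav files under examples/):
def warm_latent_cache(xtts_model, voices):
    for voice in voices:
        get_latents(voice, xtts_model)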

  
def detect_language(prompt, xtts_supported_languages=None):
    if xtts_supported_languages is None:
        xtts_supported_languages = ["en", "es", "fr", "de", "it", "pt", "pl", "tr", "ru", "nl", "cs", "ar", "zh-cn", "ja"]

    # Fast language autodetection
    if len(prompt) > 15:
        language_predicted = langid.classify(prompt)[0].strip()  # strip needed, result can carry trailing whitespace
        if language_predicted == "zh":
            # XTTS expects zh-cn rather than zh
            language_predicted = "zh-cn"

        if language_predicted not in xtts_supported_languages:
            print(f"Detected a language not supported by XTTS: {language_predicted}, switching to English for now")
            gr.Warning(f"Language detected '{language_predicted}' cannot be spoken properly yet")
            language = "en"
        else:
            language = language_predicted
        print(f"Language: predicted sentence language: {language_predicted}, using language for XTTS: {language}")
    else:
        # Reliable detection is hard on a short sentence, so default to English
        language = "en"
        print("Language: prompt is too short for autodetection, using English for XTTS")

    return language
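
# Illustrative only (hypothetical helper, never called): prompts of 15 characters
# or fewer skip detection entirely, longer ones go through langid.
def _detect_language_demo():
    print(detect_language("Hi"))  # "en": too short for reliable detection
    print(detect_language("¿Dónde está la biblioteca más cercana?"))  # likely "es"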
    
def get_voice_streaming(prompt, language, chatbot_voice, xtts_model, suffix="0"):
    gpt_cond_latent, speaker_embedding = get_latents(chatbot_voice, xtts_model)
    try:
        t0 = time.time()
        chunks = xtts_model.inference_stream(
            prompt,
            language,
            gpt_cond_latent,
            speaker_embedding,
            repetition_penalty=7.0,
            temperature=0.85,
        )

        first_chunk = True
        for i, chunk in enumerate(chunks):
            if first_chunk:
                first_chunk_time = time.time() - t0
                print(f"Latency to first audio chunk: {round(first_chunk_time*1000)} milliseconds")
                first_chunk = False
            # print(f"Received chunk {i} of audio length {chunk.shape[-1]}")

            # To write each chunk to its own voice file instead:
            # out_file = f'{char}_{i}.wav'
            # write(out_file, 24000, chunk.detach().cpu().numpy().squeeze())
            # audio = AudioSegment.from_file(out_file)
            # audio.export(out_file, format='wav')
            # return out_file

            # Directly return the chunk as 16-bit PCM bytes for streaming
            chunk = chunk.detach().cpu().numpy().squeeze()
            chunk = (chunk * 32767).astype(np.int16)
            yield chunk.tobytes()

    except RuntimeError as e:
        if "device-side assert" in str(e):
            # Nothing can be done about a CUDA device-side assert; the Space must restart
            print(
                f"Exit due to: unrecoverable exception caused by prompt: {prompt}",
                flush=True,
            )
            gr.Warning("Unhandled exception encountered, please retry in a minute")
            print("CUDA device-side assert encountered, restart needed")

            # HF Space specific: this error is unrecoverable, restart the Space
            api.restart_space(repo_id=REPO_ID)
        else:
            print("RuntimeError: non device-side assert error:", str(e))
            # No warning needed: this happens on an empty chunk and at the end of the stream
            return None
        return None
    except Exception:
        return None

def wave_header_chunk(frame_input=b"", channels=1, sample_width=2, sample_rate=24000):
    # Create a WAV header and append the frame input.
    # The header must come first in a streamed WAV file; later chunks should not
    # carry one, or you will hear an artifact at the start of each chunk.
    wav_buf = io.BytesIO()
    with wave.open(wav_buf, "wb") as vfout:
        vfout.setnchannels(channels)
        vfout.setsampwidth(sample_width)
        vfout.setframerate(sample_rate)
        vfout.writeframes(frame_input)

    wav_buf.seek(0)
    return wav_buf.read()
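
# Sketch of the intended streaming layout (hypothetical generator, not used in
# this file): one WAV header up front, then raw 16-bit PCM frames with no
# further headers.
def _streamed_wav(pcm_chunks):
    yield wave_header_chunk()
    for chunk in pcm_chunks:
        yield chunk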

def format_prompt(message, history):
    # This must be a plain string, not an f-string: the literal {Happy, Sad,
    # Instrumental, Party} braces below would otherwise be evaluated as a set
    # of undefined names and raise a NameError.
    system_message = """
            You are a smart mood analyser who determines the user's mood. Based on the user input, classify the mood of the user into one of the four moods {Happy, Sad, Instrumental, Party}. If you find it difficult to classify into one of these four moods, keep the conversation going until we classify the user's mood. Return a single-word reply from one of the options once you have classified. For example, if you classify a sentence as happy, just respond with "happy".

            Note: Do not write anything else other than the classified mood if classified.

            Note: If any question or any user text cannot be classified, follow up with a question to know the user's mood until you classify the mood.

            Note: Mood should be classified only into one of these 4 classes {Happy, Sad, Instrumental, Party}; if it is not any of these 4, continue with follow-up questions until you classify the mood.

            Note: If the user asks something like "I need a coffee", do not classify the mood directly; ask more follow-up questions as in the examples.

            Examples
            User: What is C programming?
            LLM Response: C programming is a programming language. How are you feeling now after knowing the answer?

            User: Can I get a coffee?
            LLM Response: It sounds like you're in need of a little pick-me-up. How are you feeling right now? Are you looking for something upbeat, something to relax to, or maybe some instrumental music while you enjoy your coffee?
            User: I feel like rocking
            LLM Response: Party

            User: I'm feeling so energetic today!
            LLM Response: Happy

            User: I'm feeling down today.
            LLM Response: Sad

            User: I'm ready to have some fun tonight!
            LLM Response: Party

            User: I need some background music while I am stuck in traffic.
            LLM Response: Instrumental

            User: Hi
            LLM Response: Hi, how are you doing?

            User: Feeling okay only.
            LLM Response: Are you having a good day?
            User: I don't know
            LLM Response: Do you want to listen to some relaxing music?
            User: No
            LLM Response: How about listening to some rock and roll music?
            User: Yes
            LLM Response: Party

            User: Where do I find an encyclopedia?
            LLM Response: You can find it in any of the libraries or on the Internet. Does this answer make you happy?

            User: I need a coffee
            LLM Response: It sounds like you're in need of a little pick-me-up. How are you feeling right now? Are you looking for something upbeat, something to relax to, or maybe some instrumental music while you enjoy your coffee?

            User: I just got promoted at work!
            LLM Response: Happy

            User: Today is my birthday!
            LLM Response: Happy

            User: I won a prize in the lottery.
            LLM Response: Happy

            User: I am so excited about my vacation next week!
            LLM Response: Happy

            User: I aced my exams!
            LLM Response: Happy

            User: I had a wonderful time with my family today.
            LLM Response: Happy

            User: I just finished a great workout!
            LLM Response: Happy

            User: I am feeling really good about myself today.
            LLM Response: Happy

            User: I finally finished my project and it was a success!
            LLM Response: Happy

            User: I just heard my favorite song on the radio.
            LLM Response: Happy

            User: My pet passed away yesterday.
            LLM Response: Sad

            User: I lost my job today.
            LLM Response: Sad

            User: I'm feeling really lonely.
            LLM Response: Sad

            User: I didn't get the results I wanted.
            LLM Response: Sad

            User: I had a fight with my best friend.
            LLM Response: Sad

            User: I'm feeling really overwhelmed with everything.
            LLM Response: Sad

            User: I just got some bad news.
            LLM Response: Sad

            User: I'm missing my family.
            LLM Response: Sad

            User: I am feeling really down today.
            LLM Response: Sad

            User: Nothing seems to be going right.
            LLM Response: Sad

            User: I need some music while I study.
            LLM Response: Instrumental

            User: I want to listen to something soothing while I work.
            LLM Response: Instrumental

            User: Do you have any recommendations for background music?
            LLM Response: Instrumental

            User: I'm looking for some relaxing tunes.
            LLM Response: Instrumental

            User: I need some music to focus on my tasks.
            LLM Response: Instrumental

            User: Can you suggest some ambient music for meditation?
            LLM Response: Instrumental

            User: What's good for background music during reading?
            LLM Response: Instrumental

            User: I need some calm music to help me sleep.
            LLM Response: Instrumental

            User: I prefer instrumental music while cooking.
            LLM Response: Instrumental

            User: What's the best music to play while doing yoga?
            LLM Response: Instrumental

            User: Let's have a blast tonight!
            LLM Response: Party

            User: I'm in the mood to dance!
            LLM Response: Party

            User: I want to celebrate all night long!
            LLM Response: Party

            User: Time to hit the club!
            LLM Response: Party

            User: I feel like partying till dawn.
            LLM Response: Party

            User: Let's get this party started!
            LLM Response: Party

            User: I'm ready to party hard tonight.
            LLM Response: Party

            User: I'm in the mood for some loud music and dancing!
            LLM Response: Party

            User: Tonight's going to be epic!
            LLM Response: Party

            User: Let's turn up the music and have some fun!
            LLM Response: Party
"""
    prompt = (
        "<s>[INST]" + system_message + "[/INST]"
    )
    for user_prompt, bot_response in history:
        if user_prompt is not None:
            prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    
    if message == "":
        message = "Hello"
    prompt += f"[INST] {message} [/INST]"
    return prompt
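
# For reference, the resulting Mistral-instruct layout for a one-turn history
# looks roughly like this (system message abbreviated):
# <s>[INST]<system>[/INST][INST] hi [/INST] Hi, how are you doing?</s> [INST] <new message> [/INST]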

def generate_llm_output(
    prompt,
    history,
    llm,
    temperature=0.8,
    max_tokens=256,
    top_p=0.95,
    stop_words=["<s>", "[/INST]", "</s>"],
):
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_tokens=max_tokens,
        top_p=top_p,
        stop=stop_words,
    )
    formatted_prompt = format_prompt(prompt, history)
    try:
        print("LLM Input:", formatted_prompt)
        # Local GGUF model
        stream = llm(
            formatted_prompt,
            **generate_kwargs,
            stream=True,
        )
        output = ""
        for response in stream:
            character = response["choices"][0]["text"]

            if character in stop_words:
                # End of context
                return

            if emoji.is_emoji(character):
                # Emoji tokens carry no speakable meaning and corrupt the chat from the next lines on
                return

            output += character
            yield output

    except Exception as e:
        print("Unhandled Exception: ", str(e))
        gr.Warning("Unfortunately Mistral is unable to process")
        output = "I do not know what happened, but I could not understand you."
        yield output
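
# Consumption sketch (hypothetical, not wired up here; assumes `llm` is the
# llama-cpp-python model instance this app constructs elsewhere). Each yield is
# the full text generated so far, not a delta:
def _llm_demo(llm):
    for partial in generate_llm_output("I need a coffee", [], llm):
        print(partial)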
    
def get_sentence(history, llm):
    history = [["", None]] if history is None else history
    history[-1][1] = ""
    sentence_list = []
    sentence_hash_list = []

    text_to_generate = ""
    stored_sentence = None
    stored_sentence_hash = None

    for character in generate_llm_output(history[-1][0], history[:-1], llm):
        history[-1][1] = character.replace("<|assistant|>", "")
        # The output arrives word by word; re-tokenize the text so far into sentences
        text_to_generate = nltk.sent_tokenize(history[-1][1].replace("\n", " ").replace("<|assistant|>", " ").replace("<|ass>", "").replace("[/ASST]", "").replace("[/ASSI]", "").replace("[/ASS]", "").strip())
        if len(text_to_generate) > 1:

            dif = len(text_to_generate) - len(sentence_list)

            if dif == 1 and len(sentence_list) != 0:
                continue

            if dif == 2 and len(sentence_list) != 0 and stored_sentence is not None:
                continue

            # This complexity comes from appending a short first sentence to the
            # next one so that language autodetection has enough text to work with
            if stored_sentence is not None and stored_sentence_hash is None and dif > 1:
                # The stored sentence was consumed; look at the next generated sentence
                sentence = text_to_generate[len(sentence_list) + 1]
            elif stored_sentence is not None and len(text_to_generate) > 2 and stored_sentence_hash is not None:
                print("Appending stored")
                sentence = stored_sentence + text_to_generate[len(sentence_list) + 1]
                stored_sentence_hash = None
            else:
                sentence = text_to_generate[len(sentence_list)]

            # A sentence that is too short is appended to the next one, if any,
            # for reliable language detection
            if len(sentence) <= 15 and stored_sentence_hash is None and stored_sentence is None:
                if sentence and sentence[-1] in [".", "!", "?"]:
                    if stored_sentence_hash != hash(sentence):
                        stored_sentence = sentence
                        stored_sentence_hash = hash(sentence)
                        print("Storing:", stored_sentence)
                        continue

            sentence_hash = hash(sentence)
            if stored_sentence_hash is not None and sentence_hash == stored_sentence_hash:
                continue

            if sentence_hash not in sentence_hash_list:
                sentence_hash_list.append(sentence_hash)
                sentence_list.append(sentence)
                print("New Sentence: ", sentence)
                yield (sentence, history)

    # Yield the final sentence once generation has finished
    try:
        last_sentence = nltk.sent_tokenize(history[-1][1].replace("\n", " ").replace("<|ass>", "").replace("[/ASST]", "").replace("[/ASSI]", "").replace("[/ASS]", "").strip())[-1]
        sentence_hash = hash(last_sentence)
        if sentence_hash not in sentence_hash_list:
            if stored_sentence is not None and stored_sentence_hash is not None:
                last_sentence = stored_sentence + last_sentence
                stored_sentence = stored_sentence_hash = None
                print("Last Sentence with stored:", last_sentence)

            sentence_hash_list.append(sentence_hash)
            sentence_list.append(last_sentence)
            print("Last Sentence: ", last_sentence)

            yield (last_sentence, history)
    except Exception:
        print("ERROR on last sentence, history is:", history)
            
# Generates a speech audio file per sentence
def generate_speech_for_sentence(history, chatbot_voice, sentence, xtts_model, xtts_supported_languages=None, filter_output=True, return_as_byte=False):
    language = "autodetect"

    wav_bytestream = b""

    if len(sentence) == 0:
        print("EMPTY SENTENCE")
        return

    # Post-processing for speech only: the prompt's </s> token sometimes leaks
    # into the output, so remove it
    sentence = sentence.replace("</s>", "")
    # Remove code blocks from speech
    sentence = re.sub(r"```.*```", "", sentence, flags=re.DOTALL)
    sentence = re.sub(r"`.*`", "", sentence, flags=re.DOTALL)

    sentence = re.sub(r"\(.*\)", "", sentence, flags=re.DOTALL)

    sentence = sentence.replace("```", "")
    sentence = sentence.replace("...", " ")
    sentence = sentence.replace("(", " ")
    sentence = sentence.replace(")", " ")
    sentence = sentence.replace("<|assistant|>", "")

    if len(sentence) == 0:
        print("EMPTY SENTENCE after processing")
        return

    # A fast fix for the last character; may produce weird sounds if it is attached to text
    # if (sentence[-1] in ["!", "?", ".", ","]) or (sentence[-2] in ["!", "?", ".", ","]):
    #     # just add a space
    #     sentence = sentence[:-1] + " " + sentence[-1]

    # A regex does the job well: pad and double the sentence-final punctuation
    sentence = re.sub(r"([^\x00-\x7F]|\w)(\.|\。|\?|\!)", r"\1 \2\2", sentence)

    print("Sentence for speech:", sentence)

    try:
        SENTENCE_SPLIT_LENGTH = 350
        if len(sentence) < SENTENCE_SPLIT_LENGTH:
            # No problem, continue on
            sentence_list = [sentence]
        else:
            # nltk has likely split sentences properly up to here, but very long
            # sentences still need an additional split at the last possible
            # position: first at hyphens, then at spaces, and finally even inside
            # very long words
            sentence_list = textwrap.wrap(sentence, SENTENCE_SPLIT_LENGTH)
            print("SPLIT LONG SENTENCE:", sentence_list)

        for sentence in sentence_list:

            if any(c.isalnum() for c in sentence):
                if language == "autodetect":
                    # Autodetect on the first call; subsequent sentences reuse the same language
                    language = detect_language(sentence, xtts_supported_languages)

                # At least one alphanumeric (utf-8) character exists
                audio_stream = get_voice_streaming(
                    sentence, language, chatbot_voice, xtts_model
                )
            else:
                # Likely a ' or " or some other text without any alphanumerics in it
                audio_stream = None

            # XTTS streams its response, but we play audio per sentence. For direct
            # XTTS voice streaming (sending each chunk to the voice output), set
            # the DIRECT_STREAM=1 environment variable.
            if audio_stream is not None:
                for chunk in audio_stream:
                    try:
                        wav_bytestream += chunk
                    except Exception:
                        # Hack to keep playing: sometimes the last chunk is empty; the next TTS call recovers
                        continue

        # Filter the accumulated output once for a better voice
        if filter_output and wav_bytestream:
            data_s16 = np.frombuffer(wav_bytestream, dtype=np.int16, count=len(wav_bytestream) // 2, offset=0)
            float_data = data_s16 * 0.5**15
            reduced_noise = nr.reduce_noise(y=float_data, sr=24000, prop_decrease=0.8, n_fft=1024)
            wav_bytestream = (reduced_noise * 32767).astype(np.int16)
            wav_bytestream = wav_bytestream.tobytes()

        if wav_bytestream:
            if not return_as_byte:
                audio_unique_filename = "/tmp/" + str(uuid.uuid4()) + ".wav"
                with wave.open(audio_unique_filename, "w") as f:
                    f.setnchannels(1)
                    # 2 bytes per sample
                    f.setsampwidth(2)
                    f.setframerate(24000)
                    f.writeframes(wav_bytestream)

                return (history, gr.Audio.update(value=audio_unique_filename, autoplay=True))
            else:
                return (history, gr.Audio.update(value=wav_bytestream, autoplay=True))
    except RuntimeError as e:
        if "device-side assert" in str(e):
            # Nothing can be done about a CUDA device-side assert; the Space must restart
            print(
                f"Exit due to: unrecoverable exception caused by prompt: {sentence}",
                flush=True,
            )
            gr.Warning("Unhandled exception encountered, please retry in a minute")
            print("CUDA device-side assert encountered, restart needed")

            # HF Space specific: this error is unrecoverable, restart the Space
            api.restart_space(repo_id=REPO_ID)
        else:
            print("RuntimeError: non device-side assert error:", str(e))
            raise e

    print("All speech ended")
    return