import gradio as gr
import whisper  # openai-whisper; also requires the ffmpeg binary to be installed on the system


def transcribe_audio(audio_file):
    """Transcribe an uploaded audio file with the Whisper "small" model."""
    model = whisper.load_model("small")
    # fp16=False forces float32 inference and avoids the half-precision warning on CPU-only machines.
    result = model.transcribe(audio_file, fp16=False)
    return result["text"]


# def inference():
#     html = "..."  # HTML snippet embedding an image ("image One"); markup elided
#     return html
" # ) # return html def main(): audio_input = gr.inputs.Audio(source="upload", type="filepath") output_text = gr.outputs.Textbox() logo_html = 'Logo' # Balise HTML pour le logo # Créer un composant Streamlit avec le logo components.html(logo_html) iface = gr.Interface(fn=transcribe_audio, inputs=audio_input, outputs=output_text, title="Transciption Audio DIGITALIXSA", description="Charger l'audio") iface.launch(auth=("admin", "pass1234")) if __name__ == '__main__': main() #