#import gradio as gr
#gr.Interface.load("models/pyannote/speaker-diarization").launch()

import tempfile

from fastapi import FastAPI, File, UploadFile
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse
from pyannote.audio import Pipeline  # used below by Pipeline.from_pretrained

from transformers import pipeline  # the Hugging Face framework
#from datasets import load_dataset, Audio  # this is for training my model


app = FastAPI()

#deepneurones = pipeline("text2text-generation", model="google/flan-t5-small")
#deepneurones = pipeline("automatic-speech-recognition")# la liste des pipelines de huggingface est disponible ici :https://huggingface.co/docs/transformers/quicktour. pipeline() telecharge dans un cache local le modele deeplearning
#deepneurones= pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h") # il faut choisir un modele
deepneurones = Pipeline.from_pretrained("pyannote/speaker-diarization")


@app.get("/healthcheck")
def healthcheck():
    #output = deepneurones(input)
    #pipeline("file.wav")
    return {"output": "OK"}


@app.post("/stt")
async def stt(file: UploadFile = File(...)):
    #file_content = base64.b64decode(file)
    file_content = await file.read()
    #dataset = load_dataset("PolyAI/minds14", name="en-US", split="train")
    # pyannote expects a file path, not raw bytes: stage the upload on disk
    with tempfile.NamedTemporaryFile(suffix=".wav") as tmp:
        tmp.write(file_content)
        tmp.flush()
        diarization = deepneurones(tmp.name)
    # the Annotation result is not JSON-serializable; return plain segments
    results = [{"start": turn.start, "end": turn.end, "speaker": speaker}
               for turn, _, speaker in diarization.itertracks(yield_label=True)]
    return {"output": results}
#app.mount("/", StaticFiles(directory="static", html=True), name="static")


@app.get("/")
def index() -> FileResponse:
    return FileResponse(path="/home/user/app/index.html", media_type="text/html")
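
# Example client call (a sketch: it assumes the app is served on port 7860,
# the Hugging Face Spaces default, and that "meeting.wav" exists locally):
#
#   import requests
#   with open("meeting.wav", "rb") as f:
#       r = requests.post("http://localhost:7860/stt", files={"file": f})
#   print(r.json())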