# NOTE(review): the lines below are Hugging Face file-viewer residue (status
# text, byte count, commit hashes, line-number gutter) captured along with the
# source; commented out so the module parses. Safe to delete.
# Spaces:
# Runtime error
# Runtime error
# File size: 2,392 Bytes
# 215cca8 d8b9844 eb829ce 215cca8 c89df82 215cca8 eb829ce 0fc8f14 14c2ff3 0fc8f14 eb829ce 215cca8 eb829ce 215cca8 eb829ce c89df82 eb829ce 14c2ff3 eb829ce c89df82 eb829ce c89df82 eb829ce c89df82 eb829ce c89df82 |
# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 |
import streamlit as st
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
import datetime
from transformers import pipeline
import gradio as gr
@st.experimental_singleton  # NOTE(review): deprecated API — newer Streamlit uses st.cache_resource; confirm target version
def get_db_firestore():
    """Initialize firebase_admin from 'test.json' (once per session) and return a Firestore client."""
    service_account = credentials.Certificate('test.json')
    firebase_admin.initialize_app(service_account, {'projectId': u'clinical-nlp-b9117',})
    return firestore.client()
# Module-level singletons shared by the handlers below:
# the Firestore client and the wav2vec2 speech-recognition pipeline.
db = get_db_firestore()
asr = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h")
def transcribe(audio):
    """Run the module-level ASR pipeline on *audio* and return the recognized text."""
    # assumes *audio* is a filepath, matching the Gradio wiring — TODO confirm
    return asr(audio)["text"]
#gr.Interface(
# fn=transcribe,
# inputs=gr.inputs.Audio(source="microphone", type="filepath"),
# outputs="text").launch()
# transformers' default text-classification pipeline; text_to_sentiment reads
# its "label" field, so it is used here as a sentiment classifier.
classifier = pipeline("text-classification")
def speech_to_text(speech):
    """Recognize *speech* audio, persist the transcript, and return the text."""
    transcript = asr(speech)["text"]
    # NOTE(review): `upsertoftheminute` is not defined anywhere in this file —
    # this raises NameError at runtime unless it is provided elsewhere; was
    # `upsert` intended? Confirm before shipping.
    upsertoftheminute(u'TimeSeries', u'DocumentofMinuteText', u'TestUser1', u'🧠🌳Yggdrasil🌳🧠', transcript, 2022)
    return transcript
def text_to_sentiment(text):
    """Classify *text*, persist the predicted label, and return it."""
    predicted_label = classifier(text)[0]["label"]
    # NOTE(review): `upsertoftheminute` is not defined anywhere in this file —
    # this raises NameError at runtime unless it is provided elsewhere; confirm.
    upsertoftheminute(u'TimeSeries', u'DocumentofMinuteSentiment', u'TestUser1', u'🧠🌳Yggdrasil🌳🧠', predicted_label, 2022)
    return predicted_label
def upsert(text):
    """Overwrite the fixed Text2SpeechSentimentSave document with today's date and return it via select().

    Note: the *text* argument is currently ignored — only the date is stored.
    The same document id is reused, so each call clobbers the previous record.
    """
    today = str(datetime.datetime.today()).split()[0]
    doc_ref = db.collection('Text2SpeechSentimentSave').document('Text2SpeechSentimentSave')
    doc_ref.set({u'firefield': 'Text2SpeechSentimentSave', u'first': 'Text2SpeechSentimentSave', u'last': 'Text2SpeechSentimentSave', u'born': today,})
    return select('Text2SpeechSentimentSave', 'Text2SpeechSentimentSave')
def select(collection, document):
    """Fetch *document* from *collection* in Firestore.

    Returns a 2-tuple ("The contents are: ", <dict of fields, or None if the
    document does not exist>) — callers display it as a label.
    """
    snapshot = db.collection(collection).document(document).get()
    # Removed dead local `docid`: it was built from snapshot.id and never used.
    return ("The contents are: ", snapshot.to_dict())
# Gradio UI: microphone -> transcript -> sentiment, with a save button.
demo = gr.Blocks()
with demo:
    # audio_file = gr.Audio(type="filepath")  # newer Gradio equivalent of the line below
    audio_file = gr.inputs.Audio(source="microphone", type="filepath")
    text = gr.Textbox()
    label = gr.Label()
    saved = gr.Label()
    b1 = gr.Button("Recognize Speech")
    b2 = gr.Button("Classify Sentiment")
    b3 = gr.Button("Save Speech to Text")
    b1.click(speech_to_text, inputs=audio_file, outputs=text)
    b2.click(text_to_sentiment, inputs=text, outputs=label)
    # BUG FIX: b3 ("Save Speech to Text") was wired to text_to_sentiment — a
    # duplicate of b2 that never saved anything. Wire it to upsert so the
    # button's action matches its label.
    b3.click(upsert, inputs=text, outputs=saved)
demo.launch(share=True)