awacke1's picture
Update app.py
c4892d1 verified
raw
history blame
1.97 kB
import gradio as gr
import edge_tts
import asyncio
import tempfile
import os
from huggingface_hub import InferenceClient
import re
from streaming_stt_nemo import Model
import torch
import random
import pandas as pd
from datetime import datetime
# ... (previous code remains the same)
# Session-wide log of every request/response pair, one row per interaction.
HISTORY_COLUMNS = ['Timestamp', 'Request', 'Response']
history_df = pd.DataFrame(columns=HISTORY_COLUMNS)
def models(text, model="Mixtral 8x7B", seed=42):
    """Generate a text reply for `text` from the selected hosted model.

    Parameters
    ----------
    text : str
        The user's prompt.
    model : str
        Display name of the model, resolved to an inference client by
        `client_fn` (defined in the omitted portion of this file).
    seed : int
        Generation seed, normalized via `randomize_seed_fn` (also defined
        in the omitted portion).

    Returns
    -------
    str
        The concatenated streamed output with the "</s>" stop token removed.

    Side effects
    ------------
    Appends one (timestamp, request, response) row to the module-level
    `history_df` DataFrame.
    """
    global history_df

    seed = int(randomize_seed_fn(seed))
    client = client_fn(model)
    # NOTE: the original code also built a `torch.Generator` seeded with
    # `seed`, but never used it; the local generator had no effect on the
    # remote text_generation call, so it has been removed.

    generate_kwargs = dict(
        max_new_tokens=300,
        seed=seed,
    )

    # NOTE(review): assumes `system_instructions1` ends where the user turn
    # begins and "[JARVIS]" opens the assistant turn — confirm against the
    # omitted prompt template.
    formatted_prompt = system_instructions1 + text + "[JARVIS]"
    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True,
        return_full_text=False)

    # Accumulate streamed tokens, skipping the end-of-sequence marker.
    output = ""
    for response in stream:
        if response.token.text != "</s>":
            output += response.token.text

    # Record this interaction in the session history.
    new_row = pd.DataFrame({
        'Timestamp': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
        'Request': [text],
        'Response': [output],
    })
    history_df = pd.concat([history_df, new_row], ignore_index=True)

    return output
async def respond(audio, model, seed):
    """Transcribe `audio`, generate a model reply, and yield it as speech.

    Yields the path of a temporary WAV file containing the synthesized
    reply (via edge-tts). The file is created with delete=False so the
    caller (Gradio) can read it after this generator resumes.
    """
    transcript = transcribe(audio)
    answer = models(transcript, model, seed)
    speech = edge_tts.Communicate(answer)
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as handle:
        wav_path = handle.name
        await speech.save(wav_path)
    yield wav_path
def display_history():
    """Return the module-level request/response history for display."""
    return history_df
def download_history():
    """Serialize the interaction history to CSV text, without the index."""
    csv_text = history_df.to_csv(index=False)
    return csv_text
# ... (rest of the code remains the same)
if __name__ == "__main__":
    # `demo` is the Gradio Blocks/Interface built in the omitted portion of
    # this file. Queue up to 200 pending requests; share=True publishes a
    # temporary public link.
    demo.queue(max_size=200).launch(share=True)