awacke1's picture
Update app.py
b650068
raw
history blame
4.67 kB
import base64
import glob
import json
import math
import os
from collections import deque
from datetime import datetime
from xml.etree import ElementTree as ET
from zoneinfo import ZoneInfo

import mistune
import openai
import pytz
import requests
import streamlit as st
from audio_recorder_streamlit import audio_recorder
from bs4 import BeautifulSoup
from openai import ChatCompletion
# Configure the OpenAI client from the environment (never hard-code keys).
openai.api_key = os.getenv('OPENAI_KEY')
# Wide layout gives room for the prompt and document-section text areas.
st.set_page_config(page_title="GPT Streamlit Document Reasoner",layout="wide")
# Sidebar controls read by the functions below: output file format for saved
# transcripts (`choice`) and which chat model to call (`model_choice`).
menu = ["txt", "htm", "md", "py"]
choice = st.sidebar.selectbox("Output File Type:", menu)
model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
def generate_filename(prompt, file_type):
    """Build a filesystem-safe filename from a US/Central timestamp and the prompt.

    Args:
        prompt: Free text; only its alphanumeric characters (first 45) are kept.
        file_type: Extension without the dot, e.g. "txt" or "md".

    Returns:
        A name shaped like "MMDD_HHMM_<sanitizedprompt>.<file_type>".
    """
    # zoneinfo is the stdlib replacement for the legacy pytz package;
    # 'US/Central' is a valid IANA key in both, so output is unchanged.
    central = ZoneInfo('US/Central')
    safe_date_time = datetime.now(central).strftime("%m%d_%I%M")
    # Strip everything non-alphanumeric and cap the length so the name is
    # safe on any filesystem.
    safe_prompt = "".join(ch for ch in prompt if ch.isalnum())[:45]
    return f"{safe_date_time}_{safe_prompt}.{file_type}"
def chat_with_model(prompt, document_section):
    """Send the user prompt (plus an optional document section) to the
    model selected in the sidebar and return the assistant's reply text.

    Args:
        prompt: The user's query.
        document_section: Extra context, appended as an assistant turn
            (may be the empty string).

    Returns:
        The content string of the first choice in the API response.
    """
    messages = [
        {'role': 'system', 'content': 'You are a helpful assistant.'},
        {'role': 'user', 'content': prompt},
        {'role': 'assistant', 'content': document_section},
    ]
    # `model_choice` is the module-level sidebar radio selection.
    result = openai.ChatCompletion.create(model=model_choice, messages=messages)
    return result['choices'][0]['message']['content']
def transcribe_audio(openai_key, file_path, model):
    """Upload an audio file to OpenAI's transcription endpoint.

    Args:
        openai_key: API key used for the Bearer Authorization header.
        file_path: Path of the audio file to transcribe.
        model: Transcription model name, e.g. "whisper-1".

    Returns:
        The transcribed text on success, or None on an API error
        (the error payload is shown in the Streamlit UI either way).
    """
    OPENAI_API_URL = "https://api.openai.com/v1/audio/transcriptions"
    headers = {
        "Authorization": f"Bearer {openai_key}",
    }
    # Keep the file handle open for the duration of the upload.
    with open(file_path, 'rb') as f:
        response = requests.post(
            OPENAI_API_URL,
            headers=headers,
            files={'file': f},
            data={'model': model},
            timeout=120,  # fail instead of hanging forever on a stalled upload
        )
    payload = response.json()  # parse once instead of three separate times
    if response.status_code == 200:
        st.write(payload)
        # NOTE(review): transcribe_and_chat() calls chat_with_model again on
        # this same text, so the chat completion runs twice per recording —
        # confirm whether this immediate chat display is intentional.
        response2 = chat_with_model(payload.get('text'), '')
        st.write('Responses:')
        st.write(response2)
        return payload.get('text')
    else:
        st.write(payload)
        st.error("Error in API call.")
        return None
def save_and_play_audio(audio_recorder):
    """Invoke the recorder widget, persist any captured audio, and play it back.

    Args:
        audio_recorder: Zero-argument callable returning recorded audio bytes
            (or a falsy value when nothing was captured).

    Returns:
        The generated .wav filename, or None when no audio was recorded.
    """
    captured = audio_recorder()
    # Guard clause: nothing recorded yet, nothing to save.
    if not captured:
        return None
    filename = generate_filename("Recording", "wav")
    with open(filename, 'wb') as out:
        out.write(captured)
    st.audio(captured, format="audio/wav")
    return filename
def transcribe_and_chat(openai_key, file_path, model):
    """Transcribe a recording, then fetch a chat reply for the transcript.

    Args:
        openai_key: API key forwarded to transcribe_audio.
        file_path: Path of the recorded audio file.
        model: Transcription model name.

    Returns:
        A (transcription, chat_response) pair, or (None, None) when
        transcription failed.
    """
    transcription = transcribe_audio(openai_key, file_path, model)
    # Transcription failure is signalled by None; propagate it as a pair.
    if transcription is None:
        return None, None
    return transcription, chat_with_model(transcription, '')
def create_file(filename, prompt, response):
    """Write a prompt/response pair to disk, formatted by file extension.

    Supported extensions: .txt (plain), .htm (HTML fragments), .md
    (Markdown headings), .py (triple-quoted strings). Any other extension
    reports a Streamlit error and writes nothing.

    Args:
        filename: Destination path; its suffix selects the template.
        prompt: The user's prompt text.
        response: The model's response text.
    """
    # One template per supported extension replaces four copy-pasted branches.
    templates = {
        ".txt": f"Prompt:\n{prompt}\nResponse:\n{response}",
        ".htm": f"<h1>Prompt:</h1> <p>{prompt}</p> <h1>Response:</h1> <p>{response}</p>",
        ".md": f"# Prompt: \n {prompt} \n # Response: \n {response}",
        ".py": f"# Prompt: \n'''{prompt}'''\n # Response: \n'''{response}'''",
    }
    for ext, content in templates.items():
        if filename.endswith(ext):
            # Explicit utf-8 so non-ASCII model output doesn't crash on
            # platforms whose locale-default encoding can't represent it.
            with open(filename, 'w', encoding='utf-8') as file:
                file.write(content)
            return
    st.error("Unsupported file type!")
def main():
    """Streamlit entry point: accept a typed or recorded query, send it to
    the selected chat model, and display (and for typed input, save) the
    response.
    """
    st.sidebar.header("Choose your Input Method:")
    input_choice = st.sidebar.radio("", ('Type it out', 'Record it'))
    if input_choice == 'Type it out':
        st.header("Type your query")
        prompt = st.text_area("Input")
        document_section = st.text_area("Document Section")
        if st.button("Get Response"):
            filename = generate_filename(prompt, choice)
            response = chat_with_model(prompt, document_section)
            st.write('Response:')
            st.write(response)
            create_file(filename, prompt, response)
    elif input_choice == 'Record it':
        st.header("Record your query")
        st.write("Press record to start recording")
        # BUG FIX: save_and_play_audio calls its argument, so it must receive
        # the recorder *function* itself. The previous code passed the result
        # of audio_recorder() (bytes or None), which save_and_play_audio then
        # tried to call, raising TypeError on every recording attempt.
        filename = save_and_play_audio(audio_recorder)
        if filename:
            if st.button("Transcribe and Chat"):
                transcription, response = transcribe_and_chat(openai.api_key, filename, "whisper-1")
                if transcription and response:
                    st.write('Transcription:')
                    st.write(transcription)
                    st.write('Response:')
                    st.write(response)
# Run the app only when executed directly (not when imported as a module).
if __name__ == "__main__":
    main()