Spaces:
Sleeping
Sleeping
File size: 4,673 Bytes
2ba3952 9bfc205 2ba3952 d7e561a d923697 d7e561a d11baa8 d923697 9bfc205 738a092 d923697 d11baa8 d923697 d11baa8 d923697 d11baa8 d923697 d11baa8 d923697 702c71f d11baa8 702c71f 50f3b7e 702c71f b650068 702c71f d7e561a d11baa8 50f3b7e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 |
import streamlit as st
import openai
import os
import base64
import glob
import json
import mistune
import pytz
import math
import requests
from datetime import datetime
from openai import ChatCompletion
from xml.etree import ElementTree as ET
from bs4 import BeautifulSoup
from collections import deque
from audio_recorder_streamlit import audio_recorder
# --- App configuration (runs once at module import) ---
# API key comes from the environment; os.getenv returns None if OPENAI_KEY is unset.
openai.api_key = os.getenv('OPENAI_KEY')
st.set_page_config(page_title="GPT Streamlit Document Reasoner",layout="wide")
# Sidebar controls: output file extension used by create_file(), and the
# chat model name read by chat_with_model() via the model_choice global.
menu = ["txt", "htm", "md", "py"]
choice = st.sidebar.selectbox("Output File Type:", menu)
model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
def generate_filename(prompt, file_type):
    """Build a timestamped, filesystem-safe filename from a prompt.

    The name is "<MMDD_HHMM>_<slug>.<file_type>" where the timestamp is
    US Central time and the slug keeps only alphanumeric characters of
    the prompt, capped at 45 characters.

    Uses the stdlib zoneinfo module (Python 3.9+) instead of the
    third-party pytz dependency; the produced string is identical.
    """
    central_now = datetime.now(ZoneInfo('US/Central'))
    safe_date_time = central_now.strftime("%m%d_%I%M")
    # Strip every non-alphanumeric char so the prompt is safe in a filename.
    safe_prompt = "".join(ch for ch in prompt if ch.isalnum())[:45]
    return f"{safe_date_time}_{safe_prompt}.{file_type}"
def chat_with_model(prompt, document_section):
    """Send the user prompt plus a document section to the chat endpoint.

    Returns the text content of the first completion choice. The model
    name is taken from the module-level sidebar selection (model_choice).
    """
    messages = [
        {'role': 'system', 'content': 'You are a helpful assistant.'},
        {'role': 'user', 'content': prompt},
        {'role': 'assistant', 'content': document_section},
    ]
    completion = openai.ChatCompletion.create(model=model_choice, messages=messages)
    return completion['choices'][0]['message']['content']
def transcribe_audio(openai_key, file_path, model):
    """Upload an audio file to OpenAI's transcription endpoint.

    On success, displays the raw API payload, immediately chats with the
    model about the transcription and displays that reply, then returns
    the transcription text. On failure, displays the error payload and
    returns None.
    """
    OPENAI_API_URL = "https://api.openai.com/v1/audio/transcriptions"
    headers = {
        "Authorization": f"Bearer {openai_key}",
    }
    # The handle must remain open while requests streams the upload,
    # so the POST happens inside the with-block.
    with open(file_path, 'rb') as audio_file:
        response = requests.post(
            OPENAI_API_URL,
            headers=headers,
            files={'file': audio_file},   # multipart file part, not form data
            data={'model': model},
        )
    if response.status_code == 200:
        payload = response.json()  # parse the body once, not per access
        st.write(payload)
        transcript = payload.get('text')
        # NOTE(review): transcribe_and_chat() chats again with this same
        # transcript, so the model is queried twice per recording — confirm
        # this duplication is intentional before removing either call.
        chat_reply = chat_with_model(transcript, '')
        st.write('Responses:')
        st.write(chat_reply)
        return transcript
    else:
        st.write(response.json())
        st.error("Error in API call.")
        return None
def save_and_play_audio(audio_recorder):
    """Invoke the recorder callable; persist and play back any captured audio.

    Returns the generated .wav filename, or None when nothing was recorded.
    """
    recorded = audio_recorder()
    if not recorded:
        return None
    wav_name = generate_filename("Recording", "wav")
    with open(wav_name, 'wb') as out:
        out.write(recorded)
    st.audio(recorded, format="audio/wav")
    return wav_name
def transcribe_and_chat(openai_key, file_path, model):
    """Transcribe an audio file, then run the transcription through chat.

    Returns a (transcription, chat_response) pair, or (None, None) when
    the transcription step failed.
    """
    text = transcribe_audio(openai_key, file_path, model)
    if text is None:
        return None, None
    return text, chat_with_model(text, '')
def create_file(filename, prompt, response):
    """Persist a prompt/response pair, formatted by the file extension.

    Supported extensions: .txt, .htm, .md, .py. Any other extension shows
    a Streamlit error and writes nothing.
    """
    # Serialize first, write once — the four branches previously each
    # duplicated the open/write boilerplate.
    if filename.endswith(".txt"):
        content = f"Prompt:\n{prompt}\nResponse:\n{response}"
    elif filename.endswith(".htm"):
        content = f"<h1>Prompt:</h1> <p>{prompt}</p> <h1>Response:</h1> <p>{response}</p>"
    elif filename.endswith(".md"):
        content = f"# Prompt: \n {prompt} \n # Response: \n {response}"
    elif filename.endswith(".py"):
        content = f"# Prompt: \n'''{prompt}'''\n # Response: \n'''{response}'''"
    else:
        st.error("Unsupported file type!")
        return
    # Explicit utf-8: the platform-default encoding can fail on non-ASCII
    # model output (e.g. cp1252 on Windows).
    with open(filename, 'w', encoding='utf-8') as file:
        file.write(content)
def main():
    """Streamlit entry point: typed or recorded query -> chat response."""
    st.sidebar.header("Choose your Input Method:")
    input_choice = st.sidebar.radio("", ('Type it out', 'Record it'))
    if input_choice == 'Type it out':
        st.header("Type your query")
        prompt = st.text_area("Input")
        document_section = st.text_area("Document Section")
        if st.button("Get Response"):
            filename = generate_filename(prompt, choice)
            response = chat_with_model(prompt, document_section)
            st.write('Response:')
            st.write(response)
            create_file(filename, prompt, response)
    elif input_choice == 'Record it':
        st.header("Record your query")
        st.write("Press record to start recording")
        # BUG FIX: save_and_play_audio expects the recorder *callable* and
        # invokes it itself. The previous code called audio_recorder() here
        # and passed the returned bytes, which save_and_play_audio then
        # tried to call — a TypeError whenever audio was captured.
        filename = save_and_play_audio(audio_recorder)
        if filename:
            if st.button("Transcribe and Chat"):
                transcription, response = transcribe_and_chat(openai.api_key, filename, "whisper-1")
                if transcription and response:
                    st.write('Transcription:')
                    st.write(transcription)
                    st.write('Response:')
                    st.write(response)
# Run the Streamlit app only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
|