import streamlit as st
import openai
import os
import json
import requests
from datetime import datetime
from collections import deque
from audio_recorder_streamlit import audio_recorder
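# Assumed setup, not specified in the original: pip install streamlit requests
# audio-recorder-streamlit and a pre-1.0 openai release (the code uses the legacy
# openai.ChatCompletion interface); export the API key as OPENAI_KEY and launch
# with `streamlit run app.py` (filename assumed).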
# Initialize configurations
configurations = {}
config_file = "configurations.json"
if os.path.exists(config_file):
    with open(config_file, "r") as file:
        configurations = json.load(file)
openai.api_key = os.getenv('OPENAI_KEY')
st.set_page_config(page_title="GPT Streamlit Document Reasoner", layout="wide")
model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
user_prompt = st.text_area(
    "Enter prompts, instructions & questions:",
    configurations.get("user_prompt", ""),
    height=100
)
system_prompt = configurations.get("system_prompt", "You are a helpful assistant.")
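# Build a timestamped filename from the prompt, keeping only alphanumeric characters.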
def generate_filename(prompt, file_type):
    safe_date_time = datetime.now().strftime("%m%d_%I%M")
    safe_prompt = "".join(x for x in prompt if x.isalnum())[:45]
    return f"{safe_date_time}_{safe_prompt}.{file_type}"
def chat_with_model(prompt, document_section):
    conversation = [{'role': 'system', 'content': system_prompt}]
    conversation.append({'role': 'user', 'content': prompt})
    if document_section:
        conversation.append({'role': 'assistant', 'content': document_section})
    response = openai.ChatCompletion.create(model=model_choice, messages=conversation)
    return response.choices[0].message.content
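# Record audio in the browser, save it as a timestamped WAV file, and play it back.
# Note: this helper is defined but never called from main() below.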
def save_and_play_audio():
    audio_bytes = audio_recorder()
    if audio_bytes:
        filename = generate_filename("Recording", "wav")
        with open(filename, 'wb') as f:
            f.write(audio_bytes)
        st.audio(audio_bytes, format="audio/wav")
        return filename
    return None
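# Persist a prompt/response pair to a plain-text file on disk.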
def create_file(filename, prompt, response):
    with open(filename, 'w') as file:
        file.write(f"Prompt:\n{prompt}\nResponse:\n{response}")
def divide_document(document, max_length):
    return [document[i:i+max_length] for i in range(0, len(document), max_length)]
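# Decode an uploaded text file and break it into sections of at most max_length characters.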
def handle_uploaded_file(uploaded_file, max_length):
    file_content = uploaded_file.read().decode()
    return divide_document(file_content, max_length)
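# Page flow: choose a section size, upload a .txt file, chat about individual sections
# or the whole document, then persist the prompts back to the configuration file.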
def main():
    max_length = st.slider("File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000)
    uploaded_file = st.file_uploader("Add a file for context:", type=["txt"])
    document_sections = deque()
    if uploaded_file is not None:
        document_sections.extend(handle_uploaded_file(uploaded_file, max_length))
    document_responses = {}
    for i, section in enumerate(document_sections):
        if st.button(f"Chat about Section {i+1}"):
            response = chat_with_model(user_prompt, section)
            document_responses[i] = response
            st.write(response)  # show the model's reply in the app
            filename = generate_filename(f"{user_prompt}_section_{i+1}", "txt")
            create_file(filename, user_prompt, response)
    if st.button('Chat'):
        response = chat_with_model(user_prompt, ''.join(document_sections))
        st.write(response)  # show the model's reply in the app
        filename = generate_filename(user_prompt, "txt")
        create_file(filename, user_prompt, response)
    configurations["user_prompt"] = user_prompt
    configurations["system_prompt"] = system_prompt
    with open(config_file, "w") as file:
        json.dump(configurations, file)
if __name__ == "__main__":
    main()