import streamlit as st
import openai
import os
import base64
import glob
import json
import mistune
import pytz
import math
from datetime import datetime
from xml.etree import ElementTree as ET
from bs4 import BeautifulSoup
from collections import deque
# Read the OpenAI API key from the environment.
openai.api_key = os.getenv('OPENAI_KEY')
st.set_page_config(
    page_title="GPT Streamlit Document Reasoner",
    layout="wide",
)
def divide_document(document, max_length):
    # Split the document into consecutive chunks of at most max_length characters.
    return [document[i:i + max_length] for i in range(0, len(document), max_length)]
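# The code below calls read_file_content() without defining it anywhere in this
# file. The following is a minimal sketch of what it might look like, assuming
# the goal is to extract plain text from the uploaded file by extension; the
# original implementation may differ. max_length is accepted only for
# call-compatibility, since splitting happens in divide_document().
def read_file_content(file, max_length):
    name = file.name.lower()
    raw = file.read()
    if name.endswith(".json"):
        # Pretty-print JSON so the model sees readable structure.
        return json.dumps(json.loads(raw), indent=2)
    if name.endswith((".html", ".htm")):
        # Strip tags and keep visible text only.
        return BeautifulSoup(raw, "html.parser").get_text()
    if name.endswith(".xml"):
        # Flatten the XML tree to its text content.
        return ET.tostring(ET.fromstring(raw), encoding="unicode", method="text")
    # .md and .txt files are treated as plain text.
    return raw.decode(errors="ignore")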
def chat_with_model(prompt, document_section):
    # Send the user's prompt plus the current document section to the chat
    # model and return the assistant's reply text.
    model = "gpt-3.5-turbo"
    conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
    conversation.append({'role': 'user', 'content': prompt})
    conversation.append({'role': 'assistant', 'content': document_section})
    response = openai.ChatCompletion.create(model=model, messages=conversation)
    return response['choices'][0]['message']['content']
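# generate_filename(), create_file(), and get_table_download_link() are also
# referenced in main() but not defined in this file. These are assumed, minimal
# sketches: a timestamped output filename, a plain-text writer, and a base64
# data-URI download link for the Streamlit sidebar.
def generate_filename(prompt, file_type):
    # Keep a short, filesystem-safe slug of the prompt plus a UTC timestamp.
    slug = "".join(c if c.isalnum() else "_" for c in prompt)[:40] or "response"
    timestamp = datetime.now(pytz.utc).strftime("%Y%m%d_%H%M%S")
    return f"{timestamp}_{slug}.{file_type}"

def create_file(filename, prompt, response):
    # Persist the prompt and the model's response together.
    with open(filename, "w", encoding="utf-8") as f:
        f.write(f"Prompt:\n{prompt}\n\nResponse:\n{response}\n")

def get_table_download_link(file_path):
    # Embed the file as a base64 data URI so it can be downloaded from a
    # st.markdown(..., unsafe_allow_html=True) link.
    with open(file_path, "rb") as f:
        b64 = base64.b64encode(f.read()).decode()
    name = os.path.basename(file_path)
    return f'<a href="data:file/txt;base64,{b64}" download="{name}">{name}</a>'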
def main():
    # Collect the user's question and an optional document to reason over.
    user_prompt = st.text_area("Your question:", '', height=120)
    uploaded_file = st.file_uploader("Choose a file", type=["xml", "json", "html", "htm", "md", "txt"])
    max_length = 4000
    document_sections = deque()
    document_responses = {}

    if uploaded_file is not None:
        file_content = read_file_content(uploaded_file, max_length)
        document_sections.extend(divide_document(file_content, max_length))
    if len(document_sections) > 0:
        # Show each section, then offer a per-section chat button.
        st.markdown("**Sections of the uploaded file:**")
        for i, section in enumerate(list(document_sections)):
            st.markdown(f"**Section {i + 1}**\n{section}")

        st.markdown("**Chat with the model:**")
        for i, section in enumerate(list(document_sections)):
            if i in document_responses:
                st.markdown(f"**Section {i + 1}**\n{document_responses[i]}")
            else:
                if st.button(f"Chat about Section {i + 1}"):
                    st.write('Thinking and Reasoning with your inputs...')
                    response = chat_with_model(user_prompt, section)
                    st.write('Response:')
                    st.write(response)
                    document_responses[i] = response
    if st.button('💬 Chat'):
        # Chat about the whole document at once.
        st.write('Thinking and Reasoning with your inputs...')
        response = chat_with_model(user_prompt, ''.join(document_sections))
        st.write('Response:')
        st.write(response)

        # The original passed an undefined variable 'choice' here; assume a
        # plain-text output file.
        filename = generate_filename(user_prompt, "txt")
        create_file(filename, user_prompt, response)
        st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
    # List previously saved files in the sidebar with download and delete controls.
    all_files = glob.glob("*.txt") + glob.glob("*.htm") + glob.glob("*.md")
    for file in all_files:
        col1, col2 = st.sidebar.columns([4, 1])  # adjust the ratio as needed
        with col1:
            st.markdown(get_table_download_link(file), unsafe_allow_html=True)
        with col2:
            if st.button("🗑", key=file):
                os.remove(file)
                st.experimental_rerun()
if __name__ == "__main__":
    main()
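# Usage sketch (assumptions: Streamlit installed, openai<1.0 pinned for the
# legacy ChatCompletion API, and OPENAI_KEY exported in the environment):
#   export OPENAI_KEY=sk-...
#   streamlit run app.py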