import streamlit as st
import openai
import os
import base64
import glob
import json
import mistune
import pytz
from datetime import datetime
from xml.etree import ElementTree as ET
from bs4 import BeautifulSoup
from collections import deque

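# The OpenAI API key is read from the OPENAI_KEY environment variable.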
openai.api_key = os.getenv('OPENAI_KEY')
st.set_page_config(
    page_title="GPT Streamlit Document Reasoner",
    layout="wide")

# Custom CSS to increase scrollbar width
st.markdown("""
    <style>
    ::-webkit-scrollbar {
      width: 20px;
    }

    /* Track */
    ::-webkit-scrollbar-track {
      background: #f1f1f1;
    }

    /* Handle */
    ::-webkit-scrollbar-thumb {
      background: #888;
    }

    /* Handle on hover */
    ::-webkit-scrollbar-thumb:hover {
      background: #555;
    }
    </style>
    """, unsafe_allow_html=True)

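# Sidebar selector for the file type used when saving chat responses to disk.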
menu = ["txt", "htm", "md", "py"]
choice = st.sidebar.selectbox("Output file type:", menu)
choicePrefix = "Output file type is "
if choice == "txt":
     st.sidebar.write(choicePrefix + "Text File.")
elif choice == "htm":
     st.sidebar.write(choicePrefix + "HTML5.")
elif choice == "md":
     st.sidebar.write(choicePrefix + "Markdown.")
elif choice == "py":
     st.sidebar.write(choicePrefix + "Python Code.")

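# Build a timestamped, filesystem-safe filename from the prompt and chosen file type.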
def generate_filename(prompt, file_type):
    central = pytz.timezone('US/Central')
    safe_date_time = datetime.now(central).strftime("%m%d_%I%M")  
    safe_prompt = "".join(x for x in prompt if x.isalnum())[:28]
    return f"{safe_date_time}_{safe_prompt}.{file_type}"

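# Persist a prompt/response pair to disk in the format implied by the filename extension.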
def create_file(filename, prompt, response):
    if filename.endswith(".txt"):
        with open(filename, 'w') as file:
            file.write(f"Prompt:\n{prompt}\nResponse:\n{response}")
    elif filename.endswith(".htm"):
        with open(filename, 'w') as file:
            file.write(f"<h1>Prompt:</h1> <p>{prompt}</p> <h1>Response:</h1> <p>{response}</p>")
    elif filename.endswith(".md"):
        with open(filename, 'w') as file:
            file.write(f"# Prompt:\n{prompt}\n# Response:\n{response}")
    else:
        # "py" (or any other extension) previously produced no file at all, so the
        # sidebar download link would fail; fall back to a plain-text dump.
        with open(filename, 'w') as file:
            file.write(f"Prompt:\n{prompt}\nResponse:\n{response}")

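# Send the user prompt plus the document section to the chat completion endpoint
# (legacy openai<1.0 ChatCompletion interface) and return the model's reply text.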
def chat_with_model(prompt, document_section):
    model = "gpt-3.5-turbo"
    conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
    conversation.append({'role': 'user', 'content': prompt})
    conversation.append({'role': 'assistant', 'content': document_section})
    response = openai.ChatCompletion.create(model=model, messages=conversation)
    return response['choices'][0]['message']['content']

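# Return an HTML anchor that embeds the file as a base64 data URI so it can be
# downloaded straight from the Streamlit sidebar.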
def get_table_download_link(file_path):
    with open(file_path, 'r') as file:
        data = file.read()
    b64 = base64.b64encode(data.encode()).decode()  
    file_name = os.path.basename(file_path)
    ext = os.path.splitext(file_name)[1]  # get the file extension
    if ext == '.txt':
        mime_type = 'text/plain'
    elif ext == '.htm':
        mime_type = 'text/html'
    elif ext == '.md':
        mime_type = 'text/markdown'
    else:
        mime_type = 'application/octet-stream'  # general binary data type
    href = f'<a href="data:{mime_type};base64,{b64}" target="_blank" download="{file_name}">{file_name}</a>'
    return href

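# Extract plain text from an uploaded file based on its reported MIME type.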
def read_file_content(file):
    if file.type == "application/json":
        content = json.load(file)
        return str(content)
    elif file.type == "text/html" or file.type == "text/htm":
        content = BeautifulSoup(file, "html.parser")
        return content.text
    elif file.type == "application/xml" or file.type == "text/xml":
        tree = ET.parse(file)
        root = tree.getroot()
        xml = ET.tostring(root, encoding='unicode')
        return xml
    elif file.type == "text/markdown" or file.type == "text/md":
        md = mistune.create_markdown()
        content = md(file.read().decode())
        return content
    elif file.type == "text/plain":
        return file.getvalue().decode()
    else:
        return ""

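# Streamlit entry point: collect a prompt and an optional document, chat with the
# model about the whole document or per section, and list saved output files.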
def main():
    col1, col2 = st.columns([1, 2])

    with col1:
        user_prompt = st.text_area("Your question:", '', height=150)  # Increased height for question text box
        uploaded_file = st.file_uploader("Choose a file", type=["xml", "json", "html", "htm", "md", "txt"])

    document_sections = deque()
    # Keep per-section responses in session state so they survive Streamlit reruns;
    # a plain dict would be re-created (and emptied) on every interaction.
    if 'document_responses' not in st.session_state:
        st.session_state['document_responses'] = {}
    document_responses = st.session_state['document_responses']

    if uploaded_file is not None:
        file_content = read_file_content(uploaded_file)
        document_sections.append(file_content)

    with col2:
        if st.button('πŸ’¬ Chat'):
            st.write('Thinking and Reasoning with your inputs...')
            response = chat_with_model(user_prompt, ''.join(list(document_sections)))
            response_area = st.text_area('Response:', value=response, height=400)
            filename = generate_filename(user_prompt, choice)
            create_file(filename, user_prompt, response)
            st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)

    if len(document_sections) > 0:
        for i, section in reversed(list(enumerate(list(document_sections)))):
            if i in document_responses:
                st.markdown(f"**Section {i+1} Content:**\n{section}")
                response_area = st.text_area(f"Section {i+1} Response:", value=document_responses[i], height=400)
            else:
                if st.button(f"Chat about Section {i+1}"):
                    st.write('Thinking and Reasoning with your inputs...')
                    response = chat_with_model(user_prompt, section)
                    document_responses[i] = response
                    response_area = st.text_area(f"Section {i+1} Response:", value=response, height=400)
                    filename = generate_filename(f"{user_prompt}_section_{i+1}", choice)
                    create_file(filename, user_prompt, response)
                    st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)

    all_files = glob.glob("*.txt") + glob.glob("*.htm") + glob.glob("*.md")
    for file in all_files:
        col1, col2 = st.sidebar.columns([4,1])  # adjust the ratio as needed
        with col1:
            st.markdown(get_table_download_link(file), unsafe_allow_html=True)
        with col2:
            if st.button("πŸ—‘", key=file):
                os.remove(file)
                st.experimental_rerun()

if __name__ == "__main__":
    main()