File size: 6,352 Bytes
6e969ba
 
 
 
 
a02f054
8e4ee34
6e969ba
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
afd67ae
6e969ba
 
 
 
 
 
 
 
 
 
a02f054
11b82b8
6e969ba
11b82b8
f8850ff
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
125c22a
bb5d0f4
 
 
 
 
 
 
a02f054
 
 
 
 
 
 
125c22a
f8850ff
 
 
a02f054
 
 
 
 
f8850ff
6e969ba
 
 
7e684c4
6e969ba
7e684c4
a02f054
6e969ba
 
 
 
a02f054
 
 
 
6e969ba
 
 
 
 
 
 
 
 
0979664
6e969ba
 
 
0979664
f3a97a2
 
 
6e969ba
 
0979664
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
import streamlit as st
import openai
import os
import base64
import glob
import json
from xml.etree import ElementTree as ET
from datetime import datetime
from dotenv import load_dotenv
from openai import ChatCompletion

# Read key-value pairs from a local .env file into the process environment.
load_dotenv()

# Configure the OpenAI client; OPENAI_KEY must be set in the environment or .env.
openai.api_key = os.getenv('OPENAI_KEY')

def chat_with_model(prompts):
    """Send *prompts* to the OpenAI chat API and return the assistant's reply.

    Each entry of *prompts* becomes its own user message, preceded by one
    fixed system message.
    """
    messages = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
    for prompt in prompts:
        messages.append({'role': 'user', 'content': prompt})

    result = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
    return result['choices'][0]['message']['content']

def generate_filename(prompt):
    """Build a timestamped .htm filename from *prompt*.

    Non-alphanumeric characters are stripped and the prompt portion is
    capped at 50 characters to keep the name filesystem-safe.
    """
    stamp = datetime.now().strftime("%m_%d_%H_%M")
    cleaned = "".join(ch for ch in prompt if ch.isalnum())[:50]
    return f"{stamp}_{cleaned}.htm"

def create_file(filename, prompt, response):
    """Write *prompt* and *response* to *filename* as a small HTML snippet."""
    html = f"<h1>Prompt:</h1> <p>{prompt}</p> <h1>Response:</h1> <p>{response}</p>"
    with open(filename, 'w') as out:
        out.write(html)

def get_table_download_link(file_path):
    """Return an HTML anchor that downloads *file_path* via a base64 data URI."""
    with open(file_path, 'r') as src:
        encoded = base64.b64encode(src.read().encode()).decode()
    name = os.path.basename(file_path)
    return f'<a href="data:file/htm;base64,{encoded}" target="_blank" download="{name}">{name}</a>'

def CompressXML(xml_text):
    """Parse *xml_text*, strip comment nodes, and return compact unicode XML.

    The original used ``isinstance(elem.tag, ET.Comment)`` (TypeError:
    ``ET.Comment`` is a factory function, not a class) and ``elem.getparent()``
    (lxml-only; stdlib ElementTree has no parent pointers), so it crashed on
    any input. Comments are now detected by tag identity and removed through
    their parent. Note the stock ``fromstring`` parser already discards
    comments; the loop guards against comment-preserving parsers.

    Raises ET.ParseError on malformed XML.
    """
    root = ET.fromstring(xml_text)
    for parent in root.iter():
        for child in list(parent):
            if child.tag is ET.Comment:
                parent.remove(child)
    return ET.tostring(root, encoding='unicode')

def read_file_content(file):
    """Return the textual content of an uploaded file, dispatching on MIME type.

    *file* is expected to be a Streamlit UploadedFile-like object exposing
    ``.type`` (MIME string) and ``.getvalue()`` (raw bytes) — TODO confirm
    against caller. Unknown types yield an empty string.
    """
    if file.type == "application/json":
        content = json.load(file)
        return str(content)
    elif file.type == "text/html":
        # The original referenced BeautifulSoup, which was never imported and
        # raised NameError; use the stdlib parser to extract visible text.
        from html.parser import HTMLParser

        class _TextExtractor(HTMLParser):
            def __init__(self):
                super().__init__()
                self.parts = []

            def handle_data(self, data):
                self.parts.append(data)

        extractor = _TextExtractor()
        extractor.feed(file.getvalue().decode())
        return "".join(extractor.parts)
    elif file.type in ("application/xml", "text/xml"):
        return CompressXML(file.getvalue().decode())
    elif file.type == "text/plain":
        return file.getvalue().decode()
    else:
        return ""

def main():
    """Streamlit entry point: collect a prompt (and optional file), chat, save.

    The original block ended with the truncated statement ``st.sidebar.mark``;
    the sidebar listing loop is completed here (download link plus a delete
    button per saved .htm transcript).
    """
    st.title("Chat with AI")

    prompts = ['']

    user_prompt = st.text_area("Your question:", '', height=120)
    uploaded_file = st.file_uploader("Choose a file", type=["xml", "json", "htm", "txt"])

    if user_prompt:
        prompts.append(user_prompt)

    if uploaded_file is not None:
        file_content = read_file_content(uploaded_file)
        st.markdown(f"**Content Added to Prompt:**\n{file_content}")
        prompts.append(file_content)

    if st.button('Chat'):
        st.write('Chatting with GPT-3...')
        response = chat_with_model(prompts)
        st.write('Response:')
        st.write(response)

        # Persist the exchange and offer it for download from the sidebar.
        filename = generate_filename(user_prompt)
        create_file(filename, user_prompt, response)
        st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)

    # Sidebar: download and delete controls for every previously saved chat.
    htm_files = glob.glob("*.htm")
    for file in htm_files:
        st.sidebar.markdown(get_table_download_link(file), unsafe_allow_html=True)
        if st.sidebar.button(f"Delete {file}"):
            os.remove(file)
            st.experimental_rerun()




User
show full code listing
ChatGPT
Here is the full Python script:

python
Copy code
import streamlit as st
import openai
import os
import base64
import glob
import json
from xml.etree import ElementTree as ET
from datetime import datetime
from dotenv import load_dotenv
from openai import ChatCompletion

# Read key-value pairs from a local .env file into the process environment.
load_dotenv()

# Configure the OpenAI client; OPENAI_KEY must be set in the environment or .env.
openai.api_key = os.getenv('OPENAI_KEY')

def chat_with_model(prompts):
    """Send *prompts* to the OpenAI chat API and return the assistant's reply.

    Each entry of *prompts* becomes its own user message, preceded by one
    fixed system message.
    """
    messages = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
    for prompt in prompts:
        messages.append({'role': 'user', 'content': prompt})

    result = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
    return result['choices'][0]['message']['content']

def generate_filename(prompt):
    """Build a timestamped .htm filename from *prompt*.

    Non-alphanumeric characters are stripped and the prompt portion is
    capped at 50 characters to keep the name filesystem-safe.
    """
    stamp = datetime.now().strftime("%m_%d_%H_%M")
    cleaned = "".join(ch for ch in prompt if ch.isalnum())[:50]
    return f"{stamp}_{cleaned}.htm"

def create_file(filename, prompt, response):
    """Write *prompt* and *response* to *filename* as a small HTML snippet."""
    html = f"<h1>Prompt:</h1> <p>{prompt}</p> <h1>Response:</h1> <p>{response}</p>"
    with open(filename, 'w') as out:
        out.write(html)

def get_table_download_link(file_path):
    """Return an HTML anchor that downloads *file_path* via a base64 data URI."""
    with open(file_path, 'r') as src:
        encoded = base64.b64encode(src.read().encode()).decode()
    name = os.path.basename(file_path)
    return f'<a href="data:file/htm;base64,{encoded}" target="_blank" download="{name}">{name}</a>'

def CompressXML(xml_text):
    """Parse *xml_text*, strip comment nodes, and return compact unicode XML.

    The original used ``isinstance(elem.tag, ET.Comment)`` (TypeError:
    ``ET.Comment`` is a factory function, not a class) and ``elem.getparent()``
    (lxml-only; stdlib ElementTree has no parent pointers), so it crashed on
    any input. Comments are now detected by tag identity and removed through
    their parent. Note the stock ``fromstring`` parser already discards
    comments; the loop guards against comment-preserving parsers.

    Raises ET.ParseError on malformed XML.
    """
    root = ET.fromstring(xml_text)
    for parent in root.iter():
        for child in list(parent):
            if child.tag is ET.Comment:
                parent.remove(child)
    return ET.tostring(root, encoding='unicode')

def read_file_content(file):
    """Return the textual content of an uploaded file, dispatching on MIME type.

    *file* is expected to be a Streamlit UploadedFile-like object exposing
    ``.type`` (MIME string) and ``.getvalue()`` (raw bytes) — TODO confirm
    against caller. Unknown types yield an empty string.
    """
    if file.type == "application/json":
        content = json.load(file)
        return str(content)
    elif file.type == "text/html":
        # The original referenced BeautifulSoup, which was never imported and
        # raised NameError; use the stdlib parser to extract visible text.
        from html.parser import HTMLParser

        class _TextExtractor(HTMLParser):
            def __init__(self):
                super().__init__()
                self.parts = []

            def handle_data(self, data):
                self.parts.append(data)

        extractor = _TextExtractor()
        extractor.feed(file.getvalue().decode())
        return "".join(extractor.parts)
    elif file.type in ("application/xml", "text/xml"):
        return CompressXML(file.getvalue().decode())
    elif file.type == "text/plain":
        return file.getvalue().decode()
    else:
        return ""


def main():
    """Streamlit UI: gather a question and an optional file, query the model,
    then persist the exchange and list saved .htm transcripts in the sidebar.
    """
    st.title("Chat with AI")

    prompts = ['']

    user_prompt = st.text_area("Your question:", '', height=120)
    uploaded_file = st.file_uploader("Choose a file", type=["xml", "json", "htm", "txt"])

    if user_prompt:
        prompts.append(user_prompt)

    if uploaded_file is not None:
        prompts.append(read_file_content(uploaded_file))

    if st.button('Chat'):
        st.write('Chatting with GPT-3...')
        reply = chat_with_model(prompts)
        st.write('Response:')
        st.write(reply)

        # Save the exchange and expose it for download.
        saved_name = generate_filename(user_prompt)
        create_file(saved_name, user_prompt, reply)
        st.sidebar.markdown(get_table_download_link(saved_name), unsafe_allow_html=True)

    # Sidebar: download and delete controls for every previously saved chat.
    for saved in glob.glob("*.htm"):
        st.sidebar.markdown(get_table_download_link(saved), unsafe_allow_html=True)
        if st.sidebar.button(f"Delete {saved}"):
            os.remove(saved)
            st.experimental_rerun()

if __name__ == "__main__":
    main()