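"""Streamlit chat app: sends a user prompt, optionally combined with the text of
an uploaded XML/JSON/HTM/TXT file, to the OpenAI chat API and saves each
exchange as a downloadable .htm file."""
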
import streamlit as st
import openai
import os
import base64
import glob
import json
from xml.etree import ElementTree as ET
from bs4 import BeautifulSoup
from datetime import datetime
from dotenv import load_dotenv

load_dotenv()

openai.api_key = os.getenv('OPENAI_KEY')

def chat_with_model(prompts):
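    """Send the collected prompts to the chat model and return the reply text."""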
    model = "gpt-3.5-turbo"

    conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
    conversation.extend([{'role': 'user', 'content': prompt} for prompt in prompts])

    response = openai.ChatCompletion.create(model=model, messages=conversation)
    return response['choices'][0]['message']['content']

def generate_filename(prompt):
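    """Build an .htm filename from a timestamp and a sanitized slice of the prompt."""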
    safe_date_time = datetime.now().strftime("%m_%d_%H_%M")
    safe_prompt = "".join(x for x in prompt if x.isalnum())[:50]
    return f"{safe_date_time}_{safe_prompt}.htm"

def create_file(filename, prompt, response):
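    """Write the prompt/response pair to disk as a simple HTML fragment."""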
    with open(filename, 'w') as file:
        file.write(f"<h1>Prompt:</h1> <p>{prompt}</p> <h1>Response:</h1> <p>{response}</p>")

def get_table_download_link(file_path):
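    """Return an HTML download link embedding the file's contents as a base64 data URI."""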
    with open(file_path, 'r') as file:
        data = file.read()
    b64 = base64.b64encode(data.encode()).decode()
    href = f'<a href="data:file/htm;base64,{b64}" target="_blank" download="{os.path.basename(file_path)}">{os.path.basename(file_path)}</a>'
    return href

def CompressXML(xml_text):
    """Remove comment-like elements from an XML document to shrink the prompt."""
    root = ET.fromstring(xml_text)
    # ElementTree elements carry no parent reference, so build a child -> parent
    # map up front and use it to detach matching elements from their parents.
    parent_map = {child: parent for parent in root.iter() for child in parent}
    for elem in list(root.iter()):
        if isinstance(elem.tag, str) and 'Comment' in elem.tag:
            parent = parent_map.get(elem)
            if parent is not None:
                parent.remove(elem)
    return ET.tostring(root, encoding='unicode', method="xml")


def read_file_content(file):
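    """Extract plain text from an uploaded JSON, HTML, XML, or plain-text file."""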
    if file.type == "application/json":
        content = json.load(file)
        return str(content)
    elif file.type == "text/html":
        content = BeautifulSoup(file, "html.parser")
        return content.text
    elif file.type == "application/xml" or file.type == "text/xml":
        xml_text = file.getvalue().decode()
        compressed_text = CompressXML(xml_text)
        return compressed_text
    elif file.type == "text/plain":
        return file.getvalue().decode()
    else:
        return ""

def main():
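    """Render the Streamlit UI: prompt box, file uploader, chat button, and saved-chat sidebar."""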
    st.title("Chat with AI")

    prompts = []

    user_prompt = st.text_area("Your question:", '', height=120)
    uploaded_file = st.file_uploader("Choose a file", type=["xml", "json", "htm", "txt"])

    if user_prompt:
        prompts.append(user_prompt)

    if uploaded_file is not None:
        file_content = read_file_content(uploaded_file)
        st.markdown(f"**Content Added to Prompt:**\n{file_content}")
        prompts.append(file_content)

    if st.button('Chat'):
        st.write('Chatting with GPT-3.5-turbo...')
        response = chat_with_model(prompts)
        st.write('Response:')
        st.write(response)

        filename = generate_filename(user_prompt)
        create_file(filename, user_prompt, response)

        st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)

    htm_files = glob.glob("*.htm")
    for file in htm_files:
        st.sidebar.markdown(get_table_download_link(file), unsafe_allow_html=True)
        if st.sidebar.button(f"Delete {file}"):
            os.remove(file)

if __name__ == "__main__":
    main()