import streamlit as st
import openai
import os
import base64
import glob
import json
import mistune
import pytz
import math
import requests

from datetime import datetime
from xml.etree import ElementTree as ET
from bs4 import BeautifulSoup
from collections import deque
from audio_recorder_streamlit import audio_recorder

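# Read the API key from the OPENAI_KEY environment variable (never hard-code it)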
openai.api_key = os.getenv('OPENAI_KEY')
st.set_page_config(page_title="GPT Streamlit Document Reasoner", layout="wide")

menu = ["txt", "htm", "md", "py"]
choice = st.sidebar.selectbox("Output File Type:", menu)
model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))

# Streamlit reruns this script on every widget interaction, so keep the list
# of recorded files in session_state; a plain module-level list would reset.
if 'uploaded_files' not in st.session_state:
    st.session_state['uploaded_files'] = []
uploaded_files = st.session_state['uploaded_files']

def generate_filename(prompt, file_type):
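    """Build a filesystem-safe filename from a US-Central timestamp and the
    first 45 alphanumeric characters of the prompt."""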
    central = pytz.timezone('US/Central')
    safe_date_time = datetime.now(central).strftime("%m%d_%I%M")  
    safe_prompt = "".join(x for x in prompt if x.isalnum())[:45]
    return f"{safe_date_time}_{safe_prompt}.{file_type}"

def chat_with_model(prompt, document_section):
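    """Send the prompt to the selected chat model and return the reply text.
    The document section (possibly empty) is passed as a prior assistant turn."""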
    model = model_choice
    conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
    conversation.append({'role': 'user', 'content': prompt})
    conversation.append({'role': 'assistant', 'content': document_section})
    response = openai.ChatCompletion.create(model=model, messages=conversation)
    return response['choices'][0]['message']['content']

def transcribe_audio(openai_key, file_path, model):
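    """POST an audio file to OpenAI's /v1/audio/transcriptions endpoint.
    Displays the raw response, pushes the transcript through chat_with_model,
    and returns the transcript text (None if the call fails)."""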
    OPENAI_API_URL = "https://api.openai.com/v1/audio/transcriptions"
    headers = {
        "Authorization": f"Bearer {openai_key}",
    }
    with open(file_path, 'rb') as f:
        data = {'file': f}
        response = requests.post(OPENAI_API_URL, headers=headers, files=data, data={'model': model})
    if response.status_code == 200:
        result = response.json()
        st.write(result)
        transcript = result.get('text')
        response2 = chat_with_model(transcript, '')
        st.write('Responses:')
        st.write(response2)
        return transcript
    else:
        st.write(response.json())
        st.error("Error in API call.")
        return None

def save_and_play_audio(audio_recorder):
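    """Record audio in the browser, save it to a timestamped .wav file,
    play it back, and return the new filename (None if nothing was recorded)."""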
    audio_bytes = audio_recorder()
    if audio_bytes:
        filename = generate_filename("Recording", "wav")
        with open(filename, 'wb') as f:
            f.write(audio_bytes)
        st.audio(audio_bytes, format="audio/wav")
        uploaded_files.append(filename)  # Add the new file name to the list
        return filename
    return None

# ... (All your other function definitions)

def main():
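    """Render the app: prompt box, context file uploader, audio recorder,
    and chat buttons."""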
    user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100)

    collength, colupload = st.columns([2,3])  # adjust the ratio as needed
    with collength:
        max_length = st.slider("File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000)
    with colupload:
        uploaded_file = st.file_uploader("Add a file for context:", type=["xml", "json", "html", "htm", "md", "txt"])
    
    document_sections = deque()
    # Cache responses per prompt in session_state so the '💬 Chat' lookup
    # below survives Streamlit reruns (a local dict would always be empty).
    if 'document_responses' not in st.session_state:
        st.session_state['document_responses'] = {}
    document_responses = st.session_state['document_responses']

    if uploaded_file is not None:
        file_content = uploaded_file.getvalue().decode('utf-8')
        # Handle different file types here
        # ...
        document_sections.append(file_content)
        
    new_filename = save_and_play_audio(audio_recorder)
    if new_filename is not None:
        st.write(f'File {new_filename} uploaded.')
        if st.button("Transcribe"):
            transcription = transcribe_audio(openai.api_key, new_filename, "whisper-1")
            st.write(transcription)
            st.write(chat_with_model(transcription, ''))  # push transcript through as prompt and show the reply

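    # Answer the prompt: prefer a cached response, then the first uploaded
    # document section as context, otherwise send the bare prompt.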
    if st.button('💬 Chat'):
        user_responses = document_responses.get(user_prompt, [])
        if user_responses:
            st.write(user_responses[-1])
        elif document_sections:
            st.write('First document section:')
            st.write(document_sections[0])
            document_responses[user_prompt] = [chat_with_model(user_prompt, document_sections[0])]
            st.write(document_responses[user_prompt][-1])
        else:
            document_responses[user_prompt] = [chat_with_model(user_prompt, '')]
            st.write(document_responses[user_prompt][-1])
                
    if uploaded_files:
        st.write(f'Last uploaded file: {uploaded_files[-1]}')

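    # Offer transcribe-and-chat for each recording made this session
    # (transcribe_and_chat, create_file, and get_table_download_link are
    # among the helper definitions elided above).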
    for filename in uploaded_files:
        if st.button(f"Transcribe and Chat for {filename}"):
            transcription, response = transcribe_and_chat(openai.api_key, filename, "whisper-1")
            if transcription is not None and response is not None:
                filename = generate_filename(transcription, choice)
                create_file(filename, transcription, response)
                st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)

if __name__ == "__main__":
    main()