Update app.py
app.py CHANGED
@@ -36,7 +36,18 @@ def chat_with_model(prompt, document_section):
     conversation.append({'role': 'assistant', 'content': document_section})
     response = openai.ChatCompletion.create(model=model, messages=conversation)
     return response['choices'][0]['message']['content']
-
+
+def create_file(filename, prompt, response):
+    if filename.endswith(".txt"):
+        with open(filename, 'w') as file:
+            file.write(f"Prompt:\n{prompt}\nResponse:\n{response}")
+    elif filename.endswith(".htm"):
+        with open(filename, 'w') as file:
+            file.write(f"<h1>Prompt:</h1> <p>{prompt}</p> <h1>Response:</h1> <p>{response}</p>")
+    elif filename.endswith(".md"):
+        with open(filename, 'w') as file:
+            file.write(f"# Prompt:\n{prompt}\n# Response:\n{response}")
+
 # Updated to auto process transcript to chatgpt in AI pipeline from Whisper to ChatGPT
 def transcribe_audio(openai_key, file_path, model):
     OPENAI_API_URL = "https://api.openai.com/v1/audio/transcriptions"
@@ -50,7 +61,18 @@ def transcribe_audio(openai_key, file_path, model):
         st.write('Reasoning with your transcription..')
         st.write(response.json())
         response2 = chat_with_model(response.json().get('text'), '') # send transcript to ChatGPT
-        create_file(filename,
+        def create_file(filename, prompt, response):
+            if filename.endswith(".txt"):
+                with open(filename, 'w') as file:
+                    file.write(f"Prompt:\n{prompt}\nResponse:\n{response}")
+            elif filename.endswith(".htm"):
+                with open(filename, 'w') as file:
+                    file.write(f"<h1>Prompt:</h1> <p>{prompt}</p> <h1>Response:</h1> <p>{response}</p>")
+            elif filename.endswith(".md"):
+                with open(filename, 'w') as file:
+                    file.write(f"# Prompt:\n{prompt}\n# Response:\n{response}")
+
+        (filename, response, response2)
         return response2
     else:
         st.write(response.json())
@@ -77,16 +99,6 @@ if filename is not None:
     #chat_with_model(transcription, '') # push transcript through as prompt
     #response2 = chat_with_model(transcription.json().get('text'), '')
 
-def create_file(filename, prompt, response):
-    if filename.endswith(".txt"):
-        with open(filename, 'w') as file:
-            file.write(f"Prompt:\n{prompt}\nResponse:\n{response}")
-    elif filename.endswith(".htm"):
-        with open(filename, 'w') as file:
-            file.write(f"<h1>Prompt:</h1> <p>{prompt}</p> <h1>Response:</h1> <p>{response}</p>")
-    elif filename.endswith(".md"):
-        with open(filename, 'w') as file:
-            file.write(f"# Prompt:\n{prompt}\n# Response:\n{response}")
 
 def truncate_document(document, length):
     return document[:length]
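
For reference, a minimal standalone sketch of the create_file helper this commit introduces and how it would presumably be called once a Whisper transcript and a ChatGPT reply are in hand. The filename and the sample prompt/response strings are placeholders for illustration, not values produced by the app.

# Sketch only: create_file as added in this commit, called directly with placeholder data.
def create_file(filename, prompt, response):
    if filename.endswith(".txt"):
        with open(filename, 'w') as file:
            file.write(f"Prompt:\n{prompt}\nResponse:\n{response}")
    elif filename.endswith(".htm"):
        with open(filename, 'w') as file:
            file.write(f"<h1>Prompt:</h1> <p>{prompt}</p> <h1>Response:</h1> <p>{response}</p>")
    elif filename.endswith(".md"):
        with open(filename, 'w') as file:
            file.write(f"# Prompt:\n{prompt}\n# Response:\n{response}")

if __name__ == "__main__":
    # Placeholder values: in the app these would be the Whisper transcript
    # and the reply returned by chat_with_model().
    create_file("transcript_output.md", "example transcript text", "example model reply")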
|