Update backup.py
backup.py CHANGED
@@ -3,11 +3,13 @@ import openai
 import os
 import base64
 import glob
+import io
 import json
 import mistune
 import pytz
 import math
 import requests
+import sys
 import time
 import re
 import textract
@@ -106,40 +108,56 @@ def create_file(filename, prompt, response, should_save=True):
     # Step 3: Check if the response contains Python code
     has_python_code = bool(re.search(r"```python([\s\S]*?)```", response))

-    # Step 4:
-
-    # Create Prompt file
-    with open(f"{base_filename}-Prompt.txt", 'w') as file:
-        file.write(prompt)
-
-    # Create Response file
-    with open(f"{base_filename}-Response.md", 'w') as file:
-        file.write(response)
+    # Step 4: Initialize the combined content
+    combined_content = ""

-
-
-    # Extract Python code from the response
-    python_code = re.findall(r"```python([\s\S]*?)```", response)[0].strip()
+    # Add Prompt with markdown title and emoji
+    combined_content += "# Prompt π\n" + prompt + "\n\n"

-
-
+    # Add Response with markdown title and emoji
+    combined_content += "# Response π¬\n" + response + "\n\n"

+    # Check for Python code or other resources and add them with markdown title and emoji
+    resources = re.findall(r"```([\s\S]*?)```", response)
+    for resource in resources:
+        # Check if the resource contains Python code
+        if "python" in resource.lower():
+            # Remove the word 'python' from the beginning of the code block
+            cleaned_code = re.sub(r'^\s*python', '', resource, flags=re.IGNORECASE | re.MULTILINE)
+
+            # Add Code Results title with markdown and emoji
+            combined_content += "# Code Results π\n"

-
-
-
-
-
-
-
-
-
-
-
-
+            # Capture standard output
+            original_stdout = sys.stdout
+            sys.stdout = io.StringIO()
+
+            # Execute cleaned Python code and capture the output
+            try:
+                exec(cleaned_code)
+                code_output = sys.stdout.getvalue()
+                combined_content += f"```\n{code_output}\n```\n\n"
+                realtimeEvalResponse = "# Code Results π\n" + "```" + code_output + "```\n\n"
+                st.write(realtimeEvalResponse)
+
+            except Exception as e:
+                combined_content += f"```python\nError executing Python code: {e}\n```\n\n"

+            # Restore the original standard output
+            sys.stdout = original_stdout
+        else:
+            # Add Resource title with markdown and emoji for non-Python resources
+            combined_content += "# Resource π οΈ\n" + "```" + resource + "```\n\n"
+
+    # Write the combined content into one file
+    with open(f"{base_filename}-Combined.md", 'w') as file:
+        file.write(combined_content)
+
+
+
 def truncate_document(document, length):
     return document[:length]
+
 def divide_document(document, max_length):
     return [document[i:i+max_length] for i in range(0, len(document), max_length)]

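As a reading aid for the new create_file body above, here is a minimal, self-contained sketch of the pattern it relies on: find fenced code blocks with a non-greedy regex, execute the Python ones, and capture their printed output by temporarily pointing sys.stdout at an io.StringIO buffer. The helper name extract_and_run and the sample string are illustrative only and do not appear in backup.py; the sketch restores stdout in a finally block, a slightly more defensive variant of the restore step in the hunk, and it leaves out the markdown headings and emoji that backup.py appends to combined_content.

import io
import re
import sys

def extract_and_run(response: str) -> str:
    """Pull fenced code blocks out of `response`, run the Python ones, and
    return whatever they printed (or the error they raised)."""
    report = ""
    # Non-greedy [\s\S]*? matches each fenced block, newlines included.
    for resource in re.findall(r"```([\s\S]*?)```", response):
        if "python" not in resource.lower():
            continue  # backup.py files these under a "Resource" heading instead
        # Strip the leading "python" language tag from the block.
        code = re.sub(r'^\s*python', '', resource, flags=re.IGNORECASE | re.MULTILINE)
        original_stdout = sys.stdout
        sys.stdout = io.StringIO()            # print() now writes to this buffer
        try:
            exec(code)
            report += sys.stdout.getvalue()   # everything the snippet printed
        except Exception as e:
            report += f"Error executing Python code: {e}\n"
        finally:
            sys.stdout = original_stdout      # always put the real stdout back
    return report

sample = "Here is code:\n```python\nprint(2 + 2)\n```"
print(extract_and_run(sample))                # -> 4

Note that exec runs the generated code in-process, so the try/except only catches Python exceptions, and the StringIO swap captures stdout but not stderr.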
@@ -225,7 +243,7 @@ def readitaloud(result):
     </html>
     '''

-    components.html(documentHTML5, width=
+    components.html(documentHTML5, width=800, height=300)
     #return result

 def chat_with_model(prompt, document_section, model_choice='gpt-3.5-turbo'):
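The last hunk completes a components.html call that previously broke off after width=. As a stand-alone illustration of that Streamlit API, the sketch below embeds a small HTML page in a fixed-size iframe; the placeholder markup is not the Space's actual documentHTML5 string, and it needs to run inside a Streamlit app (streamlit run).

import streamlit.components.v1 as components

# Placeholder page; backup.py builds its own documentHTML5 string for read-aloud.
documentHTML5 = """
<!DOCTYPE html>
<html>
  <body>
    <h3>Read Aloud</h3>
    <p>This markup is rendered inside an iframe by components.html.</p>
  </body>
</html>
"""

# With no explicit size the embedded iframe defaults to a short height (150 px),
# so passing width=800, height=300 keeps the page visible without scrolling.
components.html(documentHTML5, width=800, height=300)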