Commit 68cd41b · Parent: bf35343
Added image container and fixed issues with layout and facility information

app.py CHANGED
@@ -6,14 +6,19 @@ import os
 
 CSS ="""
 .contain { display: flex; flex-direction: column; }
-.gradio-container { height: 100vh !important; }
 .svelte-vt1mxs div:first-child { flex-grow: 1; overflow: auto;}
 #chatbot { flex-grow: 1; overflow: auto;}
-
-footer {visibility: hidden}
+footer {display: none !important;}
 .app.svelte-182fdeq.svelte-182fdeq {
   max-width: 100vw !important;
 }
+#main_container {
+  height: 95vh;
+}
+#markup_container {
+  height: 100%;
+  overflow:auto;
+}
 """
 
 openAIToken = os.environ['openAIToken']
@@ -77,7 +82,12 @@ def process_text_chunk(text, storage):
     if "#" in text and storage["is_loading_suggestions"] != True:
         storage["is_loading_markup"] = True
 
-    if storage["is_loading_suggestions"] == True or storage["is_loading_markup"] == True:
+    if "<" in text:
+        storage["is_loading_suggestions"] = False
+        storage["is_loading_markup"] = False
+        storage["is_loading_svg"] = True
+
+    if storage["is_loading_suggestions"] == True or storage["is_loading_markup"] == True or storage["is_loading_svg"] == True:
         accumulative_string = storage["accumulative_string"] + text
         if storage["is_loading_suggestions"] == True:
             if "#s#" in accumulative_string:
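A note on the protocol (orientation only, not part of the commit): the assistant's streamed reply interleaves plain chat text with sentinel-delimited payloads, and the flags set above switch process_text_chunk between plain-text, suggestion (#s#), markup (#p#) and, new in this commit, SVG (<svg ... </svg>) modes. A hypothetical complete reply, reassembled from its chunks, might look like this — only the delimiters come from the code, the payload text is invented:

# Hypothetical example of one full assistant reply. The sentinels #s#, #p#
# and <svg ... </svg> are taken from the parser; everything else is made up.
reply = (
    "Here is the updated plan. "
    "#p## Facility information\n- Rooms: 12\n- Floor: 2#p#"              # markup pane payload
    "<svg width='40' height='40'><rect width='40' height='40'/></svg>"   # drawing payload
    " Anything else?"
)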
@@ -89,17 +99,43 @@ def process_text_chunk(text, storage):
                 storage["is_loading_suggestions"] = False
                 local_message = accumulative_string
                 accumulative_string = ""
-
+        elif storage["is_loading_markup"]:
             if "#p#" in accumulative_string:
                 parts = accumulative_string.split("#p#")
-
-
-
+                if len(parts) > 2:
+                    accumulative_string = parts[0] + parts[2]
+                    storage["markup_string"] = parts[1]
+                    storage["is_loading_markup"] = False
+                else:
+                    local_message = parts[0]
+                    accumulative_string = "#p#" + parts[1]
+                    storage["markup_string"] = parts[1]
             elif "#" in accumulative_string and "#p" not in accumulative_string and not accumulative_string.endswith("#"):
-                storage["markup_string"] = accumulative_string[4:]
                 storage["is_loading_markup"] = False
                 local_message = accumulative_string
                 accumulative_string = ""
+        else:
+            if "<" in accumulative_string and "<s" not in accumulative_string and not accumulative_string.endswith("<"):
+                storage["is_loading_svg"] = False
+                local_message = accumulative_string
+                accumulative_string = ""
+            elif "<svg" in accumulative_string:
+                parts = accumulative_string.split("<svg")
+                if "#p#" in parts[0]:
+                    info_parts = parts[0].split('#p#')
+                    local_message = info_parts[0]
+                else:
+                    local_message = parts[0]
+
+                if "</svg>" in parts[1]:
+                    svg_ending = ("<svg" + parts[1]).split('</svg>')
+                    storage["svg"] = svg_ending[0] + '</svg>'
+                    accumulative_string = svg_ending[1]
+                    storage["is_loading_svg"] = False
+                else:
+                    accumulative_string = "<svg" + parts[1]
+                    storage["svg"] = accumulative_string
+
         storage["accumulative_string"] = accumulative_string
     else:
         local_message = text
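A minimal harness (not in the commit) showing how the new branches behave when each tag arrives in its own chunk; it assumes process_text_chunk initializes local_message to None and returns (local_message, storage), which is how handle_events calls it below:

# Storage shape mirrors the reset in handle_events (see the next hunk).
storage = {"list_of_suggestions": [], "is_loading_suggestions": False,
           "is_loading_markup": False, "is_loading_svg": False,
           "accumulative_string": "", "markup_string": "", "svg": ""}

chunks = [
    "Intro text. ",                        # plain text: passed straight through
    "#p#- Rooms: 12#p#",                   # markup: diverted to storage["markup_string"]
    "<svg width='40' height='40'></svg>",  # drawing: diverted to storage["svg"]
    " Done.",                              # plain text again
]
chat = ""
for chunk in chunks:
    local_message, storage = process_text_chunk(chunk, storage)
    if local_message is not None:
        chat += local_message

# Expected end state, assuming the '#' and '<' sentinels never collide
# inside a single chunk:
#   chat == "Intro text.  Done."
#   storage["markup_string"] == "- Rooms: 12"
#   storage["svg"] == "<svg width='40' height='40'></svg>"

The mode switches key off single characters ('#', '<'), so a chunk that mixes two different sentinels is the fragile case; the accumulative_string buffer exists to absorb tags split across chunk boundaries.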
@@ -110,8 +146,10 @@ def handle_events(threadId, chat_history, storage):
         "list_of_suggestions" : [],
         "is_loading_suggestions" : False,
         "is_loading_markup" : False,
+        "is_loading_svg": False,
         "accumulative_string" : "",
-        "markup_string": ""
+        "markup_string": "",
+        "svg": ""
     })
     try:
         with client.beta.threads.runs.stream(
@@ -124,7 +162,7 @@ def handle_events(threadId, chat_history, storage):
                 local_message, storage = process_text_chunk(text, storage)
                 if local_message is not None:
                     chat_history[-1][1] += local_message
-
+                    yield [ chat_history, storage, storage["markup_string"], storage["svg"]]
             if event.event == 'thread.run.requires_action':
                 result = handle_requires_action(event.data)
                 tool_outputs = [x["tool_output"] for x in result]
@@ -137,22 +175,22 @@ def handle_events(threadId, chat_history, storage):
                         local_message, storage = process_text_chunk(text, storage)
                         if local_message is not None:
                             chat_history[-1][1] += local_message
-
+                            yield [chat_history, storage, storage["markup_string"], storage["svg"]]
                 action_stream.close()
         stream.until_done()
         print("")
-        return [chat_history, storage, storage["markup_string"]]
+        return [chat_history, storage, storage["markup_string"], storage["svg"]]
     except Exception as e:
         print(e)
         chat_history[-1][1] = "Error occured during processing your message. Please try again"
-        yield [chat_history, storage, storage["markup_string"]]
+        yield [chat_history, storage, storage["markup_string"], storage["svg"]]
 
 def initiate_chatting(chat_history, storage):
     threadId = storage["threadId"]
     chat_history = [[None, ""]]
     add_message_to_openai(initial_message, threadId)
     for response in handle_events(threadId, chat_history, storage):
-        yield response
+        yield response
 
 def respond_on_user_msg(chat_history, storage):
     message = chat_history[-1][0]
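The yields added above are what make the UI stream: when a Gradio event handler is a generator, every yield repaints the bound output components. A stripped-down sketch of the same pattern (standalone, not this app's code):

import gradio as gr

def stream_reply(history):
    # Append to the last bot message and yield after each piece — the same
    # shape handle_events now has with its per-chunk yields.
    history = history + [[None, ""]]
    for piece in ["Hello", ", ", "world"]:
        history[-1][1] += piece
        yield history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    btn = gr.Button("Stream")
    btn.click(stream_reply, inputs=chatbot, outputs=chatbot)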
@@ -166,17 +204,19 @@ def create_application():
 def create_application():
     with gr.Blocks(css=CSS, fill_height=True) as demo:
         storage = gr.State({"list_of_suggestions": [], "is_loading_suggestions": False, "is_loading_markup": False, "accumulative_string": "", "markup_string": ""})
-
+        btn_list = []
+        with gr.Row(elem_id="main_container"):
             with gr.Column(scale=4):
                 chatbot = gr.Chatbot(label="Facility managment bot", line_breaks=False, height=300, show_label=False, show_share_button=False, elem_id="chatbot")
-
+                with gr.Row():
+                    for i in range(6):
+                        btn = gr.Button(visible=False, size="sm")
+                        btn_list.append(btn)
+                msg = gr.Textbox(label="Answer", interactive=False)
+            with gr.Column(scale=1, elem_id="markup_container"):
                 markdown = gr.Markdown(label="Bullet-list", value="# Facility information")
-
-
-            for i in range(6):
-                btn = gr.Button(visible=False, size="sm")
-                btn_list.append(btn)
-            msg = gr.Textbox(label="Answer", interactive=False)
+                with gr.Row(variant="compact"):
+                    svg_container = gr.HTML(label="SVG Container", value="""""")
 
         def user(user_message, history):
             return "", history + [[user_message, None]]
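The elem_id hooks introduced here are what the new CSS rules in the first hunk target: #main_container pins the row to 95vh and #markup_container lets the right-hand pane scroll. A self-contained sketch of that pairing (illustrative values, not the app's exact code):

import gradio as gr

CSS = """
#main_container { height: 95vh; }
#markup_container { height: 100%; overflow: auto; }
"""

with gr.Blocks(css=CSS) as demo:
    with gr.Row(elem_id="main_container"):            # fills most of the viewport
        with gr.Column(scale=4):                      # chat pane, 4/5 of the width
            gr.Chatbot(elem_id="chatbot")
        with gr.Column(scale=1, elem_id="markup_container"):  # scrolling info pane
            gr.Markdown("# Facility information")
            gr.HTML("<svg width='40' height='40'></svg>")     # the new image container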
@@ -198,7 +238,7 @@ def create_application():
             return message_box
 
         add_user_message_flow = [user, [msg,chatbot], [msg,chatbot]]
-        chat_response_flow = [respond_on_user_msg, [chatbot, storage], [chatbot, storage, markdown]]
+        chat_response_flow = [respond_on_user_msg, [chatbot, storage], [chatbot, storage, markdown, svg_container]]
         update_suggestions_flow = [update_suggestions, storage, btn_list]
         hide_suggestions_flow = [hide_suggestions, None, btn_list]
         disable_msg_flow = [disable_msg, None, msg]
@@ -220,7 +260,7 @@ def create_application():
         ).then(*enable_msg_flow)
 
         demo.load(create_thread_openai, inputs=storage, outputs=storage
-        ).then(initiate_chatting, inputs=[chatbot, storage], outputs=[chatbot, storage, markdown]
+        ).then(initiate_chatting, inputs=[chatbot, storage], outputs=[chatbot, storage, markdown, svg_container]
         ).then(*update_suggestions_flow
         ).then(*enable_msg_flow)
         return demo
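One wiring observation (not part of the diff): .then() links run strictly in sequence, and a generator step counts as finished only once it is exhausted, so update_suggestions and enable_msg fire only after initiate_chatting has streamed the entire greeting. The same reasoning explains the chat_response_flow change above: every flow that binds respond_on_user_msg or initiate_chatting must now list svg_container among its outputs, or the streamed SVG would never reach the page.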