oleksandrburlakov committed
Commit bc55efe · Parent: 6d36c4a

Add preview

Files changed (1):
  1. app.py +57 -29
app.py CHANGED
@@ -7,8 +7,12 @@ import os
 CSS ="""
 .contain { display: flex; flex-direction: column; }
 .gradio-container { height: 100vh !important; }
+.svelte-vt1mxs div:first-child { flex-grow: 1; overflow: auto;}
 #chatbot { flex-grow: 1; overflow: auto;}
 footer {visibility: hidden}
+.app.svelte-182fdeq.svelte-182fdeq {
+    max-width: 100vw !important;
+}
 """
 
 openAIToken = os.environ['openAIToken']
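The two new rules target Gradio's generated Svelte class names (.svelte-vt1mxs, .svelte-182fdeq), which are tied to the installed Gradio version. A more stable hook for custom CSS is an explicit elem_id; the snippet below is an illustrative sketch, not part of the commit:

```python
# Sketch only: elem_id gives a stable selector for custom CSS, whereas
# generated class names like .svelte-182fdeq can change between Gradio
# releases. The component and id below are hypothetical.
import gradio as gr

CSS = "#preview { max-width: 100vw !important; }"

with gr.Blocks(css=CSS) as demo:
    gr.Markdown("# Facility information", elem_id="preview")
```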
@@ -63,30 +67,51 @@ def create_suggestions_list(suggestions):
     update_hide = [gr.update(visible=False, value="") for _ in range(6-len(suggestions))]
     return update_show + update_hide
 
-def process_text_chunk(text, list_of_suggestions, string_of_suggestions, is_loading_suggestions):
+def process_text_chunk(text, storage):
     print(text, end="", flush=True)
     local_message = None
     if "[" in text:
-        is_loading_suggestions = True
-
-    if is_loading_suggestions != True:
-        local_message = text
+        storage["is_loading_suggestions"] = True
+
+    if "#" in text and storage["is_loading_suggestions"] != True:
+        storage["is_loading_markup"] = True
+
+    if storage["is_loading_suggestions"] == True or storage["is_loading_markup"] == True:
+        accumulative_string = storage["accumulative_string"] + text
+        if storage["is_loading_suggestions"] == True:
+            if "#s#" in accumulative_string:
+                storage["is_loading_suggestions"] = False
+                list_of_suggestions, local_message = transform_suggestions_into_list(accumulative_string)
+                storage["list_of_suggestions"] = list_of_suggestions
+                accumulative_string = ""
+            elif "]" in accumulative_string and "]#" not in accumulative_string and not accumulative_string.endswith("]"):
+                storage["is_loading_suggestions"] = False
+                local_message = accumulative_string
+                accumulative_string = ""
+        else:
+            if "#p#" in accumulative_string:
+                parts = accumulative_string.split("#p#")
+                local_message = parts[0]
+                accumulative_string = "#p#" + parts[1]
+                storage["markup_string"] = accumulative_string[3:]
+            elif "#" in accumulative_string and "#p" not in accumulative_string and not accumulative_string.endswith("#"):
+                storage["markup_string"] = accumulative_string[4:]
+                storage["is_loading_markup"] = False
+                local_message = accumulative_string
+                accumulative_string = ""
+        storage["accumulative_string"] = accumulative_string
     else:
-        string_of_suggestions = string_of_suggestions + text
-        if "#s#" in string_of_suggestions:
-            is_loading_suggestions = False
-            list_of_suggestions, local_message = transform_suggestions_into_list(string_of_suggestions)
-            string_of_suggestions = ""
-        elif "]" in string_of_suggestions and "]#" not in string_of_suggestions and not string_of_suggestions.endswith("]"):
-            is_loading_suggestions = False
-            local_message = string_of_suggestions
-            string_of_suggestions = ""
-    return local_message, list_of_suggestions, string_of_suggestions, is_loading_suggestions
+        local_message = text
+    return local_message, storage
 
 def handle_events(threadId, chat_history, storage):
-    list_of_suggestions = []
-    string_of_suggestions = ""
-    is_loading_suggestions = False
+    storage.update({
+        "list_of_suggestions" : [],
+        "is_loading_suggestions" : False,
+        "is_loading_markup" : False,
+        "accumulative_string" : "",
+        "markup_string": ""
+    })
     try:
         with client.beta.threads.runs.stream(
             thread_id=threadId,
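The rewritten process_text_chunk multiplexes three channels out of one token stream: ordinary chat text passes straight through, a "[...]" block is buffered until its "#s#" terminator and parsed into suggestion buttons, and text after a "#p#" marker is routed to storage["markup_string"] for the new preview pane. The driver below is a hypothetical sketch of that protocol, assuming process_text_chunk from this commit is in scope and using a stand-in for transform_suggestions_into_list (the real helper is defined earlier in app.py):

```python
# Hypothetical driver for the marker protocol; not part of the commit.
def transform_suggestions_into_list(raw):
    # Stand-in for the real helper defined earlier in app.py.
    inner = raw[raw.index("[") + 1 : raw.index("]")]
    return [s.strip() for s in inner.split(",")], ""

storage = {"list_of_suggestions": [], "is_loading_suggestions": False,
           "is_loading_markup": False, "accumulative_string": "",
           "markup_string": ""}

# Plain text is emitted immediately; the bracketed block is held back
# until its #s# terminator arrives, then parsed into suggestions.
for chunk in ["Hello! ", "[Book a room, Report an issue]", "#s#"]:
    local_message, storage = process_text_chunk(chunk, storage)
    if local_message is not None:
        print("\nto chat:", repr(local_message))

print("suggestions:", storage["list_of_suggestions"])
# suggestions: ['Book a room', 'Report an issue']
```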
@@ -95,10 +120,10 @@ def handle_events(threadId, chat_history, storage):
             for event in stream:
                 if event.event == "thread.message.delta" and event.data.delta.content:
                     text = event.data.delta.content[0].text.value
-                    local_message, list_of_suggestions, string_of_suggestions, is_loading_suggestions = process_text_chunk(text, list_of_suggestions, string_of_suggestions, is_loading_suggestions)
+                    local_message, storage = process_text_chunk(text, storage)
                     if local_message is not None:
                         chat_history[-1][1] += local_message
-                        yield [ chat_history, storage]
+                        yield [ chat_history, storage, storage["markup_string"]]
                 if event.event == 'thread.run.requires_action':
                     result = handle_requires_action(event.data)
                     tool_outputs = [x["tool_output"] for x in result]
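Each yield now carries three values, one per output component, so the Markdown preview repaints alongside the chat while tokens stream in. A minimal, self-contained sketch of this multi-output generator pattern (component names and data are illustrative):

```python
import time
import gradio as gr

def stream_demo(history):
    preview = ""
    for word in ["Facility", "report", "ready"]:
        history[-1][1] += word + " "   # same mutation pattern as handle_events
        preview += "- " + word + "\n"
        time.sleep(0.1)                # simulate token latency
        yield history, preview         # one value per declared output

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(value=[["hi", ""]])
    markdown = gr.Markdown()
    gr.Button("Stream").click(stream_demo, inputs=chatbot, outputs=[chatbot, markdown])
```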
@@ -108,19 +133,18 @@ def handle_events(threadId, chat_history, storage):
                        tool_outputs=tool_outputs,
                    ) as action_stream:
                        for text in action_stream.text_deltas:
-                            local_message, list_of_suggestions, string_of_suggestions, is_loading_suggestions = process_text_chunk(text, list_of_suggestions, string_of_suggestions, is_loading_suggestions)
+                            local_message, storage = process_text_chunk(text, storage)
                            if local_message is not None:
                                chat_history[-1][1] += local_message
-                                yield [chat_history, storage]
+                                yield [chat_history, storage, storage["markup_string"]]
                        action_stream.close()
            stream.until_done()
        print("")
-        storage["list_of_suggestions"] = list_of_suggestions
-        return [chat_history, storage]
+        return [chat_history, storage, storage["markup_string"]]
    except Exception as e:
        print(e)
        chat_history[-1][1] = "Error occured during processing your message. Please try again"
-        yield [chat_history, storage]
+        yield [chat_history, storage, storage["markup_string"]]
 
 def initiate_chatting(chat_history, storage):
     threadId = storage["threadId"]
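One subtlety in this hunk: handle_events is a generator (it yields), so the return on the success path does not deliver a value to Gradio; the UI keeps whatever the last yield produced, and the returned list only ends up on StopIteration. A short illustration of that Python behavior:

```python
def gen():
    yield "partial"
    return "final"            # not returned to a normal caller

g = gen()
print(next(g))                # partial
try:
    next(g)
except StopIteration as stop:
    print(stop.value)         # final
```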
@@ -140,8 +164,12 @@ def respond_on_user_msg(chat_history, storage):
 
 def create_application():
     with gr.Blocks(css=CSS, fill_height=True) as demo:
-        storage = gr.State({})
-        chatbot = gr.Chatbot(label="Facility managment bot", line_breaks=False, show_label=False, show_share_button=False, elem_id="chatbot", height=300)
+        storage = gr.State({"list_of_suggestions": [], "is_loading_suggestions": False, "is_loading_markup": False, "accumulative_string": "", "markup_string": ""})
+        with gr.Row():
+            with gr.Column(scale=4):
+                chatbot = gr.Chatbot(label="Facility managment bot", line_breaks=False, height=300, show_label=False, show_share_button=False, elem_id="chatbot")
+            with gr.Column(scale=1):
+                markdown = gr.Markdown(label="Bullet-list", value="# Facility information")
         btn_list = []
         with gr.Row():
             for i in range(6):
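This hunk is the "preview" of the commit message: the chat and a Markdown pane now share a row, with scale=4 / scale=1 splitting the width roughly 4:1. A stripped-down sketch of the same layout:

```python
import gradio as gr

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=4):   # ~4/5 of the row
            gr.Chatbot()
        with gr.Column(scale=1):   # ~1/5 of the row
            gr.Markdown("# Facility information")
```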
@@ -169,7 +197,7 @@ def create_application():
             return message_box
 
         add_user_message_flow = [user, [msg,chatbot], [msg,chatbot]]
-        chat_response_flow = [respond_on_user_msg, [chatbot, storage], [chatbot, storage]]
+        chat_response_flow = [respond_on_user_msg, [chatbot, storage], [chatbot, storage, markdown]]
         update_suggestions_flow = [update_suggestions, storage, btn_list]
         hide_suggestions_flow = [hide_suggestions, None, btn_list]
         disable_msg_flow = [disable_msg, None, msg]
@@ -191,7 +219,7 @@ def create_application():
         ).then(*enable_msg_flow)
 
         demo.load(create_thread_openai, inputs=storage, outputs=storage
-        ).then(initiate_chatting, inputs=[chatbot, storage], outputs=[chatbot, storage]
+        ).then(initiate_chatting, inputs=[chatbot, storage], outputs=[chatbot, storage, markdown]
         ).then(*update_suggestions_flow
         ).then(*enable_msg_flow)
     return demo
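demo.load(...).then(...) runs each step only after the previous one finishes, so the suggestion buttons are refreshed and the textbox re-enabled only once initiate_chatting stops yielding. A minimal sketch of the chaining pattern (function and component names are illustrative):

```python
import gradio as gr

def load_data():
    return "loaded"

def enable_box():
    return gr.update(interactive=True)

with gr.Blocks() as demo:
    status = gr.Textbox(interactive=False)
    demo.load(load_data, outputs=status).then(enable_box, outputs=status)
```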
 