Update app.py
app.py CHANGED

@@ -23,21 +23,13 @@ model, tokenizer = load_model(
 
 stream_model = StreamModel(model, tokenizer)
 
-def chat_stream(
-    context,
-    instruction,
-    state_chatbot,
-):
-    if len(context) > 500 or len(instruction) > 150:
-        raise gr.Error("context or prompt is too long!")
-
     bot_summarized_response = ''
     # user input should be appropriately formatted (don't be confused by the function name)
     instruction_display = common_post_process(instruction)
     instruction_prompt, conv_length = generate_prompt(instruction, state_chatbot, context)
 
     if conv_length > num_of_characters_to_keep:
-        instruction_prompt = generate_prompt(SPECIAL_STRS["summarize"], state_chatbot, context)[0]
+        instruction_prompt = generate_prompt(SPECIAL_STRS["summarize"], state_chatbot, context, partial=True)[0]
 
     state_chatbot = state_chatbot + [
         (
@@ -57,7 +49,7 @@ def chat_stream(
             "✅ summarization is done and set as context"
         )
         print(f"bot_summarized_response: {bot_summarized_response}")
-        yield (state_chatbot, state_chatbot, f"{context}. {bot_summarized_response}")
+        yield (state_chatbot, state_chatbot, f"{context}. {bot_summarized_response}".strip())
 
     instruction_prompt = generate_prompt(instruction, state_chatbot, f"{context} {bot_summarized_response}")[0]
 
@@ -70,6 +62,7 @@ def chat_stream(
 
     instruction_display = None if instruction_display == SPECIAL_STRS["continue"] else instruction_display
     state_chatbot = state_chatbot + [(instruction_display, None)]
+    yield (state_chatbot, state_chatbot, f"{context}. {bot_summarized_response}".strip())
 
     prev_index = 0
     agg_tokens = ""
@@ -93,7 +86,7 @@ def chat_stream(
                     instruction_display,
                     processed_response
                 )
-                yield (state_chatbot, state_chatbot, f"{context} {bot_summarized_response}")
+                yield (state_chatbot, state_chatbot, f"{context} {bot_summarized_response}".strip())
                 break
             else:
                 agg_tokens = ""
@@ -102,7 +95,7 @@ def chat_stream(
         if agg_tokens == "":
             processed_response, to_exit = post_process_stream(tokens)
             state_chatbot[-1] = (instruction_display, processed_response)
-            yield (state_chatbot, state_chatbot, f"{context} {bot_summarized_response}")
+            yield (state_chatbot, state_chatbot, f"{context} {bot_summarized_response}".strip())
 
             if to_exit:
                 break
@@ -112,9 +105,10 @@ def chat_stream(
     yield (
         state_chatbot,
         state_chatbot,
-        f"{context} {bot_summarized_response}"
+        f"{context} {bot_summarized_response}".strip()
     )
 
+
 def chat_batch(
     contexts,
     instructions,
@@ -156,8 +150,8 @@ with gr.Blocks(css=PARENT_BLOCK_CSS) as demo:
         chatbot = gr.Chatbot(elem_id='chatbot', label="Alpaca-LoRA")
         instruction_txtbox = gr.Textbox(placeholder="What do you want to say to AI?", label="Instruction")
         with gr.Row():
-            send_prompt_btn = gr.Button(value="Send Prompt")
             cancel_btn = gr.Button(value="Cancel")
+            reset_btn = gr.Button(value="Reset")
 
         with gr.Accordion("Helper Buttons", open=False):
             gr.Markdown(f"`Continue` lets AI to complete the previous incomplete answers. `Summarize` lets AI to summarize the conversations so far.")
@@ -194,12 +188,12 @@ with gr.Blocks(css=PARENT_BLOCK_CSS) as demo:
     gr.Markdown(f"{BOTTOM_LINE}")
 
 
-    send_event = send_prompt_btn.click(
+    send_event = instruction_txtbox.submit(
         chat_stream,
         [context_txtbox, instruction_txtbox, state_chatbot],
         [state_chatbot, chatbot, context_txtbox],
     )
-    reset_event = send_prompt_btn.click(
+    reset_event = instruction_txtbox.submit(
         reset_textbox,
         [],
         [instruction_txtbox],
@@ -232,6 +226,15 @@ with gr.Blocks(css=PARENT_BLOCK_CSS) as demo:
         cancels=[
             send_event, continue_event, summarize_event
         ]
+    )
+
+    reset_btn.click(
+        reset_everything,
+        [context_txtbox, instruction_txtbox, state_chatbot],
+        [state_chatbot, chatbot, context_txtbox, instruction_txtbox],
+        cancels=[
+            send_event, continue_event, summarize_event
+        ]
     )
 
     demo.queue(
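
The change repeated across chat_stream's yields is appending .strip() to the context string pushed back into context_txtbox: when bot_summarized_response is still empty, the f-string otherwise leaves dangling whitespace in the context box on every yield. A quick illustration with stand-in values:

    context = "some context"
    bot_summarized_response = ""  # nothing has been summarized yet

    # Before this commit: the interpolation leaves a trailing space behind.
    f"{context} {bot_summarized_response}"          # -> 'some context '
    f"{context}. {bot_summarized_response}"         # -> 'some context. '

    # After: .strip() trims the dangling whitespace at both ends.
    f"{context} {bot_summarized_response}".strip()  # -> 'some context'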
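
Taken together, the UI edits drop the Send Prompt button, move both submit handlers onto Enter in the Instruction textbox, and add a Reset button that cancels any in-flight generation before wiping the conversation. Below is a minimal, self-contained sketch of that wiring, assuming Gradio 3.x; the chat_stream body and the two reset helpers are simplified stand-ins, not the Space's actual implementations:

    import time

    import gradio as gr

    def chat_stream(context, instruction, state_chatbot):
        # Stand-in for the Space's token-streaming generator: echo word by word.
        state_chatbot = state_chatbot + [(instruction, "")]
        for word in instruction.split():
            state_chatbot[-1] = (instruction, state_chatbot[-1][1] + word + " ")
            time.sleep(0.05)
            yield state_chatbot, state_chatbot, context

    def reset_textbox():
        # Clears the instruction box as soon as a prompt is submitted.
        return ""

    def reset_everything(context, instruction, state_chatbot):
        # Wipes the history, the chat window, and both textboxes.
        return [], [], "", ""

    with gr.Blocks() as demo:
        state_chatbot = gr.State([])
        context_txtbox = gr.Textbox(label="Context")
        chatbot = gr.Chatbot(label="Alpaca-LoRA")
        instruction_txtbox = gr.Textbox(label="Instruction")
        with gr.Row():
            cancel_btn = gr.Button(value="Cancel")
            reset_btn = gr.Button(value="Reset")

        # Enter now drives generation; a second submit handler clears the textbox.
        send_event = instruction_txtbox.submit(
            chat_stream,
            [context_txtbox, instruction_txtbox, state_chatbot],
            [state_chatbot, chatbot, context_txtbox],
        )
        reset_event = instruction_txtbox.submit(reset_textbox, [], [instruction_txtbox])

        # Cancel only stops the streaming event; Reset also clears accumulated state.
        cancel_btn.click(None, None, None, cancels=[send_event])
        reset_btn.click(
            reset_everything,
            [context_txtbox, instruction_txtbox, state_chatbot],
            [state_chatbot, chatbot, context_txtbox, instruction_txtbox],
            cancels=[send_event],
        )

    demo.queue().launch()

Passing the generator's event handle through cancels= is what lets Cancel and Reset interrupt a stream mid-generation; it only works with the queue enabled, which the app already does via demo.queue(...).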