svjack committed on
Commit
63df88b
ยท
1 Parent(s): fbc8f71

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +544 -0
app.py ADDED
@@ -0,0 +1,544 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #### pip install advertools
2
+ #### pip install emoji
3
+ #### pip install emoji-chengyu
4
+ #### pip install gradio-client
5
+
6
+ #### Prefer to run in Chrome; other browsers may have problems with the change-hook function
7
+
8
+ import gradio as gr
9
+ import pandas as pd
10
+
11
+ import emoji
12
+ from advertools.emoji import emoji_df
13
+ from copy import deepcopy
14
+ import numpy as np
15
+
16
+ from emoji_chengyu.data import DefaultChengyuManager
17
+ from emoji_chengyu.puzzle import make_one_puzzle, gen_puzzle
18
+
19
+ from Lex import *
20
+ '''
21
+ lex = Lexica(query="man woman fire snow").images()
22
+ '''
23
+ from PIL import Image
24
+ import requests
25
+
26
+ from zipfile import ZipFile
27
+
28
+ from time import sleep
29
+ sleep_time = 0.5
30
+
31
+ import requests
32
+
33
def translate_zh_to_en(zh_text):
    """Translate a Chinese sentence to English via the hosted translation Space.

    Args:
        zh_text: Chinese source text; must be a str.

    Returns:
        The English translation string returned by the remote service.
    """
    # isinstance instead of `type(x) == type("")`: idiomatic and subclass-safe.
    assert isinstance(zh_text, str)
    response = requests.post(
        "https://svjack-translate-chinese-to-english.hf.space/run/predict",
        json={
            "data": [
                zh_text,
            ]
        },
    ).json()
    # The Space answers {"data": [{"English Question": <translation>}, ...]}.
    data = response["data"]
    data = data[0]
    data = data["English Question"]
    return data
43
+
44
def translate_en_to_zh(en_text):
    """Translate English text to Chinese via the hosted translation Space.

    Args:
        en_text: English source text; must be a str.

    Returns:
        The Chinese translation string returned by the remote service.
    """
    # isinstance instead of `type(x) == type("")`: idiomatic and subclass-safe.
    assert isinstance(en_text, str)
    response = requests.post(
        "https://svjack-translate.hf.space/run/predict",
        json={
            "data": [
                en_text,
                "en",
                "zh",
            ]
        },
    ).json()
    # The Space answers {"data": [{"Target Question": <translation>}, ...]}.
    data = response["data"]
    data = data[0]
    data = data["Target Question"]
    return data
56
+
57
from gradio_client import Client
# Module-level client for the hosted WizardLM-13B Space; shared by every
# run_llm_client call (created once at import time, requires network access).
llm_client = Client("https://svjack-wizardlm-13b-ggml.hf.space/--replicas/bnqpc/")
59
+
60
+ '''
61
+ llm_result = llm_client.predict(
62
+ "Use following emojis to generate a short description of a scene , the emojis are ๐Ÿ‘จ๐Ÿ‘ฉ๐Ÿ”ฅโ„๏ธ", # str in 'Question/Instruction' Textbox component
63
+ 0.8, # int | float (numeric value between 0.1 and 1.0) in 'Temperature' Slider component
64
+ 0.95, # int | float (numeric value between 0.0 and 1.0) in 'Top-p (nucleus sampling)' Slider component
65
+ 40, # int | float (numeric value between 5 and 80) in 'Top-k' Slider component
66
+ 256, # int | float (numeric value between 0 and 1024) in 'Maximum new tokens' Slider component
67
+ 52, # int | float in 'Seed' Number component
68
+ fn_index=1
69
+ )
70
+ '''
71
+
72
def run_llm_client(llm_client, prompt):
    """Send *prompt* to the WizardLM Space and return its raw completion.

    The sampling configuration is fixed for every request.
    """
    temperature = 0.8      # 0.1 .. 1.0
    top_p = 0.95           # nucleus sampling, 0.0 .. 1.0
    top_k = 40             # 5 .. 80
    max_new_tokens = 256   # 0 .. 1024
    seed = 52
    return llm_client.predict(
        prompt,
        temperature,
        top_p,
        top_k,
        max_new_tokens,
        seed,
        fn_index=1,
    )
83
+
84
def chengyu_emoji_to_im_prompt(chengyu_emoji_input,
    llm_prompt_input, llm_client = llm_client):
    """Build an image prompt from a "chengyu:emojis" pair.

    The idiom (before the colon) is translated to English, substituted
    together with the emojis into the LLM prompt template, and the LLM's
    English output is also translated back to Chinese.

    Returns:
        (english_prompt, chinese_prompt) tuple.
    """
    chengyu, emoji = chengyu_emoji_input.split(":")
    chengyu_en = translate_zh_to_en(chengyu)
    print("{}\t\t\t{}".format(chengyu_emoji_input ,chengyu_en))
    filled_template = llm_prompt_input.format(chengyu_en, emoji)
    en_prompt = run_llm_client(llm_client, filled_template)
    zh_prompt = translate_en_to_zh(en_prompt)
    return en_prompt, zh_prompt
93
+
94
def min_dim_to_size(img, size = 512):
    """Resize *img* so that its larger dimension equals *size*.

    NOTE(review): despite the name, the ratio is computed against the
    larger dimension (max), so the image is shrunk to fit — confirm intended.

    Returns:
        (scale_ratio, resized_image) tuple.
    """
    first, second = img.size
    ratio = size / max(first, second)
    scaled = tuple(int(dim * ratio) for dim in (first, second))
    return (ratio, img.resize(scaled))
99
+
100
def lexica(prompt, limit_size = 128, ratio_size = 256 + 128):
    """Search lexica.art for *prompt* and return a list of resized PIL images.

    Args:
        prompt: free-text query; empty/whitespace input yields [].
        limit_size: maximum number of result URLs to fetch.
        ratio_size: target size for the larger dimension of each thumbnail.

    Returns:
        Non-empty list of PIL.Image thumbnails (asserts if every download fails).
    """
    if not prompt or not prompt.strip():
        return []
    prompt = prompt.strip()
    lex = Lexica(query=prompt).images()
    lex = lex[:limit_size]
    # Swap the full-resolution path for the small thumbnail variant.
    lex = list(map(lambda x: x.replace("full_jpg", "sm2"), lex))
    lex_ = []
    for ele in lex:
        try:
            im = Image.open(
                requests.get(ele, stream = True).raw
            )
            lex_.append(im)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; a failed download is logged and skipped.
            print("err")
        # Throttle requests between downloads.
        sleep(sleep_time)
    assert lex_
    lex = list(map(lambda x: min_dim_to_size(x, ratio_size)[1], lex_))
    return lex
120
+
121
def search(emoji_outputs, emoji2text_or_not, llm_prompt_input, llm_client = llm_client):
    """Dispatch on the selected mode and retrieve images for the prompt.

    Modes:
        "Only Emoji"        - search lexica with the emoji characters directly.
        "Emoji to Text"     - have the LLM describe the emojis, search with that.
        "ChengYu with Emoji"- idiom + emojis are turned into a prompt via the LLM.

    Returns:
        (gallery_images, english_prompt, chinese_prompt) tuple.
    """
    assert emoji2text_or_not in ["Emoji to Text", "Only Emoji", "ChengYu with Emoji"]
    if emoji2text_or_not == "Only Emoji":
        only_emoji = extract_2(emoji_outputs)[1]
        images = lexica(only_emoji.replace(":", ""))
        return (images, "", "")
    if emoji2text_or_not == "Emoji to Text":
        assert "{}" in llm_prompt_input
        only_emoji = extract_2(emoji_outputs)[1]
        en_text = run_llm_client(llm_client, llm_prompt_input.format(only_emoji))
        zh_text = translate_en_to_zh(en_text)
        # Strip the "someone do something" scaffold when a long tail follows it.
        for marker in ["someone do something"]:
            tail = en_text.split(marker)[-1]
            if marker in en_text and len(tail) > (5 * 3):
                en_text = tail
        images = lexica(en_text)
        return (images, en_text, zh_text)
    # Remaining mode: "ChengYu with Emoji".
    assert "{}" in llm_prompt_input
    words, emojis = extract_2(emoji_outputs)
    words, emojis = words.strip(), emojis.strip()
    if not words and not emojis:
        return ([], "", "")
    en_text, zh_text = chengyu_emoji_to_im_prompt(
        "{}:{}".format(words, emojis), llm_prompt_input)
    images = lexica(en_text)
    return (images, en_text, zh_text)
149
+
150
+
151
def enterpix(prompt, limit_size = 100, ratio_size = 256 + 128, use_key = "bigThumbnailUrl"):
    """Search enterpix.app for *prompt* and return a list of resized PIL images.

    Args:
        prompt: free-text query sent to the prompt-search endpoint.
        limit_size: number of results requested from the API.
        ratio_size: target size for the larger dimension of each thumbnail.
        use_key: which URL field of each result to download.

    Returns:
        Non-empty list of PIL.Image thumbnails (asserts if every download fails).
    """
    resp = requests.post(
        url = "https://www.enterpix.app/enterpix/v1/image/prompt-search",
        data= {
            "length": limit_size,
            "platform": "stable-diffusion,midjourney",
            "prompt": prompt,
            "start": 0
        }
    )
    resp = resp.json()
    resp = list(map(lambda x: x[use_key], resp["images"]))
    lex_ = []
    for ele in resp:
        try:
            im = Image.open(
                requests.get(ele, stream = True).raw
            )
            lex_.append(im)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; a failed download is logged and skipped.
            print("err")
        # Throttle requests between downloads.
        sleep(sleep_time)
    assert lex_
    resp = list(map(lambda x: min_dim_to_size(x, ratio_size)[1], lex_))
    return resp
176
+
177
def zip_ims(g):
    """Bundle gallery file entries into "tmp.zip" for download.

    Args:
        g: gallery value — a list of dicts with a "name" file path, or None.

    Returns:
        The zip file name, or None when there is nothing to bundle.
    """
    from uuid import uuid1
    if g is None:
        return None
    paths = [item["name"] for item in g]
    if not paths:
        return None
    zip_file_name = "tmp.zip"
    with ZipFile(zip_file_name, "w") as archive:
        # Random archive names avoid collisions between identically named files.
        for path in paths:
            archive.write(path, "{}.png".format(uuid1()))
    return zip_file_name
190
+
191
# Curated display order for the emoji picker. Each entry is
# [representative emoji of an emoji_df group, representative emoji of the
# sub-groups to render for that group, in order].
emoji_order_list = [
    ["๐Ÿ‡", ["๐Ÿฅข","๐Ÿผ", "๐Ÿฑ", "๐Ÿ‡", "๐Ÿฆ€"]],
    ["๐Ÿ˜€", ["๐Ÿฅฐ", "๐Ÿ˜•", "๐Ÿ˜บ", "๐Ÿ’‹", "๐Ÿ’ฉ"]],
    ["๐Ÿต", ["๐Ÿต", "๐Ÿฆƒ", "๐ŸŒ", "๐Ÿณ"]],
    ["๐Ÿ“”", ["๐Ÿ‘“" ,"๐Ÿ“”", "๐Ÿšช", "๐Ÿ”‹", "๐ŸŽฅ"]],
    ["๐ŸŽƒ", ["โšฝ", "๐ŸŽƒ", "๐ŸŽฏ", "๐ŸŽญ", "๐ŸŽ–๏ธ"]],
    #["๐ŸŒ", ["๐ŸŒ", "๐Ÿ "๏ธ, "โ›ฒ", "๐Ÿ”"๏ธ]],
    ["๐Ÿ‘‹", ["๐Ÿ‘", "๐Ÿ’ช", "๐Ÿ‘‹", "๐Ÿ‘Œ",]],
    ["๐ŸŒ", ["๐ŸŒ", "โ›ฒ", "๐Ÿ ",]],
]

sub_cate_num = 5    # max number of sub-group grids rendered per group tab
sub_cate_size = 36  # max number of emojis shown per sub-group grid
sub_col_num = 6     # number of columns in each emoji DataFrame grid
205
+
206
def list_to_square(l, col_num = 10):
    """Pad *l* with "" to a multiple of *col_num* and reshape it into rows.

    Bug fix: the original returned the numpy reshape BEFORE its padding step,
    leaving the padding code unreachable — any list whose length was not an
    exact multiple of col_num crashed inside `reshape`. Padding now happens
    first, which is what the dead code clearly intended.

    Args:
        l: flat list of cell values.
        col_num: width of each row in the result.

    Returns:
        List of rows (lists), each exactly col_num wide.
    """
    assert type(l) == type([])
    remainder = len(l) % col_num
    pad_count = (col_num - remainder) if remainder > 0 else 0
    padded = l + [""] * pad_count
    return np.asarray(padded).reshape([-1, col_num]).tolist()
218
+
219
def append_emojis(selected_index: gr.SelectData, dataframe_origin, emoji_prompt):
    """Append the clicked emoji-grid cell onto the current prompt.

    The prompt is kept in "words:emojis" form: non-emoji characters
    accumulate before the colon, emoji characters after it. Blank cells
    (padding) leave the prompt unchanged.
    """
    row, col = selected_index.index[0], selected_index.index[1]
    cell = dataframe_origin.iloc[row, col]
    if not cell.strip():
        return emoji_prompt
    words, emojis = extract_2(emoji_prompt)
    cell_words, cell_emojis = extract_2(cell)
    return "{}:{}".format(
        words.strip() + cell_words.strip(),
        emojis.strip() + cell_emojis.strip(),
    )
229
+
230
def append_chengyu_emojis(selected_index: gr.SelectData, dataframe_origin, emoji_prompt, append_or_replace, emoji2text_or_not):
    """Apply a clicked "chengyu:emoji" cell to the prompt box.

    In "append" mode the cell's word and emoji parts are concatenated onto
    the corresponding parts of the current prompt; in "replace" mode they
    overwrite it. Non-string or empty cells leave the prompt unchanged.
    """
    cell = dataframe_origin.iloc[selected_index.index[0], selected_index.index[1]]
    if type(cell) != type("") or not cell:
        return emoji_prompt
    assert emoji2text_or_not in ["Emoji to Text", "Only Emoji", "ChengYu with Emoji"]
    assert append_or_replace in ["replace", "append"]
    words, emojis = extract_2(emoji_prompt)
    cell_words, cell_emojis = extract_2(cell)
    if append_or_replace == "append":
        words, emojis = words + cell_words, emojis + cell_emojis
    else:
        words, emojis = cell_words, cell_emojis
    return "{}:{}".format(words.strip(), emojis.strip())
259
+
260
def extract_emojis(s):
    """Return only the emoji characters of *s*, concatenated in order."""
    found = emoji.emoji_list(s)
    return "".join(entry["emoji"] for entry in found)
264
+
265
def extract_2(s):
    """Split *s* into (non-emoji text, emoji text); colons are dropped."""
    emojis = extract_emojis(s)
    text = "".join(ch for ch in s if ch not in emojis + ":")
    return text, emojis
269
+
270
def gen_emojis_by_chengyu(words):
    """Map a chengyu to its emoji puzzle string, or "" when unknown."""
    assert type(words) == type("")
    item = DefaultChengyuManager.get_by_word(words)
    if item is None:
        return ""
    puzzle_text = "".join(make_one_puzzle(item).puzzle)
    return extract_emojis(puzzle_text)
278
+
279
def gen_emojis_by_sample(search_count=5000):
    """Sample chengyu emoji puzzles and lay 21 of them out as a 7x3 DataFrame.

    Each resulting cell is a "words:emoji" string. Only fully matched
    ("score" == 4) four-character idioms are kept, and a puzzle is dropped
    when any of its four emojis was already used at the same position by an
    earlier kept puzzle.

    NOTE(review): recurses until at least 21 rows survive the filters; with
    a very small search_count this could recurse deeply — confirm acceptable.
    """
    pg = gen_puzzle(manager=DefaultChengyuManager, search_count=search_count)
    df = pd.DataFrame(list(map(lambda x: {
        "words": "".join(x.chengyu_item.word_list),
        "emoji": x.puzzle_str,
        "score": sum(x.mask)
    } ,pg)))
    # Keep only puzzles where all four characters matched, for 4-char idioms.
    df = df[df["score"] == 4]
    df = df[df["words"].map(lambda x: len(x) == 4)]
    req = []
    # Per-position "already used" emoji sets (one per puzzle column 0..3).
    col0 = set([])
    col1 = set([])
    col2 = set([])
    col3 = set([])
    for i, r in df.iterrows():
        words = r["words"]
        emoji = r["emoji"]
        if emoji[0] in col0:
            continue
        col0.add(emoji[0])
        if emoji[1] in col1:
            continue
        col1.add(emoji[1])
        if emoji[2] in col2:
            continue
        col2.add(emoji[2])
        if emoji[3] in col3:
            continue
        col3.add(emoji[3])
        req.append(
            r.to_dict()
        )
    df = pd.DataFrame(req)
    # Too few distinct puzzles survived the de-duplication: resample.
    if len(df) < 21:
        return gen_emojis_by_sample(search_count=search_count)
    # Format the first 21 as "words:emoji" and reshape into 7 rows x 3 cols.
    df = pd.DataFrame(
        np.asarray(df.apply(lambda x: x.to_dict(), axis = 1).head(21).map(lambda x:
            "{}:{}".format(x["words"],x["emoji"])
        ).tolist()).reshape(
            (7, 3)
        )
    )
    return df
322
+
323
def append_pure_to_input(emoji_outputs ,only_emoji_outputs):
    """Concatenate the emoji-only string onto the end of the prompt string."""
    return "".join([emoji_outputs, only_emoji_outputs])
325
+
326
def outputs_rec_format(emoji_outputs):
    """Normalize the prompt box content into stripped "words:emojis" form."""
    words, emojis = extract_2(emoji_outputs)
    return "{}:{}".format(words.strip(), emojis.strip())
331
+
332
+ css = """
333
+ #frame span{
334
+ font-size: 1.5em; display: flex; align-items: center;
335
+ }
336
+ """
337
+
338
### Whole UI: emoji picker (left), chengyu suggestions + prompt + LLM
### controls (right), then the result gallery and zip download.
with gr.Blocks(css = css) as demo:
    title = gr.HTML(
        """<h1 align="center"> <font size="+10"> ๐Ÿ•Œ Emojis to StableDiffusion World ๐ŸŒ </font> </h1>""",
        elem_id="title",
    )

    frame_list = []
    with gr.Row():
        # Left column: one tab per emoji group, each holding clickable grids.
        with gr.Column(label = "Emoji samples, You can click to use them"):
            sub_title_0 = gr.Markdown(
                value="### Emoji samples, You can click to use them",
                visible=True,
            )
            for group_order_ele, sub_group_order_list in emoji_order_list:
                group_first = group_order_ele
                df_group = emoji_df[emoji_df["emoji"] == group_first]["group"].iloc[0]
                df = emoji_df[emoji_df["group"] == df_group]
                with gr.Tab("{} {}".format(group_first, df_group)):
                    for ii in range(len(sub_group_order_list)):
                        # Cap the number of sub-group grids per tab.
                        if ii >= sub_cate_num:
                            break
                        sub_first = sub_group_order_list[ii]
                        df_sub_group = emoji_df[emoji_df["emoji"] == sub_first]["sub_group"].iloc[0]
                        dff = df[df["sub_group"] == df_sub_group]
                        sub_first = dff["emoji"].iloc[0]
                        sub_l = dff["emoji"].values.tolist()[:sub_cate_size]
                        sub_l_square = list_to_square(sub_l, sub_col_num)
                        g_frame = gr.DataFrame(sub_l_square,
                            interactive=False, headers = [''] * sub_col_num,
                            elem_id="frame",
                            label = "{} {}".format(sub_first, df_sub_group)
                        )
                        frame_list.append(g_frame)
        # Right column: chengyu suggestions, prompt box and LLM settings.
        with gr.Column():
            with gr.Row():
                with gr.Column():
                    sub_title_1 = gr.Markdown(
                        value="### ChengYu to Emoji combinations, You can click to use them, Don't forget edit them after click, to make it meaningful",
                        visible=True,
                    )
                    chengyu_frame = gr.DataFrame(gen_emojis_by_sample(),
                        interactive=False, headers = [''] * sub_col_num,
                        elem_id="chengyu_frame",
                    )
            with gr.Row():
                chengyu_reset_button = gr.Button("Reset ChengYu Emojis",
                    elem_id="run_button")
            with gr.Row():
                append_or_replace = gr.Radio(choices=["replace", "append"],
                    value="replace", label="ChengYu Emoji Append or Replace to below", elem_id="text_radio")
            with gr.Row():
                emoji_outputs = gr.Textbox(label="Emoji Prompt Input", show_label=True, lines=1, max_lines=20,
                    min_width = 256, placeholder="Click Emoji from left with some emoji input manually", elem_id="prompt",
                    interactive=True)
                clean_button = gr.Button("Clear", elem_id="clear_button")
            with gr.Row():
                with gr.Row():
                    emoji2text_or_not = gr.Radio(choices=["Only Emoji", "Emoji to Text", "ChengYu with Emoji"],
                        value="Only Emoji", label="Emoji &| Text to get images or translate them to Text by LLM",
                        elem_id="trans_radio",
                        info = "๐Ÿ•Œ Only Emoji ----------- ๐Ÿ•Œโžก๏ธ๐Ÿ”ค Emoji to Text ------- ๐Ÿ“–โš–๏ธ๐Ÿ•Œ ChengYu with Emoji"
                    )
                    llm_prompt_input = gr.Textbox(label="Emoji to Text Prompt template used by LLM", show_label=True,
                        lines=1, max_lines=20,
                        min_width = 256,
                        value="Use following emojis to generate a short description of a scene , use the pattern someone do something , the emojis are {}",
                        elem_id="prompt",
                        interactive=True)
                    llm_outputs = gr.Textbox(label="Emoji to Text Prompt translate by LLM Output", show_label=True,
                        lines=1, max_lines=20,
                        min_width = 256, placeholder="Emoji describe by Text", elem_id="prompt",
                        interactive=True)
                    llm_zh_outputs = gr.Textbox(label="Emoji to Text Prompt translate by LLM Output in Chinese", show_label=True,
                        lines=1, max_lines=20,
                        min_width = 256, placeholder="Emoji describe by Chinese", elem_id="prompt",
                        interactive=True)
            with gr.Row():
                emoji_outputs_button = gr.Button("Retrieve Images", elem_id="run_button")

    with gr.Row():
        with gr.Column():
            # BUG FIX: the Gallery constructor keyword was misspelled `lable`,
            # so the component silently had no label set.
            outputs = gr.Gallery(label='Output gallery', elem_id="gallery",).style(grid=5,height=768 - 64 - 32,
                allow_preview=False, label = "retrieve Images")
            exp_title = gr.HTML(
                """<br/><br/><h5 align="center"> <font size="+1"> Emojis examples live in ๐Ÿ•Œ travel to StableDiffusion ๐ŸŒ </font> </h5>""",
            )
            gr.Examples(
                [
                    ["๐Ÿ”", "Only Emoji"],
                    ["๐Ÿ”ฅ๐ŸŒฒ", "Only Emoji"],
                    ["๐Ÿฑ๐Ÿฝ๏ธ๐Ÿ‘จ", "Emoji to Text"],
                    ["๐Ÿป๐Ÿฆโ„๏ธ๐ŸŒŠ", "Only Emoji"],
                    ["๐ŸŒŽ๐Ÿฆถ๐Ÿ‘‚๐Ÿ’ค", "Emoji to Text"],
                    ["๐Ÿ‘ฉโ€๐Ÿ”ฌ๐Ÿ—ฃโ˜•๐Ÿ‘จโ€๐ŸŽจ", "Emoji to Text"],
                    ["ๅˆ€ๅฑฑ็ซๆตท:๐Ÿ™ƒโšก๐Ÿ‘ฉโ€๐Ÿš’๐Ÿงœโ€โ™‚๏ธ", "ChengYu with Emoji"],
                    ["ไผ ไธบไฝณ่ฏ:๐Ÿก๐Ÿงฃ๐Ÿ‘ฉโ€๐Ÿ‘ฉโ€๐Ÿ‘งโ€๐Ÿ‘ง๐Ÿ‘ฉโ€๐ŸŽจ", "ChengYu with Emoji"],
                    ["ๅคง้“่‡ณ็ฎ€:๐Ÿ‡จ๐Ÿ‡ณ๐ŸŒพ๐Ÿง ๐Ÿ‘€โ€โ™‚๏ธ", "ChengYu with Emoji"],
                ],
                inputs = [emoji_outputs, emoji2text_or_not],
            )
            gr.Examples(
                [
                    ["๐Ÿ‘œ๐Ÿ‘š๐Ÿงฃ๐Ÿ‘ธ๐ŸฐโœŒ๏ธ", "Emoji to Text"],
                    ["๐Ÿ‘๐Ÿ๐ŸŽƒ๐Ÿ‘ถ๐Ÿฅ›๐ŸŒพ๐Ÿ•ต๐Ÿƒโ€โ™€๏ธ", "Emoji to Text"],
                    ["๐Ÿ™ƒ๐Ÿ’๐Ÿ‘‹๐Ÿ‘จโ€๐Ÿ”ง๐Ÿ‘„๐Ÿฅ€๐ŸŒŽ๐ŸŒ™", "Emoji to Text"],
                    ["ๆ˜ฅๆฑŸๆฝฎๆฐด่ฟžๆตทๅนณ:๐ŸŒž๐ŸŒบ๐Ÿ’ฆ๐Ÿคค๐Ÿชท๐ŸŒŠโ˜€๏ธ", "ChengYu with Emoji"],
                    ["ๆœˆๆ˜Žๆ˜Ÿ็จ€๏ผŒไนŒ้นŠๅ—้ฃžใ€‚:๐�ŸŒ›๐ŸŒ›โœจ๐Ÿซ…๐Ÿฆ๐Ÿฆข๐Ÿชฝ๐Ÿชฝ", "ChengYu with Emoji"]
                ],
                inputs = [emoji_outputs, emoji2text_or_not],
            )

    with gr.Row():
        with gr.Tab(label = "Download"):
            zip_button = gr.Button("Zip Images to Download", elem_id="zip_button")
            downloads = gr.File(label = "Image zipped", elem_id = "zip_file")

    # ---- event wiring ---------------------------------------------------
    # Swap the LLM prompt template whenever the mode radio changes.
    emoji2text_or_not.change(
        fn = lambda x: "" if x == "Only Emoji" else (
            "Use following emojis to generate a short description of a scene , use the pattern someone do something , the emojis are {}" if x == "Emoji to Text"
            else "Use following emojis to make a short description of the scene about '{}', the emojis are {}"
        ),
        inputs = emoji2text_or_not, outputs = llm_prompt_input
    )

    # Clicking any emoji cell appends it to the prompt box.
    for g in frame_list:
        g.select(fn = append_emojis, inputs = [g, emoji_outputs], outputs = emoji_outputs)

    chengyu_frame.select(fn = append_chengyu_emojis,
        inputs = [chengyu_frame, emoji_outputs, append_or_replace, emoji2text_or_not],
        outputs = emoji_outputs)
    # BUG FIX: these callbacks are registered without `inputs`, so gradio
    # invokes them with zero arguments; the original `lambda _: ...` form
    # raised TypeError on every click.
    chengyu_reset_button.click(fn = lambda: gen_emojis_by_sample(), outputs = chengyu_frame)
    clean_button.click(fn = lambda: "", outputs = emoji_outputs)

    emoji_outputs_button.click(search,
        inputs=[emoji_outputs, emoji2text_or_not, llm_prompt_input],
        outputs=[outputs, llm_outputs, llm_zh_outputs])

    zip_button.click(
        zip_ims, inputs = outputs, outputs=downloads
    )

# BUG FIX: launch()'s first positional parameter is `inline`, not the bind
# address; use server_name to listen on all interfaces.
demo.launch(server_name="0.0.0.0")