svjack commited on
Commit
8317ac1
·
1 Parent(s): 1d554bc

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +616 -0
app.py ADDED
@@ -0,0 +1,616 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #### pip install advertools
2
+ #### pip install emoji
3
+ #### pip install emoji-chengyu
4
+ #### pip install gradio-client
5
+
6
+ #### prefer to run in Chrome; other browsers may have problems with the change hook function
7
+
8
+ import gradio as gr
9
+ import pandas as pd
10
+
11
+ import emoji
12
+ from advertools.emoji import emoji_df
13
+ from copy import deepcopy
14
+ import numpy as np
15
+
16
+ from emoji_chengyu.data import DefaultChengyuManager
17
+ from emoji_chengyu.puzzle import make_one_puzzle, gen_puzzle
18
+
19
+ from Lex import *
20
+ '''
21
+ lex = Lexica(query="man woman fire snow").images()
22
+ '''
23
+ from PIL import Image
24
+ import requests
25
+
26
+ from zipfile import ZipFile
27
+
28
+ from time import sleep
29
+ sleep_time = 0.5
30
+
31
+ import requests
32
+
33
+ import chatglm_cpp
34
+ import gradio as gr
35
+ from pathlib import Path
36
+
37
# Quantized ChatGLM2 weights (GGML q4_0), loaded once at import time and
# reused by fix_emojis_by_glm below.
model_file_path = "chatglm2-ggml-q4_0.bin"
chatglm_llm = chatglm_cpp.Pipeline(Path(model_file_path))
39
+
40
def translate_zh_to_en(zh_text):
    """Translate a Chinese string to English via the remote ctranslate Space.

    Posts to the svjack-ctranslate Hugging Face Space and pulls the
    translated sentence out of its JSON envelope.
    """
    assert type(zh_text) is str
    payload = {"data": [zh_text, "zh", "en"]}
    response = requests.post(
        "https://svjack-ctranslate.hf.space/run/predict",
        json=payload,
    ).json()
    # The Space wraps outputs in a "data" list; element 0 is a dict whose
    # "Target Question" key holds the translated text.
    return response["data"][0]["Target Question"]
59
+
60
def translate_en_to_zh(en_text):
    """Translate an English string to Chinese via the remote ctranslate Space.

    Mirror image of translate_zh_to_en: same endpoint, language codes
    swapped.
    """
    assert type(en_text) is str
    payload = {"data": [en_text, "en", "zh"]}
    response = requests.post(
        "https://svjack-ctranslate.hf.space/run/predict",
        json=payload,
    ).json()
    # Unwrap the Space's JSON envelope down to the translated sentence.
    return response["data"][0]["Target Question"]
80
+
81
from gradio_client import Client
# Remote WizardLM-13B (GGML) Space used to turn emoji into scene descriptions.
llm_client = Client("https://svjack-wizardlm-13b-ggml.hf.space/--replicas/bnqpc/")

'''
llm_result = llm_client.predict(
				"Use following emojis to generate a short description of a scene , the emojis are ๐Ÿ‘จ๐Ÿ‘ฉ๐Ÿ”ฅโ„๏ธ",	# str in 'Question/Instruction' Textbox component
				0.8,	# int | float (numeric value between 0.1 and 1.0) in 'Temperature' Slider component
				0.95,	# int | float (numeric value between 0.0 and 1.0) in 'Top-p (nucleus sampling)' Slider component
				40,	# int | float (numeric value between 5 and 80) in 'Top-k' Slider component
				256,	# int | float (numeric value between 0 and 1024) in 'Maximum new tokens' Slider component
				52, # int | float in 'Seed' Number component
				fn_index=1
)
'''
95
+
96
def run_llm_client(llm_client, prompt):
    """Run one generation on the remote LLM Space and return its raw output.

    The numeric arguments mirror the Space UI's slider defaults
    (temperature, top-p, top-k, max new tokens, seed); fn_index=1 selects
    the generation endpoint.
    """
    temperature = 0.8
    top_p = 0.95
    top_k = 40
    max_new_tokens = 256
    seed = 52
    return llm_client.predict(
        prompt,
        temperature,
        top_p,
        top_k,
        max_new_tokens,
        seed,
        fn_index=1,
    )
107
+
108
def chengyu_emoji_to_im_prompt(chengyu_emoji_input,
        llm_prompt_input, llm_client = llm_client):
    """Turn a "chengyu:emoji" pair into an (English, Chinese) image prompt.

    The chengyu half is machine-translated to English, substituted with
    the emoji half into *llm_prompt_input* (a two-slot format template),
    sent to the LLM, and the result translated back to Chinese.
    Raises ValueError if the input does not contain exactly one colon.
    """
    chengyu_part, emoji_part = chengyu_emoji_input.split(":")
    chengyu_en = translate_zh_to_en(chengyu_part)
    print("{}\t\t\t{}".format(chengyu_emoji_input ,chengyu_en))
    prompt_for_llm = llm_prompt_input.format(chengyu_en, emoji_part)
    en_prompt = run_llm_client(llm_client, prompt_for_llm)
    zh_prompt = translate_en_to_zh(en_prompt)
    return en_prompt, zh_prompt
117
+
118
def min_dim_to_size(img, size = 512):
    """Proportionally resize *img* so its longest side equals *size*.

    NOTE(review): despite the name, the scale is computed against the
    *larger* dimension (max), so the longest side lands on *size* —
    confirm the name/intent with the author.
    Returns (scale_ratio, resized_image).
    """
    dim_a, dim_b = img.size
    scale = size / max(dim_a, dim_b)
    new_a = int(dim_a * scale)
    new_b = int(dim_b * scale)
    return (scale, img.resize((new_a, new_b)))
123
+
124
def lexica(prompt, limit_size = 128, ratio_size = 256 + 128):
    """Retrieve up to *limit_size* thumbnail images from Lexica for *prompt*.

    Returns a list of PIL images resized so the longest side is
    *ratio_size*; an empty/blank prompt yields []. Asserts that at least
    one image downloaded successfully.
    """
    if not prompt or not prompt.strip():
        return []
    prompt = prompt.strip()
    urls = Lexica(query=prompt).images()[:limit_size]
    # Swap the full-resolution asset path for the small thumbnail variant.
    urls = [u.replace("full_jpg", "sm2") for u in urls]
    images = []
    for url in urls:
        try:
            images.append(Image.open(requests.get(url, stream = True).raw))
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; narrow to Exception and log what failed.
        except Exception as err:
            print("err", err)
        sleep(sleep_time)
    assert images
    return [min_dim_to_size(im, ratio_size)[1] for im in images]
144
+
145
def search(emoji_outputs, emoji2text_or_not, llm_prompt_input, llm_client = llm_client):
    """Dispatch on the retrieval mode and return (gallery, en_text, zh_text).

    Modes:
      "Only Emoji"        — strip to emoji and query Lexica directly.
      "Emoji to Text"     — have the LLM describe the emoji, then query.
      "ChengYu with Emoji"— treat input as "text:emoji" and route through
                            chengyu_emoji_to_im_prompt.
    """
    assert emoji2text_or_not in ["Emoji to Text", "Only Emoji", "ChengYu with Emoji"]
    if emoji2text_or_not == "Only Emoji":
        emojis_only = extract_2(emoji_outputs)[1]
        return (lexica(emojis_only.replace(":", "")), "", "")
    if emoji2text_or_not == "Emoji to Text":
        assert "{}" in llm_prompt_input
        emojis_only = extract_2(emoji_outputs)[1]
        en_text = run_llm_client(llm_client, llm_prompt_input.format(emojis_only))
        zh_text = translate_en_to_zh(en_text)
        # If the model echoed the "someone do something" template phrase,
        # keep only the tail after it when that tail is long enough
        # (> 15 chars) to stand alone as a prompt.
        for marker in ["someone do something"]:
            tail = en_text.split(marker)[-1]
            if marker in en_text and len(tail) > (5 * 3):
                en_text = tail
        return (lexica(en_text), en_text, zh_text)
    # Remaining mode: "ChengYu with Emoji".
    assert "{}" in llm_prompt_input
    text_part, emoji_part = extract_2(emoji_outputs)
    text_part, emoji_part = text_part.strip(), emoji_part.strip()
    if not text_part and not emoji_part:
        return ([], "", "")
    combined = "{}:{}".format(text_part, emoji_part)
    en_text, zh_text = chengyu_emoji_to_im_prompt(combined, llm_prompt_input)
    return (lexica(en_text), en_text, zh_text)
173
+
174
+
175
def enterpix(prompt, limit_size = 100, ratio_size = 256 + 128, use_key = "bigThumbnailUrl"):
    """Search enterpix.app for prompt-matching images and download thumbnails.

    *use_key* selects which URL field of each result to fetch. Returns a
    list of PIL images resized so the longest side is *ratio_size*;
    asserts at least one image downloaded successfully.
    """
    resp = requests.post(
        url = "https://www.enterpix.app/enterpix/v1/image/prompt-search",
        data= {
            "length": limit_size,
            "platform": "stable-diffusion,midjourney",
            "prompt": prompt,
            "start": 0
        }
    )
    resp = resp.json()
    urls = [entry[use_key] for entry in resp["images"]]
    images = []
    for url in urls:
        try:
            images.append(Image.open(requests.get(url, stream = True).raw))
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; narrow to Exception and log what failed.
        except Exception as err:
            print("err", err)
        sleep(sleep_time)
    assert images
    return [min_dim_to_size(im, ratio_size)[1] for im in images]
200
+
201
def zip_ims(g):
    """Bundle the gallery's image files into "tmp.zip" for download.

    *g* is the gradio gallery value: a list of dicts with a "name" file
    path each, or None. Returns the zip's file name, or None when there
    is nothing to pack.
    """
    from uuid import uuid1
    if g is None:
        return None
    paths = [item["name"] for item in g]
    if not paths:
        return None
    zip_file_name = "tmp.zip"
    with ZipFile(zip_file_name, "w") as archive:
        for path in paths:
            # Random archive names avoid collisions between identically
            # named source files.
            archive.write(path, "{}.png".format(uuid1()))
    return zip_file_name
214
+
215
# Curated picker layout: each entry is [representative emoji for a top-level
# advertools group (tab), list of representative emojis for its sub-groups].
emoji_order_list = [
    ["๐Ÿ‡", ["๐Ÿฅข","๐Ÿผ", "๐Ÿฑ", "๐Ÿ‡", "๐Ÿฆ€"]],
    ["๐Ÿ˜€", ["๐Ÿฅฐ", "๐Ÿ˜•", "๐Ÿ˜บ", "๐Ÿ’‹", "๐Ÿ’ฉ"]],
    ["๐Ÿต", ["๐Ÿต", "๐Ÿฆƒ", "๐ŸŒ", "๐Ÿณ"]],
    ["๐Ÿ“”", ["๐Ÿ‘“" ,"๐Ÿ“”", "๐Ÿšช", "๐Ÿ”‹", "๐ŸŽฅ"]],
    ["๐ŸŽƒ", ["โšฝ", "๐ŸŽƒ", "๐ŸŽฏ", "๐ŸŽญ", "๐ŸŽ–๏ธ"]],
    #["๐ŸŒ", ["๐ŸŒ", "๐Ÿ "๏ธ, "โ›ฒ", "๐Ÿ”"๏ธ]],
    ["๐Ÿ‘‹", ["๐Ÿ‘", "๐Ÿ’ช", "๐Ÿ‘‹", "๐Ÿ‘Œ",]],
    ["๐ŸŒ", ["๐ŸŒ", "โ›ฒ", "๐Ÿ ",]],
]

# Layout limits for the emoji picker grids.
sub_cate_num = 5    # max sub-groups rendered per tab
sub_cate_size = 36  # max emojis shown per sub-group grid
sub_col_num = 6     # columns per grid
229
+
230
def list_to_square(l, col_num = 10):
    """Reshape flat list *l* into rows of *col_num*, padding with "".

    Fix: the original returned the reshaped array *before* its padding
    branch ran, leaving that code unreachable — so any length not an
    exact multiple of col_num crashed inside np.reshape. Pad first, then
    reshape.

    Returns a list of rows (lists), each exactly col_num long.
    """
    assert type(l) == type([])
    remainder = len(l) % col_num
    pad_count = (col_num - remainder) if remainder else 0
    padded = l + [""] * pad_count
    return np.asarray(padded).reshape([-1, col_num]).tolist()
242
+
243
def append_emojis(selected_index: gr.SelectData, dataframe_origin, emoji_prompt):
    """Append a clicked picker cell onto the "text:emoji" prompt string.

    Splits both the current prompt and the clicked cell into (text, emoji)
    halves, concatenates half-by-half, and re-joins with a colon. Blank
    cells leave the prompt unchanged.
    """
    cell = dataframe_origin.iloc[selected_index.index[0], selected_index.index[1]]
    if cell.strip():
        cur_text, cur_emoji = extract_2(emoji_prompt)
        add_text, add_emoji = extract_2(cell)
        emoji_prompt = "{}:{}".format(
            cur_text.strip() + add_text.strip(),
            cur_emoji.strip() + add_emoji.strip(),
        )
    return emoji_prompt
253
+
254
def append_chengyu_emojis(selected_index: gr.SelectData, dataframe_origin, emoji_prompt, append_or_replace, emoji2text_or_not):
    """Merge a clicked "words:emoji" chengyu cell into the prompt textbox.

    With append_or_replace == "append" the cell's text and emoji halves are
    concatenated onto the current prompt's halves; with "replace" they
    substitute for them. Non-string / empty cells leave the prompt as-is.
    emoji2text_or_not is only validated here, not branched on.
    """
    cell = dataframe_origin.iloc[selected_index.index[0], selected_index.index[1]]
    if type(cell) is not str or not cell:
        return emoji_prompt
    assert emoji2text_or_not in ["Emoji to Text", "Only Emoji", "ChengYu with Emoji"]
    assert append_or_replace in ["replace", "append"]
    cur_text, cur_emoji = extract_2(emoji_prompt)
    new_text, new_emoji = extract_2(cell)
    if append_or_replace == "append":
        merged_text, merged_emoji = cur_text + new_text, cur_emoji + new_emoji
    else:
        merged_text, merged_emoji = new_text, new_emoji
    return "{}:{}".format(merged_text.strip(), merged_emoji.strip())
283
+
284
def extract_emojis(s):
    """Return only the emoji characters of *s*, concatenated in order."""
    found = emoji.emoji_list(s)
    return "".join(entry["emoji"] for entry in found)
288
+
289
def extract_2(s):
    """Split *s* into (non-emoji text, emoji characters).

    Colons are treated as separators and dropped from the text half, so
    "text:emoji" inputs round-trip cleanly.
    """
    emoji_part = extract_emojis(s)
    text_part = "".join(ch for ch in s if ch not in emoji_part + ":")
    return text_part, emoji_part
293
+
294
def gen_emojis_by_chengyu(words):
    """Look up chengyu *words* and return its emoji puzzle rendering.

    Returns "" when the chengyu is unknown to the manager.
    """
    assert type(words) == type("")
    item = DefaultChengyuManager.get_by_word(words)
    if item is None:
        return ""
    puzzle_text = "".join(make_one_puzzle(item).puzzle)
    return extract_emojis(puzzle_text)
302
+
303
def gen_emojis_by_sample(search_count=5000):
    """Sample chengyu puzzles into a 7x3 DataFrame of "words:emoji" cells.

    Keeps only fully solved (score == 4) four-character puzzles whose
    emoji are pairwise distinct at each of the four positions, then
    retries recursively until at least 21 candidates survive.
    """
    pg = gen_puzzle(manager=DefaultChengyuManager, search_count=search_count)
    df = pd.DataFrame(list(map(lambda x: {
        "words": "".join(x.chengyu_item.word_list),
        "emoji": x.puzzle_str,
        "score": sum(x.mask)
    } ,pg)))
    # score == 4: every character of the chengyu got an emoji match.
    df = df[df["score"] == 4]
    df = df[df["words"].map(lambda x: len(x) == 4)]
    req = []
    # One "seen" set per emoji position so each column shows unique emoji.
    col0 = set([])
    col1 = set([])
    col2 = set([])
    col3 = set([])
    for i, r in df.iterrows():
        words = r["words"]
        # NOTE: `emoji` here shadows the module-level emoji import within
        # this loop; it holds the puzzle's emoji string.
        emoji = r["emoji"]
        if emoji[0] in col0:
            continue
        col0.add(emoji[0])
        if emoji[1] in col1:
            continue
        col1.add(emoji[1])
        if emoji[2] in col2:
            continue
        col2.add(emoji[2])
        if emoji[3] in col3:
            continue
        col3.add(emoji[3])
        req.append(
            r.to_dict()
        )
    df = pd.DataFrame(req)
    # Too few survivors: resample from scratch (recursive retry).
    if len(df) < 21:
        return gen_emojis_by_sample(search_count=search_count)
    # Take the first 21 as "words:emoji" strings and lay them out 7x3.
    df = pd.DataFrame(
        np.asarray(df.apply(lambda x: x.to_dict(), axis = 1).head(21).map(lambda x:
            "{}:{}".format(x["words"],x["emoji"])
        ).tolist()).reshape(
            (7, 3)
        )
    )
    return df
346
+
347
def append_pure_to_input(emoji_outputs ,only_emoji_outputs):
    """Concatenate the emoji-only string onto the prompt string."""
    combined = "{}{}".format(emoji_outputs, only_emoji_outputs)
    return combined
349
+
350
def outputs_rec_format(emoji_outputs):
    """Normalize a prompt into canonical stripped "text:emoji" form."""
    text_part, emoji_part = extract_2(emoji_outputs)
    return "{}:{}".format(text_part.strip(), emoji_part.strip())
355
+
356
def fix_emojis_by_glm(emoji_outputs):
    """Regenerate the emoji half of "text:emoji" with ChatGLM few-shot prompting.

    The text half is kept; ChatGLM is asked (via a chengyu->emoji few-shot
    history) to produce a better emoji rendering of it. When the text half
    is empty the input's emoji half passes through unchanged.
    """
    # Few-shot exemplars: alternating chengyu text and its emoji rendering.
    l = [
        'ๆœๆ–ฏๅค•ๆ–ฏ',
        '๐ŸŒž๐Ÿ•›๐ŸŒ‡๐ŸŒ›',
        'ๅ‡บๅ…ถไธๆ„',
        '๐Ÿช–๐ŸŽ๐Ÿ—ก๏ธโœŒ๏ธ',
        '็™พ็ดซๅƒ็บข',
        '๐ŸŽŽ๐ŸŒน๐ŸŠ๐Ÿƒ',
        '่ƒŒ็ข‘่ฆ†ๅฑ€',
        '๐Ÿ€„๏ธโ™Ÿ๏ธ๐Ÿณ๏ธ๐Ÿ’€',
        'ๆ˜ฅ่›‡็ง‹่š“',
        '๐ŸŒž๐Ÿ๐Ÿ๐Ÿชฑ',
        'ไผ ้ฃŽๆ‰‡็ซ',
        'โ˜๏ธ๐ŸŒฌ๏ธ๐Ÿชญ๐Ÿ”ฅ',
        'ไธๅฏ้€พ่ถŠ',
        '๐Ÿ’ช๐Ÿƒ๐Ÿฅฑโ˜น๏ธ',
        'ไธนไนฆ็™ฝ้ฉฌ',
        '๐Ÿ“„๐Ÿ“–๐Ÿ˜ป๐ŸŽ',
        'ๆŒจๅ†ปๅ—้ฅฟ',
        '๐ŸŒฌ๏ธ๐Ÿฅถ๐Ÿฝ๏ธ๐Ÿ˜ฑ',
        '็™ฝ้ฉฌ้ž้ฉฌ',
        '๐Ÿ˜„๐ŸŒŸ๐ŸŽ๐Ÿ˜',
        'ๆŠฑ็މๆก็ ',
        '๐Ÿซ‚๐Ÿ’ฐ๐Ÿซณ๐Ÿงง',
        '้“้ชจไป™้ฃŽ',
        'โœ๏ธ๐Ÿ‘จ๐Ÿ•Œ๐Ÿ‘ผ',
        'ๆ˜ฅๅŽ็ง‹ๅฎž',
        '๐ŸŒž๐ŸŒฝ๐Ÿ๐Ÿ‰',
        'ๆ˜ฅ้ฃŽๅค้›จ',
        'โ˜€๏ธ๐ŸŒฌ๏ธ๐ŸŒง๏ธโšก๏ธ'
    ]
    a, b = extract_2(emoji_outputs)
    a, b = a.strip(), b.strip()
    if a:
        # Greedy decoding (do_sample=False) keeps the mapping deterministic.
        # The history opens with a task instruction + acknowledgement, then
        # the exemplar pairs, then the text to translate.
        b = chatglm_llm.chat(
            history= ["่ฏทๆ‰ง่กŒๅฐ†ๆˆ่ฏญ็ฟป่ฏ‘ๆˆemoji็š„ไปปๅŠก๏ผŒไธ‹้ขๆ˜ฏไธ€ไบ›ไพ‹ๅญใ€‚", "ๅฅฝ็š„ใ€‚"] + l + \
            [a], do_sample=False
        )
        # Keep only emoji characters from the model's free-form reply.
        b = extract_emojis(b)
    emoji_outputs = "{}:{}".format(a, b)
    return emoji_outputs
397
+
398
+
399
# Enlarge and center emoji glyphs inside the picker DataFrames (elem_id="frame").
css = """
#frame span{
font-size: 1.5em; display: flex; align-items: center;
}
"""
404
+
405
###with gr.Blocks(css="custom.css") as demo:
with gr.Blocks(css = css) as demo:
    # Page banner.
    title = gr.HTML(
        """<h1 align="center"> <font size="+10"> ๐Ÿ•Œ Emojis to StableDiffusion World ๐ŸŒ </font> </h1>""",
        elem_id="title",
    )

    # All clickable emoji picker grids, collected for bulk event wiring below.
    frame_list = []
    with gr.Row():
        # Left column: emoji picker, one tab per advertools emoji group.
        with gr.Column(label = "Emoji samples, You can click to use them"):
            sub_title_0 = gr.Markdown(
                value="### Emoji samples, You can click to use them",
                visible=True,
                #elem_id="selected_model",
            )
            #for group, df in emoji_df.groupby("group"):
            for group_order_ele, sub_group_order_list in emoji_order_list:
                #group_first = df["emoji"].iloc[0]
                group_first = group_order_ele
                # Resolve the representative emoji back to its advertools group.
                df_group = emoji_df[emoji_df["emoji"] == group_first]["group"].iloc[0]
                df = emoji_df[emoji_df["group"] == df_group]
                with gr.Tab("{} {}".format(group_first, df_group)):
                    #for ii ,(sub_group, dff) in enumerate(df.groupby("sub_group")):
                    for ii in range(len(sub_group_order_list)):
                        sub_first = sub_group_order_list[ii]
                        df_sub_group = emoji_df[emoji_df["emoji"] == sub_first]["sub_group"].iloc[0]
                        dff = df[df["sub_group"] == df_sub_group]
                        # Cap the number of sub-group grids per tab.
                        if ii >= sub_cate_num:
                            break
                        sub_first = dff["emoji"].iloc[0]
                        sub_l = dff["emoji"].values.tolist()[:sub_cate_size]
                        sub_l_square = list_to_square(sub_l, sub_col_num)
                        g_frame = gr.DataFrame(sub_l_square,
                            interactive=False, headers = [''] * sub_col_num,
                            #datatype="markdown"
                            elem_id="frame",
                            label = "{} {}".format(sub_first, df_sub_group)
                        )
                        #g_frame = gr.Matrix(sub_l_square, label = sub_first,)
                        frame_list.append(g_frame)
        # Right column: chengyu sampler, prompt box, mode controls.
        with gr.Column():
            with gr.Row():
                with gr.Column():
                    sub_title_1 = gr.Markdown(
                        value="### ChengYu to Pinyin Emoji combinations, You can click to use them, Don't forget edit or Fix them after click, to make it meaningful",
                        visible=True,
                        #elem_id="selected_model",
                    )
                    chengyu_frame = gr.DataFrame(gen_emojis_by_sample(),
                        interactive=False, headers = [''] * sub_col_num,
                        #datatype="markdown"
                        elem_id="chengyu_frame",
                        #label = "ChengYu to Emoji combinations, You can click to use them"
                    )
                    with gr.Row():
                        chengyu_reset_button = gr.Button("Reset ChengYu Emojis",
                            elem_id="run_button")
                    with gr.Row():
                        append_or_replace = gr.Radio(choices=["replace", "append"],
                            value="replace", label="ChengYu Emoji Append or Replace to below", elem_id="text_radio")
            with gr.Row():
                with gr.Row():
                    emoji_outputs = gr.Textbox(label="Emoji Prompt Input", show_label=True, lines=1, max_lines=20,
                        min_width = 256, placeholder="Click Emoji from left with some emoji input manually", elem_id="prompt",
                        interactive=True, info = "Generate by Click, and can edit by yourself, look up Examples below")
                with gr.Row():
                    clean_button = gr.Button("Clear", elem_id="clear_button",
                        label = "Clear all content")
                    fix_button = gr.Button("Fix Pinyin Emoji to Means (4min 2 cores, 30s 12 cores)", elem_id="fix_button",
                        label = "Fix Emojis by Text part use model, takes 4min in 2 cores and 30s in 12 cores in cpu"
                    )
            '''
            with gr.Column():
                clean_button = gr.Button("Clean Emojis", elem_id="clean_button")
                emoji_outputs_button = gr.Button("Retrieve Images", elem_id="run_button")
            '''
            with gr.Row():
                ### \n ๐Ÿ•Œ Only Emoji \n ๐Ÿ•Œโžก๏ธ๐Ÿ”ค Emoji to Text \n ๐Ÿ“–โš–๏ธ๐Ÿ•Œ ChengYu with Emoji
                with gr.Row():
                    emoji2text_or_not = gr.Radio(choices=["Only Emoji", "Emoji to Text", "ChengYu with Emoji"],
                        value="Only Emoji", label="Emoji &| Text to get images or translate them to Text by LLM",
                        elem_id="trans_radio",
                        info = "๐Ÿ•Œ Only Emoji -- ๐Ÿ•Œโžก๏ธ๐Ÿ”ค Emoji to Text -- ๐Ÿ“–โš–๏ธ๐Ÿ•Œ ChengYu with Emoji -- ๐Ÿ“–โžก๏ธ๐Ÿ•Œโš–๏ธ๐Ÿ“– Fix + ChengYu with Emoji"
                    )
                    llm_prompt_input = gr.Textbox(label="Emoji to Text Prompt template used by LLM", show_label=True,
                        lines=1, max_lines=20,
                        min_width = 256,
                        value="Use following emojis to generate a short description of a scene , use the pattern someone do something , the emojis are {}"
                        , elem_id="prompt",
                        interactive=True)
                    llm_outputs = gr.Textbox(label="Emoji to Text Prompt translate by LLM Output", show_label=True,
                        lines=1, max_lines=20,
                        min_width = 256, placeholder="Emoji describe by Text", elem_id="prompt",
                        interactive=True)
                    llm_zh_outputs = gr.Textbox(label="Emoji to Text Prompt translate by LLM Output in Chinese", show_label=True,
                        lines=1, max_lines=20,
                        min_width = 256, placeholder="Emoji describe by Chinese", elem_id="prompt",
                        interactive=True)
            '''
            with gr.Row():
                emoji_gen_chengyu_input = gr.Textbox(label="ChengYu Prompt Input", show_label=False, lines=1, max_lines=20,
                    min_width = 256, placeholder="input ChengYu manually, like: ๅŠฑ็ฒพๅ›พๆฒป", elem_id="prompt",
                    interactive=True)
            with gr.Row():
                only_emoji_outputs = gr.Textbox(label="Only Emoji Prompt Output", show_label=False, lines=1, max_lines=20,
                    min_width = 256, placeholder="Filter out only emoji charactors", elem_id="prompt", interactive=True)
                #gr.Slider(label='Number of images ', minimum = 4, maximum = 20, step = 1, value = 4)]
                append_button = gr.Button("Append Only Emojis to Emoji Prompt Output", elem_id="append_button")
                only_emoji_outputs_button = gr.Button("Retrieve Images Only Emoji", elem_id="run_button")
            with gr.Row():
                #text_button = gr.Button("Retrieve Images", elem_id="run_button")
                emoji_outputs_button = gr.Button("Retrieve Images", elem_id="run_button")
            '''
            with gr.Row():
                emoji_outputs_button = gr.Button("Retrieve Images", elem_id="run_button")

    with gr.Row():
        with gr.Column():
            # NOTE(review): "lable" is a typo'd kwarg name; this gradio version
            # appears to tolerate unknown kwargs, so the intended gallery label
            # comes only from .style(label=...) — confirm against the gradio
            # version pinned for this Space.
            outputs = gr.Gallery(lable='Output gallery', elem_id="gallery",).style(grid=5,height=768 - 64 - 32,
                allow_preview=False, label = "retrieve Images")
            exp_title = gr.HTML(
                """<br/><br/><h5 align="center"> <font size="+1"> Emojis examples live in ๐Ÿ•Œ travel to StableDiffusion ๐ŸŒ </font> </h5>""",
                #elem_id="title",
            )
            # Clickable example prompts: (prompt value, retrieval mode).
            gr.Examples(
                [
                    ["๐Ÿ”", "Only Emoji"],
                    ["๐Ÿ”ฅ๐ŸŒฒ", "Only Emoji"],
                    ["๐Ÿฑ๐Ÿฝ๏ธ๐Ÿ‘จ", "Emoji to Text"],
                    ["๐Ÿป๐Ÿฆโ„๏ธ๐ŸŒŠ", "Only Emoji"],
                    ["๐ŸŒŽ๐Ÿฆถ๐Ÿ‘‚๐Ÿ’ค", "Emoji to Text"],
                    ["๐Ÿ‘ฉโ€๐Ÿ”ฌ๐Ÿ—ฃโ˜•๐Ÿ‘จโ€๐ŸŽจ", "Emoji to Text"],
                    ["ๅˆ€ๅฑฑ็ซๆตท:๐Ÿ™ƒโšก๐Ÿ‘ฉโ€๐Ÿš’๐Ÿงœโ€โ™‚๏ธ", "ChengYu with Emoji"],
                    ["ไผ ไธบไฝณ่ฏ:๐Ÿก๐Ÿงฃ๐Ÿ‘ฉโ€๐Ÿ‘ฉโ€๐Ÿ‘งโ€๐Ÿ‘ง๐Ÿ‘ฉโ€๐ŸŽจ", "ChengYu with Emoji"],
                    ["ๅคง้“่‡ณ็ฎ€:๐Ÿ‡จ๐Ÿ‡ณ๐ŸŒพ๐Ÿง ๐Ÿ‘€โ€โ™‚๏ธ", "ChengYu with Emoji"],
                    #["๐Ÿ‘œ๐Ÿ‘š๐Ÿงฃ๐Ÿ‘ธ๐ŸฐโœŒ๏ธ", "Emoji to Text"],
                    #["๐Ÿ‘๐Ÿ๐ŸŽƒ๐Ÿ‘ถ๐Ÿฅ›๐ŸŒพ๐Ÿ•ต๐Ÿƒโ€โ™€๏ธ", "Emoji to Text"],
                    #["๐Ÿ™ƒ๐Ÿ’๐Ÿ‘‹๐Ÿ‘จโ€๐Ÿ”ง๐Ÿ‘„๐Ÿฅ€๐ŸŒŽ๐ŸŒ™", "Emoji to Text"],
                    #["ๆ˜ฅๆฑŸๆฝฎๆฐด่ฟžๆตทๅนณ:๐�ŸŒž๐ŸŒบ๐Ÿ’ฆ๐Ÿคค๐Ÿชท๐ŸŒŠโ˜€๏ธ", "ChengYu with Emoji"],
                    #["ๆœˆๆ˜Žๆ˜Ÿ็จ€๏ผŒไนŒ้นŠๅ—้ฃžใ€‚:๐ŸŒ›๐ŸŒ›โœจ๐Ÿซ…๐Ÿฆ๐Ÿฆ†๐Ÿฆข๐Ÿชฝ", "ChengYu with Emoji"]
                ],
                inputs = [emoji_outputs, emoji2text_or_not],
                #label = "๐Ÿ•Œ Examples"
            )
            # Second example strip: the rows commented out above, enabled here.
            gr.Examples(
                [
                    #["๐Ÿ”", "Only Emoji"],
                    #["๐Ÿ”ฅ๐ŸŒฒ", "Only Emoji"],
                    #["๐Ÿฑ๐Ÿฝ๏ธ๐Ÿ‘จ", "Emoji to Text"],
                    #["๐Ÿป๐Ÿฆโ„๏ธ๐ŸŒŠ", "Only Emoji"],
                    #["๐ŸŒŽ๐Ÿฆถ๐Ÿ‘‚๐Ÿ’ค", "Emoji to Text"],
                    #["๐Ÿ‘ฉโ€๐Ÿ”ฌ๐Ÿ—ฃโ˜•๐Ÿ‘จโ€๐ŸŽจ", "Emoji to Text"],
                    #["ๅˆ€ๅฑฑ็ซๆตท:๐Ÿ™ƒโšก๐Ÿ‘ฉโ€๐Ÿš’๐Ÿงœโ€โ™‚๏ธ", "ChengYu with Emoji"],
                    #["ไผ ไธบไฝณ่ฏ:๐Ÿก๐Ÿงฃ๐Ÿ‘ฉโ€๐Ÿ‘ฉโ€๐Ÿ‘งโ€๐Ÿ‘ง๐Ÿ‘ฉโ€๐ŸŽจ", "ChengYu with Emoji"],
                    #["ๅคง้“่‡ณ็ฎ€:๐Ÿ‡จ๐Ÿ‡ณ๐ŸŒพ๐Ÿง ๐Ÿ‘€โ€โ™‚๏ธ", "ChengYu with Emoji"],
                    ["๐Ÿ‘œ๐Ÿ‘š๐Ÿงฃ๐Ÿ‘ธ๐ŸฐโœŒ๏ธ", "Emoji to Text"],
                    ["๐Ÿ‘๐Ÿ๐ŸŽƒ๐Ÿ‘ถ๐Ÿฅ›๐ŸŒพ๐Ÿ•ต๐Ÿƒโ€โ™€๏ธ", "Emoji to Text"],
                    ["๐Ÿ™ƒ๐Ÿ’๐Ÿ‘‹๐Ÿ‘จโ€๐Ÿ”ง๐Ÿ‘„๐Ÿฅ€๐ŸŒŽ๐ŸŒ™", "Emoji to Text"],
                    ["ๆ˜ฅๆฑŸๆฝฎๆฐด่ฟžๆตทๅนณ:๐ŸŒž๐ŸŒบ๐Ÿ’ฆ๐Ÿคค๐Ÿชท๐ŸŒŠโ˜€๏ธ", "ChengYu with Emoji"],
                    ["ๆœˆๆ˜Žๆ˜Ÿ็จ€๏ผŒไนŒ้นŠๅ—้ฃžใ€‚:๐ŸŒ›๐ŸŒ›โœจ๐Ÿซ…๐Ÿฆ๐Ÿฆ†๐Ÿฆข๐Ÿชฝ", "ChengYu with Emoji"]
                ],
                inputs = [emoji_outputs, emoji2text_or_not],
                #label = "๐Ÿ•Œ Examples"
            )

    with gr.Row():
        with gr.Tab(label = "Download"):
            zip_button = gr.Button("Zip Images to Download", elem_id="zip_button")
            downloads = gr.File(label = "Image zipped", elem_id = "zip_file")

    ### ["Only Emoji", "Emoji to Text", "ChengYu with Emoji"]
    # Swap in the mode-appropriate LLM prompt template when the radio changes.
    emoji2text_or_not.change(
        fn = lambda x: "" if x == "Only Emoji" else (
            "Use following emojis to generate a short description of a scene , use the pattern someone do something , the emojis are {}" if x == "Emoji to Text" \
            else "Use following emojis to make a short description of the scene about '{}', the emojis are {}"
        )
        , inputs = emoji2text_or_not, outputs = llm_prompt_input
    )

    # Clicking any picker grid cell appends that emoji to the prompt box.
    for g in frame_list:
        g.select(fn = append_emojis, inputs = [g, emoji_outputs], outputs = emoji_outputs)

    ##
    chengyu_frame.select(fn = append_chengyu_emojis, inputs = [chengyu_frame, emoji_outputs, append_or_replace, emoji2text_or_not],
        outputs = emoji_outputs)
    chengyu_reset_button.click(fn = lambda _: gen_emojis_by_sample(), outputs = chengyu_frame)

    clean_button.click(fn = lambda _: "", outputs = emoji_outputs)
    fix_button.click(fn = fix_emojis_by_glm, inputs = emoji_outputs, outputs = emoji_outputs)
    #emoji_outputs.change(fn = extract_emojis, inputs = emoji_outputs, outputs = only_emoji_outputs)
    '''
    emoji_outputs.change(
        fn = outputs_rec_format
        , inputs = emoji_outputs, outputs = emoji_outputs)
    '''
    '''
    emoji_gen_chengyu_input.change(fn = gen_emojis_by_chengyu, inputs = emoji_gen_chengyu_input,
        outputs = only_emoji_outputs)
    append_button.click(fn = append_pure_to_input, inputs = [emoji_outputs ,only_emoji_outputs],
        outputs = emoji_outputs)
    '''

    #emoji_outputs_button.click(lexica, inputs=emoji_outputs, outputs=outputs)
    emoji_outputs_button.click(search,
        inputs=[emoji_outputs, emoji2text_or_not, llm_prompt_input],
        outputs=[outputs, llm_outputs, llm_zh_outputs])

    zip_button.click(
        zip_ims, inputs = outputs, outputs=downloads
    )
615
+
616
# Bind to all interfaces so the app is reachable from a container / Space.
# Fix: the original passed "0.0.0.0" positionally, which binds launch()'s
# first positional parameter (`inline`), not the host.
demo.launch(server_name="0.0.0.0")