svjack commited on
Commit
edff9eb
Β·
1 Parent(s): b7181fa

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +413 -0
app.py ADDED
@@ -0,0 +1,413 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #### pip install advertools
2
+ #### pip install emoji
3
+ #### pip install emoji-chengyu
4
+ #### pip install gradio-client
5
+
6
+ #### prefer to run in Chrome; other browsers may have problems with the change hook function
7
+
8
+ import gradio as gr
9
+ import pandas as pd
10
+
11
+ import emoji
12
+ from advertools.emoji import emoji_df
13
+ from copy import deepcopy
14
+ import numpy as np
15
+
16
+ from emoji_chengyu.data import DefaultChengyuManager
17
+ from emoji_chengyu.puzzle import make_one_puzzle, gen_puzzle
18
+
19
+ from Lex import *
20
+ '''
21
+ lex = Lexica(query="man woman fire snow").images()
22
+ '''
23
+ from PIL import Image
24
+ import requests
25
+
26
+ from zipfile import ZipFile
27
+
28
+ from time import sleep
29
+ sleep_time = 0.5
30
+
31
+ from gradio_client import Client
32
+ llm_client = Client("https://svjack-wizardlm-13b-ggml.hf.space/--replicas/bnqpc/")
33
+
34
+ '''
35
+ llm_result = llm_client.predict(
36
+ "Use following emojis to generate a short description of a scene , the emojis are πŸ‘¨πŸ‘©πŸ”₯❄️", # str in 'Question/Instruction' Textbox component
37
+ 0.8, # int | float (numeric value between 0.1 and 1.0) in 'Temperature' Slider component
38
+ 0.95, # int | float (numeric value between 0.0 and 1.0) in 'Top-p (nucleus sampling)' Slider component
39
+ 40, # int | float (numeric value between 5 and 80) in 'Top-k' Slider component
40
+ 256, # int | float (numeric value between 0 and 1024) in 'Maximum new tokens' Slider component
41
+ 52, # int | float in 'Seed' Number component
42
+ fn_index=1
43
+ )
44
+ '''
45
+
46
def run_llm_client(llm_client, prompt):
    """Send `prompt` to the remote WizardLM space and return its text reply.

    The positional arguments mirror the space's fn_index=1 endpoint:
    prompt, temperature, top-p, top-k, max new tokens, seed.
    """
    sampling_settings = (
        0.8,   # temperature
        0.95,  # top-p (nucleus sampling)
        40,    # top-k
        256,   # maximum new tokens
        52,    # seed
    )
    return llm_client.predict(prompt, *sampling_settings, fn_index=1)
57
+
58
def min_dim_to_size(img, size = 512):
    """Rescale `img` so its larger dimension equals `size`; return (ratio, resized_image).

    NOTE(review): despite the function name, this normalizes the *max*
    dimension (`PIL.Image.size` is (width, height)) -- kept as-is because
    callers rely on this scaling.
    """
    first, second = img.size
    ratio = size / max(first, second)
    new_dims = (int(first * ratio), int(second * ratio))
    return (ratio, img.resize(new_dims))
63
+
64
def lexica(prompt, limit_size = 128, ratio_size = 256 + 128):
    """Search lexica.art for `prompt` and return up to `limit_size` downscaled PIL images.

    Returns [] for an empty or blank prompt. Raises AssertionError when no
    image could be downloaded at all (original contract, kept).

    Fix: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    narrowed to `except Exception` so the process stays interruptible.
    """
    if not prompt or not prompt.strip():
        return []
    prompt = prompt.strip()
    urls = Lexica(query=prompt).images()
    urls = urls[:limit_size]
    # swap the full-resolution path for the small thumbnail variant
    urls = list(map(lambda u: u.replace("full_jpg", "sm2"), urls))
    images = []
    for url in urls:
        try:
            images.append(
                Image.open(requests.get(url, stream=True).raw)
            )
        except Exception:
            # best-effort download: log, back off briefly, keep going
            print("err")
            sleep(sleep_time)
    assert images
    return list(map(lambda im: min_dim_to_size(im, ratio_size)[1], images))
84
+
85
def search(emoji_outputs, emoji2text_or_not, llm_prompt_input, llm_client = llm_client):
    """Retrieve gallery images either directly from the emojis or from an LLM description.

    Returns (image_list, llm_text); llm_text is "" in pure-emoji mode.
    """
    assert emoji2text_or_not in ["Emoji to Text", "Only Emoji"]
    if emoji2text_or_not == "Only Emoji":
        return (lexica(emoji_outputs), "")
    # LLM mode: the template must leave a {} slot for the emoji string.
    assert "{}" in llm_prompt_input
    description = run_llm_client(llm_client, llm_prompt_input.format(emoji_outputs))
    # Strip the instruction echo: keep only the text after the marker phrase,
    # but only when a meaningful amount of text follows it.
    for marker in ["someone do something"]:
        trailing = description.split(marker)[-1]
        if marker in description and len(trailing) > 15:
            description = trailing
    return (lexica(description), description)
100
+
101
def enterpix(prompt, limit_size = 100, ratio_size = 256 + 128, use_key = "bigThumbnailUrl"):
    """Prompt-search enterpix.app and return the matching images, downscaled.

    Raises AssertionError when no thumbnail could be downloaded (original
    contract, kept).

    Fix: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    narrowed to `except Exception` so the process stays interruptible.
    """
    resp = requests.post(
        url = "https://www.enterpix.app/enterpix/v1/image/prompt-search",
        data = {
            "length": limit_size,
            "platform": "stable-diffusion,midjourney",
            "prompt": prompt,
            "start": 0
        }
    )
    resp = resp.json()
    urls = list(map(lambda item: item[use_key], resp["images"]))
    images = []
    for url in urls:
        try:
            images.append(
                Image.open(requests.get(url, stream=True).raw)
            )
        except Exception:
            # best-effort download: log, back off briefly, keep going
            print("err")
            sleep(sleep_time)
    assert images
    return list(map(lambda im: min_dim_to_size(im, ratio_size)[1], images))
126
+
127
def zip_ims(g):
    """Bundle the gallery's image files into tmp.zip; return the archive path or None."""
    from uuid import uuid1
    if g is None:
        return None
    paths = [item["name"] for item in g]
    if not paths:
        return None
    archive_name = "tmp.zip"
    with ZipFile(archive_name, "w") as archive:
        for path in paths:
            # random arcname avoids collisions between identically named files
            archive.write(path, "{}.png".format(uuid1()))
    return archive_name
140
+
141
# Curated picker layout: each entry is [tab_representative_emoji, sub_group_emojis].
# The first emoji selects the top-level group tab; each emoji in the inner list
# selects one sub-group grid shown inside that tab (resolved via emoji_df).
emoji_order_list = [
    ["😀", ["🥰", "😕", "😺", "💋", "💩"]],
    ["🍇", ["🥒", "🍼", "🍱", "🍇", "🦀"]],
    ["🐵", ["🐵", "🦃", "🐌", "🐳"]],
    ["📔", ["👓", "📔", "🚪", "🔋", "🎥"]],
    ["🎃", ["⚽", "🎃", "🎯", "🎭", "🎖️"]],
    ["👋", ["👏", "💪", "👋", "👌"]],
    ["🌍", ["🌍", "⛲", "🏠"]],
]

# Layout knobs for the emoji picker grids.
sub_cate_num = 5    # max sub-group grids shown per tab
sub_cate_size = 36  # max emojis per sub-group grid
sub_col_num = 6     # columns per grid
155
+
156
def list_to_square(l, col_num = 10):
    """Pad `l` with "" up to a multiple of `col_num` and return it as a list of rows.

    Bug fix: the original returned the numpy reshape *before* its padding
    code ran, leaving the padding branch unreachable -- any length not
    divisible by `col_num` raised a reshape error. Padding now happens
    first, then the list is chunked into rows.
    """
    assert type(l) == type([])
    remainder = len(l) % col_num
    # number of "" cells needed to complete the last row
    pad = (col_num - remainder) if remainder > 0 else 0
    padded = deepcopy(l) + [""] * pad
    return [padded[i:i + col_num] for i in range(0, len(padded), col_num)]
168
+
169
def append_emojis(selected_index: gr.SelectData, dataframe_origin, emoji_prompt):
    """Append the clicked picker-grid cell's emoji to the prompt; blanks are ignored."""
    row, col = selected_index.index[0], selected_index.index[1]
    cell = dataframe_origin.iloc[row, col]
    # padding cells are "" -- only real emojis extend the prompt
    if cell.strip():
        emoji_prompt = emoji_prompt + cell
    return emoji_prompt
174
+
175
def append_chengyu_emojis(selected_index: gr.SelectData, dataframe_origin, emoji_prompt, append_or_replace):
    """Take the emojis from a clicked "words:emojis" cell and append to, or replace, the prompt."""
    row, col = selected_index.index[0], selected_index.index[1]
    cell = dataframe_origin.iloc[row, col]
    # ignore clicks on padding / non-string cells
    if type(cell) != type("") or not cell:
        return emoji_prompt
    assert append_or_replace in ["replace", "append"]
    emojis = cell.split(":")[-1]
    if append_or_replace == "append":
        return emoji_prompt + emojis
    return emojis
185
+
186
def extract_emojis(s):
    """Return only the emoji characters of `s`, concatenated in order."""
    # The trailing "s" sentinel is kept from the original -- presumably it
    # guards emoji_list against sequences at the string edge; TODO confirm.
    found = emoji.emoji_list(s + "s")
    return "".join(item["emoji"] for item in found)
190
+
191
def gen_emojis_by_chengyu(words):
    """Look up the idiom `words` and return its puzzle as a pure-emoji string ("" if unknown)."""
    assert type(words) == type("")
    item = DefaultChengyuManager.get_by_word(words)
    if item is None:
        return ""
    puzzle_text = "".join(make_one_puzzle(item).puzzle)
    return extract_emojis(puzzle_text)
199
+
200
def gen_emojis_by_sample(search_count=5000):
    """Sample chengyu puzzles and arrange 21 "words:emojis" entries into a 7x3 DataFrame.

    Keeps only fully-solved (score == 4) four-character idioms, rejects any
    row that repeats an emoji already used at the same character position,
    and recurses until at least 21 survivors exist.
    """
    puzzles = gen_puzzle(manager=DefaultChengyuManager, search_count=search_count)
    df = pd.DataFrame(list(map(lambda x: {
        "words": "".join(x.chengyu_item.word_list),
        "emoji": x.puzzle_str,
        "score": sum(x.mask)
    }, puzzles)))
    df = df[df["score"] == 4]
    df = df[df["words"].map(lambda w: len(w) == 4)]
    kept = []
    # one "seen" set per emoji position so every column stays visually distinct
    # (renamed from col0..col3; also avoids shadowing the imported `emoji` module)
    seen_by_pos = [set(), set(), set(), set()]
    for _, row in df.iterrows():
        puzzle_emojis = row["emoji"]
        rejected = False
        for pos in range(4):
            if puzzle_emojis[pos] in seen_by_pos[pos]:
                rejected = True
                break
            # Positions checked before a rejection stay registered -- this
            # mirrors the original add-as-you-check order exactly.
            seen_by_pos[pos].add(puzzle_emojis[pos])
        if rejected:
            continue
        kept.append(row.to_dict())
    df = pd.DataFrame(kept)
    if len(df) < 21:
        # not enough distinct rows this round: resample
        return gen_emojis_by_sample(search_count=search_count)
    cells = df.apply(lambda r: r.to_dict(), axis = 1).head(21).map(
        lambda d: "{}:{}".format(d["words"], d["emoji"])
    ).tolist()
    return pd.DataFrame(np.asarray(cells).reshape((7, 3)))
243
+
244
def append_pure_to_input(emoji_outputs, only_emoji_outputs):
    """Concatenate the emoji-only string onto the end of the main emoji prompt."""
    return "".join([emoji_outputs, only_emoji_outputs])
246
+
247
# Enlarge and vertically center the emoji glyphs rendered inside the
# picker DataFrames (elem_id="frame").
css = """
#frame span{
    font-size: 1.5em; display: flex; align-items: center;
}
"""
252
+
253
# Gradio UI: emoji pickers on the left, prompt/LLM controls on the right,
# retrieved image gallery and a zip download below.
with gr.Blocks(css = css) as demo:
    title = gr.HTML(
        """<h1 align="center"> <font size="+10"> 🕌 Emojis to StableDiffusion World 🌍 </font> </h1>""",
        elem_id="title",
    )

    # One gr.DataFrame per emoji sub-group grid; wired to append_emojis below.
    frame_list = []
    with gr.Row():
        with gr.Column(label = "Emoji samples, You can click to use them"):
            sub_title_0 = gr.Markdown(
                value="### Emoji samples, You can click to use them",
                visible=True,
            )
            # Build one tab per curated group, each holding up to sub_cate_num grids.
            for group_order_ele, sub_group_order_list in emoji_order_list:
                group_first = group_order_ele
                df_group = emoji_df[emoji_df["emoji"] == group_first]["group"].iloc[0]
                df = emoji_df[emoji_df["group"] == df_group]
                with gr.Tab("{} {}".format(group_first, df_group)):
                    for ii in range(len(sub_group_order_list)):
                        sub_first = sub_group_order_list[ii]
                        df_sub_group = emoji_df[emoji_df["emoji"] == sub_first]["sub_group"].iloc[0]
                        dff = df[df["sub_group"] == df_sub_group]
                        if ii >= sub_cate_num:
                            break
                        sub_first = dff["emoji"].iloc[0]
                        sub_l = dff["emoji"].values.tolist()[:sub_cate_size]
                        sub_l_square = list_to_square(sub_l, sub_col_num)
                        g_frame = gr.DataFrame(sub_l_square,
                            interactive=False, headers = [''] * sub_col_num,
                            elem_id="frame",
                            label = "{} {}".format(sub_first, df_sub_group)
                        )
                        frame_list.append(g_frame)
        with gr.Column():
            with gr.Row():
                with gr.Column():
                    sub_title_1 = gr.Markdown(
                        value="### ChengYu to Emoji combinations, You can click to use them",
                        visible=True,
                    )
                    chengyu_frame = gr.DataFrame(gen_emojis_by_sample(),
                        interactive=False, headers = [''] * sub_col_num,
                        elem_id="chengyu_frame",
                    )
            with gr.Row():
                chengyu_reset_button = gr.Button("Reset ChengYu Emojis",
                    elem_id="run_button")
            with gr.Row():
                append_or_replace = gr.Radio(choices=["replace", "append"],
                    value="replace", label="ChengYu Emoji Append or Replace to below", elem_id="text_radio")
            with gr.Row():
                emoji_outputs = gr.Textbox(label="Emoji Prompt Output", show_label=True, lines=1, max_lines=20,
                    min_width = 256, placeholder="Click Emoji from left with some emoji input manually", elem_id="prompt",
                    interactive=True)
                clean_button = gr.Button("Clean Emojis", elem_id="clean_button")
            with gr.Row():
                emoji2text_or_not = gr.Radio(choices=["Only Emoji", "Emoji to Text"],
                    value="Only Emoji", label="Only use Emoji to get images or translate them to Text by LLM",
                    elem_id="trans_radio")
            with gr.Row():
                llm_prompt_input = gr.Textbox(label="Emoji to Text Prompt template used by LLM", show_label=True,
                    lines=1, max_lines=20,
                    min_width = 256,
                    value="Use following emojis to generate a short description of a scene , use the pattern someone do something , the emojis are {}"
                    , elem_id="prompt",
                    interactive=True)
                llm_outputs = gr.Textbox(label="Emoji to Text Prompt translate by LLM Output", show_label=True,
                    lines=1, max_lines=20,
                    min_width = 256, placeholder="Emoji describe by Text", elem_id="prompt",
                    interactive=True)
            with gr.Row():
                emoji_outputs_button = gr.Button("Retrieve Images", elem_id="run_button")

    with gr.Row():
        with gr.Column():
            # Bug fix: the original passed `lable=` (typo), so the Gallery
            # never received its label.
            outputs = gr.Gallery(label='Output gallery', elem_id="gallery",).style(grid=5, height=768 - 64 - 32,
                allow_preview=False)
            exp_title = gr.HTML(
                """<br/><br/><h5 align="center"> <font size="+1"> Emojis examples live in 🕌 travel to StableDiffusion 🌍 </font> </h5>""",
            )
            gr.Examples(
                [
                    ["🍔", "Only Emoji"],
                    ["🔥🌲", "Only Emoji"],
                    ["🍱🍽️👨", "Emoji to Text"],
                    ["🐻🍦❄️🌊", "Only Emoji"],
                    ["🌎🦶👂💤", "Emoji to Text"],
                    ["👩‍🔬🗣☕👨‍🎨", "Emoji to Text"],
                    ["👜👚🧣👸🏰✌️", "Emoji to Text"],
                    ["🍏🐝🎃👶🥛🌾🕵🏃‍♀️", "Emoji to Text"],
                    ["🙃💁👋👨‍🔧👄🥤🌎🌙", "Emoji to Text"],
                ],
                inputs = [emoji_outputs, emoji2text_or_not],
            )

    with gr.Row():
        with gr.Tab(label = "Download"):
            zip_button = gr.Button("Zip Images to Download", elem_id="zip_button")
            downloads = gr.File(label = "Image zipped", elem_id = "zip_file")

    # Clicking any picker cell appends that emoji to the prompt textbox.
    for g in frame_list:
        g.select(fn = append_emojis, inputs = [g, emoji_outputs], outputs = emoji_outputs)

    chengyu_frame.select(fn = append_chengyu_emojis, inputs = [chengyu_frame, emoji_outputs, append_or_replace],
        outputs = emoji_outputs)
    # Bug fix: these callbacks are registered with no `inputs`, so Gradio
    # invokes them with zero arguments -- the original `lambda _: ...`
    # raised TypeError on every click.
    chengyu_reset_button.click(fn = lambda: gen_emojis_by_sample(), outputs = chengyu_frame)

    clean_button.click(fn = lambda: "", outputs = emoji_outputs)

    emoji_outputs_button.click(search,
        inputs=[emoji_outputs, emoji2text_or_not, llm_prompt_input],
        outputs=[outputs, llm_outputs])

    zip_button.click(
        zip_ims, inputs = outputs, outputs=downloads
    )

# Bug fix: launch()'s first positional parameter is not the bind address;
# pass server_name explicitly so the app actually listens on 0.0.0.0.
demo.launch(server_name="0.0.0.0")