#### pip install advertools
#### pip install emoji
#### pip install emoji-chengyu
#### pip install gradio-client
#### prefer to run in Chrome; other browsers may have problems with the change hook functions
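#### This Space composes an emoji prompt (optionally from a Chinese ChengYu idiom), optionally rewrites it
#### into an image prompt through translation Spaces and a WizardLM Space, and retrieves matching images from Lexica.art.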
import gradio as gr
import pandas as pd
import emoji
from advertools.emoji import emoji_df
from copy import deepcopy
import numpy as np
from emoji_chengyu.data import DefaultChengyuManager
from emoji_chengyu.puzzle import make_one_puzzle, gen_puzzle
from Lex import *
'''
lex = Lexica(query="man woman fire snow").images()
'''
from PIL import Image
import requests
from zipfile import ZipFile
from time import sleep
sleep_time = 0.5
import requests
import chatglm_cpp
import gradio as gr
from huggingface_hub import space_info
from pathlib import Path
model_file_path = "chatglm2-ggml-q4_0.bin"
chatglm_llm = chatglm_cpp.Pipeline(Path(model_file_path))
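#### Translation helpers: call the svjack-ctranslate Space's Gradio HTTP API (/run/predict);
#### the returned dict's "Target Question" field carries the translated text.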
def translate_zh_to_en(zh_text):
assert type(zh_text) == type("")
'''
response = requests.post("https://svjack-translate-chinese-to-english.hf.space/run/predict", json={
"data": [
zh_text,
]}).json()
'''
response = requests.post("https://svjack-ctranslate.hf.space/run/predict", json={
"data": [
zh_text,
"zh",
"en",
]}).json()
data = response["data"]
data = data[0]
#data = data["English Question"]
data = data["Target Question"]
return data
def translate_en_to_zh(en_text):
assert type(en_text) == type("")
'''
response = requests.post("https://svjack-translate.hf.space/run/predict", json={
"data": [
en_text,
"en",
"zh",
]}).json()
'''
response = requests.post("https://svjack-ctranslate.hf.space/run/predict", json={
"data": [
en_text,
"en",
"zh",
]}).json()
data = response["data"]
data = data[0]
data = data["Target Question"]
return data
from gradio_client import Client
#llm_client = Client("https://svjack-wizardlm-13b-ggml.hf.space/--replicas/h4fm9/")
llm_client = Client("https://svjack-wizardlm-13b-ggml.hf.space")
'''
llm_result = llm_client.predict(
"Use following emojis to generate a short description of a scene , the emojis are ๐Ÿ‘จ๐Ÿ‘ฉ๐Ÿ”ฅโ„๏ธ", # str in 'Question/Instruction' Textbox component
0.8, # int | float (numeric value between 0.1 and 1.0) in 'Temperature' Slider component
0.95, # int | float (numeric value between 0.0 and 1.0) in 'Top-p (nucleus sampling)' Slider component
40, # int | float (numeric value between 5 and 80) in 'Top-k' Slider component
256, # int | float (numeric value between 0 and 1024) in 'Maximum new tokens' Slider component
52, # int | float in 'Seed' Number component
fn_index=1
)
'''
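#### Wrap the WizardLM-13B-GGML Space client with fixed sampling parameters
#### (temperature 0.8, top-p 0.95, top-k 40, 256 new tokens, seed 52).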
def run_llm_client(llm_client, prompt):
llm_result = llm_client.predict(
prompt, # str in 'Question/Instruction' Textbox component
0.8, # int | float (numeric value between 0.1 and 1.0) in 'Temperature' Slider component
0.95, # int | float (numeric value between 0.0 and 1.0) in 'Top-p (nucleus sampling)' Slider component
40, # int | float (numeric value between 5 and 80) in 'Top-k' Slider component
256, # int | float (numeric value between 0 and 1024) in 'Maximum new tokens' Slider component
52, # int | float in 'Seed' Number component
fn_index=1
)
return llm_result
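#### Turn a "ChengYu:emojis" string into an image prompt: translate the idiom to English,
#### fill it and the emojis into the LLM prompt template, then translate the LLM output back to Chinese.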
def chengyu_emoji_to_im_prompt(chengyu_emoji_input,
llm_prompt_input, llm_client = llm_client):
chengyu, emoji = chengyu_emoji_input.split(":")
chengyu_en = translate_zh_to_en(chengyu)
print("{}\t\t\t{}".format(chengyu_emoji_input ,chengyu_en))
llm_prompt = llm_prompt_input.format(chengyu_en, emoji)
im_en_prompt = run_llm_client(llm_client, llm_prompt)
im_zh_prompt = translate_en_to_zh(im_en_prompt)
return im_en_prompt, im_zh_prompt
def min_dim_to_size(img, size = 512):
#### PIL's Image.size is (width, height); scale so the larger side equals `size`
w, h = img.size
ratio = size / max(w, h)
w, h = map(lambda x: int(x * ratio), [w, h])
return ( ratio ,img.resize((w, h)) )
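#### Query Lexica.art through the Lex helper, swap full-size image URLs for "sm2" thumbnails,
#### download each image and resize it for the gallery.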
def lexica(prompt, limit_size = 128, ratio_size = 256 + 128):
if not prompt or not prompt.strip():
return []
prompt = prompt.strip()
lex = Lexica(query=prompt).images()
lex = lex[:limit_size]
lex = list(map(lambda x: x.replace("full_jpg", "sm2"), lex))
lex_ = []
for ele in lex:
try:
im = Image.open(
requests.get(ele, stream = True).raw
)
lex_.append(im)
except:
print("err")
sleep(sleep_time)
assert lex_
lex = list(map(lambda x: min_dim_to_size(x, ratio_size)[1], lex_))
return lex
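#### Main retrieval entry, three modes:
#### "Only Emoji"         - search Lexica with the emoji characters directly
#### "Emoji to Text"      - let the LLM describe the emojis, then search with that description
#### "ChengYu with Emoji" - feed the idiom text and its emojis to the LLM before searching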
def search(emoji_outputs, emoji2text_or_not, llm_prompt_input, llm_client = llm_client):
assert emoji2text_or_not in ["Emoji to Text", "Only Emoji", "ChengYu with Emoji"]
if emoji2text_or_not == "Only Emoji":
emoji_outputs = extract_2(emoji_outputs)[1]
l = lexica(emoji_outputs.replace(":", ""))
return (l, "", "")
elif emoji2text_or_not == "Emoji to Text":
assert "{}" in llm_prompt_input
emoji_outputs = extract_2(emoji_outputs)[1]
llm_prompt = llm_prompt_input.format(emoji_outputs)
llm_en_output = run_llm_client(llm_client, llm_prompt)
llm_zh_output = translate_en_to_zh(llm_en_output)
tail_list = ["someone do something"]
for tail in tail_list:
if tail in llm_en_output and len(llm_en_output.split(tail)[-1]) > (5 * 3):
llm_en_output = llm_en_output.split(tail)[-1]
l = lexica(llm_en_output)
return (l, llm_en_output, llm_zh_output)
else:
assert "{}" in llm_prompt_input
a, b = extract_2(emoji_outputs)
a, b = a.strip(), b.strip()
if not a and not b:
return ([], "", "")
emoji_outputs = "{}:{}".format(a, b)
llm_en_output, llm_zh_output = chengyu_emoji_to_im_prompt(emoji_outputs, llm_prompt_input)
l = lexica(llm_en_output)
return (l, llm_en_output, llm_zh_output)
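#### Prompt search against enterpix.app (stable-diffusion / midjourney prompts); not wired into the UI below.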
def enterpix(prompt, limit_size = 100, ratio_size = 256 + 128, use_key = "bigThumbnailUrl"):
resp = requests.post(
url = "https://www.enterpix.app/enterpix/v1/image/prompt-search",
data= {
"length": limit_size,
"platform": "stable-diffusion,midjourney",
"prompt": prompt,
"start": 0
}
)
resp = resp.json()
resp = list(map(lambda x: x[use_key], resp["images"]))
lex_ = []
for ele in resp:
try:
im = Image.open(
requests.get(ele, stream = True).raw
)
lex_.append(im)
except:
print("err")
sleep(sleep_time)
assert lex_
resp = list(map(lambda x: min_dim_to_size(x, ratio_size)[1], lex_))
return resp
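#### Zip the gallery images (each under a random uuid name) into tmp.zip for the download button.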
def zip_ims(g):
from uuid import uuid1
if g is None:
return None
l = list(map(lambda x: x["name"], g))
if not l:
return None
zip_file_name ="tmp.zip"
with ZipFile(zip_file_name ,"w") as zipObj:
for ele in l:
zipObj.write(ele, "{}.png".format(uuid1()))
#zipObj.write(file2.name, "file2")
return zip_file_name
emoji_order_list = [
["๐Ÿ‡", ["๐Ÿฅข","๐Ÿผ", "๐Ÿฑ", "๐Ÿ‡", "๐Ÿฆ€"]],
["๐Ÿ˜€", ["๐Ÿฅฐ", "๐Ÿ˜•", "๐Ÿ˜บ", "๐Ÿ’‹", "๐Ÿ’ฉ"]],
["๐Ÿต", ["๐Ÿต", "๐Ÿฆƒ", "๐ŸŒ", "๐Ÿณ"]],
["๐Ÿ“”", ["๐Ÿ‘“" ,"๐Ÿ“”", "๐Ÿšช", "๐Ÿ”‹", "๐ŸŽฅ"]],
["๐ŸŽƒ", ["โšฝ", "๐ŸŽƒ", "๐ŸŽฏ", "๐ŸŽญ", "๐ŸŽ–๏ธ"]],
#["๐ŸŒ", ["๐ŸŒ", "๐Ÿ "๏ธ, "โ›ฒ", "๐Ÿ”"๏ธ]],
["๐Ÿ‘‹", ["๐Ÿ‘", "๐Ÿ’ช", "๐Ÿ‘‹", "๐Ÿ‘Œ",]],
["๐ŸŒ", ["๐ŸŒ", "โ›ฒ", "๐Ÿ ",]],
]
sub_cate_num = 5
sub_cate_size = 36
sub_col_num = 6
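#### Pad a flat list to a multiple of col_num and reshape it into rows for a gr.DataFrame grid.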
def list_to_square(l, col_num = 10):
assert type(l) == type([])
res = len(l) % col_num
if res > 0:
#### pad with empty strings so the list reshapes into complete rows
l = deepcopy(l) + [""] * (col_num - res)
ll = np.asarray(l).reshape([-1, col_num]).tolist()
return ll
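#### Click handler for the emoji picker DataFrames: append the clicked emoji to the prompt box,
#### keeping the "text:emojis" form produced by extract_2.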
def append_emojis(selected_index: gr.SelectData, dataframe_origin, emoji_prompt):
val = dataframe_origin.iloc[selected_index.index[0], selected_index.index[1]]
if val.strip():
#emoji_prompt = emoji_prompt + val
a, b = extract_2(emoji_prompt)
aa, bb = extract_2(val)
a, b = a.strip(), b.strip()
aa, bb = aa.strip(), bb.strip()
emoji_prompt = "{}:{}".format(a + aa, b + bb)
return emoji_prompt
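#### Click handler for the ChengYu DataFrame: append to or replace the prompt box with the clicked "ChengYu:emojis" cell.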
def append_chengyu_emojis(selected_index: gr.SelectData, dataframe_origin, emoji_prompt, append_or_replace, emoji2text_or_not):
val = dataframe_origin.iloc[selected_index.index[0], selected_index.index[1]]
if type(val) != type("") or not val:
return emoji_prompt
assert emoji2text_or_not in ["Emoji to Text", "Only Emoji", "ChengYu with Emoji"]
assert append_or_replace in ["replace", "append"]
a, b = extract_2(emoji_prompt)
aa, bb = extract_2(val)
if append_or_replace == "append":
'''
if emoji2text_or_not in ["Emoji to Text", "Only Emoji"]:
emoji_prompt = emoji_prompt + val.split(":")[-1]
else:
a, b = val.split(":")
emoji_prompt = "{}:{}".format(a, emoji_prompt + b)
'''
a, b = a + aa, b + bb
else:
'''
if emoji2text_or_not in ["Emoji to Text", "Only Emoji"]:
emoji_prompt = val.split(":")[-1]
else:
emoji_prompt = val
'''
a, b = aa, bb
a = a.strip()
b = b.strip()
emoji_prompt = "{}:{}".format(a, b)
return emoji_prompt
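#### extract_emojis keeps only emoji characters; extract_2 splits a string into (non-emoji text, emoji characters).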
def extract_emojis(s):
#return ''.join(c for c in s if c in emoji.UNICODE_EMOJI['en'])
dl = emoji.emoji_list(s)
return "".join(map(lambda x: x["emoji"], dl))
def extract_2(s):
b = extract_emojis(s)
a = "".join(filter(lambda x: x not in b + ":", list(s)))
return a, b
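'''
#### illustrative example (not executed):
#### extract_2("ChengYu:😀🔥")  ->  ("ChengYu", "😀🔥")
'''
#### Look up a ChengYu (four-character idiom) and turn it into an emoji puzzle string via emoji_chengyu.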
def gen_emojis_by_chengyu(words):
assert type(words) == type("")
out = DefaultChengyuManager.get_by_word(words)
if out is None:
return ""
out = "".join(make_one_puzzle(out).puzzle)
out = extract_emojis(out)
return out
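#### Sample ChengYu puzzles, keep four-character idioms whose four emojis all matched (score == 4),
#### drop rows that repeat an emoji in any column, and reshape 21 "words:emoji" strings into a 7x3 DataFrame.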
def gen_emojis_by_sample(search_count=5000):
pg = gen_puzzle(manager=DefaultChengyuManager, search_count=search_count)
df = pd.DataFrame(list(map(lambda x: {
"words": "".join(x.chengyu_item.word_list),
"emoji": x.puzzle_str,
"score": sum(x.mask)
} ,pg)))
df = df[df["score"] == 4]
df = df[df["words"].map(lambda x: len(x) == 4)]
req = []
col0 = set([])
col1 = set([])
col2 = set([])
col3 = set([])
for i, r in df.iterrows():
words = r["words"]
emoji = r["emoji"]
if emoji[0] in col0:
continue
col0.add(emoji[0])
if emoji[1] in col1:
continue
col1.add(emoji[1])
if emoji[2] in col2:
continue
col2.add(emoji[2])
if emoji[3] in col3:
continue
col3.add(emoji[3])
req.append(
r.to_dict()
)
df = pd.DataFrame(req)
if len(df) < 21:
return gen_emojis_by_sample(search_count=search_count)
df = pd.DataFrame(
np.asarray(df.apply(lambda x: x.to_dict(), axis = 1).head(21).map(lambda x:
"{}:{}".format(x["words"],x["emoji"])
).tolist()).reshape(
(7, 3)
)
)
return df
def append_pure_to_input(emoji_outputs ,only_emoji_outputs):
return emoji_outputs + only_emoji_outputs
def outputs_rec_format(emoji_outputs):
a, b = extract_2(emoji_outputs)
a, b = a.strip(), b.strip()
emoji_outputs = "{}:{}".format(a, b)
return emoji_outputs
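#### "Fix" button backend: few-shot prompt the local chatglm2-ggml model with the idiom/emoji pairs below
#### to regenerate the emoji part of the prompt from its text part.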
def fix_emojis_by_glm(emoji_outputs):
l = [
'ๆœๆ–ฏๅค•ๆ–ฏ',
'๐ŸŒž๐Ÿ•›๐ŸŒ‡๐ŸŒ›',
'ๅ‡บๅ…ถไธๆ„',
'๐Ÿช–๐ŸŽ๐Ÿ—ก๏ธโœŒ๏ธ',
'็™พ็ดซๅƒ็บข',
'๐ŸŽŽ๐ŸŒน๐ŸŠ๐Ÿƒ',
'่ƒŒ็ข‘่ฆ†ๅฑ€',
'๐Ÿ€„๏ธโ™Ÿ๏ธ๐Ÿณ๏ธ๐Ÿ’€',
'ๆ˜ฅ่›‡็ง‹่š“',
'๐ŸŒž๐Ÿ๐Ÿ๐Ÿชฑ',
'ไผ ้ฃŽๆ‰‡็ซ',
'โ˜๏ธ๐ŸŒฌ๏ธ๐Ÿชญ๐Ÿ”ฅ',
'ไธๅฏ้€พ่ถŠ',
'๐Ÿ’ช๐Ÿƒ๐Ÿฅฑโ˜น๏ธ',
'ไธนไนฆ็™ฝ้ฉฌ',
'๐Ÿ“„๐Ÿ“–๐Ÿ˜ป๐ŸŽ',
'ๆŒจๅ†ปๅ—้ฅฟ',
'๐ŸŒฌ๏ธ๐Ÿฅถ๐Ÿฝ๏ธ๐Ÿ˜ฑ',
'็™ฝ้ฉฌ้ž้ฉฌ',
'๐Ÿ˜„๐ŸŒŸ๐ŸŽ๐Ÿ˜',
'ๆŠฑ็މๆก็ ',
'๐Ÿซ‚๐Ÿ’ฐ๐Ÿซณ๐Ÿงง',
'้“้ชจไป™้ฃŽ',
'โœ๏ธ๐Ÿ‘จ๐Ÿ•Œ๐Ÿ‘ผ',
'ๆ˜ฅๅŽ็ง‹ๅฎž',
'๐ŸŒž๐ŸŒฝ๐Ÿ๐Ÿ‰',
'ๆ˜ฅ้ฃŽๅค้›จ',
'โ˜€๏ธ๐ŸŒฌ๏ธ๐ŸŒง๏ธโšก๏ธ'
]
a, b = extract_2(emoji_outputs)
a, b = a.strip(), b.strip()
if a:
b = chatglm_llm.chat(
history= ["请执行将成语翻译成emoji的任务,下面是一些例子。", "好的。"] + l + \
[a], do_sample=False
)
b = extract_emojis(b)
emoji_outputs = "{}:{}".format(a, b)
return emoji_outputs
css = """
#frame span{
font-size: 1.5em; display: flex; align-items: center;
}
"""
###with gr.Blocks(css="custom.css") as demo:
with gr.Blocks(css = css) as demo:
title = gr.HTML(
"""<h1 align="center"> <font size="+10"> ๐Ÿ•Œ Emojis to StableDiffusion World ๐ŸŒ </font> </h1>""",
elem_id="title",
)
frame_list = []
with gr.Row():
with gr.Column(label = "Emoji samples, click to use them"):
sub_title_0 = gr.Markdown(
value="### Emoji samples, click to use them",
visible=True,
#elem_id="selected_model",
)
#for group, df in emoji_df.groupby("group"):
for group_order_ele, sub_group_order_list in emoji_order_list:
#group_first = df["emoji"].iloc[0]
group_first = group_order_ele
df_group = emoji_df[emoji_df["emoji"] == group_first]["group"].iloc[0]
df = emoji_df[emoji_df["group"] == df_group]
with gr.Tab("{} {}".format(group_first, df_group)):
#for ii ,(sub_group, dff) in enumerate(df.groupby("sub_group")):
for ii in range(len(sub_group_order_list)):
sub_first = sub_group_order_list[ii]
df_sub_group = emoji_df[emoji_df["emoji"] == sub_first]["sub_group"].iloc[0]
dff = df[df["sub_group"] == df_sub_group]
if ii >= sub_cate_num:
break
sub_first = dff["emoji"].iloc[0]
sub_l = dff["emoji"].values.tolist()[:sub_cate_size]
sub_l_square = list_to_square(sub_l, sub_col_num)
g_frame = gr.DataFrame(sub_l_square,
interactive=False, headers = [''] * sub_col_num,
#datatype="markdown"
elem_id="frame",
label = "{} {}".format(sub_first, df_sub_group)
)
#g_frame = gr.Matrix(sub_l_square, label = sub_first,)
frame_list.append(g_frame)
markdown_exp_size = "##"
lora_repo = "svjack/chatglm3-few-shot"
lora_repo_link = "svjack/chatglm3-few-shot/?input_list_index=0"
emoji_info = space_info(lora_repo).__dict__["cardData"]["emoji"]
space_cnt = 1
task_name = "[---ChengYu to Emojis---]"
gr.Markdown(
value=f"{markdown_exp_size} {task_name} few shot prompt in ChatGLM3 Few Shot space repo (click submit to activate) : [{lora_repo_link}](https://huggingface.co/spaces/{lora_repo_link}) {emoji_info}",
visible=True,
elem_id="selected_space",
)
lora_repo = "svjack/chatglm3-few-shot"
lora_repo_link = "svjack/chatglm3-few-shot/?input_list_index=1"
emoji_info = space_info(lora_repo).__dict__["cardData"]["emoji"]
space_cnt = 2
task_name = "[---Emojis to Image Prompt---]"
gr.Markdown(
value=f"{markdown_exp_size} {task_name} few shot prompt in ChatGLM3 Few Shot space repo (click submit to activate) : [{lora_repo_link}](https://huggingface.co/spaces/{lora_repo_link}) {emoji_info}",
visible=True,
elem_id="selected_space",
)
gr.HTML(
'''
<div style="justify-content: center; display: flex;">
<iframe
src="https://svjack-chatglm3-few-shot-demo.hf.space/?input_list_index=0"
frameborder="0"
width="768"
height="1024"
></iframe>
</div>
'''
)
gr.HTML(
'''
<div style="justify-content: center; display: flex;">
<iframe
src="https://svjack-chatglm3-few-shot-demo.hf.space/?input_list_index=1"
frameborder="0"
width="768"
height="1024"
></iframe>
</div>
'''
)
with gr.Column():
with gr.Row():
with gr.Column():
sub_title_1 = gr.Markdown(
value="### ChengYu to Pinyin Emoji combinations, You can click to use them, Don't forget edit or Fix them after click, to make it meaningful",
visible=True,
#elem_id="selected_model",
)
chengyu_frame = gr.DataFrame(gen_emojis_by_sample(),
interactive=False, headers = [''] * sub_col_num,
#datatype="markdown"
elem_id="chengyu_frame",
#label = "ChengYu to Emoji combinations, You can click to use them"
)
with gr.Row():
chengyu_reset_button = gr.Button("Reset ChengYu Emojis",
elem_id="run_button")
with gr.Row():
append_or_replace = gr.Radio(choices=["replace", "append"],
value="replace", label="Append or replace the ChengYu Emoji into the box below", elem_id="text_radio")
with gr.Row():
with gr.Row():
emoji_outputs = gr.Textbox(label="Emoji Prompt Input", show_label=True, lines=1, max_lines=20,
min_width = 256, placeholder="Click emojis on the left, or type some emojis manually", elem_id="prompt",
interactive=True, info = "Generated by clicking; you can also edit it yourself. See the Examples below")
with gr.Row():
clean_button = gr.Button("Clear", elem_id="clear_button",
label = "Clear all content")
fix_button = gr.Button("Fix Pinyin Emoji to Means (4min 2 cores, 30s 12 cores)", elem_id="fix_button",
label = "Fix Emojis by Text part use model, takes 4min in 2 cores and 30s in 12 cores in cpu"
)
'''
with gr.Column():
clean_button = gr.Button("Clean Emojis", elem_id="clean_button")
emoji_outputs_button = gr.Button("Retrieve Images", elem_id="run_button")
'''
with gr.Row():
### \n 🕌 Only Emoji \n 🕌➡️🔤 Emoji to Text \n 📖⚖️🕌 ChengYu with Emoji
with gr.Row():
emoji2text_or_not = gr.Radio(choices=["Only Emoji", "Emoji to Text", "ChengYu with Emoji"],
value="Only Emoji", label="Emoji &| Text to get images or translate them to Text by LLM",
elem_id="trans_radio",
info = "๐Ÿ•Œ Only Emoji -- ๐Ÿ•Œโžก๏ธ๐Ÿ”ค Emoji to Text -- ๐Ÿ“–โš–๏ธ๐Ÿ•Œ ChengYu with Emoji -- ๐Ÿ“–โžก๏ธ๐Ÿ•Œโš–๏ธ๐Ÿ“– Fix + ChengYu with Emoji"
)
llm_prompt_input = gr.Textbox(label="Emoji to Text Prompt template used by LLM", show_label=True,
lines=1, max_lines=20,
min_width = 256,
value="Use following emojis to generate a short description of a scene , use the pattern someone do something , the emojis are {}"
, elem_id="prompt",
interactive=True)
llm_outputs = gr.Textbox(label="Emoji-to-Text prompt translated by the LLM (output)", show_label=True,
lines=1, max_lines=20,
min_width = 256, placeholder="Emojis described in text", elem_id="prompt",
interactive=True)
llm_zh_outputs = gr.Textbox(label="Emoji-to-Text prompt translated by the LLM (Chinese output)", show_label=True,
lines=1, max_lines=20,
min_width = 256, placeholder="Emojis described in Chinese", elem_id="prompt",
interactive=True)
'''
with gr.Row():
emoji_gen_chengyu_input = gr.Textbox(label="ChengYu Prompt Input", show_label=False, lines=1, max_lines=20,
min_width = 256, placeholder="input ChengYu manually, like: ๅŠฑ็ฒพๅ›พๆฒป", elem_id="prompt",
interactive=True)
with gr.Row():
only_emoji_outputs = gr.Textbox(label="Only Emoji Prompt Output", show_label=False, lines=1, max_lines=20,
min_width = 256, placeholder="Filter out only emoji charactors", elem_id="prompt", interactive=True)
#gr.Slider(label='Number of images ', minimum = 4, maximum = 20, step = 1, value = 4)]
append_button = gr.Button("Append Only Emojis to Emoji Prompt Output", elem_id="append_button")
only_emoji_outputs_button = gr.Button("Retrieve Images Only Emoji", elem_id="run_button")
with gr.Row():
#text_button = gr.Button("Retrieve Images", elem_id="run_button")
emoji_outputs_button = gr.Button("Retrieve Images", elem_id="run_button")
'''
with gr.Row():
emoji_outputs_button = gr.Button("Retrieve Images", elem_id="run_button")
with gr.Row():
with gr.Column():
outputs = gr.Gallery(label='Output gallery', elem_id="gallery").style(grid=5, height=768 - 64 - 32,
allow_preview=False)
exp_title = gr.HTML(
"""<br/><br/><h5 align="center"> <font size="+1"> Emojis examples live in ๐Ÿ•Œ travel to StableDiffusion ๐ŸŒ </font> </h5>""",
#elem_id="title",
)
'''
gr.Examples(
[
["๐Ÿ”", "Only Emoji"],
["๐Ÿ”ฅ๐ŸŒฒ", "Only Emoji"],
["๐Ÿฑ๐Ÿฝ๏ธ๐Ÿ‘จ", "Emoji to Text"],
["๐Ÿป๐Ÿฆโ„๏ธ๐ŸŒŠ", "Only Emoji"],
["๐ŸŒŽ๐Ÿฆถ๐Ÿ‘‚๐Ÿ’ค", "Emoji to Text"],
["๐Ÿ‘ฉโ€๐Ÿ”ฌ๐Ÿ—ฃโ˜•๐Ÿ‘จโ€๐ŸŽจ", "Emoji to Text"],
["ๅˆ€ๅฑฑ็ซๆตท:๐Ÿ™ƒโšก๐Ÿ‘ฉโ€๐Ÿš’๐Ÿงœโ€โ™‚๏ธ", "ChengYu with Emoji"],
["ไผ ไธบไฝณ่ฏ:๐Ÿก๐Ÿงฃ๐Ÿ‘ฉโ€๐Ÿ‘ฉโ€๐Ÿ‘งโ€๐Ÿ‘ง๐Ÿ‘ฉโ€๐ŸŽจ", "ChengYu with Emoji"],
["ๅคง้“่‡ณ็ฎ€:๐Ÿ‡จ๐Ÿ‡ณ๐ŸŒพ๐Ÿง ๐Ÿ‘€โ€โ™‚๏ธ", "ChengYu with Emoji"],
#["๐Ÿ‘œ๐Ÿ‘š๐Ÿงฃ๐Ÿ‘ธ๐ŸฐโœŒ๏ธ", "Emoji to Text"],
#["๐Ÿ‘๐Ÿ๐ŸŽƒ๐Ÿ‘ถ๐Ÿฅ›๐ŸŒพ๐Ÿ•ต๐Ÿƒโ€โ™€๏ธ", "Emoji to Text"],
#["๐Ÿ™ƒ๐Ÿ’๐Ÿ‘‹๐Ÿ‘จโ€๐Ÿ”ง๐Ÿ‘„๐Ÿฅ€๐ŸŒŽ๐ŸŒ™", "Emoji to Text"],
#["ๆ˜ฅๆฑŸๆฝฎๆฐด่ฟžๆตทๅนณ:๐ŸŒž๐ŸŒบ๐Ÿ’ฆ๐Ÿคค๐Ÿชท๐ŸŒŠโ˜€๏ธ", "ChengYu with Emoji"],
#["ๆœˆๆ˜Žๆ˜Ÿ็จ€๏ผŒไนŒ้นŠๅ—้ฃžใ€‚:๐ŸŒ›๐ŸŒ›โœจ๐Ÿซ…๐Ÿฆ๐Ÿฆ†๐Ÿฆข๐Ÿชฝ", "ChengYu with Emoji"]
],
inputs = [emoji_outputs, emoji2text_or_not],
#label = "๐Ÿ•Œ Examples"
)
gr.Examples(
[
#["๐Ÿ”", "Only Emoji"],
#["๐Ÿ”ฅ๐ŸŒฒ", "Only Emoji"],
#["๐Ÿฑ๐Ÿฝ๏ธ๐Ÿ‘จ", "Emoji to Text"],
#["๐Ÿป๐Ÿฆโ„๏ธ๐ŸŒŠ", "Only Emoji"],
#["๐ŸŒŽ๐Ÿฆถ๐Ÿ‘‚๐Ÿ’ค", "Emoji to Text"],
#["๐Ÿ‘ฉโ€๐Ÿ”ฌ๐Ÿ—ฃโ˜•๐Ÿ‘จโ€๐ŸŽจ", "Emoji to Text"],
#["ๅˆ€ๅฑฑ็ซๆตท:๐Ÿ™ƒโšก๐Ÿ‘ฉโ€๐Ÿš’๐Ÿงœโ€โ™‚๏ธ", "ChengYu with Emoji"],
#["ไผ ไธบไฝณ่ฏ:๐Ÿก๐Ÿงฃ๐Ÿ‘ฉโ€๐Ÿ‘ฉโ€๐Ÿ‘งโ€๐Ÿ‘ง๐Ÿ‘ฉโ€๐ŸŽจ", "ChengYu with Emoji"],
#["ๅคง้“่‡ณ็ฎ€:๐Ÿ‡จ๐Ÿ‡ณ๐ŸŒพ๐Ÿง ๐Ÿ‘€โ€โ™‚๏ธ", "ChengYu with Emoji"],
["๐Ÿ‘œ๐Ÿ‘š๐Ÿงฃ๐Ÿ‘ธ๐ŸฐโœŒ๏ธ", "Emoji to Text"],
["๐Ÿ‘๐Ÿ๐ŸŽƒ๐Ÿ‘ถ๐Ÿฅ›๐ŸŒพ๐Ÿ•ต๐Ÿƒโ€โ™€๏ธ", "Emoji to Text"],
["๐Ÿ™ƒ๐Ÿ’๐Ÿ‘‹๐Ÿ‘จโ€๐Ÿ”ง๐Ÿ‘„๐Ÿฅ€๐ŸŒŽ๐ŸŒ™", "Emoji to Text"],
["ๆ˜ฅๆฑŸๆฝฎๆฐด่ฟžๆตทๅนณ:๐ŸŒž๐ŸŒบ๐Ÿ’ฆ๐Ÿคค๐Ÿชท๐ŸŒŠโ˜€๏ธ", "ChengYu with Emoji"],
["ๆœˆๆ˜Žๆ˜Ÿ็จ€๏ผŒไนŒ้นŠๅ—้ฃžใ€‚:๐ŸŒ›๐ŸŒ›โœจ๐Ÿซ…๐Ÿฆ๐Ÿฆ†๐Ÿฆข๐Ÿชฝ", "ChengYu with Emoji"]
],
inputs = [emoji_outputs, emoji2text_or_not],
#label = "๐Ÿ•Œ Examples"
)
'''
with gr.Column():
gr.HTML(
"""<h6 align="center"> <font size="+1"> ๐Ÿ•Œ Only Emoji examples </font> </h6>""",
#elem_id="title",
)
gr.Examples(
[
["๐Ÿ”", "Only Emoji"],
["๐Ÿ”ฅ๐ŸŒฒ", "Only Emoji"],
["๐Ÿป๐Ÿฆโ„๏ธ๐ŸŒŠ", "Only Emoji"],
],
inputs = [emoji_outputs, emoji2text_or_not],
#label = "๐Ÿ•Œ Examples"
)
gr.HTML(
"""<h6 align="center"> <font size="+1"> ๐Ÿ•Œโžก๏ธ๐Ÿ”ค Emoji to Text examples </font> </h6>""",
#elem_id="title",
)
gr.Examples(
[
["๐Ÿฑ๐Ÿฝ๏ธ๐Ÿ‘จ", "Emoji to Text"],
["๐ŸŒŽ๐Ÿฆถ๐Ÿ‘‚๐Ÿ’ค", "Emoji to Text"],
["๐Ÿ‘ฉโ€๐Ÿ”ฌ๐Ÿ—ฃโ˜•๐Ÿ‘จโ€๐ŸŽจ", "Emoji to Text"],
["๐Ÿ‘œ๐Ÿ‘š๐Ÿงฃ๐Ÿ‘ธ๐ŸฐโœŒ๏ธ", "Emoji to Text"],
["๐Ÿ‘๐Ÿ๐ŸŽƒ๐Ÿ‘ถ๐Ÿฅ›๐ŸŒพ๐Ÿ•ต๐Ÿƒโ€โ™€๏ธ", "Emoji to Text"],
["๐Ÿ™ƒ๐Ÿ’๐Ÿ‘‹๐Ÿ‘จโ€๐Ÿ”ง๐Ÿ‘„๐Ÿฅ€๐ŸŒŽ๐ŸŒ™", "Emoji to Text"],
],
inputs = [emoji_outputs, emoji2text_or_not],
#label = "๐Ÿ•Œ Examples"
)
gr.HTML(
"""<h6 align="center"> <font size="+1"> ๐Ÿ“–โš–๏ธ๐Ÿ•Œ ChengYu with Emoji examples </font> </h6>""",
#elem_id="title",
)
gr.Examples(
[
["ๅˆ€ๅฑฑ็ซๆตท:๐Ÿ™ƒโšก๐Ÿ‘ฉโ€๐Ÿš’๐Ÿงœโ€โ™‚๏ธ", "ChengYu with Emoji"],
["ไผ ไธบไฝณ่ฏ:๐Ÿก๐Ÿงฃ๐Ÿ‘ฉโ€๐Ÿ‘ฉโ€๐Ÿ‘งโ€๐Ÿ‘ง๐Ÿ‘ฉโ€๐ŸŽจ", "ChengYu with Emoji"],
["ๅคง้“่‡ณ็ฎ€:๐Ÿ‡จ๐Ÿ‡ณ๐ŸŒพ๐Ÿง ๐Ÿ‘€โ€โ™‚๏ธ", "ChengYu with Emoji"],
["ๆ˜ฅๆฑŸๆฝฎๆฐด่ฟžๆตทๅนณ:๐ŸŒž๐ŸŒบ๐Ÿ’ฆ๐Ÿคค๐Ÿชท๐ŸŒŠโ˜€๏ธ", "ChengYu with Emoji"],
["ๆœˆๆ˜Žๆ˜Ÿ็จ€๏ผŒไนŒ้นŠๅ—้ฃžใ€‚:๐ŸŒ›๐ŸŒ›โœจ๐Ÿซ…๐Ÿฆ๐Ÿฆ†๐Ÿฆข๐Ÿชฝ", "ChengYu with Emoji"]
],
inputs = [emoji_outputs, emoji2text_or_not],
#label = "๐Ÿ•Œ Examples"
)
gr.HTML(
"""<h6 align="center"> <font size="+1"> ๐Ÿ“–โžก๏ธ๐Ÿ•Œโš–๏ธ๐Ÿ“– Fix + ChengYu with Emoji examples (click above "Fix" Button: Emojis Fixed by ChengYu) </font> </h6>""",
#elem_id="title",
)
gr.Examples(
[
["้ฅฑๅญฆไน‹ๅฃซ:๐Ÿซ๐Ÿ“š๐Ÿ’ญ๐Ÿ™Œ", "ChengYu with Emoji"],
["โ€็ฌ”ไธ‹็”Ÿ่Šฑ:๐ŸŽจ๐ŸŒน๐Ÿ“๐ŸŒบ", "ChengYu with Emoji"],
["โ€็™พๅชšๅƒๅจ‡:๐ŸŒน๐Ÿ’•๐ŸŒธ๐Ÿ’‹", "ChengYu with Emoji"],
["ๆฏ”็ฟผ่ฟžๆž:๐Ÿ’๐ŸŒน๐ŸŒบ๐Ÿ’—", "ChengYu with Emoji"],
["โ€ๆ˜ฅ้ฃŽๅ’Œๆฐ”:๐ŸŒธ๐Ÿ˜ผ๐ŸŒฌ๏ธ๐ŸŒธ", "ChengYu with Emoji"],
],
inputs = [emoji_outputs, emoji2text_or_not],
#label = "๐Ÿ•Œ Examples"
)
with gr.Row():
with gr.Tab(label = "Download"):
zip_button = gr.Button("Zip Images to Download", elem_id="zip_button")
downloads = gr.File(label = "Image zipped", elem_id = "zip_file")
### ["Only Emoji", "Emoji to Text", "ChengYu with Emoji"]
emoji2text_or_not.change(
fn = lambda x: "" if x == "Only Emoji" else (
"Use following emojis to generate a short description of a scene , use the pattern someone do something , the emojis are {}" if x == "Emoji to Text" \
else "Use following emojis to make a short description of the scene about '{}', the emojis are {}"
)
, inputs = emoji2text_or_not, outputs = llm_prompt_input
)
for g in frame_list:
g.select(fn = append_emojis, inputs = [g, emoji_outputs], outputs = emoji_outputs)
##
chengyu_frame.select(fn = append_chengyu_emojis, inputs = [chengyu_frame, emoji_outputs, append_or_replace, emoji2text_or_not],
outputs = emoji_outputs)
chengyu_reset_button.click(fn = lambda: gen_emojis_by_sample(), outputs = chengyu_frame)
clean_button.click(fn = lambda: "", outputs = emoji_outputs)
fix_button.click(fn = fix_emojis_by_glm, inputs = emoji_outputs, outputs = emoji_outputs)
#emoji_outputs.change(fn = extract_emojis, inputs = emoji_outputs, outputs = only_emoji_outputs)
'''
emoji_outputs.change(
fn = outputs_rec_format
, inputs = emoji_outputs, outputs = emoji_outputs)
'''
'''
emoji_gen_chengyu_input.change(fn = gen_emojis_by_chengyu, inputs = emoji_gen_chengyu_input,
outputs = only_emoji_outputs)
append_button.click(fn = append_pure_to_input, inputs = [emoji_outputs ,only_emoji_outputs],
outputs = emoji_outputs)
'''
#emoji_outputs_button.click(lexica, inputs=emoji_outputs, outputs=outputs)
emoji_outputs_button.click(search,
inputs=[emoji_outputs, emoji2text_or_not, llm_prompt_input],
outputs=[outputs, llm_outputs, llm_zh_outputs])
zip_button.click(
zip_ims, inputs = outputs, outputs=downloads
)
demo.launch(server_name="0.0.0.0", show_api=False)