#### pip install advertools
#### pip install emoji
#### pip install emoji-chengyu
#### pip install gradio-client
#### prefer to run in Chrome; other browsers may have problems with the change hook function
import gradio as gr
import pandas as pd
import emoji
from advertools.emoji import emoji_df
from copy import deepcopy
import numpy as np
from emoji_chengyu.data import DefaultChengyuManager
from emoji_chengyu.puzzle import make_one_puzzle, gen_puzzle
from Lex import *
'''
lex = Lexica(query="man woman fire snow").images()
'''
from PIL import Image
import requests
from zipfile import ZipFile
from time import sleep

sleep_time = 0.5
def translate_zh_to_en(zh_text):
    assert type(zh_text) == type("")
    '''
    response = requests.post("https://svjack-translate-chinese-to-english.hf.space/run/predict", json={
        "data": [
            zh_text,
        ]}).json()
    '''
    response = requests.post("https://svjack-ctranslate.hf.space/run/predict", json={
        "data": [
            zh_text,
            "zh",
            "en",
        ]}).json()
    data = response["data"]
    data = data[0]
    #data = data["English Question"]
    data = data["Target Question"]
    return data
def translate_en_to_zh(en_text):
    assert type(en_text) == type("")
    '''
    response = requests.post("https://svjack-translate.hf.space/run/predict", json={
        "data": [
            en_text,
            "en",
            "zh",
        ]}).json()
    '''
    response = requests.post("https://svjack-ctranslate.hf.space/run/predict", json={
        "data": [
            en_text,
            "en",
            "zh",
        ]}).json()
    data = response["data"]
    data = data[0]
    data = data["Target Question"]
    return data
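# Minimal usage sketch for the two translation helpers above (assumes the
# svjack-ctranslate Space is reachable; the sentences are illustrative,
# not recorded outputs):
#
#     en = translate_zh_to_en("今天天气很好")   # -> e.g. "The weather is nice today"
#     zh = translate_en_to_zh("A man and a woman stand in the snow")
#
# Both helpers return the "Target Question" field of the Space's JSON response.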
from gradio_client import Client
llm_client = Client("https://svjack-wizardlm-13b-ggml.hf.space/--replicas/bnqpc/")
'''
llm_result = llm_client.predict(
    "Use following emojis to generate a short description of a scene , the emojis are 👨👩🔥❄️",  # str in 'Question/Instruction' Textbox component
    0.8,   # int | float (numeric value between 0.1 and 1.0) in 'Temperature' Slider component
    0.95,  # int | float (numeric value between 0.0 and 1.0) in 'Top-p (nucleus sampling)' Slider component
    40,    # int | float (numeric value between 5 and 80) in 'Top-k' Slider component
    256,   # int | float (numeric value between 0 and 1024) in 'Maximum new tokens' Slider component
    52,    # int | float in 'Seed' Number component
    fn_index=1
)
'''
def run_llm_client(llm_client, prompt):
    llm_result = llm_client.predict(
        prompt,  # str in 'Question/Instruction' Textbox component
        0.8,   # int | float (numeric value between 0.1 and 1.0) in 'Temperature' Slider component
        0.95,  # int | float (numeric value between 0.0 and 1.0) in 'Top-p (nucleus sampling)' Slider component
        40,    # int | float (numeric value between 5 and 80) in 'Top-k' Slider component
        256,   # int | float (numeric value between 0 and 1024) in 'Maximum new tokens' Slider component
        52,    # int | float in 'Seed' Number component
        fn_index=1
    )
    return llm_result
def chengyu_emoji_to_im_prompt(chengyu_emoji_input,
                               llm_prompt_input, llm_client = llm_client):
    chengyu, emoji_str = chengyu_emoji_input.split(":")
    chengyu_en = translate_zh_to_en(chengyu)
    print("{}\t\t\t{}".format(chengyu_emoji_input, chengyu_en))
    llm_prompt = llm_prompt_input.format(chengyu_en, emoji_str)
    im_en_prompt = run_llm_client(llm_client, llm_prompt)
    im_zh_prompt = translate_en_to_zh(im_en_prompt)
    return im_en_prompt, im_zh_prompt
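# Data-flow sketch for chengyu_emoji_to_im_prompt (the chengyu/emoji pairing is
# hypothetical, shown only to document the expected "<chengyu>:<emojis>" input):
#
#     en_prompt, zh_prompt = chengyu_emoji_to_im_prompt(
#         "画蛇添足:🐍✏️🦶",
#         "Use following emojis to make a short description of the scene about '{}', the emojis are {}",
#     )
#
# The chengyu is translated to English, both pieces are substituted into the
# prompt template, the LLM Space writes an English scene description, and that
# description is translated back to Chinese.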
def min_dim_to_size(img, size = 512):
    # PIL's Image.size is (width, height); scale so the longer side equals `size`.
    w, h = img.size
    ratio = size / max(w, h)
    w, h = map(lambda x: int(x * ratio), [w, h])
    return ( ratio, img.resize((w, h)) )
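# Worked example (not executed at import time): for a 1024x768 image and
# size=512, the ratio is 512 / 1024 = 0.5, so the image is resized to 512x384,
# i.e. the longer side is scaled down to `size`.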
def lexica(prompt, limit_size = 128, ratio_size = 256 + 128):
    if not prompt or not prompt.strip():
        return []
    prompt = prompt.strip()
    lex = Lexica(query=prompt).images()
    lex = lex[:limit_size]
    lex = list(map(lambda x: x.replace("full_jpg", "sm2"), lex))
    lex_ = []
    for ele in lex:
        try:
            im = Image.open(
                requests.get(ele, stream = True).raw
            )
            lex_.append(im)
        except Exception:
            print("err")
        sleep(sleep_time)
    assert lex_
    lex = list(map(lambda x: min_dim_to_size(x, ratio_size)[1], lex_))
    return lex
def search(emoji_outputs, emoji2text_or_not, llm_prompt_input, llm_client = llm_client):
    assert emoji2text_or_not in ["Emoji to Text", "Only Emoji", "ChengYu with Emoji"]
    if emoji2text_or_not == "Only Emoji":
        emoji_outputs = extract_2(emoji_outputs)[1]
        l = lexica(emoji_outputs.replace(":", ""))
        return (l, "", "")
    elif emoji2text_or_not == "Emoji to Text":
        assert "{}" in llm_prompt_input
        emoji_outputs = extract_2(emoji_outputs)[1]
        llm_prompt = llm_prompt_input.format(emoji_outputs)
        llm_en_output = run_llm_client(llm_client, llm_prompt)
        llm_zh_output = translate_en_to_zh(llm_en_output)
        tail_list = ["someone do something"]
        for tail in tail_list:
            if tail in llm_en_output and len(llm_en_output.split(tail)[-1]) > (5 * 3):
                llm_en_output = llm_en_output.split(tail)[-1]
        l = lexica(llm_en_output)
        return (l, llm_en_output, llm_zh_output)
    else:
        assert "{}" in llm_prompt_input
        a, b = extract_2(emoji_outputs)
        a, b = a.strip(), b.strip()
        if not a and not b:
            return ([], "", "")
        emoji_outputs = "{}:{}".format(a, b)
        llm_en_output, llm_zh_output = chengyu_emoji_to_im_prompt(emoji_outputs, llm_prompt_input)
        l = lexica(llm_en_output)
        return (l, llm_en_output, llm_zh_output)
def enterpix(prompt, limit_size = 100, ratio_size = 256 + 128, use_key = "bigThumbnailUrl"):
    resp = requests.post(
        url = "https://www.enterpix.app/enterpix/v1/image/prompt-search",
        data = {
            "length": limit_size,
            "platform": "stable-diffusion,midjourney",
            "prompt": prompt,
            "start": 0
        }
    )
    resp = resp.json()
    resp = list(map(lambda x: x[use_key], resp["images"]))
    lex_ = []
    for ele in resp:
        try:
            im = Image.open(
                requests.get(ele, stream = True).raw
            )
            lex_.append(im)
        except Exception:
            print("err")
        sleep(sleep_time)
    assert lex_
    resp = list(map(lambda x: min_dim_to_size(x, ratio_size)[1], lex_))
    return resp
def zip_ims(g):
    from uuid import uuid1
    if g is None:
        return None
    l = list(map(lambda x: x["name"], g))
    if not l:
        return None
    zip_file_name = "tmp.zip"
    with ZipFile(zip_file_name, "w") as zipObj:
        for ele in l:
            zipObj.write(ele, "{}.png".format(uuid1()))
            #zipObj.write(file2.name, "file2")
    return zip_file_name
emoji_order_list = [
    ["๐", ["๐ฅข", "๐ผ", "๐ฑ", "๐", "๐ฆ"]],
    ["๐", ["๐ฅฐ", "๐", "๐บ", "๐", "๐ฉ"]],
    ["๐ต", ["๐ต", "๐ฆ", "๐", "๐ณ"]],
    ["๐", ["๐", "๐", "๐ช", "๐", "๐ฅ"]],
    ["๐", ["โฝ", "๐", "๐ฏ", "๐ญ", "๐๏ธ"]],
    #["๐", ["๐", "๐ "๏ธ, "โฒ", "๐"๏ธ]],
    ["๐", ["๐", "๐ช", "๐", "๐",]],
    ["๐", ["๐", "โฒ", "๐ ",]],
]
sub_cate_num = 5
sub_cate_size = 36
sub_col_num = 6
def list_to_square(l, col_num = 10):
    assert type(l) == type([])
    res = len(l) % col_num
    if res > 0:
        res_for_add = col_num - res
    else:
        res_for_add = 0
    if res_for_add == 0:
        ll = np.asarray(l).reshape([-1, col_num]).tolist()
        return ll
    # pad with empty strings so the list reshapes into complete rows
    l_ = deepcopy(l) + [""] * res_for_add
    return list_to_square(l_, col_num)
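# Doctest-style example of the padding behaviour (illustrative only):
#
#     >>> list_to_square(["a", "b", "c", "d", "e"], col_num=3)
#     [['a', 'b', 'c'], ['d', 'e', '']]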
def append_emojis(selected_index: gr.SelectData, dataframe_origin, emoji_prompt):
    val = dataframe_origin.iloc[selected_index.index[0], selected_index.index[1]]
    if val.strip():
        #emoji_prompt = emoji_prompt + val
        a, b = extract_2(emoji_prompt)
        aa, bb = extract_2(val)
        a, b = a.strip(), b.strip()
        aa, bb = aa.strip(), bb.strip()
        emoji_prompt = "{}:{}".format(a + aa, b + bb)
    return emoji_prompt
def append_chengyu_emojis(selected_index: gr.SelectData, dataframe_origin, emoji_prompt, append_or_replace, emoji2text_or_not):
    val = dataframe_origin.iloc[selected_index.index[0], selected_index.index[1]]
    if type(val) != type("") or not val:
        return emoji_prompt
    assert emoji2text_or_not in ["Emoji to Text", "Only Emoji", "ChengYu with Emoji"]
    assert append_or_replace in ["replace", "append"]
    a, b = extract_2(emoji_prompt)
    aa, bb = extract_2(val)
    if append_or_replace == "append":
        '''
        if emoji2text_or_not in ["Emoji to Text", "Only Emoji"]:
            emoji_prompt = emoji_prompt + val.split(":")[-1]
        else:
            a, b = val.split(":")
            emoji_prompt = "{}:{}".format(a, emoji_prompt + b)
        '''
        a, b = a + aa, b + bb
    else:
        '''
        if emoji2text_or_not in ["Emoji to Text", "Only Emoji"]:
            emoji_prompt = val.split(":")[-1]
        else:
            emoji_prompt = val
        '''
        a, b = aa, bb
    a = a.strip()
    b = b.strip()
    emoji_prompt = "{}:{}".format(a, b)
    return emoji_prompt
def extract_emojis(s):
    #return ''.join(c for c in s if c in emoji.UNICODE_EMOJI['en'])
    dl = emoji.emoji_list(s)
    return "".join(map(lambda x: x["emoji"], dl))

def extract_2(s):
    b = extract_emojis(s)
    a = "".join(filter(lambda x: x not in b + ":", list(s)))
    return a, b
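# Example of the split performed by extract_2 (illustrative only):
#
#     >>> extract_2("fire and ice:🔥")
#     ('fire and ice', '🔥')
#
# extract_emojis keeps only the emoji characters; extract_2 additionally
# returns the non-emoji text with any ":" separators removed.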
def gen_emojis_by_chengyu(words):
    assert type(words) == type("")
    out = DefaultChengyuManager.get_by_word(words)
    if out is None:
        return ""
    out = "".join(make_one_puzzle(out).puzzle)
    out = extract_emojis(out)
    return out
def gen_emojis_by_sample(search_count=5000):
    pg = gen_puzzle(manager=DefaultChengyuManager, search_count=search_count)
    df = pd.DataFrame(list(map(lambda x: {
        "words": "".join(x.chengyu_item.word_list),
        "emoji": x.puzzle_str,
        "score": sum(x.mask)
    }, pg)))
    df = df[df["score"] == 4]
    df = df[df["words"].map(lambda x: len(x) == 4)]
    req = []
    col0 = set([])
    col1 = set([])
    col2 = set([])
    col3 = set([])
    for i, r in df.iterrows():
        words = r["words"]
        emoji = r["emoji"]
        if emoji[0] in col0:
            continue
        col0.add(emoji[0])
        if emoji[1] in col1:
            continue
        col1.add(emoji[1])
        if emoji[2] in col2:
            continue
        col2.add(emoji[2])
        if emoji[3] in col3:
            continue
        col3.add(emoji[3])
        req.append(
            r.to_dict()
        )
    df = pd.DataFrame(req)
    if len(df) < 21:
        return gen_emojis_by_sample(search_count=search_count)
    df = pd.DataFrame(
        np.asarray(df.apply(lambda x: x.to_dict(), axis = 1).head(21).map(lambda x:
            "{}:{}".format(x["words"], x["emoji"])
        ).tolist()).reshape(
            (7, 3)
        )
    )
    return df
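# gen_emojis_by_sample returns a 7x3 pandas DataFrame whose cells are
# "<chengyu>:<emojis>" strings, deduplicated so that no emoji repeats in the
# same puzzle position; it recurses with the same search_count if fewer than
# 21 usable puzzles are found.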
def append_pure_to_input(emoji_outputs, only_emoji_outputs):
    return emoji_outputs + only_emoji_outputs

def outputs_rec_format(emoji_outputs):
    a, b = extract_2(emoji_outputs)
    a, b = a.strip(), b.strip()
    emoji_outputs = "{}:{}".format(a, b)
    return emoji_outputs
css = """ | |
#frame span{ | |
font-size: 1.5em; display: flex; align-items: center; | |
} | |
""" | |
###with gr.Blocks(css="custom.css") as demo: | |
with gr.Blocks(css = css) as demo: | |
title = gr.HTML( | |
"""<h1 align="center"> <font size="+10"> ๐ Emojis to StableDiffusion World ๐ </font> </h1>""", | |
elem_id="title", | |
) | |
frame_list = [] | |
    with gr.Row():
        with gr.Column(label = "Emoji samples (click to use them)"):
            sub_title_0 = gr.Markdown(
                value="### Emoji samples: click one to use it",
                visible=True,
                #elem_id="selected_model",
            )
            #for group, df in emoji_df.groupby("group"):
            for group_order_ele, sub_group_order_list in emoji_order_list:
                #group_first = df["emoji"].iloc[0]
                group_first = group_order_ele
                df_group = emoji_df[emoji_df["emoji"] == group_first]["group"].iloc[0]
                df = emoji_df[emoji_df["group"] == df_group]
                with gr.Tab("{} {}".format(group_first, df_group)):
                    #for ii ,(sub_group, dff) in enumerate(df.groupby("sub_group")):
                    for ii in range(len(sub_group_order_list)):
                        sub_first = sub_group_order_list[ii]
                        df_sub_group = emoji_df[emoji_df["emoji"] == sub_first]["sub_group"].iloc[0]
                        dff = df[df["sub_group"] == df_sub_group]
                        if ii >= sub_cate_num:
                            break
                        sub_first = dff["emoji"].iloc[0]
                        sub_l = dff["emoji"].values.tolist()[:sub_cate_size]
                        sub_l_square = list_to_square(sub_l, sub_col_num)
                        g_frame = gr.DataFrame(sub_l_square,
                            interactive=False, headers = [''] * sub_col_num,
                            #datatype="markdown"
                            elem_id="frame",
                            label = "{} {}".format(sub_first, df_sub_group)
                        )
                        #g_frame = gr.Matrix(sub_l_square, label = sub_first,)
                        frame_list.append(g_frame)
        with gr.Column():
            with gr.Row():
                with gr.Column():
                    sub_title_1 = gr.Markdown(
                        value="### ChengYu-to-Emoji combinations: click one to use it, and don't forget to edit it afterwards so it stays meaningful",
                        visible=True,
                        #elem_id="selected_model",
                    )
                    chengyu_frame = gr.DataFrame(gen_emojis_by_sample(),
                        interactive=False, headers = [''] * sub_col_num,
                        #datatype="markdown"
                        elem_id="chengyu_frame",
                        #label = "ChengYu to Emoji combinations, You can click to use them"
                    )
                    with gr.Row():
                        chengyu_reset_button = gr.Button("Reset ChengYu Emojis",
                            elem_id="run_button")
            with gr.Row():
                append_or_replace = gr.Radio(choices=["replace", "append"],
                    value="replace", label="Append or replace the ChengYu emojis into the prompt below", elem_id="text_radio")
            with gr.Row():
                emoji_outputs = gr.Textbox(label="Emoji Prompt Input", show_label=True, lines=1, max_lines=20,
                    min_width = 256, placeholder="Click emojis on the left, or type some emojis manually", elem_id="prompt",
                    interactive=True, info = "Built by clicking; you can also edit it yourself, see the Examples below")
                clean_button = gr.Button("Clear", elem_id="clear_button")
            '''
            with gr.Column():
                clean_button = gr.Button("Clean Emojis", elem_id="clean_button")
                emoji_outputs_button = gr.Button("Retrieve Images", elem_id="run_button")
            '''
            with gr.Row():
                ### \n ๐ Only Emoji \n ๐โก๏ธ๐ค Emoji to Text \n ๐โ๏ธ๐ ChengYu with Emoji
                with gr.Row():
                    emoji2text_or_not = gr.Radio(choices=["Only Emoji", "Emoji to Text", "ChengYu with Emoji"],
                        value="Only Emoji", label="Use the emojis (and/or text) directly to retrieve images, or translate them to text with the LLM first",
                        elem_id="trans_radio",
                        info = "๐ Only Emoji ----------- ๐โก๏ธ๐ค Emoji to Text ------- ๐โ๏ธ๐ ChengYu with Emoji"
                    )
                    llm_prompt_input = gr.Textbox(label="Emoji-to-Text prompt template used by the LLM", show_label=True,
                        lines=1, max_lines=20,
                        min_width = 256,
                        value="Use following emojis to generate a short description of a scene , use the pattern someone do something , the emojis are {}"
                        , elem_id="prompt",
                        interactive=True)
                    llm_outputs = gr.Textbox(label="Emoji-to-Text prompt produced by the LLM", show_label=True,
                        lines=1, max_lines=20,
                        min_width = 256, placeholder="Emojis described as text", elem_id="prompt",
                        interactive=True)
                    llm_zh_outputs = gr.Textbox(label="Emoji-to-Text prompt produced by the LLM, in Chinese", show_label=True,
                        lines=1, max_lines=20,
                        min_width = 256, placeholder="Emojis described in Chinese", elem_id="prompt",
                        interactive=True)
            '''
            with gr.Row():
                emoji_gen_chengyu_input = gr.Textbox(label="ChengYu Prompt Input", show_label=False, lines=1, max_lines=20,
                    min_width = 256, placeholder="input ChengYu manually, like: ๅฑ็ฒพๅพๆฒป", elem_id="prompt",
                    interactive=True)
            with gr.Row():
                only_emoji_outputs = gr.Textbox(label="Only Emoji Prompt Output", show_label=False, lines=1, max_lines=20,
                    min_width = 256, placeholder="Filter out only emoji characters", elem_id="prompt", interactive=True)
                #gr.Slider(label='Number of images ', minimum = 4, maximum = 20, step = 1, value = 4)]
                append_button = gr.Button("Append Only Emojis to Emoji Prompt Output", elem_id="append_button")
                only_emoji_outputs_button = gr.Button("Retrieve Images Only Emoji", elem_id="run_button")
            with gr.Row():
                #text_button = gr.Button("Retrieve Images", elem_id="run_button")
                emoji_outputs_button = gr.Button("Retrieve Images", elem_id="run_button")
            '''
            with gr.Row():
                emoji_outputs_button = gr.Button("Retrieve Images", elem_id="run_button")
    with gr.Row():
        with gr.Column():
            outputs = gr.Gallery(label='Output gallery', elem_id="gallery").style(grid=5, height=768 - 64 - 32,
                allow_preview=False, label = "Retrieved Images")
            exp_title = gr.HTML(
                """<br/><br/><h5 align="center"> <font size="+1"> Emoji examples live in ๐ travel to StableDiffusion ๐ </font> </h5>""",
                #elem_id="title",
            )
            gr.Examples(
                [
                    ["๐", "Only Emoji"],
                    ["๐ฅ๐ฒ", "Only Emoji"],
                    ["๐ฑ๐ฝ๏ธ๐จ", "Emoji to Text"],
                    ["๐ป๐ฆโ๏ธ๐", "Only Emoji"],
                    ["๐๐ฆถ๐๐ค", "Emoji to Text"],
                    ["๐ฉโ๐ฌ๐ฃโ๐จโ๐จ", "Emoji to Text"],
                    ["ๅๅฑฑ็ซๆตท:๐โก๐ฉโ๐๐งโโ๏ธ", "ChengYu with Emoji"],
                    ["ไผ ไธบไฝณ่ฏ:๐ก๐งฃ๐ฉโ๐ฉโ๐งโ๐ง๐ฉโ๐จ", "ChengYu with Emoji"],
                    ["ๅคง้่ณ็ฎ:๐จ๐ณ๐พ๐ง ๐โโ๏ธ", "ChengYu with Emoji"],
                    #["๐๐๐งฃ๐ธ๐ฐโ๏ธ", "Emoji to Text"],
                    #["๐๐๐๐ถ๐ฅ๐พ๐ต๐โโ๏ธ", "Emoji to Text"],
                    #["๐๐๐๐จโ๐ง๐๐ฅ๐๐", "Emoji to Text"],
                    #["ๆฅๆฑๆฝฎๆฐด่ฟๆตทๅนณ:๐๐บ๐ฆ๐คค๐ชท๐โ๏ธ", "ChengYu with Emoji"],
                    #["ๆๆๆ็จ๏ผไน้นๅ้ฃใ:๐๐โจ๐ซ ๐ฆ๐ฆ๐ฆข๐ชฝ", "ChengYu with Emoji"]
                ],
                inputs = [emoji_outputs, emoji2text_or_not],
                #label = "๐ Examples"
            )
            gr.Examples(
                [
                    #["๐", "Only Emoji"],
                    #["๐ฅ๐ฒ", "Only Emoji"],
                    #["๐ฑ๐ฝ๏ธ๐จ", "Emoji to Text"],
                    #["๐ป๐ฆโ๏ธ๐", "Only Emoji"],
                    #["๐๐ฆถ๐๐ค", "Emoji to Text"],
                    #["๐ฉโ๐ฌ๐ฃโ๐จโ๐จ", "Emoji to Text"],
                    #["ๅๅฑฑ็ซๆตท:๐โก๐ฉโ๐๐งโโ๏ธ", "ChengYu with Emoji"],
                    #["ไผ ไธบไฝณ่ฏ:๐ก๐งฃ๐ฉโ๐ฉโ๐งโ๐ง๐ฉโ๐จ", "ChengYu with Emoji"],
                    #["ๅคง้่ณ็ฎ:๐จ๐ณ๐พ๐ง ๐โโ๏ธ", "ChengYu with Emoji"],
                    ["๐๐๐งฃ๐ธ๐ฐโ๏ธ", "Emoji to Text"],
                    ["๐๐๐๐ถ๐ฅ๐พ๐ต๐โโ๏ธ", "Emoji to Text"],
                    ["๐๐๐๐จโ๐ง๐๐ฅ๐๐", "Emoji to Text"],
                    ["ๆฅๆฑๆฝฎๆฐด่ฟๆตทๅนณ:๐๐บ๐ฆ๐คค๐ชท๐โ๏ธ", "ChengYu with Emoji"],
                    ["ๆๆๆ็จ๏ผไน้นๅ้ฃใ:๐๐โจ๐ซ ๐ฆ๐ฆ๐ฆข๐ชฝ", "ChengYu with Emoji"]
                ],
                inputs = [emoji_outputs, emoji2text_or_not],
                #label = "๐ Examples"
            )
    with gr.Row():
        with gr.Tab(label = "Download"):
            zip_button = gr.Button("Zip Images to Download", elem_id="zip_button")
            downloads = gr.File(label = "Image zipped", elem_id = "zip_file")

    ### ["Only Emoji", "Emoji to Text", "ChengYu with Emoji"]
    emoji2text_or_not.change(
        fn = lambda x: "" if x == "Only Emoji" else (
            "Use following emojis to generate a short description of a scene , use the pattern someone do something , the emojis are {}" if x == "Emoji to Text"
            else "Use following emojis to make a short description of the scene about '{}', the emojis are {}"
        )
        , inputs = emoji2text_or_not, outputs = llm_prompt_input
    )
    for g in frame_list:
        g.select(fn = append_emojis, inputs = [g, emoji_outputs], outputs = emoji_outputs)

    ##
    chengyu_frame.select(fn = append_chengyu_emojis, inputs = [chengyu_frame, emoji_outputs, append_or_replace, emoji2text_or_not],
        outputs = emoji_outputs)
    # these two callbacks declare no inputs, so their handlers must take no arguments
    chengyu_reset_button.click(fn = lambda: gen_emojis_by_sample(), outputs = chengyu_frame)
    clean_button.click(fn = lambda: "", outputs = emoji_outputs)
    #emoji_outputs.change(fn = extract_emojis, inputs = emoji_outputs, outputs = only_emoji_outputs)
    '''
    emoji_outputs.change(
        fn = outputs_rec_format
        , inputs = emoji_outputs, outputs = emoji_outputs)
    '''
    '''
    emoji_gen_chengyu_input.change(fn = gen_emojis_by_chengyu, inputs = emoji_gen_chengyu_input,
        outputs = only_emoji_outputs)
    append_button.click(fn = append_pure_to_input, inputs = [emoji_outputs, only_emoji_outputs],
        outputs = emoji_outputs)
    '''
    #emoji_outputs_button.click(lexica, inputs=emoji_outputs, outputs=outputs)
    emoji_outputs_button.click(search,
        inputs=[emoji_outputs, emoji2text_or_not, llm_prompt_input],
        outputs=[outputs, llm_outputs, llm_zh_outputs])

    zip_button.click(
        zip_ims, inputs = outputs, outputs=downloads
    )

demo.launch(server_name="0.0.0.0")