# GUI-Lasagne-L1/scripts/make_ocr.py
import os
import os.path as osp
import json
import random
import transformers
from tqdm import tqdm
from multiprocessing import Pool
from functools import partial
from copy import deepcopy
from data_class import domNode, nodeBbox
MAX_TOTAL_LEN = 800   # token budget for one image's annotation text
MAX_TOKEN_LEN = 50    # per-node upper bound on encoded text length
MIN_TOKEN_LEN = 0     # per-node lower bound (0 disables the check)
MAX_IoU = 0           # selected boxes must not overlap at all
NODE_SEPARATOR_TOKEN = '<node_separator>'

TEXT2BBOX_TEMPLATE = [
    "Where is the given content?",
    # Other template questions here...
]

BBOX2TEXT_TEMPLATE = [
    "Can you give me a description of the region in the image?",
    # Other template questions here...
]
class NodewithOCR(domNode):
    def __init__(self, id, info, children, father=None, tokenizer=None, img_w=None, img_h=None, task=""):
        super().__init__(id, info, children, father, tokenizer, img_w=img_w, img_h=img_h, task=task)
        self.struct_text: str = self.get_struct_text()

    def get_struct_text(self) -> str:
        # Leaf nodes contribute their own text; images and SVGs are tagged so
        # they can be told apart from plain text.
        if self.is_leaf():
            struct_text = self.info.text.strip()
            if self.is_img():
                if not struct_text:
                    return ""
                if self.info.type == 'img[svg]':
                    struct_text = '[svg]' + struct_text
                else:
                    struct_text = '[image]' + struct_text
            return struct_text
        # Inner nodes join the non-empty texts of their children.
        children_text = []
        for child in self.children:
            child_text = child.struct_text
            if child_text:
                children_text.append(child_text)
        return NODE_SEPARATOR_TOKEN.join(children_text)

    def get_context_text(self, root_text: str) -> None:
        # Walk up to three ancestors until the current text occurs exactly once
        # in the page text, then attach that ancestor's text as disambiguating
        # context plus a functional tag (input box / clickable / pure text).
        self.context_text: str = ""
        cur_node = self
        for _ in range(3):
            if root_text.count(cur_node.struct_text) == 1:
                func = ''
                if self.info.func == 'type':
                    func += '(input box)'
                elif self.info.func == 'click':
                    func += '(clickable)'
                elif self.info.func == 'text':
                    func += '(pure text)'
                if cur_node == self:
                    context = ""
                else:
                    context = f'(context: "{cur_node.struct_text}")'
                self.context_text = self.struct_text + func + context
                return
            if not cur_node.father:
                return
            cur_node = cur_node.father
    # Absolute corners (x1, y1, x2, y2), normalized to [000, 999].
    # IMG_W / IMG_H / BLOCK_NUM_* are module-level globals set in __main__.
    def bbox2str(self) -> str:
        x1_rel = round(max(0, (self.info.bbox.x1 / IMG_W)) * 1000)
        y1_rel = round(max(0, (self.info.bbox.y1 / IMG_H)) * 1000)
        x2_rel = round(min(0.999, (self.info.bbox.x2 / IMG_W)) * 1000)
        y2_rel = round(min(0.999, (self.info.bbox.y2 / IMG_H)) * 1000)
        coords = [x1_rel, y1_rel, x2_rel, y2_rel]
        bbox_str = ["{:03}".format(coord) for coord in coords]
        bbox_str = "[" + ", ".join(bbox_str) + "]"
        return bbox_str

    # Corners (x1, y1, x2, y2) in SeeClick format: two-decimal floats.
    def bbox2str_2(self) -> str:
        x1_rel = round(max(0, (self.info.bbox.x1 / IMG_W)), 2)
        y1_rel = round(max(0, (self.info.bbox.y1 / IMG_H)), 2)
        x2_rel = round(min(0.999, (self.info.bbox.x2 / IMG_W)), 2)
        y2_rel = round(min(0.999, (self.info.bbox.y2 / IMG_H)), 2)
        coords = [x1_rel, y1_rel, x2_rel, y2_rel]
        bbox_str = ["{:.2f}".format(coord) for coord in coords]
        bbox_str = "(" + ",".join(bbox_str) + ")"
        return bbox_str

    # Center-size box (center_x, center_y, w, h), normalized to [000, 999].
    def bbox2str_ct(self) -> str:
        cx_rel = round(min(0.999, max(0, (self.info.bbox.cx / IMG_W))) * 1000)
        cy_rel = round(min(0.999, max(0, (self.info.bbox.cy / IMG_H))) * 1000)
        w_rel = round(min(0.999, max(0, (self.info.bbox.width / IMG_W))) * 1000)
        h_rel = round(min(0.999, max(0, (self.info.bbox.height / IMG_H))) * 1000)
        coords = [cx_rel, cy_rel, w_rel, h_rel]
        bbox_str = ["{:03}".format(coord) for coord in coords]
        bbox_str = "[" + ", ".join(bbox_str) + "]"
        return bbox_str

    # Block-relative box [block_idx, center_x, center_y, w, h]: the image is
    # split into a block_num_w x block_num_h grid and the box is encoded
    # relative to the block containing its center.
    def bbox2str_ct_block(self, block_num_w=2, block_num_h=2) -> str:
        block_w = IMG_W // block_num_w
        block_h = IMG_H // block_num_h
        block_xi = max(0, min(IMG_W - 1, self.info.bbox.cx)) // block_w
        block_yi = max(0, min(IMG_H - 1, self.info.bbox.cy)) // block_h
        block_idx = block_yi * block_num_w + block_xi
        cx_block = self.info.bbox.cx % block_w
        cy_block = self.info.bbox.cy % block_h
        cx_rel = round(min(0.999, max(0, (cx_block / block_w))) * 1000)
        cy_rel = round(min(0.999, max(0, (cy_block / block_h))) * 1000)
        w_rel = round(min(0.999, max(0, (self.info.bbox.width / block_w))) * 1000)
        h_rel = round(min(0.999, max(0, (self.info.bbox.height / block_h))) * 1000)
        coords = [cx_rel, cy_rel, w_rel, h_rel]
        bbox_str = [str(block_idx)] + ["{:03}".format(coord) for coord in coords]
        bbox_str = "[" + ", ".join(bbox_str) + "]"
        return bbox_str

    # Center point (x, y) in SeeClick format: two-decimal floats.
    def bbox2str_ct_2(self) -> str:
        cx_rel = round(min(0.999, max(0, (self.info.bbox.cx / IMG_W))), 2)
        cy_rel = round(min(0.999, max(0, (self.info.bbox.cy / IMG_H))), 2)
        coords = [cx_rel, cy_rel]
        bbox_str = ["{:.2f}".format(coord) for coord in coords]
        bbox_str = "(" + ",".join(bbox_str) + ")"
        return bbox_str
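
# A minimal standalone sketch of the block-relative encoding implemented by
# bbox2str_ct_block above, with hardcoded example values. _demo_block_encoding
# and its numbers are illustrative only and are not used by the pipeline.
def _demo_block_encoding(img_w=1920, img_h=1080, block_num_w=3, block_num_h=2):
    cx, cy, w, h = 1000, 300, 200, 50  # hypothetical box center and size
    block_w, block_h = img_w // block_num_w, img_h // block_num_h
    block_xi = max(0, min(img_w - 1, cx)) // block_w
    block_yi = max(0, min(img_h - 1, cy)) // block_h
    block_idx = block_yi * block_num_w + block_xi
    coords = [
        round(min(0.999, max(0, (cx % block_w) / block_w)) * 1000),
        round(min(0.999, max(0, (cy % block_h) / block_h)) * 1000),
        round(min(0.999, max(0, w / block_w)) * 1000),
        round(min(0.999, max(0, h / block_h)) * 1000),
    ]
    # -> "[1, 562, 556, 312, 093]" for the values above
    return "[" + ", ".join([str(block_idx)] + ["{:03}".format(c) for c in coords]) + "]"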
def init_context_text(node: NodewithOCR, root_text: str):
    node.get_context_text(root_text)
    for child in node.children:
        init_context_text(child, root_text)
tokenizer = transformers.AutoTokenizer.from_pretrained(
    "path/to/your/tokenizer",
    trust_remote_code=True,
)
tokenizer.add_tokens([NODE_SEPARATOR_TOKEN], special_tokens=True)
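# Note: this script only uses the tokenizer to count lengths; a model later
# trained with NODE_SEPARATOR_TOKEN would additionally need its embeddings
# resized, e.g. model.resize_token_embeddings(len(tokenizer)) in HF Transformers.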
def get_str_token_len(str_: str) -> int:
    return len(tokenizer.encode(str_))
def collect_nodes(dom: NodewithOCR, node_list: list[NodewithOCR]) -> list:
    if dom.is_valid:
        node_list.append(dom)
    for child in dom.children:
        collect_nodes(child, node_list)
    return node_list
def select_node(node, node_text, node_bbox: nodeBbox, selected_node_list: list[domNode], total_len: int) -> tuple[bool, int]:
    # Input boxes are only useful with disambiguating context.
    if node.info.func == 'type':
        if not node.context_text:
            return False, total_len
    if not node_text:
        return False, total_len
    try:
        token_len = get_str_token_len(node_text)
    except Exception:
        return False, total_len
    if token_len > MAX_TOKEN_LEN or token_len < MIN_TOKEN_LEN:
        return False, total_len
    # Charge the budget a rough per-item overhead (numbering, bbox string)
    # on top of the text itself.
    total_len += token_len + 20
    # Reject nodes whose box overlaps any already selected one.
    if selected_node_list:
        selected_bbox_list = [_node.info.bbox for _node in selected_node_list]
        max_IoUs = node_bbox.get_max_IoU(selected_bbox_list)
        if max_IoUs > MAX_IoU:
            return False, total_len
    return True, total_len
def get_bbox_text(node_list: list[NodewithOCR], context=False, is_point=True, is_seeclick=False):
    # Bucket nodes by function so each bucket is shuffled independently, with
    # input boxes and pointer targets getting first claim on the token budget.
    input_node_list = [_node for _node in node_list if _node.info.func == 'type']
    pointer_node_list = [_node for _node in node_list if _node.info.pointer]
    click_node_list = [_node for _node in node_list if not _node.info.pointer and _node.info.func == 'click']
    text_node_list = [_node for _node in node_list if _node.info.func == 'text']
    input_node_list_copy = deepcopy(input_node_list)
    pointer_node_list_copy = deepcopy(pointer_node_list)
    click_node_list_copy = deepcopy(click_node_list)
    text_node_list_copy = deepcopy(text_node_list)
    random.shuffle(input_node_list_copy)
    random.shuffle(pointer_node_list_copy)
    random.shuffle(click_node_list_copy)
    random.shuffle(text_node_list_copy)
    node_list_copy = input_node_list_copy + pointer_node_list_copy + click_node_list_copy + text_node_list_copy
    total_len = 0
    selected_node_list = []
    for node in node_list_copy:
        node_text = node.context_text if context else node.struct_text
        keep, total_len = select_node(node, node_text, node.info.bbox, selected_node_list, total_len)
        if total_len > MAX_TOTAL_LEN:
            break
        if keep:
            selected_node_list.append(node)
    random.shuffle(selected_node_list)
    bbox_str_list = []
    text_str_list = []
    for i, node in enumerate(selected_node_list):
        node_text = node.context_text if context else node.struct_text
        if not is_seeclick:
            bbox_str_list.append(f"{i+1}. {node.bbox2str_ct_block(block_num_w=BLOCK_NUM_W, block_num_h=BLOCK_NUM_H)}")
        else:
            if is_point:
                bbox_str_list.append(f"{i+1}. {node.bbox2str_ct_2()}")
            else:
                bbox_str_list.append(f"{i+1}. {node.bbox2str_2()}")
        text_str_list.append(f"{i+1}. {node_text}")
    bbox_str = '\n'.join(bbox_str_list)
    text_str = '\n'.join(text_str_list)
    return bbox_str, text_str
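
# The two returned strings are aligned numbered lists, e.g. (hypothetical values):
#   bbox_str: "1. [1, 562, 556, 312, 093]\n2. [0, 120, 480, 200, 040]"
#   text_str: "1. Search(input box)\n2. Sign in(clickable)"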
def make_bbox2text(node_list: list[NodewithOCR], img_path: str, is_point=True, is_seeclick=False):
    bbox_str, text_str = get_bbox_text(node_list, context=False, is_point=is_point, is_seeclick=is_seeclick)
    if not bbox_str or not text_str:
        return
    prompt = bbox_str + '\n' + random.choice(BBOX2TEXT_TEMPLATE)
    return {
        "image": img_path,
        "conversations": [
            {
                "from": "human",
                "value": f"<image>\n{prompt}"
            },
            {
                "from": "gpt",
                "value": text_str
            }
        ]
    }
def make_text2bbox(node_list: list[NodewithOCR], img_path: str, is_point=True, is_seeclick=False):
    bbox_str, text_str = get_bbox_text(node_list, context=True, is_point=is_point, is_seeclick=is_seeclick)
    if not bbox_str or not text_str:
        return
    prompt = text_str + '\n' + random.choice(TEXT2BBOX_TEMPLATE) + '(bbox: [block_index, cx, cy, w, h])'
    return {
        "image": img_path,
        "conversations": [
            {
                "from": "human",
                "value": f"<image>\n{prompt}"
            },
            {
                "from": "gpt",
                "value": bbox_str
            }
        ]
    }
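
# Each maker returns one LLaVA-style conversation record; a hypothetical
# text2bbox example:
# {"image": ".../screenshot_0.png",
#  "conversations": [
#    {"from": "human",
#     "value": "<image>\n1. Search(input box)\nWhere is the given content?(bbox: [block_index, cx, cy, w, h])"},
#    {"from": "gpt", "value": "1. [1, 562, 556, 312, 093]"}]}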
def write_ann(ann_list, ann_path, mode='w', save_ratio=1.0):
    ann_dir = osp.dirname(ann_path)
    if ann_dir and not osp.exists(ann_dir):
        os.makedirs(ann_dir, exist_ok=True)
    # save_ratio subsamples at the page level: the whole list is either
    # written or skipped.
    if save_ratio == 1.0 or random.random() < save_ratio:
        with open(ann_path, mode, encoding='utf-8') as f:
            for i in range(len(ann_list)):
                ann_list[i]['id'] = i
                ann_str = json.dumps(ann_list[i], ensure_ascii=False)
                f.write(ann_str + '\n')
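
# Typical use (path illustrative): write_ann(anns, 'out/ocr.jsonl', mode='a', save_ratio=0.5)
# Note that 'id' restarts from 0 on every call, so when appending, ids are
# only unique within a single page's batch.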
def single_proc(mode, data_dir, language, sub_dir):
    sub_path = osp.join(data_dir, sub_dir)
    screen_shot_dir = osp.join(sub_path, 'screenshot')
    dom_sub_dir = 'dom_svg' if language == 'zh' else 'dom_svg_en'
    dom_dir = osp.join(sub_path, dom_sub_dir)
    html_dir = osp.join(sub_path, 'html')
    html_path = osp.join(html_dir, 'html_0.html')
    if not osp.exists(html_path):
        return []
    with open(html_path) as f:
        html_content = f.readline().strip()
    # Skip blacklisted pages.
    if html_content in ['https://www.qq.com/', 'https://music.163.com/']:
        return []
    all_annotations = []
    for img_file in os.listdir(screen_shot_dir):
        if 'tmp' in img_file:
            continue
        # Screenshots are assumed to be named '<prefix>_<id>.<ext>', paired
        # with 'dom_<id>.json' in the DOM directory.
        file_id = img_file.split('.')[0].split('_')[1]
        json_path = osp.join(dom_dir, f'dom_{file_id}.json')
        img_path = osp.join(screen_shot_dir, img_file)
        if not osp.exists(json_path):
            continue
        with open(json_path) as f:
            dom_data = json.load(f)
        try:
            dom_node = NodewithOCR(tokenizer=tokenizer, img_w=IMG_W, img_h=IMG_H, task=mode, **dom_data)
        except ValueError as e:
            print(f"JSON data error: {e}\n{json_path}")
            continue
        init_context_text(dom_node, dom_node.struct_text)
        node_list = collect_nodes(dom_node, [])
        if mode == 'bbox2text':
            ann = make_bbox2text(node_list, img_path, is_seeclick=False, is_point=False)
        elif mode == 'text2bbox':
            ann = make_text2bbox(node_list, img_path, is_seeclick=False, is_point=False)
        elif mode == 'text2bbox_seeclick':
            ann = make_text2bbox(node_list, img_path, is_seeclick=True, is_point=False)
        elif mode == 'bbox2text_seeclick':
            ann = make_bbox2text(node_list, img_path, is_seeclick=True, is_point=False)
        elif mode == 'text2point_seeclick':
            ann = make_text2bbox(node_list, img_path, is_seeclick=True, is_point=True)
        elif mode == 'point2text_seeclick':
            ann = make_bbox2text(node_list, img_path, is_seeclick=True, is_point=True)
        else:
            raise ValueError(f"Unknown mode: {mode}")
        if ann is not None:
            all_annotations.append(ann)
    return all_annotations
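
# Expected layout under data_dir (inferred from the paths above):
#   <data_dir>/<sub_dir>/screenshot/<prefix>_<id>.<ext>
#   <data_dir>/<sub_dir>/dom_svg/dom_<id>.json      (language='zh')
#   <data_dir>/<sub_dir>/dom_svg_en/dom_<id>.json   (language='en')
#   <data_dir>/<sub_dir>/html/html_0.html           (first line holds the page URL)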
def main_multi(data_dir, dst_path, mode, language='zh', save_ratio=1.0):
    sub_dir_list = os.listdir(data_dir)
    single_proc_partial = partial(single_proc, mode, data_dir, language)
    with Pool(processes=16) as pool:
        for res in tqdm(pool.imap_unordered(single_proc_partial, sub_dir_list), total=len(sub_dir_list)):
            write_ann(res, dst_path, mode='a', save_ratio=save_ratio)
if __name__ == '__main__':
    mode = 'text2bbox'
    # mode = 'bbox2text'

    # ZH config (swap with the EN block below and pass language='zh'):
    # BLOCK_NUM_W = 2
    # BLOCK_NUM_H = 2
    # IMG_W = 1120
    # IMG_H = 1120
    # data_dir = 'data_20240617'
    # dst_path = 'xxx.jsonl'

    # EN config
    BLOCK_NUM_W = 3
    BLOCK_NUM_H = 2
    IMG_W = 1920
    IMG_H = 1080
    data_dir = 'data_20240624'
    dst_path = 'xxx.jsonl'
    main_multi(data_dir, dst_path, mode, language='en')
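    # Other supported modes (see the dispatch in single_proc): 'bbox2text',
    # 'text2bbox_seeclick', 'bbox2text_seeclick', 'text2point_seeclick',
    # 'point2text_seeclick'.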