Commit e41d92c (verified) by huangzhiyuan · Parent: 7177cdc

update scripts for parsing the data

scripts/data_class.py ADDED
@@ -0,0 +1,135 @@
# Data classes for parsing the DOM-tree JSON dumps that accompany each screenshot.

import numpy as np


IMG_FLAG = "img"
TEXT_FLAG = "text"


class nodeBbox:
    """Pixel-space bounding box of a DOM node; invalid if it falls outside the screenshot or has non-positive area."""

    def __init__(self, img_w, img_h, left=0, top=0, width=0, height=0) -> None:
        assert img_w is not None
        assert img_h is not None

        self.left: int = round(left)
        self.top: int = round(top)
        self.width: int = round(width)
        self.height: int = round(height)

        self.x1: int = self.left
        self.y1: int = self.top
        self.x2: int = self.left + self.width
        self.y2: int = self.top + self.height

        self.cx: int = round((self.x1 + self.x2) / 2)
        self.cy: int = round((self.y1 + self.y2) / 2)

        self.is_valid: bool = True

        if not (0 <= self.x1 <= img_w and 0 <= self.y1 <= img_h and 0 <= self.x2 <= img_w and 0 <= self.y2 <= img_h):
            self.is_valid = False

        if self.x1 >= self.x2 or self.y1 >= self.y2:
            self.is_valid = False

    def get_max_IoU(self, others) -> float:
        # Despite the name, this returns the maximum intersection *area* (in pixels)
        # with the other boxes; callers compare it against 0 to reject any overlap.
        other_bbox = np.array([[o.x1, o.y1, o.x2, o.y2] for o in others])

        other_bbox[:, 0:1] = np.clip(other_bbox[:, 0:1], a_min=self.x1, a_max=None)
        other_bbox[:, 1:2] = np.clip(other_bbox[:, 1:2], a_min=self.y1, a_max=None)
        other_bbox[:, 2:3] = np.clip(other_bbox[:, 2:3], a_min=None, a_max=self.x2)
        other_bbox[:, 3:4] = np.clip(other_bbox[:, 3:4], a_min=None, a_max=self.y2)

        inter_w = np.clip(other_bbox[:, 2] - other_bbox[:, 0], a_min=0, a_max=None)
        inter_h = np.clip(other_bbox[:, 3] - other_bbox[:, 1], a_min=0, a_max=None)

        IoU = inter_w * inter_h

        return np.max(IoU)


class nodeInfo:
    """Sanitized view of the raw per-node fields in the DOM JSON."""

    def __init__(self, name=None, type=None, func=None, text=None, title=None, bbox=None, point=None, pointer=False, tokenizer=None, img_w=None, img_h=None) -> None:
        dom_name = name if isinstance(name, str) else ""
        dom_type = type if isinstance(type, str) else ""
        dom_func = func if isinstance(func, str) else ""
        dom_text = text if isinstance(text, str) else ""
        dom_title = title if isinstance(title, str) else ""
        # Fall back to an (invalid) zero-size box when no bbox dict is provided.
        dom_bbox = nodeBbox(img_w=img_w, img_h=img_h, **bbox) if isinstance(bbox, dict) else nodeBbox(img_w=img_w, img_h=img_h)
        dom_point = point if isinstance(point, dict) else {"x": 0, "y": 0}
        dom_pointer = pointer if isinstance(pointer, bool) else False
        is_valid = dom_bbox.is_valid

        self.name: str = dom_name.lower()
        self.type: str = dom_type
        self.func: str = dom_func
        self.text: str = dom_text
        self.title: str = dom_title
        self.bbox: nodeBbox = dom_bbox
        self.point: dict = dom_point
        self.pointer: bool = dom_pointer
        self.is_valid: bool = is_valid


class domNode:
    """A DOM-tree node; valid only if its own bbox, its alt text (for images), and all descendants are valid."""

    def __init__(self, id, info, children, father=None, tokenizer=None, img_w=None, img_h=None, task=""):
        self.id: str = id
        self.info: nodeInfo = nodeInfo(img_w=img_w, img_h=img_h, tokenizer=tokenizer, **info)
        self.father: domNode = father

        is_valid = self.info.is_valid

        if self.is_img() and not self.info.text:
            is_valid = False

        children_nodes = []
        for child in children:
            child_node = type(self)(father=self, tokenizer=tokenizer, img_w=img_w, img_h=img_h, task=task, **child)
            children_nodes.append(child_node)
            if not child_node.is_valid:
                is_valid = False

        self.children: list[domNode] = children_nodes
        self.is_valid: bool = is_valid

        # A 'text' node that directly contains a text leaf is re-labelled 'plain'.
        for child in self.children:
            if child.is_leaf() and child.info.type == 'text' and self.info.func == 'text':
                self.info.func = 'plain'
                break

        # Coarse element class used when serializing the node into pseudo-HTML.
        self.class_for_caption: str = ""
        if self.info.name in ['a', 'button']:
            self.class_for_caption = self.info.name
        elif self.info.name == 'input':
            if self.info.func == 'click':
                self.class_for_caption = 'button'
            elif self.info.func == 'type':
                self.class_for_caption = 'input'
        elif self.img_type():
            if self.img_type() in ['img', 'bgimg']:
                self.class_for_caption = 'img'
            elif self.img_type() in ['svg', 'fa']:
                self.class_for_caption = 'svg'
        elif self.is_text():
            self.class_for_caption = 'text'

    def is_leaf(self) -> bool:
        return len(self.children) == 0

    def has_single_text_child(self) -> bool:
        return len(self.children) == 1 and self.children[0].info.type == 'text'

    def is_func_leaf(self) -> bool:
        if self.is_leaf():
            return not self.is_text()
        return self.has_single_text_child()

    def is_text(self) -> bool:
        return self.info.type == TEXT_FLAG

    def is_img(self) -> bool:
        return self.info.type.startswith(IMG_FLAG)

    def img_type(self) -> str:
        # "img[svg]" -> "svg", "img[bgimg]" -> "bgimg", etc.
        if not self.is_img():
            return ""
        return self.info.type[len(IMG_FLAG) + 1: -1]
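These classes are the parsing layer shared by both generation scripts: each DOM JSON file is a nested dict with at least an `id`, an `info` dict (tag name, type, func, text, bbox, ...), and a `children` list. A minimal sketch of how one node is parsed, using a hypothetical single-node tree and a 1920x1080 screenshot:

from data_class import domNode

# Hypothetical one-node export; the real files nest many children.
node_dict = {
    "id": "0",
    "info": {
        "name": "A",
        "type": "text",
        "func": "click",
        "text": "Home",
        "bbox": {"left": 10, "top": 20, "width": 80, "height": 24},
    },
    "children": [],
}

root = domNode(img_w=1920, img_h=1080, **node_dict)
print(root.is_valid)            # True: the box lies inside the image and has positive area
print(root.class_for_caption)   # "a": anchors keep their own tag name
print(root.info.bbox.x2, root.info.bbox.y2)  # 90 44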
scripts/make_dom.py ADDED
@@ -0,0 +1,268 @@
# Build bbox2dom samples: a region box is paired with a pseudo-HTML description
# of the DOM subtree it covers (see scripts/readme.txt).

import os
import os.path as osp
import json
import random
import transformers
from tqdm import tqdm
from multiprocessing import Pool
from functools import partial

from data_class import domNode, nodeBbox


MAX_TOKEN_LEN = 800
MIN_TOKEN_LEN = 200
MAX_IoU = 0

HTML2BBOX_TEMPLATE = [  # not used in this script
    "Where is the given dom tree?",
    # Other template questions here...
]

BBOX2HTML_TEMPLATE = [
    "Can you give me a description of the region <objs> in image?",
    # Other template questions here...
]


class NodewithHtml(domNode):
    def __init__(self, id, info=None, children=None, father=None, tokenizer=None, img_w=None, img_h=None, task=""):
        super().__init__(id, info, children, father, tokenizer, img_w=img_w, img_h=img_h, task=task)
        self.dom_xml = self.get_dom_xml(mode="dom_xml")
        self.dom_xml_with_bbox = self.get_dom_xml(mode="dom_xml_with_bbox")

    # Relative [x1, y1, x2, y2], each scaled to 0..999.
    def bbox2str(self) -> str:
        x1_rel = round(max(0, (self.info.bbox.x1 / IMG_W)) * 1000)
        y1_rel = round(max(0, (self.info.bbox.y1 / IMG_H)) * 1000)
        x2_rel = round(min(0.999, (self.info.bbox.x2 / IMG_W)) * 1000)
        y2_rel = round(min(0.999, (self.info.bbox.y2 / IMG_H)) * 1000)

        coords = [x1_rel, y1_rel, x2_rel, y2_rel]
        bbox_str = ["{:03}".format(coord) for coord in coords]
        bbox_str = "[" + ", ".join(bbox_str) + "]"

        return bbox_str

    # Relative [cx, cy], each scaled to 0..999.
    def center2str(self) -> str:
        cx = round(max(0, min(0.999, self.info.bbox.cx / IMG_W)) * 1000)
        cy = round(max(0, min(0.999, self.info.bbox.cy / IMG_H)) * 1000)

        point = [cx, cy]
        point_str = ["{:03}".format(p) for p in point]
        point_str = "[" + ", ".join(point_str) + "]"

        return point_str

    def node2xml_withbbox(self) -> str:
        text = self.info.text
        bbox_str = self.bbox2str()

        if self.is_text() or self.has_single_text_child():
            pseudo_html_line = f"<text box={bbox_str} content=\"{text}\">"
        elif self.is_img():
            if self.class_for_caption == 'img':
                pseudo_html_line = f"<img box={bbox_str} alt={text}>"
            elif self.class_for_caption == 'svg':
                pseudo_html_line = f"<svg box={bbox_str} alt={text}>"
            else:
                return ""
        elif self.info.func == 'type':
            pseudo_html_line = f"<input box={bbox_str}, content=\"{text}\">"
        else:
            if self.is_leaf():
                return ""
            else:
                pseudo_html_line = f"<plain box={bbox_str}>"

        return pseudo_html_line

    def node2xml(self) -> str:
        text = self.info.text
        bbox_str = self.bbox2str()
        center_str = self.center2str()

        if self.is_text() or self.has_single_text_child():
            pseudo_html_line = f"<text content=\"{text}\">"
        elif self.is_img():
            if self.class_for_caption == 'img':
                pseudo_html_line = f"<img alt={text}>"
            elif self.class_for_caption == 'svg':
                pseudo_html_line = f"<svg alt={text}>"
            else:
                return ""
        elif self.info.func == 'type':
            pseudo_html_line = f"<input center={center_str}, content=\"{text}\">"
        else:
            if self.is_leaf():
                return ""
            else:
                pseudo_html_line = "<plain>"

        return pseudo_html_line

    def get_dom_xml(self, mode: str):
        # Serialize this subtree into an indented pseudo-HTML block; an empty string
        # means the subtree cannot be rendered and the node should be skipped.
        xml_func = self.node2xml if mode == 'dom_xml' else self.node2xml_withbbox

        self_dom_xml = xml_func()
        if not self_dom_xml:
            return ""

        dom_xml_list = [self_dom_xml]

        if not self.is_func_leaf():
            for child in self.children:
                child_xml = getattr(child, mode)
                if not child_xml:
                    return ""
                child_xml_list = child_xml.strip().split('\n')
                child_xml_list = ['\t' + line for line in child_xml_list]
                child_xml_fmt = '\n'.join(child_xml_list)
                dom_xml_list.append(child_xml_fmt)

        dom_xml = '\n'.join(dom_xml_list)
        return dom_xml


tokenizer = transformers.AutoTokenizer.from_pretrained(
    "path/to/your/tokenizer",
    trust_remote_code=True,
)


def get_str_token_len(str_: str) -> int:
    return len(tokenizer.encode(str_))


def collect_nodes(dom: NodewithHtml, node_list: list[NodewithHtml]) -> list:
    if dom.is_valid:
        node_list.append(dom)

    for child in dom.children:
        collect_nodes(child, node_list)

    return node_list


def select_node(xml: str, bbox: nodeBbox, selected_dom_list: list[nodeBbox]) -> bool:
    # Keep a node only if its pseudo-HTML is non-empty, within the token budget,
    # and its box does not overlap any already-selected box.
    if not xml:
        return False

    try:
        token_len = get_str_token_len(xml)
    except Exception:
        return False
    if token_len > MAX_TOKEN_LEN or token_len < MIN_TOKEN_LEN:
        return False

    if selected_dom_list:
        max_IoUs = bbox.get_max_IoU(selected_dom_list)
        if max_IoUs > MAX_IoU:
            return False

    return True


def make_qa(node_list: list[NodewithHtml], img_path: str):
    random.shuffle(node_list)

    dom_xml_with_bbox_list = []

    ann_list = []
    for node in node_list:
        if select_node(node.dom_xml_with_bbox, node.info.bbox, dom_xml_with_bbox_list):
            bbox2html_prompt = random.choice(BBOX2HTML_TEMPLATE)
            prompt = bbox2html_prompt.replace("<objs>", node.bbox2str())
            caption = node.dom_xml_with_bbox

            ann = {
                "image": img_path,
                "conversations": [
                    {
                        "from": "human",
                        "value": f"<image>\n{prompt}"
                    },
                    {
                        "from": "gpt",
                        "value": caption
                    }
                ]
            }
            ann_list.append(ann)
            dom_xml_with_bbox_list.append(node.info.bbox)

    return ann_list


def write_ann(ann_list, ann_path):
    ann_dir = osp.dirname(ann_path)
    if ann_dir and not osp.exists(ann_dir):
        os.makedirs(ann_dir, exist_ok=True)

    with open(ann_path, 'a', encoding='utf-8') as f:
        for i in range(len(ann_list)):
            ann_list[i]['id'] = i

            ann_str = json.dumps(ann_list[i], ensure_ascii=False)
            f.write(ann_str + '\n')


def single_proc(data_dir, language, sub_dir):
    sub_path = osp.join(data_dir, sub_dir)
    screen_shot_dir = osp.join(sub_path, 'screenshot')
    dom_sub_dir = 'dom_svg' if language == 'zh' else 'dom_svg_en'
    dom_dir = osp.join(sub_path, dom_sub_dir)
    html_dir = osp.join(sub_path, 'html')

    html_path = osp.join(html_dir, 'html_0.html')
    if not osp.exists(html_path):
        return []
    html_content = open(html_path).readline().strip()
    if html_content in ['https://www.qq.com/', 'https://music.163.com/']:
        return []

    all_annotations = []
    for img_file in os.listdir(screen_shot_dir):
        if 'tmp' in img_file:
            continue
        file_ids = img_file.split('.')[0].split('_')[1]
        json_path = osp.join(dom_dir, f'dom_{file_ids}.json')
        img_path = osp.join(screen_shot_dir, img_file)
        if not osp.exists(json_path):
            continue

        dom_data = json.load(open(json_path))
        try:
            dom_node = NodewithHtml(tokenizer=tokenizer, img_w=IMG_W, img_h=IMG_H, task='dom', **dom_data)
        except ValueError as e:
            print(f"Json data error: {e}\n{json_path}")
            continue

        node_list = collect_nodes(dom_node, [])
        annotations = make_qa(node_list, img_path)
        all_annotations.extend(annotations)

    return all_annotations


def main_multi(data_dir, dst_path, language):
    pool = Pool(processes=16)

    sub_dir_list = os.listdir(data_dir)
    single_proc_partial = partial(single_proc, data_dir, language)

    num = 0
    for res in tqdm(pool.imap_unordered(single_proc_partial, sub_dir_list), total=len(sub_dir_list)):
        write_ann(res, dst_path)
        num += len(res)
    print(f"wrote {num} annotations to {dst_path}")


# Module-level config referenced by the serialization methods above.
IMG_W = 1920
IMG_H = 1080

data_dir = 'data_20240624'
dst_path = 'xxx.jsonl'


if __name__ == '__main__':
    main_multi(data_dir, dst_path, 'en')
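For a quick check without the multiprocessing pool, the same pieces can be driven on a single DOM JSON file. A minimal sketch, assuming the tokenizer path above has been filled in; `site_0`, `dom_0.json`, `screenshot_0.png`, and the output path are placeholder names:

import json

from make_dom import NodewithHtml, collect_nodes, make_qa, write_ann

dom_data = json.load(open("data_20240624/site_0/dom_svg_en/dom_0.json"))
root = NodewithHtml(img_w=1920, img_h=1080, task="dom", **dom_data)

node_list = collect_nodes(root, [])       # keeps only nodes flagged valid
ann_list = make_qa(node_list, "data_20240624/site_0/screenshot/screenshot_0.png")
write_ann(ann_list, "out/bbox2dom.jsonl")  # one {"image", "conversations", "id"} object per line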
scripts/make_ocr.py ADDED
@@ -0,0 +1,402 @@
# Build text2bbox / bbox2text samples: numbered node texts are paired with numbered
# relative boxes in one of several coordinate formats (see scripts/readme.txt).

import os
import os.path as osp
import json
import random
import transformers
from tqdm import tqdm
from multiprocessing import Pool
from functools import partial
from copy import deepcopy

from data_class import domNode, nodeBbox


MAX_TOTAL_LEN = 800
MAX_TOKEN_LEN = 50
MIN_TOKEN_LEN = 0
MAX_IoU = 0

# Special token used to join the texts of sibling nodes.
NODE_SEPARATOR_TOKEN = '<node_separator>'

TEXT2BBOX_TEMPLATE = [
    "Where is the given content?",
    # Other template questions here...
]

BBOX2TEXT_TEMPLATE = [
    "Can you give me a description of the region in image?",
    # Other template questions here...
]


class NodewithOCR(domNode):
    def __init__(self, id, info, children, father=None, tokenizer=None, img_w=None, img_h=None, task=""):
        super().__init__(id, info, children, father, tokenizer, img_w=img_w, img_h=img_h, task=task)
        self.struct_text: str = self.get_struct_text()

    def get_struct_text(self) -> str:
        # Text of the subtree: leaf text (with an [image]/[svg] marker for images),
        # or the children's texts joined by NODE_SEPARATOR_TOKEN.
        if self.is_leaf():
            struct_text = self.info.text.strip()
            if self.is_img():
                if not struct_text:
                    return ""
                if self.info.type == 'img[svg]':
                    struct_text = '[svg]' + struct_text
                else:
                    struct_text = '[image]' + struct_text
            return struct_text

        children_text = []
        for child in self.children:
            child_text = child.struct_text
            if child_text:
                children_text.append(child.struct_text)
        return NODE_SEPARATOR_TOKEN.join(children_text)

    def get_context_text(self, root_text: str) -> None:
        # Walk up to three ancestors to find a context string that occurs exactly once
        # in the page text, then cache the disambiguated text for this node.
        self.context_text: str = ""

        cur_node = self
        for _ in range(3):
            if root_text.count(cur_node.struct_text) == 1:
                func = ''
                if self.info.func == 'type':
                    func += '(input box)'
                elif self.info.func == 'click':
                    func += '(clickable)'
                elif self.info.func == 'text':
                    func += '(pure text)'
                if cur_node == self:
                    context = ""
                else:
                    context = f'(context: "{cur_node.struct_text}")'
                self.context_text = self.struct_text + func + context
                return

            if not cur_node.father:
                return

            cur_node = cur_node.father

    # x1, y1, x2, y2
    def bbox2str(self) -> str:
        x1_rel = round(max(0, (self.info.bbox.x1 / IMG_W)) * 1000)
        y1_rel = round(max(0, (self.info.bbox.y1 / IMG_H)) * 1000)
        x2_rel = round(min(0.999, (self.info.bbox.x2 / IMG_W)) * 1000)
        y2_rel = round(min(0.999, (self.info.bbox.y2 / IMG_H)) * 1000)

        coords = [x1_rel, y1_rel, x2_rel, y2_rel]
        bbox_str = ["{:03}".format(coord) for coord in coords]
        bbox_str = "[" + ", ".join(bbox_str) + "]"

        return bbox_str

    # x1, y1, x2, y2 seeclick_format
    def bbox2str_2(self) -> str:
        x1_rel = round(max(0, (self.info.bbox.x1 / IMG_W)), 2)
        y1_rel = round(max(0, (self.info.bbox.y1 / IMG_H)), 2)
        x2_rel = round(min(0.999, (self.info.bbox.x2 / IMG_W)), 2)
        y2_rel = round(min(0.999, (self.info.bbox.y2 / IMG_H)), 2)

        coords = [x1_rel, y1_rel, x2_rel, y2_rel]
        bbox_str = ["{:.2f}".format(coord) for coord in coords]
        bbox_str = "(" + ",".join(bbox_str) + ")"

        return bbox_str

    # bbox center_x, center_y, w, h
    def bbox2str_ct(self) -> str:
        cx_rel = round(min(0.999, max(0, (self.info.bbox.cx / IMG_W))) * 1000)
        cy_rel = round(min(0.999, max(0, (self.info.bbox.cy / IMG_H))) * 1000)
        w_rel = round(min(0.999, max(0, (self.info.bbox.width / IMG_W))) * 1000)
        h_rel = round(min(0.999, max(0, (self.info.bbox.height / IMG_H))) * 1000)

        coords = [cx_rel, cy_rel, w_rel, h_rel]
        bbox_str = ["{:03}".format(coord) for coord in coords]
        bbox_str = "[" + ", ".join(bbox_str) + "]"

        return bbox_str

    # block idx, bbox center_x, center_y, w, h
    def bbox2str_ct_block(self, block_num_w=2, block_num_h=2) -> str:
        block_w = IMG_W // block_num_w
        block_h = IMG_H // block_num_h

        block_xi = max(0, min(IMG_W - 1, self.info.bbox.cx)) // block_w
        block_yi = max(0, min(IMG_H - 1, self.info.bbox.cy)) // block_h
        block_idx = block_yi * block_num_w + block_xi

        cx_block = self.info.bbox.cx % block_w
        cy_block = self.info.bbox.cy % block_h

        cx_rel = round(min(0.999, max(0, (cx_block / block_w))) * 1000)
        cy_rel = round(min(0.999, max(0, (cy_block / block_h))) * 1000)
        w_rel = round(min(0.999, max(0, (self.info.bbox.width / block_w))) * 1000)
        h_rel = round(min(0.999, max(0, (self.info.bbox.height / block_h))) * 1000)

        coords = [cx_rel, cy_rel, w_rel, h_rel]
        bbox_str = [str(block_idx)] + ["{:03}".format(coord) for coord in coords]
        bbox_str = "[" + ", ".join(bbox_str) + "]"

        return bbox_str

    # center point seeclick_format
    def bbox2str_ct_2(self) -> str:
        cx_rel = round(min(0.999, max(0, (self.info.bbox.cx / IMG_W))), 2)
        cy_rel = round(min(0.999, max(0, (self.info.bbox.cy / IMG_H))), 2)

        coords = [cx_rel, cy_rel]
        bbox_str = ["{:.2f}".format(coord) for coord in coords]
        bbox_str = "(" + ",".join(bbox_str) + ")"

        return bbox_str


def init_context_text(node: NodewithOCR, root_text: str):
    node.get_context_text(root_text)
    for child in node.children:
        init_context_text(child, root_text)


tokenizer = transformers.AutoTokenizer.from_pretrained(
    "path/to/your/tokenizer",
    trust_remote_code=True,
)
tokenizer.add_tokens([NODE_SEPARATOR_TOKEN], special_tokens=True)


def get_str_token_len(str_: str) -> int:
    return len(tokenizer.encode(str_))


def collect_nodes(dom: NodewithOCR, node_list: list[NodewithOCR]) -> list:
    if dom.is_valid:
        node_list.append(dom)

    for child in dom.children:
        collect_nodes(child, node_list)

    return node_list


def select_node(node, node_text, node_bbox: nodeBbox, selected_node_list: list[domNode], total_len: int) -> tuple[bool, int]:
    # Keep a node only if its text is non-empty, within the per-node token budget,
    # and its box does not overlap any already-selected box.
    if node.info.func == 'type':
        if not node.context_text:
            return False, total_len
    if not node_text:
        return False, total_len

    try:
        token_len = get_str_token_len(node_text)
    except Exception:
        return False, total_len
    if token_len > MAX_TOKEN_LEN or token_len < MIN_TOKEN_LEN:
        return False, total_len

    total_len += token_len + 20

    if selected_node_list:
        selected_bbox_list = [_node.info.bbox for _node in selected_node_list]
        max_IoUs = node_bbox.get_max_IoU(selected_bbox_list)
        if max_IoUs > MAX_IoU:
            return False, total_len

    return True, total_len


def get_bbox_text(node_list: list[NodewithOCR], context=False, is_point=True, is_seeclick=False):
    # Prefer input boxes, then pointer targets, then clickables, then plain text,
    # and stop once the total prompt length would exceed MAX_TOTAL_LEN.
    input_node_list = [_node for _node in node_list if _node.info.func == 'type']
    pointer_node_list = [_node for _node in node_list if _node.info.pointer]
    click_node_list = [_node for _node in node_list if not _node.info.pointer and _node.info.func == 'click']
    text_node_list = [_node for _node in node_list if _node.info.func == 'text']

    input_node_list_copy = deepcopy(input_node_list)
    pointer_node_list_copy = deepcopy(pointer_node_list)
    click_node_list_copy = deepcopy(click_node_list)
    text_node_list_copy = deepcopy(text_node_list)

    random.shuffle(input_node_list_copy)
    random.shuffle(pointer_node_list_copy)
    random.shuffle(click_node_list_copy)
    random.shuffle(text_node_list_copy)

    node_list_copy = input_node_list_copy + pointer_node_list_copy + click_node_list_copy + text_node_list_copy

    total_len = 0
    selected_node_list = []
    for node in node_list_copy:
        node_text = node.context_text if context else node.struct_text
        keep, total_len = select_node(node, node_text, node.info.bbox, selected_node_list, total_len)
        if total_len > MAX_TOTAL_LEN:
            break
        if keep:
            selected_node_list.append(node)
    random.shuffle(selected_node_list)

    bbox_str_list = []
    text_str_list = []
    for i, node in enumerate(selected_node_list):
        node_text = node.context_text if context else node.struct_text

        if not is_seeclick:
            bbox_str_list.append(f"{i+1}. {node.bbox2str_ct_block(block_num_w=BLOCK_NUM_W, block_num_h=BLOCK_NUM_H)}")
        else:
            if is_point:
                bbox_str_list.append(f"{i+1}. {node.bbox2str_ct_2()}")
            else:
                bbox_str_list.append(f"{i+1}. {node.bbox2str_2()}")

        text_str_list.append(f"{i+1}. {node_text}")

    bbox_str = '\n'.join(bbox_str_list)
    text_str = '\n'.join(text_str_list)

    return bbox_str, text_str


def make_bbox2text(node_list: list[NodewithOCR], img_path: str, is_point=True, is_seeclick=False):
    bbox_str, text_str = get_bbox_text(node_list, context=False, is_point=is_point, is_seeclick=is_seeclick)
    if not bbox_str or not text_str:
        return

    prompt = bbox_str + '\n' + random.choice(BBOX2TEXT_TEMPLATE)
    return {
        "image": img_path,
        "conversations": [
            {
                "from": "human",
                "value": f"<image>\n{prompt}"
            },
            {
                "from": "gpt",
                "value": text_str
            }
        ]
    }


def make_text2bbox(node_list: list[NodewithOCR], img_path: str, is_point=True, is_seeclick=False):
    bbox_str, text_str = get_bbox_text(node_list, context=True, is_point=is_point, is_seeclick=is_seeclick)
    if not bbox_str or not text_str:
        return

    prompt = text_str + '\n' + random.choice(TEXT2BBOX_TEMPLATE) + '(bbox: [block_index, cx, cy, w, h])'

    return {
        "image": img_path,
        "conversations": [
            {
                "from": "human",
                "value": f"<image>\n{prompt}"
            },
            {
                "from": "gpt",
                "value": bbox_str
            }
        ]
    }


def write_ann(ann_list, ann_path, mode='w', save_ratio=1.0):
    ann_dir = osp.dirname(ann_path)
    if ann_dir and not osp.exists(ann_dir):
        os.makedirs(ann_dir, exist_ok=True)

    if save_ratio == 1.0 or random.random() < save_ratio:
        with open(ann_path, mode, encoding='utf-8') as f:
            for i in range(len(ann_list)):
                ann_list[i]['id'] = i

                ann_str = json.dumps(ann_list[i], ensure_ascii=False)
                f.write(ann_str + '\n')


def single_proc(mode, data_dir, language, sub_dir):
    sub_path = osp.join(data_dir, sub_dir)
    screen_shot_dir = osp.join(sub_path, 'screenshot')
    dom_sub_dir = 'dom_svg' if language == 'zh' else 'dom_svg_en'
    dom_dir = osp.join(sub_path, dom_sub_dir)
    html_dir = osp.join(sub_path, 'html')

    html_path = osp.join(html_dir, 'html_0.html')
    if not osp.exists(html_path):
        return []
    html_content = open(html_path).readline().strip()
    if html_content in ['https://www.qq.com/', 'https://music.163.com/']:
        return []

    all_annotations = []
    for img_file in os.listdir(screen_shot_dir):
        if 'tmp' in img_file:
            continue
        file_ids = img_file.split('.')[0].split('_')[1]
        json_path = osp.join(dom_dir, f'dom_{file_ids}.json')
        img_path = osp.join(screen_shot_dir, img_file)
        if not osp.exists(json_path):
            continue

        dom_data = json.load(open(json_path))

        try:
            dom_node = NodewithOCR(tokenizer=tokenizer, img_w=IMG_W, img_h=IMG_H, task=mode, **dom_data)
        except ValueError as e:
            print(f"Json data error: {e}\n{json_path}")
            continue

        init_context_text(dom_node, dom_node.struct_text)

        node_list = collect_nodes(dom_node, [])
        if mode == 'bbox2text':
            ann = make_bbox2text(node_list, img_path, is_seeclick=False, is_point=False)
        elif mode == 'text2bbox':
            ann = make_text2bbox(node_list, img_path, is_seeclick=False, is_point=False)
        elif mode == 'text2bbox_seeclick':
            ann = make_text2bbox(node_list, img_path, is_seeclick=True, is_point=False)
        elif mode == 'bbox2text_seeclick':
            ann = make_bbox2text(node_list, img_path, is_seeclick=True, is_point=False)
        elif mode == 'text2point_seeclick':
            ann = make_text2bbox(node_list, img_path, is_seeclick=True, is_point=True)
        elif mode == 'point2text_seeclick':
            ann = make_bbox2text(node_list, img_path, is_seeclick=True, is_point=True)
        else:
            assert 0
        if ann is not None:
            all_annotations.append(ann)

    return all_annotations


def main_multi(data_dir, dst_path, mode, language='zh', save_ratio=1.0):
    pool = Pool(processes=16)

    sub_dir_list = os.listdir(data_dir)
    single_proc_partial = partial(single_proc, mode, data_dir, language)

    for res in tqdm(pool.imap_unordered(single_proc_partial, sub_dir_list), total=len(sub_dir_list)):
        write_ann(res, dst_path, mode='a', save_ratio=save_ratio)


if __name__ == '__main__':
    mode = 'text2bbox'
    # mode = 'bbox2text'

    # The globals below are read by the bbox2str_* helpers; fork-based pool workers
    # inherit them because the pool is created after they are set.

    # ZH (overridden by the EN block below; keep only one block active)
    BLOCK_NUM_W = 2
    BLOCK_NUM_H = 2
    IMG_W = 1120
    IMG_H = 1120

    data_dir = 'data_20240617'
    dst_path = 'xxx.jsonl'

    # EN
    BLOCK_NUM_W = 3
    BLOCK_NUM_H = 2
    IMG_W = 1920
    IMG_H = 1080

    data_dir = 'data_20240624'
    dst_path = 'xxx.jsonl'

    main_multi(data_dir, dst_path, mode, language='en')
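Because `IMG_W`, `IMG_H`, `BLOCK_NUM_W`, and `BLOCK_NUM_H` live under the `__main__` guard, they are not defined when the module is merely imported. A minimal single-directory sketch, assuming the tokenizer path has been filled in; `site_0` and the output path are placeholders:

import make_ocr

# Mirror the EN config from the __main__ block; these module globals are read
# by the bbox2str_* helpers at call time.
make_ocr.IMG_W, make_ocr.IMG_H = 1920, 1080
make_ocr.BLOCK_NUM_W, make_ocr.BLOCK_NUM_H = 3, 2

ann_list = make_ocr.single_proc("text2bbox", "data_20240624", "en", "site_0")
make_ocr.write_ann(ann_list, "out/text2bbox.jsonl", mode="a")

In `text2bbox` mode each record prompts with the numbered context texts and answers with the corresponding boxes in the `[block_index, cx, cy, w, h]` format produced by `bbox2str_ct_block`.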
scripts/readme.txt ADDED
@@ -0,0 +1,2 @@
bbox2dom: make_dom.py
text2bbox/bbox2text: make_ocr.py
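Both scripts expect the same per-site layout under `data_dir` (inferred from the path handling above; the screenshot prefix is an example, only the index after the underscore is used):

<data_dir>/<sub_dir>/
    html/html_0.html              (first line holds the page URL)
    screenshot/screenshot_<i>.png
    dom_svg/dom_<i>.json          (Chinese pages; dom_svg_en/ for English pages)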