import os
import torch
import collections
import logging
from tqdm import tqdm, trange
import json
import bs4
from os import path as osp
from bs4 import BeautifulSoup as bs
# from transformers.models.bert.tokenization_bert import BasicTokenizer, whitespace_tokenize
from torch.utils.data import Dataset
import networkx as nx
from lxml import etree
import pickle
# from transformers.tokenization_bert import BertTokenizer
from transformers import BertTokenizer
import argparse
tags_dict = {'a': 0, 'abbr': 1, 'acronym': 2, 'address': 3, 'altGlyph': 4, 'altGlyphDef': 5, 'altGlyphItem': 6,
'animate': 7, 'animateColor': 8, 'animateMotion': 9, 'animateTransform': 10, 'applet': 11, 'area': 12,
'article': 13, 'aside': 14, 'audio': 15, 'b': 16, 'base': 17, 'basefont': 18, 'bdi': 19, 'bdo': 20,
'bgsound': 21, 'big': 22, 'blink': 23, 'blockquote': 24, 'body': 25, 'br': 26, 'button': 27, 'canvas': 28,
'caption': 29, 'center': 30, 'circle': 31, 'cite': 32, 'clipPath': 33, 'code': 34, 'col': 35,
'colgroup': 36, 'color-profile': 37, 'content': 38, 'cursor': 39, 'data': 40, 'datalist': 41, 'dd': 42,
'defs': 43, 'del': 44, 'desc': 45, 'details': 46, 'dfn': 47, 'dialog': 48, 'dir': 49, 'div': 50, 'dl': 51,
'dt': 52, 'ellipse': 53, 'em': 54, 'embed': 55, 'feBlend': 56, 'feColorMatrix': 57,
'feComponentTransfer': 58, 'feComposite': 59, 'feConvolveMatrix': 60, 'feDiffuseLighting': 61,
'feDisplacementMap': 62, 'feDistantLight': 63, 'feFlood': 64, 'feFuncA': 65, 'feFuncB': 66, 'feFuncG': 67,
'feFuncR': 68, 'feGaussianBlur': 69, 'feImage': 70, 'feMerge': 71, 'feMergeNode': 72, 'feMorphology': 73,
'feOffset': 74, 'fePointLight': 75, 'feSpecularLighting': 76, 'feSpotLight': 77, 'feTile': 78,
'feTurbulence': 79, 'fieldset': 80, 'figcaption': 81, 'figure': 82, 'filter': 83, 'font-face-format': 84,
'font-face-name': 85, 'font-face-src': 86, 'font-face-uri': 87, 'font-face': 88, 'font': 89, 'footer': 90,
'foreignObject': 91, 'form': 92, 'frame': 93, 'frameset': 94, 'g': 95, 'glyph': 96, 'glyphRef': 97,
'h1': 98, 'h2': 99, 'h3': 100, 'h4': 101, 'h5': 102, 'h6': 103, 'head': 104, 'header': 105, 'hgroup': 106,
'hkern': 107, 'hr': 108, 'html': 109, 'i': 110, 'iframe': 111, 'image': 112, 'img': 113, 'input': 114,
'ins': 115, 'kbd': 116, 'keygen': 117, 'label': 118, 'legend': 119, 'li': 120, 'line': 121,
'linearGradient': 122, 'link': 123, 'main': 124, 'map': 125, 'mark': 126, 'marker': 127, 'marquee': 128,
'mask': 129, 'math': 130, 'menu': 131, 'menuitem': 132, 'meta': 133, 'metadata': 134, 'meter': 135,
'missing-glyph': 136, 'mpath': 137, 'nav': 138, 'nobr': 139, 'noembed': 140, 'noframes': 141,
'noscript': 142, 'object': 143, 'ol': 144, 'optgroup': 145, 'option': 146, 'output': 147, 'p': 148,
'param': 149, 'path': 150, 'pattern': 151, 'picture': 152, 'plaintext': 153, 'polygon': 154,
'polyline': 155, 'portal': 156, 'pre': 157, 'progress': 158, 'q': 159, 'radialGradient': 160, 'rb': 161,
'rect': 162, 'rp': 163, 'rt': 164, 'rtc': 165, 'ruby': 166, 's': 167, 'samp': 168, 'script': 169,
'section': 170, 'select': 171, 'set': 172, 'shadow': 173, 'slot': 174, 'small': 175, 'source': 176,
'spacer': 177, 'span': 178, 'stop': 179, 'strike': 180, 'strong': 181, 'style': 182, 'sub': 183,
'summary': 184, 'sup': 185, 'svg': 186, 'switch': 187, 'symbol': 188, 'table': 189, 'tbody': 190,
'td': 191, 'template': 192, 'text': 193, 'textPath': 194, 'textarea': 195, 'tfoot': 196, 'th': 197,
'thead': 198, 'time': 199, 'title': 200, 'tr': 201, 'track': 202, 'tref': 203, 'tspan': 204, 'tt': 205,
'u': 206, 'ul': 207, 'use': 208, 'var': 209, 'video': 210, 'view': 211, 'vkern': 212, 'wbr': 213,
'xmp': 214}
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
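# A tiny, hedged usage check for whitespace_tokenize: it only strips and splits on
# whitespace, with no sub-word handling (the sample string is illustrative).
def _example_whitespace_tokenize():
    assert whitespace_tokenize("  what is  the price ? ") == ["what", "is", "the", "price", "?"]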
# ---------- copied ! --------------
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text):
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join([w for w in doc_tokens[new_start:(new_end + 1)]
if w[0] != '<' or w[-1] != '>'])
if text_span == tok_answer_text:
return new_start, new_end
return input_start, input_end
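# A short runnable sketch of _improve_answer_span. The _ToyTokenizer below is a stand-in
# assumption (lowercase whitespace split), not the real BERT tokenizer; it only shows how
# a rough word span is tightened onto the annotated answer text.
def _example_improve_answer_span():
    class _ToyTokenizer:
        def tokenize(self, text):
            return text.lower().split()
    doc_tokens = ["the", "answer", "is", "42", "<b>", "bold", "</b>"]
    # a rough span covering "is 42" is narrowed down to just "42"
    start, end = _improve_answer_span(doc_tokens, 2, 3, _ToyTokenizer(), "42")
    assert (start, end) == (3, 3)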
class StrucDataset(Dataset):
"""Dataset wrapping tensors.
Each sample will be retrieved by indexing tensors along the first dimension.
Arguments:
*tensors (*torch.Tensor): tensors that share the same size in the first dimension.
pad_id (int): the id of the padding token, used to detect where each sequence ends.
all_expended_attention_mask (torch.Tensor, optional): pre-computed expanded attention masks, one per feature.
all_graph_names (list[str], optional): paths to the pickled node-pair shortest-path-length dicts of each page.
all_token_to_tag (list, optional): the mapping from each token to its corresponding tag id.
page_ids (list, optional): the corresponding page ids of the input features.
attention_width (int, optional): the maximum tree distance at which two tokens may still attend to each other.
has_tree_attention_bias (bool): whether to build a signed tree-distance matrix used as a relative attention bias.
"""
def __init__(self, *tensors, pad_id=0,
all_expended_attention_mask=None,
all_graph_names=None,
all_token_to_tag=None,
page_ids=None,
attention_width=None,
has_tree_attention_bias = False):
tensors = tuple(tensor for tensor in tensors)
assert all(len(tensors[0]) == len(tensor) for tensor in tensors)
if all_expended_attention_mask is not None:
assert len(tensors[0]) == len(all_expended_attention_mask)
tensors += (all_expended_attention_mask,)
self.tensors = tensors
self.page_ids = page_ids
self.all_graph_names = all_graph_names
self.all_token_to_tag = all_token_to_tag
self.pad_id = pad_id
self.attention_width = attention_width
self.has_tree_attention_bias = has_tree_attention_bias
def __getitem__(self, index):
output = [tensor[index] for tensor in self.tensors]
input_id = output[0]
attention_mask = output[1]
if self.attention_width is not None or self.has_tree_attention_bias:
assert self.all_graph_names is not None , ("For non-empty attention_width / tree rel pos,"
"Graph names must be sent in!")
if self.all_graph_names is not None:
assert self.all_token_to_tag is not None
graph_name = self.all_graph_names[index]
token_to_tag = self.all_token_to_tag[index]
with open(graph_name,"rb") as f:
node_pairs_lengths = pickle.load(f)
# node_pairs_lengths = dict(nx.all_pairs_shortest_path_length(graph))
seq_len = len(token_to_tag)
if self.has_tree_attention_bias:
mat = [[0] * seq_len for _ in range(seq_len)]  # build independent rows; [[0]*n]*n would alias one row n times
else:
mat = None
if self.attention_width is not None:
emask = attention_mask.expand(seq_len, seq_len).clone()  # clone: writing through the expanded view would corrupt the shared attention_mask storage
else:
emask = None
for nid in range(seq_len):
if input_id[nid]==self.pad_id:
break
for anid in range(nid+1,seq_len):
if input_id[anid]==self.pad_id:
break
x_tid4nid = token_to_tag[nid]
x_tid4anid = token_to_tag[anid]
if x_tid4nid==x_tid4anid:
continue
try:
xx = node_pairs_lengths[x_tid4nid]
# x_tid4nid in valid tid list, or == -1
except KeyError:
# x_tid4nid out of bound, like `question`, `sep` or `cls`
xx = node_pairs_lengths[-1]
x_tid4nid=-1
try:
dis = xx[x_tid4anid]
# x_tid4anid in valid tid list, or == -1
except KeyError:
# x_tid4anid out of bound, like `question`, `sep` or `cls`
dis = xx[-1]
x_tid4anid = -1
# xx = node_pairs_lengths.get(tid4nid,node_pairs_lengths[-1])
# dis = xx.get(tid4anid,xx[-1])
if self.has_tree_attention_bias:
if x_tid4nid<x_tid4anid:
mat[nid][anid]=dis
mat[anid][nid]=-dis
else:
mat[nid][anid] = -dis
mat[anid][nid] = dis
if self.attention_width is not None:
# [nid][anid] determines whether nid can see anid
if x_tid4nid==-1 or x_tid4anid==-1: # sep / cls / question / pad
continue
if dis>self.attention_width:
emask[nid][anid]=0
emask[anid][nid]=0
if self.attention_width is not None:
output.append(emask)
if self.has_tree_attention_bias:
t_mat = torch.tensor(mat,dtype=torch.long)
output.append(t_mat)
return tuple(item for item in output)
def __len__(self):
return len(self.tensors[0])
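# A minimal usage sketch of StrucDataset without graph files or tree-attention extras;
# in that configuration it behaves like a plain TensorDataset and just returns the
# indexed tensors. The dummy shapes below are illustrative only.
def _example_strucdataset():
    input_ids = torch.randint(1, 100, (4, 8))
    attention_mask = torch.ones(4, 8, dtype=torch.long)
    dataset = StrucDataset(input_ids, attention_mask)
    sample = dataset[0]  # (input_ids[0], attention_mask[0])
    assert len(dataset) == 4 and len(sample) == 2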
def get_xpath4tokens(html_fn: str, unique_tids: set):
xpath_map = {}
tree = etree.parse(html_fn, etree.HTMLParser())
nodes = tree.xpath('//*')
for node in nodes:
tid = node.attrib.get("tid")
if tid is not None and int(tid) in unique_tids:
xpath_map[int(tid)] = tree.getpath(node)
xpath_map[len(nodes)] = "/html"
xpath_map[len(nodes) + 1] = "/html"
return xpath_map
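# A self-contained sketch of get_xpath4tokens on a toy HTML file written to a temp path.
# The markup is an assumption for illustration: every element carries a `tid` attribute,
# since the function reads and int()-converts that attribute for each node.
def _example_get_xpath4tokens():
    import tempfile
    toy_html = '<html tid="0"><body tid="1"><div tid="2">hello</div></body></html>'
    with tempfile.NamedTemporaryFile("w", suffix=".html", delete=False) as f:
        f.write(toy_html)
    try:
        xpath_map = get_xpath4tokens(f.name, {2})
        # xpath_map[2] should be '/html/body/div'
        return xpath_map
    finally:
        os.remove(f.name)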
def get_xpath_and_treeid4tokens(html_code, unique_tids, max_depth):
unknown_tag_id = len(tags_dict)
pad_tag_id = unknown_tag_id + 1
max_width = 1000
width_pad_id = 1001
pad_x_tag_seq = [pad_tag_id] * max_depth
pad_x_subs_seq = [width_pad_id] * max_depth
pad_x_box = [0,0,0,0]
pad_tree_id_seq = [width_pad_id] * max_depth
def xpath_soup(element):
xpath_tags = []
xpath_subscripts = []
tree_index = []
child = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
siblings = parent.find_all(child.name, recursive=False)
para_siblings = parent.find_all(True, recursive=False)
xpath_tags.append(child.name)
xpath_subscripts.append(
0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child))
tree_index.append(next(i for i, s in enumerate(para_siblings, 0) if s is child))
child = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
tree_index.reverse()
return xpath_tags, xpath_subscripts, tree_index
xpath_tag_map = {}
xpath_subs_map = {}
tree_id_map = {}
for tid in unique_tids:
element = html_code.find(attrs={'tid': tid})
if element is None:
xpath_tags = pad_x_tag_seq
xpath_subscripts = pad_x_subs_seq
tree_index = pad_tree_id_seq
xpath_tag_map[tid] = xpath_tags
xpath_subs_map[tid] = xpath_subscripts
tree_id_map[tid] = tree_index
continue
xpath_tags, xpath_subscripts, tree_index = xpath_soup(element)
assert len(xpath_tags) == len(xpath_subscripts)
assert len(xpath_tags) == len(tree_index)
if len(xpath_tags) > max_depth:
xpath_tags = xpath_tags[-max_depth:]
xpath_subscripts = xpath_subscripts[-max_depth:]
# tree_index = tree_index[-max_depth:]
xpath_tags = [tags_dict.get(name, unknown_tag_id) for name in xpath_tags]
xpath_subscripts = [min(i, max_width) for i in xpath_subscripts]
tree_index = [min(i, max_width) for i in tree_index]
# we do not append them to max depth here
xpath_tags += [pad_tag_id] * (max_depth - len(xpath_tags))
xpath_subscripts += [width_pad_id] * (max_depth - len(xpath_subscripts))
# tree_index += [width_pad_id] * (max_depth - len(tree_index))
xpath_tag_map[tid] = xpath_tags
xpath_subs_map[tid] = xpath_subscripts
tree_id_map[tid] = tree_index
return xpath_tag_map, xpath_subs_map, tree_id_map
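# A hedged sketch of get_xpath_and_treeid4tokens on a toy soup. The markup and tid values
# are assumptions for illustration; with tags_dict above, the xpath of tid 3 maps to
# [html=109, body=25, div=50, span=178], padded with pad_tag_id up to max_depth.
def _example_get_xpath_and_treeid4tokens():
    toy = bs('<html tid="0"><body tid="1"><div tid="2"><span tid="3">hi</span></div></body></html>',
             'html.parser')
    tag_map, subs_map, tree_map = get_xpath_and_treeid4tokens(toy, {3}, max_depth=50)
    assert tag_map[3][:4] == [109, 25, 50, 178]
    assert subs_map[3][:4] == [0, 0, 0, 0]   # each node is the only sibling of its kind
    assert tree_map[3] == [0, 0, 0, 0]       # tree ids are neither truncated nor padded
    return tag_map, subs_map, tree_map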
# ---------- copied ! --------------
def _check_is_max_context(doc_spans, cur_span_index, position):
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
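# A brief runnable sketch of _check_is_max_context. DocSpan here mirrors the private
# namedtuple defined later inside convert_examples_to_features; it is redeclared locally
# only so this illustration stands on its own.
def _example_check_is_max_context():
    DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
    doc_spans = [DocSpan(start=0, length=4), DocSpan(start=2, length=4)]
    # token position 3 appears in both spans, but the second span gives it more context
    assert _check_is_max_context(doc_spans, 0, 3) is False
    assert _check_is_max_context(doc_spans, 1, 3) is True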
class SRCExample(object):
r"""
The Containers for SRC Examples.
Arguments:
doc_tokens (list[str]): the original tokens of the HTML file before dividing into sub-tokens.
qas_id (str): the id of the corresponding question.
tag_num (int): the total tag number in the corresponding HTML file, including the additional 'yes' and 'no'.
question_text (str): the text of the corresponding question.
orig_answer_text (str): the answer text provided by the dataset.
all_doc_tokens (list[str]): the sub-tokens of the corresponding HTML file.
start_position (int): the position where the answer starts in the all_doc_tokens.
end_position (int): the position where the answer ends in the all_doc_tokens; NOTE that the answer tokens
include the token at end_position.
tok_to_orig_index (list[int]): the mapping from sub-tokens (all_doc_tokens) to origin tokens (doc_tokens).
orig_to_tok_index (list[int]): the mapping from origin tokens (doc_tokens) to sub-tokens (all_doc_tokens).
tok_to_tags_index (list[int]): the mapping from sub-tokens (all_doc_tokens) to the id of the deepest tag it
belongs to.
"""
# the difference between T-PLM and H-PLM is just adding <xx> and </xx> into the
# original tokens and the further-tokenized tokens
def __init__(self,
doc_tokens,
qas_id,
tag_num, # <xx> ?? </xx> is counted as one tag
question_text=None,
html_code=None,
orig_answer_text=None,
start_position=None, # in all_doc_tokens
end_position=None, # in all_doc_tokens
tok_to_orig_index=None,
orig_to_tok_index=None,
all_doc_tokens=None,
tok_to_tags_index=None,
xpath_tag_map=None,
xpath_subs_map=None,
xpath_box=None,
tree_id_map=None,
visible_matrix=None,
):
self.doc_tokens = doc_tokens
self.qas_id = qas_id
self.tag_num = tag_num
self.question_text = question_text
self.html_code = html_code
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.tok_to_orig_index = tok_to_orig_index
self.orig_to_tok_index = orig_to_tok_index
self.all_doc_tokens = all_doc_tokens
self.tok_to_tags_index = tok_to_tags_index
self.xpath_tag_map = xpath_tag_map
self.xpath_subs_map = xpath_subs_map
self.xpath_box = xpath_box
self.tree_id_map = tree_id_map
self.visible_matrix = visible_matrix
def __str__(self):
return self.__repr__()
def __repr__(self):
"""
s = ""
s += "qas_id: %s" % self.qas_id
s += ", question_text: %s" % (
self.question_text)
s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
if self.start_position:
s += ", start_position: %d" % self.start_position
if self.end_position:
s += ", end_position: %d" % self.end_position
"""
s = "[INFO]\n"
s += f"qas_id ({type(self.qas_id)}): {self.qas_id}\n"
s += f"tag_num ({type(self.tag_num)}): {self.tag_num}\n"
s += f"question_text ({type(self.question_text)}): {self.question_text}\n"
s += f"html_code ({type(self.html_code)}): {self.html_code}\n"
s += f"orig_answer_text ({type(self.orig_answer_text)}): {self.orig_answer_text}\n"
s += f"start_position ({type(self.start_position)}): {self.start_position}\n"
s += f"end_position ({type(self.end_position)}): {self.end_position}\n"
s += f"tok_to_orig_index ({type(self.tok_to_orig_index)}): {self.tok_to_orig_index}\n"
s += f"orig_to_tok_index ({type(self.orig_to_tok_index)}): {self.orig_to_tok_index}\n"
s += f"all_doc_tokens ({type(self.all_doc_tokens)}): {self.all_doc_tokens}\n"
s += f"tok_to_tags_index ({type(self.tok_to_tags_index)}): {self.tok_to_tags_index}\n"
s += f"xpath_tag_map ({type(self.xpath_tag_map)}): {self.xpath_tag_map}\n"
s += f"xpath_subs_map ({type(self.xpath_subs_map)}): {self.xpath_subs_map}\n"
s += f"tree_id_map ({type(self.tree_id_map)}): {self.tree_id_map}\n"
return s
class InputFeatures(object):
r"""
The Container for the Features of Input Doc Spans.
Arguments:
unique_id (int): the unique id of the input doc span.
example_index (int): the index of the corresponding SRC Example of the input doc span.
page_id (str): the id of the corresponding web page of the question.
doc_span_index (int): the index of the doc span among all the doc spans which correspond to the same SRC
Example.
tokens (list[str]): the sub-tokens of the input sequence, including cls token, sep tokens, and the sub-tokens
of the question and HTML file.
token_to_orig_map (dict[int, int]): the mapping from the HTML file's sub-tokens in the sequence (tokens)
to the original tokens (doc_tokens in the corresponding SRC Example).
token_is_max_context (dict[int, bool]): whether the current doc span contains the max pre- and post-context for
each HTML file's sub-tokens.
input_ids (list[int]): the ids of the sub-tokens in the input sequence (tokens).
input_mask (list[int]): use 0/1 to distinguish the input sequence from paddings.
segment_ids (list[int]): use 0/1 to distinguish the question and the HTML files.
paragraph_len (int): the length of the HTML file's sub-tokens.
start_position (int): the position where the answer starts in the input sequence (0 if the answer is not fully
in the input sequence).
end_position (int): the position where the answer ends in the input sequence; NOTE that the answer tokens
include the token at end_position (0 if the answer is not fully in the input sequence).
token_to_tag_index (list[int]): the mapping from sub-tokens of the input sequence to the id of the deepest tag
it belongs to.
is_impossible (bool): True if the answer is not fully contained in this doc span, else False.
"""
def __init__(self,
unique_id,
example_index,
page_id,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids,
paragraph_len,
start_position=None,
end_position=None,
token_to_tag_index=None,
is_impossible=None,
xpath_tags_seq=None,
xpath_subs_seq=None,
xpath_box_seq=None,
extended_attention_mask=None):
self.unique_id = unique_id
self.example_index = example_index
self.page_id = page_id
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.paragraph_len = paragraph_len
self.start_position = start_position
self.end_position = end_position
self.token_to_tag_index = token_to_tag_index
self.is_impossible = is_impossible
self.xpath_tags_seq = xpath_tags_seq
self.xpath_subs_seq = xpath_subs_seq
self.xpath_box_seq = xpath_box_seq
self.extended_attention_mask = extended_attention_mask
def html_escape(html):
r"""
Replace the HTML character entities in the html file with the corresponding punctuation.
"""
html = html.replace('&quot;', '"')
html = html.replace('&amp;', '&')
html = html.replace('&lt;', '<')
html = html.replace('&gt;', '>')
html = html.replace('&nbsp;', ' ')
return html
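# A quick runnable illustration of html_escape on standard HTML character entities.
def _example_html_escape():
    escaped = 'Tom &amp; Jerry &lt;b&gt;&quot;cartoon&quot;&lt;/b&gt;&nbsp;!'
    assert html_escape(escaped) == 'Tom & Jerry <b>"cartoon"</b> !'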
def read_squad_examples(args, input_file, root_dir, is_training, tokenizer, simplify=False, max_depth=50,
split_flag="n-eon",
attention_width=None):
r"""
pre-process the data in json format into SRC Examples.
Arguments:
split_flag (str): controls how HTML node boundaries are represented in the token stream, choice:
['n-eon', 'y-eon', 'y-sep', 'y-cls', 'y-hplm'].
attention_width (int): the maximum tree distance at which two tags stay mutually visible; None disables
the visibility matrix.
input_file (str): the input data file in json format.
root_dir (str): the root directory of the raw WebSRC dataset, which contains the HTML files.
is_training (bool): True if processing the training set, else False.
tokenizer (Tokenizer): the tokenizer for the PLM in use.
simplify (bool): when set to True, the returned Examples only contain the document tokens, the id of the
question-answers, and the total tag number in the corresponding HTML files.
max_depth (int): the maximum xpath depth kept for each tag; deeper xpaths are truncated from the root side.
Returns:
list[SRCExamples]: the resulting SRC Examples, containing all the information needed for the feature
generation process, except when the argument simplify is set to True;
set[str]: all the tag names that appear in the processed dataset, e.g. <div>, <img/>, </p>, etc.
"""
with open(input_file, "r", encoding='utf-8') as reader:
input_data = json.load(reader)["data"]
pad_tree_id_seq = [1001] * max_depth
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
def html_to_text_list(h):
tag_num, text_list = 0, []
for element in h.descendants:
if (type(element) == bs4.element.NavigableString) and (element.strip()):
text_list.append(element.strip())
if type(element) == bs4.element.Tag:
tag_num += 1
return text_list, tag_num + 2 # + 2 because we treat the additional 'yes' and 'no' as two special tags.
def html_to_text(h):
tag_list = set()
for element in h.descendants:
if type(element) == bs4.element.Tag:
element.attrs = {}
temp = str(element).split()
tag_list.add(temp[0])
tag_list.add(temp[-1])
return html_escape(str(h)), tag_list
def adjust_offset(offset, text):
text_list = text.split()
cnt, adjustment = 0, []
for t in text_list:
if not t:
continue
if t[0] == '<' and t[-1] == '>':
adjustment.append(offset.index(cnt))
else:
cnt += 1
add = 0
adjustment.append(len(offset))
for i in range(len(offset)):
while i >= adjustment[add]:
add += 1
offset[i] += add
return offset
def e_id_to_t_id(e_id, html):
t_id = 0
for element in html.descendants:
if type(element) == bs4.element.NavigableString and element.strip():
t_id += 1
if type(element) == bs4.element.Tag:
if int(element.attrs['tid']) == e_id:
break
return t_id
def calc_num_from_raw_text_list(t_id, l):
n_char = 0
for i in range(t_id):
n_char += len(l[i]) + 1
return n_char
def word_to_tag_from_text(tokens, h):
cnt, w_t, path = -1, [], []
unique_tids = set()
for t in tokens[0:-2]:
if len(t) < 2:
w_t.append(path[-1])
unique_tids.add(path[-1])
continue
if t[0] == '<' and t[-2] == '/':
cnt += 1
w_t.append(cnt)
unique_tids.add(cnt)
continue
if t[0] == '<' and t[1] != '/':
cnt += 1
path.append(cnt)
w_t.append(path[-1])
unique_tids.add(path[-1])
if t[0] == '<' and t[1] == '/':
del path[-1]
w_t.append(cnt + 1)
unique_tids.add(cnt + 1)
w_t.append(cnt + 2)
unique_tids.add(cnt + 2)
assert len(w_t) == len(tokens)
assert len(path) == 0, print(h)
return w_t, unique_tids
def word_tag_offset(html):
cnt, w_t, t_w, tags, tags_tids = 0, [], [], [], []
for element in html.descendants:
if type(element) == bs4.element.Tag:
content = ' '.join(list(element.strings)).split()
t_w.append({'start': cnt, 'len': len(content)})
tags.append('<' + element.name + '>')
tags_tids.append(element['tid'])
elif type(element) == bs4.element.NavigableString and element.strip():
text = element.split()
tid = element.parent['tid']
ind = tags_tids.index(tid)
for _ in text:
w_t.append(ind)
cnt += 1
assert cnt == len(w_t)
w_t.append(len(t_w))
w_t.append(len(t_w) + 1)
return w_t
def subtoken_tag_offset(html, s_tok):
w_t = word_tag_offset(html)
s_t = []
unique_tids = set()
for i in range(len(s_tok)):
s_t.append(w_t[s_tok[i]])
unique_tids.add(w_t[s_tok[i]])
return s_t, unique_tids
def subtoken_tag_offset_plus_eon(html, s_tok, all_doc_tokens):
w_t = word_tag_offset(html)
s_t = []
unique_tids = set()
offset = 0
for i in range(len(s_tok)):
if all_doc_tokens[i] not in ('<end-of-node>', tokenizer.sep_token, tokenizer.cls_token):
s_t.append(w_t[s_tok[i] - offset])
unique_tids.add(w_t[s_tok[i] - offset])
else:
prev_tid = s_t[-1]
s_t.append(prev_tid)
offset += 1
return s_t, unique_tids
def check_visible(path1, path2, attention_width):
i = 0
j = 0
dis = 0
lp1 = len(path1)
lp2 = len(path2)
while i < lp1 and j < lp2 and path1[i] == path2[j]:
i += 1
j += 1
if i < lp1 and j < lp2:
dis += lp1 - i + lp2 - j
else:
if i == lp1:
dis += lp2 - j
else:
dis += lp1 - i
if dis <= attention_width:
return True
return False
def from_tids_to_box(html_fn, unique_tids, json_fn):
sorted_ids = sorted(unique_tids)
f = open(json_fn, 'r')
data = json.load(f)
orig_width, orig_height = data['2']['rect']['width'], data['2']['rect']['height']
orig_x, orig_y = data['2']['rect']['x'], data['2']['rect']['y']
return_dict = {}
for id in sorted_ids:
if str(id) in data:
x, y, width, height = data[str(id)]['rect']['x'], data[str(id)]['rect']['y'], data[str(id)]['rect']['width'], data[str(id)]['rect']['height']
resize_x = (x - orig_x) * 1000 // orig_width
resize_y = (y - orig_y) * 1000 // orig_height
resize_width = width * 1000 // orig_width
resize_height = height * 1000 // orig_height
# if not (resize_x <= 1000 and resize_y <= 1000):
# print('before', x, y, width, height)
# print('after', resize_x, resize_y, resize_width, resize_height)
# print('file name ', html_fn)
# # exit(0)
if resize_x < 0 or resize_y < 0 or resize_width < 0 or resize_height < 0: # meaningless
return_dict[id] = [0, 0, 0, 0]
else:
return_dict[id] = [int(resize_x), int(resize_y), int(resize_x+resize_width), int(resize_y+resize_height)]
else:
return_dict[id] = [0,0,0,0]
return return_dict
def get_visible_matrix(unique_tids, tree_id_map, attention_width):
if attention_width is None:
return None
unique_tids_list = list(unique_tids)
visible_matrix = collections.defaultdict(list)
for i in range(len(unique_tids_list)):
if tree_id_map[unique_tids_list[i]] == pad_tree_id_seq:
visible_matrix[unique_tids_list[i]] = list()
continue
visible_matrix[unique_tids_list[i]].append(unique_tids_list[i])
for j in range(i + 1, len(unique_tids_list)):
if check_visible(tree_id_map[unique_tids_list[i]], tree_id_map[unique_tids_list[j]], attention_width):
visible_matrix[unique_tids_list[i]].append(unique_tids_list[j])
visible_matrix[unique_tids_list[j]].append(unique_tids_list[i])
return visible_matrix
examples = []
all_tag_list = set()
total_num = sum([len(entry["websites"]) for entry in input_data])
with tqdm(total=total_num, desc="Converting websites to examples") as t:
for entry in input_data:
# print('entry', entry)
domain = entry["domain"]
for website in entry["websites"]:
# print('website', website)
# Generate Doc Tokens
page_id = website["page_id"]
# print('page_id', page_id)
curr_dir = osp.join(root_dir, domain, page_id[0:2], 'processed_data')
html_fn = osp.join(curr_dir, page_id + '.html')
json_fn = osp.join(curr_dir, page_id + '.json')
# print('html', html_fn)
html_file = open(html_fn).read()
html_code = bs(html_file, "html.parser")
raw_text_list, tag_num = html_to_text_list(html_code)
# print(raw_text_list)
# print(tag_num)
# exit(0)
doc_tokens = []
char_to_word_offset = []
# print(split_flag) # n-eon
# exit(0)
if split_flag in ["y-eon", "y-sep", "y-cls"]:
prev_is_whitespace = True
for i, doc_string in enumerate(raw_text_list):
for c in doc_string:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
if i < len(raw_text_list) - 1:
prev_is_whitespace = True
char_to_word_offset.append(len(doc_tokens) - 1)
if split_flag == "y-eon":
doc_tokens.append('<end-of-node>')
elif split_flag == "y-sep":
doc_tokens.append(tokenizer.sep_token)
elif split_flag == "y-cls":
doc_tokens.append(tokenizer.cls_token)
else:
raise ValueError("Split flag should be `y-eon` or `y-sep` or `y-cls`")
prev_is_whitespace = True
elif split_flag =="n-eon" or split_flag == "y-hplm":
page_text = ' '.join(raw_text_list)
prev_is_whitespace = True
for c in page_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
doc_tokens.append('no')
char_to_word_offset.append(len(doc_tokens) - 1)
doc_tokens.append('yes')
char_to_word_offset.append(len(doc_tokens) - 1)
if split_flag == "y-hplm":
real_text, tag_list = html_to_text(bs(html_file))
all_tag_list = all_tag_list | tag_list
char_to_word_offset = adjust_offset(char_to_word_offset, real_text)
doc_tokens = real_text.split()
doc_tokens.append('no')
doc_tokens.append('yes')
doc_tokens = [i for i in doc_tokens if i]
else:
tag_list = []
assert len(doc_tokens) == char_to_word_offset[-1] + 1, (len(doc_tokens), char_to_word_offset[-1])
if simplify:
for qa in website["qas"]:
qas_id = qa["id"]
example = SRCExample(doc_tokens=doc_tokens, qas_id=qas_id, tag_num=tag_num)
examples.append(example)
t.update(1)
else:
# Tokenize all doc tokens
# tokenize sth like < / >
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
if token in tag_list:
sub_tokens = [token]
else:
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
# Generate extra information for features
if split_flag in ["y-eon", "y-sep", "y-cls"]:
tok_to_tags_index, unique_tids = subtoken_tag_offset_plus_eon(html_code, tok_to_orig_index,
all_doc_tokens)
elif split_flag == "n-eon":
tok_to_tags_index, unique_tids = subtoken_tag_offset(html_code, tok_to_orig_index)
elif split_flag == "y-hplm":
tok_to_tags_index, unique_tids = word_to_tag_from_text(all_doc_tokens, html_code)
else:
raise ValueError("Unsupported split_flag!")
xpath_tag_map, xpath_subs_map, tree_id_map = get_xpath_and_treeid4tokens(html_code, unique_tids,
max_depth=max_depth)
# tree_id_map : neither truncated nor padded
xpath_box = from_tids_to_box(html_fn, unique_tids, json_fn)
assert tok_to_tags_index[-1] == tag_num - 1, (tok_to_tags_index[-1], tag_num - 1)
# we get attention_mask here
visible_matrix = get_visible_matrix(unique_tids, tree_id_map, attention_width=attention_width)
# Process each qas, which is mainly calculate the answer position
for qa in website["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
end_position = None
orig_answer_text = None
if is_training:
if len(qa["answers"]) != 1:
raise ValueError(
"For training, each question should have exactly 1 answer.")
answer = qa["answers"][0]
orig_answer_text = answer["text"]
if answer["element_id"] == -1:
num_char = len(char_to_word_offset) - 2
else:
num_char = calc_num_from_raw_text_list(e_id_to_t_id(answer["element_id"], html_code),
raw_text_list)
answer_offset = num_char + answer["answer_start"]
answer_length = len(orig_answer_text) if answer["element_id"] != -1 else 1
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[answer_offset + answer_length - 1]
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = " ".join([w for w in doc_tokens[start_position:(end_position + 1)]
if (w[0] != '<' or w[-1] != '>')
and w != "<end-of-node>"
and w != tokenizer.sep_token
and w != tokenizer.cls_token])
cleaned_answer_text = " ".join(whitespace_tokenize(orig_answer_text))
if actual_text.find(cleaned_answer_text) == -1:
logging.warning("Could not find answer of question %s: '%s' vs. '%s'",
qa['id'], actual_text, cleaned_answer_text)
continue
example = SRCExample(
doc_tokens=doc_tokens,
qas_id=qas_id,
tag_num=tag_num,
question_text=question_text,
html_code=html_code,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
tok_to_orig_index=tok_to_orig_index,
orig_to_tok_index=orig_to_tok_index,
all_doc_tokens=all_doc_tokens,
tok_to_tags_index=tok_to_tags_index,
xpath_tag_map=xpath_tag_map,
xpath_subs_map=xpath_subs_map,
xpath_box=xpath_box,
tree_id_map=tree_id_map,
visible_matrix=visible_matrix
)
examples.append(example)
if args.web_num_features != 0:
if len(examples) >= args.web_num_features:
return examples, all_tag_list
t.update(1)
return examples, all_tag_list
def load_and_cache_examples(args, tokenizer, max_depth=50, evaluate=False, output_examples=False):
r"""
Load and process the raw data.
"""
if args.local_rank not in [-1, 0] and not evaluate:
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset,
# and the others will use the cache
# Load data features from cache or dataset file
input_file = args.web_eval_file if evaluate else args.web_train_file
cached_features_file = os.path.join(args.cache_dir, 'cached_{}_{}_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train',
"markuplm",
str(args.max_seq_length),
str(max_depth),
args.web_num_features,
args.model_type
))
if not os.path.exists(os.path.dirname(cached_features_file)):
os.makedirs(os.path.dirname(cached_features_file))
if os.path.exists(cached_features_file) and not args.overwrite_cache:
print("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
if output_examples:
examples, tag_list = read_squad_examples(args, input_file=input_file,
root_dir=args.web_root_dir,
is_training=not evaluate,
tokenizer=tokenizer,
simplify=True,
max_depth=max_depth
)
else:
examples = None
else:
print("Creating features from dataset file at %s", input_file)
examples, _ = read_squad_examples(args, input_file=input_file,
root_dir=args.web_root_dir,
is_training=not evaluate,
tokenizer=tokenizer,
simplify=False,
max_depth=max_depth)
features = convert_examples_to_features(examples=examples,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length,
is_training=not evaluate,
cls_token=tokenizer.cls_token,
sep_token=tokenizer.sep_token,
pad_token=tokenizer.pad_token_id,
sequence_a_segment_id=0,
sequence_b_segment_id=0,
max_depth=max_depth)
if args.local_rank in [-1, 0] and args.web_save_features:
print("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
if args.local_rank == 0 and not evaluate:
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset,
# and the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_xpath_tags_seq = torch.tensor([f.xpath_tags_seq for f in features], dtype=torch.long)
all_xpath_subs_seq = torch.tensor([f.xpath_subs_seq for f in features], dtype=torch.long)
all_xpath_box_seq = torch.tensor([f.xpath_box_seq for f in features], dtype=torch.long)
if evaluate:
all_feature_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
dataset = StrucDataset(all_input_ids, all_input_mask, all_segment_ids, all_feature_index,
all_xpath_tags_seq, all_xpath_subs_seq, all_xpath_box_seq)
else:
all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
dataset = StrucDataset(all_input_ids, all_input_mask, all_segment_ids,
all_xpath_tags_seq, all_xpath_subs_seq,
all_start_positions, all_end_positions, all_xpath_box_seq)
if output_examples:
dataset = (dataset, examples, features)
return dataset
def convert_examples_to_features(examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training,
cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
sequence_a_segment_id=0, sequence_b_segment_id=1,
cls_token_segment_id=0, pad_token_segment_id=0,
mask_padding_with_zero=True, max_depth=50):
r"""
Converting the SRC Examples further into the features for all the input doc spans.
Arguments:
examples (list[SRCExample]): the list of SRC Examples to process.
tokenizer (Tokenizer): the tokenizer for PLM in use.
max_seq_length (int): the max length of the total sub-token sequence, including the question, cls token, sep
tokens, and documents; if the length of the input is bigger than max_seq_length, the input
will be cut into several doc spans.
doc_stride (int): the stride length when the input is cut into several doc spans.
max_query_length (int): the max length of the sub-token sequence of the question; the question will be truncated
if it is longer than max_query_length.
is_training (bool): True if processing the training set, else False.
cls_token (str): the cls token in use, default is '[CLS]'.
sep_token (str): the sep token in use, default is '[SEP]'.
pad_token (int): the id of the padding token, used when the total sub-token length is smaller than
max_seq_length; default is 0, which corresponds to the '[PAD]' token.
sequence_a_segment_id: the segment id for the first sequence (the question), default is 0.
sequence_b_segment_id: the segment id for the second sequence (the html file), default is 1.
cls_token_segment_id: the segment id for the cls token, default is 0.
pad_token_segment_id: the segment id for the padding tokens, default is 0.
mask_padding_with_zero: determine the pattern of the returned input mask; 0 for padding tokens and 1 for others
when True, and vice versa.
Returns:
list[InputFeatures]: the resulting input features for all the input doc spans
"""
pad_x_tag_seq = [216] * max_depth
pad_x_subs_seq = [1001] * max_depth
pad_x_box = [0,0,0,0]
pad_tree_id_seq = [1001] * max_depth
unique_id = 1000000000
features = []
for (example_index, example) in enumerate(tqdm(examples, desc="Converting examples to features")):
xpath_tag_map = example.xpath_tag_map
xpath_subs_map = example.xpath_subs_map
xpath_box = example.xpath_box
tree_id_map = example.tree_id_map
visible_matrix = example.visible_matrix
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_start_position = None
tok_end_position = None
if is_training:
tok_start_position = example.orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = example.orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(example.all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
example.all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(example.all_doc_tokens):
length = len(example.all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(example.all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
token_to_tag_index = []
# CLS token at the beginning
tokens.append(cls_token)
segment_ids.append(cls_token_segment_id)
token_to_tag_index.append(example.tag_num)
# Query
tokens += query_tokens
segment_ids += [sequence_a_segment_id] * len(query_tokens)
token_to_tag_index += [example.tag_num] * len(query_tokens)
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_a_segment_id)
token_to_tag_index.append(example.tag_num)
# Paragraph
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = example.tok_to_orig_index[split_token_index]
token_to_tag_index.append(example.tok_to_tags_index[split_token_index])
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(example.all_doc_tokens[split_token_index])
segment_ids.append(sequence_b_segment_id)
paragraph_len = doc_span.length
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_b_segment_id)
token_to_tag_index.append(example.tag_num)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(pad_token)
input_mask.append(0 if mask_padding_with_zero else 1)
segment_ids.append(pad_token_segment_id)
token_to_tag_index.append(example.tag_num)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(token_to_tag_index) == max_seq_length
span_is_impossible = False
start_position = None
end_position = None
if is_training:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
span_is_impossible = True
start_position = 0
end_position = 0
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
'''
if 10 < example_index < 20:
print("*** Example ***")
#print("page_id: %s" % (example.qas_id[:-5]))
#print("token_to_tag_index :%s" % token_to_tag_index)
#print(len(token_to_tag_index))
#print("unique_id: %s" % (unique_id))
#print("example_index: %s" % (example_index))
#print("doc_span_index: %s" % (doc_span_index))
# print("tokens: %s" % " ".join(tokens))
print("tokens: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in enumerate(tokens)
]))
#print("token_to_orig_map: %s" % " ".join([
# "%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()]))
#print(len(token_to_orig_map))
# print("token_is_max_context: %s" % " ".join([
# "%d:%s" % (x, y) for (x, y) in token_is_max_context.items()
# ]))
#print(len(token_is_max_context))
#print("input_ids: %s" % " ".join([str(x) for x in input_ids]))
#print(len(input_ids))
#print(
# "input_mask: %s" % " ".join([str(x) for x in input_mask]))
#print(len(input_mask))
#print(
# "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
#print(len(segment_ids))
print(f"original answer: {example.orig_answer_text}")
if is_training and span_is_impossible:
print("impossible example")
if is_training and not span_is_impossible:
answer_text = " ".join(tokens[start_position:(end_position + 1)])
print("start_position: %d" % (start_position))
print("end_position: %d" % (end_position))
print(
"answer: %s" % (answer_text))
'''
# print('token_to_tag_index', token_to_tag_index)
# print('xpath_tag_map', xpath_tag_map)
# exit(0)
xpath_tags_seq = [xpath_tag_map.get(tid, pad_x_tag_seq) for tid in token_to_tag_index] # ok
xpath_subs_seq = [xpath_subs_map.get(tid, pad_x_subs_seq) for tid in token_to_tag_index] # ok
xpath_box_seq = [xpath_box.get(tid, pad_x_box) for tid in token_to_tag_index]
# print(xpath_box_seq)
# exit(0)
# we need to get extended_attention_mask
if visible_matrix is not None:
extended_attention_mask = []
for tid in token_to_tag_index:
if tid == example.tag_num:
extended_attention_mask.append(input_mask)
else:
visible_tids = visible_matrix[tid]
if len(visible_tids) == 0:
extended_attention_mask.append(input_mask)
continue
visible_per_token = []
for i, tid in enumerate(token_to_tag_index):
if tid == example.tag_num and input_mask[i] == (1 if mask_padding_with_zero else 0):
visible_per_token.append(1 if mask_padding_with_zero else 0)
elif tid in visible_tids:
visible_per_token.append(1 if mask_padding_with_zero else 0)
else:
visible_per_token.append(0 if mask_padding_with_zero else 1)
extended_attention_mask.append(visible_per_token) # should be (max_seq_len*max_seq_len)
else:
extended_attention_mask = None
features.append(
InputFeatures(
unique_id=unique_id,
example_index=example_index,
page_id=example.qas_id[:-5],
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
paragraph_len=paragraph_len,
start_position=start_position,
end_position=end_position,
token_to_tag_index=token_to_tag_index,
is_impossible=span_is_impossible,
xpath_tags_seq=xpath_tags_seq,
xpath_subs_seq=xpath_subs_seq,
xpath_box_seq=xpath_box_seq,
extended_attention_mask=extended_attention_mask,
))
unique_id += 1
return features
def get_websrc_dataset(args, tokenizer, evaluate=False, output_examples=False):
if not evaluate:
websrc_dataset = load_and_cache_examples(args, tokenizer, evaluate=evaluate, output_examples=False)
return websrc_dataset
else:
dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=evaluate, output_examples=True)
return dataset, examples, features
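# A hedged end-to-end sketch of get_websrc_dataset. The argparse.Namespace fields mirror the
# attributes read in load_and_cache_examples above, but every path and hyper-parameter below
# is a hypothetical placeholder; a real run needs the WebSRC data laid out on disk.
def _example_get_websrc_dataset():
    args = argparse.Namespace(
        local_rank=-1, cache_dir="./cache", overwrite_cache=False,
        web_train_file="./websrc/train.json", web_eval_file="./websrc/dev.json",
        web_root_dir="./websrc/raw", web_num_features=0, web_save_features=True,
        max_seq_length=384, doc_stride=128, max_query_length=64, model_type="markuplm",
    )
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    train_dataset = get_websrc_dataset(args, tokenizer, evaluate=False)
    return train_dataset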
# ===== source: EXA-1-master / exa/models/unilm-master/xdoc/fine_tuning/websrc/websrc.py =====
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers import BertConfig, BertModel, BertPreTrainedModel, RobertaConfig
# from transformers.modeling_bert import BertLayerNorm, BertOnlyMLMHead
from transformers.models.bert.modeling_bert import BertOnlyMLMHead
BertLayerNorm = torch.nn.LayerNorm
logger = logging.getLogger(__name__)
LAYOUTLMV1_PRETRAINED_MODEL_ARCHIVE_MAP = {}
LAYOUTLMV1_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class Layoutlmv1Config_roberta(RobertaConfig):
pretrained_config_archive_map = LAYOUTLMV1_PRETRAINED_CONFIG_ARCHIVE_MAP
model_type = "bert"
def __init__(self, max_2d_position_embeddings=1024, add_linear=False, **kwargs):
super().__init__(**kwargs)
self.max_2d_position_embeddings = max_2d_position_embeddings
self.add_linear = add_linear # determine whether to add an additional mapping
class Layoutlmv1Config(BertConfig):
pretrained_config_archive_map = LAYOUTLMV1_PRETRAINED_CONFIG_ARCHIVE_MAP
model_type = "bert"
def __init__(self, max_2d_position_embeddings=1024, add_linear=False, **kwargs):
super().__init__(**kwargs)
self.max_2d_position_embeddings = max_2d_position_embeddings
self.add_linear = add_linear # determine whether to add an additional mapping
class WebConfig:
max_depth = 50
xpath_unit_hidden_size = 32
hidden_size = 768
hidden_dropout_prob = 0.1
layer_norm_eps = 1e-12
max_xpath_tag_unit_embeddings = 256
max_xpath_subs_unit_embeddings = 1024
class XPathEmbeddings(nn.Module):
"""Construct the embddings from xpath -- tag and subscript"""
# we drop tree-id in this version, as its info can be covered by xpath
def __init__(self, config):
super(XPathEmbeddings, self).__init__()
config = WebConfig()
self.max_depth = config.max_depth
self.xpath_unitseq2_embeddings = nn.Linear(
config.xpath_unit_hidden_size * self.max_depth, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.activation = nn.ReLU()
self.xpath_unitseq2_inner = nn.Linear(config.xpath_unit_hidden_size * self.max_depth, 4 * config.hidden_size)
self.inner2emb = nn.Linear(4 * config.hidden_size, config.hidden_size)
self.xpath_tag_sub_embeddings = nn.ModuleList(
[nn.Embedding(config.max_xpath_tag_unit_embeddings, config.xpath_unit_hidden_size) for _ in
range(self.max_depth)])
self.xpath_subs_sub_embeddings = nn.ModuleList(
[nn.Embedding(config.max_xpath_subs_unit_embeddings, config.xpath_unit_hidden_size) for _ in
range(self.max_depth)])
def forward(self,
xpath_tags_seq=None,
xpath_subs_seq=None):
xpath_tags_embeddings = []
xpath_subs_embeddings = []
for i in range(self.max_depth):
xpath_tags_embeddings.append(self.xpath_tag_sub_embeddings[i](xpath_tags_seq[:, :, i]))
xpath_subs_embeddings.append(self.xpath_subs_sub_embeddings[i](xpath_subs_seq[:, :, i]))
xpath_tags_embeddings = torch.cat(xpath_tags_embeddings, dim=-1)
xpath_subs_embeddings = torch.cat(xpath_subs_embeddings, dim=-1)
xpath_embeddings = xpath_tags_embeddings + xpath_subs_embeddings
xpath_embeddings = self.inner2emb(
self.dropout(self.activation(self.xpath_unitseq2_inner(xpath_embeddings))))
return xpath_embeddings
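# A minimal shape check for XPathEmbeddings. The `config` argument is ignored and replaced
# by WebConfig() inside __init__, so any placeholder works here; tag / subscript ids only
# have to stay below 256 and 1024 respectively. The batch and sequence sizes are arbitrary.
def _example_xpath_embeddings():
    xpe = XPathEmbeddings(config=None)
    xpath_tags_seq = torch.randint(0, 256, (2, 8, 50))   # (batch, seq_len, max_depth)
    xpath_subs_seq = torch.randint(0, 1024, (2, 8, 50))
    out = xpe(xpath_tags_seq=xpath_tags_seq, xpath_subs_seq=xpath_subs_seq)
    assert out.shape == (2, 8, 768)  # projected down to hidden_size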
class Layoutlmv1Embeddings(nn.Module):
def __init__(self, config):
super(Layoutlmv1Embeddings, self).__init__()
self.config = config
self.word_embeddings = nn.Embedding(
config.vocab_size, config.hidden_size, padding_idx=0
)
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size
)
self.x_position_embeddings = nn.Embedding(
config.max_2d_position_embeddings, config.hidden_size
)
self.y_position_embeddings = nn.Embedding(
config.max_2d_position_embeddings, config.hidden_size
)
self.h_position_embeddings = nn.Embedding(
config.max_2d_position_embeddings, config.hidden_size
)
self.w_position_embeddings = nn.Embedding(
config.max_2d_position_embeddings, config.hidden_size
)
self.token_type_embeddings = nn.Embedding(
config.type_vocab_size, config.hidden_size
)
# for web extension
self.xpath_embeddings = XPathEmbeddings(config)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.doc_linear1 = nn.Linear(config.hidden_size, config.hidden_size)
self.doc_linear2 = nn.Linear(config.hidden_size, config.hidden_size)
self.web_linear1 = nn.Linear(config.hidden_size, config.hidden_size)
self.web_linear2 = nn.Linear(config.hidden_size, config.hidden_size)
self.web_linear3 = nn.Linear(config.hidden_size, config.hidden_size)
self.web_linear4 = nn.Linear(config.hidden_size, config.hidden_size)
self.relu = nn.ReLU()
def forward(
self,
input_ids,
bbox=None,
xpath_tags_seq=None,
xpath_subs_seq=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
embedding_mode=None
):
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(
seq_length, dtype=torch.long, device=input_ids.device
)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
if embedding_mode != None and embedding_mode == 'box' : # doc entry
bbox = torch.clamp(bbox, 0, self.config.max_2d_position_embeddings-1)
left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
embeddings = (
words_embeddings
+ position_embeddings
+ left_position_embeddings
+ upper_position_embeddings
# + right_position_embeddings
# + lower_position_embeddings
# + h_position_embeddings
# + w_position_embeddings
+ token_type_embeddings
)
elif embedding_mode != None and embedding_mode == 'html+box' : # doc entry
bbox = torch.clamp(bbox, 0, self.config.max_2d_position_embeddings-1)
left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
xpath_embeddings = self.xpath_embeddings(xpath_tags_seq, xpath_subs_seq)
embeddings = (
words_embeddings
+ position_embeddings
+ left_position_embeddings
+ upper_position_embeddings
+ xpath_embeddings
# + right_position_embeddings
# + lower_position_embeddings
# + h_position_embeddings
# + w_position_embeddings
+ token_type_embeddings
)
else: # web entry
if not self.config.add_linear:
xpath_embeddings = self.xpath_embeddings(xpath_tags_seq, xpath_subs_seq)
embeddings = (
words_embeddings
+ position_embeddings
+ token_type_embeddings
+ xpath_embeddings
)
else:
xpath_embeddings = self.xpath_embeddings(xpath_tags_seq, xpath_subs_seq)
temp_embeddings = self.web_linear2(self.relu(self.web_linear1(
xpath_embeddings
)))
embeddings = (
words_embeddings
+ position_embeddings
+ token_type_embeddings
+ temp_embeddings
)
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class Layoutlmv1Model(BertModel):
config_class = Layoutlmv1Config
pretrained_model_archive_map = LAYOUTLMV1_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "bert"
def __init__(self, config):
super(Layoutlmv1Model, self).__init__(config)
self.embeddings = Layoutlmv1Embeddings(config)
self.init_weights()
def forward(
self,
input_ids,
bbox=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
xpath_tags_seq=None,
xpath_subs_seq=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
embedding_mode=None,
):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(
dtype=torch.float32
# dtype=next(self.parameters()).dtype  # this can trigger an error with newer torch versions
) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = (
head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
)
head_mask = head_mask.expand(
self.config.num_hidden_layers, -1, -1, -1, -1
)
elif head_mask.dim() == 2:
head_mask = (
head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
) # We can specify head_mask for each layer
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
) # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(
input_ids, bbox=bbox, xpath_tags_seq=xpath_tags_seq, xpath_subs_seq=xpath_subs_seq, position_ids=position_ids, token_type_ids=token_type_ids, embedding_mode=embedding_mode
)
encoder_outputs = self.encoder(
embedding_output, extended_attention_mask, head_mask=head_mask
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
outputs = (sequence_output, pooled_output) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
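# A hedged usage sketch of Layoutlmv1Model on the WebSRC ("web entry") path, i.e. with
# xpath features and no bounding boxes. The 2-layer config is an illustrative assumption;
# the hidden size stays at the BERT default of 768 because WebConfig hard-codes it for
# the xpath embeddings.
def _example_layoutlmv1_model():
    config = Layoutlmv1Config(num_hidden_layers=2)
    model = Layoutlmv1Model(config)
    input_ids = torch.randint(0, config.vocab_size, (2, 8))
    outputs = model(
        input_ids,
        attention_mask=torch.ones(2, 8, dtype=torch.long),
        xpath_tags_seq=torch.randint(0, 256, (2, 8, 50)),
        xpath_subs_seq=torch.randint(0, 1024, (2, 8, 50)),
    )
    sequence_output, pooled_output = outputs[0], outputs[1]
    assert sequence_output.shape == (2, 8, config.hidden_size)
    assert pooled_output.shape == (2, config.hidden_size)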
class Layoutlmv1ForTokenClassification(BertPreTrainedModel):
config_class = Layoutlmv1Config
pretrained_model_archive_map = LAYOUTLMV1_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "bert"
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = Layoutlmv1Model(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def forward(
self,
input_ids,
bbox=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
outputs = self.bert(
input_ids=input_ids,
bbox=bbox,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
outputs = (logits,) + outputs[
2:
] # add hidden states and attention if they are here
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), scores, (hidden_states), (attentions)
class Layoutlmv1ForMaskedLM(BertPreTrainedModel):
config_class = Layoutlmv1Config
pretrained_model_archive_map = LAYOUTLMV1_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "bert"
def __init__(self, config):
super().__init__(config)
self.bert = Layoutlmv1Model(config)
self.cls = BertOnlyMLMHead(config)
self.init_weights()
def get_input_embeddings(self):
return self.bert.embeddings.word_embeddings
def get_output_embeddings(self):
return self.cls.predictions.decoder
def forward(
self,
input_ids,
bbox=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
masked_lm_labels=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
lm_labels=None,
xpath_tags_seq=None,
xpath_subs_seq=None,
):
outputs = self.bert(
input_ids,
bbox,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
xpath_tags_seq=xpath_tags_seq,
xpath_subs_seq=xpath_subs_seq,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
outputs = (prediction_scores,) + outputs[
2:
] # Add hidden states and attention if they are here
# Although this may seem awkward, BertForMaskedLM supports two scenarios:
# 1. If a tensor that contains the indices of masked labels is provided,
# the cross-entropy is the MLM cross-entropy that measures the likelihood
# of predictions for masked words.
# 2. If `lm_labels` is provided we are in a causal scenario where we
# try to predict the next token for each input in the decoder.
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(
prediction_scores.view(-1, self.config.vocab_size),
masked_lm_labels.view(-1),
)
outputs = (masked_lm_loss,) + outputs
return (
outputs
) # (masked_lm_loss), (ltr_lm_loss), prediction_scores, (hidden_states), (attentions)
class Layoutlmv1ForMaskedLM_roberta(BertPreTrainedModel):
config_class = Layoutlmv1Config
pretrained_model_archive_map = LAYOUTLMV1_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "bert"
def __init__(self, config):
super().__init__(config)
self.roberta = Layoutlmv1Model(config)
self.cls = BertOnlyMLMHead(config)
self.init_weights()
def get_input_embeddings(self):
return self.roberta.embeddings.word_embeddings
def get_output_embeddings(self):
return self.cls.predictions.decoder
def forward(
self,
input_ids,
bbox=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
masked_lm_labels=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
lm_labels=None,
xpath_tags_seq=None,
xpath_subs_seq=None,
):
outputs = self.roberta(
input_ids,
bbox,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
xpath_tags_seq=xpath_tags_seq,
xpath_subs_seq=xpath_subs_seq,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
outputs = (prediction_scores,) + outputs[
2:
] # Add hidden states and attention if they are here
# Although this may seem awkward, BertForMaskedLM supports two scenarios:
# 1. If a tensor that contains the indices of masked labels is provided,
# the cross-entropy is the MLM cross-entropy that measures the likelihood
# of predictions for masked words.
# 2. If `lm_labels` is provided we are in a causal scenario where we
# try to predict the next token for each input in the decoder.
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(
prediction_scores.view(-1, self.config.vocab_size),
masked_lm_labels.view(-1),
)
outputs = (masked_lm_loss,) + outputs
return (
outputs
) # (masked_lm_loss), (ltr_lm_loss), prediction_scores, (hidden_states), (attentions)
class Layoutlmv1ForQuestionAnswering(BertPreTrainedModel):
config_class = Layoutlmv1Config
pretrained_model_archive_map = LAYOUTLMV1_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "bert"
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = Layoutlmv1Model(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def forward(
self,
input_ids,
bbox=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
# inputs_embeds=None,
start_positions=None,
end_positions=None,
# output_attentions=None,
# output_hidden_states=None,
# return_dict=None,
xpath_tags_seq=None,
xpath_subs_seq=None,
embedding_mode=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside of the sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside of the sequence are not taken into account for computing the loss.
"""
# return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids=input_ids,
bbox=bbox,
xpath_tags_seq=xpath_tags_seq,
xpath_subs_seq=xpath_subs_seq,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
embedding_mode=embedding_mode
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, splitting adds an extra dimension that we squeeze here
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs; we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
# if not return_dict:
# output = (start_logits, end_logits) + outputs[2:]
# return ((total_loss,) + output) if total_loss is not None else output
#
# return QuestionAnsweringModelOutput(
# loss=total_loss,
# start_logits=start_logits,
# end_logits=end_logits,
# hidden_states=outputs.hidden_states,
# attentions=outputs.attentions,
# )
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
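# ---------------------------------------------------------------------------
# Hedged usage sketch for the QA head above (comments only, nothing executed).
# Tensor names and shapes are assumptions inferred from the forward signature;
# in practice the batch is built by the WebSRC dataset loader, not by hand.
#
#   model = Layoutlmv1ForQuestionAnswering.from_pretrained("/path/to/checkpoint")
#   outputs = model(
#       input_ids=input_ids,              # (batch_size, seq_len)
#       attention_mask=attention_mask,    # (batch_size, seq_len)
#       xpath_tags_seq=xpath_tags_seq,    # xpath tag ids per token
#       xpath_subs_seq=xpath_subs_seq,    # xpath subscript ids per token
#       start_positions=start_positions,  # (batch_size,), training only
#       end_positions=end_positions,      # (batch_size,), training only
#       embedding_mode="html",
#   )
#   # when start/end positions are provided, the tuple starts with the loss:
#   total_loss, start_logits, end_logits = outputs[0], outputs[1], outputs[2]
# ---------------------------------------------------------------------------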
class Layoutlmv1ForQuestionAnswering_roberta(BertPreTrainedModel):
config_class = Layoutlmv1Config
pretrained_model_archive_map = LAYOUTLMV1_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "bert"
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = Layoutlmv1Model(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def forward(
self,
input_ids,
bbox=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
# inputs_embeds=None,
start_positions=None,
end_positions=None,
# output_attentions=None,
# output_hidden_states=None,
# return_dict=None,
xpath_tags_seq=None,
xpath_subs_seq=None,
embedding_mode=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside of the sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside of the sequence are not taken into account for computing the loss.
"""
# return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids=input_ids,
bbox=bbox,
xpath_tags_seq=xpath_tags_seq,
xpath_subs_seq=xpath_subs_seq,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
embedding_mode=embedding_mode
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, splitting adds an extra dimension that we squeeze here
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs; we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
# if not return_dict:
# output = (start_logits, end_logits) + outputs[2:]
# return ((total_loss,) + output) if total_loss is not None else output
#
# return QuestionAnsweringModelOutput(
# loss=total_loss,
# start_logits=start_logits,
# end_logits=end_logits,
# hidden_states=outputs.hidden_states,
# attentions=outputs.attentions,
# )
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/websrc/model.py |
tags_dict = {'a': 0, 'abbr': 1, 'acronym': 2, 'address': 3, 'altGlyph': 4, 'altGlyphDef': 5, 'altGlyphItem': 6,
'animate': 7, 'animateColor': 8, 'animateMotion': 9, 'animateTransform': 10, 'applet': 11, 'area': 12,
'article': 13, 'aside': 14, 'audio': 15, 'b': 16, 'base': 17, 'basefont': 18, 'bdi': 19, 'bdo': 20,
'bgsound': 21, 'big': 22, 'blink': 23, 'blockquote': 24, 'body': 25, 'br': 26, 'button': 27, 'canvas': 28,
'caption': 29, 'center': 30, 'circle': 31, 'cite': 32, 'clipPath': 33, 'code': 34, 'col': 35,
'colgroup': 36, 'color-profile': 37, 'content': 38, 'cursor': 39, 'data': 40, 'datalist': 41, 'dd': 42,
'defs': 43, 'del': 44, 'desc': 45, 'details': 46, 'dfn': 47, 'dialog': 48, 'dir': 49, 'div': 50, 'dl': 51,
'dt': 52, 'ellipse': 53, 'em': 54, 'embed': 55, 'feBlend': 56, 'feColorMatrix': 57,
'feComponentTransfer': 58, 'feComposite': 59, 'feConvolveMatrix': 60, 'feDiffuseLighting': 61,
'feDisplacementMap': 62, 'feDistantLight': 63, 'feFlood': 64, 'feFuncA': 65, 'feFuncB': 66, 'feFuncG': 67,
'feFuncR': 68, 'feGaussianBlur': 69, 'feImage': 70, 'feMerge': 71, 'feMergeNode': 72, 'feMorphology': 73,
'feOffset': 74, 'fePointLight': 75, 'feSpecularLighting': 76, 'feSpotLight': 77, 'feTile': 78,
'feTurbulence': 79, 'fieldset': 80, 'figcaption': 81, 'figure': 82, 'filter': 83, 'font-face-format': 84,
'font-face-name': 85, 'font-face-src': 86, 'font-face-uri': 87, 'font-face': 88, 'font': 89, 'footer': 90,
'foreignObject': 91, 'form': 92, 'frame': 93, 'frameset': 94, 'g': 95, 'glyph': 96, 'glyphRef': 97,
'h1': 98, 'h2': 99, 'h3': 100, 'h4': 101, 'h5': 102, 'h6': 103, 'head': 104, 'header': 105, 'hgroup': 106,
'hkern': 107, 'hr': 108, 'html': 109, 'i': 110, 'iframe': 111, 'image': 112, 'img': 113, 'input': 114,
'ins': 115, 'kbd': 116, 'keygen': 117, 'label': 118, 'legend': 119, 'li': 120, 'line': 121,
'linearGradient': 122, 'link': 123, 'main': 124, 'map': 125, 'mark': 126, 'marker': 127, 'marquee': 128,
'mask': 129, 'math': 130, 'menu': 131, 'menuitem': 132, 'meta': 133, 'metadata': 134, 'meter': 135,
'missing-glyph': 136, 'mpath': 137, 'nav': 138, 'nobr': 139, 'noembed': 140, 'noframes': 141,
'noscript': 142, 'object': 143, 'ol': 144, 'optgroup': 145, 'option': 146, 'output': 147, 'p': 148,
'param': 149, 'path': 150, 'pattern': 151, 'picture': 152, 'plaintext': 153, 'polygon': 154,
'polyline': 155, 'portal': 156, 'pre': 157, 'progress': 158, 'q': 159, 'radialGradient': 160, 'rb': 161,
'rect': 162, 'rp': 163, 'rt': 164, 'rtc': 165, 'ruby': 166, 's': 167, 'samp': 168, 'script': 169,
'section': 170, 'select': 171, 'set': 172, 'shadow': 173, 'slot': 174, 'small': 175, 'source': 176,
'spacer': 177, 'span': 178, 'stop': 179, 'strike': 180, 'strong': 181, 'style': 182, 'sub': 183,
'summary': 184, 'sup': 185, 'svg': 186, 'switch': 187, 'symbol': 188, 'table': 189, 'tbody': 190,
'td': 191, 'template': 192, 'text': 193, 'textPath': 194, 'textarea': 195, 'tfoot': 196, 'th': 197,
'thead': 198, 'time': 199, 'title': 200, 'tr': 201, 'track': 202, 'tref': 203, 'tspan': 204, 'tt': 205,
'u': 206, 'ul': 207, 'use': 208, 'var': 209, 'video': 210, 'view': 211, 'vkern': 212, 'wbr': 213,
'xmp': 214}
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/websrc/web_tag_utils.py |
import os
import sys
sys.path.append(os.getcwd())
import torch
import torch.nn as nn
import shutil
import logging
import torch.distributed as dist
from transformers import (
BertTokenizer,
RobertaTokenizer
)
from args import args
from model import (
Layoutlmv1ForQuestionAnswering,
Layoutlmv1Config,
Layoutlmv1Config_roberta,
Layoutlmv1ForQuestionAnswering_roberta
)
from util import set_seed, set_exp_folder, check_screen
from trainer import train, evaluate # choose a specific train function
# from data.datasets.docvqa import DocvqaDataset
from websrc import get_websrc_dataset
def main(args):
set_seed(args)
set_exp_folder(args)
# Set up logger
logging.basicConfig(filename="{}/output/{}/log.txt".format(args.output_dir, args.exp_name), level=logging.INFO,
format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info('Args '+str(args))
# Get config, model, and tokenizer
if args.model_type == 'bert':
config_class, model_class, tokenizer_class = Layoutlmv1Config, Layoutlmv1ForQuestionAnswering, BertTokenizer
elif args.model_type == 'roberta':
config_class, model_class, tokenizer_class = Layoutlmv1Config_roberta, Layoutlmv1ForQuestionAnswering_roberta, RobertaTokenizer
config = config_class.from_pretrained(
args.model_name_or_path, cache_dir=args.cache_dir
)
config.add_linear = args.add_linear
tokenizer = tokenizer_class.from_pretrained(
args.model_name_or_path, cache_dir=args.cache_dir
)
model = model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir,
)
parameters = sum(p.numel() for p in model.parameters())
print("Total params: %.2fM" % (parameters/1e6))
## Start training
if args.do_train:
dataset_web = get_websrc_dataset(args, tokenizer)
logging.info(f'Web dataset is successfully loaded. Length : {len(dataset_web)}')
train(args, dataset_web, model, tokenizer)
# ## Start evaluating
# if args.do_eval:
logging.info('Start evaluating')
dataset_web, examples, features = get_websrc_dataset(args, tokenizer, evaluate=True, output_examples=True)
logging.info(f'[Eval] Web dataset is successfully loaded. Length : {len(dataset_web)}')
evaluate(args, dataset_web, examples, features, model, tokenizer)
## Start testing
if args.do_test:
pass
if __name__ == '__main__':
main(args)
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/websrc/run_websrc.py |
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--exp_name", default='your_exp_name', type=str)
parser.add_argument("--seed", default=42, type=int)
parser.add_argument("--output_dir", default='.', type=str)
parser.add_argument("--overwrite_output_dir", default=True)
parser.add_argument("--model_name_or_path", type=str, default='/path/to/xdoc-pretrain-roberta-1M')
parser.add_argument("--cache_dir", type=str, default='./cache')
parser.add_argument("--scratch", action='store_true')
parser.add_argument("--model_type", default='roberta', choices=['bert', 'roberta'])
parser.add_argument("--do_train", default=False, type=bool)
parser.add_argument("--do_eval", default=True, type=bool)
parser.add_argument("--do_test", default=False, type=bool)
parser.add_argument("--overwrite_cache", default=False, type=bool)
parser.add_argument("--train_file", default='train.json', type=str)
parser.add_argument("--dev_file", default='val.json', type=str)
parser.add_argument("--test_file", default='test.json', type=str)
parser.add_argument("--doc_stride", default=128, type=int)
parser.add_argument("--threads", default=1, type=int)
parser.add_argument("--max_query_length", default=64, type=int)
parser.add_argument("--local_rank", default=-1, type=int)
parser.add_argument("--pad_img_input", default=False, type=bool)
parser.add_argument("--fix_visual", default=False, type=bool)
parser.add_argument("--num_workers", default=8, type=int)
# dataset (args to load websrc)
parser.add_argument("--web_train_file", default='/path/to/WebSRC/websrc1.0_train_.json', type=str)
parser.add_argument("--web_eval_file", default='/path/to/WebSRC/websrc1.0_dev_.json', type=str)
parser.add_argument("--web_root_dir", default='/path/to/WebSRC', type=str)
parser.add_argument("--root_dir", default='/path/to/WebSRC', type=str)
parser.add_argument("--n_best_size", default=20, type=int)
parser.add_argument("--max_answer_length", default=30, type=int)
parser.add_argument("--do_lower_case", default=True, type=bool)
parser.add_argument("--web_num_features", default=0, type=int)
parser.add_argument("--web_save_features", default=True, type=bool)
parser.add_argument("--verbose_logging", default=True, type=bool)
parser.add_argument("--embedding_mode", choices=['html','box','html+box'], default='html', type=str)
parser.add_argument("--dataloader_shuffle", default=True, type=bool)
# train
parser.add_argument("--batch_per_gpu", default=16, type=int)
parser.add_argument("--epoch", default=5, type=int)
parser.add_argument("--warmup_ratio", default=0.1, type=float)
parser.add_argument("--weight_decay", default=0, type=float)
parser.add_argument("--fp16", default=False)
parser.add_argument("--fp16_opt_level", default='O1', type=str)
parser.add_argument("--learning_rate", default=5e-5, type=float)
parser.add_argument("--adam_epsilon", default=1e-8, type=float)
parser.add_argument("--max_grad_norm", default=1, type=float)
parser.add_argument("--log_step", default=50, type=int)
parser.add_argument("--save_step", default=10000, type=int)
parser.add_argument("--add_linear", default=True, type=bool)
parser.add_argument("--accumulation", default=1, type=int)
# mlm
parser.add_argument("--mlm_probability", default=0.15, type=float)
args = parser.parse_args() | EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/websrc/args.py |
from genericpath import exists
import os
import torch.nn as nn
import torch
import logging
from tqdm import tqdm, trange
import timeit
import collections
import json
import math
from bs4 import BeautifulSoup
from copy import deepcopy
import string
import re
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler)
from transformers import (
BasicTokenizer,
)
from transformers import (
AdamW,
get_linear_schedule_with_warmup,
)
def reorganize_batch_web(args, batch_web):
dic = {}
dic['input_ids'] = batch_web[0].cuda()
dic['attention_mask'] = batch_web[1].cuda()
dic['token_type_ids'] = batch_web[2].cuda()
dic['xpath_tags_seq'] = batch_web[3].cuda()
dic['xpath_subs_seq'] = batch_web[4].cuda()
dic['start_positions'] = batch_web[5].cuda()
dic['end_positions'] = batch_web[6].cuda()
if 'box' in args.embedding_mode:
dic['bbox'] = batch_web[7].cuda() # new added
dic['embedding_mode'] = args.embedding_mode
return dic
def train(args, dataset_web, model, tokenizer):
# torch.cuda.set_device(args.local_rank)
# Log when executing on clusters
try:
from azureml.core.run import Run
aml_run = Run.get_context()
except:
aml_run = None
# Open tensorboard
writer = SummaryWriter(f'{args.output_dir}/output/{args.exp_name}')
# Count batch
gpu_nums = torch.cuda.device_count()
batch = args.batch_per_gpu * gpu_nums
dataloader_web = DataLoader(
dataset_web, batch_size=batch, num_workers=args.num_workers, pin_memory=False, shuffle=True,
)
# Get warmup steps
total_step = args.epoch * len(dataloader_web)
warmup_steps = int(args.warmup_ratio * total_step)
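    # Illustrative numbers only: with args.epoch = 5, len(dataloader_web) = 1000
    # and warmup_ratio = 0.1, total_step = 5000 and warmup_steps = 500.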
# Prepare optimizers
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": args.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = AdamW(
optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon
)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_step
)
# Transfer the parameters to cuda
model = model.cuda()
# Prepare fp16
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
)
model, optimizer = amp.initialize(
model, optimizer, opt_level=args.fp16_opt_level
)
logging.info('Successfully load fp16 mode')
# Parallel or Distribute
if gpu_nums > 1:
model = torch.nn.DataParallel(model)
# Record some training info
logging.info("***** Running training *****")
# logging.info(" Num examples in dataset_doc = %d", len(dataset_doc))
logging.info(" Num examples in dataset_web = %d", len(dataset_web))
# logging.info(" Num steps for each epoch for doc = %d", len(dataloader_doc))
logging.info(" Num steps for each epoch for web = %d", len(dataloader_web))
logging.info(" Num Epochs = %d", args.epoch)
logging.info(
" Instantaneous batch size per GPU = %d", args.batch_per_gpu
)
logging.info(" Total optimization steps = %d", total_step)
# Start training
model.zero_grad()
train_iterator = trange(
0,
int(args.epoch),
desc="Epoch",
)
global_step = 0
for now_epoch, _ in enumerate(tqdm(train_iterator, desc="Iteration")): # tqdm for epoch
# epoch_iterator_doc = iter(dataloader_doc)
epoch_iterator_web = iter(dataloader_web)
min_step = len(epoch_iterator_web)
for now_step in tqdm(range(min_step), desc="Iteration"): # tqdm for step
            # batch_doc = next(epoch_iterator_doc)
            batch_web = next(epoch_iterator_web)
batch_web = reorganize_batch_web(args, batch_web)
model.train()
# loss_doc = model(**batch_doc)[0]
loss_web = model(**batch_web)[0]
loss = loss_web
if gpu_nums > 1:
loss = loss.mean()
# loss_doc = loss_doc.mean()
loss_web = loss_web.mean()
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
if args.fp16:
torch.nn.utils.clip_grad_norm_(
amp.master_params(optimizer), args.max_grad_norm
)
else:
torch.nn.utils.clip_grad_norm_(
model.parameters(), args.max_grad_norm
)
if global_step % args.accumulation == 0:
optimizer.step()
model.zero_grad()
scheduler.step()
global_step += 1
if global_step % args.log_step == 0:
logging.info(f'epoch: {now_epoch} | step: {now_step+1} | total_step: {global_step} | loss: {loss} | lr: {scheduler.get_lr()[0]}')
writer.add_scalar('loss', loss, global_step//args.log_step)
# writer.add_scalar('loss_doc', loss_doc, global_step//args.log_step)
writer.add_scalar('loss_web', loss_web, global_step//args.log_step)
writer.add_scalar('lr', scheduler.get_lr()[0], global_step//args.log_step)
if aml_run is not None:
aml_run.log('loss', loss.item())
# aml_run.log('loss_doc', loss_doc.item())
aml_run.log('loss_web', loss_web.item())
aml_run.log('lr', scheduler.get_lr()[0])
if global_step % args.save_step == 0:
# Save model checkpoint
output_dir = os.path.join(args.output_dir, 'output', args.exp_name, f'step-{global_step}')
os.makedirs(output_dir, exist_ok=True)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logging.info("Saving model checkpoint to %s", output_dir)
torch.save(
optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")
)
torch.save(
scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")
)
logging.info(
"Saving optimizer and scheduler states to %s", output_dir
)
if global_step % 1000 == 0:
# eval
print('Start eval!')
                from websrc import get_websrc_dataset
dataset_web, examples, features = get_websrc_dataset(args, tokenizer, evaluate=True, output_examples=True)
evaluate(args, dataset_web, examples, features, model, tokenizer, global_step)
RawResult = collections.namedtuple("RawResult",
["unique_id", "start_logits", "end_logits"])
def to_list(tensor):
return tensor.detach().cpu().tolist()
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
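# Illustrative example (assumed values): _get_best_indexes([0.1, 0.7, 0.3], n_best_size=2)
# sorts by logit and returns the positions [1, 2].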
def _get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return ns_text, ns_to_s_map
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
# if verbose_logging:
# logging.info(
# "Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if verbose_logging:
logging.info("Length not equal after stripping spaces: '%s' vs '%s'",
orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if verbose_logging:
logging.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if verbose_logging:
logging.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
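# Illustrative example (assumed values): _compute_softmax([1.0, 2.0, 3.0]) returns
# approximately [0.090, 0.245, 0.665]; the probabilities sum to 1.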
class EvalOpts:
r"""
    The options which the metric evaluation process needs.
    Arguments:
        data_file (str): the SQuAD-style json file of the dataset in evaluation.
        root_dir (str): the root directory of the raw WebSRC dataset, which contains the HTML files.
        pred_file (str): the prediction file which contains the best predicted answer text of each question from the
            model.
        tag_pred_file (str): the prediction file which contains the best predicted answer tag id of each question from
            the model.
        result_file (str): the file to write down the metric evaluation results of each question.
        out_file (str): the file to write down the final metric evaluation results of the whole dataset.
"""
def __init__(self, data_file, root_dir, pred_file, tag_pred_file, result_file='', out_file=""):
self.data_file = data_file
self.root_dir = root_dir
self.pred_file = pred_file
self.tag_pred_file = tag_pred_file
self.result_file = result_file
self.out_file = out_file
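# Hedged construction sketch (the file names mirror those produced by `evaluate`
# below; the paths are placeholders, not the ones used in the original experiments):
#   opts = EvalOpts(data_file="/path/to/WebSRC/websrc1.0_dev_.json",
#                   root_dir="/path/to/WebSRC",
#                   pred_file="predictions_0.json",
#                   tag_pred_file="tag_predictions_0.json",
#                   result_file="qas_eval_results_0.json",
#                   out_file="eval_matrix_results_0")
#   results = evaluate_on_squad(opts)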
def write_predictions(all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case,
output_prediction_file, output_tag_prediction_file,
output_nbest_file, verbose_logging, tokenizer):
r"""
Compute and write down the final results, including the n best results.
Arguments:
        all_examples (list[SRCExample]): all the SRC Examples of the dataset; note that we only need it to provide the
            mapping from example index to the question-answer ids.
        all_features (list[InputFeatures]): all the features for the input doc spans.
        all_results (list[RawResult]): all the results from the model.
        n_best_size (int): the size of the n-best buffer and the number of final n-best results saved.
        max_answer_length (int): constrain the model to predict answers no longer than this.
        do_lower_case (bool): whether the model distinguishes upper and lower case letters.
        output_prediction_file (str): the file which the best answer text predictions will be written to.
        output_tag_prediction_file (str): the file which the best answer tag predictions will be written to.
        output_nbest_file (str): the file which the n-best answer predictions, including text, tag, and probabilities,
            will be written to.
        verbose_logging (bool): if true, all of the warnings related to data processing will be printed.
"""
logging.info("Writing predictions to: %s" % output_prediction_file)
logging.info("Writing nbest to: %s" % output_nbest_file)
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit", "tag_ids"])
all_predictions = collections.OrderedDict()
all_tag_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
# if we could have irrelevant answers, get the min score of irrelevant
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
tag_ids = set(feature.token_to_tag_index[start_index: end_index + 1])
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index],
tag_ids=list(tag_ids)))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit", "tag_ids"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = _get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit,
tag_ids=pred.tag_ids))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0, tag_ids=[-1]))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
output["tag_ids"] = entry.tag_ids
nbest_json.append(output)
assert len(nbest_json) >= 1
best = nbest_json[0]["text"].split()
best = ' '.join([w for w in best
if (w[0] != '<' or w[-1] != '>')
and w != "<end-of-node>"
and w != tokenizer.sep_token
and w != tokenizer.cls_token])
all_predictions[example.qas_id] = best
all_tag_predictions[example.qas_id] = nbest_json[0]["tag_ids"]
all_nbest_json[example.qas_id] = nbest_json
with open(output_prediction_file, "w+") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with open(output_nbest_file, "w+") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
with open(output_tag_prediction_file, 'w+') as writer:
writer.write(json.dumps(all_tag_predictions, indent=4) + '\n')
return
def make_qid_to_has_ans(dataset):
r"""
    Build a dict mapping each question id in the dataset to whether that question has an answer.
"""
qid_to_has_ans = {}
for domain in dataset:
for w in domain['websites']:
for qa in w['qas']:
qid_to_has_ans[qa['id']] = bool(qa['answers'])
return qid_to_has_ans
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
return re.sub(regex, ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
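# Illustrative example (assumed input): normalize_answer("The  Quick, Brown fox!")
# lowercases, strips punctuation and articles, and collapses whitespace,
# yielding "quick brown fox".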
def compute_exact(a_gold, a_pred):
r"""
Calculate the exact match.
"""
if normalize_answer(a_gold) == normalize_answer(a_pred):
return 1
return 0
def get_raw_scores(dataset, preds, tag_preds, root_dir):
r"""
    Calculate all three metrics (exact match, F1, POS) for each question.
Arguments:
dataset (dict): the dataset in use.
preds (dict): the answer text prediction for each question in the dataset.
tag_preds (dict): the answer tags prediction for each question in the dataset.
root_dir (str): the base directory for the html files.
Returns:
tuple(dict, dict, dict): exact match, f1, pos scores for each question.
"""
exact_scores = {}
f1_scores = {}
pos_scores = {}
for websites in dataset:
for w in websites['websites']:
f = os.path.join(root_dir, websites['domain'], w['page_id'][0:2], 'processed_data',
w['page_id'] + '.html')
for qa in w['qas']:
qid = qa['id']
gold_answers = [a['text'] for a in qa['answers']
if normalize_answer(a['text'])]
gold_tag_answers = [a['element_id'] for a in qa['answers']]
additional_tag_information = [a['answer_start'] for a in qa['answers']]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
gold_answers = ['']
if qid not in preds:
print('Missing prediction for %s' % qid)
continue
a_pred, t_pred = preds[qid], tag_preds[qid]
# Take max over all gold answers
exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
pos_scores[qid] = max(compute_pos(f, t, a, t_pred)
for t, a in zip(gold_tag_answers, additional_tag_information))
return exact_scores, f1_scores, pos_scores
def get_tokens(s):
r"""
Get the word list in the input.
"""
if not s:
return []
return normalize_answer(s).split()
def compute_f1(a_gold, a_pred):
r"""
Calculate the f1 score.
"""
gold_toks = get_tokens(a_gold)
pred_toks = get_tokens(a_pred)
common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
return f1
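# Worked example (assumed strings): for a_gold = "in the garden" and
# a_pred = "in the small garden", normalization drops the article "the", so the
# token bags are {in, garden} and {in, small, garden}; precision = 2/3,
# recall = 2/2, and f1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8.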
def compute_pos(f, t_gold, addition, t_pred):
r"""
Calculate the POS score.
Arguments:
f (str): the html file on which the question is based.
        t_gold (int): the gold answer tag id provided by the dataset (the value corresponds to the key element_id).
        addition (int): the additional information used for yes/no questions provided by the dataset (the value
            corresponds to the key answer_start).
        t_pred (list[int]): the tag ids of the tags corresponding to each word in the predicted answer.
Returns:
float: the POS score.
"""
    with open(f) as fp:
        h = BeautifulSoup(fp, "lxml")
p_gold, e_gold = set(), h.find(tid=t_gold)
if e_gold is None:
if len(t_pred) != 1:
return 0
else:
t = t_pred[0]
e_pred, e_prev = h.find(tid=t), h.find(tid=t-1)
if (e_pred is not None) or (addition == 1 and e_prev is not None) or\
(addition == 0 and e_prev is None):
return 0
else:
return 1
else:
p_gold.add(e_gold['tid'])
for e in e_gold.parents:
if int(e['tid']) < 2:
break
p_gold.add(e['tid'])
p = None
for t in t_pred:
p_pred, e_pred = set(), h.find(tid=t)
if e_pred is not None:
p_pred.add(e_pred['tid'])
if e_pred.name != 'html':
for e in e_pred.parents:
if int(e['tid']) < 2:
break
p_pred.add(e['tid'])
else:
p_pred.add(str(t))
if p is None:
p = p_pred
else:
p = p & p_pred
return len(p_gold & p) / len(p_gold | p)
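# Worked example (assumed tid sets): if the gold tag and its ancestors give
# p_gold = {'5', '3', '2'} and the predicted tags share the ancestor set
# p = {'5', '3'}, the POS score is |{'5','3'}| / |{'5','3','2'}| = 2 / 3.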
def make_pages_list(dataset):
r"""
    Record all the pages which appear in the dataset and return the list.
"""
pages_list = []
last_page = None
for domain in dataset:
for w in domain['websites']:
for qa in w['qas']:
if last_page != qa['id'][:4]:
last_page = qa['id'][:4]
pages_list.append(last_page)
return pages_list
def make_eval_dict(exact_scores, f1_scores, pos_scores, qid_list=None):
r"""
Make the dictionary to show the evaluation results.
"""
if qid_list is None:
total = len(exact_scores)
return collections.OrderedDict([
('exact', 100.0 * sum(exact_scores.values()) / total),
('f1', 100.0 * sum(f1_scores.values()) / total),
('pos', 100.0 * sum(pos_scores.values()) / total),
('total', total),
])
else:
total = len(qid_list)
if total == 0:
return collections.OrderedDict([
('exact', 0),
('f1', 0),
('pos', 0),
('total', 0),
])
return collections.OrderedDict([
('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total),
('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total),
('pos', 100.0 * sum(pos_scores[k] for k in qid_list) / total),
('total', total),
])
def merge_eval(main_eval, new_eval, prefix):
for k in new_eval:
main_eval['%s_%s' % (prefix, k)] = new_eval[k]
def evaluate_on_squad(opts):
with open(opts.data_file) as f:
dataset_json = json.load(f)
dataset = dataset_json['data']
if isinstance(opts.pred_file, str):
with open(opts.pred_file) as f:
preds = json.load(f)
else:
preds = opts.pred_file
if isinstance(opts.tag_pred_file, str):
with open(opts.tag_pred_file) as f:
tag_preds = json.load(f)
else:
tag_preds = opts.tag_pred_file
qid_to_has_ans = make_qid_to_has_ans(dataset)
has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
exact, f1, pos = get_raw_scores(dataset, preds, tag_preds, opts.root_dir)
out_eval = make_eval_dict(exact, f1, pos)
if has_ans_qids:
has_ans_eval = make_eval_dict(exact, f1, pos, qid_list=has_ans_qids)
merge_eval(out_eval, has_ans_eval, 'HasAns')
if no_ans_qids:
no_ans_eval = make_eval_dict(exact, f1, pos, qid_list=no_ans_qids)
merge_eval(out_eval, no_ans_eval, 'NoAns')
print(json.dumps(out_eval, indent=2))
pages_list, write_eval = make_pages_list(dataset), deepcopy(out_eval)
for p in pages_list:
pages_ans_qids = [k for k, _ in qid_to_has_ans.items() if p in k]
page_eval = make_eval_dict(exact, f1, pos, qid_list=pages_ans_qids)
merge_eval(write_eval, page_eval, p)
if opts.result_file:
with open(opts.result_file, 'w') as f:
w = {}
for k, v in qid_to_has_ans.items():
w[k] = {'exact': exact[k], 'f1': f1[k], 'pos': pos[k]}
json.dump(w, f)
if opts.out_file:
with open(opts.out_file, 'w') as f:
json.dump(write_eval, f)
print('****** result ******')
print(out_eval)
return out_eval
def evaluate(args, dataset_web, examples, features, model, tokenizer, step=0):
gpu_nums = torch.cuda.device_count()
batch = args.batch_per_gpu * gpu_nums
eval_sampler = SequentialSampler(dataset_web)
eval_dataloader = DataLoader(dataset_web, sampler=eval_sampler, batch_size=batch, num_workers=8)
# Eval!
logging.info("***** Running evaluation *****")
logging.info(" Num examples = %d", len(dataset_web))
logging.info(" Batch size = %d", batch)
model = model.cuda()
all_results = []
start_time = timeit.default_timer()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.cuda() for t in batch)
with torch.no_grad():
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2],
'xpath_tags_seq': batch[4],
'xpath_subs_seq': batch[5],
}
feature_indices = batch[3]
outputs = model(**inputs)
for i, feature_index in enumerate(feature_indices):
eval_feature = features[feature_index.item()]
unique_id = int(eval_feature.unique_id)
result = RawResult(unique_id=unique_id,
start_logits=to_list(outputs[0][i]),
end_logits=to_list(outputs[1][i]))
all_results.append(result)
eval_time = timeit.default_timer() - start_time
logging.info(" Evaluation done in total %f secs (%f sec per example)", eval_time, eval_time / len(dataset_web))
# Compute predictions
# output_dir = os.path.join(args.output_dir, 'output', args.exp_name, f'step-{global_step}')
output_prediction_file = os.path.join(args.output_dir,"output", args.exp_name, f"predictions_{step}.json")
output_tag_prediction_file = os.path.join(args.output_dir,"output", args.exp_name, f"tag_predictions_{step}.json")
output_nbest_file = os.path.join(args.output_dir,"output", args.exp_name, f"nbest_predictions_{step}.json")
output_result_file = os.path.join(args.output_dir,"output", args.exp_name, f"qas_eval_results_{step}.json")
output_file = os.path.join(args.output_dir,"output", args.exp_name, f"eval_matrix_results_{step}")
write_predictions(examples, features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case,
output_prediction_file, output_tag_prediction_file, output_nbest_file, args.verbose_logging,
tokenizer)
# Evaluate
evaluate_options = EvalOpts(data_file=args.web_eval_file,
root_dir=args.root_dir,
pred_file=output_prediction_file,
tag_pred_file=output_tag_prediction_file,
result_file=output_result_file,
out_file=output_file)
results = evaluate_on_squad(evaluate_options)
return results
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/websrc/trainer.py |
import collections
import json
import logging
import os
from typing import Optional, Tuple
import numpy as np
from tqdm.auto import tqdm
logger = logging.getLogger(__name__)
def postprocess_qa_predictions(
examples,
features,
predictions: Tuple[np.ndarray, np.ndarray],
version_2_with_negative: bool = False,
n_best_size: int = 20,
max_answer_length: int = 30,
null_score_diff_threshold: float = 0.0,
output_dir: Optional[str] = None,
prefix: Optional[str] = None,
log_level: Optional[int] = logging.WARNING,
):
"""
Post-processes the predictions of a question-answering model to convert them to answers that are substrings of the
    original contexts. This is the base postprocessing function for models that only return start and end logits.
Args:
examples: The non-preprocessed dataset (see the main script for more information).
features: The processed dataset (see the main script for more information).
predictions (:obj:`Tuple[np.ndarray, np.ndarray]`):
The predictions of the model: two arrays containing the start logits and the end logits respectively. Its
first dimension must match the number of elements of :obj:`features`.
version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the underlying dataset contains examples with no answers.
n_best_size (:obj:`int`, `optional`, defaults to 20):
The total number of n-best predictions to generate when looking for an answer.
max_answer_length (:obj:`int`, `optional`, defaults to 30):
The maximum length of an answer that can be generated. This is needed because the start and end predictions
are not conditioned on one another.
null_score_diff_threshold (:obj:`float`, `optional`, defaults to 0):
The threshold used to select the null answer: if the best answer has a score that is less than the score of
the null answer minus this threshold, the null answer is selected for this example (note that the score of
the null answer for an example giving several features is the minimum of the scores for the null answer on
each feature: all features must be aligned on the fact they `want` to predict a null answer).
Only useful when :obj:`version_2_with_negative` is :obj:`True`.
output_dir (:obj:`str`, `optional`):
If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if
:obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null
answers, are saved in `output_dir`.
prefix (:obj:`str`, `optional`):
If provided, the dictionaries mentioned above are saved with `prefix` added to their names.
log_level (:obj:`int`, `optional`, defaults to ``logging.WARNING``):
``logging`` log level (e.g., ``logging.WARNING``)
"""
if len(predictions) != 2:
raise ValueError("`predictions` should be a tuple with two elements (start_logits, end_logits).")
all_start_logits, all_end_logits = predictions
if len(predictions[0]) != len(features):
raise ValueError(f"Got {len(predictions[0])} predictions and {len(features)} features.")
# Build a map example to its corresponding features.
example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
features_per_example = collections.defaultdict(list)
for i, feature in enumerate(features):
features_per_example[example_id_to_index[feature["example_id"]]].append(i)
# The dictionaries we have to fill.
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
if version_2_with_negative:
scores_diff_json = collections.OrderedDict()
# Logging.
logger.setLevel(log_level)
logger.info(f"Post-processing {len(examples)} example predictions split into {len(features)} features.")
# Let's loop over all the examples!
for example_index, example in enumerate(tqdm(examples)):
# Those are the indices of the features associated to the current example.
feature_indices = features_per_example[example_index]
min_null_prediction = None
prelim_predictions = []
# Looping through all the features associated to the current example.
for feature_index in feature_indices:
# We grab the predictions of the model for this feature.
start_logits = all_start_logits[feature_index]
end_logits = all_end_logits[feature_index]
            # This is what will allow us to map some of the positions in our logits to spans of text in the original
# context.
offset_mapping = features[feature_index]["offset_mapping"]
# Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context
# available in the current feature.
token_is_max_context = features[feature_index].get("token_is_max_context", None)
# Update minimum null prediction.
feature_null_score = start_logits[0] + end_logits[0]
if min_null_prediction is None or min_null_prediction["score"] > feature_null_score:
min_null_prediction = {
"offsets": (0, 0),
"score": feature_null_score,
"start_logit": start_logits[0],
"end_logit": end_logits[0],
}
            # Go through all possibilities for the `n_best_size` greatest start and end logits.
start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist()
end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist()
for start_index in start_indexes:
for end_index in end_indexes:
# Don't consider out-of-scope answers, either because the indices are out of bounds or correspond
# to part of the input_ids that are not in the context.
if (
start_index >= len(offset_mapping)
or end_index >= len(offset_mapping)
or offset_mapping[start_index] is None
or len(offset_mapping[start_index]) < 2
or offset_mapping[end_index] is None
or len(offset_mapping[end_index]) < 2
):
continue
# Don't consider answers with a length that is either < 0 or > max_answer_length.
if end_index < start_index or end_index - start_index + 1 > max_answer_length:
continue
                    # Don't consider answers that don't have the maximum context available (if such information is
# provided).
if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False):
continue
prelim_predictions.append(
{
"offsets": (offset_mapping[start_index][0], offset_mapping[end_index][1]),
"score": start_logits[start_index] + end_logits[end_index],
"start_logit": start_logits[start_index],
"end_logit": end_logits[end_index],
}
)
if version_2_with_negative and min_null_prediction is not None:
# Add the minimum null prediction
prelim_predictions.append(min_null_prediction)
null_score = min_null_prediction["score"]
# Only keep the best `n_best_size` predictions.
predictions = sorted(prelim_predictions, key=lambda x: x["score"], reverse=True)[:n_best_size]
# Add back the minimum null prediction if it was removed because of its low score.
if (
version_2_with_negative
and min_null_prediction is not None
and not any(p["offsets"] == (0, 0) for p in predictions)
):
predictions.append(min_null_prediction)
# Use the offsets to gather the answer text in the original context.
context = example["context"]
for pred in predictions:
offsets = pred.pop("offsets")
pred["text"] = context[offsets[0] : offsets[1]]
        # In the very rare edge case where we do not have a single non-null prediction, we create a fake prediction
        # to avoid failure.
if len(predictions) == 0 or (len(predictions) == 1 and predictions[0]["text"] == ""):
predictions.insert(0, {"text": "empty", "start_logit": 0.0, "end_logit": 0.0, "score": 0.0})
# Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using
# the LogSumExp trick).
scores = np.array([pred.pop("score") for pred in predictions])
exp_scores = np.exp(scores - np.max(scores))
probs = exp_scores / exp_scores.sum()
# Include the probabilities in our predictions.
for prob, pred in zip(probs, predictions):
pred["probability"] = prob
# Pick the best prediction. If the null answer is not possible, this is easy.
if not version_2_with_negative:
all_predictions[example["id"]] = predictions[0]["text"]
else:
# Otherwise we first need to find the best non-empty prediction.
i = 0
while predictions[i]["text"] == "":
i += 1
best_non_null_pred = predictions[i]
# Then we compare to the null prediction using the threshold.
score_diff = null_score - best_non_null_pred["start_logit"] - best_non_null_pred["end_logit"]
scores_diff_json[example["id"]] = float(score_diff) # To be JSON-serializable.
if score_diff > null_score_diff_threshold:
all_predictions[example["id"]] = ""
else:
all_predictions[example["id"]] = best_non_null_pred["text"]
# Make `predictions` JSON-serializable by casting np.float back to float.
all_nbest_json[example["id"]] = [
{k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()}
for pred in predictions
]
# If we have an output_dir, let's save all those dicts.
if output_dir is not None:
if not os.path.isdir(output_dir):
raise EnvironmentError(f"{output_dir} is not a directory.")
prediction_file = os.path.join(
output_dir, "predictions.json" if prefix is None else f"{prefix}_predictions.json"
)
nbest_file = os.path.join(
output_dir, "nbest_predictions.json" if prefix is None else f"{prefix}_nbest_predictions.json"
)
if version_2_with_negative:
null_odds_file = os.path.join(
output_dir, "null_odds.json" if prefix is None else f"{prefix}_null_odds.json"
)
logger.info(f"Saving predictions to {prediction_file}.")
with open(prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
logger.info(f"Saving nbest_preds to {nbest_file}.")
with open(nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if version_2_with_negative:
logger.info(f"Saving null_odds to {null_odds_file}.")
with open(null_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
return all_predictions
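# Hedged usage sketch (variable names are assumptions; this helper mirrors the
# Hugging Face question-answering post-processing utilities):
#   predictions = postprocess_qa_predictions(
#       examples=eval_examples,                  # raw examples with "id" and "context"
#       features=eval_features,                  # tokenized features with "example_id" and "offset_mapping"
#       predictions=(start_logits, end_logits),  # numpy arrays, one row per feature
#       n_best_size=20,
#       max_answer_length=30,
#       output_dir="./output",
#   )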
def postprocess_qa_predictions_with_beam_search(
examples,
features,
predictions: Tuple[np.ndarray, np.ndarray],
version_2_with_negative: bool = False,
n_best_size: int = 20,
max_answer_length: int = 30,
start_n_top: int = 5,
end_n_top: int = 5,
output_dir: Optional[str] = None,
prefix: Optional[str] = None,
log_level: Optional[int] = logging.WARNING,
):
"""
Post-processes the predictions of a question-answering model with beam search to convert them to answers that are substrings of the
    original contexts. This is the postprocessing function for models that return start and end logits, indices, as well as
cls token predictions.
Args:
examples: The non-preprocessed dataset (see the main script for more information).
features: The processed dataset (see the main script for more information).
predictions (:obj:`Tuple[np.ndarray, np.ndarray]`):
The predictions of the model: two arrays containing the start logits and the end logits respectively. Its
first dimension must match the number of elements of :obj:`features`.
version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the underlying dataset contains examples with no answers.
n_best_size (:obj:`int`, `optional`, defaults to 20):
The total number of n-best predictions to generate when looking for an answer.
max_answer_length (:obj:`int`, `optional`, defaults to 30):
The maximum length of an answer that can be generated. This is needed because the start and end predictions
are not conditioned on one another.
start_n_top (:obj:`int`, `optional`, defaults to 5):
            The number of top start logits to keep when searching for the :obj:`n_best_size` predictions.
end_n_top (:obj:`int`, `optional`, defaults to 5):
            The number of top end logits to keep when searching for the :obj:`n_best_size` predictions.
output_dir (:obj:`str`, `optional`):
If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if
:obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null
answers, are saved in `output_dir`.
prefix (:obj:`str`, `optional`):
If provided, the dictionaries mentioned above are saved with `prefix` added to their names.
log_level (:obj:`int`, `optional`, defaults to ``logging.WARNING``):
``logging`` log level (e.g., ``logging.WARNING``)
"""
if len(predictions) != 5:
raise ValueError("`predictions` should be a tuple with five elements.")
start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits = predictions
if len(predictions[0]) != len(features):
raise ValueError(f"Got {len(predictions[0])} predictions and {len(features)} features.")
# Build a map example to its corresponding features.
example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
features_per_example = collections.defaultdict(list)
for i, feature in enumerate(features):
features_per_example[example_id_to_index[feature["example_id"]]].append(i)
# The dictionaries we have to fill.
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict() if version_2_with_negative else None
# Logging.
logger.setLevel(log_level)
logger.info(f"Post-processing {len(examples)} example predictions split into {len(features)} features.")
# Let's loop over all the examples!
for example_index, example in enumerate(tqdm(examples)):
# Those are the indices of the features associated to the current example.
feature_indices = features_per_example[example_index]
min_null_score = None
prelim_predictions = []
# Looping through all the features associated to the current example.
for feature_index in feature_indices:
# We grab the predictions of the model for this feature.
start_log_prob = start_top_log_probs[feature_index]
start_indexes = start_top_index[feature_index]
end_log_prob = end_top_log_probs[feature_index]
end_indexes = end_top_index[feature_index]
feature_null_score = cls_logits[feature_index]
            # This is what will allow us to map some of the positions in our logits to spans of text in the original
# context.
offset_mapping = features[feature_index]["offset_mapping"]
# Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context
# available in the current feature.
token_is_max_context = features[feature_index].get("token_is_max_context", None)
# Update minimum null prediction
if min_null_score is None or feature_null_score < min_null_score:
min_null_score = feature_null_score
            # Go through all possibilities for the `start_n_top`/`end_n_top` greatest start and end logits.
for i in range(start_n_top):
for j in range(end_n_top):
start_index = int(start_indexes[i])
j_index = i * end_n_top + j
end_index = int(end_indexes[j_index])
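                    # Illustrative note (layout assumed from the beam-search outputs): `end_indexes` is flattened
                    # over start candidates, e.g. with start_n_top=2 and end_n_top=2 it reads
                    # [end_0|start_0, end_1|start_0, end_0|start_1, end_1|start_1], hence the `i * end_n_top + j` offset.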
                    # Don't consider out-of-scope answers (the last part of this check should be unnecessary
                    # because of the p_mask, but let's not take any risk).
if (
start_index >= len(offset_mapping)
or end_index >= len(offset_mapping)
or offset_mapping[start_index] is None
or len(offset_mapping[start_index]) < 2
or offset_mapping[end_index] is None
or len(offset_mapping[end_index]) < 2
):
continue
                    # Don't consider answers with a negative length or a length greater than max_answer_length.
if end_index < start_index or end_index - start_index + 1 > max_answer_length:
continue
                    # Don't consider answers that don't have the maximum context available (if such information
                    # is provided).
if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False):
continue
prelim_predictions.append(
{
"offsets": (offset_mapping[start_index][0], offset_mapping[end_index][1]),
"score": start_log_prob[i] + end_log_prob[j_index],
"start_log_prob": start_log_prob[i],
"end_log_prob": end_log_prob[j_index],
}
)
# Only keep the best `n_best_size` predictions.
predictions = sorted(prelim_predictions, key=lambda x: x["score"], reverse=True)[:n_best_size]
# Use the offsets to gather the answer text in the original context.
context = example["context"]
for pred in predictions:
offsets = pred.pop("offsets")
pred["text"] = context[offsets[0] : offsets[1]]
        # In the very rare edge case where we don't have a single non-null prediction, we create a fake prediction
        # to avoid failure.
if len(predictions) == 0:
            # Without predictions, min_null_score would be None, and None would cause an exception later on.
            min_null_score = -2e-6
            predictions.insert(0, {"text": "", "start_log_prob": -1e-6, "end_log_prob": -1e-6, "score": min_null_score})
# Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using
# the LogSumExp trick).
scores = np.array([pred.pop("score") for pred in predictions])
exp_scores = np.exp(scores - np.max(scores))
probs = exp_scores / exp_scores.sum()
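        # Illustrative example (numbers assumed): np.exp([1000., 1001.]) overflows, but subtracting the max first
        # gives np.exp([-1., 0.]) = [0.368, 1.0], which normalizes to the same probabilities, roughly [0.269, 0.731].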
# Include the probabilities in our predictions.
for prob, pred in zip(probs, predictions):
pred["probability"] = prob
# Pick the best prediction and set the probability for the null answer.
all_predictions[example["id"]] = predictions[0]["text"]
if version_2_with_negative:
scores_diff_json[example["id"]] = float(min_null_score)
# Make `predictions` JSON-serializable by casting np.float back to float.
all_nbest_json[example["id"]] = [
{k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()}
for pred in predictions
]
# If we have an output_dir, let's save all those dicts.
if output_dir is not None:
if not os.path.isdir(output_dir):
raise EnvironmentError(f"{output_dir} is not a directory.")
prediction_file = os.path.join(
output_dir, "predictions.json" if prefix is None else f"{prefix}_predictions.json"
)
nbest_file = os.path.join(
output_dir, "nbest_predictions.json" if prefix is None else f"{prefix}_nbest_predictions.json"
)
if version_2_with_negative:
null_odds_file = os.path.join(
output_dir, "null_odds.json" if prefix is None else f"{prefix}_null_odds.json"
)
logger.info(f"Saving predictions to {prediction_file}.")
with open(prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
logger.info(f"Saving nbest_preds to {nbest_file}.")
with open(nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if version_2_with_negative:
logger.info(f"Saving null_odds to {null_odds_file}.")
with open(null_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
return all_predictions, scores_diff_json
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/squad/utils_qa.py |
import logging
import os
os.environ['DISABLE_MLFLOW_INTEGRATION'] = 'True'
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
from datasets import load_dataset, load_metric
import transformers
from trainer_qa import QuestionAnsweringTrainer
from transformers import (
AutoConfig,
AutoModelForQuestionAnswering,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizerFast,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
from utils_qa import postprocess_qa_predictions
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.20.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Path to directory to store the pretrained models downloaded from huggingface.co"},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": (
"Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
)
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
test_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input test data file to evaluate the perplexity on (a text file)."},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_seq_length: int = field(
default=384,
metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
},
)
pad_to_max_length: bool = field(
default=True,
metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. If False, will pad the samples dynamically when"
" batching to the maximum length in the batch (which can be faster on GPU but will be slower on TPU)."
)
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
},
)
version_2_with_negative: bool = field(
default=False, metadata={"help": "If true, some of the examples do not have an answer."}
)
null_score_diff_threshold: float = field(
default=0.0,
metadata={
"help": (
"The threshold used to select the null answer: if the best answer has a score that is less than "
"the score of the null answer minus this threshold, the null answer is selected for this example. "
"Only useful when `version_2_with_negative=True`."
)
},
)
doc_stride: int = field(
default=128,
metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
)
n_best_size: int = field(
default=20,
metadata={"help": "The total number of n-best predictions to generate when looking for an answer."},
)
max_answer_length: int = field(
default=30,
metadata={
"help": (
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
)
},
)
def __post_init__(self):
if (
self.dataset_name is None
and self.train_file is None
and self.validation_file is None
and self.test_file is None
):
raise ValueError("Need either a dataset name or a training/validation file/test_file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
if self.test_file is not None:
extension = self.test_file.split(".")[-1]
assert extension in ["csv", "json"], "`test_file` should be a csv or a json file."
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
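    # Illustrative invocation (the model name and output path below are placeholders, not prescribed by this script):
    #   python run_squad.py --model_name_or_path bert-base-uncased --dataset_name squad \
    #       --do_train --do_eval --output_dir /tmp/squad_out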
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.validation_file.split(".")[-1]
if data_args.test_file is not None:
data_files["test"] = data_args.test_file
extension = data_args.test_file.split(".")[-1]
raw_datasets = load_dataset(
extension,
data_files=data_files,
field="data",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=True,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForQuestionAnswering.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# Tokenizer check: this script requires a fast tokenizer.
if not isinstance(tokenizer, PreTrainedTokenizerFast):
raise ValueError(
"This example script only works for models that have a fast tokenizer. Checkout the big table of models at"
" https://huggingface.co/transformers/index.html#supported-frameworks to find the model types that meet"
" this requirement"
)
# Preprocessing the datasets.
    # Preprocessing is slightly different for training and evaluation.
if training_args.do_train:
column_names = raw_datasets["train"].column_names
elif training_args.do_eval:
column_names = raw_datasets["validation"].column_names
else:
column_names = raw_datasets["test"].column_names
question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Training preprocessing
def prepare_train_features(examples):
        # Some of the questions have a lot of whitespace on the left, which is not useful and will make the
        # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
        # left whitespace.
examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
        # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
        # in one example possibly giving several features when a context is long, each of those features having a
        # context that overlaps a bit with the context of the previous feature.
tokenized_examples = tokenizer(
examples[question_column_name if pad_on_right else context_column_name],
examples[context_column_name if pad_on_right else question_column_name],
truncation="only_second" if pad_on_right else "only_first",
max_length=max_seq_length,
stride=data_args.doc_stride,
return_overflowing_tokens=True,
return_offsets_mapping=True,
padding="max_length" if data_args.pad_to_max_length else False,
)
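        # Illustrative note (numbers assumed): with max_seq_length=384 and doc_stride=128, a long context is split
        # into several features whose contexts overlap by 128 tokens, so an answer near a split boundary is still
        # fully contained in at least one feature.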
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
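        # Illustrative example (values assumed): if the batch held two examples and the first was split into three
        # features, `sample_mapping` would be [0, 0, 0, 1], mapping each feature back to its source example.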
# The offset mappings will give us a map from token to character position in the original context. This will
# help us compute the start_positions and end_positions.
offset_mapping = tokenized_examples.pop("offset_mapping")
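        # Illustrative example (tokenization assumed): for a context starting with "New York", the offsets of its
        # first two tokens could be [(0, 3), (4, 8)], i.e. (start_char, end_char) positions in the original string.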
# Let's label those examples!
tokenized_examples["start_positions"] = []
tokenized_examples["end_positions"] = []
for i, offsets in enumerate(offset_mapping):
# We will label impossible answers with the index of the CLS token.
input_ids = tokenized_examples["input_ids"][i]
cls_index = input_ids.index(tokenizer.cls_token_id)
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
sequence_ids = tokenized_examples.sequence_ids(i)
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = sample_mapping[i]
answers = examples[answer_column_name][sample_index]
# If no answers are given, set the cls_index as answer.
if len(answers["answer_start"]) == 0:
tokenized_examples["start_positions"].append(cls_index)
tokenized_examples["end_positions"].append(cls_index)
else:
# Start/end character index of the answer in the text.
start_char = answers["answer_start"][0]
end_char = start_char + len(answers["text"][0])
# Start token index of the current span in the text.
token_start_index = 0
while sequence_ids[token_start_index] != (1 if pad_on_right else 0):
token_start_index += 1
# End token index of the current span in the text.
token_end_index = len(input_ids) - 1
while sequence_ids[token_end_index] != (1 if pad_on_right else 0):
token_end_index -= 1
# Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).
if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char):
tokenized_examples["start_positions"].append(cls_index)
tokenized_examples["end_positions"].append(cls_index)
else:
# Otherwise move the token_start_index and token_end_index to the two ends of the answer.
# Note: we could go after the last offset if the answer is the last word (edge case).
while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char:
token_start_index += 1
tokenized_examples["start_positions"].append(token_start_index - 1)
while offsets[token_end_index][1] >= end_char:
token_end_index -= 1
tokenized_examples["end_positions"].append(token_end_index + 1)
return tokenized_examples
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = raw_datasets["train"]
if data_args.max_train_samples is not None:
            # We will select a sample from the whole data if the argument is specified.
max_train_samples = min(len(train_dataset), data_args.max_train_samples)
train_dataset = train_dataset.select(range(max_train_samples))
# Create train feature from dataset
with training_args.main_process_first(desc="train dataset map pre-processing"):
train_dataset = train_dataset.map(
prepare_train_features,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on train dataset",
)
if data_args.max_train_samples is not None:
            # The number of samples might increase during feature creation, so we select only the specified max samples.
max_train_samples = min(len(train_dataset), data_args.max_train_samples)
train_dataset = train_dataset.select(range(max_train_samples))
# Validation preprocessing
def prepare_validation_features(examples):
        # Some of the questions have a lot of whitespace on the left, which is not useful and will make the
        # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
        # left whitespace.
examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
        # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
        # in one example possibly giving several features when a context is long, each of those features having a
        # context that overlaps a bit with the context of the previous feature.
tokenized_examples = tokenizer(
examples[question_column_name if pad_on_right else context_column_name],
examples[context_column_name if pad_on_right else question_column_name],
truncation="only_second" if pad_on_right else "only_first",
max_length=max_seq_length,
stride=data_args.doc_stride,
return_overflowing_tokens=True,
return_offsets_mapping=True,
padding="max_length" if data_args.pad_to_max_length else False,
)
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
tokenized_examples["example_id"] = []
for i in range(len(tokenized_examples["input_ids"])):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
sequence_ids = tokenized_examples.sequence_ids(i)
context_index = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = sample_mapping[i]
tokenized_examples["example_id"].append(examples["id"][sample_index])
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
tokenized_examples["offset_mapping"][i] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["offset_mapping"][i])
]
return tokenized_examples
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_examples = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
            # We will select a sample from the whole data.
max_eval_samples = min(len(eval_examples), data_args.max_eval_samples)
eval_examples = eval_examples.select(range(max_eval_samples))
# Validation Feature Creation
with training_args.main_process_first(desc="validation dataset map pre-processing"):
eval_dataset = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
if data_args.max_eval_samples is not None:
            # During feature creation the number of samples might increase, so we select the required samples again.
max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
eval_dataset = eval_dataset.select(range(max_eval_samples))
if training_args.do_predict:
if "test" not in raw_datasets:
raise ValueError("--do_predict requires a test dataset")
predict_examples = raw_datasets["test"]
if data_args.max_predict_samples is not None:
            # We will select a sample from the whole data.
predict_examples = predict_examples.select(range(data_args.max_predict_samples))
# Predict Feature Creation
with training_args.main_process_first(desc="prediction dataset map pre-processing"):
predict_dataset = predict_examples.map(
prepare_validation_features,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on prediction dataset",
)
if data_args.max_predict_samples is not None:
            # During feature creation the number of samples might increase, so we select the required samples again.
max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
predict_dataset = predict_dataset.select(range(max_predict_samples))
# Data collator
# We have already padded to max length if the corresponding flag is True, otherwise we need to pad in the data
# collator.
data_collator = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
)
# Post-processing:
def post_processing_function(examples, features, predictions, stage="eval"):
# Post-processing: we match the start logits and end logits to answers in the original context.
predictions = postprocess_qa_predictions(
examples=examples,
features=features,
predictions=predictions,
version_2_with_negative=data_args.version_2_with_negative,
n_best_size=data_args.n_best_size,
max_answer_length=data_args.max_answer_length,
null_score_diff_threshold=data_args.null_score_diff_threshold,
output_dir=training_args.output_dir,
log_level=log_level,
prefix=stage,
)
# Format the result to the format the metric expects.
if data_args.version_2_with_negative:
formatted_predictions = [
{"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
]
else:
formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if data_args.version_2_with_negative else "squad")
def compute_metrics(p: EvalPrediction):
return metric.compute(predictions=p.predictions, references=p.label_ids)
# Initialize our Trainer
trainer = QuestionAnsweringTrainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
eval_examples=eval_examples if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
post_process_function=post_processing_function,
compute_metrics=compute_metrics,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***")
results = trainer.predict(predict_dataset, predict_examples)
metrics = results.metrics
max_predict_samples = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
)
metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
trainer.log_metrics("predict", metrics)
trainer.save_metrics("predict", metrics)
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "question-answering"}
if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name
if data_args.dataset_config_name is not None:
kwargs["dataset_args"] = data_args.dataset_config_name
kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
else:
kwargs["dataset"] = data_args.dataset_name
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/squad/run_squad.py |
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
super().__init__(*args, **kwargs)
self.eval_examples = eval_examples
self.post_process_function = post_process_function
def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
eval_dataloader = self.get_eval_dataloader(eval_dataset)
eval_examples = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
compute_metrics = self.compute_metrics
self.compute_metrics = None
eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
output = eval_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if compute_metrics is None else None,
ignore_keys=ignore_keys,
)
finally:
self.compute_metrics = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
metrics = self.compute_metrics(eval_preds)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
self.log(metrics)
else:
metrics = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
return metrics
def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
predict_dataloader = self.get_test_dataloader(predict_dataset)
# Temporarily disable metric computation, we will do it in the loop here.
compute_metrics = self.compute_metrics
self.compute_metrics = None
eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
output = eval_loop(
predict_dataloader,
description="Prediction",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if compute_metrics is None else None,
ignore_keys=ignore_keys,
)
finally:
self.compute_metrics = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
metrics = self.compute_metrics(predictions)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/squad/trainer_qa.py |
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
super().__init__(*args, **kwargs)
self.eval_examples = eval_examples
self.post_process_function = post_process_function
# def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
eval_examples=None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
max_length: Optional[int] = None,
num_beams: Optional[int] = None,
) -> Dict[str, float]:
self._max_length = max_length if max_length is not None else self.args.generation_max_length
self._num_beams = num_beams if num_beams is not None else self.args.generation_num_beams
eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
eval_dataloader = self.get_eval_dataloader(eval_dataset)
eval_examples = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
compute_metrics = self.compute_metrics
self.compute_metrics = None
eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
output = eval_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if compute_metrics is None else None,
ignore_keys=ignore_keys,
)
finally:
self.compute_metrics = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
metrics = self.compute_metrics(eval_preds)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
self.log(metrics)
else:
metrics = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
return metrics
def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
predict_dataloader = self.get_test_dataloader(predict_dataset)
# Temporarily disable metric computation, we will do it in the loop here.
compute_metrics = self.compute_metrics
self.compute_metrics = None
eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
output = eval_loop(
predict_dataloader,
description="Prediction",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if compute_metrics is None else None,
ignore_keys=ignore_keys,
)
finally:
self.compute_metrics = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
metrics = self.compute_metrics(predictions)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/squad/trainer_seq2seq_qa.py |
#!/usr/bin/env python
# coding=utf-8
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
from datasets import ClassLabel, load_dataset, load_metric
import layoutlmft.data.datasets.funsd
import transformers
from layoutlmft.data import DataCollatorForKeyValueExtraction
from layoutlmft.data.data_args import DataTrainingArguments
from layoutlmft.models.model_args import ModelArguments
from layoutlmft.trainers import FunsdTrainer as Trainer
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
HfArgumentParser,
PreTrainedTokenizerFast,
TrainingArguments,
set_seed,
RobertaConfig
)
import torch
from model import Layoutlmv1ForTokenClassification
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.5.0")
logger = logging.getLogger(__name__)
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    training_args.per_device_train_batch_size = 16
    training_args.num_train_epochs = 100.0
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
set_seed(training_args.seed)
datasets = load_dataset(os.path.abspath(layoutlmft.data.datasets.funsd.__file__))
print(datasets)
if training_args.do_train:
column_names = datasets["train"].column_names
features = datasets["train"].features
else:
column_names = datasets["test"].column_names
features = datasets["test"].features
text_column_name = "tokens" if "tokens" in column_names else column_names[0]
label_column_name = (
f"{data_args.task_name}_tags" if f"{data_args.task_name}_tags" in column_names else column_names[1]
)
remove_columns = column_names
def get_label_list(labels):
unique_labels = set()
for label in labels:
unique_labels = unique_labels | set(label)
label_list = list(unique_labels)
label_list.sort()
return label_list
if isinstance(features[label_column_name].feature, ClassLabel):
label_list = features[label_column_name].feature.names
# No need to convert the labels since they are already ints.
label_to_id = {i: i for i in range(len(label_list))}
else:
label_list = get_label_list(datasets["train"][label_column_name])
label_to_id = {l: i for i, l in enumerate(label_list)}
num_labels = len(label_list)
config = RobertaConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=True,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
add_prefix_space=True
)
# model = AutoModelForTokenClassification.from_pretrained(
model = Layoutlmv1ForTokenClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# Tokenizer check: this script requires a fast tokenizer.
if not isinstance(tokenizer, PreTrainedTokenizerFast):
raise ValueError(
"This example script only works for models that have a fast tokenizer. Checkout the big table of models "
"at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this "
"requirement"
)
# Preprocessing the dataset
# Padding strategy
padding = "max_length" if data_args.pad_to_max_length else False
# Tokenize all texts and align the labels with them.
def tokenize_and_align_labels(examples):
tokenized_inputs = tokenizer(
examples[text_column_name],
padding=padding,
truncation=True,
return_overflowing_tokens=True,
# We use this argument because the texts in our dataset are lists of words (with a label for each word).
is_split_into_words=True,
)
labels = []
bboxes = []
images = []
for batch_index in range(len(tokenized_inputs["input_ids"])):
word_ids = tokenized_inputs.word_ids(batch_index=batch_index)
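            # Illustrative example (tokenization assumed): for two words where the second splits into two
            # sub-tokens, word_ids might look like [None, 0, 1, 1, None]; the loop below keeps the real label
            # (and bbox) for the first sub-token of each word and masks or repeats it for the rest.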
org_batch_index = tokenized_inputs["overflow_to_sample_mapping"][batch_index]
label = examples[label_column_name][org_batch_index]
bbox = examples["bboxes"][org_batch_index]
image = examples["image"][org_batch_index]
previous_word_idx = None
label_ids = []
bbox_inputs = []
for word_idx in word_ids:
# Special tokens have a word id that is None. We set the label to -100 so they are automatically
# ignored in the loss function.
if word_idx is None:
label_ids.append(-100)
bbox_inputs.append([0, 0, 0, 0])
# We set the label for the first token of each word.
elif word_idx != previous_word_idx:
label_ids.append(label_to_id[label[word_idx]])
bbox_inputs.append(bbox[word_idx])
# For the other tokens in a word, we set the label to either the current label or -100, depending on
# the label_all_tokens flag.
else:
label_ids.append(label_to_id[label[word_idx]] if data_args.label_all_tokens else -100)
bbox_inputs.append(bbox[word_idx])
previous_word_idx = word_idx
labels.append(label_ids)
bboxes.append(bbox_inputs)
images.append(image)
tokenized_inputs["labels"] = labels
tokenized_inputs["bbox"] = bboxes
tokenized_inputs["image"] = images
return tokenized_inputs
if training_args.do_train:
if "train" not in datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
train_dataset = train_dataset.map(
tokenize_and_align_labels,
batched=True,
remove_columns=remove_columns,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
if training_args.do_eval:
if "test" not in datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = datasets["test"]
if data_args.max_val_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
eval_dataset = eval_dataset.map(
tokenize_and_align_labels,
batched=True,
remove_columns=remove_columns,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
if training_args.do_predict:
if "test" not in datasets:
raise ValueError("--do_predict requires a test dataset")
test_dataset = datasets["test"]
if data_args.max_test_samples is not None:
test_dataset = test_dataset.select(range(data_args.max_test_samples))
test_dataset = test_dataset.map(
tokenize_and_align_labels,
batched=True,
remove_columns=remove_columns,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
# Data collator
data_collator = DataCollatorForKeyValueExtraction(
tokenizer,
pad_to_multiple_of=8 if training_args.fp16 else None,
padding=padding,
max_length=512,
)
# Metrics
metric = load_metric("seqeval")
def compute_metrics(p):
predictions, labels = p
predictions = np.argmax(predictions, axis=2)
# Remove ignored index (special tokens)
true_predictions = [
[label_list[p] for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(predictions, labels)
]
true_labels = [
[label_list[l] for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(predictions, labels)
]
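        # Illustrative example (labels assumed): true_predictions/true_labels are lists of per-document label
        # sequences such as [["B-HEADER", "I-HEADER", "O"], ...], which is the format seqeval expects.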
results = metric.compute(predictions=true_predictions, references=true_labels)
if data_args.return_entity_level_metrics:
# Unpack nested dictionaries
final_results = {}
for key, value in results.items():
if isinstance(value, dict):
for n, v in value.items():
final_results[f"{key}_{n}"] = v
else:
final_results[key] = value
return final_results
else:
return {
"precision": results["overall_precision"],
"recall": results["overall_recall"],
"f1": results["overall_f1"],
"accuracy": results["overall_accuracy"],
}
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics,
)
# Training
if training_args.do_train:
checkpoint = last_checkpoint if last_checkpoint else None
train_result = trainer.train(resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
trainer.save_model() # Saves the tokenizer too for easy upload
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
# Predict
if training_args.do_predict:
logger.info("*** Predict ***")
predictions, labels, metrics = trainer.predict(test_dataset)
predictions = np.argmax(predictions, axis=2)
# Remove ignored index (special tokens)
true_predictions = [
[label_list[p] for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(predictions, labels)
]
trainer.log_metrics("test", metrics)
trainer.save_metrics("test", metrics)
# Save predictions
output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
if trainer.is_world_process_zero():
with open(output_test_predictions_file, "w") as writer:
for prediction in true_predictions:
writer.write(" ".join(prediction) + "\n")
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/run_funsd.py |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers import BertConfig, BertModel, BertPreTrainedModel, RobertaConfig
from transformers.models.bert.modeling_bert import BertOnlyMLMHead  # needed by Layoutlmv1ForMaskedLM below
logger = logging.getLogger(__name__)
LAYOUTLMV1_PRETRAINED_MODEL_ARCHIVE_MAP = {}
LAYOUTLMV1_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class Layoutlmv1Config(RobertaConfig):
pretrained_config_archive_map = LAYOUTLMV1_PRETRAINED_CONFIG_ARCHIVE_MAP
model_type = "bert"
def __init__(self, max_2d_position_embeddings=1024, add_linear=False, **kwargs):
        super().__init__(**kwargs)
        # Store these on the config instead of silently dropping them.
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.add_linear = add_linear
class Layoutlmv1Embeddings(nn.Module):
def __init__(self, config):
super(Layoutlmv1Embeddings, self).__init__()
self.config = config
self.word_embeddings = nn.Embedding(
config.vocab_size, config.hidden_size, padding_idx=0
)
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size
)
config.max_2d_position_embeddings = 1024
self.x_position_embeddings = nn.Embedding(
config.max_2d_position_embeddings, config.hidden_size
)
self.y_position_embeddings = nn.Embedding(
config.max_2d_position_embeddings, config.hidden_size
)
self.h_position_embeddings = nn.Embedding(
config.max_2d_position_embeddings, config.hidden_size
)
self.w_position_embeddings = nn.Embedding(
config.max_2d_position_embeddings, config.hidden_size
)
self.token_type_embeddings = nn.Embedding(
config.type_vocab_size, config.hidden_size
)
self.LayerNorm = torch.nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.doc_linear1 = nn.Linear(config.hidden_size, config.hidden_size)
self.doc_linear2 = nn.Linear(config.hidden_size, config.hidden_size)
self.doc_linear3 = nn.Linear(config.hidden_size, config.hidden_size)
self.doc_linear4 = nn.Linear(config.hidden_size, config.hidden_size)
self.relu = nn.ReLU()
def forward(
self,
input_ids,
bbox,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
):
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(
seq_length, dtype=torch.long, device=input_ids.device
)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
h_position_embeddings = self.h_position_embeddings(
bbox[:, :, 3] - bbox[:, :, 1]
)
w_position_embeddings = self.w_position_embeddings(
bbox[:, :, 2] - bbox[:, :, 0]
)
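        # Illustrative note (coordinate convention assumed): `bbox` is expected to hold boxes as (x0, y0, x1, y1)
        # normalized to the 0-1000 range (the embedding tables hold 1024 positions), so height = y1 - y0 and
        # width = x1 - x0 above.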
temp_embeddings = self.doc_linear2(self.relu(self.doc_linear1(
left_position_embeddings
+ upper_position_embeddings
+ right_position_embeddings
+ lower_position_embeddings
+ h_position_embeddings
+ w_position_embeddings
)))
embeddings = (
words_embeddings
+ position_embeddings
+ temp_embeddings
+ token_type_embeddings
)
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class Layoutlmv1Model(BertModel):
config_class = Layoutlmv1Config
pretrained_model_archive_map = LAYOUTLMV1_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "bert"
def __init__(self, config):
super(Layoutlmv1Model, self).__init__(config)
self.embeddings = Layoutlmv1Embeddings(config)
self.init_weights()
def forward(
self,
input_ids,
bbox,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is simpler than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(
dtype=torch.float32
            # dtype=next(self.parameters()).dtype  # using the parameters' dtype here can raise an error on newer torch versions
) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
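        # Illustrative example (values assumed): an attention_mask of [1, 1, 0] becomes [0.0, 0.0, -10000.0],
        # so the masked position receives a large negative additive score and close to zero attention after softmax.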
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = (
head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
)
head_mask = head_mask.expand(
self.config.num_hidden_layers, -1, -1, -1, -1
)
elif head_mask.dim() == 2:
head_mask = (
head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
) # We can specify head_mask for each layer
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
                )  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(
input_ids, bbox, position_ids=position_ids, token_type_ids=token_type_ids
)
encoder_outputs = self.encoder(
embedding_output, extended_attention_mask, head_mask=head_mask
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
outputs = (sequence_output, pooled_output) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
class Layoutlmv1ForTokenClassification(BertPreTrainedModel):
config_class = Layoutlmv1Config
pretrained_model_archive_map = LAYOUTLMV1_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "bert"
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = Layoutlmv1Model(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def forward(
self,
input_ids,
bbox,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
outputs = self.roberta(
input_ids=input_ids,
bbox=bbox,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
outputs = (logits,) + outputs[
2:
] # add hidden states and attention if they are here
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), scores, (hidden_states), (attentions)
class Layoutlmv1ForMaskedLM(BertPreTrainedModel):
config_class = Layoutlmv1Config
pretrained_model_archive_map = LAYOUTLMV1_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "bert"
def __init__(self, config):
super().__init__(config)
self.bert = Layoutlmv1Model(config)
self.cls = BertOnlyMLMHead(config)
self.init_weights()
def get_input_embeddings(self):
return self.bert.embeddings.word_embeddings
def get_output_embeddings(self):
return self.cls.predictions.decoder
def forward(
self,
input_ids,
bbox,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
masked_lm_labels=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
lm_labels=None,
):
        outputs = self.bert(
input_ids,
bbox,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
outputs = (prediction_scores,) + outputs[
2:
] # Add hidden states and attention if they are here
# Although this may seem awkward, BertForMaskedLM supports two scenarios:
# 1. If a tensor that contains the indices of masked labels is provided,
# the cross-entropy is the MLM cross-entropy that measures the likelihood
# of predictions for masked words.
# 2. If `lm_labels` is provided we are in a causal scenario where we
# try to predict the next token for each input in the decoder.
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(
prediction_scores.view(-1, self.config.vocab_size),
masked_lm_labels.view(-1),
)
outputs = (masked_lm_loss,) + outputs
return (
outputs
) # (masked_lm_loss), (ltr_lm_loss), prediction_scores, (hidden_states), (attentions)
class Layoutlmv1ForQuestionAnswering(BertPreTrainedModel):
config_class = Layoutlmv1Config
pretrained_model_archive_map = LAYOUTLMV1_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "bert"
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = Layoutlmv1Model(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def forward(
self,
input_ids,
bbox,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
# inputs_embeds=None,
start_positions=None,
end_positions=None,
# output_attentions=None,
# output_hidden_states=None,
# return_dict=None,
):
# import numpy as np
# torch.set_printoptions(threshold=np.inf)
# print(bbox[0])
# exit(0)
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside of the sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside of the sequence are not taken into account for computing the loss.
"""
# return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids=input_ids,
bbox=bbox,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the position tensors may carry an extra dimension; squeeze it
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs; we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
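# Usage sketch (hedged, not part of the original file). The checkpoint path is
# a placeholder; the hard requirements are only the tensor shapes: input_ids of
# shape (batch, seq_len), bbox of shape (batch, seq_len, 4) with coordinates in
# the usual LayoutLM 0-1000 range, and optional start/end positions of shape
# (batch,). With positions supplied, the first output is the averaged span
# loss, followed by the start and end logits.
#
# model = Layoutlmv1ForQuestionAnswering.from_pretrained("path/to/checkpoint")
# outputs = model(input_ids, bbox, attention_mask=attention_mask,
#                 start_positions=start_positions, end_positions=end_positions)
# loss, start_logits, end_logits = outputs[0], outputs[1], outputs[2]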
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/model.py |
import os
import re
import numpy as np
from transformers.utils import logging
logger = logging.get_logger(__name__)
PREFIX_CHECKPOINT_DIR = "checkpoint"
_re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$")
def get_last_checkpoint(folder):
content = os.listdir(folder)
checkpoints = [
path
for path in content
if _re_checkpoint.search(path) is not None and os.path.isdir(os.path.join(folder, path))
]
if len(checkpoints) == 0:
return
return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0])))
def re_score(pred_relations, gt_relations, mode="strict"):
    """Evaluate RE predictions
    Args:
        pred_relations (list) : list of list of predicted relations (several relations per sentence)
        gt_relations (list) : list of list of ground truth relations
            rel = { "head": (start_idx (inclusive), end_idx (exclusive)),
                    "tail": (start_idx (inclusive), end_idx (exclusive)),
                    "head_type": ent_type,
                    "tail_type": ent_type,
                    "type": rel_type}
        mode (str) : either 'strict' or 'boundaries'"""
assert mode in ["strict", "boundaries"]
    # keep only the positive relation label (1); label 0 is treated as the null / no-relation class and excluded from scoring
    relation_types = [v for v in [0, 1] if not v == 0]
scores = {rel: {"tp": 0, "fp": 0, "fn": 0} for rel in relation_types + ["ALL"]}
# Count GT relations and Predicted relations
n_sents = len(gt_relations)
    n_rels = sum(len(sent) for sent in gt_relations)
    n_found = sum(len(sent) for sent in pred_relations)
# Count TP, FP and FN per type
for pred_sent, gt_sent in zip(pred_relations, gt_relations):
for rel_type in relation_types:
# strict mode takes argument types into account
if mode == "strict":
pred_rels = {
(rel["head"], rel["head_type"], rel["tail"], rel["tail_type"])
for rel in pred_sent
if rel["type"] == rel_type
}
gt_rels = {
(rel["head"], rel["head_type"], rel["tail"], rel["tail_type"])
for rel in gt_sent
if rel["type"] == rel_type
}
# boundaries mode only takes argument spans into account
elif mode == "boundaries":
pred_rels = {(rel["head"], rel["tail"]) for rel in pred_sent if rel["type"] == rel_type}
gt_rels = {(rel["head"], rel["tail"]) for rel in gt_sent if rel["type"] == rel_type}
scores[rel_type]["tp"] += len(pred_rels & gt_rels)
scores[rel_type]["fp"] += len(pred_rels - gt_rels)
scores[rel_type]["fn"] += len(gt_rels - pred_rels)
    # Compute per relation-type Precision / Recall / F1
for rel_type in scores.keys():
if scores[rel_type]["tp"]:
scores[rel_type]["p"] = scores[rel_type]["tp"] / (scores[rel_type]["fp"] + scores[rel_type]["tp"])
scores[rel_type]["r"] = scores[rel_type]["tp"] / (scores[rel_type]["fn"] + scores[rel_type]["tp"])
else:
scores[rel_type]["p"], scores[rel_type]["r"] = 0, 0
if not scores[rel_type]["p"] + scores[rel_type]["r"] == 0:
scores[rel_type]["f1"] = (
2 * scores[rel_type]["p"] * scores[rel_type]["r"] / (scores[rel_type]["p"] + scores[rel_type]["r"])
)
else:
scores[rel_type]["f1"] = 0
# Compute micro F1 Scores
tp = sum([scores[rel_type]["tp"] for rel_type in relation_types])
fp = sum([scores[rel_type]["fp"] for rel_type in relation_types])
fn = sum([scores[rel_type]["fn"] for rel_type in relation_types])
if tp:
precision = tp / (tp + fp)
recall = tp / (tp + fn)
f1 = 2 * precision * recall / (precision + recall)
else:
precision, recall, f1 = 0, 0, 0
scores["ALL"]["p"] = precision
scores["ALL"]["r"] = recall
scores["ALL"]["f1"] = f1
scores["ALL"]["tp"] = tp
scores["ALL"]["fp"] = fp
scores["ALL"]["fn"] = fn
# Compute Macro F1 Scores
scores["ALL"]["Macro_f1"] = np.mean([scores[ent_type]["f1"] for ent_type in relation_types])
scores["ALL"]["Macro_p"] = np.mean([scores[ent_type]["p"] for ent_type in relation_types])
scores["ALL"]["Macro_r"] = np.mean([scores[ent_type]["r"] for ent_type in relation_types])
logger.info(f"RE Evaluation in *** {mode.upper()} *** mode")
logger.info(
"processed {} sentences with {} relations; found: {} relations; correct: {}.".format(
n_sents, n_rels, n_found, tp
)
)
logger.info(
"\tALL\t TP: {};\tFP: {};\tFN: {}".format(scores["ALL"]["tp"], scores["ALL"]["fp"], scores["ALL"]["fn"])
)
logger.info("\t\t(m avg): precision: {:.2f};\trecall: {:.2f};\tf1: {:.2f} (micro)".format(precision, recall, f1))
logger.info(
"\t\t(M avg): precision: {:.2f};\trecall: {:.2f};\tf1: {:.2f} (Macro)\n".format(
scores["ALL"]["Macro_p"], scores["ALL"]["Macro_r"], scores["ALL"]["Macro_f1"]
)
)
for rel_type in relation_types:
logger.info(
"\t{}: \tTP: {};\tFP: {};\tFN: {};\tprecision: {:.2f};\trecall: {:.2f};\tf1: {:.2f};\t{}".format(
rel_type,
scores[rel_type]["tp"],
scores[rel_type]["fp"],
scores[rel_type]["fn"],
scores[rel_type]["p"],
scores[rel_type]["r"],
scores[rel_type]["f1"],
scores[rel_type]["tp"] + scores[rel_type]["fp"],
)
)
return scores
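if __name__ == "__main__":
    # Minimal, hypothetical sanity check (not part of the original file): one
    # sentence whose single gold relation is predicted exactly, so micro
    # precision / recall / F1 should all be 1.0. The entity types are made-up
    # placeholders.
    demo_rel = {
        "head": (0, 2),
        "tail": (3, 5),
        "head_type": "question",
        "tail_type": "answer",
        "type": 1,
    }
    demo_scores = re_score([[demo_rel]], [[dict(demo_rel)]], mode="strict")
    print(demo_scores["ALL"]["p"], demo_scores["ALL"]["r"], demo_scores["ALL"]["f1"])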
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/evaluation.py |
from collections import OrderedDict
from transformers import CONFIG_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, MODEL_NAMES_MAPPING, TOKENIZER_MAPPING
from transformers.convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS, BertConverter, XLMRobertaConverter
from transformers.models.auto.modeling_auto import auto_class_factory
from .models.layoutlmv2 import (
LayoutLMv2Config,
LayoutLMv2ForRelationExtraction,
LayoutLMv2ForTokenClassification,
LayoutLMv2Tokenizer,
LayoutLMv2TokenizerFast,
)
from .models.layoutxlm import (
LayoutXLMConfig,
LayoutXLMForRelationExtraction,
LayoutXLMForTokenClassification,
LayoutXLMTokenizer,
LayoutXLMTokenizerFast,
)
CONFIG_MAPPING.update([("layoutlmv2", LayoutLMv2Config), ("layoutxlm", LayoutXLMConfig)])
MODEL_NAMES_MAPPING.update([("layoutlmv2", "LayoutLMv2"), ("layoutxlm", "LayoutXLM")])
TOKENIZER_MAPPING.update(
[
(LayoutLMv2Config, (LayoutLMv2Tokenizer, LayoutLMv2TokenizerFast)),
(LayoutXLMConfig, (LayoutXLMTokenizer, LayoutXLMTokenizerFast)),
]
)
SLOW_TO_FAST_CONVERTERS.update({"LayoutLMv2Tokenizer": BertConverter, "LayoutXLMTokenizer": XLMRobertaConverter})
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.update(
[(LayoutLMv2Config, LayoutLMv2ForTokenClassification), (LayoutXLMConfig, LayoutXLMForTokenClassification)]
)
MODEL_FOR_RELATION_EXTRACTION_MAPPING = OrderedDict(
[(LayoutLMv2Config, LayoutLMv2ForRelationExtraction), (LayoutXLMConfig, LayoutXLMForRelationExtraction)]
)
AutoModelForTokenClassification = auto_class_factory(
"AutoModelForTokenClassification", MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, head_doc="token classification"
)
AutoModelForRelationExtraction = auto_class_factory(
"AutoModelForRelationExtraction", MODEL_FOR_RELATION_EXTRACTION_MAPPING, head_doc="relation extraction"
)
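# Usage sketch (hedged, not part of the original file): once the mappings above
# are updated, the extended auto classes resolve LayoutLMv2 / LayoutXLM
# checkpoints by config type. The checkpoint name below is an assumption and
# loading it downloads the weights.
#
# from layoutlmft import AutoModelForTokenClassification
# model = AutoModelForTokenClassification.from_pretrained(
#     "microsoft/layoutlmv2-base-uncased", num_labels=7
# )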
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/__init__.py |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple
import torch
from transformers.file_utils import ModelOutput
@dataclass
class ReOutput(ModelOutput):
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
entities: Optional[Dict] = None
relations: Optional[Dict] = None
pred_relations: Optional[Dict] = None
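if __name__ == "__main__":
    # Hedged demo (not part of the original file): like any ModelOutput
    # subclass, ReOutput supports both attribute and key access, and fields
    # that were never set stay None.
    demo = ReOutput(logits=torch.zeros(2, 3), entities=[{}], relations=[{}])
    print(demo.loss, demo["logits"].shape, demo.pred_relations)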
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/utils.py |
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/models/__init__.py |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
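if __name__ == "__main__":
    # Minimal, hypothetical demo (not part of the original file): parse the
    # dataclass above from an explicit argv list with HfArgumentParser. The
    # model name is only a placeholder string; nothing is downloaded here.
    from transformers import HfArgumentParser
    parser = HfArgumentParser(ModelArguments)
    (model_args,) = parser.parse_args_into_dataclasses(
        args=["--model_name_or_path", "microsoft/layoutlmv2-base-uncased"]
    )
    print(model_args.model_name_or_path, model_args.model_revision)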
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/models/model_args.py |
# coding=utf-8
from transformers.models.layoutlm.tokenization_layoutlm import LayoutLMTokenizer
from transformers.utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"microsoft/layoutlmv2-base-uncased": "https://huggingface.co/microsoft/layoutlmv2-base-uncased/resolve/main/vocab.txt",
"microsoft/layoutlmv2-large-uncased": "https://huggingface.co/microsoft/layoutlmv2-large-uncased/resolve/main/vocab.txt",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"microsoft/layoutlmv2-base-uncased": 512,
"microsoft/layoutlmv2-large-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"microsoft/layoutlmv2-base-uncased": {"do_lower_case": True},
"microsoft/layoutlmv2-large-uncased": {"do_lower_case": True},
}
class LayoutLMv2Tokenizer(LayoutLMTokenizer):
r"""
Constructs a LayoutLMv2 tokenizer.
    :class:`~transformers.LayoutLMv2Tokenizer` is identical to :class:`~transformers.BertTokenizer` and runs end-to-end
tokenization: punctuation splitting + wordpiece.
Refer to superclass :class:`~transformers.BertTokenizer` for usage examples and documentation concerning
parameters.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self, model_max_length=512, **kwargs):
super().__init__(model_max_length=model_max_length, **kwargs)
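# Usage sketch (hedged, not part of the original file): apart from pinning
# `model_max_length` and the pretrained maps above, the class behaves like the
# underlying LayoutLM / BERT tokenizer; loading it downloads the vocab file.
#
# tokenizer = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased")
# tokenizer.tokenize("Invoice No. 12345")  # plain WordPiece tokenization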
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/models/layoutlmv2/tokenization_layoutlmv2.py |
from .configuration_layoutlmv2 import LayoutLMv2Config
from .modeling_layoutlmv2 import LayoutLMv2ForRelationExtraction, LayoutLMv2ForTokenClassification, LayoutLMv2Model
from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/models/layoutlmv2/__init__.py |
# -*- coding: utf-8 -*-
def add_layoutlmv2_config(cfg):
_C = cfg
# -----------------------------------------------------------------------------
# Config definition
# -----------------------------------------------------------------------------
_C.MODEL.MASK_ON = True
# When using pre-trained models in Detectron1 or any MSRA models,
    # std has been absorbed into its conv1 weights, so the std needs to be set to 1.
# Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std)
_C.MODEL.PIXEL_STD = [57.375, 57.120, 58.395]
# ---------------------------------------------------------------------------- #
# Backbone options
# ---------------------------------------------------------------------------- #
_C.MODEL.BACKBONE.NAME = "build_resnet_fpn_backbone"
# ---------------------------------------------------------------------------- #
# FPN options
# ---------------------------------------------------------------------------- #
# Names of the input feature maps to be used by FPN
# They must have contiguous power of 2 strides
# e.g., ["res2", "res3", "res4", "res5"]
_C.MODEL.FPN.IN_FEATURES = ["res2", "res3", "res4", "res5"]
# ---------------------------------------------------------------------------- #
# Anchor generator options
# ---------------------------------------------------------------------------- #
# Anchor sizes (i.e. sqrt of area) in absolute pixels w.r.t. the network input.
# Format: list[list[float]]. SIZES[i] specifies the list of sizes
# to use for IN_FEATURES[i]; len(SIZES) == len(IN_FEATURES) must be true,
# or len(SIZES) == 1 is true and size list SIZES[0] is used for all
# IN_FEATURES.
_C.MODEL.ANCHOR_GENERATOR.SIZES = [[32], [64], [128], [256], [512]]
# ---------------------------------------------------------------------------- #
# RPN options
# ---------------------------------------------------------------------------- #
# Names of the input feature maps to be used by RPN
# e.g., ["p2", "p3", "p4", "p5", "p6"] for FPN
_C.MODEL.RPN.IN_FEATURES = ["p2", "p3", "p4", "p5", "p6"]
# Number of top scoring RPN proposals to keep before applying NMS
# When FPN is used, this is *per FPN level* (not total)
_C.MODEL.RPN.PRE_NMS_TOPK_TRAIN = 2000
_C.MODEL.RPN.PRE_NMS_TOPK_TEST = 1000
# Number of top scoring RPN proposals to keep after applying NMS
# When FPN is used, this limit is applied per level and then again to the union
# of proposals from all levels
# NOTE: When FPN is used, the meaning of this config is different from Detectron1.
# It means per-batch topk in Detectron1, but per-image topk here.
# See the "find_top_rpn_proposals" function for details.
_C.MODEL.RPN.POST_NMS_TOPK_TRAIN = 1000
_C.MODEL.RPN.POST_NMS_TOPK_TEST = 1000
# ---------------------------------------------------------------------------- #
# ROI HEADS options
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_HEADS.NAME = "StandardROIHeads"
# Number of foreground classes
_C.MODEL.ROI_HEADS.NUM_CLASSES = 5
# Names of the input feature maps to be used by ROI heads
# Currently all heads (box, mask, ...) use the same input feature map list
# e.g., ["p2", "p3", "p4", "p5"] is commonly used for FPN
_C.MODEL.ROI_HEADS.IN_FEATURES = ["p2", "p3", "p4", "p5"]
# ---------------------------------------------------------------------------- #
# Box Head
# ---------------------------------------------------------------------------- #
    # C4 models don't use the head name option
    # Options for non-C4 models: FastRCNNConvFCHead
_C.MODEL.ROI_BOX_HEAD.NAME = "FastRCNNConvFCHead"
_C.MODEL.ROI_BOX_HEAD.NUM_FC = 2
_C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14
# ---------------------------------------------------------------------------- #
# Mask Head
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_MASK_HEAD.NAME = "MaskRCNNConvUpsampleHead"
_C.MODEL.ROI_MASK_HEAD.NUM_CONV = 4 # The number of convs in the mask head
_C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 7
# ---------------------------------------------------------------------------- #
    # ResNe[X]t options (ResNets = {ResNet, ResNeXt})
# Note that parts of a resnet may be used for both the backbone and the head
# These options apply to both
# ---------------------------------------------------------------------------- #
_C.MODEL.RESNETS.DEPTH = 101
_C.MODEL.RESNETS.SIZES = [[32], [64], [128], [256], [512]]
_C.MODEL.RESNETS.ASPECT_RATIOS = [[0.5, 1.0, 2.0]]
_C.MODEL.RESNETS.OUT_FEATURES = ["res2", "res3", "res4", "res5"] # res4 for C4 backbone, res2..5 for FPN backbone
# Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt
_C.MODEL.RESNETS.NUM_GROUPS = 32
# Baseline width of each group.
    # Scaling this parameter will scale the width of all bottleneck layers.
_C.MODEL.RESNETS.WIDTH_PER_GROUP = 8
# Place the stride 2 conv on the 1x1 filter
# Use True only for the original MSRA ResNet; use False for C2 and Torch models
_C.MODEL.RESNETS.STRIDE_IN_1X1 = False
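if __name__ == "__main__":
    # Hedged demo (not part of the original file): build a default detectron2
    # config and apply the LayoutLMv2 visual-backbone overrides defined above,
    # mirroring what VisualBackbone does in modeling_layoutlmv2.py. Requires
    # detectron2 to be installed.
    import detectron2.config
    demo_cfg = detectron2.config.get_cfg()
    add_layoutlmv2_config(demo_cfg)
    print(demo_cfg.MODEL.BACKBONE.NAME, demo_cfg.MODEL.RESNETS.DEPTH)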
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/models/layoutlmv2/detectron2_config.py |
# coding=utf-8
import math
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
import detectron2
from detectron2.modeling import META_ARCH_REGISTRY
from transformers import PreTrainedModel
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
TokenClassifierOutput,
)
from transformers.modeling_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMIntermediate as LayoutLMv2Intermediate
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMOutput as LayoutLMv2Output
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMPooler as LayoutLMv2Pooler
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMSelfOutput as LayoutLMv2SelfOutput
from transformers.utils import logging
from ...modules.decoders.re import REDecoder
from ...utils import ReOutput
from .configuration_layoutlmv2 import LayoutLMv2Config
from .detectron2_config import add_layoutlmv2_config
logger = logging.get_logger(__name__)
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST = [
"layoutlmv2-base-uncased",
"layoutlmv2-large-uncased",
]
LayoutLMv2LayerNorm = torch.nn.LayerNorm
class LayoutLMv2Embeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super(LayoutLMv2Embeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = LayoutLMv2LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
def _cal_spatial_position_embeddings(self, bbox):
try:
left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
except IndexError as e:
raise IndexError("The :obj:`bbox`coordinate values should be within 0-1000 range.") from e
h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1])
w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0])
spatial_position_embeddings = torch.cat(
[
left_position_embeddings,
upper_position_embeddings,
right_position_embeddings,
lower_position_embeddings,
h_position_embeddings,
w_position_embeddings,
],
dim=-1,
)
return spatial_position_embeddings
class LayoutLMv2SelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.fast_qkv = config.fast_qkv
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.has_relative_attention_bias = config.has_relative_attention_bias
self.has_spatial_attention_bias = config.has_spatial_attention_bias
if config.fast_qkv:
self.qkv_linear = nn.Linear(config.hidden_size, 3 * self.all_head_size, bias=False)
self.q_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size))
self.v_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size))
else:
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def compute_qkv(self, hidden_states):
if self.fast_qkv:
qkv = self.qkv_linear(hidden_states)
q, k, v = torch.chunk(qkv, 3, dim=-1)
if q.ndimension() == self.q_bias.ndimension():
q = q + self.q_bias
v = v + self.v_bias
else:
_sz = (1,) * (q.ndimension() - 1) + (-1,)
q = q + self.q_bias.view(*_sz)
v = v + self.v_bias.view(*_sz)
else:
q = self.query(hidden_states)
k = self.key(hidden_states)
v = self.value(hidden_states)
return q, k, v
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
rel_pos=None,
rel_2d_pos=None,
):
q, k, v = self.compute_qkv(hidden_states)
# (B, L, H*D) -> (B, H, L, D)
query_layer = self.transpose_for_scores(q)
key_layer = self.transpose_for_scores(k)
value_layer = self.transpose_for_scores(v)
query_layer = query_layer / math.sqrt(self.attention_head_size)
# [BSZ, NAT, L, L]
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.has_relative_attention_bias:
attention_scores += rel_pos
if self.has_spatial_attention_bias:
attention_scores += rel_2d_pos
attention_scores = attention_scores.float().masked_fill_(attention_mask.to(torch.bool), float("-inf"))
attention_probs = F.softmax(attention_scores, dim=-1, dtype=torch.float32).type_as(value_layer)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
class LayoutLMv2Attention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = LayoutLMv2SelfAttention(config)
self.output = LayoutLMv2SelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
rel_pos=None,
rel_2d_pos=None,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class LayoutLMv2Layer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = LayoutLMv2Attention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
self.crossattention = LayoutLMv2Attention(config)
self.intermediate = LayoutLMv2Intermediate(config)
self.output = LayoutLMv2Output(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
rel_pos=None,
rel_2d_pos=None,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
else:
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
cross_attn_present_key_value = None
if self.is_decoder and encoder_hidden_states is not None:
assert hasattr(
self, "crossattention"
), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
cross_attn_past_key_value,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
# add cross-attn cache to positions 3,4 of present_key_value tuple
cross_attn_present_key_value = cross_attention_outputs[-1]
present_key_value = present_key_value + cross_attn_present_key_value
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
# if decoder, return the attn key/values as the last output
if self.is_decoder:
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
ret = 0
if bidirectional:
num_buckets //= 2
ret += (relative_position > 0).long() * num_buckets
n = torch.abs(relative_position)
else:
n = torch.max(-relative_position, torch.zeros_like(relative_position))
# now n is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = n < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).to(torch.long)
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
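# Small, hedged sanity check for `relative_position_bucket` (not part of the
# original file): with the defaults (bidirectional, 32 buckets, max distance
# 128) the upper half of the bucket range encodes positive offsets, small
# offsets map to exact buckets, and large offsets are compressed
# logarithmically. For example, the call below should yield roughly
# tensor([0, 3, 19, 31]).
#
# relative_position_bucket(torch.tensor([0, -3, 3, 100]))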
class LayoutLMv2Encoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([LayoutLMv2Layer(config) for _ in range(config.num_hidden_layers)])
self.has_relative_attention_bias = config.has_relative_attention_bias
self.has_spatial_attention_bias = config.has_spatial_attention_bias
if self.has_relative_attention_bias:
self.rel_pos_bins = config.rel_pos_bins
self.max_rel_pos = config.max_rel_pos
self.rel_pos_onehot_size = config.rel_pos_bins
self.rel_pos_bias = nn.Linear(self.rel_pos_onehot_size, config.num_attention_heads, bias=False)
if self.has_spatial_attention_bias:
self.max_rel_2d_pos = config.max_rel_2d_pos
self.rel_2d_pos_bins = config.rel_2d_pos_bins
self.rel_2d_pos_onehot_size = config.rel_2d_pos_bins
self.rel_pos_x_bias = nn.Linear(self.rel_2d_pos_onehot_size, config.num_attention_heads, bias=False)
self.rel_pos_y_bias = nn.Linear(self.rel_2d_pos_onehot_size, config.num_attention_heads, bias=False)
def _cal_1d_pos_emb(self, hidden_states, position_ids):
rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
rel_pos = relative_position_bucket(
rel_pos_mat,
num_buckets=self.rel_pos_bins,
max_distance=self.max_rel_pos,
)
rel_pos = F.one_hot(rel_pos, num_classes=self.rel_pos_onehot_size).type_as(hidden_states)
rel_pos = self.rel_pos_bias(rel_pos).permute(0, 3, 1, 2)
rel_pos = rel_pos.contiguous()
return rel_pos
def _cal_2d_pos_emb(self, hidden_states, bbox):
position_coord_x = bbox[:, :, 0]
position_coord_y = bbox[:, :, 3]
rel_pos_x_2d_mat = position_coord_x.unsqueeze(-2) - position_coord_x.unsqueeze(-1)
rel_pos_y_2d_mat = position_coord_y.unsqueeze(-2) - position_coord_y.unsqueeze(-1)
rel_pos_x = relative_position_bucket(
rel_pos_x_2d_mat,
num_buckets=self.rel_2d_pos_bins,
max_distance=self.max_rel_2d_pos,
)
rel_pos_y = relative_position_bucket(
rel_pos_y_2d_mat,
num_buckets=self.rel_2d_pos_bins,
max_distance=self.max_rel_2d_pos,
)
rel_pos_x = F.one_hot(rel_pos_x, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states)
rel_pos_y = F.one_hot(rel_pos_y, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states)
rel_pos_x = self.rel_pos_x_bias(rel_pos_x).permute(0, 3, 1, 2)
rel_pos_y = self.rel_pos_y_bias(rel_pos_y).permute(0, 3, 1, 2)
rel_pos_x = rel_pos_x.contiguous()
rel_pos_y = rel_pos_y.contiguous()
rel_2d_pos = rel_pos_x + rel_pos_y
return rel_2d_pos
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
bbox=None,
position_ids=None,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
rel_pos = self._cal_1d_pos_emb(hidden_states, position_ids) if self.has_relative_attention_bias else None
rel_2d_pos = self._cal_2d_pos_emb(hidden_states, bbox) if self.has_spatial_attention_bias else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warn(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
class LayoutLMv2PreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = LayoutLMv2Config
pretrained_model_archive_map = LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST
base_model_prefix = "layoutlmv2"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, LayoutLMv2LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def my_convert_sync_batchnorm(module, process_group=None):
# same as `nn.modules.SyncBatchNorm.convert_sync_batchnorm` but allowing converting from `detectron2.layers.FrozenBatchNorm2d`
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
return nn.modules.SyncBatchNorm.convert_sync_batchnorm(module, process_group)
module_output = module
if isinstance(module, detectron2.layers.FrozenBatchNorm2d):
module_output = torch.nn.SyncBatchNorm(
num_features=module.num_features,
eps=module.eps,
affine=True,
track_running_stats=True,
process_group=process_group,
)
module_output.weight = torch.nn.Parameter(module.weight)
module_output.bias = torch.nn.Parameter(module.bias)
module_output.running_mean = module.running_mean
module_output.running_var = module.running_var
module_output.num_batches_tracked = torch.tensor(0, dtype=torch.long, device=module.running_mean.device)
for name, child in module.named_children():
module_output.add_module(name, my_convert_sync_batchnorm(child, process_group))
del module
return module_output
class VisualBackbone(nn.Module):
def __init__(self, config):
super().__init__()
self.cfg = detectron2.config.get_cfg()
add_layoutlmv2_config(self.cfg)
meta_arch = self.cfg.MODEL.META_ARCHITECTURE
model = META_ARCH_REGISTRY.get(meta_arch)(self.cfg)
assert isinstance(model.backbone, detectron2.modeling.backbone.FPN)
self.backbone = model.backbone
if (
config.convert_sync_batchnorm
and torch.distributed.is_available()
and torch.distributed.is_initialized()
and torch.distributed.get_rank() > -1
):
self_rank = torch.distributed.get_rank()
node_size = torch.cuda.device_count()
world_size = torch.distributed.get_world_size()
assert world_size % node_size == 0
node_global_ranks = [
list(range(i * node_size, (i + 1) * node_size)) for i in range(world_size // node_size)
]
sync_bn_groups = [
torch.distributed.new_group(ranks=node_global_ranks[i]) for i in range(world_size // node_size)
]
node_rank = self_rank // node_size
assert self_rank in node_global_ranks[node_rank]
self.backbone = my_convert_sync_batchnorm(self.backbone, process_group=sync_bn_groups[node_rank])
assert len(self.cfg.MODEL.PIXEL_MEAN) == len(self.cfg.MODEL.PIXEL_STD)
num_channels = len(self.cfg.MODEL.PIXEL_MEAN)
self.register_buffer(
"pixel_mean",
torch.Tensor(self.cfg.MODEL.PIXEL_MEAN).view(num_channels, 1, 1),
)
self.register_buffer("pixel_std", torch.Tensor(self.cfg.MODEL.PIXEL_STD).view(num_channels, 1, 1))
self.out_feature_key = "p2"
if torch.is_deterministic():
logger.warning("using `AvgPool2d` instead of `AdaptiveAvgPool2d`")
input_shape = (224, 224)
backbone_stride = self.backbone.output_shape()[self.out_feature_key].stride
self.pool = nn.AvgPool2d(
(
math.ceil(math.ceil(input_shape[0] / backbone_stride) / config.image_feature_pool_shape[0]),
math.ceil(math.ceil(input_shape[1] / backbone_stride) / config.image_feature_pool_shape[1]),
)
)
else:
self.pool = nn.AdaptiveAvgPool2d(config.image_feature_pool_shape[:2])
if len(config.image_feature_pool_shape) == 2:
config.image_feature_pool_shape.append(self.backbone.output_shape()[self.out_feature_key].channels)
assert self.backbone.output_shape()[self.out_feature_key].channels == config.image_feature_pool_shape[2]
def forward(self, images):
images_input = ((images if torch.is_tensor(images) else images.tensor) - self.pixel_mean) / self.pixel_std
features = self.backbone(images_input)
features = features[self.out_feature_key]
features = self.pool(features).flatten(start_dim=2).transpose(1, 2).contiguous()
return features
class LayoutLMv2Model(LayoutLMv2PreTrainedModel):
def __init__(self, config):
super(LayoutLMv2Model, self).__init__(config)
self.config = config
self.has_visual_segment_embedding = config.has_visual_segment_embedding
self.embeddings = LayoutLMv2Embeddings(config)
self.visual = VisualBackbone(config)
self.visual_proj = nn.Linear(config.image_feature_pool_shape[-1], config.hidden_size)
if self.has_visual_segment_embedding:
self.visual_segment_embedding = nn.Parameter(nn.Embedding(1, config.hidden_size).weight[0])
self.visual_LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.visual_dropout = nn.Dropout(config.hidden_dropout_prob)
self.encoder = LayoutLMv2Encoder(config)
self.pooler = LayoutLMv2Pooler(config)
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def _calc_text_embeddings(self, input_ids, bbox, position_ids, token_type_ids):
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.embeddings.word_embeddings(input_ids)
position_embeddings = self.embeddings.position_embeddings(position_ids)
spatial_position_embeddings = self.embeddings._cal_spatial_position_embeddings(bbox)
token_type_embeddings = self.embeddings.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + spatial_position_embeddings + token_type_embeddings
embeddings = self.embeddings.LayerNorm(embeddings)
embeddings = self.embeddings.dropout(embeddings)
return embeddings
def _calc_img_embeddings(self, image, bbox, position_ids):
visual_embeddings = self.visual_proj(self.visual(image))
position_embeddings = self.embeddings.position_embeddings(position_ids)
spatial_position_embeddings = self.embeddings._cal_spatial_position_embeddings(bbox)
embeddings = visual_embeddings + position_embeddings + spatial_position_embeddings
if self.has_visual_segment_embedding:
embeddings += self.visual_segment_embedding
embeddings = self.visual_LayerNorm(embeddings)
embeddings = self.visual_dropout(embeddings)
return embeddings
def forward(
self,
input_ids=None,
bbox=None,
image=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
visual_shape = list(input_shape)
visual_shape[1] = self.config.image_feature_pool_shape[0] * self.config.image_feature_pool_shape[1]
visual_shape = torch.Size(visual_shape)
final_shape = list(input_shape)
final_shape[1] += visual_shape[1]
final_shape = torch.Size(final_shape)
visual_bbox_x = (
torch.arange(
0,
1000 * (self.config.image_feature_pool_shape[1] + 1),
1000,
device=device,
dtype=bbox.dtype,
)
// self.config.image_feature_pool_shape[1]
)
visual_bbox_y = (
torch.arange(
0,
1000 * (self.config.image_feature_pool_shape[0] + 1),
1000,
device=device,
dtype=bbox.dtype,
)
// self.config.image_feature_pool_shape[0]
)
visual_bbox = torch.stack(
[
visual_bbox_x[:-1].repeat(self.config.image_feature_pool_shape[0], 1),
visual_bbox_y[:-1].repeat(self.config.image_feature_pool_shape[1], 1).transpose(0, 1),
visual_bbox_x[1:].repeat(self.config.image_feature_pool_shape[0], 1),
visual_bbox_y[1:].repeat(self.config.image_feature_pool_shape[1], 1).transpose(0, 1),
],
dim=-1,
).view(-1, bbox.size(-1))
visual_bbox = visual_bbox.repeat(final_shape[0], 1, 1)
final_bbox = torch.cat([bbox, visual_bbox], dim=1)
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
visual_attention_mask = torch.ones(visual_shape, device=device)
final_attention_mask = torch.cat([attention_mask, visual_attention_mask], dim=1)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if position_ids is None:
seq_length = input_shape[1]
position_ids = self.embeddings.position_ids[:, :seq_length]
position_ids = position_ids.expand_as(input_ids)
visual_position_ids = torch.arange(0, visual_shape[1], dtype=torch.long, device=device).repeat(
input_shape[0], 1
)
final_position_ids = torch.cat([position_ids, visual_position_ids], dim=1)
if bbox is None:
bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device)
text_layout_emb = self._calc_text_embeddings(
input_ids=input_ids,
bbox=bbox,
token_type_ids=token_type_ids,
position_ids=position_ids,
)
visual_emb = self._calc_img_embeddings(
image=image,
bbox=visual_bbox,
position_ids=visual_position_ids,
)
final_emb = torch.cat([text_layout_emb, visual_emb], dim=1)
extended_attention_mask = final_attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.to(dtype=next(self.parameters()).dtype)
else:
head_mask = [None] * self.config.num_hidden_layers
encoder_outputs = self.encoder(
final_emb,
extended_attention_mask,
bbox=final_bbox,
position_ids=final_position_ids,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
class LayoutLMv2ForTokenClassification(LayoutLMv2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.layoutlmv2 = LayoutLMv2Model(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def get_input_embeddings(self):
return self.layoutlmv2.embeddings.word_embeddings
def forward(
self,
input_ids=None,
bbox=None,
image=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.layoutlmv2(
input_ids=input_ids,
bbox=bbox,
image=image,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
seq_length = input_ids.size(1)
sequence_output, image_output = outputs[0][:, :seq_length], outputs[0][:, seq_length:]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class LayoutLMv2ForRelationExtraction(LayoutLMv2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.layoutlmv2 = LayoutLMv2Model(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.extractor = REDecoder(config)
self.init_weights()
def forward(
self,
input_ids,
bbox,
labels=None,
image=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
entities=None,
relations=None,
):
outputs = self.layoutlmv2(
input_ids=input_ids,
bbox=bbox,
image=image,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
)
seq_length = input_ids.size(1)
sequence_output, image_output = outputs[0][:, :seq_length], outputs[0][:, seq_length:]
sequence_output = self.dropout(sequence_output)
loss, pred_relations = self.extractor(sequence_output, entities, relations)
return ReOutput(
loss=loss,
entities=entities,
relations=relations,
pred_relations=pred_relations,
hidden_states=outputs[0],
)
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/models/layoutlmv2/modeling_layoutlmv2.py |
# coding=utf-8
from transformers.models.layoutlm.tokenization_layoutlm_fast import LayoutLMTokenizerFast
from transformers.utils import logging
from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"microsoft/layoutlmv2-base-uncased": "https://huggingface.co/microsoft/layoutlmv2-base-uncased/resolve/main/vocab.txt",
"microsoft/layoutlmv2-large-uncased": "https://huggingface.co/microsoft/layoutlmv2-large-uncased/resolve/main/vocab.txt",
},
"tokenizer_file": {
"microsoft/layoutlmv2-base-uncased": "https://huggingface.co/microsoft/layoutlmv2-base-uncased/resolve/main/tokenizer.json",
"microsoft/layoutlmv2-large-uncased": "https://huggingface.co/microsoft/layoutlmv2-large-uncased/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"microsoft/layoutlmv2-base-uncased": 512,
"microsoft/layoutlmv2-large-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"microsoft/layoutlmv2-base-uncased": {"do_lower_case": True},
"microsoft/layoutlmv2-large-uncased": {"do_lower_case": True},
}
class LayoutLMv2TokenizerFast(LayoutLMTokenizerFast):
r"""
Constructs a "Fast" LayoutLMv2Tokenizer.
Refer to superclass :class:`~transformers.BertTokenizerFast` for usage examples and documentation concerning
parameters.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
slow_tokenizer_class = LayoutLMv2Tokenizer
def __init__(self, model_max_length=512, **kwargs):
super().__init__(model_max_length=model_max_length, **kwargs)
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/models/layoutlmv2/tokenization_layoutlmv2_fast.py |
# coding=utf-8
from transformers.models.layoutlm.configuration_layoutlm import LayoutLMConfig
from transformers.utils import logging
logger = logging.get_logger(__name__)
LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"layoutlmv2-base-uncased": "https://huggingface.co/microsoft/layoutlmv2-base-uncased/resolve/main/config.json",
"layoutlmv2-large-uncased": "https://huggingface.co/microsoft/layoutlmv2-large-uncased/resolve/main/config.json",
}
class LayoutLMv2Config(LayoutLMConfig):
model_type = "layoutlmv2"
def __init__(
self,
vocab_size=30522,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
pad_token_id=0,
gradient_checkpointing=False,
max_2d_position_embeddings=1024,
max_rel_pos=128,
rel_pos_bins=32,
fast_qkv=True,
max_rel_2d_pos=256,
rel_2d_pos_bins=64,
convert_sync_batchnorm=True,
image_feature_pool_shape=[7, 7, 256],
coordinate_size=128,
shape_size=128,
has_relative_attention_bias=True,
has_spatial_attention_bias=True,
has_visual_segment_embedding=False,
**kwargs
):
super().__init__(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
hidden_act=hidden_act,
hidden_dropout_prob=hidden_dropout_prob,
attention_probs_dropout_prob=attention_probs_dropout_prob,
max_position_embeddings=max_position_embeddings,
type_vocab_size=type_vocab_size,
initializer_range=initializer_range,
layer_norm_eps=layer_norm_eps,
pad_token_id=pad_token_id,
gradient_checkpointing=gradient_checkpointing,
**kwargs,
)
self.max_2d_position_embeddings = max_2d_position_embeddings
self.max_rel_pos = max_rel_pos
self.rel_pos_bins = rel_pos_bins
self.fast_qkv = fast_qkv
self.max_rel_2d_pos = max_rel_2d_pos
self.rel_2d_pos_bins = rel_2d_pos_bins
self.convert_sync_batchnorm = convert_sync_batchnorm
self.image_feature_pool_shape = image_feature_pool_shape
self.coordinate_size = coordinate_size
self.shape_size = shape_size
self.has_relative_attention_bias = has_relative_attention_bias
self.has_spatial_attention_bias = has_spatial_attention_bias
self.has_visual_segment_embedding = has_visual_segment_embedding
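if __name__ == "__main__":
    # Hedged demo (not part of the original file): instantiate the config with
    # a couple of overrides and check that the LayoutLMv2-specific fields are
    # serialized alongside the inherited BERT/LayoutLM ones.
    demo_cfg = LayoutLMv2Config(rel_pos_bins=64, has_visual_segment_embedding=True)
    demo_dict = demo_cfg.to_dict()
    print(demo_dict["model_type"], demo_dict["rel_pos_bins"], demo_dict["has_visual_segment_embedding"])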
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/models/layoutlmv2/configuration_layoutlmv2.py |
# coding=utf-8
from transformers.utils import logging
from ..layoutlmv2 import LayoutLMv2Config
logger = logging.get_logger(__name__)
LAYOUTXLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"layoutxlm-base": "https://huggingface.co/layoutxlm-base/resolve/main/config.json",
"layoutxlm-large": "https://huggingface.co/layoutxlm-large/resolve/main/config.json",
}
class LayoutXLMConfig(LayoutLMv2Config):
model_type = "layoutxlm"
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/models/layoutxlm/configuration_layoutxlm.py |
# coding=utf-8
from transformers import XLMRobertaTokenizerFast
from transformers.file_utils import is_sentencepiece_available
from transformers.utils import logging
if is_sentencepiece_available():
from .tokenization_layoutxlm import LayoutXLMTokenizer
else:
LayoutXLMTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"layoutxlm-base": "https://huggingface.co/layoutxlm-base/resolve/main/sentencepiece.bpe.model",
"layoutxlm-large": "https://huggingface.co/layoutxlm-large/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_file": {
"layoutxlm-base": "https://huggingface.co/layoutxlm-base/resolve/main/tokenizer.json",
"layoutxlm-large": "https://huggingface.co/layoutxlm-large/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"layoutxlm-base": 512,
"layoutxlm-large": 512,
}
class LayoutXLMTokenizerFast(XLMRobertaTokenizerFast):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = LayoutXLMTokenizer
def __init__(self, model_max_length=512, **kwargs):
super().__init__(model_max_length=model_max_length, **kwargs)
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/models/layoutxlm/tokenization_layoutxlm_fast.py |
# coding=utf-8
from transformers import XLMRobertaTokenizer
from transformers.utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"layoutxlm-base": "https://huggingface.co/layoutxlm-base/resolve/main/sentencepiece.bpe.model",
"layoutxlm-large": "https://huggingface.co/layoutxlm-large/resolve/main/sentencepiece.bpe.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"layoutxlm-base": 512,
"layoutxlm-large": 512,
}
class LayoutXLMTokenizer(XLMRobertaTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
def __init__(self, model_max_length=512, **kwargs):
super().__init__(model_max_length=model_max_length, **kwargs)
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/models/layoutxlm/tokenization_layoutxlm.py |
from .configuration_layoutxlm import LayoutXLMConfig
from .modeling_layoutxlm import LayoutXLMForRelationExtraction, LayoutXLMForTokenClassification, LayoutXLMModel
from .tokenization_layoutxlm import LayoutXLMTokenizer
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/models/layoutxlm/__init__.py |
# coding=utf-8
from transformers.utils import logging
from ..layoutlmv2 import LayoutLMv2ForRelationExtraction, LayoutLMv2ForTokenClassification, LayoutLMv2Model
from .configuration_layoutxlm import LayoutXLMConfig
logger = logging.get_logger(__name__)
LAYOUTXLM_PRETRAINED_MODEL_ARCHIVE_LIST = [
"layoutxlm-base",
"layoutxlm-large",
]
class LayoutXLMModel(LayoutLMv2Model):
config_class = LayoutXLMConfig
class LayoutXLMForTokenClassification(LayoutLMv2ForTokenClassification):
config_class = LayoutXLMConfig
class LayoutXLMForRelationExtraction(LayoutLMv2ForRelationExtraction):
config_class = LayoutXLMConfig
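# These subclasses only rebind ``config_class``; the architecture and forward logic are inherited
# unchanged from the LayoutLMv2 implementation. Usage sketch (the checkpoint path is illustrative,
# e.g. a local directory containing a LayoutXLM checkpoint):
#
#   from layoutlmft.models.layoutxlm import LayoutXLMForTokenClassification
#
#   model = LayoutXLMForTokenClassification.from_pretrained("path/to/layoutxlm-base")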
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/models/layoutxlm/modeling_layoutxlm.py |
from transformers.models.layoutlm import *
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/models/layoutlm/__init__.py |
import collections
import time
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from packaging import version
from torch import nn
from torch.utils.data import DataLoader, Dataset
from transformers.trainer_utils import EvalPrediction, PredictionOutput, speed_metrics
from transformers.utils import logging
from .funsd_trainer import FunsdTrainer
if version.parse(torch.__version__) >= version.parse("1.6"):
_is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.get_logger(__name__)
class XfunSerTrainer(FunsdTrainer):
pass
class XfunReTrainer(FunsdTrainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.label_names.append("relations")
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
inputs = self._prepare_inputs(inputs)
with torch.no_grad():
if self.use_amp:
with autocast():
outputs = model(**inputs)
else:
outputs = model(**inputs)
labels = tuple(inputs.get(name) for name in self.label_names)
return outputs, labels
def prediction_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
Works both with and without labels.
"""
if not isinstance(dataloader.dataset, collections.abc.Sized):
raise ValueError("dataset must implement __len__")
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
if self.args.deepspeed and not self.args.do_train:
# no harm, but flagging to the user that deepspeed config is ignored for eval
# flagging only for when --do_train wasn't passed as only then it's redundant
logger.info("Detected the deepspeed argument but it will not be used for evaluation")
model = self._wrap_model(self.model, training=False)
# if full fp16 is wanted on eval and this ``evaluation`` or ``predict`` isn't called while
# ``train`` is running, half it first and then put on device
if not self.is_in_train and self.args.fp16_full_eval:
model = model.half().to(self.args.device)
batch_size = dataloader.batch_size
num_examples = self.num_examples(dataloader)
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", num_examples)
logger.info(" Batch size = %d", batch_size)
model.eval()
self.callback_handler.eval_dataloader = dataloader
re_labels = None
pred_relations = None
entities = None
for step, inputs in enumerate(dataloader):
outputs, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
re_labels = labels[1] if re_labels is None else re_labels + labels[1]
pred_relations = (
outputs.pred_relations if pred_relations is None else pred_relations + outputs.pred_relations
)
entities = outputs.entities if entities is None else entities + outputs.entities
self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
gt_relations = []
for b in range(len(re_labels)):
rel_sent = []
for head, tail in zip(re_labels[b]["head"], re_labels[b]["tail"]):
rel = {}
rel["head_id"] = head
rel["head"] = (entities[b]["start"][rel["head_id"]], entities[b]["end"][rel["head_id"]])
rel["head_type"] = entities[b]["label"][rel["head_id"]]
rel["tail_id"] = tail
rel["tail"] = (entities[b]["start"][rel["tail_id"]], entities[b]["end"][rel["tail_id"]])
rel["tail_type"] = entities[b]["label"][rel["tail_id"]]
rel["type"] = 1
rel_sent.append(rel)
gt_relations.append(rel_sent)
re_metrics = self.compute_metrics(EvalPrediction(predictions=pred_relations, label_ids=gt_relations))
re_metrics = {
"precision": re_metrics["ALL"]["p"],
"recall": re_metrics["ALL"]["r"],
"f1": re_metrics["ALL"]["f1"],
}
re_metrics[f"{metric_key_prefix}_loss"] = outputs.loss.mean().item()
metrics = {}
# # Prefix all keys with metric_key_prefix + '_'
for key in list(re_metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = re_metrics.pop(key)
else:
metrics[f"{key}"] = re_metrics.pop(key)
return metrics
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> Dict[str, float]:
"""
Runs evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
(pass it to the init :obj:`compute_metrics` argument).
You can also subclass and override this method to inject custom behavior.
Args:
eval_dataset (:obj:`Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,
columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the
:obj:`__len__` method.
ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
An optional prefix to be used as the metrics key prefix. For example, the metric "bleu" will be named
"eval_bleu" if the prefix is "eval" (the default).
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
dictionary also contains the epoch number which comes from the training state.
"""
if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
self.args.local_rank = -1
eval_dataloader = self.get_eval_dataloader(eval_dataset)
self.args.local_rank = torch.distributed.get_rank()
start_time = time.time()
metrics = self.prediction_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if self.compute_metrics is None else None,
ignore_keys=ignore_keys,
metric_key_prefix=metric_key_prefix,
)
n_samples = len(eval_dataset if eval_dataset is not None else self.eval_dataset)
metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples))
self.log(metrics)
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
return metrics
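# Minimal sketch of the ``compute_metrics`` contract that ``prediction_loop`` above relies on: it is
# called with EvalPrediction(predictions=pred_relations, label_ids=gt_relations) and must return a
# dict with an "ALL" entry holding "p", "r" and "f1". The pair matching below is only illustrative;
# the training script is expected to pass the project's real scoring function.
def _example_re_compute_metrics(eval_prediction):
    pred_docs, gt_docs = eval_prediction.predictions, eval_prediction.label_ids
    n_pred = n_gt = n_correct = 0
    for preds, gts in zip(pred_docs, gt_docs):
        pred_pairs = {(rel["head_id"], rel["tail_id"]) for rel in preds}
        gt_pairs = {(rel["head_id"], rel["tail_id"]) for rel in gts}
        n_pred += len(pred_pairs)
        n_gt += len(gt_pairs)
        n_correct += len(pred_pairs & gt_pairs)
    precision = n_correct / n_pred if n_pred else 0.0
    recall = n_correct / n_gt if n_gt else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return {"ALL": {"p": precision, "r": recall, "f1": f1}}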
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/trainers/xfun_trainer.py |
from .funsd_trainer import FunsdTrainer
from .xfun_trainer import XfunReTrainer, XfunSerTrainer
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/trainers/__init__.py |
from typing import Any, Dict, Union
import torch
from transformers import Trainer
class FunsdTrainer(Trainer):
def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
"""
Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and
handling potential state.
"""
for k, v in inputs.items():
if hasattr(v, "to") and hasattr(v, "device"):
inputs[k] = v.to(self.args.device)
if self.args.past_index >= 0 and self._past is not None:
inputs["mems"] = self._past
return inputs
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/trainers/funsd_trainer.py |
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/modules/__init__.py |
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/modules/decoders/__init__.py |
import copy
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
class BiaffineAttention(torch.nn.Module):
"""Implements a biaffine attention operator for binary relation classification.
PyTorch implementation of the biaffine attention operator from "End-to-end neural relation
extraction using deep biaffine attention" (https://arxiv.org/abs/1812.11275), used here as the
classifier head for binary relation classification.
Args:
in_features (int): The size of the feature dimension of the inputs.
out_features (int): The size of the feature dimension of the output.
Shape:
- x_1: `(N, *, in_features)` where `N` is the batch dimension and `*` means any number of
additional dimensions.
- x_2: `(N, *, in_features)`, where `N` is the batch dimension and `*` means any number of
additional dimensions.
- Output: `(N, *, out_features)`, where `N` is the batch dimension and `*` means any number
of additional dimensions.
Examples:
>>> batch_size, in_features, out_features = 32, 100, 4
>>> biaffine_attention = BiaffineAttention(in_features, out_features)
>>> x_1 = torch.randn(batch_size, in_features)
>>> x_2 = torch.randn(batch_size, in_features)
>>> output = biaffine_attention(x_1, x_2)
>>> print(output.size())
torch.Size([32, 4])
"""
def __init__(self, in_features, out_features):
super(BiaffineAttention, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.bilinear = torch.nn.Bilinear(in_features, in_features, out_features, bias=False)
self.linear = torch.nn.Linear(2 * in_features, out_features, bias=True)
self.reset_parameters()
def forward(self, x_1, x_2):
return self.bilinear(x_1, x_2) + self.linear(torch.cat((x_1, x_2), dim=-1))
def reset_parameters(self):
self.bilinear.reset_parameters()
self.linear.reset_parameters()
class REDecoder(nn.Module):
def __init__(self, config):
super().__init__()
self.entity_emb = nn.Embedding(3, config.hidden_size, scale_grad_by_freq=True)
projection = nn.Sequential(
nn.Linear(config.hidden_size * 2, config.hidden_size),
nn.ReLU(),
nn.Dropout(config.hidden_dropout_prob),
nn.Linear(config.hidden_size, config.hidden_size // 2),
nn.ReLU(),
nn.Dropout(config.hidden_dropout_prob),
)
self.ffnn_head = copy.deepcopy(projection)
self.ffnn_tail = copy.deepcopy(projection)
self.rel_classifier = BiaffineAttention(config.hidden_size // 2, 2)
self.loss_fct = CrossEntropyLoss()
def build_relation(self, relations, entities):
batch_size = len(relations)
new_relations = []
for b in range(batch_size):
if len(entities[b]["start"]) <= 2:
entities[b] = {"end": [1, 1], "label": [0, 0], "start": [0, 0]}
all_possible_relations = set(
[
(i, j)
for i in range(len(entities[b]["label"]))
for j in range(len(entities[b]["label"]))
if entities[b]["label"][i] == 1 and entities[b]["label"][j] == 2
]
)
if len(all_possible_relations) == 0:
all_possible_relations = set([(0, 1)])
positive_relations = set(list(zip(relations[b]["head"], relations[b]["tail"])))
negative_relations = all_possible_relations - positive_relations
positive_relations = set([i for i in positive_relations if i in all_possible_relations])
reordered_relations = list(positive_relations) + list(negative_relations)
relation_per_doc = {"head": [], "tail": [], "label": []}
relation_per_doc["head"] = [i[0] for i in reordered_relations]
relation_per_doc["tail"] = [i[1] for i in reordered_relations]
relation_per_doc["label"] = [1] * len(positive_relations) + [0] * (
len(reordered_relations) - len(positive_relations)
)
assert len(relation_per_doc["head"]) != 0
new_relations.append(relation_per_doc)
return new_relations, entities
def get_predicted_relations(self, logits, relations, entities):
pred_relations = []
for i, pred_label in enumerate(logits.argmax(-1)):
if pred_label != 1:
continue
rel = {}
rel["head_id"] = relations["head"][i]
rel["head"] = (entities["start"][rel["head_id"]], entities["end"][rel["head_id"]])
rel["head_type"] = entities["label"][rel["head_id"]]
rel["tail_id"] = relations["tail"][i]
rel["tail"] = (entities["start"][rel["tail_id"]], entities["end"][rel["tail_id"]])
rel["tail_type"] = entities["label"][rel["tail_id"]]
rel["type"] = 1
pred_relations.append(rel)
return pred_relations
def forward(self, hidden_states, entities, relations):
batch_size, max_n_words, context_dim = hidden_states.size()
device = hidden_states.device
relations, entities = self.build_relation(relations, entities)
loss = 0
all_pred_relations = []
for b in range(batch_size):
head_entities = torch.tensor(relations[b]["head"], device=device)
tail_entities = torch.tensor(relations[b]["tail"], device=device)
relation_labels = torch.tensor(relations[b]["label"], device=device)
entities_start_index = torch.tensor(entities[b]["start"], device=device)
entities_labels = torch.tensor(entities[b]["label"], device=device)
head_index = entities_start_index[head_entities]
head_label = entities_labels[head_entities]
head_label_repr = self.entity_emb(head_label)
tail_index = entities_start_index[tail_entities]
tail_label = entities_labels[tail_entities]
tail_label_repr = self.entity_emb(tail_label)
head_repr = torch.cat(
(hidden_states[b][head_index], head_label_repr),
dim=-1,
)
tail_repr = torch.cat(
(hidden_states[b][tail_index], tail_label_repr),
dim=-1,
)
heads = self.ffnn_head(head_repr)
tails = self.ffnn_tail(tail_repr)
logits = self.rel_classifier(heads, tails)
loss += self.loss_fct(logits, relation_labels)
pred_relations = self.get_predicted_relations(logits, relations[b], entities[b])
all_pred_relations.append(pred_relations)
return loss, all_pred_relations
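if __name__ == "__main__":
    # Minimal smoke test on toy inputs; the real ``config`` comes from the LayoutLMv2/XLM model
    # config, but only ``hidden_size`` and ``hidden_dropout_prob`` are needed here.
    from types import SimpleNamespace

    toy_config = SimpleNamespace(hidden_size=64, hidden_dropout_prob=0.1)
    decoder = REDecoder(toy_config)
    hidden_states = torch.randn(1, 16, 64)  # (batch, seq_len, hidden)
    entities = [{"start": [0, 5, 9], "end": [4, 8, 12], "label": [1, 2, 2]}]  # 1=QUESTION, 2=ANSWER
    relations = [{"head": [0], "tail": [1]}]  # gold key->value link between entity 0 and entity 1
    loss, pred_relations = decoder(hidden_states, entities, relations)
    print(loss.item(), pred_relations)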
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/modules/decoders/re.py |
# flake8: noqa
from .data_collator import DataCollatorForKeyValueExtraction
from .datasets import *
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/data/__init__.py |
import torch
from detectron2.data.detection_utils import read_image
from detectron2.data.transforms import ResizeTransform, TransformList
def normalize_bbox(bbox, size):
return [
int(1000 * bbox[0] / size[0]),
int(1000 * bbox[1] / size[1]),
int(1000 * bbox[2] / size[0]),
int(1000 * bbox[3] / size[1]),
]
def simplify_bbox(bbox):
return [
min(bbox[0::2]),
min(bbox[1::2]),
max(bbox[2::2]),
max(bbox[3::2]),
]
def merge_bbox(bbox_list):
x0, y0, x1, y1 = list(zip(*bbox_list))
return [min(x0), min(y0), max(x1), max(y1)]
def load_image(image_path):
image = read_image(image_path, format="BGR")
h = image.shape[0]
w = image.shape[1]
img_trans = TransformList([ResizeTransform(h=h, w=w, new_h=224, new_w=224)])
image = torch.tensor(img_trans.apply_image(image).copy()).permute(2, 0, 1) # copy to make it writeable
return image, (w, h)
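if __name__ == "__main__":
    # Minimal sanity check of the pure bbox helpers above. Note that running this file directly
    # still requires detectron2, since it is imported at module level for load_image.
    print(simplify_bbox([10, 20, 30, 40, 25, 15, 35, 45]))  # 4-point polygon -> [10, 15, 35, 45]
    print(merge_bbox([[10, 20, 30, 40], [15, 5, 50, 35]]))  # union of two boxes -> [10, 5, 50, 40]
    print(normalize_bbox([100, 50, 300, 150], (1000, 800)))  # scaled to a 0-1000 grid -> [100, 62, 300, 187]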
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/data/utils.py |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
task_name: Optional[str] = field(default="ner", metadata={"help": "The name of the task (ner, pos...)."})
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a csv or JSON file)."}
)
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate on (a csv or JSON file)."},
)
test_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input test data file to predict on (a csv or JSON file)."},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
pad_to_max_length: bool = field(
default=True,
metadata={
"help": "Whether to pad all samples to model maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_val_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
},
)
max_test_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of test examples to this "
"value if set."
},
)
label_all_tokens: bool = field(
default=False,
metadata={
"help": "Whether to put the label for one word on all tokens generated by that word or just on the "
"first one (in which case the other tokens will have a padding index)."
},
)
return_entity_level_metrics: bool = field(
default=False,
metadata={"help": "Whether to return all the entity levels during evaluation or just the overall ones."},
)
@dataclass
class XFUNDataTrainingArguments(DataTrainingArguments):
lang: Optional[str] = field(default="en")
additional_langs: Optional[str] = field(default=None)
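if __name__ == "__main__":
    # Usage sketch: these dataclasses are meant to be parsed with transformers' HfArgumentParser
    # (assumed to be installed, as elsewhere in this project); the argument values are illustrative.
    from transformers import HfArgumentParser

    parser = HfArgumentParser(XFUNDataTrainingArguments)
    (data_args,) = parser.parse_args_into_dataclasses(args=["--lang", "zh", "--additional_langs", "all"])
    print(data_args.lang, data_args.additional_langs, data_args.pad_to_max_length)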
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/data/data_args.py |
from dataclasses import dataclass
from typing import Optional, Union
import torch
from detectron2.structures import ImageList
from transformers import PreTrainedTokenizerBase
from transformers.file_utils import PaddingStrategy
@dataclass
class DataCollatorForKeyValueExtraction:
"""
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
The tokenizer used for encoding the data.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'`: No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
label_pad_token_id (:obj:`int`, `optional`, defaults to -100):
The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
"""
tokenizer: PreTrainedTokenizerBase
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
label_pad_token_id: int = -100
def __call__(self, features):
label_name = "label" if "label" in features[0].keys() else "labels"
labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
has_image_input = "image" in features[0]
has_bbox_input = "bbox" in features[0]
if has_image_input:
image = ImageList.from_tensors([torch.tensor(feature["image"]) for feature in features], 32)
for feature in features:
del feature["image"]
batch = self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
# Conversion to tensors will fail if we have labels as they are not of the same length yet.
return_tensors="pt" if labels is None else None,
)
if labels is None:
return batch
sequence_length = torch.tensor(batch["input_ids"]).shape[1]
padding_side = self.tokenizer.padding_side
if padding_side == "right":
batch["labels"] = [label + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels]
if has_bbox_input:
batch["bbox"] = [bbox + [[0, 0, 0, 0]] * (sequence_length - len(bbox)) for bbox in batch["bbox"]]
else:
batch["labels"] = [[self.label_pad_token_id] * (sequence_length - len(label)) + label for label in labels]
if has_bbox_input:
batch["bbox"] = [[[0, 0, 0, 0]] * (sequence_length - len(bbox)) + bbox for bbox in batch["bbox"]]
batch = {k: torch.tensor(v, dtype=torch.int64) if isinstance(v[0], list) else v for k, v in batch.items()}
if has_image_input:
batch["image"] = image
return batch
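# Usage sketch (assuming a pretrained tokenizer and tokenized examples, i.e. dicts with
# "input_ids", "bbox", "labels" and optionally "image"):
#
#   collator = DataCollatorForKeyValueExtraction(tokenizer, padding="max_length", max_length=512)
#   batch = collator(features)  # pads input_ids/labels/bbox to a common length and, if images are
#                               # present, stacks them into a detectron2 ImageList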
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/data/data_collator.py |
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/data/datasets/__init__.py |
# Lint as: python3
import json
import logging
import os
import datasets
from layoutlmft.data.utils import load_image, merge_bbox, normalize_bbox, simplify_bbox
from transformers import AutoTokenizer
_URL = "https://github.com/doc-analysis/XFUN/releases/download/v1.0/"
_LANG = ["zh", "de", "es", "fr", "en", "it", "ja", "pt"]
logger = logging.getLogger(__name__)
class XFUNConfig(datasets.BuilderConfig):
"""BuilderConfig for XFUN."""
def __init__(self, lang, additional_langs=None, **kwargs):
"""
Args:
lang: string, language for the input text
**kwargs: keyword arguments forwarded to super.
"""
super(XFUNConfig, self).__init__(**kwargs)
self.lang = lang
self.additional_langs = additional_langs
class XFUN(datasets.GeneratorBasedBuilder):
"""XFUN dataset."""
BUILDER_CONFIGS = [XFUNConfig(name=f"xfun.{lang}", lang=lang) for lang in _LANG]
tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
def _info(self):
return datasets.DatasetInfo(
features=datasets.Features(
{
"id": datasets.Value("string"),
"input_ids": datasets.Sequence(datasets.Value("int64")),
"bbox": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
"labels": datasets.Sequence(
datasets.ClassLabel(
names=["O", "B-QUESTION", "B-ANSWER", "B-HEADER", "I-ANSWER", "I-QUESTION", "I-HEADER"]
)
),
"image": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"),
"entities": datasets.Sequence(
{
"start": datasets.Value("int64"),
"end": datasets.Value("int64"),
"label": datasets.ClassLabel(names=["HEADER", "QUESTION", "ANSWER"]),
}
),
"relations": datasets.Sequence(
{
"head": datasets.Value("int64"),
"tail": datasets.Value("int64"),
"start_index": datasets.Value("int64"),
"end_index": datasets.Value("int64"),
}
),
}
),
supervised_keys=None,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
urls_to_download = {
"train": [f"{_URL}{self.config.lang}.train.json", f"{_URL}{self.config.lang}.train.zip"],
"val": [f"{_URL}{self.config.lang}.val.json", f"{_URL}{self.config.lang}.val.zip"],
# "test": [f"{_URL}{self.config.lang}.test.json", f"{_URL}{self.config.lang}.test.zip"],
}
downloaded_files = dl_manager.download_and_extract(urls_to_download)
train_files_for_many_langs = [downloaded_files["train"]]
val_files_for_many_langs = [downloaded_files["val"]]
# test_files_for_many_langs = [downloaded_files["test"]]
if self.config.additional_langs:
additional_langs = self.config.additional_langs.split("+")
if "all" in additional_langs:
additional_langs = [lang for lang in _LANG if lang != self.config.lang]
for lang in additional_langs:
urls_to_download = {"train": [f"{_URL}{lang}.train.json", f"{_URL}{lang}.train.zip"]}
additional_downloaded_files = dl_manager.download_and_extract(urls_to_download)
train_files_for_many_langs.append(additional_downloaded_files["train"])
logger.info(f"Training on {self.config.lang} with additional langs({self.config.additional_langs})")
logger.info(f"Evaluating on {self.config.lang}")
logger.info(f"Testing on {self.config.lang}")
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_files_for_many_langs}),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": val_files_for_many_langs}
),
# datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepaths": test_files_for_many_langs}),
]
def _generate_examples(self, filepaths):
for filepath in filepaths:
logger.info("Generating examples from = %s", filepath)
with open(filepath[0], "r", encoding="utf-8") as f:
data = json.load(f)
for doc in data["documents"]:
doc["img"]["fpath"] = os.path.join(filepath[1], doc["img"]["fname"])
image, size = load_image(doc["img"]["fpath"])
document = doc["document"]
tokenized_doc = {"input_ids": [], "bbox": [], "labels": []}
entities = []
relations = []
id2label = {}
entity_id_to_index_map = {}
empty_entity = set()
for line in document:
if len(line["text"]) == 0:
empty_entity.add(line["id"])
continue
id2label[line["id"]] = line["label"]
relations.extend([tuple(sorted(l)) for l in line["linking"]])
tokenized_inputs = self.tokenizer(
line["text"],
add_special_tokens=False,
return_offsets_mapping=True,
return_attention_mask=False,
)
text_length = 0
ocr_length = 0
bbox = []
last_box = None
for token_id, offset in zip(tokenized_inputs["input_ids"], tokenized_inputs["offset_mapping"]):
if token_id == 6:
bbox.append(None)
continue
text_length += offset[1] - offset[0]
tmp_box = []
while ocr_length < text_length:
ocr_word = line["words"].pop(0)
ocr_length += len(
self.tokenizer._tokenizer.normalizer.normalize_str(ocr_word["text"].strip())
)
tmp_box.append(simplify_bbox(ocr_word["box"]))
if len(tmp_box) == 0:
tmp_box = last_box
bbox.append(normalize_bbox(merge_bbox(tmp_box), size))
last_box = tmp_box
bbox = [
[bbox[i + 1][0], bbox[i + 1][1], bbox[i + 1][0], bbox[i + 1][1]] if b is None else b
for i, b in enumerate(bbox)
]
if line["label"] == "other":
label = ["O"] * len(bbox)
else:
label = [f"I-{line['label'].upper()}"] * len(bbox)
label[0] = f"B-{line['label'].upper()}"
tokenized_inputs.update({"bbox": bbox, "labels": label})
if label[0] != "O":
entity_id_to_index_map[line["id"]] = len(entities)
entities.append(
{
"start": len(tokenized_doc["input_ids"]),
"end": len(tokenized_doc["input_ids"]) + len(tokenized_inputs["input_ids"]),
"label": line["label"].upper(),
}
)
for i in tokenized_doc:
tokenized_doc[i] = tokenized_doc[i] + tokenized_inputs[i]
relations = list(set(relations))
relations = [rel for rel in relations if rel[0] not in empty_entity and rel[1] not in empty_entity]
kvrelations = []
for rel in relations:
pair = [id2label[rel[0]], id2label[rel[1]]]
if pair == ["question", "answer"]:
kvrelations.append(
{"head": entity_id_to_index_map[rel[0]], "tail": entity_id_to_index_map[rel[1]]}
)
elif pair == ["answer", "question"]:
kvrelations.append(
{"head": entity_id_to_index_map[rel[1]], "tail": entity_id_to_index_map[rel[0]]}
)
else:
continue
def get_relation_span(rel):
bound = []
for entity_index in [rel["head"], rel["tail"]]:
bound.append(entities[entity_index]["start"])
bound.append(entities[entity_index]["end"])
return min(bound), max(bound)
relations = sorted(
[
{
"head": rel["head"],
"tail": rel["tail"],
"start_index": get_relation_span(rel)[0],
"end_index": get_relation_span(rel)[1],
}
for rel in kvrelations
],
key=lambda x: x["head"],
)
chunk_size = 512
for chunk_id, index in enumerate(range(0, len(tokenized_doc["input_ids"]), chunk_size)):
item = {}
for k in tokenized_doc:
item[k] = tokenized_doc[k][index : index + chunk_size]
entities_in_this_span = []
global_to_local_map = {}
for entity_id, entity in enumerate(entities):
if (
index <= entity["start"] < index + chunk_size
and index <= entity["end"] < index + chunk_size
):
entity["start"] = entity["start"] - index
entity["end"] = entity["end"] - index
global_to_local_map[entity_id] = len(entities_in_this_span)
entities_in_this_span.append(entity)
relations_in_this_span = []
for relation in relations:
if (
index <= relation["start_index"] < index + chunk_size
and index <= relation["end_index"] < index + chunk_size
):
relations_in_this_span.append(
{
"head": global_to_local_map[relation["head"]],
"tail": global_to_local_map[relation["tail"]],
"start_index": relation["start_index"] - index,
"end_index": relation["end_index"] - index,
}
)
item.update(
{
"id": f"{doc['id']}_{chunk_id}",
"image": image,
"entities": entities_in_this_span,
"relations": relations_in_this_span,
}
)
yield f"{doc['id']}_{chunk_id}", item
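# Usage sketch (assuming the ``datasets`` library can resolve this script, e.g. via its local path):
#
#   import datasets
#   ds = datasets.load_dataset("path/to/xfun.py", "xfun.zh")
#   ds["train"][0]  # a 512-token chunk with input_ids, bbox, labels, image, entities and relations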
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/data/datasets/xfun.py |
# coding=utf-8
import json
import os
import datasets
from layoutlmft.data.utils import load_image, normalize_bbox
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@article{Jaume2019FUNSDAD,
title={FUNSD: A Dataset for Form Understanding in Noisy Scanned Documents},
author={Guillaume Jaume and H. K. Ekenel and J. Thiran},
journal={2019 International Conference on Document Analysis and Recognition Workshops (ICDARW)},
year={2019},
volume={2},
pages={1-6}
}
"""
_DESCRIPTION = """\
https://guillaumejaume.github.io/FUNSD/
"""
class FunsdConfig(datasets.BuilderConfig):
"""BuilderConfig for FUNSD"""
def __init__(self, **kwargs):
"""BuilderConfig for FUNSD.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(FunsdConfig, self).__init__(**kwargs)
class Funsd(datasets.GeneratorBasedBuilder):
"""Conll2003 dataset."""
BUILDER_CONFIGS = [
FunsdConfig(name="funsd", version=datasets.Version("1.0.0"), description="FUNSD dataset"),
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"tokens": datasets.Sequence(datasets.Value("string")),
"bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=["O", "B-HEADER", "I-HEADER", "B-QUESTION", "I-QUESTION", "B-ANSWER", "I-ANSWER"]
)
),
"image": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"),
}
),
supervised_keys=None,
homepage="https://guillaumejaume.github.io/FUNSD/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
downloaded_file = dl_manager.download_and_extract("https://guillaumejaume.github.io/FUNSD/dataset.zip")
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN, gen_kwargs={"filepath": f"{downloaded_file}/dataset/training_data/"}
),
datasets.SplitGenerator(
name=datasets.Split.TEST, gen_kwargs={"filepath": f"{downloaded_file}/dataset/testing_data/"}
),
]
def _generate_examples(self, filepath):
logger.info("⏳ Generating examples from = %s", filepath)
ann_dir = os.path.join(filepath, "annotations")
img_dir = os.path.join(filepath, "images")
for guid, file in enumerate(sorted(os.listdir(ann_dir))):
tokens = []
bboxes = []
ner_tags = []
file_path = os.path.join(ann_dir, file)
with open(file_path, "r", encoding="utf8") as f:
data = json.load(f)
image_path = os.path.join(img_dir, file)
image_path = image_path.replace("json", "png")
image, size = load_image(image_path)
for item in data["form"]:
words, label = item["words"], item["label"]
whole_box = item["box"]
words = [w for w in words if w["text"].strip() != ""]
if len(words) == 0:
continue
if label == "other":
for w in words:
tokens.append(w["text"])
ner_tags.append("O")
# bboxes.append(normalize_bbox(w["box"], size))
bboxes.append(normalize_bbox(whole_box, size))
else:
tokens.append(words[0]["text"])
ner_tags.append("B-" + label.upper())
# bboxes.append(normalize_bbox(words[0]["box"], size))
bboxes.append(normalize_bbox(whole_box, size))
for w in words[1:]:
tokens.append(w["text"])
ner_tags.append("I-" + label.upper())
# bboxes.append(normalize_bbox(w["box"], size))
bboxes.append(normalize_bbox(whole_box, size))
yield guid, {"id": str(guid), "tokens": tokens, "bboxes": bboxes, "ner_tags": ner_tags, "image": image}
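# Usage sketch (assuming the ``datasets`` library can resolve this script, e.g. via its local path):
#
#   import datasets
#   ds = datasets.load_dataset("path/to/funsd.py")
#   ds["train"][0]  # dict with id, tokens, bboxes, ner_tags and a 3x224x224 image array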
| EXA-1-master | exa/models/unilm-master/xdoc/fine_tuning/funsd/layoutlmft/data/datasets/funsd.py |
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import math
import os
import pickle
import random
from time import sleep
import numpy as np
import torch
from nltk.translate.bleu_score import sentence_bleu
from tqdm import tqdm
from transformers import \
BertTokenizer, RobertaTokenizer
from transformers.tokenization_bert import whitespace_tokenize
import s2s_ft.s2s_loader as seq2seq_loader
from s2s_ft.modeling_decoding import LayoutlmForSeq2SeqDecoder, BertConfig
from s2s_ft.tokenization_minilm import MinilmTokenizer
from s2s_ft.tokenization_unilm import UnilmTokenizer
from s2s_ft.utils import load_and_cache_layoutlm_examples, convert_src_layout_inputs_to_tokens, \
get_tokens_from_src_and_index, convert_tgt_layout_inputs_to_tokens
TOKENIZER_CLASSES = {
'bert': BertTokenizer,
'minilm': MinilmTokenizer,
'roberta': RobertaTokenizer,
'unilm': UnilmTokenizer,
'layoutlm': BertTokenizer,
}
class WhitespaceTokenizer(object):
def tokenize(self, text):
return whitespace_tokenize(text)
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
def detokenize(tk_list):
r_list = []
for tk in tk_list:
if tk.startswith('##') and len(r_list) > 0:
r_list[-1] = r_list[-1] + tk[2:]
else:
r_list.append(tk)
return r_list
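# A quick illustration of how WordPiece continuation pieces are merged back into words, e.g.:
#   detokenize(["un", "##believ", "##able", "results"]) -> ["unbelievable", "results"]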
def ascii_print(text):
text = text.encode("ascii", "ignore")
print(text)
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--model_type", default=None, type=str, required=True,
help="Model type selected in the list: " + ", ".join(TOKENIZER_CLASSES.keys()))
parser.add_argument("--model_path", default=None, type=str, required=True,
help="Path to the model checkpoint.")
parser.add_argument("--config_path", default=None, type=str,
help="Path to config.json for the model.")
parser.add_argument("--sentence_shuffle_rate", default=0, type=float)
parser.add_argument("--layoutlm_only_layout", action='store_true')
# tokenizer_name
parser.add_argument("--tokenizer_name", default=None, type=str, required=True,
help="tokenizer name")
parser.add_argument("--max_seq_length", default=512, type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
# decoding parameters
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--amp', action='store_true',
help="Whether to use amp for fp16")
parser.add_argument("--input_file", type=str, help="Input file")
parser.add_argument("--input_folder", type=str, help="Input folder")
parser.add_argument("--cached_feature_file", type=str)
parser.add_argument('--subset', type=int, default=0,
help="Decode a subset of the input dataset.")
parser.add_argument("--output_file", type=str, help="output file")
parser.add_argument("--split", type=str, default="",
help="Data split (train/val/test).")
parser.add_argument('--tokenized_input', action='store_true',
help="Whether the input is tokenized.")
parser.add_argument('--seed', type=int, default=123,
help="random seed for initialization")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument('--batch_size', type=int, default=4,
help="Batch size for decoding.")
parser.add_argument('--beam_size', type=int, default=1,
help="Beam size for searching")
parser.add_argument('--length_penalty', type=float, default=0,
help="Length penalty for beam search")
parser.add_argument('--forbid_duplicate_ngrams', action='store_true')
parser.add_argument('--forbid_ignore_word', type=str, default=None,
help="Forbid the word during forbid_duplicate_ngrams")
parser.add_argument("--min_len", default=1, type=int)
parser.add_argument('--need_score_traces', action='store_true')
parser.add_argument('--ngram_size', type=int, default=3)
parser.add_argument('--mode', default="s2s",
choices=["s2s", "l2r", "both"])
parser.add_argument('--max_tgt_length', type=int, default=128,
help="maximum length of target sequence")
parser.add_argument('--s2s_special_token', action='store_true',
help="New special tokens ([S2S_SEP]/[S2S_CLS]) of S2S.")
parser.add_argument('--s2s_add_segment', action='store_true',
help="Additional segmental for the encoder of S2S.")
parser.add_argument('--s2s_share_segment', action='store_true',
help="Sharing segment embeddings for the encoder of S2S (used with --s2s_add_segment).")
parser.add_argument('--pos_shift', action='store_true',
help="Using position shift for fine-tuning.")
parser.add_argument("--cache_dir", default=None, type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
args = parser.parse_args()
model_path = args.model_path
assert os.path.exists(model_path), 'model_path ' + model_path + ' does not exist!'
if args.need_score_traces and args.beam_size <= 1:
raise ValueError(
"Score trace is only available for beam search with beam size > 1.")
if args.max_tgt_length >= args.max_seq_length - 2:
raise ValueError("Maximum tgt length exceeds max seq length - 2.")
device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
if args.seed > 0:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
else:
random_seed = random.randint(0, 10000)
logger.info("Set random seed as: {}".format(random_seed))
random.seed(random_seed)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(random_seed)  # seed CUDA with the same freshly drawn seed
tokenizer = TOKENIZER_CLASSES[args.model_type].from_pretrained(
args.tokenizer_name, do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
max_len=args.max_seq_length
)
if args.model_type == "roberta":
vocab = tokenizer.encoder
else:
vocab = tokenizer.vocab
# NOTE: tokenizer cannot setattr, so move this to the initialization step
# tokenizer.max_len = args.max_seq_length
config_file = args.config_path if args.config_path else os.path.join(args.model_path, "config.json")
logger.info("Read decoding config from: %s" % config_file)
config = BertConfig.from_json_file(config_file,
# base_model_type=args.model_type
layoutlm_only_layout_flag=args.layoutlm_only_layout
)
bi_uni_pipeline = []
bi_uni_pipeline.append(seq2seq_loader.Preprocess4Seq2seqDecoder(
list(vocab.keys()), tokenizer.convert_tokens_to_ids, args.max_seq_length,
max_tgt_length=args.max_tgt_length, pos_shift=args.pos_shift,
source_type_id=config.source_type_id, target_type_id=config.target_type_id,
cls_token=tokenizer.cls_token, sep_token=tokenizer.sep_token, pad_token=tokenizer.pad_token,
layout_flag=args.model_type == 'layoutlm'
))
mask_word_id, eos_word_ids, sos_word_id = tokenizer.convert_tokens_to_ids(
[tokenizer.mask_token, tokenizer.sep_token, tokenizer.sep_token])
forbid_ignore_set = None
if args.forbid_ignore_word:
w_list = []
for w in args.forbid_ignore_word.split('|'):
if w.startswith('[') and w.endswith(']'):
w_list.append(w.upper())
else:
w_list.append(w)
forbid_ignore_set = set(tokenizer.convert_tokens_to_ids(w_list))
print(args.model_path)
found_checkpoint_flag = False
for model_recover_path in [args.model_path.strip()]:
logger.info("***** Recover model: %s *****", model_recover_path)
found_checkpoint_flag = True
model = LayoutlmForSeq2SeqDecoder.from_pretrained(
model_recover_path, config=config, mask_word_id=mask_word_id, search_beam_size=args.beam_size,
length_penalty=args.length_penalty, eos_id=eos_word_ids, sos_id=sos_word_id,
forbid_duplicate_ngrams=args.forbid_duplicate_ngrams, forbid_ignore_set=forbid_ignore_set,
ngram_size=args.ngram_size, min_len=args.min_len, mode=args.mode,
max_position_embeddings=args.max_seq_length, pos_shift=args.pos_shift,
)
if args.fp16:
model.half()
model.to(device)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
torch.cuda.empty_cache()
model.eval()
next_i = 0
max_src_length = args.max_seq_length - 2 - args.max_tgt_length
max_tgt_length = args.max_tgt_length
example_path = args.input_file if args.input_file else args.input_folder
to_pred = load_and_cache_layoutlm_examples(
example_path, tokenizer, local_rank=-1,
cached_features_file=args.cached_feature_file, shuffle=False, layout_flag=args.model_type == 'layoutlm',
src_shuffle_rate=args.sentence_shuffle_rate
)
input_lines = convert_src_layout_inputs_to_tokens(to_pred, tokenizer.convert_ids_to_tokens, max_src_length,
layout_flag=args.model_type == 'layoutlm')
target_lines = convert_tgt_layout_inputs_to_tokens(to_pred, tokenizer.convert_ids_to_tokens, max_tgt_length,
layout_flag=args.model_type == 'layoutlm')
target_geo_scores = [x['bleu'] for x in to_pred]
if args.subset > 0:
logger.info("Decoding subset: %d", args.subset)
input_lines = input_lines[:args.subset]
# NOTE: add the sequence index through enumerate
input_lines = sorted(list(enumerate(input_lines)), key=lambda x: -len(x[1]))
score_trace_list = [None] * len(input_lines)
total_batch = math.ceil(len(input_lines) / args.batch_size)
fn_out = args.output_file
fout = open(fn_out, "w", encoding="utf-8")
with tqdm(total=total_batch) as pbar:
batch_count = 0
first_batch = True
while first_batch or (next_i + args.batch_size <= len(input_lines)):
# while next_i < len(input_lines):
_chunk = input_lines[next_i:next_i + args.batch_size]
buf_id = [x[0] for x in _chunk]
buf = [x[1] for x in _chunk]
next_i += args.batch_size
batch_count += 1
max_a_len = max([len(x) for x in buf])
instances = []
for instance in [(x, max_a_len) for x in buf]:
for proc in bi_uni_pipeline:
instances.append(proc(instance))
with torch.no_grad():
batch = seq2seq_loader.batch_list_to_batch_tensors(
instances)
batch = [
t.to(device) if t is not None else None for t in batch]
input_ids, token_type_ids, position_ids, input_mask, mask_qkv, task_idx = batch
traces = model(input_ids, token_type_ids,
position_ids, input_mask, task_idx=task_idx, mask_qkv=mask_qkv)
if args.beam_size > 1:
traces = {k: v.tolist() for k, v in traces.items()}
output_ids = traces['pred_seq']
else:
output_ids = traces.tolist()
for i in range(len(buf)):
w_ids = output_ids[i]
output_buf = get_tokens_from_src_and_index(src=buf[i], index=w_ids, modifier=lambda x: x-1)
output_tokens = []
for t in output_buf:
if t in (tokenizer.sep_token, tokenizer.pad_token):
break
output_tokens.append(t)
output_tokens = output_tokens[:len(target_lines[buf_id[i]])]
if args.model_type == "roberta":
output_sequence = tokenizer.convert_tokens_to_string(output_tokens)
else:
output_sequence = ' '.join(detokenize(output_tokens))
if '\n' in output_sequence:
output_sequence = " [X_SEP] ".join(output_sequence.split('\n'))
target = target_lines[buf_id[i]]
target = detokenize(target)
result = output_sequence.split()
score = sentence_bleu([target], result)
geo_score = target_geo_scores[buf_id[i]]
target_sequence = ' '.join(target)
fout.write('{}\t{:.8f}\t{:.8f}\t{}\t{}\n'.format(buf_id[i], score, geo_score, output_sequence, target_sequence))
if first_batch or batch_count % 50 == 0:
logger.info("{}: BLEU={:.4f} GEO={:.4f} | {}"
.format(buf_id[i], score, target_geo_scores[buf_id[i]], output_sequence))
if args.need_score_traces:
score_trace_list[buf_id[i]] = {
'scores': traces['scores'][i], 'wids': traces['wids'][i], 'ptrs': traces['ptrs'][i]}
pbar.update(1)
first_batch = False
outscore = open(fn_out, encoding='utf-8')
bleu_score, geo_score = {}, {}  # use two separate dicts so BLEU entries are not overwritten by the geo scores
total_bleu = total_geo = 0.0
for line in outscore.readlines():
id, bleu, geo, out_seq, tgt_seq = line.split('\t')
bleu_score[int(id)] = float(bleu)
total_bleu += float(bleu)
geo_score[int(id)] = float(geo)
total_geo += float(geo)
print("avg_bleu", round(100 * total_bleu / len(bleu_score), 1))
print("avg_geo", round(100 * total_geo / len(geo_score), 1))
# released model (layoutreader-base-readingbank): avg_bleu 98.2, avg_geo 69.7
if args.need_score_traces:
with open(fn_out + ".trace.pickle", "wb") as fout_trace:
pickle.dump(
{"version": 0.0, "num_samples": len(input_lines)}, fout_trace)
for x in score_trace_list:
pickle.dump(x, fout_trace)
if not found_checkpoint_flag:
logger.info("Not found the model checkpoint file!")
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/layoutreader/decode_seq2seq.py |
from io import open
from setuptools import find_packages, setup
extras = {
'serving': ['pydantic', 'uvicorn', 'fastapi'],
'serving-tf': ['pydantic', 'uvicorn', 'fastapi'],
'serving-torch': ['pydantic', 'uvicorn', 'fastapi', 'torch']
}
# flatten the per-extra dependency lists into a single list for the "all" extra
extras['all'] = [package for packages in extras.values() for package in packages]
setup(
name="s2s-ft",
version="0.0.1",
author="UniLM Team",
author_email="[email protected]",
description="Fine-Tuning Bidirectional Transformers for Sequence-to-Sequence Learning",
long_description=open("README.md", "r", encoding='utf-8').read(),
long_description_content_type="text/markdown",
keywords='Fine-Tuning Bidirectional Transformers for Sequence-to-Sequence Learning',
license='Apache',
url="https://github.com/microsoft/unilm/tree/master/s2s-ft",
packages=find_packages(exclude=["*.tests", "*.tests.*",
"tests.*", "tests"]),
install_requires=['numpy',
'boto3',
'requests',
'tqdm',
'regex != 2019.12.17',
'sentencepiece',
'sacremoses',
'tensorboardX',
'transformers <= 2.10.0'],
extras_require=extras,
python_requires='>=3.5.0',
classifiers=[
'Programming Language :: Python :: 3',
],
) | EXA-1-master | exa/models/unilm-master/layoutreader/setup.py |
from __future__ import absolute_import, division, print_function
import argparse
import json
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import (DataLoader, SequentialSampler)
from torch.utils.data.distributed import DistributedSampler
try:
from torch.utils.tensorboard import SummaryWriter
except:
from tensorboardX import SummaryWriter
import tqdm
from s2s_ft.modeling import LayoutlmForSequenceToSequence, LayoutlmConfig
from transformers import AdamW, get_linear_schedule_with_warmup
from transformers import \
RobertaConfig, BertConfig, \
BertTokenizer, RobertaTokenizer, \
XLMRobertaConfig, XLMRobertaTokenizer
from s2s_ft.configuration_unilm import UnilmConfig
from s2s_ft.tokenization_unilm import UnilmTokenizer
from s2s_ft.configuration_minilm import MinilmConfig
from s2s_ft.tokenization_minilm import MinilmTokenizer
from s2s_ft import utils
from s2s_ft.config import BertForSeq2SeqConfig
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
'bert': (BertConfig, BertTokenizer),
'minilm': (MinilmConfig, MinilmTokenizer),
'roberta': (RobertaConfig, RobertaTokenizer),
'xlm-roberta': (XLMRobertaConfig, XLMRobertaTokenizer),
'unilm': (UnilmConfig, UnilmTokenizer),
'layoutlm': (LayoutlmConfig, BertTokenizer),
}
def prepare_for_training(args, model, checkpoint_state_dict, amp):
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
if amp:
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
if checkpoint_state_dict:
amp.load_state_dict(checkpoint_state_dict['amp'])
if checkpoint_state_dict:
optimizer.load_state_dict(checkpoint_state_dict['optimizer'])
model.load_state_dict(checkpoint_state_dict['model'])
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
return model, optimizer
def train(args, training_features, model, tokenizer):
""" Train the model """
if args.local_rank in [-1, 0] and args.log_dir:
tb_writer = SummaryWriter(log_dir=args.log_dir)
else:
tb_writer = None
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
else:
amp = None
# model recover
recover_step = utils.get_max_epoch_model(args.output_dir)
checkpoint_state_dict = None
model.to(args.device)
model, optimizer = prepare_for_training(args, model, checkpoint_state_dict, amp=amp)
if args.n_gpu == 0 or args.no_cuda:
per_node_train_batch_size = args.per_gpu_train_batch_size * args.gradient_accumulation_steps
else:
per_node_train_batch_size = args.per_gpu_train_batch_size * args.n_gpu * args.gradient_accumulation_steps
train_batch_size = per_node_train_batch_size * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)
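# For example, with per_gpu_train_batch_size=8, n_gpu=4 and gradient_accumulation_steps=2 on a
# single (non-distributed) node: per_node_train_batch_size = 8 * 4 * 2 = 64 and train_batch_size = 64.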
global_step = recover_step if recover_step else 0
if args.num_training_steps == -1:
args.num_training_steps = int(args.num_training_epochs * len(training_features) / train_batch_size)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.num_warmup_steps,
num_training_steps=args.num_training_steps, last_epoch=-1)
if checkpoint_state_dict:
scheduler.load_state_dict(checkpoint_state_dict["lr_scheduler"])
train_dataset = utils.Seq2seqDatasetForLayoutlm(
features=training_features, max_source_len=args.max_source_seq_length,
max_target_len=args.max_target_seq_length, vocab_size=tokenizer.vocab_size,
cls_id=tokenizer.cls_token_id, sep_id=tokenizer.sep_token_id, pad_id=tokenizer.pad_token_id,
mask_id=tokenizer.mask_token_id, random_prob=args.random_prob, keep_prob=args.keep_prob,
offset=train_batch_size * global_step, num_training_instances=train_batch_size * args.num_training_steps,
layout_flag=args.model_type == 'layoutlm'
)
logger.info("Check dataset:")
for i in range(5):
source_ids, target_ids, pseudo_ids, num_source_tokens, num_target_tokens, target_index = train_dataset.__getitem__(
i)
logger.info("Instance-%d" % i)
try:
src = [sid[0] for sid in source_ids]
tgt = [tid[0] for tid in target_ids]
except TypeError:
src = source_ids
tgt = target_ids
logger.info("Source tokens = %s" % " ".join(tokenizer.convert_ids_to_tokens(src)))
logger.info("Target tokens = %s" % " ".join(tokenizer.convert_ids_to_tokens(tgt)))
logger.info("Model = %s" % str(model))
# Train!
logger.info(" ***** Running training *****")
logger.info(" Num examples = %d", len(training_features))
logger.info(" Num Epochs = %.2f", len(train_dataset) / len(training_features))
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Batch size per node = %d", per_node_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", train_batch_size)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", args.num_training_steps)
if args.num_training_steps <= global_step:
logger.info("Training is done. Please use a new dir or clean this dir!")
else:
# The training features are shuffled
train_sampler = SequentialSampler(train_dataset) \
if args.local_rank == -1 else DistributedSampler(train_dataset, shuffle=False)
train_dataloader = DataLoader(
train_dataset, sampler=train_sampler,
batch_size=per_node_train_batch_size // args.gradient_accumulation_steps,
collate_fn=utils.batch_list_to_batch_tensors)
train_iterator = tqdm.tqdm(
train_dataloader, initial=global_step,
desc="Iter (loss=X.XXX, lr=X.XXXXXXX)", disable=args.local_rank not in [-1, 0])
model.train()
model.zero_grad()
tr_loss, logging_loss = 0.0, 0.0
for step, batch in enumerate(train_iterator):
batch = tuple(t.to(args.device) for t in batch)
inputs = {'source_idxys': batch[0],
'target_idxys': batch[1],
'pseudo_idxys': batch[2],
'num_source_tokens': batch[3],
'num_target_tokens': batch[4],
'target_index': batch[-1]}
loss = model(**inputs)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training
train_iterator.set_description('Iter (loss=%5.3f) lr=%9.7f' % (loss.item(), scheduler.get_lr()[0]))
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
logging_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and global_step % args.logging_steps == 0 and tb_writer is not None:
logging_loss = 0.0
tb_writer.add_scalar('train/lr', scheduler.get_lr()[0], global_step=global_step)
tb_writer.add_scalar('train/loss', loss.item(), global_step=global_step)
if args.local_rank in [-1, 0] and args.save_steps > 0 and \
(global_step % args.save_steps == 0 or global_step == args.num_training_steps):
save_path = os.path.join(args.output_dir, "ckpt-%d" % global_step)
os.makedirs(save_path, exist_ok=True)
model_to_save = model.module if hasattr(model, "module") else model
model_to_save.save_pretrained(save_path)
optim_to_save = {
"optimizer": optimizer.state_dict(),
"lr_scheduler": scheduler.state_dict(),
}
if args.fp16:
optim_to_save["amp"] = amp.state_dict()
torch.save(
optim_to_save, os.path.join(args.output_dir, 'optim.{}.bin'.format(global_step)))
logger.info("Saving model checkpoint %d into %s", global_step, save_path)
if args.local_rank in [-1, 0] and tb_writer:
tb_writer.close()
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--train_file", default=None, type=str,
help="Training data (json format) for training. Keys: source and target")
parser.add_argument("--train_folder", default=None, type=str,
help="Training data folder for training. Keys: source and target")
parser.add_argument("--sentence_shuffle_rate", default=0, type=float)
parser.add_argument("--model_type", default=None, type=str, required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--layoutlm_only_layout", action='store_true')
parser.add_argument("--layout_only_dataset", action='store_true')
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model or shortcut name selected in the list:")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model checkpoints and predictions will be written.")
parser.add_argument("--log_dir", default=None, type=str,
help="The output directory where the log will be written.")
## Other parameters
parser.add_argument("--config_name", default=None, type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default=None, type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--cache_dir", default=None, type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_source_seq_length", default=464, type=int,
help="The maximum total source sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded.")
parser.add_argument("--max_target_seq_length", default=48, type=int,
help="The maximum total target sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded.")
parser.add_argument("--cached_train_features_file", default=None, type=str,
help="Cached training features file")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--weight_decay", default=0.01, type=float,
help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--label_smoothing", default=0.1, type=float,
help="Label smoothing.")
parser.add_argument("--num_training_steps", default=-1, type=int,
help="set total number of training steps to perform")
parser.add_argument("--num_training_epochs", default=10, type=int,
help="set total number of training epochs to perform (--num_training_steps has higher priority)")
parser.add_argument("--num_warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument("--random_prob", default=0.1, type=float,
help="prob to random replace a masked token")
parser.add_argument("--keep_prob", default=0.1, type=float,
help="prob to keep no change for a masked token")
parser.add_argument('--logging_steps', type=int, default=500,
help="Log every X updates steps.")
parser.add_argument('--save_steps', type=int, default=1500,
help="Save checkpoint every X updates steps.")
parser.add_argument("--no_cuda", action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument("--local_rank", type=int, default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
args = parser.parse_args()
return args
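# Illustrative invocation (hypothetical paths; the values shown are just the defaults):
#   python run_seq2seq.py \
#       --model_type layoutlm --model_name_or_path /path/to/layoutlm-base-uncased \
#       --train_file /path/to/train.json --output_dir /path/to/output --do_lower_case \
#       --per_gpu_train_batch_size 8 --learning_rate 5e-5 --num_training_epochs 10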
def prepare(args):
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
os.makedirs(args.output_dir, exist_ok=True)
json.dump(args.__dict__, open(os.path.join(
args.output_dir, 'train_opt.json'), 'w'), sort_keys=True, indent=2)
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
logger.info("Training/evaluation parameters %s", args)
# Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set.
# Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will
# remove the need for this code, but it is still valid.
if args.fp16:
try:
import apex
apex.amp.register_half_function(torch, 'einsum')
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
def get_model_and_tokenizer(args):
config_class, tokenizer_class = MODEL_CLASSES[args.model_type]
model_config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
cache_dir=args.cache_dir if args.cache_dir else None)
config = BertForSeq2SeqConfig.from_exist_config(
config=model_config, label_smoothing=args.label_smoothing,
max_position_embeddings=args.max_source_seq_length + args.max_target_seq_length,
max_source_length=args.max_source_seq_length,
base_model_type=args.model_type,
layoutlm_only_layout_flag=args.layoutlm_only_layout,
)
logger.info("Model config for seq2seq: %s", str(config))
if args.model_type == 'layoutlm':
if args.tokenizer_name is not None:
tokenizer_name = args.tokenizer_name
else:
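            # e.g. a checkpoint name like 'layoutlm-base-uncased' falls back to the
            # 'bert-base-uncased' tokenizer (the 8-character 'layoutlm' prefix is replaced).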
tokenizer_name = 'bert' + args.model_name_or_path[8:]
tokenizer = tokenizer_class.from_pretrained(
tokenizer_name, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None)
else:
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None)
model = LayoutlmForSequenceToSequence.from_pretrained(
args.model_name_or_path, config=config, model_type=args.model_type,
reuse_position_embedding=True,
cache_dir=args.cache_dir if args.cache_dir else None,
)
return model, tokenizer
def main():
args = get_args()
prepare(args)
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
# Make sure only the first process in distributed training will download model & vocab
# Load pretrained model and tokenizer
model, tokenizer = get_model_and_tokenizer(args)
if args.local_rank == 0:
torch.distributed.barrier()
# Make sure only the first process in distributed training will download model & vocab
if args.cached_train_features_file is None:
args.cached_train_features_file = os.path.join(args.output_dir, "cached_features_for_training.pt")
example_path = args.train_file if args.train_file else args.train_folder
if args.layout_only_dataset:
training_features = utils.load_and_cache_line_order_examples(
example_path=example_path, tokenizer=tokenizer, local_rank=args.local_rank,
cached_features_file=args.cached_train_features_file, max_src_length=args.max_source_seq_length,
layout_flag=args.model_type == 'layoutlm', shuffle=True,
src_shuffle_rate=args.sentence_shuffle_rate)
else:
training_features = utils.load_and_cache_layoutlm_examples(
example_path=example_path, tokenizer=tokenizer, local_rank=args.local_rank,
cached_features_file=args.cached_train_features_file, max_src_length=args.max_source_seq_length,
layout_flag=args.model_type == 'layoutlm', shuffle=True,
src_shuffle_rate=args.sentence_shuffle_rate
)
train(args, training_features, model, tokenizer)
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/layoutreader/run_seq2seq.py |
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from transformers import BertConfig, RobertaConfig
from s2s_ft.configuration_unilm import UnilmConfig
# from s2s_ft.modeling import LayoutlmConfig
logger = logging.getLogger(__name__)
class BertForSeq2SeqConfig(BertConfig):
def __init__(self, label_smoothing=0.1, source_type_id=0, target_type_id=1, **kwargs):
super(BertForSeq2SeqConfig, self).__init__(**kwargs)
self.label_smoothing = label_smoothing
self.source_type_id = source_type_id
self.target_type_id = target_type_id
@classmethod
def from_exist_config(cls, config, label_smoothing=0.1, max_position_embeddings=None, max_source_length=None,
base_model_type='bert', layoutlm_only_layout_flag=False):
required_keys = [
"vocab_size", "hidden_size", "num_hidden_layers", "num_attention_heads",
"hidden_act", "intermediate_size", "hidden_dropout_prob", "attention_probs_dropout_prob",
"max_position_embeddings", "type_vocab_size", "initializer_range", "layer_norm_eps"]
kwargs = {}
for key in required_keys:
assert hasattr(config, key)
kwargs[key] = getattr(config, key)
kwargs["vocab_size_or_config_json_file"] = kwargs["vocab_size"]
if isinstance(config, RobertaConfig):
kwargs["type_vocab_size"] = 0
kwargs["max_position_embeddings"] = kwargs["max_position_embeddings"] - 2
additional_keys = [
"source_type_id", "target_type_id"
]
for key in additional_keys:
if hasattr(config, key):
kwargs[key] = getattr(config, key)
# if isinstance(config, LayoutlmConfig):
if hasattr(config, 'max_2d_position_embeddings'):
layoutlm_special_keys = ['max_2d_position_embeddings',]
for key in layoutlm_special_keys:
kwargs[key] = getattr(config, key)
kwargs['base_model_type'] = base_model_type
kwargs['layoutlm_only_layout'] = layoutlm_only_layout_flag
if max_position_embeddings is not None and max_position_embeddings > config.max_position_embeddings:
kwargs["max_position_embeddings"] = max_position_embeddings
logger.info(" ** Change max position embeddings to %d ** " % max_position_embeddings)
if max_source_length is not None:
kwargs['max_source_length'] = max_source_length
return cls(label_smoothing=label_smoothing, **kwargs)
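# Minimal usage sketch (the source config and names below are illustrative, not part of this module):
#   base_config = BertConfig.from_pretrained('bert-base-uncased')
#   s2s_config = BertForSeq2SeqConfig.from_exist_config(
#       base_config, label_smoothing=0.1,
#       max_position_embeddings=512, max_source_length=464)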
| EXA-1-master | exa/models/unilm-master/layoutreader/s2s_ft/config.py |
# coding=utf-8
# The MIT License (MIT)
# Copyright (c) Microsoft Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
""" MiniLM model configuration """
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import sys
from io import open
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
MINILM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'minilm-l12-h384-uncased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/minilm-l12-h384-uncased-config.json",
}
class MinilmConfig(PretrainedConfig):
r"""
:class:`~transformers.MinilmConfig` is the configuration class to store the configuration of a
`MinilmModel`.
Arguments:
        vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `MiniLMModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu", "swish" and "gelu_new" are supported.
        hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`MiniLMModel`.
        initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
layer_norm_eps: The epsilon used by LayerNorm.
"""
pretrained_config_archive_map = MINILM_PRETRAINED_CONFIG_ARCHIVE_MAP
def __init__(self,
vocab_size=28996,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=6,
initializer_range=0.02,
layer_norm_eps=1e-12,
source_type_id=0,
target_type_id=1,
**kwargs):
super(MinilmConfig, self).__init__(**kwargs)
if isinstance(vocab_size, str) or (sys.version_info[0] == 2
and isinstance(vocab_size, unicode)):
with open(vocab_size, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size, int):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.source_type_id = source_type_id
self.target_type_id = target_type_id
else:
raise ValueError("First argument must be either a vocabulary size (int)"
" or the path to a pretrained model config file (str)")
| EXA-1-master | exa/models/unilm-master/layoutreader/s2s_ft/configuration_minilm.py |
# coding=utf-8
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
import logging
import math
import os
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.modules.loss import _Loss
class LabelSmoothingLoss(_Loss):
"""
With label smoothing,
KL-divergence between q_{smoothed ground truth prob.}(w)
and p_{prob. computed by model}(w) is minimized.
"""
def __init__(self, label_smoothing=0, tgt_vocab_size=0, ignore_index=0, size_average=None, reduce=None,
reduction='mean'):
assert 0.0 < label_smoothing <= 1.0
self.ignore_index = ignore_index
super(LabelSmoothingLoss, self).__init__(
size_average=size_average, reduce=reduce, reduction=reduction)
assert label_smoothing > 0
assert tgt_vocab_size > 0
smoothing_value = label_smoothing / (tgt_vocab_size - 2)
one_hot = torch.full((tgt_vocab_size,), smoothing_value)
one_hot[self.ignore_index] = 0
self.register_buffer('one_hot', one_hot.unsqueeze(0))
self.confidence = 1.0 - label_smoothing
self.tgt_vocab_size = tgt_vocab_size
def forward(self, output, target):
"""
output (FloatTensor): batch_size * num_pos * n_classes
target (LongTensor): batch_size * num_pos
"""
assert self.tgt_vocab_size == output.size(2)
batch_size, num_pos = target.size(0), target.size(1)
output = output.view(-1, self.tgt_vocab_size)
target = target.view(-1)
model_prob = self.one_hot.repeat(target.size(0), 1)
model_prob.scatter_(1, target.unsqueeze(1), self.confidence)
model_prob.masked_fill_((target == self.ignore_index).unsqueeze(1), 0)
return F.kl_div(output, model_prob, reduction='none').view(batch_size, num_pos, -1).sum(2)
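    # Minimal usage sketch for this loss (assumption: `output` already holds log-probabilities,
    # which is what F.kl_div expects as its first argument):
    #   criterion = LabelSmoothingLoss(label_smoothing=0.1, tgt_vocab_size=30522, ignore_index=0)
    #   loss = criterion(torch.log_softmax(logits, dim=-1), targets)  # -> (batch, num_pos)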
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
'unilm-base-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1-base-cased.bin",
'unilm-large-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1-large-cased.bin",
'unilm1-base-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1-base-cased.bin",
'unilm1-large-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1-large-cased.bin",
'unilm1.2-base-uncased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1.2-base-uncased.bin"
}
CONFIG_NAME = 'config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class BertConfig(object):
"""Configuration class to store the configuration of a `BertModel`.
"""
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
relax_projection=0,
new_pos_ids=False,
initializer_range=0.02,
task_idx=None,
fp32_embedding=False,
ffn_type=0,
label_smoothing=None,
num_qkv=0,
seg_emb=False,
source_type_id=0,
target_type_id=1,
no_segment_embedding=False, **kwargs):
"""Constructs BertConfig.
Args:
            vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.relax_projection = relax_projection
self.new_pos_ids = new_pos_ids
self.initializer_range = initializer_range
self.task_idx = task_idx
self.fp32_embedding = fp32_embedding
self.ffn_type = ffn_type
self.label_smoothing = label_smoothing
self.num_qkv = num_qkv
self.seg_emb = seg_emb
self.no_segment_embedding = no_segment_embedding
self.source_type_id = source_type_id
self.target_type_id = target_type_id
if type_vocab_size == 0:
self.no_segment_embedding = True
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file, **kwargs):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
json_info = json.loads(text)
for k, v in kwargs.items():
json_info[k] = v
return cls.from_dict(json_info)
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-5):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(
config.vocab_size, config.hidden_size)
if config.no_segment_embedding:
self.token_type_embeddings = None
else:
self.token_type_embeddings = nn.Embedding(
config.type_vocab_size, config.hidden_size)
if hasattr(config, 'fp32_embedding'):
self.fp32_embedding = config.fp32_embedding
else:
self.fp32_embedding = False
if hasattr(config, 'new_pos_ids') and config.new_pos_ids:
self.num_pos_emb = 4
else:
self.num_pos_emb = 1
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size * self.num_pos_emb)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None, position_ids=None, task_idx=None):
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(
seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
if self.num_pos_emb > 1:
num_batch = position_embeddings.size(0)
num_pos = position_embeddings.size(1)
position_embeddings = position_embeddings.view(
num_batch, num_pos, self.num_pos_emb, -1)[torch.arange(0, num_batch).long(), :, task_idx, :]
embeddings = words_embeddings + position_embeddings
if self.token_type_embeddings is not None:
embeddings = embeddings + self.token_type_embeddings(token_type_ids)
if self.fp32_embedding:
embeddings = embeddings.half()
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class LayoutlmEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(LayoutlmEmbeddings, self).__init__()
# self.word_embeddings = nn.Embedding(
# config.vocab_size, config.hidden_size)
self.only_layout = config.layoutlm_only_layout_flag
if not self.only_layout:
self.word_embeddings = nn.Embedding(
config.vocab_size, config.hidden_size, padding_idx=0
)
else:
self.word_embeddings = None
self.x_position_embeddings = nn.Embedding(
config.max_2d_position_embeddings, config.hidden_size
)
self.y_position_embeddings = nn.Embedding(
config.max_2d_position_embeddings, config.hidden_size
)
self.h_position_embeddings = nn.Embedding(
config.max_2d_position_embeddings, config.hidden_size
)
self.w_position_embeddings = nn.Embedding(
config.max_2d_position_embeddings, config.hidden_size
)
if config.no_segment_embedding:
self.token_type_embeddings = None
else:
self.token_type_embeddings = nn.Embedding(
config.type_vocab_size, config.hidden_size)
if hasattr(config, 'fp32_embedding'):
self.fp32_embedding = config.fp32_embedding
else:
self.fp32_embedding = False
if hasattr(config, 'new_pos_ids') and config.new_pos_ids:
self.num_pos_emb = 4
else:
self.num_pos_emb = 1
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size * self.num_pos_emb)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, bbox, token_type_ids=None, position_ids=None, task_idx=None):
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(
seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
position_embeddings = self.position_embeddings(position_ids)
if self.num_pos_emb > 1:
num_batch = position_embeddings.size(0)
num_pos = position_embeddings.size(1)
position_embeddings = position_embeddings.view(
num_batch, num_pos, self.num_pos_emb, -1)[torch.arange(0, num_batch).long(), :, task_idx, :]
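        # 2-D layout embeddings. Shape assumption based on the indexing below: `bbox` is
        # (batch, seq_len, 4) with [x0, y0, x1, y1] values bucketed into
        # [0, max_2d_position_embeddings); width/height embeddings come from the box extents.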
left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
h_position_embeddings = self.h_position_embeddings(
bbox[:, :, 3] - bbox[:, :, 1]
)
w_position_embeddings = self.w_position_embeddings(
bbox[:, :, 2] - bbox[:, :, 0]
)
# token_type_embeddings = self.token_type_embeddings(token_type_ids)
# words_embeddings = self.word_embeddings(input_ids)
# position_embeddings = self.position_embeddings(position_ids)
embeddings = (
# words_embeddings
position_embeddings
+ left_position_embeddings
+ upper_position_embeddings
+ right_position_embeddings
+ lower_position_embeddings
+ h_position_embeddings
+ w_position_embeddings
)
if not self.only_layout:
words_embeddings = self.word_embeddings(input_ids)
embeddings = embeddings + words_embeddings
if self.token_type_embeddings is not None:
embeddings = embeddings + self.token_type_embeddings(token_type_ids)
if self.fp32_embedding:
embeddings = embeddings.half()
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(
config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
if hasattr(config, 'num_qkv') and (config.num_qkv > 1):
self.num_qkv = config.num_qkv
else:
self.num_qkv = 1
self.query = nn.Linear(
config.hidden_size, self.all_head_size * self.num_qkv)
self.key = nn.Linear(config.hidden_size,
self.all_head_size * self.num_qkv)
self.value = nn.Linear(
config.hidden_size, self.all_head_size * self.num_qkv)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.uni_debug_flag = True if os.getenv(
'UNI_DEBUG_FLAG', '') else False
if self.uni_debug_flag:
self.register_buffer('debug_attention_probs',
torch.zeros((512, 512)))
if hasattr(config, 'seg_emb') and config.seg_emb:
self.b_q_s = nn.Parameter(torch.zeros(
1, self.num_attention_heads, 1, self.attention_head_size))
self.seg_emb = nn.Embedding(
config.type_vocab_size, self.all_head_size)
else:
self.b_q_s = None
self.seg_emb = None
def transpose_for_scores(self, x, mask_qkv=None):
if self.num_qkv > 1:
sz = x.size()[:-1] + (self.num_qkv,
self.num_attention_heads, self.all_head_size)
# (batch, pos, num_qkv, head, head_hid)
x = x.view(*sz)
if mask_qkv is None:
x = x[:, :, 0, :, :]
elif isinstance(mask_qkv, int):
x = x[:, :, mask_qkv, :, :]
else:
# mask_qkv: (batch, pos)
if mask_qkv.size(1) > sz[1]:
mask_qkv = mask_qkv[:, :sz[1]]
# -> x: (batch, pos, head, head_hid)
x = x.gather(2, mask_qkv.view(sz[0], sz[1], 1, 1, 1).expand(
sz[0], sz[1], 1, sz[3], sz[4])).squeeze(2)
else:
sz = x.size()[:-1] + (self.num_attention_heads,
self.attention_head_size)
# (batch, pos, head, head_hid)
x = x.view(*sz)
# (batch, head, pos, head_hid)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask, history_states=None,
mask_qkv=None, seg_ids=None, key_history=None, value_history=None,
key_cache=None, value_cache=None,
):
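        # history_states: previously computed states that are concatenated in front of the
        #   current hidden states when projecting keys/values, used for incremental decoding.
        # key_cache / value_cache: Python lists that accumulate the projected key/value tensors
        #   across decoding steps; the concatenation over the list is attended to.
        # key_history / value_history: per-layer key/value tensors from earlier passes that are
        #   concatenated into this step's keys/values, or lists this call appends the new ones to.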
if history_states is None:
mixed_query_layer = self.query(hidden_states)
# possible issue: https://github.com/NVIDIA/apex/issues/131
mixed_key_layer = F.linear(hidden_states, self.key.weight)
mixed_value_layer = self.value(hidden_states)
else:
x_states = torch.cat((history_states, hidden_states), dim=1)
mixed_query_layer = self.query(hidden_states)
# possible issue: https://github.com/NVIDIA/apex/issues/131
mixed_key_layer = F.linear(x_states, self.key.weight)
mixed_value_layer = self.value(x_states)
if key_cache is not None and isinstance(key_cache, list):
key_cache.append(mixed_key_layer)
mixed_key_layer = torch.cat(key_cache, dim=1)
if value_cache is not None and isinstance(value_cache, list):
value_cache.append(mixed_value_layer)
mixed_value_layer = torch.cat(value_cache, dim=1)
query_layer = self.transpose_for_scores(mixed_query_layer, mask_qkv)
key_layer = self.transpose_for_scores(mixed_key_layer, mask_qkv)
value_layer = self.transpose_for_scores(mixed_value_layer, mask_qkv)
if key_history is not None and not isinstance(key_history, list):
key_layer = torch.cat((key_history, key_layer), dim=-2)
value_layer = torch.cat((value_history, value_layer), dim=-2)
# Take the dot product between "query" and "key" to get the raw attention scores.
# (batch, head, pos, pos)
attention_scores = torch.matmul(
query_layer / math.sqrt(self.attention_head_size), key_layer.transpose(-1, -2))
if self.seg_emb is not None:
seg_rep = self.seg_emb(seg_ids)
# (batch, pos, head, head_hid)
seg_rep = seg_rep.view(seg_rep.size(0), seg_rep.size(
1), self.num_attention_heads, self.attention_head_size)
qs = torch.einsum('bnih,bjnh->bnij',
query_layer + self.b_q_s, seg_rep)
attention_scores = attention_scores + qs
# attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
if self.uni_debug_flag:
_pos = attention_probs.size(-1)
self.debug_attention_probs[:_pos, :_pos].copy_(
attention_probs[0].mean(0).view(_pos, _pos))
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[
:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
if isinstance(key_history, list):
key_history.append(key_layer)
if isinstance(value_history, list):
value_history.append(value_layer)
return context_layer
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask, history_states=None,
mask_qkv=None, seg_ids=None, key_history=None, value_history=None):
self_output = self.self(
input_tensor, attention_mask, history_states=history_states,
mask_qkv=mask_qkv, seg_ids=seg_ids, key_history=key_history, value_history=value_history)
attention_output = self.output(self_output, input_tensor)
return attention_output
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
self.intermediate_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class TransformerFFN(nn.Module):
def __init__(self, config):
super(TransformerFFN, self).__init__()
self.ffn_type = config.ffn_type
assert self.ffn_type in (1, 2)
if self.ffn_type in (1, 2):
self.wx0 = nn.Linear(config.hidden_size, config.hidden_size)
if self.ffn_type in (2,):
self.wx1 = nn.Linear(config.hidden_size, config.hidden_size)
if self.ffn_type in (1, 2):
self.output = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, x):
if self.ffn_type in (1, 2):
x0 = self.wx0(x)
if self.ffn_type == 1:
x1 = x
elif self.ffn_type == 2:
x1 = self.wx1(x)
out = self.output(x0 * x1)
out = self.dropout(out)
out = self.LayerNorm(out + x)
return out
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.ffn_type = config.ffn_type
if self.ffn_type:
self.ffn = TransformerFFN(config)
else:
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask, history_states=None,
mask_qkv=None, seg_ids=None, key_history=None, value_history=None):
attention_output = self.attention(
hidden_states, attention_mask, history_states=history_states,
mask_qkv=mask_qkv, seg_ids=seg_ids, key_history=key_history, value_history=value_history)
if self.ffn_type:
layer_output = self.ffn(attention_output)
else:
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
layer = BertLayer(config)
self.layer = nn.ModuleList([copy.deepcopy(layer)
for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, prev_embedding=None,
prev_encoded_layers=None, mask_qkv=None, seg_ids=None, key_history=None, value_history=None):
        # history embedding and encoded layers must be given simultaneously
assert (prev_embedding is None) == (prev_encoded_layers is None)
all_encoder_layers = []
if (prev_embedding is not None) and (prev_encoded_layers is not None):
history_states = prev_embedding
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(
hidden_states, attention_mask, history_states=history_states, mask_qkv=mask_qkv, seg_ids=seg_ids)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if prev_encoded_layers is not None:
history_states = prev_encoded_layers[i]
else:
for i, layer_module in enumerate(self.layer):
set_key = None
if isinstance(key_history, list):
set_key = key_history if len(key_history) < len(self.layer) else key_history[i]
set_value = None
if isinstance(value_history, list):
                    set_value = value_history if len(value_history) < len(self.layer) else value_history[i]
hidden_states = layer_module(
hidden_states, attention_mask, mask_qkv=mask_qkv, seg_ids=seg_ids,
key_history=set_key, value_history=set_value)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
return all_encoder_layers
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.transform_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
hid_size = config.hidden_size
if hasattr(config, 'relax_projection') and (config.relax_projection > 1):
hid_size *= config.relax_projection
self.dense = nn.Linear(config.hidden_size, hid_size)
self.LayerNorm = BertLayerNorm(hid_size, eps=1e-5)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class LayoutlmSPLMPredictionHead(nn.Module):
def __init__(self, config, src_len):
super(LayoutlmSPLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.bias = nn.Parameter(torch.zeros(src_len))
if hasattr(config, 'relax_projection') and (config.relax_projection > 1):
self.relax_projection = config.relax_projection
else:
self.relax_projection = 0
self.fp32_embedding = config.fp32_embedding
def convert_to_type(tensor):
if self.fp32_embedding:
return tensor.half()
else:
return tensor
self.type_converter = convert_to_type
self.converted = False
def forward(self, hidden_states, src_emb, task_idx=None):
if not self.converted:
self.converted = True
if self.fp32_embedding:
self.transform.half()
hidden_states = self.transform(self.type_converter(hidden_states))
if self.relax_projection > 1:
num_batch = hidden_states.size(0)
num_pos = hidden_states.size(1)
# (batch, num_pos, relax_projection*hid) -> (batch, num_pos, relax_projection, hid) -> (batch, num_pos, hid)
hidden_states = hidden_states.view(
num_batch, num_pos, self.relax_projection, -1)[torch.arange(0, num_batch).long(), :, task_idx, :]
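        # Pointer-style scoring: each target-position state is dotted against every source
        # embedding (einsum 'btf,bsf->bts'), giving (batch, target_len, source_len) scores,
        # plus a learned per-source-position bias.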
if self.fp32_embedding:
hidden_states = torch.einsum('btf,bsf->bts',
self.type_converter(hidden_states), self.type_converter(src_emb)) + \
self.type_converter(self.bias)
# hidden_states = F.linear(self.type_converter(hidden_states), self.type_converter(
# self.decoder.weight), self.type_converter(self.bias))
else:
hidden_states = torch.einsum('btf,bsf->bts', hidden_states, src_emb) + self.bias
return hidden_states
class LayoutlmSPPreTrainingHeads(nn.Module):
def __init__(self, config, src_len, num_labels=2):
super(LayoutlmSPPreTrainingHeads, self).__init__()
self.predictions = LayoutlmSPLMPredictionHead(config, src_len)
self.seq_relationship = nn.Linear(config.hidden_size, num_labels)
def forward(self, sequence_output, pooled_output, src_emb, task_idx=None):
prediction_scores = self.predictions(sequence_output, src_emb, task_idx)
if pooled_output is None:
seq_relationship_score = None
else:
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class PreTrainedBertModel(nn.Module):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(PreTrainedBertModel, self).__init__()
if not isinstance(config, BertConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
"To create a model from a Google pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_bert_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
# module.weight.data.copy_(torch.Tensor(
# truncnorm.rvs(-1, 1, size=list(module.weight.data.shape)) * self.config.initializer_range))
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, pretrained_model_name, config, state_dict=None, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-base-multilingual`
. `bert-base-chinese`
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
"""
logger.info("Model config {}".format(config))
# clean the arguments in kwargs
for arg_clean in ('config_path', 'type_vocab_size', 'relax_projection', 'new_pos_ids', 'task_idx',
'max_position_embeddings', 'fp32_embedding', 'ffn_type', 'label_smoothing',
'hidden_dropout_prob', 'attention_probs_dropout_prob', 'num_qkv', 'seg_emb',
'word_emb_map', 'num_labels', 'num_rel', 'num_sentlvl_labels'):
if arg_clean in kwargs:
del kwargs[arg_clean]
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None:
weights_path = os.path.join(pretrained_model_name, WEIGHTS_NAME)
state_dict = torch.load(weights_path, map_location='cpu')
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(
prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
model.missing_keys = missing_keys
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
logger.info('\n'.join(error_msgs))
return model
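# Illustrative loading call for the classmethod above (hypothetical `ckpt_dir` containing
# a compatible pytorch_model.bin):
#   model = BertModel.from_pretrained(ckpt_dir, config)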
class BertModel(PreTrainedBertModel):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Params:
config: a BertConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
to the last attention block of shape [batch_size, sequence_length, hidden_size],
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated to the first token of the
            input (`[CLS]`) to train on the Next-Sentence task (see BERT's paper).
```
"""
def __init__(self, config):
super(BertModel, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_bert_weights)
def rescale_some_parameters(self):
for layer_id, layer in enumerate(self.encoder.layer):
layer.attention.output.dense.weight.data.div_(
math.sqrt(2.0 * (layer_id + 1)))
layer.output.dense.weight.data.div_(math.sqrt(2.0 * (layer_id + 1)))
def get_extended_attention_mask(self, input_ids, token_type_ids, attention_mask):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
if attention_mask.dim() == 2:
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
elif attention_mask.dim() == 3:
extended_attention_mask = attention_mask.unsqueeze(1)
else:
raise NotImplementedError
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(
dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True,
mask_qkv=None, task_idx=None, key_history=None, value_history=None, position_ids=None):
extended_attention_mask = self.get_extended_attention_mask(
input_ids, token_type_ids, attention_mask)
embedding_output = self.embeddings(
input_ids, token_type_ids, task_idx=task_idx, position_ids=position_ids)
encoded_layers = self.encoder(embedding_output, extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers,
mask_qkv=mask_qkv, seg_ids=token_type_ids,
key_history=key_history, value_history=value_history)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers, pooled_output
class LayoutlmModel(PreTrainedBertModel):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Params:
config: a BertConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
to the last attention block of shape [batch_size, sequence_length, hidden_size],
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated to the first token of the
            input (`[CLS]`) to train on the Next-Sentence task (see BERT's paper).
```
"""
def __init__(self, config):
super(LayoutlmModel, self).__init__(config)
self.embeddings = LayoutlmEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_bert_weights)
def rescale_some_parameters(self):
for layer_id, layer in enumerate(self.encoder.layer):
layer.attention.output.dense.weight.data.div_(
math.sqrt(2.0 * (layer_id + 1)))
layer.output.dense.weight.data.div_(math.sqrt(2.0 * (layer_id + 1)))
def get_extended_attention_mask(self, input_ids, token_type_ids, attention_mask):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
if attention_mask.dim() == 2:
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
elif attention_mask.dim() == 3:
extended_attention_mask = attention_mask.unsqueeze(1)
else:
raise NotImplementedError
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(
dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True,
mask_qkv=None, task_idx=None, key_history=None, value_history=None, position_ids=None):
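        # `input_ids` packs tokens and layout together: [:, :, 0] holds the wordpiece ids and
        # [:, :, 1:] holds the corresponding bbox coordinates consumed by LayoutlmEmbeddings.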
extended_attention_mask = self.get_extended_attention_mask(
input_ids[:, :, 0], token_type_ids, attention_mask)
embedding_output = self.embeddings(
input_ids[:, :, 0], input_ids[:, :, 1:], token_type_ids, task_idx=task_idx, position_ids=position_ids)
encoded_layers = self.encoder(embedding_output, extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers,
mask_qkv=mask_qkv, seg_ids=token_type_ids,
key_history=key_history, value_history=value_history)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers, pooled_output
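# The additive mask produced by get_extended_attention_mask() above is easiest to see on a toy batch.
# The sketch below is illustrative only (not part of the original pipeline); it assumes a plain 2D
# padding mask and mirrors the broadcast + (1.0 - mask) * -10000.0 logic of the method.
def _example_extended_attention_mask():
    import torch
    attention_mask = torch.tensor([[1, 1, 1, 0, 0]])     # [batch_size=1, seq_len=5]; 0 marks padding
    extended = attention_mask.unsqueeze(1).unsqueeze(2)  # [1, 1, 1, 5], broadcastable over heads/queries
    extended = (1.0 - extended.float()) * -10000.0       # 0.0 for real tokens, -10000.0 for padding
    # Adding `extended` to the raw attention scores before softmax effectively removes
    # the padded positions from the attention distribution.
    return extended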
class BertModelIncr(BertModel):
def __init__(self, config):
super(BertModelIncr, self).__init__(config)
def forward(self, input_ids, token_type_ids, position_ids, attention_mask, output_all_encoded_layers=True,
prev_embedding=None, prev_encoded_layers=None, mask_qkv=None, task_idx=None):
extended_attention_mask = self.get_extended_attention_mask(
input_ids, token_type_ids, attention_mask)
embedding_output = self.embeddings(
input_ids, token_type_ids, position_ids, task_idx=task_idx)
encoded_layers = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers,
prev_embedding=prev_embedding,
prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv,
seg_ids=token_type_ids)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return embedding_output, encoded_layers, pooled_output
class LayoutlmModelIncr(LayoutlmModel):
def __init__(self, config):
super(LayoutlmModelIncr, self).__init__(config)
def forward(self, input_ids, token_type_ids, position_ids, attention_mask, output_all_encoded_layers=True,
prev_embedding=None, prev_encoded_layers=None, mask_qkv=None, task_idx=None):
extended_attention_mask = self.get_extended_attention_mask(
input_ids[:, :, 0], token_type_ids, attention_mask)
embedding_output = self.embeddings(
input_ids[:, :, 0], input_ids[:, :, 1:], token_type_ids, position_ids, task_idx=task_idx)
encoded_layers = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers,
prev_embedding=prev_embedding,
prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv,
seg_ids=token_type_ids)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return embedding_output, encoded_layers, pooled_output
class LayoutlmForSeq2SeqDecoder(PreTrainedBertModel):
"""refer to BertForPreTraining"""
def __init__(self, config, mask_word_id=0, num_labels=2, num_rel=0,
search_beam_size=1, length_penalty=1.0, eos_id=0, sos_id=0,
forbid_duplicate_ngrams=False, forbid_ignore_set=None, ngram_size=3, min_len=0, mode="s2s",
pos_shift=False):
super(LayoutlmForSeq2SeqDecoder, self).__init__(config)
self.layout_flag = config.base_model_type == 'layoutlm'
if config.base_model_type == 'layoutlm':
self.bert = LayoutlmModelIncr(config)
else:
self.bert = BertModelIncr(config)
# self.bert = BertModelIncr(config)
# note: the max source length is the max src seq length during fine tuning which includes the cls and sep
# NOTE: we don't remove anything. the 0 is for padding
self.cls = LayoutlmSPPreTrainingHeads(
config, src_len=config.max_source_length, num_labels=num_labels)
self.apply(self.init_bert_weights)
self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none')
self.crit_next_sent = nn.CrossEntropyLoss(ignore_index=-1)
self.mask_word_id = mask_word_id
self.num_labels = num_labels
self.num_rel = num_rel
self.search_beam_size = search_beam_size
self.length_penalty = length_penalty
self.eos_id = eos_id
self.sos_id = sos_id
self.forbid_duplicate_ngrams = forbid_duplicate_ngrams
self.forbid_ignore_set = forbid_ignore_set
self.ngram_size = ngram_size
self.min_len = min_len
assert mode in ("s2s", "l2r")
self.mode = mode
self.pos_shift = pos_shift
def forward(self, input_ids, token_type_ids, position_ids, attention_mask, task_idx=None, mask_qkv=None):
if self.search_beam_size > 1:
return self.beam_search(input_ids, token_type_ids, position_ids, attention_mask, task_idx=task_idx,
mask_qkv=mask_qkv)
input_shape = list(input_ids.size())
batch_size = input_shape[0]
input_length = input_shape[1]
output_shape = list(token_type_ids.size())
output_length = output_shape[1]
output_ids = []
prev_embedding = None
prev_encoded_layers = None
curr_ids = input_ids
if not self.layout_flag:
mask_ids = input_ids.new(batch_size, 1).fill_(self.mask_word_id)
else:
mask_ids = input_ids.new_zeros(batch_size, 1, 5)
mask_ids[:, :, 0] = self.mask_word_id
next_pos = input_length
if self.pos_shift:
if not self.layout_flag:
sos_ids = input_ids.new(batch_size, 1).fill_(self.sos_id)
else:
sos_ids = input_ids.new_zeros(batch_size, 1, 5)
sos_ids[:, :, 0] = self.sos_id
src_embedding = None
while next_pos < output_length:
curr_length = list(curr_ids.size())[1]
if self.pos_shift:
if next_pos == input_length:
x_input_ids = torch.cat((curr_ids, sos_ids), dim=1)
start_pos = 0
else:
x_input_ids = curr_ids
start_pos = next_pos
else:
start_pos = next_pos - curr_length
# if self.layout_flag:
# mask_ids[:, -1, 1:] = curr_ids[:, , 1:]
x_input_ids = torch.cat((curr_ids, mask_ids), dim=1)
curr_token_type_ids = token_type_ids[:, start_pos:next_pos + 1]
curr_attention_mask = attention_mask[:,
start_pos:next_pos + 1, :next_pos + 1]
curr_position_ids = position_ids[:, start_pos:next_pos + 1]
new_embedding, new_encoded_layers, _ = \
self.bert(x_input_ids, curr_token_type_ids, curr_position_ids, curr_attention_mask,
output_all_encoded_layers=True, prev_embedding=prev_embedding,
prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv)
if src_embedding is None:
                # note: three embeddings could be cut: CLS (1st), ..., SEP (-2nd), and the next-to-predict token (-1st)
                # note: (NEW) the SEP embedding is kept so its index can serve as the ignore index in the loss (for padding)
                # NOTE: only the next-to-predict token is removed here
src_embedding = new_embedding[:, :-1, :]
last_hidden = new_encoded_layers[-1][:, -1:, :]
prediction_scores, _ = self.cls(last_hidden, None, src_embedding, task_idx=task_idx)
_, max_ids = torch.max(prediction_scores, dim=-1)
output_ids.append(max_ids)
if self.pos_shift:
if prev_embedding is None:
prev_embedding = new_embedding
else:
prev_embedding = torch.cat(
(prev_embedding, new_embedding), dim=1)
if prev_encoded_layers is None:
prev_encoded_layers = [x for x in new_encoded_layers]
else:
prev_encoded_layers = [torch.cat((x[0], x[1]), dim=1) for x in zip(
prev_encoded_layers, new_encoded_layers)]
else:
if prev_embedding is None:
prev_embedding = new_embedding[:, :-1, :]
else:
prev_embedding = torch.cat(
(prev_embedding, new_embedding[:, :-1, :]), dim=1)
if prev_encoded_layers is None:
prev_encoded_layers = [x[:, :-1, :]
for x in new_encoded_layers]
else:
prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1)
for x in zip(prev_encoded_layers, new_encoded_layers)]
if not self.layout_flag:
index = max_ids
curr_ids = torch.gather(input_ids, 1, index)
else:
_, _, dim = input_ids.shape
index = max_ids.unsqueeze(-1)
index = index.expand(index.shape[0], index.shape[1], dim)
# index = index.repeat(1, 1, dim)
curr_ids = torch.gather(input_ids, 1, index)
# if len(input_ids.shape) == 2:
# real_input_ids = input_ids[:, 1:]
# index = max_ids
# curr_ids = torch.gather(real_input_ids, 1, index)
# else:
# real_input_ids = input_ids[:, 1:, :]
# _, _, dim = real_input_ids.shape
# index = max_ids.unsqueeze(-1)
# index = index.expand(index.shape[0], index.shape[1], dim)
# curr_ids = torch.gather(real_input_ids, 1, index)
# # note: real input ids only include the ids for real data (remove the cls and sep)
# real_input_ids = input_ids[:, 1: -1, :]
#
# _, _, dim = real_input_ids.shape
# index = max_ids.unsqueeze(-1)
# index = index.expand(index.shape[0], index.shape[1], dim)
#
# curr_ids = torch.gather(real_input_ids, 1, index)
# curr_ids = real_input_ids[:, max_ids, :]
# curr_ids = max_ids
next_pos += 1
return torch.cat(output_ids, dim=1)
    # TODO: keep beam_search() in sync with the decoding changes made in forward()
def beam_search(self, input_ids, token_type_ids, position_ids, attention_mask, task_idx=None, mask_qkv=None):
input_shape = list(input_ids.size())
batch_size = input_shape[0]
input_length = input_shape[1]
output_shape = list(token_type_ids.size())
output_length = output_shape[1]
output_ids = []
prev_embedding = None
prev_encoded_layers = None
curr_ids = input_ids
# mask_ids = input_ids.new(batch_size, 1).fill_(self.mask_word_id)
if not self.layout_flag:
mask_ids = input_ids.new(batch_size, 1).fill_(self.mask_word_id)
else:
mask_ids = input_ids.new_zeros(batch_size, 1, 5)
mask_ids[:, :, 0] = self.mask_word_id
next_pos = input_length
if self.pos_shift:
if not self.layout_flag:
sos_ids = input_ids.new(batch_size, 1).fill_(self.sos_id)
else:
sos_ids = input_ids.new_zeros(batch_size, 1, 5)
sos_ids[:, :, 0] = self.sos_id
K = self.search_beam_size
total_scores = []
beam_masks = []
step_ids = []
step_back_ptrs = []
partial_seqs = []
forbid_word_mask = None
buf_matrix = None
src_embedding = None
while next_pos < output_length:
curr_length = list(curr_ids.size())[1]
if self.pos_shift:
if next_pos == input_length:
x_input_ids = torch.cat((curr_ids, sos_ids), dim=1)
start_pos = 0
else:
x_input_ids = curr_ids
start_pos = next_pos
else:
start_pos = next_pos - curr_length
x_input_ids = torch.cat((curr_ids, mask_ids), dim=1)
curr_token_type_ids = token_type_ids[:, start_pos:next_pos + 1]
curr_attention_mask = attention_mask[:,
start_pos:next_pos + 1, :next_pos + 1]
curr_position_ids = position_ids[:, start_pos:next_pos + 1]
new_embedding, new_encoded_layers, _ = \
self.bert(x_input_ids, curr_token_type_ids, curr_position_ids, curr_attention_mask,
output_all_encoded_layers=True, prev_embedding=prev_embedding,
prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv)
def first_expand(x):
input_shape = list(x.size())
expanded_shape = input_shape[:1] + [1] + input_shape[1:]
x = torch.reshape(x, expanded_shape)
repeat_count = [1, K] + [1] * (len(input_shape) - 1)
x = x.repeat(*repeat_count)
x = torch.reshape(x, [input_shape[0] * K] + input_shape[1:])
return x
if src_embedding is None:
src_embedding = new_embedding[:, :-1, :]
if src_embedding.shape[0] != new_embedding.shape[0]:
src_embedding = first_expand(src_embedding)
last_hidden = new_encoded_layers[-1][:, -1:, :]
prediction_scores, _ = self.cls(last_hidden, None, src_embedding, task_idx=task_idx)
log_scores = torch.nn.functional.log_softmax(
prediction_scores, dim=-1)
# if forbid_word_mask is not None:
# log_scores += (forbid_word_mask * -10000.0)
# if self.min_len and (next_pos - input_length + 1 <= self.min_len):
# log_scores[:, :, self.eos_id].fill_(-10000.0)
kk_scores, kk_ids = torch.topk(log_scores, k=K)
if len(total_scores) == 0:
k_ids = torch.reshape(kk_ids, [batch_size, K])
back_ptrs = torch.zeros(batch_size, K, dtype=torch.long)
k_scores = torch.reshape(kk_scores, [batch_size, K])
else:
last_eos = torch.reshape(
beam_masks[-1], [batch_size * K, 1, 1])
last_seq_scores = torch.reshape(
total_scores[-1], [batch_size * K, 1, 1])
kk_scores += last_eos * (-10000.0) + last_seq_scores
kk_scores = torch.reshape(kk_scores, [batch_size, K * K])
k_scores, k_ids = torch.topk(kk_scores, k=K)
back_ptrs = torch.floor_divide(k_ids, K)
kk_ids = torch.reshape(kk_ids, [batch_size, K * K])
k_ids = torch.gather(kk_ids, 1, k_ids)
step_back_ptrs.append(back_ptrs)
step_ids.append(k_ids)
beam_masks.append(torch.eq(k_ids, self.eos_id).type_as(kk_scores))
total_scores.append(k_scores)
# def first_expand(x):
# input_shape = list(x.size())
# expanded_shape = input_shape[:1] + [1] + input_shape[1:]
# x = torch.reshape(x, expanded_shape)
# repeat_count = [1, K] + [1] * (len(input_shape) - 1)
# x = x.repeat(*repeat_count)
# x = torch.reshape(x, [input_shape[0] * K] + input_shape[1:])
# return x
def select_beam_items(x, ids):
id_shape = list(ids.size())
id_rank = len(id_shape)
assert len(id_shape) == 2
x_shape = list(x.size())
x = torch.reshape(x, [batch_size, K] + x_shape[1:])
x_rank = len(x_shape) + 1
assert x_rank >= 2
if id_rank < x_rank:
ids = torch.reshape(
ids, id_shape + [1] * (x_rank - id_rank))
ids = ids.expand(id_shape + x_shape[1:])
y = torch.gather(x, 1, ids)
y = torch.reshape(y, x_shape)
return y
is_first = (prev_embedding is None)
if self.pos_shift:
if prev_embedding is None:
prev_embedding = first_expand(new_embedding)
else:
prev_embedding = torch.cat(
(prev_embedding, new_embedding), dim=1)
prev_embedding = select_beam_items(
prev_embedding, back_ptrs)
if prev_encoded_layers is None:
prev_encoded_layers = [first_expand(
x) for x in new_encoded_layers]
else:
prev_encoded_layers = [torch.cat((x[0], x[1]), dim=1) for x in zip(
prev_encoded_layers, new_encoded_layers)]
prev_encoded_layers = [select_beam_items(
x, back_ptrs) for x in prev_encoded_layers]
else:
if prev_embedding is None:
prev_embedding = first_expand(new_embedding[:, :-1, :])
else:
prev_embedding = torch.cat(
(prev_embedding, new_embedding[:, :-1, :]), dim=1)
prev_embedding = select_beam_items(
prev_embedding, back_ptrs)
if prev_encoded_layers is None:
prev_encoded_layers = [first_expand(
x[:, :-1, :]) for x in new_encoded_layers]
else:
prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1)
for x in zip(prev_encoded_layers, new_encoded_layers)]
prev_encoded_layers = [select_beam_items(
x, back_ptrs) for x in prev_encoded_layers]
max_ids = torch.reshape(k_ids, [batch_size * K, 1])
if len(input_ids.shape) == 2:
expand_input_ids = first_expand(input_ids)
index = max_ids
curr_ids = torch.gather(expand_input_ids, 1, index)
else:
expand_input_ids = first_expand(input_ids)
_, _, dim = expand_input_ids.shape
index = max_ids.unsqueeze(-1)
index = index.expand(index.shape[0], index.shape[1], dim)
curr_ids = torch.gather(expand_input_ids, 1, index)
if is_first:
token_type_ids = first_expand(token_type_ids)
position_ids = first_expand(position_ids)
attention_mask = first_expand(attention_mask)
mask_ids = first_expand(mask_ids)
if mask_qkv is not None:
mask_qkv = first_expand(mask_qkv)
if self.forbid_duplicate_ngrams:
wids = step_ids[-1].tolist()
ptrs = step_back_ptrs[-1].tolist()
if is_first:
partial_seqs = []
for b in range(batch_size):
for k in range(K):
partial_seqs.append([wids[b][k]])
else:
new_partial_seqs = []
for b in range(batch_size):
for k in range(K):
new_partial_seqs.append(
partial_seqs[ptrs[b][k] + b * K] + [wids[b][k]])
partial_seqs = new_partial_seqs
def get_dup_ngram_candidates(seq, n):
cands = set()
if len(seq) < n:
return []
tail = seq[-(n - 1):]
if self.forbid_ignore_set and any(tk in self.forbid_ignore_set for tk in tail):
return []
for i in range(len(seq) - (n - 1)):
mismatch = False
for j in range(n - 1):
if tail[j] != seq[i + j]:
mismatch = True
break
if (not mismatch) and not (
self.forbid_ignore_set and (seq[i + n - 1] in self.forbid_ignore_set)):
cands.add(seq[i + n - 1])
return list(sorted(cands))
if len(partial_seqs[0]) >= self.ngram_size:
dup_cands = []
for seq in partial_seqs:
dup_cands.append(
get_dup_ngram_candidates(seq, self.ngram_size))
if max(len(x) for x in dup_cands) > 0:
if buf_matrix is None:
vocab_size = list(log_scores.size())[-1]
buf_matrix = np.zeros(
(batch_size * K, vocab_size), dtype=float)
else:
buf_matrix.fill(0)
for bk, cands in enumerate(dup_cands):
for i, wid in enumerate(cands):
buf_matrix[bk, wid] = 1.0
forbid_word_mask = torch.tensor(
buf_matrix, dtype=log_scores.dtype)
forbid_word_mask = torch.reshape(
forbid_word_mask, [batch_size * K, 1, vocab_size]).to(input_ids.device)
else:
forbid_word_mask = None
next_pos += 1
# [(batch, beam)]
total_scores = [x.tolist() for x in total_scores]
step_ids = [x.tolist() for x in step_ids]
step_back_ptrs = [x.tolist() for x in step_back_ptrs]
# back tracking
traces = {'pred_seq': [], 'scores': [], 'wids': [], 'ptrs': []}
for b in range(batch_size):
# [(beam,)]
scores = [x[b] for x in total_scores]
wids_list = [x[b] for x in step_ids]
ptrs = [x[b] for x in step_back_ptrs]
traces['scores'].append(scores)
traces['wids'].append(wids_list)
traces['ptrs'].append(ptrs)
# first we need to find the eos frame where all symbols are eos
# any frames after the eos frame are invalid
last_frame_id = len(scores) - 1
for i, wids in enumerate(wids_list):
if all(wid == self.eos_id for wid in wids):
last_frame_id = i
break
max_score = -math.inf
frame_id = -1
pos_in_frame = -1
for fid in range(last_frame_id + 1):
for i, wid in enumerate(wids_list[fid]):
if wid == self.eos_id or fid == last_frame_id:
s = scores[fid][i]
if self.length_penalty > 0:
s /= math.pow((5 + fid + 1) / 6.0,
self.length_penalty)
if s > max_score:
max_score = s
frame_id = fid
pos_in_frame = i
if frame_id == -1:
traces['pred_seq'].append([0])
else:
seq = [wids_list[frame_id][pos_in_frame]]
for fid in range(frame_id, 0, -1):
pos_in_frame = ptrs[fid][pos_in_frame]
seq.append(wids_list[fid - 1][pos_in_frame])
seq.reverse()
traces['pred_seq'].append(seq)
def _pad_sequence(sequences, max_len, padding_value=0):
trailing_dims = sequences[0].size()[1:]
out_dims = (len(sequences), max_len) + trailing_dims
out_tensor = sequences[0].data.new(*out_dims).fill_(padding_value)
for i, tensor in enumerate(sequences):
length = tensor.size(0)
# use index notation to prevent duplicate references to the tensor
out_tensor[i, :length, ...] = tensor
return out_tensor
# convert to tensors for DataParallel
for k in ('pred_seq', 'scores', 'wids', 'ptrs'):
ts_list = traces[k]
if not isinstance(ts_list[0], torch.Tensor):
dt = torch.float if k == 'scores' else torch.long
ts_list = [torch.tensor(it, dtype=dt) for it in ts_list]
traces[k] = _pad_sequence(
ts_list, output_length, padding_value=0).to(input_ids.device)
return traces
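# The score normalization in the backtracking loop above is a GNMT-style length penalty:
# the accumulated log-probability is divided by ((5 + length) / 6) ** length_penalty.
# A hedged, self-contained sketch with made-up scores (not taken from the repo):
def _example_length_penalty(alpha=1.0):
    import math
    raw_scores = {3: -2.1, 7: -3.0}  # hypothetical summed log-probs for hypotheses of length 3 and 7
    normalized = {
        length: score / math.pow((5 + length) / 6.0, alpha)
        for length, score in raw_scores.items()
    }
    # The raw scores always favour the shorter hypothesis; after normalization the longer,
    # still-likely hypothesis (-3.0 / 2.0 = -1.5) overtakes the shorter one (-2.1 / 1.33 ~= -1.58).
    return normalized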
| EXA-1-master | exa/models/unilm-master/layoutreader/s2s_ft/modeling_decoding.py |
import numpy as np
from random import randint, shuffle, choice
from random import random as rand
import math
import logging
import torch
import torch.utils.data
logger = logging.getLogger(__name__)
def get_random_word(vocab_words):
i = randint(0, len(vocab_words)-1)
return vocab_words[i]
def batch_list_to_batch_tensors(batch):
batch_tensors = []
for x in zip(*batch):
if x[0] is None:
batch_tensors.append(None)
elif isinstance(x[0], torch.Tensor):
batch_tensors.append(torch.stack(x))
else:
batch_tensors.append(torch.tensor(x, dtype=torch.long))
return batch_tensors
def _get_word_split_index(tokens, st, end):
split_idx = []
i = st
while i < end:
if (not tokens[i].startswith('##')) or (i == st):
split_idx.append(i)
i += 1
split_idx.append(end)
return split_idx
def _expand_whole_word(tokens, st, end):
new_st, new_end = st, end
while (new_st >= 0) and tokens[new_st].startswith('##'):
new_st -= 1
while (new_end < len(tokens)) and tokens[new_end].startswith('##'):
new_end += 1
return new_st, new_end
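# Both helpers above key off the '##' WordPiece continuation prefix. A small illustrative
# check (the token list is made up) of what they return:
def _example_wordpiece_grouping():
    tokens = ['lay', '##out', 'reader', 'works']                 # hypothetical WordPiece output
    word_starts = _get_word_split_index(tokens, 0, len(tokens))
    # -> [0, 2, 3, 4]: indices 0, 2, 3 start words; the trailing 4 is the end bound.
    span = _expand_whole_word(tokens, 1, 2)
    # -> (0, 2): the continuation piece at index 1 is expanded left to the start of its word.
    return word_starts, span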
class Pipeline():
""" Pre-process Pipeline Class : callable """
def __init__(self):
super().__init__()
self.skipgram_prb = None
self.skipgram_size = None
self.pre_whole_word = None
self.mask_whole_word = None
self.word_subsample_prb = None
self.sp_prob = None
self.pieces_dir = None
self.vocab_words = None
self.pieces_threshold = 10
self.call_count = 0
self.offline_mode = False
self.skipgram_size_geo_list = None
self.span_same_mask = False
def __call__(self, instance):
raise NotImplementedError
class Preprocess4Seq2seqDecoder(Pipeline):
""" Pre-processing steps for pretraining transformer """
def __init__(self, vocab_words, indexer, max_len=512, max_tgt_length=128,
mode="s2s", pos_shift=False, source_type_id=0, target_type_id=1,
cls_token='[CLS]', sep_token='[SEP]', pad_token='[PAD]', layout_flag=False):
super().__init__()
self.max_len = max_len
self.vocab_words = vocab_words # vocabulary (sub)words
self.indexer = indexer # function from token to token index
self.max_len = max_len
self._tril_matrix = torch.tril(torch.ones((max_len, max_len), dtype=torch.long))
self.task_idx = 3 # relax projection layer for different tasks
assert mode in ("s2s", "l2r")
self.mode = mode
self.max_tgt_length = max_tgt_length
self.pos_shift = pos_shift
self.layout_flag = layout_flag
if layout_flag:
self.cls_token = [cls_token, 0, 0, 0, 0]
self.sep_token = [sep_token, 1000, 1000, 1000, 1000]
self.pad_token = [pad_token, 0, 0, 0, 0]
else:
self.cls_token = cls_token
self.sep_token = sep_token
self.pad_token = pad_token
self.source_type_id = source_type_id
self.target_type_id = target_type_id
self.cc = 0
def __call__(self, instance):
tokens_a, max_a_len = instance
# NOTE: must pad to the max src length
max_a_len = 511
padded_tokens_a = [self.cls_token] + tokens_a + [self.sep_token]
assert len(padded_tokens_a) <= max_a_len + 2
if max_a_len + 2 > len(padded_tokens_a):
padded_tokens_a += [self.pad_token] * \
(max_a_len + 2 - len(padded_tokens_a))
assert len(padded_tokens_a) == max_a_len + 2
max_len_in_batch = min(self.max_tgt_length + max_a_len + 2, self.max_len)
tokens = padded_tokens_a
segment_ids = [self.source_type_id] * (len(padded_tokens_a)) \
+ [self.target_type_id] * (max_len_in_batch - len(padded_tokens_a))
mask_qkv = None
position_ids = []
for i in range(len(tokens_a) + 2):
position_ids.append(i)
for i in range(len(tokens_a) + 2, max_a_len + 2):
position_ids.append(0)
for i in range(max_a_len + 2, max_len_in_batch):
position_ids.append(i - (max_a_len + 2) + len(tokens_a) + 2)
# Token Indexing
if not self.layout_flag:
input_ids = self.indexer(tokens)
else:
raw_text = [x[0] for x in tokens]
raw_text_ids = self.indexer(raw_text)
input_ids = [[i] + x[1:] for i, x in zip(raw_text_ids, tokens)]
self.cc += 1
if self.cc < 5:
if not self.layout_flag:
logger.info("Input src = %s" % " ".join(self.vocab_words[tk_id] for tk_id in input_ids))
else:
logger.info("Input src = %s" % " ".join(self.vocab_words[tk_id[0]] for tk_id in input_ids))
# Zero Padding
input_mask = torch.zeros(
max_len_in_batch, max_len_in_batch, dtype=torch.long)
if self.mode == "s2s":
input_mask[:, :len(tokens_a)+2].fill_(1)
else:
st, end = 0, len(tokens_a) + 2
input_mask[st:end, st:end].copy_(
self._tril_matrix[:end, :end])
input_mask[end:, :len(tokens_a)+2].fill_(1)
second_st, second_end = len(padded_tokens_a), max_len_in_batch
input_mask[second_st:second_end, second_st:second_end].copy_(
self._tril_matrix[:second_end-second_st, :second_end-second_st])
return input_ids, segment_ids, position_ids, input_mask, mask_qkv, self.task_idx
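# The "s2s" attention mask assembled in __call__ above lets every position attend to the
# whole source block while target positions attend causally among themselves. A minimal
# standalone sketch with tiny, illustrative lengths (not the real 511-token padding):
def _example_s2s_mask(src_len=3, total_len=6):
    import torch
    mask = torch.zeros(total_len, total_len, dtype=torch.long)
    mask[:, :src_len].fill_(1)  # every row can see the full source block
    tril = torch.tril(torch.ones(total_len, total_len, dtype=torch.long))
    mask[src_len:, src_len:].copy_(tril[:total_len - src_len, :total_len - src_len])
    return mask  # target rows are lower-triangular over the target block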
| EXA-1-master | exa/models/unilm-master/layoutreader/s2s_ft/s2s_loader.py |
import torch
import logging
from transformers.modeling_utils import cached_path, WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME
logger = logging.getLogger(__name__)
def get_checkpoint_from_transformer_cache(
archive_file, pretrained_model_name_or_path, pretrained_model_archive_map,
cache_dir, force_download, proxies, resume_download,
):
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir, force_download=force_download,
proxies=proxies, resume_download=resume_download)
except EnvironmentError:
if pretrained_model_name_or_path in pretrained_model_archive_map:
msg = "Couldn't reach server at '{}' to download pretrained weights.".format(
archive_file)
else:
msg = "Model name '{}' was not found in model name list ({}). " \
"We assumed '{}' was a path or url to model weight files named one of {} but " \
"couldn't find any such file at this path or url.".format(
pretrained_model_name_or_path,
', '.join(pretrained_model_archive_map.keys()),
archive_file,
[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME])
raise EnvironmentError(msg)
if resolved_archive_file == archive_file:
logger.info("loading weights file {}".format(archive_file))
else:
logger.info("loading weights file {} from cache at {}".format(
archive_file, resolved_archive_file))
return torch.load(resolved_archive_file, map_location='cpu')
def hf_roberta_to_hf_bert(state_dict):
logger.info(" * Convert Huggingface RoBERTa format to Huggingface BERT format * ")
new_state_dict = {}
for key in state_dict:
value = state_dict[key]
if key == 'roberta.embeddings.position_embeddings.weight':
value = value[2:]
if key == 'roberta.embeddings.token_type_embeddings.weight':
continue
if key.startswith('roberta'):
key = 'bert.' + key[8:]
elif key.startswith('lm_head'):
if 'layer_norm' in key or 'dense' in key:
key = 'cls.predictions.transform.' + key[8:]
else:
key = 'cls.predictions.' + key[8:]
key = key.replace('layer_norm', 'LayerNorm')
new_state_dict[key] = value
return new_state_dict
def hf_distilbert_to_hf_bert(state_dict):
logger.info(" * Convert Huggingface DistilBERT format to Huggingface BERT format * ")
new_state_dict = {}
for key in state_dict:
value = state_dict[key]
if key == 'roberta.embeddings.position_embeddings.weight':
value = value[2:]
if key == 'roberta.embeddings.token_type_embeddings.weight':
continue
if key.startswith('roberta'):
key = 'bert.' + key[8:]
elif key.startswith('lm_head'):
if 'layer_norm' in key or 'dense' in key:
key = 'cls.predictions.transform.' + key[8:]
else:
key = 'cls.predictions.' + key[8:]
key = key.replace('layer_norm', 'LayerNorm')
new_state_dict[key] = value
return new_state_dict
def hf_bert_to_hf_bert(state_dict):
# NOTE: all cls states are used for prediction,
# we predict the index so omit all pretrained states for prediction.
new_state_dict = {}
for key in state_dict:
value = state_dict[key]
if key.startswith('cls'):
# NOTE: all cls states are used for prediction,
# we predict the index so omit all pretrained states for prediction.
continue
new_state_dict[key] = value
return new_state_dict
def hf_layoutlm_to_hf_bert(state_dict):
logger.info(" * Convert Huggingface LayoutLM format to Huggingface BERT format * ")
new_state_dict = {}
for key in state_dict:
value = state_dict[key]
if key.startswith('layoutlm'):
key = 'bert.' + key[9:]
elif key.startswith('cls'):
# NOTE: all cls states are used for prediction,
# we predict the index so omit all pretrained states for prediction.
continue
new_state_dict[key] = value
return new_state_dict
state_dict_convert = {
'bert': hf_bert_to_hf_bert,
'unilm': hf_bert_to_hf_bert,
'minilm': hf_bert_to_hf_bert,
'layoutlm': hf_layoutlm_to_hf_bert,
'roberta': hf_roberta_to_hf_bert,
'xlm-roberta': hf_roberta_to_hf_bert,
'distilbert': hf_distilbert_to_hf_bert,
}
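# A hedged sketch of what the LayoutLM converter above does to checkpoint keys; the two keys
# below are representative examples, not a full checkpoint:
def _example_layoutlm_key_conversion():
    import torch
    toy_state_dict = {
        'layoutlm.embeddings.word_embeddings.weight': torch.zeros(2, 2),
        'cls.predictions.bias': torch.zeros(2),  # dropped: the prediction head is re-learned for index prediction
    }
    converted = hf_layoutlm_to_hf_bert(toy_state_dict)
    # -> {'bert.embeddings.word_embeddings.weight': tensor(...)}; the 'cls.*' entry is omitted.
    return converted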
| EXA-1-master | exa/models/unilm-master/layoutreader/s2s_ft/convert_state_dict.py |
# coding=utf-8
# The MIT License (MIT)
# Copyright (c) Microsoft Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Tokenization classes for UniLM."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
from transformers.tokenization_bert import BertTokenizer, whitespace_tokenize
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file':
{
'unilm-large-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm-large-cased-vocab.txt",
'unilm-base-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm-base-cased-vocab.txt",
'unilm1-large-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1-large-cased-vocab.txt",
'unilm1-base-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1-base-cased-vocab.txt",
'unilm1.2-base-uncased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1.2-base-uncased-vocab.txt"
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'unilm-large-cased': 512,
'unilm-base-cased': 512,
'unilm1-large-cased': 512,
'unilm1-base-cased': 512,
'unilm1.2-base-uncased': 512,
}
class UnilmTokenizer(BertTokenizer):
r"""
Constructs a UnilmTokenizer.
:class:`~transformers.UnilmTokenizer` is identical to BertTokenizer and runs end-to-end tokenization: punctuation splitting + wordpiece
Args:
vocab_file: Path to a one-wordpiece-per-line vocabulary file
do_lower_case: Whether to lower case the input. Only has an effect when do_wordpiece_only=False
do_basic_tokenize: Whether to do basic tokenization before wordpiece.
max_len: An artificial maximum length to truncate tokenized sequences to; Effective maximum length is always the
minimum of this value (if specified) and the underlying BERT model's sequence length.
never_split: List of tokens which will never be split during tokenization. Only has an effect when
do_wordpiece_only=False
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
class WhitespaceTokenizer(object):
def tokenize(self, text):
return whitespace_tokenize(text)
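# UnilmTokenizer only re-points the BertTokenizer vocab maps, so it is used the same way.
# A hedged usage sketch (downloads the registered vocab on first use; the model name is one
# of the entries defined above):
def _example_unilm_tokenizer():
    tokenizer = UnilmTokenizer.from_pretrained('unilm1.2-base-uncased')
    tokens = tokenizer.tokenize("layout reader reorders tokens")
    ids = tokenizer.convert_tokens_to_ids(tokens)
    return tokens, ids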
| EXA-1-master | exa/models/unilm-master/layoutreader/s2s_ft/tokenization_unilm.py |
# coding=utf-8
# The MIT License (MIT)
# Copyright (c) Microsoft Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
""" UniLM model configuration """
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import sys
from io import open
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
UNILM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'unilm-large-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm-large-cased-config.json",
'unilm-base-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm-base-cased-config.json",
'unilm1-large-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1-large-cased-config.json",
'unilm1-base-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1-base-cased-config.json",
'unilm1.2-base-uncased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1.2-base-uncased-config.json",
}
class UnilmConfig(PretrainedConfig):
r"""
:class:`~transformers.UnilmConfig` is the configuration class to store the configuration of a
`UnilmModel`.
Arguments:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `UnilmModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu", "swish" and "gelu_new" are supported.
        hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`UnilmModel`.
        initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
layer_norm_eps: The epsilon used by LayerNorm.
"""
pretrained_config_archive_map = UNILM_PRETRAINED_CONFIG_ARCHIVE_MAP
def __init__(self,
vocab_size=28996,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=6,
initializer_range=0.02,
layer_norm_eps=1e-12,
source_type_id=0,
target_type_id=1,
**kwargs):
super(UnilmConfig, self).__init__(**kwargs)
if isinstance(vocab_size, str) or (sys.version_info[0] == 2
and isinstance(vocab_size, unicode)):
with open(vocab_size, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size, int):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.source_type_id = source_type_id
self.target_type_id = target_type_id
else:
raise ValueError("First argument must be either a vocabulary size (int)"
" or the path to a pretrained model config file (str)")
| EXA-1-master | exa/models/unilm-master/layoutreader/s2s_ft/configuration_unilm.py |
from __future__ import absolute_import, division, print_function
import logging
import os
import json
import random
import glob
import re
import torch
import tqdm
import torch.utils.data
logger = logging.getLogger(__name__)
class Seq2seqDatasetForBert(torch.utils.data.Dataset):
def __init__(
self, features, max_source_len, max_target_len,
vocab_size, cls_id, sep_id, pad_id, mask_id,
random_prob, keep_prob, offset, num_training_instances,
span_len=1, span_prob=1.0):
self.features = features
self.max_source_len = max_source_len
self.max_target_len = max_target_len
self.offset = offset
if offset > 0:
logger.info(" **** Set offset %d in Seq2seqDatasetForBert **** ", offset)
self.cls_id = cls_id
self.sep_id = sep_id
self.pad_id = pad_id
self.random_prob = random_prob
self.keep_prob = keep_prob
self.mask_id = mask_id
self.vocab_size = vocab_size
self.num_training_instances = num_training_instances
self.span_len = span_len
self.span_prob = span_prob
def __len__(self):
return int(self.num_training_instances)
def __trunk(self, ids, max_len):
if len(ids) > max_len - 1:
ids = ids[:max_len - 1]
ids = ids + [self.sep_id]
return ids
def __pad(self, ids, max_len):
if len(ids) < max_len:
return ids + [self.pad_id] * (max_len - len(ids))
else:
assert len(ids) == max_len
return ids
def __getitem__(self, idx):
idx = (self.offset + idx) % len(self.features)
feature = self.features[idx]
source_ids = self.__trunk([self.cls_id] + feature["source_ids"], self.max_source_len)
target_ids = self.__trunk(feature["target_ids"], self.max_target_len)
pseudo_ids = []
for tk_id in target_ids:
p = random.random()
if p < self.keep_prob:
pseudo_ids.append(tk_id)
elif p < self.keep_prob + self.random_prob:
pseudo_ids.append(random.randint(0, self.vocab_size - 1))
else:
pseudo_ids.append(self.mask_id)
num_source_tokens = len(source_ids)
num_target_tokens = len(target_ids)
source_ids = self.__pad(source_ids, self.max_source_len)
target_ids = self.__pad(target_ids, self.max_target_len)
pseudo_ids = self.__pad(pseudo_ids, self.max_target_len)
if self.span_len > 1:
span_ids = []
span_id = 1
while len(span_ids) < num_target_tokens:
p = random.random()
if p < self.span_prob:
span_len = random.randint(2, self.span_len)
span_len = min(span_len, num_target_tokens - len(span_ids))
else:
span_len = 1
span_ids.extend([span_id] * span_len)
span_id += 1
span_ids = self.__pad(span_ids, self.max_target_len)
return source_ids, target_ids, pseudo_ids, num_source_tokens, num_target_tokens, span_ids
else:
return source_ids, target_ids, pseudo_ids, num_source_tokens, num_target_tokens
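# The pseudo-target construction in __getitem__ above is a per-token three-way choice
# (keep the gold token / substitute a random id / substitute [MASK]). A hedged standalone
# sketch of the same sampling rule; the probabilities are illustrative, not the repo's:
def _example_pseudo_ids(target_ids, vocab_size, mask_id, keep_prob=0.1, random_prob=0.1):
    import random
    pseudo = []
    for tk_id in target_ids:
        p = random.random()
        if p < keep_prob:                  # keep the gold token
            pseudo.append(tk_id)
        elif p < keep_prob + random_prob:  # replace with a random vocabulary id
            pseudo.append(random.randint(0, vocab_size - 1))
        else:                              # otherwise feed [MASK]
            pseudo.append(mask_id)
    return pseudo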
# DONE: finish this!!! the 2D input id settings.
class Seq2seqDatasetForLayoutlm(torch.utils.data.Dataset):
def __init__(
self, features, max_source_len, max_target_len,
vocab_size, cls_id, sep_id, pad_id, mask_id,
random_prob, keep_prob, offset, num_training_instances, layout_flag=True,
span_len=1, span_prob=1.0):
self.layout_flag = layout_flag
self.features = features
self.max_source_len = max_source_len
self.max_target_len = max_target_len
self.offset = offset
if offset > 0:
logger.info(" **** Set offset %d in Seq2seqDatasetForBert **** ", offset)
self.cls_id = cls_id
self.sep_id = sep_id
self.pad_id = pad_id
self.random_prob = random_prob
self.keep_prob = keep_prob
self.mask_id = mask_id
self.vocab_size = vocab_size
self.num_training_instances = num_training_instances
self.span_len = span_len
self.span_prob = span_prob
self.index_sp_id = 0
def __len__(self):
return int(self.num_training_instances)
def __clip_index(self, ids):
replace_value = 0
for i in range(len(ids)):
if ids[i] > self.max_source_len - 1:
ids[i] = replace_value
return ids
def __trunk(self, ids, max_len, simple=False, value=None):
trunk_value = value if value is not None else self.sep_id
if len(ids) > max_len - 1:
ids = ids[:max_len - 1]
if simple:
ids = ids + [trunk_value]
else:
ids = ids + [[trunk_value, 1000, 1000, 1000, 1000]]
return ids
def __pad(self, ids, max_len, simple=False, value=None):
pad_value = value if value is not None else self.pad_id
if len(ids) < max_len:
if simple:
return ids + [pad_value] * (max_len - len(ids))
else:
return ids + [[pad_value, 0, 0, 0, 0]] * (max_len - len(ids))
else:
assert len(ids) == max_len
return ids
def __getitem__(self, idx):
if self.layout_flag:
return self.__getitem_layout__(idx)
else:
return self.__getitem_bert__(idx)
def __getitem_bert__(self, idx):
idx = (self.offset + idx) % len(self.features)
feature = self.features[idx]
source_ids = self.__trunk([self.cls_id] + feature["source_ids"], self.max_source_len, simple=True)
target_ids = self.__trunk(feature["target_ids"], self.max_target_len, simple=True)
target_index = self.__trunk(feature['target_index'], self.max_target_len, simple=True, value=self.index_sp_id)
pseudo_ids = []
for tk_id in target_ids:
p = random.random()
if p < self.keep_prob:
pseudo_ids.append(tk_id)
elif p < self.keep_prob + self.random_prob:
pseudo_ids.append(random.randint(0, self.vocab_size - 1))
else:
pseudo_ids.append(self.mask_id)
num_source_tokens = len(source_ids)
num_target_tokens = len(target_ids)
source_ids = self.__pad(source_ids, self.max_source_len, simple=True)
target_ids = self.__pad(target_ids, self.max_target_len, simple=True)
pseudo_ids = self.__pad(pseudo_ids, self.max_target_len, simple=True)
target_index = self.__pad(target_index, self.max_target_len, simple=True, value=self.index_sp_id)
target_index = self.__clip_index(target_index)
if self.span_len > 1:
span_ids = []
span_id = 1
while len(span_ids) < num_target_tokens:
p = random.random()
if p < self.span_prob:
span_len = random.randint(2, self.span_len)
span_len = min(span_len, num_target_tokens - len(span_ids))
else:
span_len = 1
span_ids.extend([span_id] * span_len)
span_id += 1
span_ids = self.__pad(span_ids, self.max_target_len)
return source_ids, target_ids, pseudo_ids, num_source_tokens, num_target_tokens, span_ids, target_index
else:
return source_ids, target_ids, pseudo_ids, num_source_tokens, num_target_tokens, target_index
def __getitem_layout__(self, idx):
# TODO: how to initialize the random and masked tokens' pos emb
# Simple Solution: only mask the text
idx = (self.offset + idx) % len(self.features)
feature = self.features[idx]
source_ids = self.__trunk([[self.cls_id, 0, 0, 0, 0]] + feature["source_ids"], self.max_source_len)
target_ids = self.__trunk(feature["target_ids"], self.max_target_len)
target_index = self.__trunk(feature['target_index'], self.max_target_len, simple=True, value=self.index_sp_id)
pseudo_ids = []
for tk_id in target_ids:
p = random.random()
if p < self.keep_prob:
pseudo_ids.append(tk_id)
elif p < self.keep_prob + self.random_prob:
pseudo_ids.append([random.randint(0, self.vocab_size - 1)] + [0, 0, 0, 0]) # tk_id[1:])
else:
pseudo_ids.append([self.mask_id] + [0, 0, 0, 0]) # tk_id[1:])
num_source_tokens = len(source_ids)
num_target_tokens = len(target_ids)
source_ids = self.__pad(source_ids, self.max_source_len)
target_ids = self.__pad(target_ids, self.max_target_len)
pseudo_ids = self.__pad(pseudo_ids, self.max_target_len)
target_index = self.__pad(target_index, self.max_target_len, simple=True, value=self.index_sp_id)
target_index = self.__clip_index(target_index)
if self.span_len > 1:
span_ids = []
span_id = 1
while len(span_ids) < num_target_tokens:
p = random.random()
if p < self.span_prob:
span_len = random.randint(2, self.span_len)
span_len = min(span_len, num_target_tokens - len(span_ids))
else:
span_len = 1
span_ids.extend([span_id] * span_len)
span_id += 1
span_ids = self.__pad(span_ids, self.max_target_len)
return source_ids, target_ids, pseudo_ids, num_source_tokens, num_target_tokens, span_ids, target_index
else:
return source_ids, target_ids, pseudo_ids, num_source_tokens, num_target_tokens, target_index
def batch_list_to_batch_tensors(batch):
batch_tensors = []
for x in zip(*batch):
if isinstance(x[0], torch.Tensor):
batch_tensors.append(torch.stack(x))
else:
batch_tensors.append(torch.tensor(x, dtype=torch.long))
return batch_tensors
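# batch_list_to_batch_tensors is intended as a DataLoader collate_fn, e.g.
# DataLoader(dataset, batch_size=..., collate_fn=batch_list_to_batch_tensors).
# A hedged sketch on a toy list of (source_ids, target_ids) tuples:
def _example_batch_collation():
    toy_batch = [
        ([1, 2, 3], [7, 8]),  # example 1
        ([4, 5, 6], [9, 0]),  # example 2
    ]
    tensors = batch_list_to_batch_tensors(toy_batch)
    # tensors[0] has shape (2, 3) and tensors[1] has shape (2, 2), both torch.long.
    return tensors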
def get_max_epoch_model(output_dir):
fn_model_list = glob.glob(os.path.join(output_dir, "model.*.bin"))
fn_optim_list = glob.glob(os.path.join(output_dir, "optim.*.bin"))
if (not fn_model_list) or (not fn_optim_list):
return None
os.path.basename(output_dir)
both_set = set([int(os.path.basename(fn).split('.')[1]) for fn in fn_model_list]
) & set([int(os.path.basename(fn).split('.')[1]) for fn in fn_optim_list])
if both_set:
return max(both_set)
else:
return None
def load_and_cache_examples(
example_file, tokenizer, local_rank, cached_features_file, shuffle=True):
# Make sure only the first process in distributed training process the dataset, and the others will use the cache
if local_rank not in [-1, 0]:
torch.distributed.barrier()
if cached_features_file is not None and os.path.exists(cached_features_file):
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", example_file)
examples = []
with open(example_file, mode="r", encoding="utf-8") as reader:
for i, line in enumerate(reader):
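                # NOTE: only the first 100 lines of the example file are loaded below.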
if i == 100:
break
examples.append(json.loads(line))
features = []
for example in tqdm.tqdm(examples):
if isinstance(example["src"], list):
source_tokens = example["src"]
target_tokens = example["tgt"]
else:
source_tokens = tokenizer.tokenize(example["src"])
target_tokens = tokenizer.tokenize(example["tgt"])
features.append({
"source_ids": tokenizer.convert_tokens_to_ids(source_tokens),
"target_ids": tokenizer.convert_tokens_to_ids(target_tokens),
})
if shuffle:
random.shuffle(features)
if local_rank in [-1, 0] and cached_features_file is not None:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
# Make sure only the first process in distributed training process the dataset, and the others will use the cache
if local_rank == 0:
torch.distributed.barrier()
return features
def load_and_cache_line_order_examples(
example_path, tokenizer, local_rank, cached_features_file, max_src_length=1024,
layout_flag=True, shuffle=True,
src_shuffle_rate=0,
file_info_flag=False,
):
# Make sure only the first process in distributed training process the dataset, and the others will use the cache
if local_rank not in [-1, 0]:
torch.distributed.barrier()
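    # NOTE: the trailing `and False` below disables loading from the feature cache.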
if cached_features_file is not None and os.path.exists(cached_features_file) and False:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset at %s", example_path)
examples = []
with open(example_path, 'r') as layout_reader:
logger.info(f'Start loading {example_path}')
for i, line in enumerate(layout_reader):
examples.append(json.loads(line))
features = []
for layout in tqdm.tqdm(examples):
bleu = layout['bleu']
if random.random() < src_shuffle_rate:
# print('Random!!!')
                # DONE: when shuffling the source, the target index list must be remapped consistently as well
src_layout = layout['src']
tgt_index = layout['tgt_index']
source_length = len(src_layout)
shuffle_index = list(range(source_length))
random.shuffle(shuffle_index)
shuffle_layout = ['' for _ in range(source_length)]
for i, j in enumerate(shuffle_index):
# NOTE: map i-th token to j-th token
shuffle_layout[j] = src_layout[i]
shuffle_target_index = [shuffle_index[i] for i in tgt_index]
layout['tgt_index'] = shuffle_target_index
layout['src'] = shuffle_layout
mask = tokenizer.mask_token_id
src_ids = [tokenizer.convert_tokens_to_ids([str(tmp_i)])[:1] + src_layout for tmp_i, src_layout in enumerate(layout['src'])]
tgt_ids = [tokenizer.convert_tokens_to_ids([str(tmp_i)])[:1] + tgt_layout for tmp_i, tgt_layout in enumerate(layout['tgt'])]
tgt_index = layout['tgt_index']
feature = {
"source_ids": src_ids,
"target_ids": tgt_ids,
"target_index": tgt_index,
'bleu': bleu
}
if file_info_flag:
file_info = {'original_filename': layout['filename'], 'filename': layout['filename'],
'page_idx': 0}
feature['file_info'] = file_info
features.append(feature)
if shuffle:
random.shuffle(features)
if local_rank in [-1, 0] and cached_features_file is not None:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
# Make sure only the first process in distributed training process the dataset, and the others will use the cache
if local_rank == 0:
torch.distributed.barrier()
return features
def load_and_cache_layoutlm_examples(
example_path, tokenizer, local_rank, cached_features_file, max_src_length=1024,
layout_flag=True, shuffle=True,
src_shuffle_rate=0,
file_info_flag=False
):
# Make sure only the first process in distributed training process the dataset, and the others will use the cache
if local_rank not in [-1, 0]:
torch.distributed.barrier()
if cached_features_file is not None and os.path.exists(cached_features_file):
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset at %s", example_path)
examples = []
if os.path.isdir(example_path):
text_files = glob.glob(f'{example_path}/*text*.json')
layout_files = [re.sub('text|txt', 'layout', x, 1) for x in text_files]
else:
text_files = [example_path]
layout_files = [re.sub('text|txt', 'layout', example_path, 1)]
for text_file, layout_file in zip(text_files, layout_files):
with open(text_file, mode='r', encoding='utf-8') as text_reader, \
open(layout_file, mode='r', encoding='utf-8') as layout_reader:
logger.info(f'Start loading {text_file}')
for i, (text_line, layout_line) in enumerate(zip(text_reader, layout_reader)):
if (i + 1) % 10000 == 0:
logger.info(f'{i + 1} lines ...')
examples.append((json.loads(text_line), json.loads(layout_line)))
features = []
def tokenize_text_and_layout_src(_text, _layout, _layout_flag):
ret = []
index_split = {}
words = _text.split()
# note: (OLD) the index should start from 1: 0-the cls token in src
# note: (NEW) we need to remove the src embedding's CLS SEP token so we can still start from 0
            # note: (NEWER) we need at least one blank position for the ignore index in the loss function (we use sep's index)
# NOTE: (NEWER-ER) 1 for all padding tgt index
new_token_index = 1 # first ordinary index
for i, (word, box) in enumerate(zip(words, _layout)):
if (not box[2] >= box[0]) or (not box[3] >= box[1]):
continue
tokens = tokenizer.tokenize(word)
tokens = tokenizer.convert_tokens_to_ids(tokens)
new_token_ids = []
for token in tokens:
if _layout_flag:
ret.append([token] + box)
else:
ret.append(token)
new_token_ids.append(new_token_index)
new_token_index += 1
index_split[i] = new_token_ids
return ret, index_split
def tokenize_text_and_layout_tgt(_text, _layout, _index, _index_split, _layout_flag):
ret = []
ret_index = []
words = _text.split()
for word, box, i in zip(words, _layout, _index):
if (not box[2] >= box[0]) or (not box[3] >= box[1]):
continue
tokens = tokenizer.tokenize(word)
tokens = tokenizer.convert_tokens_to_ids(tokens)
for token, ii in zip(tokens, _index_split[i]):
if _layout_flag:
ret.append([token] + box)
else:
ret.append(token)
ii = min(ii, max_src_length - 1)
ret_index.append(ii)
return ret, ret_index
for text, layout in tqdm.tqdm(examples):
if 'bleu' in text:
bleu = text['bleu']
else:
bleu = 0
if random.random() < src_shuffle_rate:
# print('Random!!!')
                # DONE: when shuffling the source, the target index list must be remapped consistently as well
src_text = text['src']
src_layout = layout['src']
tgt_index = text['tgt_index']
src_text = src_text.split()
source_length = len(src_text)
shuffle_index = list(range(source_length))
random.shuffle(shuffle_index)
shuffle_text = ['' for _ in range(source_length)]
shuffle_layout = ['' for _ in range(source_length)]
for i, j in enumerate(shuffle_index):
# NOTE: map i-th token to j-th token
shuffle_text[j] = src_text[i]
shuffle_layout[j] = src_layout[i]
shuffle_target_index = [shuffle_index[i] for i in tgt_index]
text['src'] = ' '.join(shuffle_text)
text['tgt_index'] = shuffle_target_index
layout['src'] = shuffle_layout
src_ids, src_index_split = tokenize_text_and_layout_src(text['src'], layout['src'],
_layout_flag=layout_flag)
tgt_ids, tgt_index = tokenize_text_and_layout_tgt(text['tgt'], layout['tgt'], text['tgt_index'],
src_index_split, _layout_flag=layout_flag)
feature = {
"source_ids": src_ids,
"target_ids": tgt_ids,
"target_index": tgt_index,
'bleu': bleu
}
if file_info_flag:
file_info = {'original_filename': text['original_filename'], 'filename': text['filename'], 'page_idx': text['page_idx']}
feature['file_info'] = file_info
features.append(feature)
if shuffle:
random.shuffle(features)
if local_rank in [-1, 0] and cached_features_file is not None:
if not os.path.exists(os.path.dirname(cached_features_file)):
os.makedirs(os.path.dirname(cached_features_file))
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
# Make sure only the first process in distributed training process the dataset, and the others will use the cache
if local_rank == 0:
torch.distributed.barrier()
return features
def convert_src_layout_inputs_to_tokens(inputs, converter, max_src_length, layout_flag=True):
ret = []
if not layout_flag:
for line in inputs:
ret.append(converter(line["source_ids"])[: max_src_length])
else:
for line in inputs:
raw_text_ids = [x[0] for x in line['source_ids']]
raw_text = converter(raw_text_ids)
new_line = [[t] + x[1:] for t, x in zip(raw_text, line['source_ids'])][: max_src_length]
ret.append(new_line)
return ret
def convert_tgt_layout_inputs_to_tokens(inputs, converter, max_tgt_length, layout_flag=True):
ret = []
if not layout_flag:
for line in inputs:
ret.append(converter(line["target_ids"])[: max_tgt_length])
else:
for line in inputs:
raw_text_ids = [x[0] for x in line['target_ids']]
ret.append(converter(raw_text_ids)[: max_tgt_length])
return ret
def get_tokens_from_src_and_index(src, index, modifier=None):
result = []
for i in index:
i = modifier(i)
i = min(i, len(src) - 1)
if isinstance(src[i], list):
result.append(src[i][0])
else:
result.append(src[i])
return result
def get_layout_from_src_and_index(src, index, modifier=None):
result = []
s = set()
for i in index:
i = modifier(i)
i = min(i, len(src) - 1)
layout = src[i][1:]
if repr(layout) not in s:
result.append(layout)
s.add(repr(layout))
return result
def get_everything_from_src_and_index(src, index, modifier=None):
result = []
for i in index:
i = modifier(i)
i = min(i, len(src) - 1)
result.append(src[i])
return result
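# The index-lookup helpers above map predicted target indices back onto the source sequence.
# A hedged sketch for the layout case, where each source entry is [token_id, x0, y0, x1, y1];
# the identity `modifier` is an assumption (callers may pass an offset-correcting function):
def _example_index_lookup():
    src = [[101, 0, 0, 0, 0],        # CLS-like entry at position 0
           [2001, 10, 10, 50, 20],
           [2002, 60, 10, 90, 20]]
    predicted_index = [1, 2]         # hypothetical decoder output
    tokens = get_tokens_from_src_and_index(src, predicted_index, modifier=lambda x: x)
    # -> [2001, 2002]; for layout inputs only the token id (first element) is returned.
    return tokens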
| EXA-1-master | exa/models/unilm-master/layoutreader/s2s_ft/utils.py |
# coding=utf-8
# The MIT License (MIT)
# Copyright (c) Microsoft Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Tokenization classes for MiniLM."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
from transformers.tokenization_bert import BertTokenizer, whitespace_tokenize
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file':
{
'minilm-l12-h384-uncased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/minilm-l12-h384-uncased-vocab.txt",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'minilm-l12-h384-uncased': 512,
}
class MinilmTokenizer(BertTokenizer):
r"""
Constructs a MinilmTokenizer.
:class:`~transformers.MinilmTokenizer` is identical to BertTokenizer and runs end-to-end tokenization: punctuation splitting + wordpiece
Args:
vocab_file: Path to a one-wordpiece-per-line vocabulary file
do_lower_case: Whether to lower case the input. Only has an effect when do_wordpiece_only=False
do_basic_tokenize: Whether to do basic tokenization before wordpiece.
max_len: An artificial maximum length to truncate tokenized sequences to; Effective maximum length is always the
minimum of this value (if specified) and the underlying BERT model's sequence length.
never_split: List of tokens which will never be split during tokenization. Only has an effect when
do_wordpiece_only=False
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
class WhitespaceTokenizer(object):
def tokenize(self, text):
return whitespace_tokenize(text)
| EXA-1-master | exa/models/unilm-master/layoutreader/s2s_ft/tokenization_minilm.py |
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import os
import torch
from torch import nn
from torch.nn.modules.loss import _Loss
import torch.nn.functional as F
from transformers import BertConfig
from transformers.modeling_bert import \
BertPreTrainedModel, BertSelfOutput, BertIntermediate, BertOutput, BertPredictionHeadTransform
from transformers.modeling_roberta import ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
from transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_MAP
from transformers.modeling_xlm_roberta import XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
from s2s_ft.config import BertForSeq2SeqConfig
from s2s_ft.convert_state_dict import get_checkpoint_from_transformer_cache, state_dict_convert
logger = logging.getLogger(__name__)
BertLayerNorm = torch.nn.LayerNorm
UNILM_PRETRAINED_MODEL_ARCHIVE_MAP = {
'unilm-base-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1-base-cased.bin",
'unilm-large-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1-large-cased.bin",
'unilm1-base-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1-base-cased.bin",
'unilm1-large-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1-large-cased.bin",
'unilm1.2-base-uncased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1.2-base-uncased.bin"
}
MINILM_PRETRAINED_MODEL_ARCHIVE_MAP = {
'minilm-l12-h384-uncased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/minilm-l12-h384-uncased.bin",
}
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_MAP = {
'layoutlm-base-uncased': 'https://huggingface.co/microsoft/layoutlm-base-uncased/resolve/main/pytorch_model.bin',
'layoutlm-large-uncased': 'https://huggingface.co/microsoft/layoutlm-large-uncased/resolve/main/pytorch_model.bin'
}
LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'layoutlm-base-uncased': 'https://huggingface.co/microsoft/layoutlm-base-uncased/resolve/main/config.json',
'layoutlm-large-uncased': 'https://huggingface.co/microsoft/layoutlm-large-uncased/resolve/main/config.json'
}
class LayoutlmConfig(BertConfig):
pretrained_config_archive_map = LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP
model_type = "bert"
def __init__(self, max_2d_position_embeddings=1024, **kwargs):
super().__init__(**kwargs)
self.max_2d_position_embeddings = max_2d_position_embeddings
class BertPreTrainedForSeq2SeqModel(BertPreTrainedModel):
""" An abstract class to handle weights initialization and
    a simple interface for downloading and loading pretrained models.
"""
config_class = BertForSeq2SeqConfig
supported_convert_pretrained_model_archive_map = {
"bert": BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
"roberta": ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
"xlm-roberta": XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
"unilm": UNILM_PRETRAINED_MODEL_ARCHIVE_MAP,
"minilm": MINILM_PRETRAINED_MODEL_ARCHIVE_MAP,
"layoutlm": LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_MAP,
}
base_model_prefix = "bert_for_seq2seq"
pretrained_model_archive_map = {
**ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
**XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
**BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
**UNILM_PRETRAINED_MODEL_ARCHIVE_MAP,
**MINILM_PRETRAINED_MODEL_ARCHIVE_MAP,
**LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_MAP,
}
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, reuse_position_embedding=None,
*model_args, **kwargs):
model_type = kwargs.pop('model_type', None)
if model_type is not None and "state_dict" not in kwargs:
if model_type in cls.supported_convert_pretrained_model_archive_map:
pretrained_model_archive_map = cls.supported_convert_pretrained_model_archive_map[model_type]
if pretrained_model_name_or_path in pretrained_model_archive_map:
state_dict = get_checkpoint_from_transformer_cache(
archive_file=pretrained_model_archive_map[pretrained_model_name_or_path],
pretrained_model_name_or_path=pretrained_model_name_or_path,
pretrained_model_archive_map=pretrained_model_archive_map,
cache_dir=kwargs.get("cache_dir", None), force_download=kwargs.get("force_download", None),
proxies=kwargs.get("proxies", None), resume_download=kwargs.get("resume_download", None),
)
state_dict = state_dict_convert[model_type](state_dict)
kwargs["state_dict"] = state_dict
elif os.path.isfile(pretrained_model_name_or_path):
kwargs["state_dict"] = torch.load(pretrained_model_name_or_path, map_location='cpu')
if kwargs["state_dict"] is None:
logger.info("s2s-ft does't support the model !")
raise NotImplementedError()
config = kwargs["config"]
state_dict = kwargs["state_dict"]
# initialize new position embeddings (From Microsoft/UniLM)
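        # For example, with a 512-position checkpoint and config.max_position_embeddings == 1024:
        #   * reuse_position_embedding=True tiles the old rows: [0, 512) -> [0, 512) and again -> [512, 1024);
        #   * otherwise only rows [0, 512) are copied and the tail keeps its fresh normal initialization.
        # Shrinking (config smaller than the checkpoint) simply truncates the table.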
_k = 'bert.embeddings.position_embeddings.weight'
if _k in state_dict:
if config.max_position_embeddings > state_dict[_k].shape[0]:
logger.info("Resize > position embeddings !")
old_vocab_size = state_dict[_k].shape[0]
new_position_embedding = state_dict[_k].data.new_tensor(torch.ones(
size=(config.max_position_embeddings, state_dict[_k].shape[1])), dtype=torch.float)
new_position_embedding = nn.Parameter(data=new_position_embedding, requires_grad=True)
new_position_embedding.data.normal_(mean=0.0, std=config.initializer_range)
max_range = config.max_position_embeddings if reuse_position_embedding else old_vocab_size
shift = 0
while shift < max_range:
delta = min(old_vocab_size, max_range - shift)
new_position_embedding.data[shift: shift + delta, :] = state_dict[_k][:delta, :]
logger.info(" CP [%d ~ %d] into [%d ~ %d] " % (0, delta, shift, shift + delta))
shift += delta
state_dict[_k] = new_position_embedding.data
del new_position_embedding
elif config.max_position_embeddings < state_dict[_k].shape[0]:
logger.info("Resize < position embeddings !")
old_vocab_size = state_dict[_k].shape[0]
new_position_embedding = state_dict[_k].data.new_tensor(torch.ones(
size=(config.max_position_embeddings, state_dict[_k].shape[1])), dtype=torch.float)
new_position_embedding = nn.Parameter(data=new_position_embedding, requires_grad=True)
new_position_embedding.data.normal_(mean=0.0, std=config.initializer_range)
new_position_embedding.data.copy_(state_dict[_k][:config.max_position_embeddings, :])
state_dict[_k] = new_position_embedding.data
del new_position_embedding
return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
if config.type_vocab_size > 0:
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
else:
self.token_type_embeddings = None
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).expand(input_shape)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
embeddings = inputs_embeds + position_embeddings
if self.token_type_embeddings:
embeddings = embeddings + self.token_type_embeddings(token_type_ids)
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class LayoutlmEmbeddings(nn.Module):
def __init__(self, config):
super(LayoutlmEmbeddings, self).__init__()
self.only_layout_flag = config.layoutlm_only_layout
if not config.layoutlm_only_layout:
self.word_embeddings = nn.Embedding(
config.vocab_size, config.hidden_size, padding_idx=0
)
else:
self.word_embeddings = None
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size
)
self.x_position_embeddings = nn.Embedding(
config.max_2d_position_embeddings, config.hidden_size
)
self.y_position_embeddings = nn.Embedding(
config.max_2d_position_embeddings, config.hidden_size
)
self.h_position_embeddings = nn.Embedding(
config.max_2d_position_embeddings, config.hidden_size
)
self.w_position_embeddings = nn.Embedding(
config.max_2d_position_embeddings, config.hidden_size
)
if config.type_vocab_size > 0:
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
else:
self.token_type_embeddings = None
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
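    # `bbox` (see forward below) is a (batch, seq_len, 4) tensor of boxes [x0, y0, x1, y1] whose
    # coordinates are already bucketed to integers below config.max_2d_position_embeddings
    # (LayoutLM convention: coordinates normalized to roughly 0..1000 with a 1024-entry table).
    # x0/x1 index the x table, y0/y1 the y table, and the box width/height index the w/h tables.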
def forward(
self,
input_ids,
bbox,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
):
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(
seq_length, dtype=torch.long, device=input_ids.device
)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
h_position_embeddings = self.h_position_embeddings(
bbox[:, :, 3] - bbox[:, :, 1]
)
w_position_embeddings = self.w_position_embeddings(
bbox[:, :, 2] - bbox[:, :, 0]
)
position_embeddings = self.position_embeddings(position_ids)
embeddings = (
left_position_embeddings
+ upper_position_embeddings
+ right_position_embeddings
+ lower_position_embeddings
+ h_position_embeddings
+ w_position_embeddings
+ position_embeddings
# + token_type_embeddings
)
if not self.only_layout_flag:
words_embeddings = self.word_embeddings(input_ids)
embeddings = embeddings + words_embeddings
if self.token_type_embeddings:
embeddings = embeddings + self.token_type_embeddings(token_type_ids)
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.output_attentions = config.output_attentions
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def multi_head_attention(self, query, key, value, attention_mask):
query_layer = self.transpose_for_scores(query)
key_layer = self.transpose_for_scores(key)
value_layer = self.transpose_for_scores(value)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return (context_layer, attention_probs) if self.output_attentions else (context_layer,)
def forward(self, hidden_states, attention_mask=None, encoder_hidden_states=None, split_lengths=None):
mixed_query_layer = self.query(hidden_states)
if split_lengths:
assert not self.output_attentions
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
if encoder_hidden_states is not None:
mixed_key_layer = self.key(encoder_hidden_states)
mixed_value_layer = self.value(encoder_hidden_states)
else:
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
if split_lengths:
query_parts = torch.split(mixed_query_layer, split_lengths, dim=1)
key_parts = torch.split(mixed_key_layer, split_lengths, dim=1)
value_parts = torch.split(mixed_value_layer, split_lengths, dim=1)
key = None
value = None
outputs = []
sum_length = 0
for (query, _key, _value, part_length) in zip(query_parts, key_parts, value_parts, split_lengths):
key = _key if key is None else torch.cat((key, _key), dim=1)
value = _value if value is None else torch.cat((value, _value), dim=1)
sum_length += part_length
outputs.append(self.multi_head_attention(
query, key, value, attention_mask[:, :, sum_length - part_length: sum_length, :sum_length]
)[0])
outputs = (torch.cat(outputs, dim=1), )
else:
outputs = self.multi_head_attention(
mixed_query_layer, mixed_key_layer, mixed_value_layer, attention_mask)
return outputs
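# When `split_lengths` is given (source/target/pseudo lengths in this codebase), the query parts are
# processed left to right while the key/value tensors grow to cover every part seen so far, and the
# matching slice of the attention mask is applied.  The result is identical to full self-attention
# under the seq2seq mask, but a later part never scores keys it is not allowed to attend to.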
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, hidden_states, attention_mask=None, encoder_hidden_states=None, split_lengths=None):
self_outputs = self.self(
hidden_states, attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states, split_lengths=split_lengths)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask=None, split_lengths=None):
self_attention_outputs = self.attention(
hidden_states, attention_mask, split_lengths=split_lengths)
attention_output = self_attention_outputs[0]
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
outputs = (layer_output,) + self_attention_outputs[1:]
return outputs
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask=None, split_lengths=None):
all_hidden_states = ()
all_attentions = ()
for i, layer_module in enumerate(self.layer):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(hidden_states, attention_mask, split_lengths=split_lengths)
hidden_states = layer_outputs[0]
if self.output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
return outputs # last-layer hidden state, (all hidden states), (all attentions)
class BertModel(BertPreTrainedForSeq2SeqModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
Sequence of hidden-states at the output of the last layer of the model.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
def __init__(self, config):
super(BertModel, self).__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
def forward(self, input_ids=None, attention_mask=None, token_type_ids=None,
position_ids=None, inputs_embeds=None, split_lengths=None, return_emb=False):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if attention_mask.dim() == 2:
extended_attention_mask = attention_mask[:, None, None, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
embedding_output = self.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
encoder_outputs = self.encoder(
embedding_output, attention_mask=extended_attention_mask, split_lengths=split_lengths)
sequence_output = encoder_outputs[0]
outputs = (sequence_output, ) + encoder_outputs[1:] # add hidden_states and attentions if they are here
if return_emb:
outputs += (embedding_output,)
        return outputs  # sequence_output, (hidden_states), (attentions), (embedding_output if return_emb)
class LayoutlmModel(BertPreTrainedForSeq2SeqModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
Sequence of hidden-states at the output of the last layer of the model.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
def __init__(self, config):
super(LayoutlmModel, self).__init__(config)
self.config = config
self.embeddings = LayoutlmEmbeddings(config)
self.encoder = BertEncoder(config)
def forward(self,
input_ids=None,
bbox=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
split_lengths=None,
return_emb=False):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if attention_mask.dim() == 2:
extended_attention_mask = attention_mask[:, None, None, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# embedding_output = self.embeddings(
# input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
embedding_output = self.embeddings(
input_ids, bbox, position_ids=position_ids, token_type_ids=token_type_ids
)
encoder_outputs = self.encoder(
embedding_output, attention_mask=extended_attention_mask, split_lengths=split_lengths)
sequence_output = encoder_outputs[0]
outputs = (sequence_output, ) + encoder_outputs[1:] # add hidden_states and attentions if they are here
if return_emb:
outputs += (embedding_output,)
        return outputs  # sequence_output, (hidden_states), (attentions), (embedding_output if return_emb)
class LabelSmoothingLoss(_Loss):
"""
With label smoothing,
KL-divergence between q_{smoothed ground truth prob.}(w)
and p_{prob. computed by model}(w) is minimized.
"""
def __init__(self, label_smoothing=0, tgt_size=0, ignore_index=0, size_average=None, reduce=None, reduction='mean'):
assert 0.0 < label_smoothing <= 1.0
self.ignore_index = ignore_index
super(LabelSmoothingLoss, self).__init__(
size_average=size_average, reduce=reduce, reduction=reduction)
assert label_smoothing > 0
assert tgt_size > 0
smoothing_value = label_smoothing / (tgt_size - 2)
one_hot = torch.full((tgt_size,), smoothing_value)
one_hot[self.ignore_index] = 0
self.register_buffer('one_hot', one_hot.unsqueeze(0))
self.confidence = 1.0 - label_smoothing
self.tgt_size = tgt_size
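    # e.g. label_smoothing=0.1 and tgt_size=V: the gold index gets probability 0.9, ignore_index gets 0,
    # and every other index gets 0.1 / (V - 2).  forward() expects log-probabilities and returns the
    # per-position KL divergence (positions whose target equals ignore_index contribute 0).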
def forward(self, output, target):
"""
output (FloatTensor): batch_size * num_pos * n_classes
target (LongTensor): batch_size * num_pos
"""
assert self.tgt_size == output.size(2)
batch_size, num_pos = target.size(0), target.size(1)
output = output.view(-1, self.tgt_size)
target = target.view(-1)
model_prob = self.one_hot.float().repeat(target.size(0), 1)
model_prob.scatter_(1, target.unsqueeze(1), self.confidence)
model_prob.masked_fill_((target == self.ignore_index).unsqueeze(1), 0)
return F.kl_div(output, model_prob, reduction='none').view(batch_size, num_pos, -1).sum(2)
class LayoutlmSPLMPredictionHead(nn.Module):
def __init__(self, config, src_len):
super(LayoutlmSPLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
self.bias = nn.Parameter(torch.zeros(src_len))
def forward(self, hidden_states, src_emb):
hidden_states = self.transform(hidden_states)
hidden_states = torch.einsum('btf,bsf->bts', hidden_states, src_emb) + self.bias
# hidden_states = F.linear(hidden_states, weight=src_emb, bias=self.bias)
return hidden_states
class LayoutlmSPOnlyMLMHead(nn.Module):
def __init__(self, config, src_len):
super(LayoutlmSPOnlyMLMHead, self).__init__()
self.predictions = LayoutlmSPLMPredictionHead(config, src_len=src_len)
def forward(self, sequence_output, src_emb):
prediction_scores = self.predictions(sequence_output, src_emb=src_emb)
return prediction_scores
class LayoutlmForSequenceToSequence(BertPreTrainedForSeq2SeqModel):
def __init__(self, config):
super(LayoutlmForSequenceToSequence, self).__init__(config)
if config.base_model_type == 'layoutlm':
self.bert = LayoutlmModel(config)
else:
self.bert = BertModel(config)
self.cls = LayoutlmSPOnlyMLMHead(config, src_len=config.max_source_length)
self.init_weights()
self.log_softmax = nn.LogSoftmax()
# setattr(config, 'label_smoothing', 0.1)
self.source_type_id = config.source_type_id
self.target_type_id = config.target_type_id
if config.label_smoothing > 0:
self.crit_mask_lm_smoothed = LabelSmoothingLoss(
config.label_smoothing, config.max_source_length, ignore_index=0, reduction='none')
self.crit_mask_lm = None
else:
self.crit_mask_lm_smoothed = None
self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none', ignore_index=0)
@staticmethod
def create_mask_and_position_ids(num_tokens, max_len, offset=None):
base_position_matrix = torch.arange(
0, max_len, dtype=num_tokens.dtype, device=num_tokens.device).view(1, -1)
mask = (base_position_matrix < num_tokens.view(-1, 1)).type_as(num_tokens)
if offset is not None:
base_position_matrix = base_position_matrix + offset.view(-1, 1)
position_ids = base_position_matrix * mask
return mask, position_ids
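    # Worked example for create_mask_and_position_ids (illustrative): num_tokens = [3, 5], max_len = 6
    #   mask         -> [[1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 1, 0]]
    #   position_ids -> [[0, 1, 2, 0, 0, 0], [0, 1, 2, 3, 4, 0]]
    # With offset = num_source_tokens, target positions continue right after the source block.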
@staticmethod
def create_attention_mask(source_mask, target_mask, source_position_ids, target_span_ids):
weight = torch.cat((torch.zeros_like(source_position_ids), target_span_ids, -target_span_ids), dim=1)
from_weight = weight.unsqueeze(-1)
to_weight = weight.unsqueeze(1)
true_tokens = (0 <= to_weight) & (torch.cat((source_mask, target_mask, target_mask), dim=1) == 1).unsqueeze(1)
true_tokens_mask = (from_weight >= 0) & true_tokens & (to_weight <= from_weight)
pseudo_tokens_mask = (from_weight < 0) & true_tokens & (-to_weight > from_weight)
pseudo_tokens_mask = pseudo_tokens_mask | ((from_weight < 0) & (to_weight == from_weight))
return (true_tokens_mask | pseudo_tokens_mask).type_as(source_mask)
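    # The concatenated input is [source; target; pseudo].  create_attention_mask encodes the rules:
    #   * source tokens attend to all non-padding source tokens,
    #   * target token t attends to the source plus target tokens <= t (causal), and
    #   * pseudo token t attends to the source, target tokens < t, and itself,
    # so every pseudo ([MASK]) slot predicts target token t with exactly the context an
    # autoregressive decoder would have at inference time.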
def forward(self, source_idxys, target_idxys, target_index, pseudo_idxys, num_source_tokens, num_target_tokens,
target_span_ids=None):
source_len = source_idxys.size(1)
target_len = target_idxys.size(1)
pseudo_len = pseudo_idxys.size(1)
assert target_len == pseudo_len
assert source_len > 0 and target_len > 0
split_lengths = (source_len, target_len, pseudo_len)
if self.config.base_model_type == 'layoutlm':
source_xys = source_idxys[:, :, 1:]
target_xys = target_idxys[:, :, 1:]
pseudo_xys = pseudo_idxys[:, :, 1:]
input_xys = torch.cat((source_xys, target_xys, pseudo_xys), dim=1)
source_ids = source_idxys[:, :, 0]
target_ids = target_idxys[:, :, 0]
pseudo_ids = pseudo_idxys[:, :, 0]
else:
source_ids = source_idxys
target_ids = target_idxys
pseudo_ids = pseudo_idxys
input_xys = None
input_ids = torch.cat((source_ids, target_ids, pseudo_ids), dim=1)
token_type_ids = torch.cat(
(torch.ones_like(source_ids) * self.source_type_id,
torch.ones_like(target_ids) * self.target_type_id,
torch.ones_like(pseudo_ids) * self.target_type_id), dim=1)
source_mask, source_position_ids = \
self.create_mask_and_position_ids(num_source_tokens, source_len)
target_mask, target_position_ids = \
self.create_mask_and_position_ids(num_target_tokens, target_len, offset=num_source_tokens)
position_ids = torch.cat((source_position_ids, target_position_ids, target_position_ids), dim=1)
if target_span_ids is None:
target_span_ids = target_position_ids
attention_mask = self.create_attention_mask(source_mask, target_mask, source_position_ids, target_span_ids)
if self.config.base_model_type == 'layoutlm':
outputs = self.bert(
input_ids, input_xys, attention_mask=attention_mask, token_type_ids=token_type_ids,
position_ids=position_ids, split_lengths=split_lengths, return_emb=True)
else:
outputs = self.bert(
input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids,
position_ids=position_ids, split_lengths=split_lengths, return_emb=True)
sequence_output = outputs[0]
pseudo_sequence_output = sequence_output[:, source_len + target_len:, ]
sequence_embedding = outputs[-1]
source_embedding = sequence_embedding[:, :source_len, :]
def loss_mask_and_normalize(loss, mask):
mask = mask.type_as(loss)
loss = loss * mask
denominator = torch.sum(mask) + 1e-5
return (loss / denominator).sum()
# TODO: do we need to mask the impossible pos with the real input length
prediction_scores_masked = self.cls(pseudo_sequence_output, source_embedding)
if self.crit_mask_lm_smoothed:
masked_lm_loss = self.crit_mask_lm_smoothed(
F.log_softmax(prediction_scores_masked.float(), dim=-1), target_index)
else:
masked_lm_loss = self.crit_mask_lm(
prediction_scores_masked.transpose(1, 2).float(), target_index)
pseudo_lm_loss = loss_mask_and_normalize(
masked_lm_loss.float(), target_mask)
return pseudo_lm_loss
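# Reading of the forward pass above (as implemented): the encoder sees [source; target; pseudo],
# the hidden states of the pseudo slots are scored against the embeddings of the source tokens
# (LayoutlmSPOnlyMLMHead), and the loss is the (optionally label-smoothed) cross entropy of
# predicting `target_index`, i.e. which source position each target slot points to, masked by the
# real target length.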
| EXA-1-master | exa/models/unilm-master/layoutreader/s2s_ft/modeling.py |
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import json
import glob
import logging
import argparse
import math
from tqdm import tqdm
import numpy as np
import torch
import random
import pickle
from s2s_ft.modeling_decoding import BertForSeq2SeqDecoder, BertConfig
from transformers.tokenization_bert import whitespace_tokenize
import s2s_ft.s2s_loader as seq2seq_loader
from s2s_ft.utils import load_and_cache_examples
from transformers import \
BertTokenizer, RobertaTokenizer, XLMRobertaTokenizer, ElectraTokenizer
from s2s_ft.tokenization_unilm import UnilmTokenizer
from s2s_ft.tokenization_minilm import MinilmTokenizer
TOKENIZER_CLASSES = {
'bert': BertTokenizer,
'minilm': MinilmTokenizer,
'roberta': RobertaTokenizer,
'unilm': UnilmTokenizer,
'xlm-roberta': XLMRobertaTokenizer,
'electra': ElectraTokenizer,
}
class WhitespaceTokenizer(object):
def tokenize(self, text):
return whitespace_tokenize(text)
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
def detokenize(tk_list):
r_list = []
for tk in tk_list:
if tk.startswith('##') and len(r_list) > 0:
r_list[-1] = r_list[-1] + tk[2:]
else:
r_list.append(tk)
return r_list
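# e.g. detokenize(['un', '##believable', '!']) -> ['unbelievable', '!']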
def ascii_print(text):
text = text.encode("ascii", "ignore")
print(text)
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--model_type", default=None, type=str, required=True,
help="Model type selected in the list: " + ", ".join(TOKENIZER_CLASSES.keys()))
parser.add_argument("--model_path", default=None, type=str, required=True,
help="Path to the model checkpoint.")
parser.add_argument("--config_path", default=None, type=str,
help="Path to config.json for the model.")
# tokenizer_name
parser.add_argument("--tokenizer_name", default=None, type=str, required=True,
help="tokenizer name")
parser.add_argument("--max_seq_length", default=512, type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
# decoding parameters
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--no_cuda', action='store_true',
help="Whether to use CUDA for decoding")
parser.add_argument("--input_file", type=str, help="Input file")
parser.add_argument('--subset', type=int, default=0,
help="Decode a subset of the input dataset.")
parser.add_argument("--output_file", type=str, help="output file")
parser.add_argument("--split", type=str, default="",
help="Data split (train/val/test).")
parser.add_argument('--tokenized_input', action='store_true',
help="Whether the input is tokenized.")
parser.add_argument('--seed', type=int, default=123,
help="random seed for initialization")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument('--batch_size', type=int, default=4,
help="Batch size for decoding.")
parser.add_argument('--beam_size', type=int, default=1,
help="Beam size for searching")
parser.add_argument('--length_penalty', type=float, default=0,
help="Length penalty for beam search")
parser.add_argument('--forbid_duplicate_ngrams', action='store_true')
parser.add_argument('--forbid_ignore_word', type=str, default=None,
help="Forbid the word during forbid_duplicate_ngrams")
parser.add_argument("--min_len", default=1, type=int)
parser.add_argument('--need_score_traces', action='store_true')
parser.add_argument('--ngram_size', type=int, default=3)
parser.add_argument('--mode', default="s2s",
choices=["s2s", "l2r", "both"])
parser.add_argument('--max_tgt_length', type=int, default=128,
help="maximum length of target sequence")
parser.add_argument('--s2s_special_token', action='store_true',
help="New special tokens ([S2S_SEP]/[S2S_CLS]) of S2S.")
parser.add_argument('--s2s_add_segment', action='store_true',
help="Additional segmental for the encoder of S2S.")
parser.add_argument('--s2s_share_segment', action='store_true',
help="Sharing segment embeddings for the encoder of S2S (used with --s2s_add_segment).")
parser.add_argument('--pos_shift', action='store_true',
help="Using position shift for fine-tuning.")
parser.add_argument("--cache_dir", default=None, type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
args = parser.parse_args()
if args.need_score_traces and args.beam_size <= 1:
raise ValueError(
"Score trace is only available for beam search with beam size > 1.")
if args.max_tgt_length >= args.max_seq_length - 2:
raise ValueError("Maximum tgt length exceeds max seq length - 2.")
device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
if args.seed > 0:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
else:
random_seed = random.randint(0, 10000)
logger.info("Set random seed as: {}".format(random_seed))
random.seed(random_seed)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
if n_gpu > 0:
            torch.cuda.manual_seed_all(random_seed)
tokenizer = TOKENIZER_CLASSES[args.model_type].from_pretrained(
args.tokenizer_name, do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None)
if args.model_type == "roberta":
vocab = tokenizer.encoder
elif args.model_type == "xlm-roberta":
vocab = {}
for tk_id in range(len(tokenizer)):
tk = tokenizer._convert_id_to_token(tk_id)
vocab[tk] = tk_id
else:
vocab = tokenizer.vocab
if hasattr(tokenizer, 'model_max_length'):
tokenizer.model_max_length = args.max_seq_length
elif hasattr(tokenizer, 'max_len'):
tokenizer.max_len = args.max_seq_length
mask_word_id, eos_word_ids, sos_word_id = tokenizer.convert_tokens_to_ids(
[tokenizer.mask_token, tokenizer.sep_token, tokenizer.sep_token])
forbid_ignore_set = None
if args.forbid_ignore_word:
w_list = []
for w in args.forbid_ignore_word.split('|'):
if w.startswith('[') and w.endswith(']'):
w_list.append(w.upper())
else:
w_list.append(w)
forbid_ignore_set = set(tokenizer.convert_tokens_to_ids(w_list))
print(args.model_path)
found_checkpoint_flag = False
for model_recover_path in glob.glob(args.model_path):
if not os.path.isdir(model_recover_path):
continue
logger.info("***** Recover model: %s *****", model_recover_path)
config_file = args.config_path if args.config_path else os.path.join(model_recover_path, "config.json")
logger.info("Read decoding config from: %s" % config_file)
config = BertConfig.from_json_file(config_file)
bi_uni_pipeline = []
bi_uni_pipeline.append(seq2seq_loader.Preprocess4Seq2seqDecoder(
list(vocab.keys()), tokenizer.convert_tokens_to_ids, args.max_seq_length,
max_tgt_length=args.max_tgt_length, pos_shift=args.pos_shift,
source_type_id=config.source_type_id, target_type_id=config.target_type_id,
cls_token=tokenizer.cls_token, sep_token=tokenizer.sep_token, pad_token=tokenizer.pad_token))
found_checkpoint_flag = True
model = BertForSeq2SeqDecoder.from_pretrained(
model_recover_path, config=config, mask_word_id=mask_word_id, search_beam_size=args.beam_size,
length_penalty=args.length_penalty, eos_id=eos_word_ids, sos_id=sos_word_id,
forbid_duplicate_ngrams=args.forbid_duplicate_ngrams, forbid_ignore_set=forbid_ignore_set,
ngram_size=args.ngram_size, min_len=args.min_len, mode=args.mode,
max_position_embeddings=args.max_seq_length, pos_shift=args.pos_shift,
)
if args.fp16:
model.half()
model.to(device)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
torch.cuda.empty_cache()
model.eval()
next_i = 0
max_src_length = args.max_seq_length - 2 - args.max_tgt_length
if args.pos_shift:
max_src_length += 1
to_pred = load_and_cache_examples(
args.input_file, tokenizer, local_rank=-1,
cached_features_file=None, shuffle=False, eval_mode=True)
input_lines = []
for line in to_pred:
input_lines.append(tokenizer.convert_ids_to_tokens(line.source_ids)[:max_src_length])
if args.subset > 0:
logger.info("Decoding subset: %d", args.subset)
input_lines = input_lines[:args.subset]
input_lines = sorted(list(enumerate(input_lines)),
key=lambda x: -len(x[1]))
output_lines = [""] * len(input_lines)
score_trace_list = [None] * len(input_lines)
total_batch = math.ceil(len(input_lines) / args.batch_size)
with tqdm(total=total_batch) as pbar:
batch_count = 0
first_batch = True
while next_i < len(input_lines):
_chunk = input_lines[next_i:next_i + args.batch_size]
buf_id = [x[0] for x in _chunk]
buf = [x[1] for x in _chunk]
next_i += args.batch_size
batch_count += 1
max_a_len = max([len(x) for x in buf])
instances = []
for instance in [(x, max_a_len) for x in buf]:
for proc in bi_uni_pipeline:
instances.append(proc(instance))
with torch.no_grad():
batch = seq2seq_loader.batch_list_to_batch_tensors(
instances)
batch = [
t.to(device) if t is not None else None for t in batch]
input_ids, token_type_ids, position_ids, input_mask, mask_qkv, task_idx = batch
traces = model(input_ids, token_type_ids,
position_ids, input_mask, task_idx=task_idx, mask_qkv=mask_qkv)
if args.beam_size > 1:
traces = {k: v.tolist() for k, v in traces.items()}
output_ids = traces['pred_seq']
else:
output_ids = traces.tolist()
for i in range(len(buf)):
w_ids = output_ids[i]
output_buf = tokenizer.convert_ids_to_tokens(w_ids)
output_tokens = []
for t in output_buf:
if t in (tokenizer.sep_token, tokenizer.pad_token):
break
output_tokens.append(t)
if args.model_type == "roberta" or args.model_type == "xlm-roberta":
output_sequence = tokenizer.convert_tokens_to_string(output_tokens)
else:
output_sequence = ' '.join(detokenize(output_tokens))
if '\n' in output_sequence:
output_sequence = " [X_SEP] ".join(output_sequence.split('\n'))
output_lines[buf_id[i]] = output_sequence
if first_batch or batch_count % 50 == 0:
logger.info("{} = {}".format(buf_id[i], output_sequence))
if args.need_score_traces:
score_trace_list[buf_id[i]] = {
'scores': traces['scores'][i], 'wids': traces['wids'][i], 'ptrs': traces['ptrs'][i]}
pbar.update(1)
first_batch = False
if args.output_file:
fn_out = args.output_file
else:
fn_out = model_recover_path+'.'+args.split
with open(fn_out, "w", encoding="utf-8") as fout:
for l in output_lines:
fout.write(l)
fout.write("\n")
if args.need_score_traces:
with open(fn_out + ".trace.pickle", "wb") as fout_trace:
pickle.dump(
{"version": 0.0, "num_samples": len(input_lines)}, fout_trace)
for x in score_trace_list:
pickle.dump(x, fout_trace)
if not found_checkpoint_flag:
logger.info("Not found the model checkpoint file!")
if __name__ == "__main__":
main()
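# Illustrative invocation (paths and checkpoint names are placeholders):
#   python decode_seq2seq.py --model_type unilm --tokenizer_name unilm1.2-base-uncased \
#       --model_path ./finetuned_ckpt/ckpt-* --input_file test.json --output_file test.out \
#       --max_seq_length 512 --max_tgt_length 48 --batch_size 16 --beam_size 5 \
#       --forbid_duplicate_ngrams --do_lower_case --mode s2s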
| EXA-1-master | exa/models/unilm-master/s2s-ft/decode_seq2seq.py |
from io import open
from setuptools import find_packages, setup
extras = {
'serving': ['pydantic', 'uvicorn', 'fastapi'],
'serving-tf': ['pydantic', 'uvicorn', 'fastapi'],
'serving-torch': ['pydantic', 'uvicorn', 'fastapi', 'torch']
}
extras['all'] = [package for packages in extras.values() for package in packages]
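# e.g. `pip install .[serving-torch]` additionally installs pydantic, uvicorn, fastapi and torch,
# while `.[all]` installs the union of every extra listed above.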
setup(
name="s2s-ft",
version="0.0.1",
author="UniLM Team",
author_email="[email protected]",
description="Fine-Tuning Bidirectional Transformers for Sequence-to-Sequence Learning",
long_description=open("README.md", "r", encoding='utf-8').read(),
long_description_content_type="text/markdown",
keywords='Fine-Tuning Bidirectional Transformers for Sequence-to-Sequence Learning',
license='Apache',
url="https://github.com/microsoft/unilm/tree/master/s2s-ft",
packages=find_packages(exclude=["*.tests", "*.tests.*",
"tests.*", "tests"]),
install_requires=['numpy',
'boto3',
'requests',
'tqdm',
'regex != 2019.12.17',
'sentencepiece',
'sacremoses',
'tensorboardX',
'transformers == 2.10.0'],
extras_require=extras,
python_requires='>=3.5.0',
classifiers=[
'Programming Language :: Python :: 3',
],
)
| EXA-1-master | exa/models/unilm-master/s2s-ft/setup.py |
import pickle
import math
import argparse
import glob
import logging
from pathlib import Path
from tqdm import tqdm
import unicodedata
from transformers import BertTokenizer, RobertaTokenizer, XLMRobertaTokenizer
from s2s_ft.tokenization_unilm import UnilmTokenizer
from s2s_ft.tokenization_minilm import MinilmTokenizer
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
TOKENIZER_CLASSES = {
'bert': BertTokenizer,
'minilm': MinilmTokenizer,
'roberta': RobertaTokenizer,
'unilm': UnilmTokenizer,
'xlm-roberta': XLMRobertaTokenizer,
}
def read_traces_from_file(file_name):
with open(file_name, "rb") as fin:
meta = pickle.load(fin)
num_samples = meta["num_samples"]
samples = []
for _ in range(num_samples):
samples.append(pickle.load(fin))
return samples
def get_best_sequence(sample, eos_id, pad_id, length_penalty=None, alpha=None, expect=None, min_len=None):
# if not any((length_penalty, alpha, expect, min_len)):
# raise ValueError(
# "You can only specify length penalty or alpha, but not both.")
scores = sample["scores"]
wids_list = sample["wids"]
ptrs = sample["ptrs"]
last_frame_id = len(scores) - 1
for i, wids in enumerate(wids_list):
if all(wid in (eos_id, pad_id) for wid in wids):
last_frame_id = i
break
while all(wid == pad_id for wid in wids_list[last_frame_id]):
last_frame_id -= 1
max_score = -math.inf
frame_id = -1
pos_in_frame = -1
for fid in range(last_frame_id + 1):
for i, wid in enumerate(wids_list[fid]):
if fid <= last_frame_id and scores[fid][i] >= 0:
# skip paddings
continue
if (wid in (eos_id, pad_id)) or fid == last_frame_id:
s = scores[fid][i]
if length_penalty:
if expect:
s -= length_penalty * math.fabs(fid+1 - expect)
else:
s += length_penalty * (fid + 1)
elif alpha:
s = s / math.pow((5 + fid + 1) / 6.0, alpha)
if s > max_score:
# if (frame_id != -1) and min_len and (fid+1 < min_len):
# continue
max_score = s
frame_id = fid
pos_in_frame = i
if frame_id == -1:
seq = []
else:
seq = [wids_list[frame_id][pos_in_frame]]
for fid in range(frame_id, 0, -1):
pos_in_frame = ptrs[fid][pos_in_frame]
seq.append(wids_list[fid - 1][pos_in_frame])
seq.reverse()
return seq
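# get_best_sequence walks the pickled beam-search trace: it picks the highest-scoring finished
# hypothesis, optionally rescoring with a length reward (score + length_penalty * length), an
# expected-length penalty (score - length_penalty * |length - expect|), or GNMT-style normalization
# (score / ((5 + length) / 6) ** alpha), and then follows the `ptrs` back-pointers to recover the ids.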
def detokenize(tk_list):
r_list = []
for tk in tk_list:
if tk.startswith('##') and len(r_list) > 0:
r_list[-1] = r_list[-1] + tk[2:]
else:
r_list.append(tk)
return r_list
def simple_postprocess(tk_list):
# truncate duplicate punctuations
    while (tk_list and len(tk_list) > 4 and len(tk_list[-1]) == 1
           and unicodedata.category(tk_list[-1]).startswith('P')
           and all(it == tk_list[-1] for it in tk_list[-4:])):
tk_list = tk_list[:-3]
return tk_list
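# e.g. simple_postprocess(['good', 'night', '!', '!', '!', '!']) -> ['good', 'night', '!']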
# def include_unk(line):
# return " UNK ".join(line.split('<unk>')).strip()
def main(args):
tokenizer = TOKENIZER_CLASSES[args.model_type].from_pretrained(
args.tokenizer_name, do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None)
eos_token = tokenizer.sep_token
pad_token = tokenizer.pad_token
eos_id, pad_id = tokenizer.convert_tokens_to_ids([eos_token, pad_token])
logger.info("*********************************************")
logger.info(" EOS TOKEN = {}, ID = {}".format(eos_token, eos_id))
logger.info(" PAD TOKEN = {}, ID = {}".format(pad_token, pad_id))
logger.info("*********************************************")
for input_file in tqdm(glob.glob(args.input)):
if not Path(input_file+'.trace.pickle').exists():
continue
print(input_file)
samples = read_traces_from_file(input_file+'.trace.pickle')
results = []
for s in samples:
word_ids = get_best_sequence(s, eos_id, pad_id, alpha=args.alpha,
length_penalty=args.length_penalty, expect=args.expect, min_len=args.min_len)
tokens = tokenizer.convert_ids_to_tokens(word_ids)
buf = []
for t in tokens:
if t in (eos_token, pad_token):
break
else:
buf.append(t)
if args.model_type == "roberta" or args.model_type == "xlm-roberta":
output_text = " ".join(simple_postprocess(tokenizer.convert_tokens_to_string(buf).split(' ')))
if '\n' in output_text:
output_text = " [X_SEP] ".join(output_text.split('\n'))
else:
output_text = " ".join(simple_postprocess(detokenize(buf)))
results.append(output_text)
fn_out = input_file + '.'
if args.length_penalty:
fn_out += 'lenp'+str(args.length_penalty)
if args.expect:
fn_out += 'exp'+str(args.expect)
if args.alpha:
fn_out += 'alp'+str(args.alpha)
if args.min_len:
fn_out += 'minl'+str(args.min_len)
with open(fn_out, "w", encoding="utf-8") as fout:
for line in results:
fout.write(line)
fout.write("\n")
logger.info("Output file = [%s]" % fn_out)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input", type=str, help="Input file.")
parser.add_argument("--model_type", default=None, type=str, required=True,
help="Model type selected in the list: " + ", ".join(TOKENIZER_CLASSES.keys()))
parser.add_argument("--alpha", default=None, type=float)
parser.add_argument("--length_penalty", default=None, type=float)
parser.add_argument("--expect", default=None, type=float,
help="Expectation of target length.")
parser.add_argument("--min_len", default=None, type=int)
# tokenizer_name
parser.add_argument("--tokenizer_name", default=None, type=str, required=True,
help="tokenizer name")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--cache_dir", default=None, type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
args = parser.parse_args()
main(args)
| EXA-1-master | exa/models/unilm-master/s2s-ft/gen_seq_from_trace.py |
from __future__ import absolute_import, division, print_function
import argparse
import logging
import os
import json
import random
import numpy as np
import torch
from torch.utils.data import (DataLoader, SequentialSampler)
from torch.utils.data.distributed import DistributedSampler
try:
from torch.utils.tensorboard import SummaryWriter
except:
from tensorboardX import SummaryWriter
import tqdm
from s2s_ft.modeling import BertForSequenceToSequenceWithPseudoMask, BertForSequenceToSequenceUniLMV1
from transformers import AdamW, get_linear_schedule_with_warmup
from transformers import \
RobertaConfig, BertConfig, \
BertTokenizer, RobertaTokenizer, \
XLMRobertaConfig, XLMRobertaTokenizer, \
ElectraConfig, ElectraTokenizer
from s2s_ft.configuration_unilm import UnilmConfig
from s2s_ft.tokenization_unilm import UnilmTokenizer
from s2s_ft.configuration_minilm import MinilmConfig
from s2s_ft.tokenization_minilm import MinilmTokenizer
from s2s_ft import utils
from s2s_ft.config import BertForSeq2SeqConfig
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
'bert': (BertConfig, BertTokenizer),
'minilm': (MinilmConfig, MinilmTokenizer),
'roberta': (RobertaConfig, RobertaTokenizer),
'xlm-roberta': (XLMRobertaConfig, XLMRobertaTokenizer),
'unilm': (UnilmConfig, UnilmTokenizer),
'electra': (ElectraConfig, ElectraTokenizer),
}
def prepare_for_training(args, model, checkpoint_state_dict, amp):
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
if checkpoint_state_dict:
optimizer.load_state_dict(checkpoint_state_dict['optimizer'])
model.load_state_dict(checkpoint_state_dict['model'])
# then remove optimizer state to make amp happy
# https://github.com/NVIDIA/apex/issues/480#issuecomment-587154020
if amp:
optimizer.state = {}
if amp:
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
if checkpoint_state_dict:
amp.load_state_dict(checkpoint_state_dict['amp'])
# Black Tech from https://github.com/NVIDIA/apex/issues/480#issuecomment-587154020
# forward, backward, optimizer step, zero_grad
random_input = {'source_ids': torch.ones(size=(2, 2), device=args.device, dtype=torch.long),
'target_ids': torch.ones(size=(2, 2), device=args.device, dtype=torch.long),
'label_ids': torch.ones(size=(2, 2), device=args.device, dtype=torch.long),
'pseudo_ids': torch.ones(size=(2, 2), device=args.device, dtype=torch.long),
'num_source_tokens': torch.zeros(size=(2,), device=args.device, dtype=torch.long),
'num_target_tokens': torch.zeros(size=(2,), device=args.device, dtype=torch.long)}
loss = model(**random_input)
print("Loss = %f" % loss.cpu().item())
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
model.zero_grad()
# then load optimizer state_dict again (this time without removing optimizer.state)
optimizer.load_state_dict(checkpoint_state_dict['optimizer'])
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
return model, optimizer
def train(args, training_features, model, tokenizer):
""" Train the model """
if args.local_rank in [-1, 0] and args.log_dir:
tb_writer = SummaryWriter(log_dir=args.log_dir)
else:
tb_writer = None
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
else:
amp = None
# model recover
recover_step = utils.get_max_epoch_model(args.output_dir)
if recover_step:
checkpoint_state_dict = utils.get_checkpoint_state_dict(args.output_dir, recover_step)
else:
checkpoint_state_dict = None
model.to(args.device)
model, optimizer = prepare_for_training(args, model, checkpoint_state_dict, amp=amp)
per_node_train_batch_size = args.per_gpu_train_batch_size * args.n_gpu * args.gradient_accumulation_steps
train_batch_size = per_node_train_batch_size * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)
global_step = recover_step if recover_step else 0
if args.num_training_steps == -1:
args.num_training_steps = args.num_training_epochs * len(training_features) / train_batch_size
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.num_warmup_steps,
num_training_steps=args.num_training_steps, last_epoch=-1)
if checkpoint_state_dict:
scheduler.load_state_dict(checkpoint_state_dict["lr_scheduler"])
train_dataset = utils.Seq2seqDatasetForBert(
features=training_features, max_source_len=args.max_source_seq_length,
max_target_len=args.max_target_seq_length, vocab_size=tokenizer.vocab_size,
cls_id=tokenizer.cls_token_id, sep_id=tokenizer.sep_token_id, pad_id=tokenizer.pad_token_id,
mask_id=tokenizer.mask_token_id, random_prob=args.random_prob, keep_prob=args.keep_prob,
offset=train_batch_size * global_step, num_training_instances=train_batch_size * args.num_training_steps,
source_mask_prob=args.source_mask_prob, target_mask_prob=args.target_mask_prob,
mask_way=args.mask_way, num_max_mask_token=args.num_max_mask_token,
)
logger.info("Check dataset:")
for i in range(5):
source_ids, target_ids = train_dataset.__getitem__(i)[:2]
logger.info("Instance-%d" % i)
logger.info("Source tokens = %s" % " ".join(tokenizer.convert_ids_to_tokens(source_ids)))
logger.info("Target tokens = %s" % " ".join(tokenizer.convert_ids_to_tokens(target_ids)))
logger.info("Mode = %s" % str(model))
# Train!
logger.info(" ***** Running training ***** *")
logger.info(" Num examples = %d", len(training_features))
logger.info(" Num Epochs = %.2f", len(train_dataset) / len(training_features))
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Batch size per node = %d", per_node_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", train_batch_size)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", args.num_training_steps)
if args.num_training_steps <= global_step:
logger.info("Training is done. Please use a new dir or clean this dir!")
else:
# The training features are shuffled
train_sampler = SequentialSampler(train_dataset) \
if args.local_rank == -1 else DistributedSampler(train_dataset, shuffle=False)
train_dataloader = DataLoader(
train_dataset, sampler=train_sampler,
batch_size=per_node_train_batch_size // args.gradient_accumulation_steps,
collate_fn=utils.batch_list_to_batch_tensors)
train_iterator = tqdm.tqdm(
train_dataloader, initial=global_step * args.gradient_accumulation_steps,
desc="Iter (loss=X.XXX, lr=X.XXXXXXX)", disable=args.local_rank not in [-1, 0])
model.train()
model.zero_grad()
tr_loss, logging_loss = 0.0, 0.0
for step, batch in enumerate(train_iterator):
if global_step > args.num_training_steps:
break
batch = tuple(t.to(args.device) for t in batch)
if args.mask_way == 'v2':
inputs = {'source_ids': batch[0],
'target_ids': batch[1],
'label_ids': batch[2],
'pseudo_ids': batch[3],
'num_source_tokens': batch[4],
'num_target_tokens': batch[5]}
elif args.mask_way == 'v1' or args.mask_way == 'v0':
inputs = {'source_ids': batch[0],
'target_ids': batch[1],
'masked_ids': batch[2],
'masked_pos': batch[3],
'masked_weight': batch[4],
'num_source_tokens': batch[5],
'num_target_tokens': batch[6]}
loss = model(**inputs)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training
train_iterator.set_description('Iter (loss=%5.3f) lr=%9.7f' % (loss.item(), scheduler.get_lr()[0]))
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
logging_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
logger.info("")
logger.info(" Step [%d ~ %d]: %.2f", global_step - args.logging_steps, global_step, logging_loss)
logging_loss = 0.0
if args.local_rank in [-1, 0] and args.save_steps > 0 and \
(global_step % args.save_steps == 0 or global_step == args.num_training_steps):
save_path = os.path.join(args.output_dir, "ckpt-%d" % global_step)
os.makedirs(save_path, exist_ok=True)
model_to_save = model.module if hasattr(model, "module") else model
model_to_save.save_pretrained(save_path)
optim_to_save = {
"optimizer": optimizer.state_dict(),
"lr_scheduler": scheduler.state_dict(),
}
if args.fp16:
optim_to_save["amp"] = amp.state_dict()
torch.save(optim_to_save, os.path.join(save_path, utils.OPTIM_NAME))
logger.info("Saving model checkpoint %d into %s", global_step, save_path)
if args.local_rank in [-1, 0] and tb_writer:
tb_writer.close()
def get_args():
parser = argparse.ArgumentParser()
# parser.add_argument("--train_source_file", default=None, type=str, required=True,
# help="Training data contains source")
# parser.add_argument("--train_target_file", default=None, type=str, required=True,
# help="Training data contains target")
parser.add_argument("--train_file", default=None, type=str, required=True,
help="Training data (json format) for training. Keys: source and target")
parser.add_argument("--model_type", default=None, type=str, required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model or shortcut name selected in the list:")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model checkpoints and predictions will be written.")
parser.add_argument("--log_dir", default=None, type=str,
help="The output directory where the log will be written.")
## Other parameters
parser.add_argument("--config_name", default=None, type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default=None, type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--cache_dir", default=None, type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_source_seq_length", default=464, type=int,
help="The maximum total source sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded.")
parser.add_argument("--max_target_seq_length", default=48, type=int,
help="The maximum total target sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded.")
parser.add_argument("--cached_train_features_file", default=None, type=str,
help="Cached training features file")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of update steps to accumulate before performing a backward/update pass.")
parser.add_argument("--weight_decay", default=0.01, type=float,
help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--label_smoothing", default=0.1, type=float,
help="Max gradient norm.")
parser.add_argument("--num_training_steps", default=-1, type=int,
help="set total number of training steps to perform")
parser.add_argument("--num_training_epochs", default=10, type=int,
help="set total number of training epochs to perform (--num_training_steps has higher priority)")
parser.add_argument("--num_warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument("--random_prob", default=0.1, type=float,
help="prob to random replace a masked token")
parser.add_argument("--keep_prob", default=0.1, type=float,
help="prob to keep no change for a masked token")
parser.add_argument("--fix_word_embedding", action='store_true',
help="Set word embedding no grad when finetuning.")
parser.add_argument('--logging_steps', type=int, default=500,
help="Log every X updates steps.")
parser.add_argument('--save_steps', type=int, default=1500,
help="Save checkpoint every X updates steps.")
parser.add_argument("--no_cuda", action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument("--local_rank", type=int, default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
parser.add_argument('--source_mask_prob', type=float, default=-1.0,
help="Probability to mask source sequence in fine-tuning")
parser.add_argument('--target_mask_prob', type=float, default=0.5,
help="Probability to mask target sequence in fine-tuning")
    parser.add_argument('--num_max_mask_token', type=int, default=0,
                        help="The maximum number of masked tokens in the target sequence.")
parser.add_argument('--mask_way', type=str, default='v2',
help="Fine-tuning method (v0: position shift, v1: masked LM, v2: pseudo-masking)")
parser.add_argument("--lmdb_cache", action='store_true',
help="Use LMDB to cache training features")
parser.add_argument("--lmdb_dtype", type=str, default='h',
help="Data type for cached data type for LMDB")
parser.add_argument
args = parser.parse_args()
return args
def prepare(args):
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
os.makedirs(args.output_dir, exist_ok=True)
json.dump(args.__dict__, open(os.path.join(
args.output_dir, 'train_opt.json'), 'w'), sort_keys=True, indent=2)
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
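    # Sketch of a launch command that exercises this branch (an assumption, not from this repo's docs):
    #   python -m torch.distributed.launch --nproc_per_node=4 run_seq2seq.py --train_file ... --output_dir ...
    # torch.distributed.launch spawns one process per GPU and passes --local_rank to each process,
    # which is what selects this distributed code path.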
args.device = device
# Setup logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
logger.info("Training/evaluation parameters %s", args)
# Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set.
# Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will
# remove the need for this code, but it is still valid.
if args.fp16:
try:
import apex
apex.amp.register_half_function(torch, 'einsum')
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
def get_model_and_tokenizer(args):
config_class, tokenizer_class = MODEL_CLASSES[args.model_type]
model_config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
cache_dir=args.cache_dir if args.cache_dir else None)
config = BertForSeq2SeqConfig.from_exist_config(
config=model_config, label_smoothing=args.label_smoothing,
fix_word_embedding=args.fix_word_embedding,
max_position_embeddings=args.max_source_seq_length + args.max_target_seq_length)
logger.info("Model config for seq2seq: %s", str(config))
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None)
model_class = \
BertForSequenceToSequenceWithPseudoMask if args.mask_way == 'v2' \
else BertForSequenceToSequenceUniLMV1
logger.info("Construct model %s" % model_class.MODEL_NAME)
model = model_class.from_pretrained(
args.model_name_or_path, config=config, model_type=args.model_type,
reuse_position_embedding=True,
cache_dir=args.cache_dir if args.cache_dir else None)
return model, tokenizer
def main():
args = get_args()
prepare(args)
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
# Make sure only the first process in distributed training will download model & vocab
# Load pretrained model and tokenizer
model, tokenizer = get_model_and_tokenizer(args)
if args.local_rank == 0:
torch.distributed.barrier()
# Make sure only the first process in distributed training will download model & vocab
if args.cached_train_features_file is None:
if not args.lmdb_cache:
args.cached_train_features_file = os.path.join(args.output_dir, "cached_features_for_training.pt")
else:
args.cached_train_features_file = os.path.join(args.output_dir, "cached_features_for_training_lmdb")
training_features = utils.load_and_cache_examples(
example_file=args.train_file, tokenizer=tokenizer, local_rank=args.local_rank,
cached_features_file=args.cached_train_features_file, shuffle=True,
lmdb_cache=args.lmdb_cache, lmdb_dtype=args.lmdb_dtype,
)
train(args, training_features, model, tokenizer)
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/s2s-ft/run_seq2seq.py |
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import glob
import json
import argparse
import math
import string
from multiprocessing import Pool, cpu_count
from tqdm import tqdm, trange
from pathlib import Path
import numpy as np
# pip install py-rouge
import rouge
import time
import tempfile
import shutil
# pip install pyrouge
from evaluations.bs_pyrouge import Rouge155
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--gold", type=str, help="Gold output file.")
parser.add_argument("--pred", type=str, help="Input prediction file.")
parser.add_argument("--split", type=str, default="",
help="Data split (train/dev/test).")
parser.add_argument("--save_best", action='store_true',
help="Save best epoch.")
parser.add_argument("--only_eval_best", action='store_true',
help="Only evaluate best epoch.")
parser.add_argument("--trunc_len", type=int, default=0,
help="Truncate line by the maximum length.")
default_process_count = max(1, cpu_count() - 1)
parser.add_argument("--processes", type=int, default=default_process_count,
help="Number of processes to use (default %(default)s)")
parser.add_argument("--perl", action='store_true',
help="Using the perl script.")
parser.add_argument('--lazy_eval', action='store_true',
help="Skip evaluation if the .rouge file exists.")
args = parser.parse_args()
evaluator = rouge.Rouge(metrics=['rouge-n', 'rouge-l'], max_n=2,
limit_length=False, apply_avg=True, weight_factor=1.2)
def test_rouge(cand, ref):
temp_dir = tempfile.mkdtemp()
candidates = cand
references = ref
assert len(candidates) == len(references)
cnt = len(candidates)
current_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
tmp_dir = os.path.join(temp_dir, "rouge-tmp-{}".format(current_time))
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
os.mkdir(tmp_dir + "/candidate")
os.mkdir(tmp_dir + "/reference")
try:
for i in range(cnt):
if len(references[i]) < 1:
continue
with open(tmp_dir + "/candidate/cand.{}.txt".format(i), "w",
encoding="utf-8") as f:
f.write(candidates[i])
with open(tmp_dir + "/reference/ref.{}.txt".format(i), "w",
encoding="utf-8") as f:
f.write(references[i])
r = Rouge155(temp_dir=temp_dir)
r.model_dir = tmp_dir + "/reference/"
r.system_dir = tmp_dir + "/candidate/"
r.model_filename_pattern = 'ref.#ID#.txt'
r.system_filename_pattern = r'cand.(\d+).txt'
rouge_results = r.convert_and_evaluate()
print(rouge_results)
results_dict = r.output_to_dict(rouge_results)
finally:
if os.path.isdir(tmp_dir):
shutil.rmtree(tmp_dir)
return results_dict
def rouge_results_to_str(results_dict):
return ">> ROUGE-F(1/2/l): {:.2f}/{:.2f}/{:.2f}\nROUGE-R(1/2/3/l): {:.2f}/{:.2f}/{:.2f}\n".format(
results_dict["rouge_1_f_score"] * 100,
results_dict["rouge_2_f_score"] * 100,
results_dict["rouge_l_f_score"] * 100,
results_dict["rouge_1_recall"] * 100,
results_dict["rouge_2_recall"] * 100,
results_dict["rouge_l_recall"] * 100
)
def count_tokens(tokens):
counter = {}
for t in tokens:
if t in counter.keys():
counter[t] += 1
else:
counter[t] = 1
return counter
def get_f1(text_a, text_b):
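    # Bag-of-words F1 over lowercased whitespace tokens. Illustrative example (not from the repo):
    #   get_f1("the cat sat", "the cat sat on the mat") -> p = 3/3, r = 3/6, F1 ~ 0.67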
tokens_a = text_a.lower().split()
tokens_b = text_b.lower().split()
if len(tokens_a) == 0 or len(tokens_b) == 0:
return 1 if len(tokens_a) == len(tokens_b) else 0
set_a = count_tokens(tokens_a)
set_b = count_tokens(tokens_b)
match = 0
for token in set_a.keys():
if token in set_b.keys():
match += min(set_a[token], set_b[token])
p = match / len(tokens_a)
r = match / len(tokens_b)
return 2.0 * p * r / (p + r + 1e-5)
_tok_dict = {}
def _is_digit(w):
for ch in w:
if not(ch.isdigit() or ch == ','):
return False
return True
def fix_tokenization(text):
input_tokens = text.split()
output_tokens = []
i = 0
prev_dash = False
while i < len(input_tokens):
tok = input_tokens[i]
flag_prev_dash = False
if tok in _tok_dict.keys():
output_tokens.append(_tok_dict[tok])
i += 1
elif tok == "'" and len(output_tokens) > 0 and output_tokens[-1].endswith("n") and i < len(input_tokens) - 1 and input_tokens[i + 1] == "t":
output_tokens[-1] = output_tokens[-1][:-1]
output_tokens.append("n't")
i += 2
elif tok == "'" and i < len(input_tokens) - 1 and input_tokens[i + 1] in ("s", "d", "ll"):
output_tokens.append("'"+input_tokens[i + 1])
i += 2
elif tok == "." and i < len(input_tokens) - 2 and input_tokens[i + 1] == "." and input_tokens[i + 2] == ".":
output_tokens.append("...")
i += 3
elif tok == "," and len(output_tokens) > 0 and _is_digit(output_tokens[-1]) and i < len(input_tokens) - 1 and _is_digit(input_tokens[i + 1]):
# $ 3 , 000 -> $ 3,000
output_tokens[-1] += ','+input_tokens[i + 1]
i += 2
elif tok == "." and len(output_tokens) > 0 and output_tokens[-1].isdigit() and i < len(input_tokens) - 1 and input_tokens[i + 1].isdigit():
            # 3 . 03 -> 3.03
output_tokens[-1] += '.'+input_tokens[i + 1]
i += 2
elif tok == "." and len(output_tokens) > 0 and len(output_tokens[-1]) == 1 and output_tokens[-1].isupper() and i < len(input_tokens) - 2 and len(input_tokens[i + 1]) == 1 and input_tokens[i + 1].isupper() and input_tokens[i + 2] == '.':
# U . N . -> U.N.
k = i+3
while k+2 < len(input_tokens):
if len(input_tokens[k + 1]) == 1 and input_tokens[k + 1].isupper() and input_tokens[k + 2] == '.':
k += 2
else:
break
output_tokens[-1] += ''.join(input_tokens[i:k])
i += 2
elif prev_dash and len(output_tokens) > 0 and tok[0] not in string.punctuation:
output_tokens[-1] += tok
i += 1
else:
output_tokens.append(tok)
i += 1
prev_dash = flag_prev_dash
return " ".join(output_tokens)
def process_eval(eval_fn):
gold_list = []
with open(args.gold, "r", encoding="utf-8") as f_in:
for l in f_in:
line = l.strip()
gold_list.append(line)
pred_list = []
with open(eval_fn, "r", encoding="utf-8") as f_in:
for l in f_in:
buf = []
sentence = fix_tokenization(l.strip()).replace("(", " -LRB- ").replace(")", " -RRB- ")
while " " in sentence:
sentence = sentence.replace(" ", " ")
buf.append(sentence)
if args.trunc_len:
num_left = args.trunc_len
trunc_list = []
for bit in buf:
tk_list = bit.split()
n = min(len(tk_list), num_left)
trunc_list.append(' '.join(tk_list[:n]))
num_left -= n
if num_left <= 0:
break
else:
trunc_list = buf
line = "\n".join(trunc_list)
pred_list.append(line)
with open(eval_fn+'.post', 'w', encoding='utf-8') as f_out:
for l in pred_list:
f_out.write(l.strip())
f_out.write('\n')
# rouge scores
if len(pred_list) < len(gold_list):
# evaluate subset
gold_list = gold_list[:len(pred_list)]
assert len(pred_list) == len(gold_list)
if args.perl:
scores = test_rouge(pred_list, gold_list)
else:
scores = evaluator.get_scores(pred_list, [[it] for it in gold_list])
return eval_fn, scores
def main():
if args.perl:
eval_fn_list = list(glob.glob(args.pred))
else:
eval_fn_list = [eval_fn for eval_fn in glob.glob(args.pred) if not(
args.lazy_eval and Path(eval_fn+".rouge").exists())]
eval_fn_list = list(filter(lambda fn: not(fn.endswith(
'.post') or fn.endswith('.rouge')), eval_fn_list))
if args.only_eval_best:
best_epoch_dict = {}
for dir_path in set(Path(fn).parent for fn in eval_fn_list):
fn_save = os.path.join(dir_path, 'save_best.dev')
if Path(fn_save).exists():
with open(fn_save, 'r') as f_in:
__, o_name, __ = f_in.read().strip().split('\n')
epoch = o_name.split('.')[1]
best_epoch_dict[dir_path] = epoch
new_eval_fn_list = []
for fn in eval_fn_list:
dir_path = Path(fn).parent
if dir_path in best_epoch_dict:
if Path(fn).name.split('.')[1] == best_epoch_dict[dir_path]:
new_eval_fn_list.append(fn)
eval_fn_list = new_eval_fn_list
logger.info("***** Evaluation: %s *****", ','.join(eval_fn_list))
num_pool = min(args.processes, len(eval_fn_list))
p = Pool(num_pool)
r_list = p.imap_unordered(process_eval, eval_fn_list)
r_list = sorted([(fn, scores)
for fn, scores in r_list], key=lambda x: x[0])
rg2_dict = {}
for fn, scores in r_list:
print(fn)
if args.perl:
print(rouge_results_to_str(scores))
else:
rg2_dict[fn] = scores['rouge-2']['f']
print(
"ROUGE-1: {}\tROUGE-2: {}\n".format(scores['rouge-1']['f'], scores['rouge-2']['f']))
with open(fn+".rouge", 'w') as f_out:
f_out.write(json.dumps(
{'rg1': scores['rouge-1']['f'], 'rg2': scores['rouge-2']['f']}))
p.close()
p.join()
if args.save_best:
# find best results
group_dict = {}
for k, v in rg2_dict.items():
d_name, o_name = Path(k).parent, Path(k).name
if (d_name not in group_dict) or (v > group_dict[d_name][1]):
group_dict[d_name] = (o_name, v)
# compare and save the best result
for k, v in group_dict.items():
fn = os.path.join(k, 'save_best.'+args.split)
o_name_s, rst_s = v
should_save = True
if Path(fn).exists():
with open(fn, 'r') as f_in:
rst_f = float(f_in.read().strip().split('\n')[-1])
if rst_s <= rst_f:
should_save = False
if should_save:
with open(fn, 'w') as f_out:
f_out.write('{0}\n{1}\n{2}\n'.format(k, o_name_s, rst_s))
logger.info("Should save: {}".format(json.dumps(v, indent=2)))
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/s2s-ft/evaluations/eval_for_xsum.py |
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import glob
import json
import argparse
import math
import string
from multiprocessing import Pool, cpu_count
from tqdm import tqdm, trange
from pathlib import Path
import numpy as np
# pip install py-rouge
import rouge
import time
import tempfile
import shutil
# pip install pyrouge
from evaluations.bs_pyrouge import Rouge155
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--gold", type=str, help="Gold output file.")
parser.add_argument("--pred", type=str, help="Input prediction file.")
parser.add_argument("--split", type=str, default="",
help="Data split (train/dev/test).")
parser.add_argument("--save_best", action='store_true',
help="Save best epoch.")
parser.add_argument("--only_eval_best", action='store_true',
help="Only evaluate best epoch.")
parser.add_argument("--trunc_len", type=int, default=0,
help="Truncate line by the maximum length.")
default_process_count = max(1, cpu_count() - 1)
parser.add_argument("--processes", type=int, default=default_process_count,
help="Number of processes to use (default %(default)s)")
parser.add_argument("--perl", action='store_true',
help="Using the perl script.")
parser.add_argument('--lazy_eval', action='store_true',
help="Skip evaluation if the .rouge file exists.")
args = parser.parse_args()
evaluator = rouge.Rouge(metrics=['rouge-n', 'rouge-l'], max_n=2,
limit_length=False, apply_avg=True, weight_factor=1.2)
def test_rouge(cand, ref):
temp_dir = tempfile.mkdtemp()
candidates = cand
references = ref
assert len(candidates) == len(references)
cnt = len(candidates)
current_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
tmp_dir = os.path.join(temp_dir, "rouge-tmp-{}".format(current_time))
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
os.mkdir(tmp_dir + "/candidate")
os.mkdir(tmp_dir + "/reference")
try:
for i in range(cnt):
if len(references[i]) < 1:
continue
with open(tmp_dir + "/candidate/cand.{}.txt".format(i), "w",
encoding="utf-8") as f:
f.write(candidates[i])
with open(tmp_dir + "/reference/ref.{}.txt".format(i), "w",
encoding="utf-8") as f:
f.write(references[i])
r = Rouge155(temp_dir=temp_dir)
r.model_dir = tmp_dir + "/reference/"
r.system_dir = tmp_dir + "/candidate/"
r.model_filename_pattern = 'ref.#ID#.txt'
r.system_filename_pattern = r'cand.(\d+).txt'
rouge_results = r.convert_and_evaluate()
print(rouge_results)
results_dict = r.output_to_dict(rouge_results)
finally:
if os.path.isdir(tmp_dir):
shutil.rmtree(tmp_dir)
return results_dict
def rouge_results_to_str(results_dict):
return ">> ROUGE-F(1/2/l): {:.2f}/{:.2f}/{:.2f}\nROUGE-R(1/2/3/l): {:.2f}/{:.2f}/{:.2f}\n".format(
results_dict["rouge_1_f_score"] * 100,
results_dict["rouge_2_f_score"] * 100,
results_dict["rouge_l_f_score"] * 100,
results_dict["rouge_1_recall"] * 100,
results_dict["rouge_2_recall"] * 100,
results_dict["rouge_l_recall"] * 100
)
def count_tokens(tokens):
counter = {}
for t in tokens:
if t in counter.keys():
counter[t] += 1
else:
counter[t] = 1
return counter
def get_f1(text_a, text_b):
tokens_a = text_a.lower().split()
tokens_b = text_b.lower().split()
if len(tokens_a) == 0 or len(tokens_b) == 0:
return 1 if len(tokens_a) == len(tokens_b) else 0
set_a = count_tokens(tokens_a)
set_b = count_tokens(tokens_b)
match = 0
for token in set_a.keys():
if token in set_b.keys():
match += min(set_a[token], set_b[token])
p = match / len(tokens_a)
r = match / len(tokens_b)
return 2.0 * p * r / (p + r + 1e-5)
_tok_dict = {"(": "-lrb-", ")": "-rrb-",
"[": "-lsb-", "]": "-rsb-",
"{": "-lcb-", "}": "-rcb-",
"[UNK]": "UNK", '&': '&',
'<': '<', '>': '>',
"<unk>": "UNK",
}
def _is_digit(w):
for ch in w:
if not(ch.isdigit() or ch == ','):
return False
return True
def fix_tokenization(text):
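    # PTB-style detokenization fix-ups. Illustrative examples (not from the repo):
    #   "we don ' t know"  ->  "we do n't know"
    #   "( 3 , 000 )"      ->  "-lrb- 3,000 -rrb-"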
input_tokens = text.split()
output_tokens = []
has_left_quote = False
has_left_single_quote = False
i = 0
prev_dash = False
while i < len(input_tokens):
tok = input_tokens[i]
flag_prev_dash = False
if tok in _tok_dict.keys():
output_tokens.append(_tok_dict[tok])
i += 1
elif tok == "\"":
if has_left_quote:
output_tokens.append("''")
else:
output_tokens.append("``")
has_left_quote = not has_left_quote
i += 1
elif tok == "'" and len(output_tokens) > 0 and output_tokens[-1].endswith("n") and i < len(input_tokens) - 1 and input_tokens[i + 1] == "t":
output_tokens[-1] = output_tokens[-1][:-1]
output_tokens.append("n't")
i += 2
elif tok == "'" and i < len(input_tokens) - 1 and input_tokens[i + 1] in ("s", "d", "ll"):
output_tokens.append("'"+input_tokens[i + 1])
i += 2
elif tok == "'":
if has_left_single_quote:
output_tokens.append("'")
else:
output_tokens.append("`")
has_left_single_quote = not has_left_single_quote
i += 1
elif tok == "." and i < len(input_tokens) - 2 and input_tokens[i + 1] == "." and input_tokens[i + 2] == ".":
output_tokens.append("...")
i += 3
elif tok == "," and len(output_tokens) > 0 and _is_digit(output_tokens[-1]) and i < len(input_tokens) - 1 and _is_digit(input_tokens[i + 1]):
# $ 3 , 000 -> $ 3,000
output_tokens[-1] += ','+input_tokens[i + 1]
i += 2
elif tok == "." and len(output_tokens) > 0 and output_tokens[-1].isdigit() and i < len(input_tokens) - 1 and input_tokens[i + 1].isdigit():
            # 3 . 03 -> 3.03
output_tokens[-1] += '.'+input_tokens[i + 1]
i += 2
elif tok == "." and len(output_tokens) > 0 and len(output_tokens[-1]) == 1 and output_tokens[-1].isupper() and i < len(input_tokens) - 2 and len(input_tokens[i + 1]) == 1 and input_tokens[i + 1].isupper() and input_tokens[i + 2] == '.':
# U . N . -> U.N.
k = i+3
while k+2 < len(input_tokens):
if len(input_tokens[k + 1]) == 1 and input_tokens[k + 1].isupper() and input_tokens[k + 2] == '.':
k += 2
else:
break
output_tokens[-1] += ''.join(input_tokens[i:k])
i += 2
elif tok == "-":
if i < len(input_tokens) - 1 and input_tokens[i + 1] == "-":
output_tokens.append("--")
i += 2
elif i == len(input_tokens) - 1 or i == 0:
output_tokens.append("-")
i += 1
elif output_tokens[-1] not in string.punctuation and input_tokens[i + 1][0] not in string.punctuation:
output_tokens[-1] += "-"
i += 1
flag_prev_dash = True
else:
output_tokens.append("-")
i += 1
elif prev_dash and len(output_tokens) > 0 and tok[0] not in string.punctuation:
output_tokens[-1] += tok
i += 1
else:
output_tokens.append(tok)
i += 1
prev_dash = flag_prev_dash
return " ".join(output_tokens)
def process_eval(eval_fn):
gold_list = []
with open(args.gold, "r", encoding="utf-8") as f_in:
for l in f_in:
line = l.strip()
gold_list.append(line)
pred_list = []
with open(eval_fn, "r", encoding="utf-8") as f_in:
for l in f_in:
buf = []
sentence = fix_tokenization(l.strip()).replace('1', '#')
buf.append(sentence)
if args.trunc_len:
num_left = args.trunc_len
trunc_list = []
for bit in buf:
tk_list = bit.split()
n = min(len(tk_list), num_left)
trunc_list.append(' '.join(tk_list[:n]))
num_left -= n
if num_left <= 0:
break
else:
trunc_list = buf
line = "\n".join(trunc_list)
pred_list.append(line)
with open(eval_fn+'.post', 'w', encoding='utf-8') as f_out:
for l in pred_list:
f_out.write(l.strip())
f_out.write('\n')
# rouge scores
if len(pred_list) < len(gold_list):
# evaluate subset
gold_list = gold_list[:len(pred_list)]
assert len(pred_list) == len(gold_list)
if args.perl:
scores = test_rouge(pred_list, gold_list)
else:
scores = evaluator.get_scores(pred_list, [[it] for it in gold_list])
return eval_fn, scores
def main():
if args.perl:
eval_fn_list = list(glob.glob(args.pred))
else:
eval_fn_list = [eval_fn for eval_fn in glob.glob(args.pred) if not(
args.lazy_eval and Path(eval_fn+".rouge").exists())]
eval_fn_list = list(filter(lambda fn: not(fn.endswith(
'.post') or fn.endswith('.rouge')), eval_fn_list))
if args.only_eval_best:
best_epoch_dict = {}
for dir_path in set(Path(fn).parent for fn in eval_fn_list):
fn_save = os.path.join(dir_path, 'save_best.dev')
if Path(fn_save).exists():
with open(fn_save, 'r') as f_in:
__, o_name, __ = f_in.read().strip().split('\n')
epoch = o_name.split('.')[1]
best_epoch_dict[dir_path] = epoch
new_eval_fn_list = []
for fn in eval_fn_list:
dir_path = Path(fn).parent
if dir_path in best_epoch_dict:
if Path(fn).name.split('.')[1] == best_epoch_dict[dir_path]:
new_eval_fn_list.append(fn)
eval_fn_list = new_eval_fn_list
logger.info("***** Evaluation: %s *****", ','.join(eval_fn_list))
num_pool = min(args.processes, len(eval_fn_list))
p = Pool(num_pool)
r_list = p.imap_unordered(process_eval, eval_fn_list)
r_list = sorted([(fn, scores)
for fn, scores in r_list], key=lambda x: x[0])
rg2_dict = {}
for fn, scores in r_list:
print(fn)
if args.perl:
print(rouge_results_to_str(scores))
else:
rg2_dict[fn] = scores['rouge-2']['f']
print(
"ROUGE-1: {}\tROUGE-2: {}\n".format(scores['rouge-1']['f'], scores['rouge-2']['f']))
with open(fn+".rouge", 'w') as f_out:
f_out.write(json.dumps(
{'rg1': scores['rouge-1']['f'], 'rg2': scores['rouge-2']['f']}))
p.close()
p.join()
if args.save_best:
# find best results
group_dict = {}
for k, v in rg2_dict.items():
d_name, o_name = Path(k).parent, Path(k).name
if (d_name not in group_dict) or (v > group_dict[d_name][1]):
group_dict[d_name] = (o_name, v)
# compare and save the best result
for k, v in group_dict.items():
fn = os.path.join(k, 'save_best.'+args.split)
o_name_s, rst_s = v
should_save = True
if Path(fn).exists():
with open(fn, 'r') as f_in:
rst_f = float(f_in.read().strip().split('\n')[-1])
if rst_s <= rst_f:
should_save = False
if should_save:
with open(fn, 'w') as f_out:
f_out.write('{0}\n{1}\n{2}\n'.format(k, o_name_s, rst_s))
logger.info("Should save: {}".format(json.dumps(v, indent=2)))
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/s2s-ft/evaluations/eval_for_gigaword.py |
from __future__ import print_function, unicode_literals, division
import os
import re
import codecs
import platform
from subprocess import check_output
from tempfile import mkdtemp
from functools import partial
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
from pyrouge.utils import log
from pyrouge.utils.file_utils import verify_dir
REMAP = {"-lrb-": "(", "-rrb-": ")", "-lcb-": "{", "-rcb-": "}",
"-lsb-": "[", "-rsb-": "]", "``": '"', "''": '"'}
def clean(x):
return re.sub(
r"-lrb-|-rrb-|-lcb-|-rcb-|-lsb-|-rsb-|``|''",
lambda m: REMAP.get(m.group()), x)
class DirectoryProcessor:
@staticmethod
def process(input_dir, output_dir, function):
"""
        Apply function to all files in input_dir and save the resulting output
files in output_dir.
"""
if not os.path.exists(output_dir):
os.makedirs(output_dir)
logger = log.get_global_console_logger()
logger.info("Processing files in {}.".format(input_dir))
input_file_names = os.listdir(input_dir)
for input_file_name in input_file_names:
input_file = os.path.join(input_dir, input_file_name)
with codecs.open(input_file, "r", encoding="UTF-8") as f:
input_string = f.read()
output_string = function(input_string)
output_file = os.path.join(output_dir, input_file_name)
with codecs.open(output_file, "w", encoding="UTF-8") as f:
f.write(clean(output_string.lower()))
logger.info("Saved processed files to {}.".format(output_dir))
class Rouge155(object):
"""
This is a wrapper for the ROUGE 1.5.5 summary evaluation package.
This class is designed to simplify the evaluation process by:
1) Converting summaries into a format ROUGE understands.
2) Generating the ROUGE configuration file automatically based
on filename patterns.
This class can be used within Python like this:
rouge = Rouge155()
rouge.system_dir = 'test/systems'
rouge.model_dir = 'test/models'
# The system filename pattern should contain one group that
# matches the document ID.
rouge.system_filename_pattern = 'SL.P.10.R.11.SL062003-(\d+).html'
# The model filename pattern has '#ID#' as a placeholder for the
# document ID. If there are multiple model summaries, pyrouge
# will use the provided regex to automatically match them with
# the corresponding system summary. Here, [A-Z] matches
# multiple model summaries for a given #ID#.
rouge.model_filename_pattern = 'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate()
print(rouge_output)
    output_dict = rouge.output_to_dict(rouge_output)
print(output_dict)
-> {'rouge_1_f_score': 0.95652,
'rouge_1_f_score_cb': 0.95652,
'rouge_1_f_score_ce': 0.95652,
'rouge_1_precision': 0.95652,
[...]
To evaluate multiple systems:
rouge = Rouge155()
rouge.system_dir = '/PATH/TO/systems'
rouge.model_dir = 'PATH/TO/models'
for system_id in ['id1', 'id2', 'id3']:
rouge.system_filename_pattern = \
'SL.P/.10.R.{}.SL062003-(\d+).html'.format(system_id)
rouge.model_filename_pattern = \
'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate(system_id)
print(rouge_output)
"""
def __init__(self, rouge_dir=None, rouge_args=None, temp_dir=None):
"""
Create a Rouge155 object.
rouge_dir: Directory containing Rouge-1.5.5.pl
rouge_args: Arguments to pass through to ROUGE if you
don't want to use the default pyrouge
arguments.
"""
self.temp_dir = temp_dir
self.log = log.get_global_console_logger()
self.__set_dir_properties()
self._config_file = None
self._settings_file = self.__get_config_path()
self.__set_rouge_dir(rouge_dir)
self.args = self.__clean_rouge_args(rouge_args)
self._system_filename_pattern = None
self._model_filename_pattern = None
def save_home_dir(self):
config = ConfigParser()
section = 'pyrouge settings'
config.add_section(section)
config.set(section, 'home_dir', self._home_dir)
with open(self._settings_file, 'w') as f:
config.write(f)
self.log.info("Set ROUGE home directory to {}.".format(self._home_dir))
@property
def settings_file(self):
"""
        Path of the settings file, which stores the ROUGE home dir.
"""
return self._settings_file
@property
def bin_path(self):
"""
The full path of the ROUGE binary (although it's technically
a script), i.e. rouge_home_dir/ROUGE-1.5.5.pl
"""
if self._bin_path is None:
raise Exception(
"ROUGE path not set. Please set the ROUGE home directory "
"and ensure that ROUGE-1.5.5.pl exists in it.")
return self._bin_path
@property
def system_filename_pattern(self):
"""
The regular expression pattern for matching system summary
filenames. The regex string.
E.g. "SL.P.10.R.11.SL062003-(\d+).html" will match the system
filenames in the SPL2003/system folder of the ROUGE SPL example
in the "sample-test" folder.
Currently, there is no support for multiple systems.
"""
return self._system_filename_pattern
@system_filename_pattern.setter
def system_filename_pattern(self, pattern):
self._system_filename_pattern = pattern
@property
def model_filename_pattern(self):
"""
The regular expression pattern for matching model summary
filenames. The pattern needs to contain the string "#ID#",
which is a placeholder for the document ID.
E.g. "SL.P.10.R.[A-Z].SL062003-#ID#.html" will match the model
filenames in the SPL2003/system folder of the ROUGE SPL
example in the "sample-test" folder.
"#ID#" is a placeholder for the document ID which has been
matched by the "(\d+)" part of the system filename pattern.
The different model summaries for a given document ID are
matched by the "[A-Z]" part.
"""
return self._model_filename_pattern
@model_filename_pattern.setter
def model_filename_pattern(self, pattern):
self._model_filename_pattern = pattern
@property
def config_file(self):
return self._config_file
@config_file.setter
def config_file(self, path):
config_dir, _ = os.path.split(path)
verify_dir(config_dir, "configuration file")
self._config_file = path
def split_sentences(self):
"""
ROUGE requires texts split into sentences. In case the texts
are not already split, this method can be used.
"""
from pyrouge.utils.sentence_splitter import PunktSentenceSplitter
self.log.info("Splitting sentences.")
ss = PunktSentenceSplitter()
def sent_split_to_string(s): return "\n".join(ss.split(s))
process_func = partial(
DirectoryProcessor.process, function=sent_split_to_string)
self.__process_summaries(process_func)
@staticmethod
def convert_summaries_to_rouge_format(input_dir, output_dir):
"""
Convert all files in input_dir into a format ROUGE understands
and saves the files to output_dir. The input files are assumed
to be plain text with one sentence per line.
input_dir: Path of directory containing the input files.
output_dir: Path of directory in which the converted files
will be saved.
"""
DirectoryProcessor.process(
input_dir, output_dir, Rouge155.convert_text_to_rouge_format)
@staticmethod
def convert_text_to_rouge_format(text, title="dummy title"):
"""
Convert a text to a format ROUGE understands. The text is
assumed to contain one sentence per line.
        text: The text to convert, containing one sentence per line.
title: Optional title for the text. The title will appear
in the converted file, but doesn't seem to have
any other relevance.
Returns: The converted text as string.
"""
sentences = text.split("\n")
sent_elems = [
"<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>"
"{text}</a>".format(i=i, text=sent)
for i, sent in enumerate(sentences, start=1)]
html = """<html>
<head>
<title>{title}</title>
</head>
<body bgcolor="white">
{elems}
</body>
</html>""".format(title=title, elems="\n".join(sent_elems))
return html
@staticmethod
def write_config_static(system_dir, system_filename_pattern,
model_dir, model_filename_pattern,
config_file_path, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their corresponding model summary
files.
pyrouge uses regular expressions to automatically find the
matching model summary files for a given system summary file
(cf. docstrings for system_filename_pattern and
model_filename_pattern).
system_dir: Path of directory containing
system summaries.
system_filename_pattern: Regex string for matching
system summary filenames.
model_dir: Path of directory containing
model summaries.
model_filename_pattern: Regex string for matching model
summary filenames.
config_file_path: Path of the configuration file.
system_id: Optional system ID string which
will appear in the ROUGE output.
"""
system_filenames = [f for f in os.listdir(system_dir)]
system_models_tuples = []
system_filename_pattern = re.compile(system_filename_pattern)
for system_filename in sorted(system_filenames):
match = system_filename_pattern.match(system_filename)
if match:
id = match.groups(0)[0]
model_filenames = [model_filename_pattern.replace('#ID#', id)]
# model_filenames = Rouge155.__get_model_filenames_for_id(
# id, model_dir, model_filename_pattern)
system_models_tuples.append(
(system_filename, sorted(model_filenames)))
if not system_models_tuples:
raise Exception(
"Did not find any files matching the pattern {} "
"in the system summaries directory {}.".format(
system_filename_pattern.pattern, system_dir))
with codecs.open(config_file_path, 'w', encoding='utf-8') as f:
f.write('<ROUGE-EVAL version="1.55">')
for task_id, (system_filename, model_filenames) in enumerate(
system_models_tuples, start=1):
eval_string = Rouge155.__get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames)
f.write(eval_string)
f.write("</ROUGE-EVAL>")
def write_config(self, config_file_path=None, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their matching model summary files.
        This is a non-static version of write_config_static().
config_file_path: Path of the configuration file.
system_id: Optional system ID string which will
appear in the ROUGE output.
"""
if not system_id:
system_id = 1
if (not config_file_path) or (not self._config_dir):
self._config_dir = mkdtemp(dir=self.temp_dir)
config_filename = "rouge_conf.xml"
else:
config_dir, config_filename = os.path.split(config_file_path)
verify_dir(config_dir, "configuration file")
self._config_file = os.path.join(self._config_dir, config_filename)
Rouge155.write_config_static(
self._system_dir, self._system_filename_pattern,
self._model_dir, self._model_filename_pattern,
self._config_file, system_id)
self.log.info(
"Written ROUGE configuration to {}".format(self._config_file))
def evaluate(self, system_id=1, rouge_args=None):
"""
Run ROUGE to evaluate the system summaries in system_dir against
the model summaries in model_dir. The summaries are assumed to
be in the one-sentence-per-line HTML format ROUGE understands.
system_id: Optional system ID which will be printed in
ROUGE's output.
Returns: Rouge output as string.
"""
self.write_config(system_id=system_id)
options = self.__get_options(rouge_args)
command = [self._bin_path] + options
self.log.info(
"Running ROUGE with command {}".format(" ".join(command)))
rouge_output = check_output(command).decode("UTF-8")
return rouge_output
def convert_and_evaluate(self, system_id=1,
split_sentences=False, rouge_args=None):
"""
Convert plain text summaries to ROUGE format and run ROUGE to
evaluate the system summaries in system_dir against the model
summaries in model_dir. Optionally split texts into sentences
in case they aren't already.
This is just a convenience method combining
convert_summaries_to_rouge_format() and evaluate().
split_sentences: Optional argument specifying if
sentences should be split.
system_id: Optional system ID which will be printed
in ROUGE's output.
Returns: ROUGE output as string.
"""
if split_sentences:
self.split_sentences()
self.__write_summaries()
rouge_output = self.evaluate(system_id, rouge_args)
return rouge_output
def output_to_dict(self, output):
"""
Convert the ROUGE output into python dictionary for further
processing.
"""
# 0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)
pattern = re.compile(
r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) "
r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)")
results = {}
for line in output.split("\n"):
match = pattern.match(line)
if match:
sys_id, rouge_type, measure, result, conf_begin, conf_end = \
match.groups()
measure = {
'Average_R': 'recall',
'Average_P': 'precision',
'Average_F': 'f_score'
}[measure]
rouge_type = rouge_type.lower().replace("-", '_')
key = "{}_{}".format(rouge_type, measure)
results[key] = float(result)
results["{}_cb".format(key)] = float(conf_begin)
results["{}_ce".format(key)] = float(conf_end)
return results
###################################################################
# Private methods
def __set_rouge_dir(self, home_dir=None):
"""
        Verify presence of ROUGE-1.5.5.pl and data folder, and set
those paths.
"""
if not home_dir:
self._home_dir = self.__get_rouge_home_dir_from_settings()
else:
self._home_dir = home_dir
self.save_home_dir()
self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl')
self.data_dir = os.path.join(self._home_dir, 'data')
if not os.path.exists(self._bin_path):
raise Exception(
"ROUGE binary not found at {}. Please set the "
"correct path by running pyrouge_set_rouge_path "
"/path/to/rouge/home.".format(self._bin_path))
def __get_rouge_home_dir_from_settings(self):
config = ConfigParser()
with open(self._settings_file) as f:
if hasattr(config, "read_file"):
config.read_file(f)
else:
# use deprecated python 2.x method
config.readfp(f)
rouge_home_dir = config.get('pyrouge settings', 'home_dir')
return rouge_home_dir
@staticmethod
def __get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames):
"""
ROUGE can evaluate several system summaries for a given text
against several model summaries, i.e. there is an m-to-n
relation between system and model summaries. The system
summaries are listed in the <PEERS> tag and the model summaries
in the <MODELS> tag. pyrouge currently only supports one system
summary per text, i.e. it assumes a 1-to-n relation between
system and model summaries.
"""
peer_elems = "<P ID=\"{id}\">{name}</P>".format(
id=system_id, name=system_filename)
model_elems = ["<M ID=\"{id}\">{name}</M>".format(
id=chr(65 + i), name=name)
for i, name in enumerate(model_filenames)]
model_elems = "\n\t\t\t".join(model_elems)
eval_string = """
<EVAL ID="{task_id}">
<MODEL-ROOT>{model_root}</MODEL-ROOT>
<PEER-ROOT>{peer_root}</PEER-ROOT>
<INPUT-FORMAT TYPE="SEE">
</INPUT-FORMAT>
<PEERS>
{peer_elems}
</PEERS>
<MODELS>
{model_elems}
</MODELS>
</EVAL>
""".format(
task_id=task_id,
model_root=model_dir, model_elems=model_elems,
peer_root=system_dir, peer_elems=peer_elems)
return eval_string
def __process_summaries(self, process_func):
"""
Helper method that applies process_func to the files in the
system and model folders and saves the resulting files to new
system and model folders.
"""
temp_dir = mkdtemp(dir=self.temp_dir)
new_system_dir = os.path.join(temp_dir, "system")
os.mkdir(new_system_dir)
new_model_dir = os.path.join(temp_dir, "model")
os.mkdir(new_model_dir)
self.log.info(
"Processing summaries. Saving system files to {} and "
"model files to {}.".format(new_system_dir, new_model_dir))
process_func(self._system_dir, new_system_dir)
process_func(self._model_dir, new_model_dir)
self._system_dir = new_system_dir
self._model_dir = new_model_dir
def __write_summaries(self):
self.log.info("Writing summaries.")
self.__process_summaries(self.convert_summaries_to_rouge_format)
@staticmethod
def __get_model_filenames_for_id(id, model_dir, model_filenames_pattern):
pattern = re.compile(model_filenames_pattern.replace('#ID#', id))
model_filenames = [
f for f in os.listdir(model_dir) if pattern.match(f)]
if not model_filenames:
raise Exception(
"Could not find any model summaries for the system"
" summary with ID {}. Specified model filename pattern was: "
"{}".format(id, model_filenames_pattern))
return model_filenames
def __get_options(self, rouge_args=None):
"""
Get supplied command line arguments for ROUGE or use default
ones.
"""
if self.args:
options = self.args.split()
elif rouge_args:
options = rouge_args.split()
else:
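            # Default ROUGE-1.5.5 flags (summarized to the best of our knowledge): -e points to the
            # ROUGE data directory, -c 95 sets the confidence interval, -m applies stemming,
            # -r 1000 sets the number of bootstrap resamples, -n 2 computes ROUGE-1/2, and -a
            # evaluates all systems.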
options = [
'-e', self._data_dir,
'-c', 95,
# '-2',
# '-1',
# '-U',
'-m',
# '-v',
'-r', 1000,
'-n', 2,
# '-w', 1.2,
'-a',
]
options = list(map(str, options))
options = self.__add_config_option(options)
return options
def __create_dir_property(self, dir_name, docstring):
"""
Generate getter and setter for a directory property.
"""
property_name = "{}_dir".format(dir_name)
private_name = "_" + property_name
setattr(self, private_name, None)
def fget(self):
return getattr(self, private_name)
def fset(self, path):
verify_dir(path, dir_name)
setattr(self, private_name, path)
p = property(fget=fget, fset=fset, doc=docstring)
setattr(self.__class__, property_name, p)
def __set_dir_properties(self):
"""
Automatically generate the properties for directories.
"""
directories = [
("home", "The ROUGE home directory."),
("data", "The path of the ROUGE 'data' directory."),
("system", "Path of the directory containing system summaries."),
("model", "Path of the directory containing model summaries."),
]
for (dirname, docstring) in directories:
self.__create_dir_property(dirname, docstring)
def __clean_rouge_args(self, rouge_args):
"""
Remove enclosing quotation marks, if any.
"""
if not rouge_args:
return
quot_mark_pattern = re.compile('"(.+)"')
match = quot_mark_pattern.match(rouge_args)
if match:
cleaned_args = match.group(1)
return cleaned_args
else:
return rouge_args
def __add_config_option(self, options):
return options + [self._config_file]
def __get_config_path(self):
if platform.system() == "Windows":
parent_dir = os.getenv("APPDATA")
config_dir_name = "pyrouge"
elif os.name == "posix":
parent_dir = os.path.expanduser("~")
config_dir_name = ".pyrouge"
else:
parent_dir = os.path.dirname(__file__)
config_dir_name = ""
config_dir = os.path.join(parent_dir, config_dir_name)
if not os.path.exists(config_dir):
os.makedirs(config_dir)
return os.path.join(config_dir, 'settings.ini')
if __name__ == "__main__":
import argparse
from utils.argparsers import rouge_path_parser
parser = argparse.ArgumentParser(parents=[rouge_path_parser])
args = parser.parse_args()
rouge = Rouge155(args.rouge_home)
rouge.save_home_dir()
| EXA-1-master | exa/models/unilm-master/s2s-ft/evaluations/bs_pyrouge.py |
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import glob
import json
import argparse
import math
import string
from multiprocessing import Pool, cpu_count
from tqdm import tqdm, trange
from pathlib import Path
import numpy as np
# pip install py-rouge
import rouge
import time
import tempfile
import shutil
# pip install pyrouge
from evaluations.bs_pyrouge import Rouge155
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--gold", type=str, help="Gold output file.")
parser.add_argument("--pred", type=str, help="Input prediction file.")
parser.add_argument("--split", type=str, default="",
help="Data split (train/dev/test).")
parser.add_argument("--save_best", action='store_true',
help="Save best epoch.")
parser.add_argument("--only_eval_best", action='store_true',
help="Only evaluate best epoch.")
parser.add_argument("--trunc_len", type=int, default=60,
help="Truncate line by the maximum length.")
parser.add_argument("--duplicate_rate", type=float, default=0.7,
help="If the duplicat rate (compared with history) is large, we can discard the current sentence.")
default_process_count = max(1, cpu_count() - 1)
parser.add_argument("--processes", type=int, default=default_process_count,
help="Number of processes to use (default %(default)s)")
parser.add_argument("--perl", action='store_true',
help="Using the perl script.")
parser.add_argument('--lazy_eval', action='store_true',
help="Skip evaluation if the .rouge file exists.")
args = parser.parse_args()
SPECIAL_TOKEN = ["[UNK]", "[PAD]", "[CLS]", "[MASK]"]
evaluator = rouge.Rouge(metrics=['rouge-n', 'rouge-l'], max_n=2,
limit_length=False, apply_avg=True, weight_factor=1.2)
def test_rouge(cand, ref):
temp_dir = tempfile.mkdtemp()
candidates = cand
references = ref
assert len(candidates) == len(references)
cnt = len(candidates)
current_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
tmp_dir = os.path.join(temp_dir, "rouge-tmp-{}".format(current_time))
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
os.mkdir(tmp_dir + "/candidate")
os.mkdir(tmp_dir + "/reference")
try:
for i in range(cnt):
if len(references[i]) < 1:
continue
with open(tmp_dir + "/candidate/cand.{}.txt".format(i), "w",
encoding="utf-8") as f:
f.write(candidates[i])
with open(tmp_dir + "/reference/ref.{}.txt".format(i), "w",
encoding="utf-8") as f:
f.write(references[i])
r = Rouge155(temp_dir=temp_dir)
r.model_dir = tmp_dir + "/reference/"
r.system_dir = tmp_dir + "/candidate/"
r.model_filename_pattern = 'ref.#ID#.txt'
r.system_filename_pattern = r'cand.(\d+).txt'
rouge_results = r.convert_and_evaluate()
print(rouge_results)
results_dict = r.output_to_dict(rouge_results)
finally:
if os.path.isdir(tmp_dir):
shutil.rmtree(tmp_dir)
return results_dict
def rouge_results_to_str(results_dict):
return ">> ROUGE-F(1/2/l): {:.2f}/{:.2f}/{:.2f}\nROUGE-R(1/2/3/l): {:.2f}/{:.2f}/{:.2f}\n".format(
results_dict["rouge_1_f_score"] * 100,
results_dict["rouge_2_f_score"] * 100,
results_dict["rouge_l_f_score"] * 100,
results_dict["rouge_1_recall"] * 100,
results_dict["rouge_2_recall"] * 100,
results_dict["rouge_l_recall"] * 100
)
def count_tokens(tokens):
counter = {}
for t in tokens:
if t in counter.keys():
counter[t] += 1
else:
counter[t] = 1
return counter
def get_f1(text_a, text_b):
tokens_a = text_a.lower().split()
tokens_b = text_b.lower().split()
if len(tokens_a) == 0 or len(tokens_b) == 0:
return 1 if len(tokens_a) == len(tokens_b) else 0
set_a = count_tokens(tokens_a)
set_b = count_tokens(tokens_b)
match = 0
for token in set_a.keys():
if token in set_b.keys():
match += min(set_a[token], set_b[token])
p = match / len(tokens_a)
r = match / len(tokens_b)
return 2.0 * p * r / (p + r + 1e-5)
_tok_dict = {"(": "-LRB-", ")": "-RRB-",
"[": "-LSB-", "]": "-RSB-",
"{": "-LCB-", "}": "-RCB-"}
def _is_digit(w):
for ch in w:
if not(ch.isdigit() or ch == ','):
return False
return True
def fix_tokenization(text):
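    # PTB-style detokenization fix-ups, including quote and dash merging. Illustrative example
    # (not from the repo):
    #   'he said , " state - of - the - art "'  ->  "he said , `` state-of-the-art ''"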
input_tokens = text.split()
output_tokens = []
has_left_quote = False
has_left_single_quote = False
i = 0
prev_dash = False
while i < len(input_tokens):
tok = input_tokens[i]
flag_prev_dash = False
if tok in _tok_dict.keys():
output_tokens.append(_tok_dict[tok])
i += 1
elif tok == "\"":
if has_left_quote:
output_tokens.append("''")
else:
output_tokens.append("``")
has_left_quote = not has_left_quote
i += 1
elif tok == "'" and len(output_tokens) > 0 and output_tokens[-1].endswith("n") and i < len(input_tokens) - 1 and input_tokens[i + 1] == "t":
output_tokens[-1] = output_tokens[-1][:-1]
output_tokens.append("n't")
i += 2
elif tok == "'" and i < len(input_tokens) - 1 and input_tokens[i + 1] in ("s", "d", "ll"):
output_tokens.append("'"+input_tokens[i + 1])
i += 2
elif tok == "'":
if has_left_single_quote:
output_tokens.append("'")
else:
output_tokens.append("`")
has_left_single_quote = not has_left_single_quote
i += 1
elif tok == "." and i < len(input_tokens) - 2 and input_tokens[i + 1] == "." and input_tokens[i + 2] == ".":
output_tokens.append("...")
i += 3
elif tok == "," and len(output_tokens) > 0 and _is_digit(output_tokens[-1]) and i < len(input_tokens) - 1 and _is_digit(input_tokens[i + 1]):
# $ 3 , 000 -> $ 3,000
output_tokens[-1] += ','+input_tokens[i + 1]
i += 2
elif tok == "." and len(output_tokens) > 0 and output_tokens[-1].isdigit() and i < len(input_tokens) - 1 and input_tokens[i + 1].isdigit():
            # 3 . 03 -> 3.03
output_tokens[-1] += '.'+input_tokens[i + 1]
i += 2
elif tok == "." and len(output_tokens) > 0 and len(output_tokens[-1]) == 1 and output_tokens[-1].isupper() and i < len(input_tokens) - 2 and len(input_tokens[i + 1]) == 1 and input_tokens[i + 1].isupper() and input_tokens[i + 2] == '.':
# U . N . -> U.N.
k = i+3
while k+2 < len(input_tokens):
if len(input_tokens[k + 1]) == 1 and input_tokens[k + 1].isupper() and input_tokens[k + 2] == '.':
k += 2
else:
break
output_tokens[-1] += ''.join(input_tokens[i:k])
i += 2
elif tok == "-":
if i < len(input_tokens) - 1 and input_tokens[i + 1] == "-":
output_tokens.append("--")
i += 2
elif i == len(input_tokens) - 1 or i == 0:
output_tokens.append("-")
i += 1
elif output_tokens[-1] not in string.punctuation and input_tokens[i + 1][0] not in string.punctuation:
output_tokens[-1] += "-"
i += 1
flag_prev_dash = True
else:
output_tokens.append("-")
i += 1
elif prev_dash and len(output_tokens) > 0 and tok[0] not in string.punctuation:
output_tokens[-1] += tok
i += 1
else:
output_tokens.append(tok)
i += 1
prev_dash = flag_prev_dash
return " ".join(output_tokens)
def remove_duplicate(l_list, duplicate_rate):
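    # Drop a sentence when too many of its unique tokens already appeared in kept sentences.
    # Illustrative example (not from the repo), with duplicate_rate=0.7:
    #   ["the cat sat", "the cat sat down", "a dog ran"] -> ["the cat sat", "a dog ran"]
    #   ("the cat sat down" overlaps 3/4 = 0.75 > 0.7 with the kept-token history, so it is dropped)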
tk_list = [l.lower().split() for l in l_list]
r_list = []
history_set = set()
for i, w_list in enumerate(tk_list):
w_set = set(w_list)
if len(w_set & history_set)/len(w_set) <= duplicate_rate:
r_list.append(l_list[i])
history_set |= w_set
return r_list
def process_eval(eval_fn):
gold_list = []
with open(args.gold, "r", encoding="utf-8") as f_in:
for l in f_in:
line = l.strip().replace(" <S_SEP> ", '\n')
gold_list.append(line)
pred_list = []
with open(eval_fn, "r", encoding="utf-8") as f_in:
for l in f_in:
buf = []
for sentence in l.strip().split("[X_SEP]"):
sentence = fix_tokenization(sentence)
sentence = sentence.replace("(", " -LRB- ").replace(")", " -RRB- ")
sentence = sentence.replace("[", " -LSB- ").replace("]", " -RSB- ")
while " " in sentence:
sentence = sentence.replace(" ", " ")
if any(get_f1(sentence, s) > 1.0 for s in buf):
continue
s_len = len(sentence.split())
if s_len <= 4:
continue
buf.append(sentence)
if args.duplicate_rate and args.duplicate_rate < 1:
buf = remove_duplicate(buf, args.duplicate_rate)
if args.trunc_len:
num_left = args.trunc_len
trunc_list = []
for bit in buf:
tk_list = bit.split()
n = min(len(tk_list), num_left)
trunc_list.append(' '.join(tk_list[:n]))
num_left -= n
if num_left <= 0:
break
else:
trunc_list = buf
line = "\n".join(trunc_list)
pred_list.append(line)
with open(eval_fn+'.post', 'w', encoding='utf-8') as f_out:
for l in pred_list:
f_out.write(l.replace('\n', ' [X_SEP] ').strip())
f_out.write('\n')
# rouge scores
if len(pred_list) < len(gold_list):
# evaluate subset
gold_list = gold_list[:len(pred_list)]
assert len(pred_list) == len(gold_list)
if args.perl:
scores = test_rouge(pred_list, gold_list)
else:
scores = evaluator.get_scores(pred_list, [[it] for it in gold_list])
return eval_fn, scores
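# File formats assumed by process_eval above (inferred from the code, not an official spec):
#   * args.gold : one reference per line, sentences joined by " <S_SEP> "
#   * eval_fn   : one system output per line, sentences joined by "[X_SEP]"
# The post-processed predictions are additionally written to eval_fn + ".post"
# (sentences re-joined by " [X_SEP] "); the ROUGE scores themselves are computed
# from the in-memory pred_list / gold_list.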
def main():
if args.perl:
eval_fn_list = list(glob.glob(args.pred))
else:
eval_fn_list = [eval_fn for eval_fn in glob.glob(args.pred) if not(
args.lazy_eval and Path(eval_fn+".rouge").exists())]
eval_fn_list = list(filter(lambda fn: not(fn.endswith(
'.post') or fn.endswith('.rouge')), eval_fn_list))
if args.only_eval_best:
best_epoch_dict = {}
for dir_path in set(Path(fn).parent for fn in eval_fn_list):
fn_save = os.path.join(dir_path, 'save_best.dev')
if Path(fn_save).exists():
with open(fn_save, 'r') as f_in:
__, o_name, __ = f_in.read().strip().split('\n')
epoch = o_name.split('.')[1]
best_epoch_dict[dir_path] = epoch
new_eval_fn_list = []
for fn in eval_fn_list:
dir_path = Path(fn).parent
if dir_path in best_epoch_dict:
if Path(fn).name.split('.')[1] == best_epoch_dict[dir_path]:
new_eval_fn_list.append(fn)
eval_fn_list = new_eval_fn_list
logger.info("***** Evaluation: %s *****", ','.join(eval_fn_list))
num_pool = min(args.processes, len(eval_fn_list))
p = Pool(num_pool)
r_list = p.imap_unordered(process_eval, eval_fn_list)
r_list = sorted([(fn, scores)
for fn, scores in r_list], key=lambda x: x[0])
rg2_dict = {}
for fn, scores in r_list:
print(fn)
if args.perl:
print(rouge_results_to_str(scores))
else:
rg2_dict[fn] = scores['rouge-2']['f']
print(
"ROUGE-1: {}\tROUGE-2: {}\n".format(scores['rouge-1']['f'], scores['rouge-2']['f']))
with open(fn+".rouge", 'w') as f_out:
f_out.write(json.dumps(
{'rg1': scores['rouge-1']['f'], 'rg2': scores['rouge-2']['f']}))
p.close()
p.join()
if args.save_best:
# find best results
group_dict = {}
for k, v in rg2_dict.items():
d_name, o_name = Path(k).parent, Path(k).name
if (d_name not in group_dict) or (v > group_dict[d_name][1]):
group_dict[d_name] = (o_name, v)
# compare and save the best result
for k, v in group_dict.items():
fn = os.path.join(k, 'save_best.'+args.split)
o_name_s, rst_s = v
should_save = True
if Path(fn).exists():
with open(fn, 'r') as f_in:
rst_f = float(f_in.read().strip().split('\n')[-1])
if rst_s <= rst_f:
should_save = False
if should_save:
with open(fn, 'w') as f_out:
f_out.write('{0}\n{1}\n{2}\n'.format(k, o_name_s, rst_s))
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/s2s-ft/evaluations/eval_for_cnndm.py |
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from transformers import BertConfig, RobertaConfig
from s2s_ft.configuration_unilm import UnilmConfig
logger = logging.getLogger(__name__)
class BertForSeq2SeqConfig(BertConfig):
def __init__(self, label_smoothing=0.1, source_type_id=0, target_type_id=1,
rel_pos_bins=0, max_rel_pos=0, fix_word_embedding=False, **kwargs):
super(BertForSeq2SeqConfig, self).__init__(**kwargs)
self.label_smoothing = label_smoothing
self.source_type_id = source_type_id
self.target_type_id = target_type_id
self.max_rel_pos = max_rel_pos
self.rel_pos_bins = rel_pos_bins
self.fix_word_embedding = fix_word_embedding
@classmethod
def from_exist_config(cls, config, label_smoothing=0.1, max_position_embeddings=None, fix_word_embedding=False):
required_keys = [
"vocab_size", "hidden_size", "num_hidden_layers", "num_attention_heads",
"hidden_act", "intermediate_size", "hidden_dropout_prob", "attention_probs_dropout_prob",
"max_position_embeddings", "type_vocab_size", "initializer_range", "layer_norm_eps",
]
kwargs = {}
for key in required_keys:
assert hasattr(config, key)
kwargs[key] = getattr(config, key)
kwargs["vocab_size_or_config_json_file"] = kwargs["vocab_size"]
if isinstance(config, RobertaConfig):
kwargs["type_vocab_size"] = 0
kwargs["max_position_embeddings"] = kwargs["max_position_embeddings"] - 2
additional_keys = [
"source_type_id", "target_type_id", "rel_pos_bins", "max_rel_pos",
]
for key in additional_keys:
if hasattr(config, key):
kwargs[key] = getattr(config, key)
if max_position_embeddings is not None and max_position_embeddings > config.max_position_embeddings:
kwargs["max_position_embeddings"] = max_position_embeddings
logger.info(" ** Change max position embeddings to %d ** " % max_position_embeddings)
return cls(label_smoothing=label_smoothing, fix_word_embedding=fix_word_embedding, **kwargs)
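# Illustrative usage (a minimal sketch; "bert-base-uncased" is only an example
# checkpoint name):
#   from transformers import BertConfig
#   base_config = BertConfig.from_pretrained("bert-base-uncased")
#   s2s_config = BertForSeq2SeqConfig.from_exist_config(
#       base_config, label_smoothing=0.1, max_position_embeddings=512)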
| EXA-1-master | exa/models/unilm-master/s2s-ft/s2s_ft/config.py |
# coding=utf-8
# The MIT License (MIT)
# Copyright (c) Microsoft Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
""" MiniLM model configuration """
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import sys
from io import open
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
MINILM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'minilm-l12-h384-uncased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/minilm-l12-h384-uncased-config.json",
}
class MinilmConfig(PretrainedConfig):
r"""
:class:`~transformers.MinilmConfig` is the configuration class to store the configuration of a
`MinilmModel`.
Arguments:
        vocab_size: Vocabulary size of `input_ids` in `MiniLMModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu", "swish" and "gelu_new" are supported.
        hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`MiniLMModel`.
        initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
layer_norm_eps: The epsilon used by LayerNorm.
"""
pretrained_config_archive_map = MINILM_PRETRAINED_CONFIG_ARCHIVE_MAP
def __init__(self,
vocab_size=28996,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=6,
initializer_range=0.02,
layer_norm_eps=1e-12,
source_type_id=0,
target_type_id=1,
**kwargs):
super(MinilmConfig, self).__init__(**kwargs)
if isinstance(vocab_size, str) or (sys.version_info[0] == 2
and isinstance(vocab_size, unicode)):
with open(vocab_size, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size, int):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.source_type_id = source_type_id
self.target_type_id = target_type_id
else:
raise ValueError("First argument must be either a vocabulary size (int)"
" or the path to a pretrained model config file (str)")
| EXA-1-master | exa/models/unilm-master/s2s-ft/s2s_ft/configuration_minilm.py |
# coding=utf-8
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import numpy as np
from functools import partial
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
import torch.nn.functional as F
from transformers.file_utils import cached_path
from torch.nn.modules.loss import _Loss
class LabelSmoothingLoss(_Loss):
"""
With label smoothing,
KL-divergence between q_{smoothed ground truth prob.}(w)
and p_{prob. computed by model}(w) is minimized.
"""
def __init__(self, label_smoothing=0, tgt_vocab_size=0, ignore_index=0, size_average=None, reduce=None,
reduction='mean'):
assert 0.0 < label_smoothing <= 1.0
self.ignore_index = ignore_index
super(LabelSmoothingLoss, self).__init__(
size_average=size_average, reduce=reduce, reduction=reduction)
assert label_smoothing > 0
assert tgt_vocab_size > 0
smoothing_value = label_smoothing / (tgt_vocab_size - 2)
one_hot = torch.full((tgt_vocab_size,), smoothing_value)
one_hot[self.ignore_index] = 0
self.register_buffer('one_hot', one_hot.unsqueeze(0))
self.confidence = 1.0 - label_smoothing
self.tgt_vocab_size = tgt_vocab_size
def forward(self, output, target):
"""
output (FloatTensor): batch_size * num_pos * n_classes
target (LongTensor): batch_size * num_pos
"""
assert self.tgt_vocab_size == output.size(2)
batch_size, num_pos = target.size(0), target.size(1)
output = output.view(-1, self.tgt_vocab_size)
target = target.view(-1)
model_prob = self.one_hot.repeat(target.size(0), 1)
model_prob.scatter_(1, target.unsqueeze(1), self.confidence)
model_prob.masked_fill_((target == self.ignore_index).unsqueeze(1), 0)
return F.kl_div(output, model_prob, reduction='none').view(batch_size, num_pos, -1).sum(2)
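# Illustrative usage (a minimal sketch; `logits` and `target_ids` are placeholder
# tensors). Note that the `output` argument must hold log-probabilities, because
# F.kl_div interprets its first argument in log space:
#   crit = LabelSmoothingLoss(label_smoothing=0.1, tgt_vocab_size=30522, ignore_index=0)
#   log_probs = F.log_softmax(logits, dim=-1)   # (batch, num_pos, vocab)
#   per_pos_loss = crit(log_probs, target_ids)  # (batch, num_pos)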
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
'unilm-base-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1-base-cased.bin",
'unilm-large-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1-large-cased.bin",
'unilm1-base-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1-base-cased.bin",
'unilm1-large-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1-large-cased.bin",
'unilm1.2-base-uncased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1.2-base-uncased.bin"
}
CONFIG_NAME = 'config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class BertConfig(object):
"""Configuration class to store the configuration of a `BertModel`.
"""
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
relax_projection=0,
new_pos_ids=False,
initializer_range=0.02,
task_idx=None,
fp32_embedding=False,
ffn_type=0,
label_smoothing=None,
num_qkv=0,
seg_emb=False,
source_type_id=0,
target_type_id=1,
rel_pos_bins=0,
max_rel_pos=0, **kwargs):
"""Constructs BertConfig.
Args:
            vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.relax_projection = relax_projection
self.new_pos_ids = new_pos_ids
self.initializer_range = initializer_range
self.task_idx = task_idx
self.fp32_embedding = fp32_embedding
self.ffn_type = ffn_type
self.label_smoothing = label_smoothing
self.num_qkv = num_qkv
self.seg_emb = seg_emb
self.source_type_id = source_type_id
self.target_type_id = target_type_id
self.max_rel_pos = max_rel_pos
self.rel_pos_bins = rel_pos_bins
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
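# Illustrative round-trip (a minimal sketch):
#   config = BertConfig(vocab_size_or_config_json_file=32000)
#   json_str = config.to_json_string()
#   same_config = BertConfig.from_dict(json.loads(json_str))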
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-5):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class PositionalEmbedding(nn.Module):
def __init__(self, demb):
super(PositionalEmbedding, self).__init__()
self.demb = demb
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
self.register_buffer('inv_freq', inv_freq)
def forward(self, pos_seq, bsz=None):
sinusoid_inp = torch.ger(pos_seq, self.inv_freq)
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
if bsz is not None:
return pos_emb[:, None, :].expand(-1, bsz, -1)
else:
return pos_emb[:, None, :]
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(
config.vocab_size, config.hidden_size)
if config.type_vocab_size == 0:
self.token_type_embeddings = None
else:
self.token_type_embeddings = nn.Embedding(
config.type_vocab_size, config.hidden_size)
if hasattr(config, 'fp32_embedding'):
self.fp32_embedding = config.fp32_embedding
else:
self.fp32_embedding = False
if hasattr(config, 'new_pos_ids') and config.new_pos_ids:
self.num_pos_emb = 4
else:
self.num_pos_emb = 1
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size * self.num_pos_emb)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None, position_ids=None, task_idx=None):
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(
seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
if self.num_pos_emb > 1:
num_batch = position_embeddings.size(0)
num_pos = position_embeddings.size(1)
position_embeddings = position_embeddings.view(
num_batch, num_pos, self.num_pos_emb, -1)[torch.arange(0, num_batch).long(), :, task_idx, :]
embeddings = words_embeddings + position_embeddings
if self.token_type_embeddings is not None:
embeddings = embeddings + self.token_type_embeddings(token_type_ids)
if self.fp32_embedding:
embeddings = embeddings.half()
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(
config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
if hasattr(config, 'num_qkv') and (config.num_qkv > 1):
self.num_qkv = config.num_qkv
else:
self.num_qkv = 1
self.query = nn.Linear(
config.hidden_size, self.all_head_size * self.num_qkv)
self.key = nn.Linear(config.hidden_size,
self.all_head_size * self.num_qkv)
self.value = nn.Linear(
config.hidden_size, self.all_head_size * self.num_qkv)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.uni_debug_flag = True if os.getenv(
'UNI_DEBUG_FLAG', '') else False
if self.uni_debug_flag:
self.register_buffer('debug_attention_probs',
torch.zeros((512, 512)))
if hasattr(config, 'seg_emb') and config.seg_emb:
self.b_q_s = nn.Parameter(torch.zeros(
1, self.num_attention_heads, 1, self.attention_head_size))
self.seg_emb = nn.Embedding(
config.type_vocab_size, self.all_head_size)
else:
self.b_q_s = None
self.seg_emb = None
def transpose_for_scores(self, x, mask_qkv=None):
if self.num_qkv > 1:
sz = x.size()[:-1] + (self.num_qkv,
self.num_attention_heads, self.all_head_size)
# (batch, pos, num_qkv, head, head_hid)
x = x.view(*sz)
if mask_qkv is None:
x = x[:, :, 0, :, :]
elif isinstance(mask_qkv, int):
x = x[:, :, mask_qkv, :, :]
else:
# mask_qkv: (batch, pos)
if mask_qkv.size(1) > sz[1]:
mask_qkv = mask_qkv[:, :sz[1]]
# -> x: (batch, pos, head, head_hid)
x = x.gather(2, mask_qkv.view(sz[0], sz[1], 1, 1, 1).expand(
sz[0], sz[1], 1, sz[3], sz[4])).squeeze(2)
else:
sz = x.size()[:-1] + (self.num_attention_heads,
self.attention_head_size)
# (batch, pos, head, head_hid)
x = x.view(*sz)
# (batch, head, pos, head_hid)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask, history_states=None,
mask_qkv=None, seg_ids=None, key_history=None, value_history=None,
key_cache=None, value_cache=None, rel_pos=None,
):
if history_states is None:
mixed_query_layer = self.query(hidden_states)
# possible issue: https://github.com/NVIDIA/apex/issues/131
mixed_key_layer = F.linear(hidden_states, self.key.weight)
mixed_value_layer = self.value(hidden_states)
else:
x_states = torch.cat((history_states, hidden_states), dim=1)
mixed_query_layer = self.query(hidden_states)
# possible issue: https://github.com/NVIDIA/apex/issues/131
mixed_key_layer = F.linear(x_states, self.key.weight)
mixed_value_layer = self.value(x_states)
if key_cache is not None and isinstance(key_cache, list):
key_cache.append(mixed_key_layer)
mixed_key_layer = torch.cat(key_cache, dim=1)
if value_cache is not None and isinstance(value_cache, list):
value_cache.append(mixed_value_layer)
mixed_value_layer = torch.cat(value_cache, dim=1)
query_layer = self.transpose_for_scores(mixed_query_layer, mask_qkv)
key_layer = self.transpose_for_scores(mixed_key_layer, mask_qkv)
value_layer = self.transpose_for_scores(mixed_value_layer, mask_qkv)
if key_history is not None and not isinstance(key_history, list):
key_layer = torch.cat((key_history, key_layer), dim=-2)
value_layer = torch.cat((value_history, value_layer), dim=-2)
# Take the dot product between "query" and "key" to get the raw attention scores.
# (batch, head, pos, pos)
attention_scores = torch.matmul(
query_layer / math.sqrt(self.attention_head_size), key_layer.transpose(-1, -2))
if rel_pos is not None:
attention_scores = attention_scores + rel_pos
if self.seg_emb is not None:
seg_rep = self.seg_emb(seg_ids)
# (batch, pos, head, head_hid)
seg_rep = seg_rep.view(seg_rep.size(0), seg_rep.size(
1), self.num_attention_heads, self.attention_head_size)
qs = torch.einsum('bnih,bjnh->bnij',
query_layer + self.b_q_s, seg_rep)
attention_scores = attention_scores + qs
# attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
if self.uni_debug_flag:
_pos = attention_probs.size(-1)
self.debug_attention_probs[:_pos, :_pos].copy_(
attention_probs[0].mean(0).view(_pos, _pos))
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[
:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
if isinstance(key_history, list):
key_history.append(key_layer)
if isinstance(value_history, list):
value_history.append(value_layer)
return context_layer
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask, history_states=None,
mask_qkv=None, seg_ids=None, key_history=None, value_history=None, rel_pos=None):
self_output = self.self(
input_tensor, attention_mask, history_states=history_states,
mask_qkv=mask_qkv, seg_ids=seg_ids, key_history=key_history, value_history=value_history, rel_pos=rel_pos)
attention_output = self.output(self_output, input_tensor)
return attention_output
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
self.intermediate_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class TransformerFFN(nn.Module):
def __init__(self, config):
super(TransformerFFN, self).__init__()
self.ffn_type = config.ffn_type
assert self.ffn_type in (1, 2)
if self.ffn_type in (1, 2):
self.wx0 = nn.Linear(config.hidden_size, config.hidden_size)
if self.ffn_type in (2,):
self.wx1 = nn.Linear(config.hidden_size, config.hidden_size)
if self.ffn_type in (1, 2):
self.output = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, x):
if self.ffn_type in (1, 2):
x0 = self.wx0(x)
if self.ffn_type == 1:
x1 = x
elif self.ffn_type == 2:
x1 = self.wx1(x)
out = self.output(x0 * x1)
out = self.dropout(out)
out = self.LayerNorm(out + x)
return out
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.ffn_type = config.ffn_type
if self.ffn_type:
self.ffn = TransformerFFN(config)
else:
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask, history_states=None,
mask_qkv=None, seg_ids=None, key_history=None, value_history=None, rel_pos=None):
attention_output = self.attention(
hidden_states, attention_mask, history_states=history_states,
mask_qkv=mask_qkv, seg_ids=seg_ids, key_history=key_history, value_history=value_history, rel_pos=rel_pos)
if self.ffn_type:
layer_output = self.ffn(attention_output)
else:
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
layer = BertLayer(config)
self.layer = nn.ModuleList([copy.deepcopy(layer)
for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True,
prev_embedding=None, prev_encoded_layers=None, mask_qkv=None,
seg_ids=None, key_history=None, value_history=None, rel_pos=None):
        # history embedding and encoded layers must be given simultaneously
assert (prev_embedding is None) == (prev_encoded_layers is None)
all_encoder_layers = []
if (prev_embedding is not None) and (prev_encoded_layers is not None):
history_states = prev_embedding
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(
hidden_states, attention_mask, history_states=history_states,
mask_qkv=mask_qkv, seg_ids=seg_ids, rel_pos=rel_pos)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if prev_encoded_layers is not None:
history_states = prev_encoded_layers[i]
else:
for i, layer_module in enumerate(self.layer):
set_key = None
if isinstance(key_history, list):
set_key = key_history if len(key_history) < len(self.layer) else key_history[i]
set_value = None
if isinstance(value_history, list):
                    set_value = value_history if len(value_history) < len(self.layer) else value_history[i]
hidden_states = layer_module(
hidden_states, attention_mask, mask_qkv=mask_qkv, seg_ids=seg_ids,
key_history=set_key, value_history=set_value, rel_pos=rel_pos)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
return all_encoder_layers
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.transform_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
hid_size = config.hidden_size
if hasattr(config, 'relax_projection') and (config.relax_projection > 1):
hid_size *= config.relax_projection
self.dense = nn.Linear(config.hidden_size, hid_size)
self.LayerNorm = BertLayerNorm(hid_size, eps=1e-5)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
bert_model_embedding_weights.size(0),
bias=False)
self.decoder.weight = bert_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(
bert_model_embedding_weights.size(0)))
if hasattr(config, 'relax_projection') and (config.relax_projection > 1):
self.relax_projection = config.relax_projection
else:
self.relax_projection = 0
self.fp32_embedding = config.fp32_embedding
def convert_to_type(tensor):
if self.fp32_embedding:
return tensor.half()
else:
return tensor
self.type_converter = convert_to_type
self.converted = False
def forward(self, hidden_states, task_idx=None):
if not self.converted:
self.converted = True
if self.fp32_embedding:
self.transform.half()
hidden_states = self.transform(self.type_converter(hidden_states))
if self.relax_projection > 1:
num_batch = hidden_states.size(0)
num_pos = hidden_states.size(1)
# (batch, num_pos, relax_projection*hid) -> (batch, num_pos, relax_projection, hid) -> (batch, num_pos, hid)
hidden_states = hidden_states.view(
num_batch, num_pos, self.relax_projection, -1)[torch.arange(0, num_batch).long(), :, task_idx, :]
if self.fp32_embedding:
hidden_states = F.linear(self.type_converter(hidden_states), self.type_converter(
self.decoder.weight), self.type_converter(self.bias))
else:
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(
config, bert_model_embedding_weights)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super(BertOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config, bert_model_embedding_weights, num_labels=2):
super(BertPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(
config, bert_model_embedding_weights)
self.seq_relationship = nn.Linear(config.hidden_size, num_labels)
def forward(self, sequence_output, pooled_output, task_idx=None):
prediction_scores = self.predictions(sequence_output, task_idx)
if pooled_output is None:
seq_relationship_score = None
else:
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class PreTrainedBertModel(nn.Module):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(PreTrainedBertModel, self).__init__()
if not isinstance(config, BertConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
"To create a model from a Google pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_bert_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
# module.weight.data.copy_(torch.Tensor(
# truncnorm.rvs(-1, 1, size=list(module.weight.data.shape)) * self.config.initializer_range))
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, pretrained_model_name, config, state_dict=None, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-base-multilingual`
. `bert-base-chinese`
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
"""
logger.info("Model config {}".format(config))
# clean the arguments in kwargs
for arg_clean in ('config_path', 'type_vocab_size', 'relax_projection', 'new_pos_ids', 'task_idx',
'max_position_embeddings', 'fp32_embedding', 'ffn_type', 'label_smoothing',
'hidden_dropout_prob', 'attention_probs_dropout_prob', 'num_qkv', 'seg_emb',
'word_emb_map', 'num_labels', 'num_rel', 'num_sentlvl_labels'):
if arg_clean in kwargs:
del kwargs[arg_clean]
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None:
weights_path = os.path.join(pretrained_model_name, WEIGHTS_NAME)
state_dict = torch.load(weights_path)
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(
prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
model.missing_keys = missing_keys
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
logger.info('\n'.join(error_msgs))
return model
class BertModel(PreTrainedBertModel):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Params:
config: a BertConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
to the last attention block of shape [batch_size, sequence_length, hidden_size],
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated with the first token of the
            input (`CLS`) to train on the Next-Sentence task (see BERT's paper).
"""
def __init__(self, config):
super(BertModel, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.config = config
self.apply(self.init_bert_weights)
def rescale_some_parameters(self):
for layer_id, layer in enumerate(self.encoder.layer):
layer.attention.output.dense.weight.data.div_(
math.sqrt(2.0 * (layer_id + 1)))
layer.output.dense.weight.data.div_(math.sqrt(2.0 * (layer_id + 1)))
def get_extended_attention_mask(self, input_ids, token_type_ids, attention_mask):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
if attention_mask.dim() == 2:
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
elif attention_mask.dim() == 3:
extended_attention_mask = attention_mask.unsqueeze(1)
else:
raise NotImplementedError
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(
dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
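    # Illustrative effect (a minimal sketch): a 2D padding mask [[1, 1, 0]] becomes a
    # [1, 1, 1, 3] tensor [[[[0., 0., -10000.]]]] that is added to the raw attention
    # scores, effectively removing padded positions from the softmax.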
def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True,
mask_qkv=None, task_idx=None, key_history=None, value_history=None, position_ids=None):
extended_attention_mask = self.get_extended_attention_mask(
input_ids, token_type_ids, attention_mask)
embedding_output = self.embeddings(
input_ids, token_type_ids, task_idx=task_idx, position_ids=position_ids)
encoded_layers = self.encoder(embedding_output, extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers,
mask_qkv=mask_qkv, seg_ids=token_type_ids,
key_history=key_history, value_history=value_history)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers, pooled_output
class BertModelIncr(BertModel):
def __init__(self, config):
super(BertModelIncr, self).__init__(config)
if self.config.rel_pos_bins > 0:
self.rel_pos_bias = nn.Linear(self.config.rel_pos_bins, config.num_attention_heads, bias=False)
else:
self.rel_pos_bias = None
def forward(self, input_ids, token_type_ids, position_ids, attention_mask, output_all_encoded_layers=True,
prev_embedding=None, prev_encoded_layers=None, mask_qkv=None, task_idx=None, rel_pos=None):
extended_attention_mask = self.get_extended_attention_mask(
input_ids, token_type_ids, attention_mask)
embedding_output = self.embeddings(
input_ids, token_type_ids, position_ids, task_idx=task_idx)
if self.rel_pos_bias is not None:
# print("Rel pos size = %s" % str(rel_pos.size()))
rel_pos = F.one_hot(rel_pos, num_classes=self.config.rel_pos_bins).type_as(embedding_output)
# print("Rel pos size = %s" % str(rel_pos.size()))
rel_pos = self.rel_pos_bias(rel_pos).permute(0, 3, 1, 2)
# print("Rel pos size = %s" % str(rel_pos.size()))
else:
rel_pos = None
encoded_layers = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers,
prev_embedding=prev_embedding,
prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv,
seg_ids=token_type_ids, rel_pos=rel_pos)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return embedding_output, encoded_layers, pooled_output
class BertForPreTraining(PreTrainedBertModel):
"""BERT model with pre-training heads.
This module comprises the BERT model followed by the two pre-training heads:
- the masked language modeling head, and
- the next sentence classification head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., vocab_size]
`next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
Outputs:
if `masked_lm_labels` and `next_sentence_label` are not `None`:
Outputs the total_loss which is the sum of the masked language modeling loss and the next
sentence classification loss.
if `masked_lm_labels` or `next_sentence_label` is `None`:
Outputs a tuple comprising
- the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and
- the next sentence classification logits of shape [batch_size, 2].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForPreTraining(config)
masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForPreTraining, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(
config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
next_sentence_label=None, mask_qkv=None, task_idx=None):
sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False, mask_qkv=mask_qkv,
task_idx=task_idx)
prediction_scores, seq_relationship_score = self.cls(
sequence_output, pooled_output)
if masked_lm_labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(
prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
next_sentence_loss = loss_fct(
seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
return total_loss
else:
return prediction_scores, seq_relationship_score
class BertPreTrainingPairTransform(nn.Module):
def __init__(self, config):
super(BertPreTrainingPairTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.transform_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
# self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
def forward(self, pair_x, pair_y):
hidden_states = torch.cat([pair_x, pair_y], dim=-1)
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
# hidden_states = self.LayerNorm(hidden_states)
return hidden_states
def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
"""
ret = 0
if bidirectional:
num_buckets //= 2
# mtf.to_int32(mtf.less(n, 0)) * num_buckets
ret += (relative_position > 0).long() * num_buckets
n = torch.abs(relative_position)
else:
n = torch.max(-relative_position, torch.zeros_like(relative_position))
# now n is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = n < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance /
max_exact) * (num_buckets - max_exact)
).to(torch.long)
val_if_large = torch.min(
val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
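# Illustrative behaviour (a minimal sketch, using the defaults num_buckets=32,
# max_distance=128, bidirectional=True): small offsets get exact buckets, larger
# offsets fall into log-spaced buckets, and positive offsets are shifted by
# num_buckets // 2 so left and right context use disjoint buckets.
#   rel = torch.arange(-4, 5).view(1, -1)    # relative positions -4 .. 4
#   relative_position_bucket(rel)            # -> tensor([[4, 3, 2, 1, 0, 17, 18, 19, 20]])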
def get_div_func():
    # a crude fix for floor division that works across multiple torch versions
# https://github.com/microsoft/unilm/issues/297
# Thanks github user @guijuzhejiang, @piskunow and @zengyan-97
x = torch.ones(size=(1,), dtype=torch.long) * 11
try:
# for pytorch 1.8+
div_func = partial(torch.div, rounding_mode='floor')
y = div_func(x, 4)
return div_func
except:
pass
try:
# for pytorch 1.6 & 1.7
div_func = torch.floor_divide
y = div_func(x, 4)
return div_func
except:
pass
div_func = torch.div
y = div_func(x, 4)
if y.dtype != torch.long:
        raise NotImplementedError("Cannot find a working integer floor division function!")
return div_func
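# Illustrative check (a minimal sketch):
#   div = get_div_func()
#   div(torch.tensor([11]), 4)   # -> tensor([2]); integer floor division that works
#                                #    on the torch versions handled above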
class BertForSeq2SeqDecoder(PreTrainedBertModel):
"""refer to BertForPreTraining"""
def __init__(self, config, mask_word_id=0, num_labels=2, num_rel=0,
search_beam_size=1, length_penalty=1.0, eos_id=0, sos_id=0,
forbid_duplicate_ngrams=False, forbid_ignore_set=None, ngram_size=3, min_len=0, mode="s2s",
pos_shift=False):
super(BertForSeq2SeqDecoder, self).__init__(config)
self.bert = BertModelIncr(config)
self.cls = BertPreTrainingHeads(
config, self.bert.embeddings.word_embeddings.weight, num_labels=num_labels)
self.apply(self.init_bert_weights)
self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none')
self.crit_next_sent = nn.CrossEntropyLoss(ignore_index=-1)
self.mask_word_id = mask_word_id
self.num_labels = num_labels
self.search_beam_size = search_beam_size
self.length_penalty = length_penalty
self.eos_id = eos_id
self.sos_id = sos_id
self.forbid_duplicate_ngrams = forbid_duplicate_ngrams
self.forbid_ignore_set = forbid_ignore_set
self.ngram_size = ngram_size
self.min_len = min_len
assert mode in ("s2s", "l2r")
self.mode = mode
self.pos_shift = pos_shift
self.div_func = get_div_func()
def forward(self, input_ids, token_type_ids, position_ids, attention_mask, task_idx=None, mask_qkv=None):
if self.search_beam_size > 1:
return self.beam_search(input_ids, token_type_ids, position_ids, attention_mask,
task_idx=task_idx, mask_qkv=mask_qkv)
input_shape = list(input_ids.size())
batch_size = input_shape[0]
input_length = input_shape[1]
output_shape = list(token_type_ids.size())
output_length = output_shape[1]
output_ids = []
prev_embedding = None
prev_encoded_layers = None
curr_ids = input_ids
mask_ids = input_ids.new(batch_size, 1).fill_(self.mask_word_id)
next_pos = input_length
if self.pos_shift:
sep_ids = input_ids.new(batch_size, 1).fill_(self.eos_id)
if self.bert.rel_pos_bias is not None:
rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
rel_pos = relative_position_bucket(
rel_pos_mat, num_buckets=self.config.rel_pos_bins, max_distance=self.config.max_rel_pos)
else:
rel_pos = None
while next_pos < output_length:
curr_length = list(curr_ids.size())[1]
if self.pos_shift:
if next_pos == input_length:
x_input_ids = torch.cat((curr_ids, sep_ids), dim=1)
start_pos = 0
else:
x_input_ids = curr_ids
start_pos = next_pos
else:
start_pos = next_pos - curr_length
x_input_ids = torch.cat((curr_ids, mask_ids), dim=1)
curr_token_type_ids = token_type_ids[:, start_pos:next_pos + 1]
curr_attention_mask = attention_mask[:,
start_pos:next_pos + 1, :next_pos + 1]
curr_position_ids = position_ids[:, start_pos:next_pos + 1]
if rel_pos is not None:
cur_rel_pos = rel_pos[:, start_pos:next_pos + 1, :next_pos + 1]
else:
cur_rel_pos = None
new_embedding, new_encoded_layers, _ = \
self.bert(x_input_ids, curr_token_type_ids, curr_position_ids, curr_attention_mask,
output_all_encoded_layers=True, prev_embedding=prev_embedding,
prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv, rel_pos=cur_rel_pos)
last_hidden = new_encoded_layers[-1][:, -1:, :]
prediction_scores, _ = self.cls(
last_hidden, None, task_idx=task_idx)
_, max_ids = torch.max(prediction_scores, dim=-1)
output_ids.append(max_ids)
if self.pos_shift:
if prev_embedding is None:
prev_embedding = new_embedding
else:
prev_embedding = torch.cat(
(prev_embedding, new_embedding), dim=1)
if prev_encoded_layers is None:
prev_encoded_layers = [x for x in new_encoded_layers]
else:
prev_encoded_layers = [torch.cat((x[0], x[1]), dim=1) for x in zip(
prev_encoded_layers, new_encoded_layers)]
else:
if prev_embedding is None:
prev_embedding = new_embedding[:, :-1, :]
else:
prev_embedding = torch.cat(
(prev_embedding, new_embedding[:, :-1, :]), dim=1)
if prev_encoded_layers is None:
prev_encoded_layers = [x[:, :-1, :]
for x in new_encoded_layers]
else:
prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1)
for x in zip(prev_encoded_layers, new_encoded_layers)]
curr_ids = max_ids
next_pos += 1
return torch.cat(output_ids, dim=1)
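    # Note on the greedy loop above (a brief sketch): each step appends a [MASK] token
    # (or, with pos_shift, re-feeds the previously generated token), caches the new
    # hidden states in prev_embedding / prev_encoded_layers, and keeps only the argmax
    # token id, so the returned tensor has shape
    # [batch_size, output_length - input_length].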
def beam_search(self, input_ids, token_type_ids, position_ids, attention_mask, task_idx=None, mask_qkv=None):
input_shape = list(input_ids.size())
batch_size = input_shape[0]
input_length = input_shape[1]
output_shape = list(token_type_ids.size())
output_length = output_shape[1]
output_ids = []
prev_embedding = None
prev_encoded_layers = None
curr_ids = input_ids
mask_ids = input_ids.new(batch_size, 1).fill_(self.mask_word_id)
next_pos = input_length
if self.pos_shift:
sep_ids = input_ids.new(batch_size, 1).fill_(self.eos_id)
K = self.search_beam_size
total_scores = []
beam_masks = []
step_ids = []
step_back_ptrs = []
partial_seqs = []
forbid_word_mask = None
buf_matrix = None
if self.bert.rel_pos_bias is not None:
rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
rel_pos = relative_position_bucket(
rel_pos_mat, num_buckets=self.config.rel_pos_bins, max_distance=self.config.max_rel_pos)
else:
rel_pos = None
# print("Rel pos size = %s" % str(rel_pos.size()))
while next_pos < output_length:
curr_length = list(curr_ids.size())[1]
if self.pos_shift:
if next_pos == input_length:
x_input_ids = torch.cat((curr_ids, sep_ids), dim=1)
start_pos = 0
else:
x_input_ids = curr_ids
start_pos = next_pos
else:
start_pos = next_pos - curr_length
x_input_ids = torch.cat((curr_ids, mask_ids), dim=1)
curr_token_type_ids = token_type_ids[:, start_pos:next_pos + 1]
curr_attention_mask = attention_mask[:, start_pos:next_pos + 1, :next_pos + 1]
curr_position_ids = position_ids[:, start_pos:next_pos + 1]
if rel_pos is not None:
cur_rel_pos = rel_pos[:, start_pos:next_pos + 1, :next_pos + 1]
else:
cur_rel_pos = None
new_embedding, new_encoded_layers, _ = \
self.bert(x_input_ids, curr_token_type_ids, curr_position_ids, curr_attention_mask,
output_all_encoded_layers=True, prev_embedding=prev_embedding,
prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv, rel_pos=cur_rel_pos)
last_hidden = new_encoded_layers[-1][:, -1:, :]
prediction_scores, _ = self.cls(
last_hidden, None, task_idx=task_idx)
log_scores = torch.nn.functional.log_softmax(
prediction_scores, dim=-1)
if forbid_word_mask is not None:
log_scores += (forbid_word_mask * -10000.0)
if self.min_len and (next_pos - input_length + 1 <= self.min_len):
log_scores[:, :, self.eos_id].fill_(-10000.0)
kk_scores, kk_ids = torch.topk(log_scores, k=K)
if len(total_scores) == 0:
k_ids = torch.reshape(kk_ids, [batch_size, K])
back_ptrs = torch.zeros(batch_size, K, dtype=torch.long)
k_scores = torch.reshape(kk_scores, [batch_size, K])
else:
last_eos = torch.reshape(
beam_masks[-1], [batch_size * K, 1, 1])
last_seq_scores = torch.reshape(
total_scores[-1], [batch_size * K, 1, 1])
kk_scores += last_eos * (-10000.0) + last_seq_scores
kk_scores = torch.reshape(kk_scores, [batch_size, K * K])
k_scores, k_ids = torch.topk(kk_scores, k=K)
# back_ptrs = torch.div(k_ids, K)
back_ptrs = self.div_func(k_ids, K)
kk_ids = torch.reshape(kk_ids, [batch_size, K * K])
k_ids = torch.gather(kk_ids, 1, k_ids)
step_back_ptrs.append(back_ptrs)
step_ids.append(k_ids)
beam_masks.append(torch.eq(k_ids, self.eos_id).type_as(kk_scores))
total_scores.append(k_scores)
def first_expand(x):
input_shape = list(x.size())
expanded_shape = input_shape[:1] + [1] + input_shape[1:]
x = torch.reshape(x, expanded_shape)
repeat_count = [1, K] + [1] * (len(input_shape) - 1)
x = x.repeat(*repeat_count)
x = torch.reshape(x, [input_shape[0] * K] + input_shape[1:])
return x
def select_beam_items(x, ids):
id_shape = list(ids.size())
id_rank = len(id_shape)
assert len(id_shape) == 2
x_shape = list(x.size())
x = torch.reshape(x, [batch_size, K] + x_shape[1:])
x_rank = len(x_shape) + 1
assert x_rank >= 2
if id_rank < x_rank:
ids = torch.reshape(
ids, id_shape + [1] * (x_rank - id_rank))
ids = ids.expand(id_shape + x_shape[1:])
y = torch.gather(x, 1, ids)
y = torch.reshape(y, x_shape)
return y
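            # Shape sketch for the two helpers above (B = batch size, K = beam
            # size; the concrete numbers are illustrative):
            #   first_expand(x): (B, L, H) -> (B*K, L, H) by repeating each
            #       batch row K times, e.g. B=2, K=3 turns rows [a, b] into
            #       [a, a, a, b, b, b]; this broadcasts the source-side
            #       tensors to every beam on the first decoding step.
            #   select_beam_items(x, back_ptrs): views the cache as
            #       (B, K, ...) and gathers along the beam dimension, so each
            #       surviving hypothesis keeps the hidden states of the beam
            #       it was extended from.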
is_first = (prev_embedding is None)
if self.pos_shift:
if prev_embedding is None:
prev_embedding = first_expand(new_embedding)
else:
prev_embedding = torch.cat(
(prev_embedding, new_embedding), dim=1)
prev_embedding = select_beam_items(
prev_embedding, back_ptrs)
if prev_encoded_layers is None:
prev_encoded_layers = [first_expand(
x) for x in new_encoded_layers]
else:
prev_encoded_layers = [torch.cat((x[0], x[1]), dim=1) for x in zip(
prev_encoded_layers, new_encoded_layers)]
prev_encoded_layers = [select_beam_items(
x, back_ptrs) for x in prev_encoded_layers]
else:
if prev_embedding is None:
prev_embedding = first_expand(new_embedding[:, :-1, :])
else:
prev_embedding = torch.cat(
(prev_embedding, new_embedding[:, :-1, :]), dim=1)
prev_embedding = select_beam_items(
prev_embedding, back_ptrs)
if prev_encoded_layers is None:
prev_encoded_layers = [first_expand(
x[:, :-1, :]) for x in new_encoded_layers]
else:
prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1)
for x in zip(prev_encoded_layers, new_encoded_layers)]
prev_encoded_layers = [select_beam_items(
x, back_ptrs) for x in prev_encoded_layers]
curr_ids = torch.reshape(k_ids, [batch_size * K, 1])
if is_first:
token_type_ids = first_expand(token_type_ids)
position_ids = first_expand(position_ids)
attention_mask = first_expand(attention_mask)
if rel_pos is not None:
rel_pos = first_expand(rel_pos)
mask_ids = first_expand(mask_ids)
if mask_qkv is not None:
mask_qkv = first_expand(mask_qkv)
if self.forbid_duplicate_ngrams:
wids = step_ids[-1].tolist()
ptrs = step_back_ptrs[-1].tolist()
if is_first:
partial_seqs = []
for b in range(batch_size):
for k in range(K):
partial_seqs.append([wids[b][k]])
else:
new_partial_seqs = []
for b in range(batch_size):
for k in range(K):
new_partial_seqs.append(
partial_seqs[ptrs[b][k] + b * K] + [wids[b][k]])
partial_seqs = new_partial_seqs
def get_dup_ngram_candidates(seq, n):
cands = set()
if len(seq) < n:
return []
tail = seq[-(n - 1):]
if self.forbid_ignore_set and any(tk in self.forbid_ignore_set for tk in tail):
return []
for i in range(len(seq) - (n - 1)):
mismatch = False
for j in range(n - 1):
if tail[j] != seq[i + j]:
mismatch = True
break
if (not mismatch) and not (
self.forbid_ignore_set and (seq[i + n - 1] in self.forbid_ignore_set)):
cands.add(seq[i + n - 1])
return list(sorted(cands))
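                # Example of the duplicate n-gram blocking above (token ids
                # are made up): with ngram_size = 3 and seq = [5, 8, 9, 5, 8],
                # the tail [5, 8] matches positions 0-1, so the helper returns
                # [9]; token 9 is then masked out of the next softmax via
                # forbid_word_mask, preventing the trigram (5, 8, 9) from
                # being generated a second time.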
if len(partial_seqs[0]) >= self.ngram_size:
dup_cands = []
for seq in partial_seqs:
dup_cands.append(
get_dup_ngram_candidates(seq, self.ngram_size))
if max(len(x) for x in dup_cands) > 0:
if buf_matrix is None:
vocab_size = list(log_scores.size())[-1]
buf_matrix = np.zeros(
(batch_size * K, vocab_size), dtype=float)
else:
buf_matrix.fill(0)
for bk, cands in enumerate(dup_cands):
for i, wid in enumerate(cands):
buf_matrix[bk, wid] = 1.0
forbid_word_mask = torch.tensor(
buf_matrix, dtype=log_scores.dtype)
forbid_word_mask = torch.reshape(
forbid_word_mask, [batch_size * K, 1, vocab_size]).cuda()
else:
forbid_word_mask = None
next_pos += 1
# [(batch, beam)]
total_scores = [x.tolist() for x in total_scores]
step_ids = [x.tolist() for x in step_ids]
step_back_ptrs = [x.tolist() for x in step_back_ptrs]
# back tracking
traces = {'pred_seq': [], 'scores': [], 'wids': [], 'ptrs': []}
for b in range(batch_size):
# [(beam,)]
scores = [x[b] for x in total_scores]
wids_list = [x[b] for x in step_ids]
ptrs = [x[b] for x in step_back_ptrs]
traces['scores'].append(scores)
traces['wids'].append(wids_list)
traces['ptrs'].append(ptrs)
# first we need to find the eos frame where all symbols are eos
# any frames after the eos frame are invalid
last_frame_id = len(scores) - 1
for i, wids in enumerate(wids_list):
if all(wid == self.eos_id for wid in wids):
last_frame_id = i
break
max_score = -math.inf
frame_id = -1
pos_in_frame = -1
for fid in range(last_frame_id + 1):
for i, wid in enumerate(wids_list[fid]):
if wid == self.eos_id or fid == last_frame_id:
s = scores[fid][i]
if self.length_penalty > 0:
s /= math.pow((5 + fid + 1) / 6.0,
self.length_penalty)
if s > max_score:
max_score = s
frame_id = fid
pos_in_frame = i
if frame_id == -1:
traces['pred_seq'].append([0])
else:
seq = [wids_list[frame_id][pos_in_frame]]
for fid in range(frame_id, 0, -1):
pos_in_frame = ptrs[fid][pos_in_frame]
seq.append(wids_list[fid - 1][pos_in_frame])
seq.reverse()
traces['pred_seq'].append(seq)
def _pad_sequence(sequences, max_len, padding_value=0):
trailing_dims = sequences[0].size()[1:]
out_dims = (len(sequences), max_len) + trailing_dims
out_tensor = sequences[0].data.new(*out_dims).fill_(padding_value)
for i, tensor in enumerate(sequences):
length = tensor.size(0)
# use index notation to prevent duplicate references to the tensor
out_tensor[i, :length, ...] = tensor
return out_tensor
# convert to tensors for DataParallel
for k in ('pred_seq', 'scores', 'wids', 'ptrs'):
ts_list = traces[k]
if not isinstance(ts_list[0], torch.Tensor):
dt = torch.float if k == 'scores' else torch.long
ts_list = [torch.tensor(it, dtype=dt) for it in ts_list]
traces[k] = _pad_sequence(
ts_list, output_length, padding_value=0).to(input_ids.device)
return traces
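# Output contract of beam_search, for reference: `traces` maps 'pred_seq',
# 'scores', 'wids' and 'ptrs' to tensors padded along the decoding-step
# dimension to output_length and batched over examples, so a caller can read
# the selected hypothesis directly from 'pred_seq' or redo its own
# back-tracking from the per-step word ids and back pointers.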
| EXA-1-master | exa/models/unilm-master/s2s-ft/s2s_ft/modeling_decoding.py |
import numpy as np
from random import randint, shuffle, choice
from random import random as rand
import math
import logging
import torch
import torch.utils.data
logger = logging.getLogger(__name__)
def get_random_word(vocab_words):
i = randint(0, len(vocab_words)-1)
return vocab_words[i]
def batch_list_to_batch_tensors(batch):
batch_tensors = []
for x in zip(*batch):
if x[0] is None:
batch_tensors.append(None)
elif isinstance(x[0], torch.Tensor):
batch_tensors.append(torch.stack(x))
else:
batch_tensors.append(torch.tensor(x, dtype=torch.long))
return batch_tensors
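# Minimal usage sketch (illustrative, not from the original file): given a
# batch of per-example tuples such as the ones produced by
# Preprocess4Seq2seqDecoder below,
#   batch = [(ids_0, seg_0, pos_0, mask_0, None, 3), (ids_1, seg_1, ...)]
# batch_list_to_batch_tensors zips the tuples field-wise and returns one
# value per field: torch.stack for tensor fields, torch.tensor(..., long) for
# plain lists, and None passed through unchanged, which makes it suitable as
# a DataLoader collate_fn.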
def _get_word_split_index(tokens, st, end):
split_idx = []
i = st
while i < end:
if (not tokens[i].startswith('##')) or (i == st):
split_idx.append(i)
i += 1
split_idx.append(end)
return split_idx
def _expand_whole_word(tokens, st, end):
new_st, new_end = st, end
while (new_st >= 0) and tokens[new_st].startswith('##'):
new_st -= 1
while (new_end < len(tokens)) and tokens[new_end].startswith('##'):
new_end += 1
return new_st, new_end
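# Example for the two helpers above (WordPiece tokens are illustrative):
#   tokens = ['un', '##believ', '##able', 'story']
#   _expand_whole_word(tokens, 1, 2) widens the span across the '##' pieces
#       and returns (0, 3), i.e. the whole word "unbelievable";
#   _get_word_split_index(tokens, 0, 4) returns [0, 3, 4], the indices where
#       new words start plus the end index.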
class Pipeline():
""" Pre-process Pipeline Class : callable """
def __init__(self):
super().__init__()
self.skipgram_prb = None
self.skipgram_size = None
self.pre_whole_word = None
self.mask_whole_word = None
self.word_subsample_prb = None
self.sp_prob = None
self.pieces_dir = None
self.vocab_words = None
self.pieces_threshold = 10
self.call_count = 0
self.offline_mode = False
self.skipgram_size_geo_list = None
self.span_same_mask = False
def __call__(self, instance):
raise NotImplementedError
class Preprocess4Seq2seqDecoder(Pipeline):
""" Pre-processing steps for pretraining transformer """
def __init__(self, vocab_words, indexer, max_len=512, max_tgt_length=128,
mode="s2s", pos_shift=False, source_type_id=0, target_type_id=1,
cls_token='[CLS]', sep_token='[SEP]', pad_token='[PAD]'):
super().__init__()
self.max_len = max_len
self.vocab_words = vocab_words # vocabulary (sub)words
self.indexer = indexer # function from token to token index
self.max_len = max_len
self._tril_matrix = torch.tril(torch.ones((max_len, max_len), dtype=torch.long))
self.task_idx = 3 # relax projection layer for different tasks
assert mode in ("s2s", "l2r")
self.mode = mode
self.max_tgt_length = max_tgt_length
self.pos_shift = pos_shift
self.delta = 1 if pos_shift else 2
self.cls_token = cls_token
self.sep_token = sep_token
self.pad_token = pad_token
self.source_type_id = source_type_id
self.target_type_id = target_type_id
self.cc = 0
def __call__(self, instance):
tokens_a, max_a_len = instance
padded_tokens_a = [self.cls_token] + tokens_a
if not self.pos_shift:
padded_tokens_a = padded_tokens_a + [self.sep_token]
assert len(padded_tokens_a) <= max_a_len + self.delta
if max_a_len + self.delta > len(padded_tokens_a):
padded_tokens_a += [self.pad_token] * \
(max_a_len + self.delta - len(padded_tokens_a))
assert len(padded_tokens_a) == max_a_len + self.delta
max_len_in_batch = min(self.max_tgt_length +
max_a_len + self.delta, self.max_len)
tokens = padded_tokens_a
segment_ids = [self.source_type_id] * (len(padded_tokens_a)) \
+ [self.target_type_id] * (max_len_in_batch - len(padded_tokens_a))
mask_qkv = None
position_ids = []
for i in range(len(tokens_a) + self.delta):
position_ids.append(i)
for i in range(len(tokens_a) + self.delta, max_a_len + self.delta):
position_ids.append(0)
for i in range(max_a_len + self.delta, max_len_in_batch):
position_ids.append(i - (max_a_len + self.delta) + len(tokens_a) + self.delta)
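        # Layout produced by the three loops above: positions
        # 0 .. len(tokens_a)+delta-1 cover [CLS] + source (+ [SEP] unless
        # pos_shift), the padding slots up to max_a_len all get position 0,
        # and the target area continues the position sequence as if the
        # padding were not there, so the decoder sees contiguous positions
        # across the source/target boundary.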
# Token Indexing
input_ids = self.indexer(tokens)
self.cc += 1
if self.cc < 20:
# print("Vocab size = %d" % len(self.vocab_words))
# for tk_id in input_ids:
# print(u"trans %d -> %s" % (tk_id, self.vocab_words[tk_id]))
logger.info(u"Input src = %s" % " ".join((self.vocab_words[tk_id]) for tk_id in input_ids))
# Zero Padding
input_mask = torch.zeros(
max_len_in_batch, max_len_in_batch, dtype=torch.long)
if self.mode == "s2s":
input_mask[:, :len(tokens_a) + self.delta].fill_(1)
else:
st, end = 0, len(tokens_a) + self.delta
input_mask[st:end, st:end].copy_(
self._tril_matrix[:end, :end])
input_mask[end:, :len(tokens_a) + self.delta].fill_(1)
second_st, second_end = len(padded_tokens_a), max_len_in_batch
input_mask[second_st:second_end, second_st:second_end].copy_(
self._tril_matrix[:second_end-second_st, :second_end-second_st])
return (input_ids, segment_ids, position_ids, input_mask, mask_qkv, self.task_idx)
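# Hedged usage sketch for Preprocess4Seq2seqDecoder (names and lengths are
# illustrative; the real wiring lives in the decoding script, not here):
#   proc = Preprocess4Seq2seqDecoder(
#       vocab_words=list(tokenizer.vocab.keys()),
#       indexer=tokenizer.convert_tokens_to_ids,
#       max_len=512, max_tgt_length=48, mode="s2s")
#   instance = (tokenizer.tokenize("some source text"), max_a_len)
#   input_ids, segment_ids, position_ids, input_mask, mask_qkv, task_idx = proc(instance)
# i.e. each call takes (source_tokens, max_source_len_in_batch) and returns
# the padded ids and attention mask consumed by the decoder.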
| EXA-1-master | exa/models/unilm-master/s2s-ft/s2s_ft/s2s_loader.py |
import torch
import logging
from transformers.modeling_utils import cached_path, WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME
logger = logging.getLogger(__name__)
def get_checkpoint_from_transformer_cache(
archive_file, pretrained_model_name_or_path, pretrained_model_archive_map,
cache_dir, force_download, proxies, resume_download,
):
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir, force_download=force_download,
proxies=proxies, resume_download=resume_download)
except EnvironmentError:
if pretrained_model_name_or_path in pretrained_model_archive_map:
msg = "Couldn't reach server at '{}' to download pretrained weights.".format(
archive_file)
else:
msg = "Model name '{}' was not found in model name list ({}). " \
"We assumed '{}' was a path or url to model weight files named one of {} but " \
"couldn't find any such file at this path or url.".format(
pretrained_model_name_or_path,
', '.join(pretrained_model_archive_map.keys()),
archive_file,
[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME])
raise EnvironmentError(msg)
if resolved_archive_file == archive_file:
logger.info("loading weights file {}".format(archive_file))
else:
logger.info("loading weights file {} from cache at {}".format(
archive_file, resolved_archive_file))
return torch.load(resolved_archive_file, map_location='cpu')
def hf_roberta_to_hf_bert(state_dict):
logger.info(" * Convert Huggingface RoBERTa format to Huggingface BERT format * ")
new_state_dict = {}
for key in state_dict:
value = state_dict[key]
if key == 'roberta.embeddings.position_embeddings.weight':
value = value[2:]
if key == 'roberta.embeddings.token_type_embeddings.weight':
continue
if key.startswith('roberta'):
key = 'bert.' + key[8:]
elif key.startswith('lm_head'):
if 'layer_norm' in key or 'dense' in key:
key = 'cls.predictions.transform.' + key[8:]
else:
key = 'cls.predictions.' + key[8:]
key = key.replace('layer_norm', 'LayerNorm')
new_state_dict[key] = value
return new_state_dict
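# Example key mappings performed above (parameter names follow the standard
# Huggingface RoBERTa layout):
#   'roberta.encoder.layer.0.attention.self.query.weight'
#       -> 'bert.encoder.layer.0.attention.self.query.weight'
#   'lm_head.dense.weight'      -> 'cls.predictions.transform.dense.weight'
#   'lm_head.layer_norm.weight' -> 'cls.predictions.transform.LayerNorm.weight'
#   'lm_head.decoder.weight'    -> 'cls.predictions.decoder.weight'
# The first two position-embedding rows (RoBERTa's padding offset) are
# dropped and the single token_type embedding is discarded.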
def hf_electra_to_hf_bert(state_dict):
logger.info(" * Convert Huggingface ELECTRA format to Huggingface BERT format * ")
new_state_dict = {}
for key in state_dict:
value = state_dict[key]
if key.startswith('electra'):
key = 'bert.' + key[8:]
new_state_dict[key] = value
return new_state_dict
def hf_bert_to_hf_bert(state_dict):
# keep no change
return state_dict
def unilm_to_hf_bert(state_dict):
logger.info(" * Convert Fast QKV format to Huggingface BERT format * ")
new_state_dict = {}
for key in state_dict:
value = state_dict[key]
if key.endswith("attention.self.q_bias"):
new_state_dict[key.replace("attention.self.q_bias", "attention.self.query.bias")] = value.view(-1)
elif key.endswith("attention.self.v_bias"):
new_state_dict[key.replace("attention.self.v_bias", "attention.self.value.bias")] = value.view(-1)
new_state_dict[key.replace("attention.self.v_bias", "attention.self.key.bias")] = torch.zeros_like(value.view(-1))
elif key.endswith("attention.self.qkv_linear.weight"):
l, _ = value.size()
assert l % 3 == 0
l = l // 3
q, k, v = torch.split(value, split_size_or_sections=(l, l, l), dim=0)
new_state_dict[key.replace("attention.self.qkv_linear.weight", "attention.self.query.weight")] = q
new_state_dict[key.replace("attention.self.qkv_linear.weight", "attention.self.key.weight")] = k
new_state_dict[key.replace("attention.self.qkv_linear.weight", "attention.self.value.weight")] = v
elif key == "bert.encoder.rel_pos_bias.weight":
new_state_dict["bert.rel_pos_bias.weight"] = value
else:
new_state_dict[key] = value
del state_dict
return new_state_dict
state_dict_convert = {
'bert': hf_bert_to_hf_bert,
'unilm': unilm_to_hf_bert,
'minilm': hf_bert_to_hf_bert,
'roberta': hf_roberta_to_hf_bert,
'xlm-roberta': hf_roberta_to_hf_bert,
'electra': hf_electra_to_hf_bert,
}
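# Minimal self-contained sketch (illustrative only: the layer name and tensor
# sizes below are made up) showing how unilm_to_hf_bert splits the fused
# qkv_linear weight and the q/v biases into standard query/key/value entries.
if __name__ == "__main__":
    hidden = 4
    dummy_state_dict = {
        "bert.encoder.layer.0.attention.self.qkv_linear.weight": torch.randn(3 * hidden, hidden),
        "bert.encoder.layer.0.attention.self.q_bias": torch.zeros(1, hidden),
        "bert.encoder.layer.0.attention.self.v_bias": torch.zeros(1, hidden),
    }
    converted = state_dict_convert["unilm"](dummy_state_dict)
    for name, tensor in sorted(converted.items()):
        # Expect query/key/value weights of shape (4, 4) and biases of shape (4,)
        print(name, tuple(tensor.shape))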
| EXA-1-master | exa/models/unilm-master/s2s-ft/s2s_ft/convert_state_dict.py |
# coding=utf-8
# The MIT License (MIT)
# Copyright (c) Microsoft Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Tokenization classes for UniLM."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
from transformers.tokenization_bert import BertTokenizer, whitespace_tokenize
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file':
{
'unilm-large-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm-large-cased-vocab.txt",
'unilm-base-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm-base-cased-vocab.txt",
'unilm1-large-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1-large-cased-vocab.txt",
'unilm1-base-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1-base-cased-vocab.txt",
'unilm1.2-base-uncased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1.2-base-uncased-vocab.txt",
'unilm2-base-uncased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm2-base-uncased-vocab.txt",
'unilm2-large-uncased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm2-large-uncased-vocab.txt",
'unilm2-large-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm2-large-cased-vocab.txt",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'unilm-large-cased': 512,
'unilm-base-cased': 512,
'unilm1-large-cased': 512,
'unilm1-base-cased': 512,
'unilm1.2-base-uncased': 512,
'unilm2-base-uncased': 512,
'unilm2-large-cased': 512,
'unilm2-large-uncased': 512,
}
class UnilmTokenizer(BertTokenizer):
r"""
Constructs a UnilmTokenizer.
:class:`~transformers.UnilmTokenizer` is identical to BertTokenizer and runs end-to-end tokenization: punctuation splitting + wordpiece
Args:
vocab_file: Path to a one-wordpiece-per-line vocabulary file
do_lower_case: Whether to lower case the input. Only has an effect when do_wordpiece_only=False
do_basic_tokenize: Whether to do basic tokenization before wordpiece.
max_len: An artificial maximum length to truncate tokenized sequences to; Effective maximum length is always the
minimum of this value (if specified) and the underlying BERT model's sequence length.
never_split: List of tokens which will never be split during tokenization. Only has an effect when
do_wordpiece_only=False
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
class WhitespaceTokenizer(object):
def tokenize(self, text):
return whitespace_tokenize(text)
| EXA-1-master | exa/models/unilm-master/s2s-ft/s2s_ft/tokenization_unilm.py |
# coding=utf-8
# The MIT License (MIT)
# Copyright (c) Microsoft Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
""" UniLM model configuration """
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import sys
from io import open
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
UNILM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'unilm-large-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm-large-cased-config.json",
'unilm-base-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm-base-cased-config.json",
'unilm1-large-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1-large-cased-config.json",
'unilm1-base-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1-base-cased-config.json",
'unilm1.2-base-uncased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1.2-base-uncased-config.json",
'unilm2-base-uncased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm2-base-uncased-config.json",
'unilm2-large-uncased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm2-large-uncased-config.json",
'unilm2-large-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm2-large-cased-config.json",
}
class UnilmConfig(PretrainedConfig):
r"""
:class:`~transformers.UnilmConfig` is the configuration class to store the configuration of a
`UnilmModel`.
Arguments:
        vocab_size: Vocabulary size of `input_ids` in `UnilmModel`, or the path to a pretrained model config file.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu", "swish" and "gelu_new" are supported.
        hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`UnilmModel`.
        initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
layer_norm_eps: The epsilon used by LayerNorm.
"""
pretrained_config_archive_map = UNILM_PRETRAINED_CONFIG_ARCHIVE_MAP
def __init__(self,
vocab_size=28996,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=6,
initializer_range=0.02,
layer_norm_eps=1e-12,
source_type_id=0,
target_type_id=1,
**kwargs):
super(UnilmConfig, self).__init__(**kwargs)
if isinstance(vocab_size, str) or (sys.version_info[0] == 2
and isinstance(vocab_size, unicode)):
with open(vocab_size, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size, int):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.source_type_id = source_type_id
self.target_type_id = target_type_id
else:
raise ValueError("First argument must be either a vocabulary size (int)"
" or the path to a pretrained model config file (str)")
| EXA-1-master | exa/models/unilm-master/s2s-ft/s2s_ft/configuration_unilm.py |
from __future__ import absolute_import, division, print_function
import logging
import os
import json
import random
import glob
import torch
import tqdm
import array
import collections
import torch.utils.data
from transformers.file_utils import WEIGHTS_NAME
try:
import lmdb
except ImportError:
pass
OPTIM_NAME = "optimizer.bin"
logger = logging.getLogger(__name__)
class TrainingExample(object):
def __init__(self, source_ids, target_ids, example_id):
self.source_ids = source_ids
self.target_ids = target_ids
self.example_id = example_id
class Seq2seqDatasetForBert(torch.utils.data.Dataset):
def __init__(
self, features, max_source_len, max_target_len,
vocab_size, cls_id, sep_id, pad_id, mask_id,
random_prob, keep_prob, offset, num_training_instances,
mask_way='v1', target_mask_prob=-1.0, num_max_mask_token=0,
source_mask_prob=-1.0,
):
self.features = features
self.max_source_len = max_source_len
self.max_target_len = max_target_len
self.offset = offset
if offset > 0:
logger.info(" **** Set offset %d in Seq2seqDatasetForBert **** ", offset)
self.cls_id = cls_id
self.sep_id = sep_id
self.pad_id = pad_id
self.random_prob = random_prob
self.keep_prob = keep_prob
self.mask_id = mask_id
self.vocab_size = vocab_size
self.num_training_instances = num_training_instances
self.target_mask_prob = target_mask_prob
if mask_way == 'v0':
num_max_mask_token = self.max_target_len
logger.info("Mask way v0: set num_max_mask_token = %d" % num_max_mask_token)
self.num_max_mask_token = num_max_mask_token
self.mask_way = mask_way
assert mask_way in ('v0', 'v1', 'v2')
self.source_mask_prob = source_mask_prob
def __len__(self):
return self.num_training_instances
def __trunk(self, ids, max_len, append_sep=True):
if append_sep:
max_len -= 1
if len(ids) > max_len:
ids = ids[:max_len]
if append_sep:
ids = ids + [self.sep_id]
return ids
def __pad(self, ids, max_len):
if len(ids) < max_len:
return ids + [self.pad_id] * (max_len - len(ids))
else:
assert len(ids) == max_len
return ids
def get_masked_token(self, tk_id):
p = random.random()
if p < self.keep_prob:
return tk_id
elif p < self.keep_prob + self.random_prob:
return random.randint(0, self.vocab_size - 1)
else:
return self.mask_id
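    # get_masked_token implements the usual BERT-style corruption split: with
    # probability keep_prob the original id is kept, with probability
    # random_prob it is replaced by a uniformly sampled vocabulary id, and
    # otherwise (probability 1 - keep_prob - random_prob) it becomes [MASK].
    # For example, keep_prob = random_prob = 0.1 reproduces the familiar
    # 80/10/10 scheme; the concrete values are supplied by the caller.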
def __getitem__(self, _idx):
idx = (self.offset + _idx) % len(self.features)
# print("%d get %d" % (_idx, idx))
feature = self.features[idx]
source_ids = self.__trunk([self.cls_id] + feature.source_ids, self.max_source_len, append_sep=self.mask_way != 'v0')
target_ids = feature.target_ids
if self.mask_way == 'v0':
target_ids = [self.sep_id] + target_ids
target_ids = self.__trunk(target_ids, self.max_target_len, append_sep=self.mask_way != 'v0')
num_source_tokens = len(source_ids)
num_target_tokens = len(target_ids)
if self.source_mask_prob > 0:
for i in range(num_source_tokens):
tk_id = source_ids[i]
if tk_id != self.cls_id and tk_id != self.sep_id:
r = random.random()
if r < self.source_mask_prob:
source_ids[i] = self.get_masked_token(tk_id)
source_ids = self.__pad(source_ids, self.max_source_len)
target_ids = self.__pad(target_ids, self.max_target_len)
if self.mask_way == 'v0':
masked_pos = []
masked_ids = []
masked_weights = []
for pos in range(num_target_tokens):
if pos + 1 != num_target_tokens:
masked_ids.append(target_ids[pos + 1])
else:
masked_ids.append(self.sep_id)
masked_pos.append(pos)
masked_weights.append(1)
r = random.random()
if r < self.target_mask_prob and pos > 0:
target_ids[pos] = self.get_masked_token(target_ids[pos])
masked_ids = self.__pad(masked_ids, self.num_max_mask_token)
masked_pos = self.__pad(masked_pos, self.num_max_mask_token)
masked_weights = self.__pad(masked_weights, self.num_max_mask_token)
return source_ids, target_ids, masked_ids, masked_pos, masked_weights, num_source_tokens, num_target_tokens
elif self.mask_way == 'v1':
masked_pos = list(range(num_target_tokens))
random.shuffle(masked_pos)
num_masked_token = \
min(self.num_max_mask_token, int(self.target_mask_prob * num_target_tokens))
if num_masked_token <= 0:
num_masked_token = 1
masked_pos = masked_pos[:num_masked_token]
masked_ids = []
masked_weights = []
for pos in masked_pos:
masked_ids.append(target_ids[pos])
target_ids[pos] = self.get_masked_token(target_ids[pos])
masked_weights.append(1)
masked_ids = self.__pad(masked_ids, self.num_max_mask_token)
masked_pos = self.__pad(masked_pos, self.num_max_mask_token)
masked_weights = self.__pad(masked_weights, self.num_max_mask_token)
return source_ids, target_ids, masked_ids, masked_pos, masked_weights, num_source_tokens, num_target_tokens
elif self.mask_way == 'v2':
pseudo_ids = []
label_ids = []
for pos in range(num_target_tokens):
tk_id = target_ids[pos]
masked_tk_id = self.get_masked_token(tk_id)
pseudo_ids.append(masked_tk_id)
label_ids.append(tk_id)
r = random.random()
if r < self.target_mask_prob:
target_ids[pos] = masked_tk_id
label_ids = self.__pad(label_ids, self.max_target_len)
pseudo_ids = self.__pad(pseudo_ids, self.max_target_len)
return source_ids, target_ids, label_ids, pseudo_ids, num_source_tokens, num_target_tokens
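    # Summary of the three mask_way variants handled above:
    #   'v0': every target position is a prediction slot -- masked_ids holds
    #         the next token (with [SEP] as the final label) and target tokens
    #         are randomly corrupted with probability target_mask_prob.
    #   'v1': a random subset of target positions (target_mask_prob of them,
    #         capped by num_max_mask_token) is masked, and only those
    #         positions are predicted, as in standard masked-LM fine-tuning.
    #   'v2': a fully corrupted copy (pseudo_ids) plus the full label sequence
    #         is returned, so every target token can be supervised in one
    #         pass.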
def batch_list_to_batch_tensors(batch):
batch_tensors = []
for x in zip(*batch):
if isinstance(x[0], torch.Tensor):
batch_tensors.append(torch.stack(x))
else:
batch_tensors.append(torch.tensor(x, dtype=torch.long))
return batch_tensors
def get_max_epoch_model(output_dir):
fn_model_list = glob.glob(os.path.join(output_dir, "ckpt-*/%s" % WEIGHTS_NAME))
fn_optim_list = glob.glob(os.path.join(output_dir, "ckpt-*/%s" % OPTIM_NAME))
if (not fn_model_list) or (not fn_optim_list):
return None
both_set = set([int(os.path.dirname(fn).split('-')[-1]) for fn in fn_model_list]
) & set([int(os.path.dirname(fn).split('-')[-1]) for fn in fn_optim_list])
if both_set:
return max(both_set)
else:
return None
def get_checkpoint_state_dict(output_dir, ckpt):
model_recover_checkpoint = os.path.join(output_dir, "ckpt-%d" % ckpt, WEIGHTS_NAME)
logger.info(" ** Recover model checkpoint in %s ** ", model_recover_checkpoint)
model_state_dict = torch.load(model_recover_checkpoint, map_location='cpu')
optimizer_recover_checkpoint = os.path.join(output_dir, "ckpt-%d" % ckpt, OPTIM_NAME)
checkpoint_state_dict = torch.load(optimizer_recover_checkpoint, map_location='cpu')
checkpoint_state_dict['model'] = model_state_dict
return checkpoint_state_dict
def report_length(length_counter, total_count):
max_len = max(length_counter.keys())
a = 0
tc = 0
while a < max_len:
cc = 0
for i in range(16):
cc += length_counter[a + i]
tc += cc
if cc > 0:
logger.info("%d ~ %d = %d, %.2f%%" % (a, a + 16, cc, (tc * 100.0) / total_count))
a += 16
def serialize_str(x):
return u"{}".format(x).encode('ascii')
def serialize_array(x, dtype):
data = array.array(dtype)
data.fromlist(x)
return data.tobytes()
def write_to_lmdb(db, key, value):
success = False
while not success:
txn = db.begin(write=True)
try:
txn.put(key, value)
txn.commit()
success = True
except lmdb.MapFullError:
txn.abort()
# double the map_size
curr_limit = db.info()['map_size']
new_limit = curr_limit*2
print('>>> Doubling LMDB map size to %sMB ...' %
(new_limit >> 20,))
db.set_mapsize(new_limit) # double it
def deserialize_str(x):
return x.decode('ascii')
class DocDB(object):
def __init__(self, db_path):
self.db_path = db_path
self.env = lmdb.open(db_path, readonly=True, lock=False, readahead=False, meminit=False)
with self.env.begin(write=False) as txn:
self.start_key_index = int(deserialize_str(txn.get(b'__start__')))
self.size = int(deserialize_str(txn.get(b'__size__')))
self.dtype = deserialize_str(txn.get(b'__dtype__'))
def _deserialize_array(self, x):
data = array.array(self.dtype)
data.frombytes(x)
return data.tolist()
def __getitem__(self, doc_id):
with self.env.begin(write=False) as txn:
# example = {
# "source_ids": self._deserialize_array(txn.get(b"src_ids_%d" % doc_id)),
# "target_ids": self._deserialize_array(txn.get(b"tgt_ids_%d" % doc_id)),
# }
example = TrainingExample(
source_ids=self._deserialize_array(txn.get(b"src_ids_%d" % doc_id)),
target_ids=self._deserialize_array(txn.get(b"tgt_ids_%d" % doc_id)),
example_id=None,
)
return example
def __len__(self):
return self.size
def load_and_cache_examples(
example_file, tokenizer, local_rank, cached_features_file, shuffle=True,
lmdb_cache=None, lmdb_dtype='h', eval_mode=False):
    # Make sure only the first process in distributed training processes the dataset; the others will use the cache
if local_rank not in [-1, 0]:
torch.distributed.barrier()
if cached_features_file is not None and os.path.isfile(cached_features_file):
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
elif cached_features_file is not None and os.path.isdir(cached_features_file) \
and os.path.exists(os.path.join(cached_features_file, 'lock.mdb')):
logger.info("Loading features from cached LMDB %s", cached_features_file)
features = DocDB(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", example_file)
examples = []
with open(example_file, mode="r", encoding="utf-8") as reader:
for line in reader:
examples.append(json.loads(line))
features = []
slc = collections.defaultdict(int)
tlc = collections.defaultdict(int)
for example in tqdm.tqdm(examples):
if isinstance(example["src"], list):
source_tokens = example["src"]
target_tokens = [] if eval_mode else example["tgt"]
else:
source_tokens = tokenizer.tokenize(example["src"])
target_tokens = [] if eval_mode else tokenizer.tokenize(example["tgt"])
source_ids = tokenizer.convert_tokens_to_ids(source_tokens)
target_ids = tokenizer.convert_tokens_to_ids(target_tokens)
slc[len(source_ids)] += 1
tlc[len(target_ids)] += 1
# features.append({
# "source_ids": source_ids,
# "target_ids": target_ids,
# })
features.append(
TrainingExample(
source_ids=source_ids,
target_ids=target_ids,
example_id=len(features),
)
)
if shuffle:
random.shuffle(features)
logger.info("Shuffle the features !")
logger.info("Source length:")
report_length(slc, total_count=len(examples))
logger.info("Target length:")
report_length(tlc, total_count=len(examples))
if local_rank in [-1, 0] and cached_features_file is not None:
if lmdb_cache:
db = lmdb.open(cached_features_file, readonly=False, map_async=True)
for idx, feature in enumerate(features):
write_to_lmdb(
db, b"src_ids_%d" % idx,
serialize_array(feature.source_ids, dtype=lmdb_dtype))
write_to_lmdb(
db, b"tgt_ids_%d" % idx,
serialize_array(feature.target_ids, dtype=lmdb_dtype))
write_to_lmdb(db, b"__start__", serialize_str(0))
write_to_lmdb(db, b"__size__", serialize_str(len(features)))
write_to_lmdb(db, b"__dtype__", serialize_str(lmdb_dtype))
db.sync()
db.close()
logger.info("db_key_idx = %d" % len(features))
del features
features = cached_features_file
logger.info("Saving features into cached lmdb dir %s", cached_features_file)
else:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
    # Make sure only the first process in distributed training processes the dataset; the others will use the cache
if local_rank == 0:
torch.distributed.barrier()
return features
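# Expected input format for load_and_cache_examples, as implied by the JSON
# parsing above: one JSON object per line with "src" and "tgt" fields, either
# raw strings (tokenized here with the given tokenizer) or pre-tokenized
# lists of subword strings, e.g.
#   {"src": "source sentence ...", "tgt": "target sentence ..."}
# In eval_mode the "tgt" field is not read and empty target ids are stored.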
| EXA-1-master | exa/models/unilm-master/s2s-ft/s2s_ft/utils.py |
# coding=utf-8
# The MIT License (MIT)
# Copyright (c) Microsoft Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Tokenization classes for MiniLM."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
from transformers.tokenization_bert import BertTokenizer, whitespace_tokenize
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file':
{
'minilm-l12-h384-uncased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/minilm-l12-h384-uncased-vocab.txt",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'minilm-l12-h384-uncased': 512,
}
class MinilmTokenizer(BertTokenizer):
r"""
Constructs a MinilmTokenizer.
:class:`~transformers.MinilmTokenizer` is identical to BertTokenizer and runs end-to-end tokenization: punctuation splitting + wordpiece
Args:
vocab_file: Path to a one-wordpiece-per-line vocabulary file
do_lower_case: Whether to lower case the input. Only has an effect when do_wordpiece_only=False
do_basic_tokenize: Whether to do basic tokenization before wordpiece.
max_len: An artificial maximum length to truncate tokenized sequences to; Effective maximum length is always the
minimum of this value (if specified) and the underlying BERT model's sequence length.
never_split: List of tokens which will never be split during tokenization. Only has an effect when
do_wordpiece_only=False
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
class WhitespaceTokenizer(object):
def tokenize(self, text):
return whitespace_tokenize(text)
| EXA-1-master | exa/models/unilm-master/s2s-ft/s2s_ft/tokenization_minilm.py |
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import os
import torch
from torch import nn
from torch.nn.modules.loss import _Loss
import torch.nn.functional as F
from transformers.modeling_bert import \
BertPreTrainedModel, BertSelfOutput, BertIntermediate, \
BertOutput, BertPredictionHeadTransform, BertPooler
from transformers.modeling_roberta import ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
from transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_MAP
from transformers.modeling_distilbert import DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP
from transformers.modeling_xlm_roberta import XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
from transformers.modeling_electra import ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP
from transformers.file_utils import WEIGHTS_NAME
from s2s_ft.config import BertForSeq2SeqConfig
from s2s_ft.convert_state_dict import get_checkpoint_from_transformer_cache, state_dict_convert
logger = logging.getLogger(__name__)
BertLayerNorm = torch.nn.LayerNorm
UNILM_PRETRAINED_MODEL_ARCHIVE_MAP = {
'unilm-base-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1-base-cased.bin",
'unilm-large-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1-large-cased.bin",
'unilm1-base-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1-base-cased.bin",
'unilm1-large-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1-large-cased.bin",
'unilm1.2-base-uncased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm1.2-base-uncased.bin",
'unilm2-base-uncased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm2-base-uncased.bin",
'unilm2-large-uncased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm2-large-uncased.bin",
'unilm2-large-cased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/unilm2-large-cased.bin",
}
MINILM_PRETRAINED_MODEL_ARCHIVE_MAP = {
'minilm-l12-h384-uncased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/minilm-l12-h384-uncased.bin",
}
class BertPreTrainedForSeq2SeqModel(BertPreTrainedModel):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
config_class = BertForSeq2SeqConfig
supported_convert_pretrained_model_archive_map = {
"bert": BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
"roberta": ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
"xlm-roberta": XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
"unilm": UNILM_PRETRAINED_MODEL_ARCHIVE_MAP,
"minilm": MINILM_PRETRAINED_MODEL_ARCHIVE_MAP,
}
base_model_prefix = "unilm_for_seq2seq"
pretrained_model_archive_map = {
**ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
**XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
**BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
**UNILM_PRETRAINED_MODEL_ARCHIVE_MAP,
**MINILM_PRETRAINED_MODEL_ARCHIVE_MAP,
**ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP,
}
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, reuse_position_embedding=None, *model_args, **kwargs):
model_type = kwargs.pop('model_type', 'unilm')
if model_type is not None and "state_dict" not in kwargs:
if model_type in cls.supported_convert_pretrained_model_archive_map:
pretrained_model_archive_map = cls.supported_convert_pretrained_model_archive_map[model_type]
if pretrained_model_name_or_path in pretrained_model_archive_map:
state_dict = get_checkpoint_from_transformer_cache(
archive_file=pretrained_model_archive_map[pretrained_model_name_or_path],
pretrained_model_name_or_path=pretrained_model_name_or_path,
pretrained_model_archive_map=pretrained_model_archive_map,
cache_dir=kwargs.get("cache_dir", None), force_download=kwargs.get("force_download", None),
proxies=kwargs.get("proxies", None), resume_download=kwargs.get("resume_download", None),
)
state_dict = state_dict_convert[model_type](state_dict)
kwargs["state_dict"] = state_dict
logger.info("Load HF ckpts")
elif os.path.isfile(pretrained_model_name_or_path):
state_dict = torch.load(pretrained_model_name_or_path, map_location='cpu')
kwargs["state_dict"] = state_dict_convert[model_type](state_dict)
logger.info("Load local ckpts")
elif os.path.isdir(pretrained_model_name_or_path):
state_dict = torch.load(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME), map_location='cpu')
kwargs["state_dict"] = state_dict_convert[model_type](state_dict)
logger.info("Load local ckpts")
else:
raise RuntimeError("Not fined the pre-trained checkpoint !")
if kwargs["state_dict"] is None:
logger.info("s2s-ft does't support the model !")
raise NotImplementedError()
config = kwargs["config"]
state_dict = kwargs["state_dict"]
# initialize new position embeddings (From Microsoft/UniLM)
_k = 'bert.embeddings.position_embeddings.weight'
# if _k in state_dict and config.max_position_embeddings != state_dict[_k].shape[0]:
# logger.info("config.max_position_embeddings != state_dict[bert.embeddings.position_embeddings.weight] ({0} - {1})".format(
# config.max_position_embeddings, state_dict[_k].shape[0]))
# if config.max_position_embeddings > state_dict[_k].shape[0]:
# old_size = state_dict[_k].shape[0]
# # state_dict[_k].data = state_dict[_k].data.resize_(config.max_position_embeddings, state_dict[_k].shape[1])
# state_dict[_k].resize_(
# config.max_position_embeddings, state_dict[_k].shape[1])
# start = old_size
# while start < config.max_position_embeddings:
# chunk_size = min(
# old_size, config.max_position_embeddings - start)
# state_dict[_k].data[start:start+chunk_size,
# :].copy_(state_dict[_k].data[:chunk_size, :])
# start += chunk_size
# elif config.max_position_embeddings < state_dict[_k].shape[0]:
# state_dict[_k].data = state_dict[_k].data[:config.max_position_embeddings, :]
_k = 'bert.embeddings.position_embeddings.weight'
if _k in state_dict:
if config.max_position_embeddings > state_dict[_k].shape[0]:
logger.info("Resize > position embeddings !")
old_vocab_size = state_dict[_k].shape[0]
new_postion_embedding = state_dict[_k].data.new_tensor(torch.ones(
size=(config.max_position_embeddings, state_dict[_k].shape[1])), dtype=torch.float)
new_postion_embedding = nn.Parameter(data=new_postion_embedding, requires_grad=True)
new_postion_embedding.data.normal_(mean=0.0, std=config.initializer_range)
max_range = config.max_position_embeddings if reuse_position_embedding else old_vocab_size
shift = 0
while shift < max_range:
delta = min(old_vocab_size, max_range - shift)
new_postion_embedding.data[shift: shift + delta, :] = state_dict[_k][:delta, :]
logger.info(" CP [%d ~ %d] into [%d ~ %d] " % (0, delta, shift, shift + delta))
shift += delta
state_dict[_k] = new_postion_embedding.data
del new_postion_embedding
elif config.max_position_embeddings < state_dict[_k].shape[0]:
logger.info("Resize < position embeddings !")
old_vocab_size = state_dict[_k].shape[0]
new_postion_embedding = state_dict[_k].data.new_tensor(torch.ones(
size=(config.max_position_embeddings, state_dict[_k].shape[1])), dtype=torch.float)
new_postion_embedding = nn.Parameter(data=new_postion_embedding, requires_grad=True)
new_postion_embedding.data.normal_(mean=0.0, std=config.initializer_range)
new_postion_embedding.data.copy_(state_dict[_k][:config.max_position_embeddings, :])
state_dict[_k] = new_postion_embedding.data
del new_postion_embedding
return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
fix_word_embedding = getattr(config, "fix_word_embedding", None)
if fix_word_embedding:
self.word_embeddings.weight.requires_grad = False
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
if config.type_vocab_size > 0:
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
else:
self.token_type_embeddings = None
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).expand(input_shape)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
embeddings = inputs_embeds + position_embeddings
if self.token_type_embeddings:
embeddings = embeddings + self.token_type_embeddings(token_type_ids)
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings, position_ids
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.output_attentions = config.output_attentions
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def multi_head_attention(self, query, key, value, attention_mask, rel_pos):
query_layer = self.transpose_for_scores(query)
key_layer = self.transpose_for_scores(key)
value_layer = self.transpose_for_scores(value)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
if rel_pos is not None:
attention_scores = attention_scores + rel_pos
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return (context_layer, attention_probs) if self.output_attentions else (context_layer,)
def forward(self, hidden_states, attention_mask=None,
encoder_hidden_states=None,
split_lengths=None, rel_pos=None):
mixed_query_layer = self.query(hidden_states)
if split_lengths:
assert not self.output_attentions
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
if encoder_hidden_states is not None:
mixed_key_layer = self.key(encoder_hidden_states)
mixed_value_layer = self.value(encoder_hidden_states)
else:
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
if split_lengths:
query_parts = torch.split(mixed_query_layer, split_lengths, dim=1)
key_parts = torch.split(mixed_key_layer, split_lengths, dim=1)
value_parts = torch.split(mixed_value_layer, split_lengths, dim=1)
key = None
value = None
outputs = []
sum_length = 0
for (query, _key, _value, part_length) in zip(query_parts, key_parts, value_parts, split_lengths):
key = _key if key is None else torch.cat((key, _key), dim=1)
value = _value if value is None else torch.cat((value, _value), dim=1)
sum_length += part_length
outputs.append(self.multi_head_attention(
query, key, value, attention_mask[:, :, sum_length - part_length: sum_length, :sum_length],
rel_pos=None if rel_pos is None else rel_pos[:, :, sum_length - part_length: sum_length, :sum_length],
)[0])
outputs = (torch.cat(outputs, dim=1), )
else:
outputs = self.multi_head_attention(
mixed_query_layer, mixed_key_layer, mixed_value_layer,
attention_mask, rel_pos=rel_pos)
return outputs
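        # Note on the split_lengths path above: when the input is the
        # concatenation of several segments (e.g. source tokens, decoded
        # target tokens and pseudo-masked target tokens in the seq2seq
        # fine-tuning setup), queries are processed segment by segment while
        # keys/values are accumulated over all segments seen so far, so
        # segment i can attend to segments 0..i but never to a later one; the
        # sliced attention_mask / rel_pos keep per-position masking consistent
        # with that block structure.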
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, hidden_states, attention_mask=None, encoder_hidden_states=None,
split_lengths=None, rel_pos=None):
self_outputs = self.self(
hidden_states, attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
split_lengths=split_lengths, rel_pos=rel_pos)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask=None, split_lengths=None, rel_pos=None):
self_attention_outputs = self.attention(
hidden_states, attention_mask,
split_lengths=split_lengths, rel_pos=rel_pos)
attention_output = self_attention_outputs[0]
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
outputs = (layer_output,) + self_attention_outputs[1:]
return outputs
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask=None, split_lengths=None, rel_pos=None):
all_hidden_states = ()
all_attentions = ()
for i, layer_module in enumerate(self.layer):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states, attention_mask,
split_lengths=split_lengths, rel_pos=rel_pos)
hidden_states = layer_outputs[0]
if self.output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
return outputs # last-layer hidden state, (all hidden states), (all attentions)
def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
"""
ret = 0
if bidirectional:
num_buckets //= 2
# mtf.to_int32(mtf.less(n, 0)) * num_buckets
ret += (relative_position > 0).long() * num_buckets
n = torch.abs(relative_position)
else:
n = torch.max(-relative_position, torch.zeros_like(relative_position))
# now n is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = n < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance /
max_exact) * (num_buckets - max_exact)
).to(torch.long)
val_if_large = torch.min(
val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
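# Worked example for relative_position_bucket with the defaults above
# (num_buckets=32, max_distance=128, bidirectional=True, i.e. 16 buckets per
# direction):
#   relative_position = -3  -> bucket 3            (exact range, |distance| < 8)
#   relative_position = -20 -> bucket 10           (logarithmic range 8..15)
#   relative_position = +3  -> bucket 16 + 3 = 19  (positive offsets shifted by
#                                                   num_buckets // 2)
# Distances beyond max_distance all map to the last bucket of their
# direction, which keeps the learned rel_pos_bias table small.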
class BertModel(BertPreTrainedForSeq2SeqModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
Sequence of hidden-states at the output of the last layer of the model.
**pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during Bert pretraining. This output is usually *not* a good summary
of the semantic content of the input, you're often better with averaging or pooling
the sequence of hidden-states for the whole input sequence.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
def __init__(self, config):
super(BertModel, self).__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
if not isinstance(config, BertForSeq2SeqConfig):
self.pooler = BertPooler(config)
else:
self.pooler = None
if self.config.rel_pos_bins > 0:
self.rel_pos_bias = nn.Linear(self.config.rel_pos_bins, config.num_attention_heads, bias=False)
else:
self.rel_pos_bias = None
def forward(self, input_ids=None, attention_mask=None, token_type_ids=None,
position_ids=None, inputs_embeds=None, split_lengths=None):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
        elif attention_mask.dim() == 2:
            extended_attention_mask = attention_mask[:, None, None, :]
        else:
            raise ValueError("Wrong shape for attention_mask (shape {})".format(attention_mask.shape))
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
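        # Worked example of the two lines above (added comment, not in the original file):
        # a padding mask [1, 1, 0] becomes additive biases [0.0, 0.0, -10000.0]; adding them
        # to the attention logits drives the softmax weight of the padded position to ~0.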
embedding_output, position_ids = self.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
if self.config.rel_pos_bins > 0:
rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
rel_pos = relative_position_bucket(
rel_pos_mat, num_buckets=self.config.rel_pos_bins, max_distance=self.config.max_rel_pos)
rel_pos = F.one_hot(rel_pos, num_classes=self.config.rel_pos_bins).type_as(embedding_output)
rel_pos = self.rel_pos_bias(rel_pos).permute(0, 3, 1, 2)
else:
rel_pos = None
encoder_outputs = self.encoder(
embedding_output, attention_mask=extended_attention_mask,
split_lengths=split_lengths, rel_pos=rel_pos)
sequence_output = encoder_outputs[0]
outputs = (sequence_output, ) + encoder_outputs[1:] # add hidden_states and attentions if they are here
if self.pooler is None:
            return outputs  # sequence_output, (hidden_states), (attentions)
else:
pooled_output = self.pooler(sequence_output)
return sequence_output, pooled_output
class LabelSmoothingLoss(_Loss):
"""
With label smoothing,
KL-divergence between q_{smoothed ground truth prob.}(w)
and p_{prob. computed by model}(w) is minimized.
"""
def __init__(self, label_smoothing=0, tgt_vocab_size=0, ignore_index=0, size_average=None, reduce=None, reduction='mean'):
assert 0.0 < label_smoothing <= 1.0
self.ignore_index = ignore_index
super(LabelSmoothingLoss, self).__init__(
size_average=size_average, reduce=reduce, reduction=reduction)
assert label_smoothing > 0
assert tgt_vocab_size > 0
smoothing_value = label_smoothing / (tgt_vocab_size - 2)
one_hot = torch.full((tgt_vocab_size,), smoothing_value)
one_hot[self.ignore_index] = 0
self.register_buffer('one_hot', one_hot.unsqueeze(0))
self.confidence = 1.0 - label_smoothing
self.tgt_vocab_size = tgt_vocab_size
def forward(self, output, target):
"""
output (FloatTensor): batch_size * num_pos * n_classes
target (LongTensor): batch_size * num_pos
"""
assert self.tgt_vocab_size == output.size(2)
batch_size, num_pos = target.size(0), target.size(1)
output = output.view(-1, self.tgt_vocab_size)
target = target.view(-1)
model_prob = self.one_hot.float().repeat(target.size(0), 1)
model_prob.scatter_(1, target.unsqueeze(1), self.confidence)
model_prob.masked_fill_((target == self.ignore_index).unsqueeze(1), 0)
return F.kl_div(output, model_prob, reduction='none').view(batch_size, num_pos, -1).sum(2)
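# Illustrative usage sketch (added comment, not in the original file): the criterion
# expects log-probabilities (the callers below pass F.log_softmax outputs) and returns a
# per-position KL term, which the callers mask and normalize themselves.
#
#   >>> crit = LabelSmoothingLoss(label_smoothing=0.1, tgt_vocab_size=100, ignore_index=0, reduction='none')
#   >>> log_probs = F.log_softmax(torch.randn(2, 5, 100), dim=-1)   # (batch, positions, vocab)
#   >>> targets = torch.randint(1, 100, (2, 5))                     # (batch, positions)
#   >>> crit(log_probs, targets).shape
#   torch.Size([2, 5])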
class BertLMPredictionHead(nn.Module):
def __init__(self, config, decoder_weight):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder_weight = decoder_weight
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = F.linear(hidden_states, weight=self.decoder_weight, bias=self.bias)
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config, decoder_weight):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(config, decoder_weight)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
def create_mask_and_position_ids(num_tokens, max_len, offset=None):
base_position_matrix = torch.arange(
0, max_len, dtype=num_tokens.dtype, device=num_tokens.device).view(1, -1)
mask = (base_position_matrix < num_tokens.view(-1, 1)).type_as(num_tokens)
if offset is not None:
base_position_matrix = base_position_matrix + offset.view(-1, 1)
position_ids = base_position_matrix * mask
return mask, position_ids
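# Illustrative usage sketch (added comment, not in the original file): for a batch padded
# to max_len=6 with 3 and 5 real tokens, the helper returns the padding mask and the
# (optionally offset) position ids, with padded slots zeroed out.
#
#   >>> num_tokens = torch.tensor([3, 5])
#   >>> mask, position_ids = create_mask_and_position_ids(num_tokens, max_len=6)
#   >>> mask.tolist()
#   [[1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 1, 0]]
#   >>> position_ids.tolist()
#   [[0, 1, 2, 0, 0, 0], [0, 1, 2, 3, 4, 0]]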
class BertForSequenceToSequence(BertPreTrainedForSeq2SeqModel):
MODEL_NAME = 'basic class'
def __init__(self, config):
super(BertForSequenceToSequence, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
self.init_weights()
        self.log_softmax = nn.LogSoftmax(dim=-1)
self.source_type_id = config.source_type_id
self.target_type_id = config.target_type_id
if config.label_smoothing > 0:
self.crit_mask_lm_smoothed = LabelSmoothingLoss(
config.label_smoothing, config.vocab_size, ignore_index=0, reduction='none')
self.crit_mask_lm = None
else:
self.crit_mask_lm_smoothed = None
self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none')
class BertForSequenceToSequenceWithPseudoMask(BertForSequenceToSequence):
MODEL_NAME = "BertForSequenceToSequenceWithPseudoMask"
@staticmethod
def create_attention_mask(source_mask, target_mask, source_position_ids, target_span_ids):
weight = torch.cat((torch.zeros_like(source_position_ids), target_span_ids, -target_span_ids), dim=1)
from_weight = weight.unsqueeze(-1)
to_weight = weight.unsqueeze(1)
true_tokens = (0 <= to_weight) & (torch.cat((source_mask, target_mask, target_mask), dim=1) == 1).unsqueeze(1)
true_tokens_mask = (from_weight >= 0) & true_tokens & (to_weight <= from_weight)
pseudo_tokens_mask = (from_weight < 0) & true_tokens & (-to_weight > from_weight)
pseudo_tokens_mask = pseudo_tokens_mask | ((from_weight < 0) & (to_weight == from_weight))
return (true_tokens_mask | pseudo_tokens_mask).type_as(source_mask)
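    # Reading of the mask above (added comment, not in the original file): the concatenated
    # input is [source | target | pseudo], where pseudo positions carry negated span ids.
    # "True" (source/target) queries attend to non-padding source/target keys whose span id
    # is <= their own, while each pseudo query attends to keys with a strictly smaller span
    # id plus itself -- so every pseudo position predicts its real token without seeing it.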
def forward(
self, source_ids, target_ids, label_ids, pseudo_ids,
num_source_tokens, num_target_tokens, target_span_ids=None, target_no_offset=None):
source_len = source_ids.size(1)
target_len = target_ids.size(1)
pseudo_len = pseudo_ids.size(1)
assert target_len == pseudo_len
assert source_len > 0 and target_len > 0
split_lengths = (source_len, target_len, pseudo_len)
input_ids = torch.cat((source_ids, target_ids, pseudo_ids), dim=1)
token_type_ids = torch.cat(
(torch.ones_like(source_ids) * self.source_type_id,
torch.ones_like(target_ids) * self.target_type_id,
torch.ones_like(pseudo_ids) * self.target_type_id), dim=1)
source_mask, source_position_ids = \
create_mask_and_position_ids(num_source_tokens, source_len)
target_mask, target_position_ids = \
create_mask_and_position_ids(
num_target_tokens, target_len, offset=None if target_no_offset else num_source_tokens)
position_ids = torch.cat((source_position_ids, target_position_ids, target_position_ids), dim=1)
if target_span_ids is None:
target_span_ids = target_position_ids
attention_mask = self.create_attention_mask(source_mask, target_mask, source_position_ids, target_span_ids)
outputs = self.bert(
input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids,
position_ids=position_ids, split_lengths=split_lengths)
sequence_output = outputs[0]
pseudo_sequence_output = sequence_output[:, source_len + target_len:, ]
def loss_mask_and_normalize(loss, mask):
mask = mask.type_as(loss)
loss = loss * mask
denominator = torch.sum(mask) + 1e-5
return (loss / denominator).sum()
prediction_scores_masked = self.cls(pseudo_sequence_output)
if self.crit_mask_lm_smoothed:
masked_lm_loss = self.crit_mask_lm_smoothed(
F.log_softmax(prediction_scores_masked.float(), dim=-1), label_ids)
else:
masked_lm_loss = self.crit_mask_lm(
prediction_scores_masked.transpose(1, 2).float(), label_ids)
pseudo_lm_loss = loss_mask_and_normalize(
masked_lm_loss.float(), target_mask)
return pseudo_lm_loss
class BertForSequenceToSequenceUniLMV1(BertForSequenceToSequence):
MODEL_NAME = "BertForSequenceToSequenceUniLMV1"
@staticmethod
def create_attention_mask(source_mask, target_mask, source_position_ids, target_span_ids):
weight = torch.cat((torch.zeros_like(source_position_ids), target_span_ids), dim=1)
from_weight = weight.unsqueeze(-1)
to_weight = weight.unsqueeze(1)
true_tokens = torch.cat((source_mask, target_mask), dim=1).unsqueeze(1)
return ((true_tokens == 1) & (to_weight <= from_weight)).type_as(source_mask)
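    # Reading of the mask above (added comment, not in the original file): source positions
    # get weight 0 and target positions their (positive) position ids, so source queries see
    # all non-padding source tokens, and each target query additionally sees target tokens at
    # or before its own position -- the standard UniLM seq-to-seq (prefix-LM) attention mask.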
def forward(self, source_ids, target_ids, masked_ids, masked_pos, masked_weight, num_source_tokens, num_target_tokens):
source_len = source_ids.size(1)
target_len = target_ids.size(1)
split_lengths = (source_len, target_len)
input_ids = torch.cat((source_ids, target_ids), dim=1)
token_type_ids = torch.cat(
(torch.ones_like(source_ids) * self.source_type_id,
torch.ones_like(target_ids) * self.target_type_id), dim=1)
source_mask, source_position_ids = \
create_mask_and_position_ids(num_source_tokens, source_len)
target_mask, target_position_ids = \
create_mask_and_position_ids(
num_target_tokens, target_len, offset=num_source_tokens)
position_ids = torch.cat((source_position_ids, target_position_ids), dim=1)
attention_mask = self.create_attention_mask(
source_mask, target_mask, source_position_ids, target_position_ids)
outputs = self.bert(
input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids,
position_ids=position_ids, split_lengths=split_lengths)
def gather_seq_out_by_pos(seq, pos):
return torch.gather(seq, 1, pos.unsqueeze(2).expand(-1, -1, seq.size(-1)))
sequence_output = outputs[0]
target_sequence_output = sequence_output[:, source_len:, ]
masked_sequence_output = gather_seq_out_by_pos(target_sequence_output, masked_pos)
def loss_mask_and_normalize(loss, mask):
mask = mask.type_as(loss)
loss = loss * mask
denominator = torch.sum(mask) + 1e-5
return (loss / denominator).sum()
prediction_scores_masked = self.cls(masked_sequence_output)
if self.crit_mask_lm_smoothed:
masked_lm_loss = self.crit_mask_lm_smoothed(
F.log_softmax(prediction_scores_masked.float(), dim=-1), masked_ids)
else:
masked_lm_loss = self.crit_mask_lm(
prediction_scores_masked.transpose(1, 2).float(), masked_ids)
pseudo_lm_loss = loss_mask_and_normalize(
masked_lm_loss.float(), masked_weight)
return pseudo_lm_loss
class UniLMForSequenceClassification(BertPreTrainedForSeq2SeqModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the sequence classification/regression loss.
Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import BertTokenizer, BertForSequenceClassification
import torch
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForSequenceClassification.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, logits = outputs[:2]
"""
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
# head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = nn.MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = nn.CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), logits, (hidden_states), (attentions)
| EXA-1-master | exa/models/unilm-master/s2s-ft/s2s_ft/modeling.py |
import torch.nn as nn
import torch
from fairseq.modules.quant_noise import quant_noise
from fairseq.modules import MultiheadAttention
from fairseq.modules.transformer_layer import TransformerDecoderLayerBase
from fairseq.models.transformer import TransformerDecoderBase, TransformerDecoder
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.distributed import fsdp_wrap
from fairseq.models.transformer import TransformerConfig
class UniLMMultiheadAttention(MultiheadAttention):
def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, q_noise=0, qn_block_size=8):
super().__init__(embed_dim, num_heads, kdim=kdim, vdim=vdim, dropout=dropout, bias=bias, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=self_attention, encoder_decoder_attention=encoder_decoder_attention, q_noise=q_noise, qn_block_size=qn_block_size)
self.k_proj = quant_noise(nn.Linear(self.kdim, embed_dim, bias=True), q_noise, qn_block_size)
self.k_proj.bias = nn.Parameter(torch.zeros_like(self.k_proj.bias, requires_grad=False))
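        # Added comment (a reading of the code, not in the original file): the key-projection
        # bias only adds a per-query constant to the attention logits, so it cancels in the
        # softmax; it is re-created here as zeros purely for checkpoint/shape compatibility
        # and is frozen later in TrOCRModel.build_model (k_proj.bias.requires_grad = False).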
class UniLMDecoderLayer(TransformerDecoderLayerBase):
def build_self_attention(
self, embed_dim, cfg, add_bias_kv=False, add_zero_attn=False
):
return UniLMMultiheadAttention(
embed_dim,
cfg.decoder.attention_heads,
dropout=cfg.attention_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
self_attention=not cfg.cross_self_attention,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
)
class UniLMDecoderBase(TransformerDecoderBase):
def build_decoder_layer(self, cfg, no_encoder_attn=False):
layer = UniLMDecoderLayer(cfg, no_encoder_attn)
checkpoint = cfg.checkpoint_activations
if checkpoint:
offload_to_cpu = cfg.offload_activations
layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)
# if we are checkpointing, enforce that FSDP always wraps the
# checkpointed layer, regardless of layer size
min_params_to_wrap = cfg.min_params_to_wrap if not checkpoint else 0
layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)
return layer
class UniLMDecoder(UniLMDecoderBase):
def __init__(
self,
args,
dictionary,
embed_tokens,
no_encoder_attn=False,
output_projection=None,
):
self.args = args
super().__init__(
TransformerConfig.from_namespace(args),
dictionary,
embed_tokens,
no_encoder_attn=no_encoder_attn,
output_projection=output_projection,
)
def build_output_projection(self, args, dictionary, embed_tokens):
super().build_output_projection(
TransformerConfig.from_namespace(args), dictionary, embed_tokens
)
def build_decoder_layer(self, args, no_encoder_attn=False):
return super().build_decoder_layer(
TransformerConfig.from_namespace(args), no_encoder_attn=no_encoder_attn
)
| EXA-1-master | exa/models/unilm-master/trocr/unilm_models.py |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import os
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from timm.models.vision_transformer import VisionTransformer, _cfg
from timm.models.vision_transformer import Attention, Block
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_
logger = logging.getLogger(__name__)
class Fp16FixedAttention(Attention):
def cogview_attn(self, attention_scores, alpha=32):
'''
https://arxiv.org/pdf/2105.13290.pdf
Section 2.4 Stabilization of training: Precision Bottleneck Relaxation (PB-Relax).
A replacement of the original nn.Softmax(dim=-1)(attention_scores)
        The new attention_probs appear to be slightly slower to compute and introduce a small bias.
        torch.allclose(standard_attention_probs, cogview_attention_probs, atol=1e-08) can be used to compare them;
        the smaller the atol at which they still match (e.g., 1e-08), the better.
'''
scaled_attention_scores = attention_scores / alpha
max_value = scaled_attention_scores.amax(dim=(-1)).unsqueeze(-1)
# max_value = scaled_attention_scores.amax(dim=(-2, -1)).unsqueeze(-1).unsqueeze(-1)
new_attention_scores = (scaled_attention_scores - max_value) * alpha
return nn.Softmax(dim=-1)(new_attention_scores)
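    # Illustrative sketch of PB-Relax (added comment, not in the original file): per the
    # linked paper, dividing by alpha before taking the row-wise max and subtracting keeps
    # the intermediate values in a representable range, while the softmax result is unchanged
    # because softmax is shift-invariant. Toy numbers (exact arithmetic):
    #
    #   scores  = [70000., 69990.]             # too large for float16 (max ~65504)
    #   scaled  = scores / 32                  # [2187.5, 2187.1875]
    #   shifted = (scaled - max(scaled)) * 32  # [0., -10.]  == scores - max(scores)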
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple)
attn = (q.float() @ k.float().transpose(-2, -1)) * self.scale
# attn = attn.softmax(dim=-1).type_as(x)
attn = self.cogview_attn(attn).type_as(x)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Fp16FixedBlock(Block):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__(dim, num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop,
attn_drop=attn_drop, drop_path=drop_path, act_layer=act_layer,
norm_layer=norm_layer)
self.attn = Fp16FixedAttention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
class AdaptedVisionTransformer(VisionTransformer):
def __init__(self, *args, **kwargs):
self.ape = kwargs.pop('ape', 0)
self.mask_ratio = kwargs.pop('mask_ratio', 0.0)
self.patch_size = kwargs.get('patch_size')
self.fp16fixed = kwargs.pop('fp16fixed', False)
weight_init = kwargs.get('weight_init', '')
super().__init__(*args, **kwargs)
if self.ape:
self.pos_embed = nn.Parameter(torch.zeros(1, self.ape + self.num_tokens, self.embed_dim))
if self.fp16fixed:
# img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
# num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False,
# drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,
# act_layer=None, weight_init=''
embed_dim = kwargs.get('embed_dim', 768)
num_heads = kwargs.get('num_heads', 12)
mlp_ratio = kwargs.get('mlp_ratio', 4.)
qkv_bias = kwargs.get('qkv_bias', True)
drop_rate = kwargs.get('drop_rate', 0.)
attn_drop_rate = kwargs.get('attn_drop_rate', 0.)
drop_path_rate = kwargs.get('drop_path_rate', 0.)
depth = kwargs.get('depth', 12)
norm_layer = kwargs.get('norm_layer', partial(nn.LayerNorm, eps=1e-6))
act_layer = kwargs.get('act_layer', nn.GELU)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.Sequential(*[
Fp16FixedBlock(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)
for i in range(depth)])
self.init_weights(weight_init)
def forward_features(self, x):
_, _, H, W = x.shape
Wh = H // self.patch_size
Ww = W // self.patch_size
x = self.patch_embed(x)
if self.mask_ratio != 0:
probability_matrix = torch.full(x.shape[:2], self.mask_ratio)
masked_indices = torch.bernoulli(probability_matrix).bool()
x[masked_indices] = 0
cls_token = self.cls_token.expand(x.shape[0], -1, -1) # stole cls_tokens impl from Phil Wang, thanks
if self.dist_token is None:
x = torch.cat((cls_token, x), dim=1)
else:
x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1)
if self.ape:
pos_embed_patch_num = int(self.pos_embed.size(1) ** 0.5)
offset = self.num_tokens
adapt_pos_embed = self.pos_embed[:, offset:, :].view(self.pos_embed.shape[0], pos_embed_patch_num, pos_embed_patch_num, self.pos_embed.shape[-1]) # B 24 24 768
adapt_pos_embed = adapt_pos_embed.permute(0, 3, 1, 2)
pos_embed = F.interpolate(adapt_pos_embed, size=(Wh, Ww), mode='bicubic')
pos_embed = pos_embed.flatten(2).transpose(1, 2) # B Wh*Ww C
pos_embed = torch.cat((pos_embed, self.pos_embed[:, :offset, :]), dim=1)
else:
pos_embed = self.pos_embed
input_embedding = x + pos_embed
x = self.pos_drop(input_embedding)
x = self.blocks(x)
x = self.norm(x)
return x, input_embedding
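# Added note (a reading of the code above, not in the original file): with `ape` set, the
# square pretrained position grid (e.g. 24x24 patches for 384x384 inputs at patch_size=16)
# is bicubically resized to the Wh x Ww patch grid of the actual input before being added
# to the embeddings, which lets the encoder run on rectangular text-line crops whose size
# differs from the pretraining resolution.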
@register_model
def deit_tiny_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_small_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_base_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_tiny_distilled_patch16_224(pretrained=False, **kwargs):
model = AdaptedVisionTransformer(distilled=True,
patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_small_distilled_patch16_224(pretrained=False, **kwargs):
model = AdaptedVisionTransformer(distilled=True,
patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_small_distilled_patch16_384(pretrained=False, **kwargs):
model = AdaptedVisionTransformer(distilled=True,
img_size=384, patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth",
map_location="cpu", check_hash=True
)
# adapt 224 model to 384
model_seq_len = model.state_dict()['pos_embed'].shape[1]
ckpt_seq_len = checkpoint['model']['pos_embed'].shape[1]
logger.warning('Deit load {:d} seq len to {:d} APE {}'.format(ckpt_seq_len, model_seq_len, str(model.ape)))
if not model.ape:
if model_seq_len <= ckpt_seq_len:
checkpoint['model']['pos_embed'] = checkpoint['model']['pos_embed'][:, :model_seq_len, :]
else:
t = model.state_dict()['pos_embed']
t[:, :ckpt_seq_len, :] = checkpoint['model']['pos_embed']
checkpoint['model']['pos_embed'] = t
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_base_distilled_patch16_224(pretrained=False, **kwargs):
model = AdaptedVisionTransformer(distilled=True,
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_base_patch16_384(pretrained=False, **kwargs):
model = VisionTransformer(
img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_base_distilled_patch16_384(pretrained=False, **kwargs):
model = AdaptedVisionTransformer(distilled=True,
img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_base_distilled_patch16_custom_size(pretrained=False, img_size=384, **kwargs):
model = AdaptedVisionTransformer(distilled=True,
img_size=img_size, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth",
map_location="cpu", check_hash=True
)
# checkpoint['model']['pos_embed'] = checkpoint['model']['pos_embed'][:, :502, :]
# ape torch.Size([1, 578, 768]) from checkpoint, the shape in current model is torch.Size([1, 1026, 768]).
model_seq_len = model.state_dict()['pos_embed'].shape[1]
ckpt_seq_len = checkpoint['model']['pos_embed'].shape[1]
logger.warning('Deit load {:d} seq len to {:d} APE {}'.format(ckpt_seq_len, model_seq_len, str(model.ape)))
if not model.ape:
if model_seq_len <= ckpt_seq_len:
checkpoint['model']['pos_embed'] = checkpoint['model']['pos_embed'][:, :model_seq_len, :]
else:
t = model.state_dict()['pos_embed']
t[:, :ckpt_seq_len, :] = checkpoint['model']['pos_embed']
checkpoint['model']['pos_embed'] = t
model.load_state_dict(checkpoint["model"])
return model
@register_model
def beit_base_patch16_384(pretrained=False, **kwargs):
model = AdaptedVisionTransformer(
img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=False,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def beit_large_patch16_384(pretrained=False, **kwargs):
model = AdaptedVisionTransformer(
img_size=384, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=False,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model | EXA-1-master | exa/models/unilm-master/trocr/deit.py |
import os
from fairseq import search
from fairseq import scoring, utils, metrics
from fairseq.data import Dictionary, encoders
from fairseq.tasks import LegacyFairseqTask, register_task
from fairseq.tasks.fairseq_task import FairseqTask
try:
from .data import SROIETextRecognitionDataset, Receipt53KDataset, SyntheticTextRecognitionDataset
from .data_aug import build_data_aug, OptForDataAugment, DataAugment
except ImportError:
from data import SROIETextRecognitionDataset, Receipt53KDataset, SyntheticTextRecognitionDataset
from data_aug import build_data_aug, OptForDataAugment, DataAugment
import logging
import torch
logger = logging.getLogger(__name__)
@register_task('text_recognition')
class TextRecognitionTask(LegacyFairseqTask):
@staticmethod
def add_args(parser):
parser.add_argument('data', metavar='DIR',
help='the path to the data dir')
        parser.add_argument('--reset-dictionary', action='store_true',
                            help='whether to reset the dictionary and the related parameters')
        parser.add_argument('--adapt-dictionary', action='store_true',
                            help='whether to adapt the dictionary and the related parameters')
        parser.add_argument('--adapt-encoder-pos-embed', action='store_true',
                            help='whether to adapt the encoder position embeddings')
parser.add_argument('--add-empty-sample', action='store_true',
help='add empty samples to the dataset (for multilingual dataset).')
        parser.add_argument('--preprocess', default='ResizeNormalize', type=str,
                            help='the image preprocess method (DA2|RandAugment)')
        parser.add_argument('--decoder-pretrained', default=None, type=str,
                            help='set to load the RoBERTa parameters into the decoder.')
parser.add_argument('--decoder-pretrained-url', default=None, type=str,
help='the ckpt url for decoder pretraining (only unilm for now)')
parser.add_argument('--dict-path-or-url', default=None, type=str,
help='the local path or url for dictionary file')
parser.add_argument('--input-size', type=int, nargs='+', help='images input size', required=True)
        parser.add_argument('--data-type', type=str, default='SROIE',
                            help='the dataset type used for the task (SROIE, Receipt53K or STR)')
# Augmentation parameters
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original" (default: rand-m9-mstd0.5-inc1)')
parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='bicubic',
                        help='Training interpolation (random, bilinear, bicubic; default: "bicubic")')
parser.add_argument('--repeated-aug', action='store_true')
parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
parser.set_defaults(repeated_aug=True)
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
@classmethod
def setup_task(cls, args, **kwargs):
import urllib.request
import io
if getattr(args, "dict_path_or_url", None) is not None:
if args.dict_path_or_url.startswith('http'):
logger.info('Load dictionary from {}'.format(args.dict_path_or_url))
dict_content = urllib.request.urlopen(args.dict_path_or_url).read().decode()
dict_file_like = io.StringIO(dict_content)
target_dict = Dictionary.load(dict_file_like)
else:
target_dict = Dictionary.load(args.dict_path_or_url)
elif getattr(args, "decoder_pretrained", None) is not None:
if args.decoder_pretrained == 'unilm':
url = 'https://layoutlm.blob.core.windows.net/trocr/dictionaries/unilm3.dict.txt'
logger.info('Load unilm dictionary from {}'.format(url))
dict_content = urllib.request.urlopen(url).read().decode()
dict_file_like = io.StringIO(dict_content)
target_dict = Dictionary.load(dict_file_like)
elif args.decoder_pretrained.startswith('roberta'):
url = 'https://layoutlm.blob.core.windows.net/trocr/dictionaries/gpt2_with_mask.dict.txt'
logger.info('Load gpt2 dictionary from {}'.format(url))
dict_content = urllib.request.urlopen(url).read().decode()
dict_file_like = io.StringIO(dict_content)
target_dict = Dictionary.load(dict_file_like)
else:
raise ValueError('Unknown decoder_pretrained: {}'.format(args.decoder_pretrained))
else:
raise ValueError('Either dict_path_or_url or decoder_pretrained should be set.')
logger.info('[label] load dictionary: {} types'.format(len(target_dict)))
return cls(args, target_dict)
def __init__(self, args, target_dict):
super().__init__(args)
self.args = args
self.data_dir = args.data
self.target_dict = target_dict
if 'LOCAL_RANK' in os.environ and os.environ['LOCAL_RANK'] != '0':
torch.distributed.barrier()
self.bpe = self.build_bpe(args)
if 'LOCAL_RANK' in os.environ and os.environ['LOCAL_RANK'] == '0':
torch.distributed.barrier()
def load_dataset(self, split, **kwargs):
input_size = self.args.input_size
if isinstance(input_size, list):
if len(input_size) == 1:
input_size = (input_size[0], input_size[0])
else:
input_size = tuple(input_size)
elif isinstance(input_size, int):
input_size = (input_size, input_size)
logger.info('The input size is {}, the height is {} and the width is {}'.format(input_size, input_size[0], input_size[1]))
if self.args.preprocess == 'DA2':
tfm = build_data_aug(input_size, mode=split)
elif self.args.preprocess == 'RandAugment':
opt = OptForDataAugment(eval= (split != 'train'), isrand_aug=True, imgW=input_size[1], imgH=input_size[0], intact_prob=0.5, augs_num=3, augs_mag=None)
tfm = DataAugment(opt)
else:
            raise Exception('Undefined image preprocess method.')
# load the dataset
if self.args.data_type == 'SROIE':
root_dir = os.path.join(self.data_dir, split)
self.datasets[split] = SROIETextRecognitionDataset(root_dir, tfm, self.bpe, self.target_dict)
elif self.args.data_type == 'Receipt53K':
gt_path = os.path.join(self.data_dir, 'gt_{}.txt'.format(split))
self.datasets[split] = Receipt53KDataset(gt_path, tfm, self.bpe, self.target_dict)
elif self.args.data_type == 'STR':
gt_path = os.path.join(self.data_dir, 'gt_{}.txt'.format(split))
self.datasets[split] = SyntheticTextRecognitionDataset(gt_path, tfm, self.bpe, self.target_dict)
else:
            raise Exception('Undefined dataset type: ' + self.args.data_type)
@property
def source_dictionary(self):
return None
@property
def target_dictionary(self):
return self.target_dict
def build_generator(
self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None
):
if getattr(args, "score_reference", False):
from fairseq.sequence_scorer import SequenceScorer
return SequenceScorer(
self.target_dictionary,
compute_alignment=getattr(args, "print_alignment", False),
)
from fairseq.sequence_generator import (
SequenceGenerator,
SequenceGeneratorWithAlignment,
)
try:
from .generator import TextRecognitionGenerator
except:
from generator import TextRecognitionGenerator
try:
from fairseq.fb_sequence_generator import FBSequenceGenerator
except ModuleNotFoundError:
pass
# Choose search strategy. Defaults to Beam Search.
sampling = getattr(args, "sampling", False)
sampling_topk = getattr(args, "sampling_topk", -1)
sampling_topp = getattr(args, "sampling_topp", -1.0)
diverse_beam_groups = getattr(args, "diverse_beam_groups", -1)
diverse_beam_strength = getattr(args, "diverse_beam_strength", 0.5)
match_source_len = getattr(args, "match_source_len", False)
diversity_rate = getattr(args, "diversity_rate", -1)
constrained = getattr(args, "constraints", False)
prefix_allowed_tokens_fn = getattr(args, "prefix_allowed_tokens_fn", None)
if (
sum(
int(cond)
for cond in [
sampling,
diverse_beam_groups > 0,
match_source_len,
diversity_rate > 0,
]
)
> 1
):
raise ValueError("Provided Search parameters are mutually exclusive.")
assert sampling_topk < 0 or sampling, "--sampling-topk requires --sampling"
assert sampling_topp < 0 or sampling, "--sampling-topp requires --sampling"
if sampling:
search_strategy = search.Sampling(
self.target_dictionary, sampling_topk, sampling_topp
)
elif diverse_beam_groups > 0:
search_strategy = search.DiverseBeamSearch(
self.target_dictionary, diverse_beam_groups, diverse_beam_strength
)
elif match_source_len:
# this is useful for tagging applications where the output
# length should match the input length, so we hardcode the
# length constraints for simplicity
search_strategy = search.LengthConstrainedBeamSearch(
self.target_dictionary,
min_len_a=1,
min_len_b=0,
max_len_a=1,
max_len_b=0,
)
elif diversity_rate > -1:
search_strategy = search.DiverseSiblingsSearch(
self.target_dictionary, diversity_rate
)
elif constrained:
search_strategy = search.LexicallyConstrainedBeamSearch(
self.target_dictionary, args.constraints
)
elif prefix_allowed_tokens_fn:
search_strategy = search.PrefixConstrainedBeamSearch(
self.target_dictionary, prefix_allowed_tokens_fn
)
else:
search_strategy = search.BeamSearch(self.target_dictionary)
extra_gen_cls_kwargs = extra_gen_cls_kwargs or {}
if seq_gen_cls is None:
if getattr(args, "print_alignment", False):
seq_gen_cls = SequenceGeneratorWithAlignment
extra_gen_cls_kwargs["print_alignment"] = args.print_alignment
elif getattr(args, "fb_seq_gen", False):
seq_gen_cls = FBSequenceGenerator
else:
seq_gen_cls = TextRecognitionGenerator
return seq_gen_cls(
models,
self.target_dictionary,
beam_size=getattr(args, "beam", 5),
max_len_a=getattr(args, "max_len_a", 0),
max_len_b=getattr(args, "max_len_b", 200),
min_len=getattr(args, "min_len", 1),
normalize_scores=(not getattr(args, "unnormalized", False)),
len_penalty=getattr(args, "lenpen", 1),
unk_penalty=getattr(args, "unkpen", 0),
temperature=getattr(args, "temperature", 1.0),
match_source_len=getattr(args, "match_source_len", False),
no_repeat_ngram_size=getattr(args, "no_repeat_ngram_size", 0),
search_strategy=search_strategy,
**extra_gen_cls_kwargs,
)
def filter_indices_by_size(
self, indices, dataset, max_positions=None, ignore_invalid_inputs=False
):
return indices | EXA-1-master | exa/models/unilm-master/trocr/task.py |
from fairseq.models import FairseqEncoder, register_model, FairseqEncoderDecoderModel, register_model_architecture
from fairseq.models.transformer import TransformerDecoder, Embedding, TransformerModel
from fairseq.models.transformer import base_architecture as base_transformer
from fairseq.models.fairseq_encoder import EncoderOut
from torch.nn import Parameter
from fairseq import utils
from torch import Tensor
import torch
from torch.hub import load_state_dict_from_url
from timm.models import create_model
from functools import partial
import logging
import argparse
from typing import Dict, Optional, Tuple
from collections import OrderedDict
import os
logger = logging.getLogger(__name__)
DEFAULT_MAX_TARGET_POSITIONS = 1024
from argparse import Namespace
from omegaconf import DictConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
try:
from .unilm_models import UniLMDecoder
except:
from unilm_models import UniLMDecoder
@register_model('DeiT_TR')
@register_model('TrOCR')
class TrOCRModel(FairseqEncoderDecoderModel):
def load_state_dict(
self,
state_dict,
strict=True,
model_cfg: Optional[DictConfig] = None,
args: Optional[Namespace] = None,
):
if model_cfg is None and args is not None:
            logger.warning("using 'args' is deprecated, please update your code to use dataclass config")
model_cfg = convert_namespace_to_omegaconf(args).model
self.upgrade_state_dict(state_dict)
from fairseq.checkpoint_utils import prune_state_dict
new_state_dict = prune_state_dict(state_dict, model_cfg)
if not model_cfg.ape:
model_seq_len = self.state_dict()['encoder.deit.pos_embed'].shape[1]
ckpt_seq_len = new_state_dict['encoder.deit.pos_embed'].shape[1]
if model_seq_len != ckpt_seq_len and getattr(args, "adapt_encoder_pos_embed", None):
logger.warning('Load from encoder.deit {:d} seq len to {:d}'.format(ckpt_seq_len, model_seq_len))
if model_seq_len <= ckpt_seq_len:
new_state_dict['encoder.deit.pos_embed'] = new_state_dict['encoder.deit.pos_embed'][:, :model_seq_len, :]
else:
t = self.state_dict()['encoder.deit.pos_embed']
t[:, :ckpt_seq_len, :] = new_state_dict['encoder.deit.pos_embed']
new_state_dict['encoder.deit.pos_embed'] = t
# if hasattr(model_cfg, "reset_dictionary") and model_cfg.reset_dictionary:
# logger.info('Reset token embed weights and output projection during loading pretrained models')
# del new_state_dict['decoder.embed_tokens.weight']
# del new_state_dict['decoder.output_projection.weight']
return super().load_state_dict(new_state_dict, strict=False)
@staticmethod
def add_args(parser):
TransformerModel.add_args(parser)
parser.add_argument(
'--deit-arch', type=str,
help='the arch name for the DeiT encoder'
)
parser.add_argument(
'--ape', action='store_true',
            help='whether to use absolute position embeddings (absolute_pos_embed)'
)
parser.set_defaults(ape=False)
parser.add_argument(
'--mask-ratio', default=0.0, type=float,
            help='the ratio of encoder patch embeddings that are randomly masked out.'
)
parser.add_argument(
'--only-keep-pretrained-decoder-structure', action='store_true',
            help='whether to only keep the pretrained decoder structure (without loading its weights)'
)
parser.add_argument(
'--only-keep-pretrained-encoder-structure', action='store_true',
            help='whether to only keep the pretrained encoder structure (without loading its weights)'
)
@staticmethod
def read_args_from_roberta(roberta_args: argparse.Namespace):
# TODO: this would become easier if encoder/decoder where using a similar
# TransformerConfig object
args = argparse.Namespace(**vars(roberta_args))
attr_map = [
("encoder_attention_heads", "decoder_attention_heads"),
("encoder_embed_dim", "decoder_embed_dim"),
("encoder_embed_dim", "decoder_output_dim"),
("encoder_normalize_before", "decoder_normalize_before"),
("encoder_layers_to_keep", "decoder_layers_to_keep"),
("encoder_ffn_embed_dim", "decoder_ffn_embed_dim"),
("encoder_layerdrop", "decoder_layerdrop"),
("encoder_layers", "decoder_layers"),
("encoder_learned_pos", "decoder_learned_pos"),
# should this be set from here ?
("max_positions", "max_target_positions"),
]
for k1, k2 in attr_map:
setattr(args, k2, getattr(roberta_args, k1))
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = not roberta_args.untie_weights_roberta
return args
@classmethod
def build_model(cls, args, task):
encoder = TrOCREncoder(
args = args,
dictionary = task.source_dictionary
)
args.encoder_embed_dim = encoder.deit.embed_dim
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
if getattr(args, "decoder_pretrained", None) == None or getattr(args, "decoder_pretrained", None).upper() == 'None':
logger.info('Decoder is randomly initialized.')
decoder_embed_tokens = cls.build_embedding(
args, task.target_dictionary, args.decoder_embed_dim, args.decoder_embed_path
)
decoder = TransformerDecoder(
args = args,
dictionary=task.target_dictionary,
embed_tokens=decoder_embed_tokens,
no_encoder_attn=False
)
elif getattr(args, "decoder_pretrained", None).startswith('roberta2'):
logger.info('Using the learned pos embedding version loading roberta.')
decoder_embed_tokens = cls.build_embedding(
args, task.target_dictionary, args.decoder_embed_dim, args.decoder_embed_path
)
pretrained_model = getattr(args, "decoder_pretrained", None)
specified = pretrained_model.find('-')!=-1
if 'LOCAL_RANK' in os.environ and os.environ['LOCAL_RANK'] != '0':
torch.distributed.barrier()
if specified:
pretrained_model = pretrained_model.replace('-', '.')
logger.info('Load pre-trained decoder parameters from {}'.format(pretrained_model))
roberta = torch.hub.load('pytorch/fairseq:main', pretrained_model)
elif args.decoder_layers == 6:
logger.info('Load pre-trained decoder parameters from roberta.base')
roberta = torch.hub.load('pytorch/fairseq:main', 'roberta.base')
elif args.decoder_layers == 12:
logger.info('Load pre-trained decoder parameters from roberta.large')
roberta = torch.hub.load('pytorch/fairseq:main', 'roberta.large')
else:
                raise AttributeError('Cannot determine the pre-trained model')
if 'LOCAL_RANK' in os.environ and os.environ['LOCAL_RANK'] == '0':
torch.distributed.barrier()
roberta.model.args.encoder_layers = args.decoder_layers
roberta.model.args.fp16 = args.fp16
roberta_args = TrOCRModel.read_args_from_roberta(roberta.model.args)
roberta_args.encoder_embed_dim = args.encoder_embed_dim
decoder = TransformerDecoder(
roberta_args,
task.target_dictionary,
decoder_embed_tokens,
no_encoder_attn=False,
)
roberta_layers = roberta.model.encoder.sentence_encoder.layers
decoder_layers = decoder.layers
offset = len(roberta_layers) - len(decoder_layers)
assert offset >= 0
decoder_dict = roberta.state_dict()
new_decoder_dict = {}
for key, val in decoder_dict.items():
if key.startswith('model.encoder.sentence_encoder.layers.'):
layer_num = int(key[len('model.encoder.sentence_encoder.layers.'):].split('.')[0])
if layer_num - offset < 0:
continue
else:
new_key = 'model.encoder.sentence_encoder.layers.{}.'.format(
str(layer_num - offset)) + '.'.join(
key[len('model.encoder.sentence_encoder.layers.'):].split('.')[1:])
new_decoder_dict[new_key] = val
else:
new_decoder_dict[key] = val
decoder_dict = new_decoder_dict
for k, w in list(decoder_dict.items()):
if '.lm_head' in k:
k_proj = "output_projection." + k[len('model.encoder.lm_head.'):]
decoder_dict[k_proj] = w.detach().clone()
del decoder_dict[k]
del decoder_dict['_float_tensor']
del decoder_dict['output_projection.weight']
del decoder_dict['output_projection.bias']
del decoder_dict['output_projection.dense.weight']
del decoder_dict['output_projection.dense.bias']
del decoder_dict['output_projection.layer_norm.weight']
del decoder_dict['output_projection.layer_norm.bias']
new_decoder_dict = {}
for key, val in decoder_dict.items():
if "sentence_encoder" in key:
key = key[len('model.encoder.sentence_encoder.'):]
elif "encoder" in key:
key = key[len('model.encoder.'):]
new_decoder_dict[key] = val
if hasattr(args, 'only_keep_pretrained_decoder_structure') and args.only_keep_pretrained_decoder_structure:
logger.info('Only keep the pretrained decoder structure.')
pass
else:
missing_keys, unexpected_keys = decoder.load_state_dict(
new_decoder_dict, strict=False
)
elif getattr(args, "decoder_pretrained", None) == 'unilm':
logger.info('Decoder is pretrained using the unilm.')
prefix_of_parameter = 'bert'
decoder_embed_tokens = cls.build_embedding(
args, task.target_dictionary, args.decoder_embed_dim, args.decoder_embed_path
)
decoder = UniLMDecoder(
args,
task.target_dictionary,
decoder_embed_tokens,
no_encoder_attn=False,
)
            if hasattr(args, 'decoder_pretrained_url') and args.decoder_pretrained_url is not None and args.decoder_pretrained_url != '':
unilm_url = args.decoder_pretrained_url
logger.info('The unilm model url: {}.'.format(unilm_url[:unilm_url.find('?')]))
unilm_state_dict = torch.hub.load_state_dict_from_url(unilm_url)
unilm_layers = OrderedDict([(k, unilm_state_dict[k]) for k in unilm_state_dict.keys() if k.startswith(prefix_of_parameter + '.encoder.layer.')])
unilm_layers_num = []
for k in unilm_layers.keys():
t = k.replace(prefix_of_parameter + '.encoder.layer.', '')
t = t[:t.find('.')]
unilm_layers_num.append(int(t))
unilm_layers_num = max(unilm_layers_num) + 1
offset = unilm_layers_num - len(decoder.layers)
assert offset == 0
decoder_dict = decoder.state_dict()
# embedding
new_pos_weight = torch.zeros_like(decoder_dict['embed_positions.weight'])
                # fairseq position numbering starts right after the padding index, i.e. it is offset by padding_idx + 1
new_pos_weight[task.target_dictionary.pad() + 1:, :] = unilm_state_dict[prefix_of_parameter + '.embeddings.position_embeddings.weight']
new_decoder_dict = {
'embed_tokens.weight': unilm_state_dict[prefix_of_parameter + '.embeddings.word_embeddings.weight'],
'embed_positions.weight': new_pos_weight,
'layernorm_embedding.weight': unilm_state_dict[prefix_of_parameter + '.embeddings.LayerNorm.weight'],
'layernorm_embedding.bias': unilm_state_dict[prefix_of_parameter + '.embeddings.LayerNorm.bias']
}
# layers
key_map = {
'self_attn.k_proj': 'attention.self.key',
'self_attn.v_proj': 'attention.self.value',
'self_attn.q_proj': 'attention.self.query',
'self_attn.out_proj': 'attention.output.dense',
'self_attn_layer_norm': 'attention.output.LayerNorm',
'fc1': 'intermediate.dense',
'fc2': 'output.dense',
'final_layer_norm': 'output.LayerNorm'
}
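                # Added comment (not in the original file): each entry maps a fairseq decoder
                # parameter name onto the corresponding UniLM/BERT name before copying, e.g.
                #   layers.0.self_attn.k_proj.weight  <-  bert.encoder.layer.0.attention.self.key.weight
                # (with prefix_of_parameter == 'bert' as set above).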
for layer_id in range(unilm_layers_num):
unilm_prefix = prefix_of_parameter + '.encoder.layer.{}.'.format(layer_id)
decoder_prefix = 'layers.{}.'.format(layer_id)
for key in key_map:
for suffix in ['.weight', '.bias']:
decoder_key = decoder_prefix + key + suffix
unilm_key = unilm_prefix + key_map[key] + suffix
if decoder_key in decoder_dict and unilm_key in unilm_state_dict:
new_decoder_dict[decoder_key] = unilm_state_dict[unilm_key]
if hasattr(args, "reset_dictionary") and args.reset_dictionary:
logger.info('Reset token embedding weights during decoder initialization.')
del new_decoder_dict['embed_tokens.weight']
elif hasattr(args, "adapt_dictionary") and args.adapt_dictionary:
unilm_embed_tokens_weight = new_decoder_dict['embed_tokens.weight']
logger.info('Adapt token embedding weights during decoder initialization from {} to {}'.format(unilm_embed_tokens_weight.shape[0], decoder_embed_tokens.weight.shape[0]))
new_decoder_dict['embed_tokens.weight'] = torch.zeros_like(decoder_dict['embed_tokens.weight'])
new_decoder_dict['embed_tokens.weight'][:min(unilm_embed_tokens_weight.shape[0], decoder_dict['embed_tokens.weight'].shape[0]), :] = unilm_embed_tokens_weight[:min(unilm_embed_tokens_weight.shape[0], decoder_dict['embed_tokens.weight'].shape[0]), :]
if hasattr(args, 'only_keep_pretrained_decoder_structure') and args.only_keep_pretrained_decoder_structure:
logger.info('Only keep the pretrained decoder structure.')
pass
else:
missing_keys, unexpected_keys = decoder.load_state_dict(
new_decoder_dict, strict=False
)
else:
                logger.warning('No unilm model url was specified, so the decoder is randomly initialized.')
# freeze k_proj bias
for layer in decoder.layers:
layer.self_attn.k_proj.bias.requires_grad = False
elif getattr(args, "decoder_pretrained", None).startswith('roberta'):
logger.info('Using the old version loading roberta.')
decoder_embed_tokens = cls.build_embedding(
args, task.target_dictionary, args.decoder_embed_dim, args.decoder_embed_path
)
decoder = TransformerDecoder(
args = args,
dictionary=task.target_dictionary,
embed_tokens=decoder_embed_tokens,
no_encoder_attn=False
)
pretrained_model = getattr(args, "decoder_pretrained", None)
specified = pretrained_model.find('-')!=-1
if 'LOCAL_RANK' in os.environ and os.environ['LOCAL_RANK'] != '0':
torch.distributed.barrier()
if specified:
pretrained_model = pretrained_model.replace('-', '.')
logger.info('Load pre-trained decoder parameters from {}'.format(pretrained_model))
roberta = torch.hub.load('pytorch/fairseq:main', pretrained_model)
elif args.decoder_layers == 6:
logger.info('Load pre-trained decoder parameters from roberta.base')
roberta = torch.hub.load('pytorch/fairseq:main', 'roberta.base')
elif args.decoder_layers == 12:
logger.info('Load pre-trained decoder parameters from roberta.large')
roberta = torch.hub.load('pytorch/fairseq:main', 'roberta.large')
else:
                raise AttributeError('Cannot determine the pre-trained model')
if 'LOCAL_RANK' in os.environ and os.environ['LOCAL_RANK'] == '0':
torch.distributed.barrier()
if hasattr(args, 'only_keep_pretrained_decoder_structure') and args.only_keep_pretrained_decoder_structure:
logger.info('Only keep the pretrained decoder structure.')
pass
else:
decoder.embed_tokens.load_state_dict(roberta.model.encoder.sentence_encoder.embed_tokens.state_dict())
roberta_layers = roberta.model.encoder.sentence_encoder.layers
decoder_layers = decoder.layers
offset = len(roberta_layers) - len(decoder_layers)
assert offset >= 0
for i in range(len(decoder_layers)):
roberta_i = i + offset
decoder_layers[i].self_attn.load_state_dict(roberta_layers[roberta_i].self_attn.state_dict())
decoder_layers[i].self_attn_layer_norm.load_state_dict(roberta_layers[roberta_i].self_attn_layer_norm.state_dict())
else:
raise Exception('Undefined decoder pretraining method.')
model = cls(encoder, decoder)
return model
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
def forward(self, imgs, prev_output_tokens, **kwargs):
encoder_out = self.encoder(imgs, **kwargs) # (seq_len, batch, embed_dim)
decoder_out = self.decoder(
prev_output_tokens, encoder_out=encoder_out, **kwargs
) # (batch, seq_len, vocab_size)
return decoder_out
@register_model_architecture('DeiT_TR', 'deit_base_decoder_base')
def deit_base_decoder_base(args):
# DeiT Encoder deit_base_distilled_patch16_384
args.deit_arch = getattr(args, "deit_arch", "deit_base_distilled_patch16_384")
# Transformer Decoder
# args.encoder_embed_dim = 768
base_transformer(args)
@register_model_architecture('DeiT_TR', 'deit_base_decoder_large')
def deit_base_decoder_large(args):
# DeiT Encoder deit_base_distilled_patch16_384
args.deit_arch = getattr(args, "deit_arch", "deit_base_distilled_patch16_384")
# Transformer Decoder
# args.encoder_embed_dim = 768
args.decoder_layers = getattr(args, "decoder_layers", 12)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
base_transformer(args)
@register_model_architecture('TrOCR', 'trocr_base')
@register_model_architecture('DeiT_TR', 'beit_base_decoder_large')
def beit_base_decoder_large(args):
    # BEiT Encoder beit_base_patch16_384
args.deit_arch = getattr(args, "deit_arch", "beit_base_patch16_384")
# Transformer Decoder
# args.encoder_embed_dim = 768
args.decoder_layers = getattr(args, "decoder_layers", 12)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
base_transformer(args)
@register_model_architecture('TrOCR', 'trocr_large')
@register_model_architecture('DeiT_TR', 'beit_large_decoder_large')
def beit_large_decoder_large(args):
    # BEiT Encoder beit_large_patch16_384
args.deit_arch = getattr(args, "deit_arch", "beit_large_patch16_384")
# Transformer Decoder
# args.encoder_embed_dim = 1024
args.decoder_layers = getattr(args, "decoder_layers", 12)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
base_transformer(args)
@register_model_architecture('DeiT_TR', 'deit_base_decoder_large_custom_size')
def deit_base_decoder_large_custom_size(args):
# DeiT Encoder deit_base_distilled_patch16_custom_size
args.deit_arch = getattr(args, "deit_arch", "deit_base_distilled_patch16_custom_size")
# Transformer Decoder
# args.encoder_embed_dim = 768
args.decoder_layers = getattr(args, "decoder_layers", 12)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
base_transformer(args)
def nlrv4_compressed_tiny(args):
args.decoder_learned_pos = True
args.layernorm_embedding = True
args.decoder_attention_heads = 8
args.decoder_embed_dim = 256
args.decoder_output_dim = 256
args.decoder_ffn_embed_dim = 1024
args.dropout = 0.1
args.decoder_layers = 6
args.max_target_positions = 512
@register_model_architecture('TrOCR', 'trocr_small_224')
def trocr_small(args):
    # DeiT Encoder deit_small_distilled_patch16_224
args.deit_arch = getattr(args, "deit_arch", "deit_small_distilled_patch16_224")
nlrv4_compressed_tiny(args)
# Transformer Decoder
base_transformer(args)
@register_model_architecture('TrOCR', 'trocr_small')
@register_model_architecture('TrOCR', 'trocr_small_384')
def trocr_small_384(args):
    # DeiT Encoder deit_small_distilled_patch16_384
args.deit_arch = getattr(args, "deit_arch", "deit_small_distilled_patch16_384")
nlrv4_compressed_tiny(args)
# Transformer Decoder
base_transformer(args)
class TrOCREncoder(FairseqEncoder):
def __init__(self, args, dictionary):
super().__init__(dictionary)
if hasattr(args, 'only_keep_pretrained_encoder_structure') and args.only_keep_pretrained_encoder_structure:
pretrained = False
else:
pretrained = True
if 'custom_size' in args.deit_arch:
self.deit = create_model(args.deit_arch, pretrained=pretrained, img_size=args.input_size, ape=args.ape, mask_ratio=args.mask_ratio)
else:
self.deit = create_model(args.deit_arch, pretrained=pretrained, ape=args.ape, mask_ratio=args.mask_ratio)
self.fp16 = args.fp16
def forward(self, imgs):
if self.fp16:
imgs = imgs.half()
x, encoder_embedding = self.deit.forward_features(imgs) # bs, n + 2, dim
x = x.transpose(0, 1) # n + 2, bs, dim
encoder_padding_mask = torch.zeros(*x.shape[:2]).transpose(0, 1).to(imgs.device)
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [encoder_padding_mask], # B x T
"encoder_embedding": [encoder_embedding], # B x T x C
"encoder_states": [], # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
}
def reorder_encoder_out(self, encoder_out, new_order):
"""
Reorder encoder output according to `new_order`.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
`encoder_out` rearranged according to `new_order`
"""
_encoder_out = encoder_out['encoder_out'][0]
_encoder_padding_mask = encoder_out['encoder_padding_mask'][0]
_encoder_embedding = encoder_out['encoder_embedding'][0]
return {
"encoder_out": [_encoder_out.index_select(1, new_order)],
"encoder_padding_mask": [_encoder_padding_mask.index_select(0, new_order)], # B x T
"encoder_embedding": [_encoder_padding_mask.index_select(0, new_order)], # B x T x C
"encoder_states": [],
"src_tokens": [],
"src_lengths": [],
}
if __name__ == '__main__':
pass
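    # Illustrative sketch (assumption: fairseq is importable; this is not an official
    # entry point of the project). The @register_model_architecture hooks above only
    # fill missing hyper-parameters on an argparse-style namespace in place, so their
    # defaults can be inspected without instantiating the encoder or decoder.
    from argparse import Namespace
    _args = Namespace()
    beit_large_decoder_large(_args)
    # expected to print: beit_large_patch16_384 12 1024
    print(_args.deit_arch, _args.decoder_layers, _args.decoder_embed_dim)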
| EXA-1-master | exa/models/unilm-master/trocr/trocr_models.py |
from fairseq.scoring import BaseScorer, register_scorer
from nltk.metrics.distance import edit_distance
from fairseq.dataclass import FairseqDataclass
import fastwer
from Levenshtein import distance
import string
@register_scorer("cer", dataclass=FairseqDataclass)
class CERScorer(BaseScorer):
def __init__(self, cfg):
super().__init__(cfg)
self.refs = []
self.preds = []
def add_string(self, ref, pred):
self.refs.append(ref)
self.preds.append(pred)
def score(self):
return fastwer.score(self.preds, self.refs, char_level=True)
def result_string(self) -> str:
return f"CER: {self.score():.2f}"
@register_scorer("wpa", dataclass=FairseqDataclass)
class WPAScorer(BaseScorer):
def __init__(self, cfg):
super().__init__(cfg)
self.refs = []
self.preds = []
self.alphabet = string.digits + string.ascii_lowercase
def filter(self, string):
string = ''.join([i for i in string if i in self.alphabet])
return string
def add_string(self, ref, pred):
# print(f'[Pred] gt: "{ref}" | pred: "{pred}"')
self.refs.append(self.filter(ref.lower()))
self.preds.append(self.filter(pred.lower()))
def score(self):
length = len(self.refs)
correct = 0
for i in range(length):
if self.refs[i] == self.preds[i]:
correct += 1
return round(correct / length * 100, 2)
# return 100 - fastwer.score(self.preds, self.refs, char_level=False)
def result_string(self) -> str:
return f"WPA: {self.score():.2f}"
@register_scorer("acc_ed", dataclass=FairseqDataclass)
class AccEDScorer(BaseScorer):
def __init__(self, args):
super(AccEDScorer, self).__init__(args)
self.n_data = 0
self.n_correct = 0
self.ed = 0
def add_string(self, ref, pred):
self.n_data += 1
if ref == pred:
self.n_correct += 1
self.ed += edit_distance(ref, pred)
self.ref.append(ref)
self.pred.append(pred)
def score(self):
return self.n_correct / float(self.n_data) * 100, self.ed / float(self.n_data)
def result_string(self):
acc, norm_ed = self.score()
return f"Accuracy: {acc:.3f} Norm ED: {norm_ed:.2f}"
@register_scorer("sroie", dataclass=FairseqDataclass)
class SROIEScorer(BaseScorer):
def __init__(self, args):
super(SROIEScorer, self).__init__(args)
self.n_detected_words = 0
self.n_gt_words = 0
self.n_match_words = 0
def add_string(self, ref, pred):
pred_words = list(pred.split())
ref_words = list(ref.split())
self.n_gt_words += len(ref_words)
self.n_detected_words += len(pred_words)
for pred_w in pred_words:
if pred_w in ref_words:
self.n_match_words += 1
ref_words.remove(pred_w)
self.ref.append(ref)
self.pred.append(pred)
def score(self):
prec = self.n_match_words / float(self.n_detected_words) * 100
recall = self.n_match_words / float(self.n_gt_words) * 100
f1 = 2 * (prec * recall) / (prec + recall)
return prec, recall, f1
def result_string(self):
prec, recall, f1 = self.score()
return f"Precision: {prec:.3f} Recall: {recall:.3f} F1: {f1:.3f}" | EXA-1-master | exa/models/unilm-master/trocr/scoring.py |
import os
from data import SROIETask2
from tqdm import tqdm
import shutil
import zipfile
if __name__ == '__main__':
test_dir = '../SROIE_Task2_Original/test'
output_dir = 'temp'
os.makedirs(output_dir, exist_ok=True)
generate_txt_path = '../generate-test.txt'
output_file = None
output_fp = None
with open(generate_txt_path, 'r', encoding='utf8') as fp:
lines = list(fp.readlines())
while not lines[0].startswith('T-0'):
lines = lines[1:]
_, data = SROIETask2(test_dir, None, None)
for t in tqdm(data):
file_name = t['file_name']
image_id = int(t['image_id'])
this_output_file = os.path.basename(file_name).replace('.jpg', '.txt')
if this_output_file != output_file:
if output_fp is not None:
output_fp.close()
output_file = this_output_file
output_fp = open(os.path.join(output_dir, output_file), 'w', encoding='utf8')
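        # Assumption documented here: the fairseq-generate log contains a 4-line block
        # per sample (T-, H-, D-, P-) once everything before 'T-0' has been stripped
        # above, so the detokenized D- line of `image_id` sits at offset 2.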
pred_line_id = image_id * 4 + 2
pred_line = lines[pred_line_id]
assert pred_line.startswith('D-{:d}'.format(image_id))
pred_line = pred_line[pred_line.find('\t') + 1:]
pred_str = pred_line[pred_line.find('\t') + 1:]
for word in pred_str.split():
output_fp.write(word + '\n')
if output_fp:
output_fp.close()
zip_fp = zipfile.ZipFile('predictions.zip', 'w')
for txt_file in os.listdir(output_dir):
zip_fp.write(os.path.join(output_dir, txt_file), txt_file)
zip_fp.close()
shutil.rmtree(output_dir)
| EXA-1-master | exa/models/unilm-master/trocr/convert_to_SROIE_format.py |
from .task import TextRecognitionTask
from .vit_models import ViTTRModel, ViT_TR_base
from .scoring import AccEDScorer
from .deit import *
from .trocr_models import TrOCRModel
from .bpe import GPT2BPEEnhancedSpace | EXA-1-master | exa/models/unilm-master/trocr/__init__.py |
import torch
import math
from typing import Dict, List, Optional
from fairseq.sequence_generator import SequenceGenerator
from torch import Tensor
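# Note (inferred from the code below): this generator mirrors fairseq's
# SequenceGenerator._generate almost line for line, but the "source" is an image
# batch (net_input["imgs"]); src_tokens/src_lengths are therefore reconstructed
# from the encoder's padding mask instead of being read from the sample.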
class TextRecognitionGenerator(SequenceGenerator):
def _generate(
self,
sample: Dict[str, Dict[str, Tensor]],
prefix_tokens: Optional[Tensor] = None,
constraints: Optional[Tensor] = None,
bos_token: Optional[int] = None,
):
incremental_states = torch.jit.annotate(
List[Dict[str, Dict[str, Optional[Tensor]]]],
[
torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
for i in range(self.model.models_size)
],
)
net_input = sample["net_input"]
device = sample["net_input"]["imgs"].device
# compute the encoder output for each beam
# "encoder_out": [x], # T x B x C
# "encoder_padding_mask": [encoder_padding_mask], # B x T
# "encoder_embedding": [encoder_embedding], # B x T x C
# "encoder_states": [], # List[T x B x C]
# "src_tokens": [],
# "src_lengths": [],
encoder_outs = self.model.forward_encoder(net_input) # T x B x C
src_lengths = encoder_outs[0]['encoder_padding_mask'][0].eq(0).long().sum(dim=1) # B
src_tokens = encoder_outs[0]['encoder_padding_mask'][0] # B x T
# bsz: total number of sentences in beam
# Note that src_tokens may have more than 2 dimensions (i.e. audio features)
bsz, src_len = src_tokens.size()[:2]
beam_size = self.beam_size
if constraints is not None and not self.search.supports_constraints:
raise NotImplementedError(
"Target-side constraints were provided, but search method doesn't support them"
)
# Initialize constraints, when active
self.search.init_constraints(constraints, beam_size)
max_len: int = -1
if self.match_source_len:
max_len = src_lengths.max().item()
else:
max_len = min(
int(self.max_len_a * src_len + self.max_len_b),
# exclude the EOS marker
self.model.max_decoder_positions() - 1,
)
assert (
self.min_len <= max_len
), "min_len cannot be larger than max_len, please adjust these!"
# placeholder of indices for bsz * beam_size to hold tokens and accumulative scores
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src_tokens.device).long()
encoder_outs = self.model.reorder_encoder_out(encoder_outs, new_order)
# ensure encoder_outs is a List.
assert encoder_outs is not None
# initialize buffers
scores = (
torch.zeros(bsz * beam_size, max_len + 1).to(src_tokens).float()
) # +1 for eos; pad is never chosen for scoring
tokens = (
torch.zeros(bsz * beam_size, max_len + 2)
.to(src_tokens)
.long()
.fill_(self.pad)
) # +2 for eos and pad
tokens[:, 0] = self.eos if bos_token is None else bos_token
attn: Optional[Tensor] = None
# A list that indicates candidates that should be ignored.
# For example, suppose we're sampling and have already finalized 2/5
# samples. Then cands_to_ignore would mark 2 positions as being ignored,
# so that we only finalize the remaining 3 samples.
cands_to_ignore = (
torch.zeros(bsz, beam_size).to(src_tokens).eq(-1)
) # forward and backward-compatible False mask
# list of completed sentences
finalized = torch.jit.annotate(
List[List[Dict[str, Tensor]]],
[torch.jit.annotate(List[Dict[str, Tensor]], []) for i in range(bsz)],
        ) # contains lists of dictionaries of information about the hypothesis being finalized at each step
finished = [
False for i in range(bsz)
] # a boolean array indicating if the sentence at the index is finished or not
num_remaining_sent = bsz # number of sentences remaining
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (
(torch.arange(0, bsz) * beam_size)
.unsqueeze(1)
.type_as(tokens)
.to(src_tokens.device)
)
cand_offsets = torch.arange(0, cand_size).type_as(tokens).to(src_tokens.device)
reorder_state: Optional[Tensor] = None
batch_idxs: Optional[Tensor] = None
original_batch_idxs: Optional[Tensor] = None
if "id" in sample and isinstance(sample["id"], Tensor):
original_batch_idxs = sample["id"]
else:
original_batch_idxs = torch.arange(0, bsz).type_as(tokens)
for step in range(max_len + 1): # one extra step for EOS marker
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(
batch_idxs
)
reorder_state.view(-1, beam_size).add_(
corr.unsqueeze(-1) * beam_size
)
original_batch_idxs = original_batch_idxs[batch_idxs]
self.model.reorder_incremental_state(incremental_states, reorder_state)
encoder_outs = self.model.reorder_encoder_out(
encoder_outs, reorder_state
)
lprobs, avg_attn_scores = self.model.forward_decoder(
tokens[:, : step + 1],
encoder_outs,
incremental_states,
self.temperature,
)
if self.lm_model is not None:
lm_out = self.lm_model(tokens[:, : step + 1])
probs = self.lm_model.get_normalized_probs(
lm_out, log_probs=True, sample=None
)
probs = probs[:, -1, :] * self.lm_weight
lprobs += probs
lprobs[lprobs != lprobs] = torch.tensor(-math.inf).to(lprobs)
lprobs[:, self.pad] = -math.inf # never select pad
lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty
# handle max length constraint
if step >= max_len:
lprobs[:, : self.eos] = -math.inf
lprobs[:, self.eos + 1 :] = -math.inf
# handle prefix tokens (possibly with different lengths)
if (
prefix_tokens is not None
and step < prefix_tokens.size(1)
and step < max_len
):
lprobs, tokens, scores = self._prefix_tokens(
step, lprobs, scores, tokens, prefix_tokens, beam_size
)
elif step < self.min_len:
# minimum length constraint (does not apply if using prefix_tokens)
lprobs[:, self.eos] = -math.inf
# Record attention scores, only support avg_attn_scores is a Tensor
if avg_attn_scores is not None:
if attn is None:
attn = torch.empty(
bsz * beam_size, avg_attn_scores.size(1), max_len + 2
).to(scores)
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(lprobs)
eos_bbsz_idx = torch.empty(0).to(
tokens
) # indices of hypothesis ending with eos (finished sentences)
eos_scores = torch.empty(0).to(
scores
) # scores of hypothesis ending with eos (finished sentences)
if self.should_set_src_lengths:
self.search.set_src_lengths(src_lengths)
if self.repeat_ngram_blocker is not None:
lprobs = self.repeat_ngram_blocker(tokens, lprobs, bsz, beam_size, step)
# Shape: (batch, cand_size)
cand_scores, cand_indices, cand_beams = self.search.step(
step,
lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step],
tokens[:, : step + 1],
original_batch_idxs,
)
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos
# Shape of eos_mask: (batch size, beam size)
eos_mask = cand_indices.eq(self.eos) & cand_scores.ne(-math.inf)
eos_mask[:, :beam_size][cands_to_ignore] = torch.tensor(0).to(eos_mask)
# only consider eos when it's among the top beam_size indices
# Now we know what beam item(s) to finish
# Shape: 1d list of absolute-numbered
eos_bbsz_idx = torch.masked_select(
cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents: List[int] = []
if eos_bbsz_idx.numel() > 0:
eos_scores = torch.masked_select(
cand_scores[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents = self.finalize_hypos(
step,
eos_bbsz_idx,
eos_scores,
tokens,
scores,
finalized,
finished,
beam_size,
attn,
src_lengths,
max_len,
)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
if self.search.stop_on_max_len and step >= max_len:
break
            assert step < max_len, f"{step} < {max_len}"
# Remove finalized sentences (ones for which {beam_size}
# finished hypotheses have been generated) from the batch.
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = torch.ones(
bsz, dtype=torch.bool, device=cand_indices.device
)
batch_mask[finalized_sents] = False
# TODO replace `nonzero(as_tuple=False)` after TorchScript supports it
batch_idxs = torch.arange(
bsz, device=cand_indices.device
).masked_select(batch_mask)
# Choose the subset of the hypothesized constraints that will continue
self.search.prune_sentences(batch_idxs)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
cand_scores = cand_scores[batch_idxs]
cand_indices = cand_indices[batch_idxs]
if prefix_tokens is not None:
prefix_tokens = prefix_tokens[batch_idxs]
src_lengths = src_lengths[batch_idxs]
cands_to_ignore = cands_to_ignore[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(
new_bsz * beam_size, attn.size(1), -1
)
bsz = new_bsz
else:
batch_idxs = None
# Set active_mask so that values > cand_size indicate eos hypos
# and values < cand_size indicate candidate active hypos.
# After, the min values per row are the top candidate active hypos
# Rewrite the operator since the element wise or is not supported in torchscript.
eos_mask[:, :beam_size] = ~((~cands_to_ignore) & (~eos_mask[:, :beam_size]))
active_mask = torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[: eos_mask.size(1)],
)
# get the top beam_size active hypotheses, which are just
# the hypos with the smallest values in active_mask.
# {active_hypos} indicates which {beam_size} hypotheses
# from the list of {2 * beam_size} candidates were
# selected. Shapes: (batch size, beam size)
new_cands_to_ignore, active_hypos = torch.topk(
active_mask, k=beam_size, dim=1, largest=False
)
# update cands_to_ignore to ignore any finalized hypos.
cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size]
# Make sure there is at least one active item for each sentence in the batch.
assert (~cands_to_ignore).any(dim=1).all()
# update cands_to_ignore to ignore any finalized hypos
# {active_bbsz_idx} denotes which beam number is continued for each new hypothesis (a beam
# can be selected more than once).
active_bbsz_idx = torch.gather(cand_bbsz_idx, dim=1, index=active_hypos)
active_scores = torch.gather(cand_scores, dim=1, index=active_hypos)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
# Set the tokens for each beam (can select the same row more than once)
tokens[:, : step + 1] = torch.index_select(
tokens[:, : step + 1], dim=0, index=active_bbsz_idx
)
# Select the next token for each of them
tokens.view(bsz, beam_size, -1)[:, :, step + 1] = torch.gather(
cand_indices, dim=1, index=active_hypos
)
if step > 0:
scores[:, :step] = torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx
)
scores.view(bsz, beam_size, -1)[:, :, step] = torch.gather(
cand_scores, dim=1, index=active_hypos
)
# Update constraints based on which candidates were selected for the next beam
self.search.update_constraints(active_hypos)
# copy attention for active hypotheses
if attn is not None:
attn[:, :, : step + 2] = torch.index_select(
attn[:, :, : step + 2], dim=0, index=active_bbsz_idx
)
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
for sent in range(len(finalized)):
scores = torch.tensor(
[float(elem["score"].item()) for elem in finalized[sent]]
)
_, sorted_scores_indices = torch.sort(scores, descending=True)
finalized[sent] = [finalized[sent][ssi] for ssi in sorted_scores_indices]
finalized[sent] = torch.jit.annotate(
List[Dict[str, Tensor]], finalized[sent]
)
return finalized
| EXA-1-master | exa/models/unilm-master/trocr/generator.py |
import torch.nn as nn
from fairseq.models import FairseqEncoder, register_model, FairseqEncoderDecoderModel, register_model_architecture
from fairseq.models.transformer import TransformerDecoder, Embedding, TransformerModel
from fairseq.models.fairseq_encoder import EncoderOut
from fairseq import utils
# NOTE: ViTTREncoder below uses HybridEmbed, PatchEmbed and Block, which older timm
# releases (< 0.5) export from timm.models.vision_transformer; newer releases moved
# some of them, so this import may need adjusting for your timm version.
from timm.models.vision_transformer import HybridEmbed, PatchEmbed, Block
from timm.models.layers import trunc_normal_
import torch
from torch.hub import load_state_dict_from_url
from functools import partial
import logging
logger = logging.getLogger(__name__)
DEFAULT_MAX_TARGET_POSITIONS = 1024
@register_model('ViT_TR')
class ViTTRModel(FairseqEncoderDecoderModel):
@staticmethod
def add_args(parser):
TransformerModel.add_args(parser)
# parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
# help='decoder embedding dimension')
parser.add_argument(
'--vit-img-size', type=int, metavar='N',
help='the image size of h and w (h=w) of the ViT'
)
parser.add_argument(
'--vit-patch-size', type=int, metavar='N',
help='the patch size of h and w (h=w) of the ViT'
)
parser.add_argument(
'--vit-dim', type=int, metavar='N',
help='the hidden size of the ViT'
)
parser.add_argument(
'--vit-depth', type=int, metavar='N',
help='the layer num of the ViT'
)
parser.add_argument(
'--vit-heads', type=int, metavar='N',
help='the head num of the ViT'
)
parser.add_argument(
'--vit-channels', type=int, metavar='N', default=3,
help='the input image channels of the ViT'
)
parser.add_argument(
'--vit-dropout', type=float, default=0.0,
help='the dropout ratio of the ViT'
)
parser.add_argument(
'--vit-atten-dropout', type=float, default=0.0,
help='the input embedding dropout ratio of the ViT'
)
parser.add_argument(
'--encoder-pretrained-url', type=str,
help='the pretrained parameter url for the ViT encoder'
)
@classmethod
def build_model(cls, args, task):
encoder = ViTTREncoder(
args = args,
dictionary = task.source_dictionary
)
if args.encoder_pretrained_url:
            logger.info('load pretrained encoder parameters from: {}'.format(args.encoder_pretrained_url))
encoder_state_dict = load_state_dict_from_url(args.encoder_pretrained_url)
encoder.load_state_dict(encoder_state_dict, strict=False)
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
decoder_embed_tokens = cls.build_embedding(
args, task.target_dictionary, args.decoder_embed_dim, args.decoder_embed_path
)
decoder = TransformerDecoder(
args = args,
dictionary=task.target_dictionary,
embed_tokens=decoder_embed_tokens,
no_encoder_attn=False
)
model = cls(encoder, decoder)
return model
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
def forward(self, imgs, prev_output_tokens, **kwargs):
encoder_out = self.encoder(imgs, **kwargs)
decoder_out = self.decoder(
prev_output_tokens, encoder_out=encoder_out, **kwargs
)
return decoder_out
@register_model_architecture('ViT_TR', 'ViT_TR_base')
def ViT_TR_base(args):
# ViT Encoder vit_base_patch16_224
args.vit_img_size = getattr(args, "vit_img_size", 224)
args.resize_img_size = args.vit_img_size
args.vit_patch_size = getattr(args, "vit_patch_size", 16)
args.vit_dim = getattr(args, "vit_dim", 768)
args.vit_depth = getattr(args, "vit_depth", 12)
args.vit_heads = getattr(args, "vit_heads", 12)
args.encoder_pretrained_url = getattr(args, "encoder_pretrained_url",
"https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth")
# Transformer Decoder
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.no_cross_attention = getattr(args, "no_cross_attention", False)
args.cross_self_attention = getattr(args, "cross_self_attention", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.offload_activations = getattr(args, "offload_activations", False)
if args.offload_activations:
args.checkpoint_activations = True
args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None)
args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
@register_model_architecture('ViT_TR', 'ViT_TR_large')
def large_architecture(args):
    # ViT Encoder vit_large_patch16_384
args.vit_img_size = getattr(args, "vit_img_size", 384)
args.resize_img_size = args.vit_img_size
args.vit_patch_size = getattr(args, "vit_patch_size", 16)
args.vit_dim = getattr(args, "vit_dim", 1024)
args.vit_depth = getattr(args, "vit_depth", 24)
args.vit_heads = getattr(args, "vit_heads", 16)
args.encoder_pretrained_url = getattr(args, "encoder_pretrained_url",
"https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_384-b3be5167.pth")
# Transformer Decoder
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.no_cross_attention = getattr(args, "no_cross_attention", False)
args.cross_self_attention = getattr(args, "cross_self_attention", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.offload_activations = getattr(args, "offload_activations", False)
if args.offload_activations:
args.checkpoint_activations = True
args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None)
args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
class ViTTREncoder(FairseqEncoder):
def __init__(self, args, dictionary):
super().__init__(dictionary)
img_size = args.vit_img_size
patch_size = args.vit_patch_size
in_chans = args.vit_channels
embed_dim = args.vit_dim
depth = args.vit_depth
num_heads = args.vit_heads
mlp_ratio=4.
qkv_bias=True
qk_scale=None
drop_rate = args.vit_dropout
attn_drop_rate = args.vit_atten_dropout
drop_path_rate=0.
hybrid_backbone=None
norm_layer=None
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
if hybrid_backbone is not None:
self.patch_embed = HybridEmbed(
hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim)
else:
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def forward_features(self, x):
B = x.shape[0] # bs, num_patches, dim
x = self.patch_embed(x)
cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
x = x + self.pos_embed
encoder_embedding = x # bs, n + 1, dim
x = self.pos_drop(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x) # bs, n + 1, dim
return x, encoder_embedding
def forward(self, imgs):
x, encoder_embedding = self.forward_features(imgs) # bs, n + 1, dim
x = x.transpose(0, 1) # n + 1, bs, dim
encoder_padding_mask = torch.zeros(*x.shape[:2]).transpose(0, 1).to(imgs.device)
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [encoder_padding_mask], # B x T
"encoder_embedding": [encoder_embedding], # B x T x C
"encoder_states": [], # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
}
def reorder_encoder_out(self, encoder_out, new_order):
"""
Reorder encoder output according to `new_order`.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
`encoder_out` rearranged according to `new_order`
"""
_encoder_out = encoder_out['encoder_out'][0]
_encoder_padding_mask = encoder_out['encoder_padding_mask'][0]
_encoder_embedding = encoder_out['encoder_embedding'][0]
return {
"encoder_out": [_encoder_out.index_select(1, new_order)],
"encoder_padding_mask": [_encoder_padding_mask.index_select(0, new_order)], # B x T
"encoder_embedding": [_encoder_padding_mask.index_select(0, new_order)], # B x T x C
"encoder_states": [],
"src_tokens": [],
"src_lengths": [],
}
if __name__ == '__main__':
pass | EXA-1-master | exa/models/unilm-master/trocr/vit_models.py |
import torchvision.transforms as transforms
# from torchvision.transforms.functional import InterpolationMode
from PIL import Image, ImageFilter
import random
import torch
import numpy as np
import logging
from enum import Enum
from .augmentation.warp import Curve, Distort, Stretch
from .augmentation.geometry import Rotate, Perspective, Shrink, TranslateX, TranslateY
from .augmentation.pattern import VGrid, HGrid, Grid, RectGrid, EllipseGrid
from .augmentation.noise import GaussianNoise, ShotNoise, ImpulseNoise, SpeckleNoise
from .augmentation.blur import GaussianBlur, DefocusBlur, MotionBlur, GlassBlur, ZoomBlur
from .augmentation.camera import Contrast, Brightness, JpegCompression, Pixelate
from .augmentation.weather import Fog, Snow, Frost, Rain, Shadow
from .augmentation.process import Posterize, Solarize, Invert, Equalize, AutoContrast, Sharpness, Color
# 0: InterpolationMode.NEAREST,
# 2: InterpolationMode.BILINEAR,
# 3: InterpolationMode.BICUBIC,
# 4: InterpolationMode.BOX,
# 5: InterpolationMode.HAMMING,
# 1: InterpolationMode.LANCZOS,
class InterpolationMode():
NEAREST = 0
BILINEAR = 2
BICUBIC = 3
BOX = 4
HAMMING = 5
LANCZOS = 1
logger = logging.getLogger(__name__)
class ResizePad(object):
def __init__(self, imgH=64, imgW=3072, keep_ratio_with_pad=True):
self.imgH = imgH
self.imgW = imgW
assert keep_ratio_with_pad == True
self.keep_ratio_with_pad = keep_ratio_with_pad
def __call__(self, im):
old_size = im.size # old_size[0] is in (width, height) format
ratio = float(self.imgH)/old_size[1]
new_size = tuple([int(x*ratio) for x in old_size])
im = im.resize(new_size, Image.BICUBIC)
new_im = Image.new("RGB", (self.imgW, self.imgH))
new_im.paste(im, (0, 0))
return new_im
class WeightedRandomChoice:
def __init__(self, trans, weights=None):
self.trans = trans
if not weights:
self.weights = [1] * len(trans)
else:
assert len(trans) == len(weights)
self.weights = weights
def __call__(self, img):
t = random.choices(self.trans, weights=self.weights, k=1)[0]
try:
tfm_img = t(img)
except Exception as e:
logger.warning('Error during data_aug: '+str(e))
return img
return tfm_img
def __repr__(self):
format_string = self.__class__.__name__ + '('
        for t in self.trans:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
class Dilation(torch.nn.Module):
def __init__(self, kernel=3):
super().__init__()
self.kernel=kernel
def forward(self, img):
return img.filter(ImageFilter.MaxFilter(self.kernel))
def __repr__(self):
return self.__class__.__name__ + '(kernel={})'.format(self.kernel)
class Erosion(torch.nn.Module):
def __init__(self, kernel=3):
super().__init__()
self.kernel=kernel
def forward(self, img):
return img.filter(ImageFilter.MinFilter(self.kernel))
def __repr__(self):
return self.__class__.__name__ + '(kernel={})'.format(self.kernel)
class Underline(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, img):
img_np = np.array(img.convert('L'))
black_pixels = np.where(img_np < 50)
try:
y1 = max(black_pixels[0])
x0 = min(black_pixels[1])
x1 = max(black_pixels[1])
except:
return img
for x in range(x0, x1):
for y in range(y1, y1-3, -1):
try:
img.putpixel((x, y), (0, 0, 0))
except:
continue
return img
class KeepOriginal(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, img):
return img
def build_data_aug(size, mode, resnet=False, resizepad=False):
if resnet:
norm_tfm = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
else:
norm_tfm = transforms.Normalize(0.5, 0.5)
if resizepad:
resize_tfm = ResizePad(imgH=size[0], imgW=size[1])
else:
resize_tfm = transforms.Resize(size, interpolation=InterpolationMode.BICUBIC)
if mode == 'train':
return transforms.Compose([
WeightedRandomChoice([
# transforms.RandomHorizontalFlip(p=1),
transforms.RandomRotation(degrees=(-10, 10), expand=True, fill=255),
transforms.GaussianBlur(3),
Dilation(3),
Erosion(3),
transforms.Resize((size[0] // 3, size[1] // 3), interpolation=InterpolationMode.NEAREST),
Underline(),
KeepOriginal(),
]),
resize_tfm,
transforms.ToTensor(),
norm_tfm
])
else:
return transforms.Compose([
resize_tfm,
transforms.ToTensor(),
norm_tfm
])
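# Hedged usage sketch (illustrative only; 'line.jpg' is a hypothetical input path):
#   from PIL import Image
#   tfm = build_data_aug((384, 384), mode='train')
#   img = Image.open('line.jpg').convert('RGB')
#   x = tfm(img)   # torch.Tensor of shape (3, 384, 384), normalized to [-1, 1]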
class OptForDataAugment:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def isless(prob=0.5):
return np.random.uniform(0,1) < prob
class DataAugment(object):
'''
Supports with and without data augmentation
'''
def __init__(self, opt):
self.opt = opt
if not opt.eval:
self.process = [Posterize(), Solarize(), Invert(), Equalize(), AutoContrast(), Sharpness(), Color()]
self.camera = [Contrast(), Brightness(), JpegCompression(), Pixelate()]
self.pattern = [VGrid(), HGrid(), Grid(), RectGrid(), EllipseGrid()]
self.noise = [GaussianNoise(), ShotNoise(), ImpulseNoise(), SpeckleNoise()]
self.blur = [GaussianBlur(), DefocusBlur(), MotionBlur(), GlassBlur(), ZoomBlur()]
self.weather = [Fog(), Snow(), Frost(), Rain(), Shadow()]
self.noises = [self.blur, self.noise, self.weather]
self.processes = [self.camera, self.process]
self.warp = [Curve(), Distort(), Stretch()]
self.geometry = [Rotate(), Perspective(), Shrink()]
self.isbaseline_aug = False
# rand augment
if self.opt.isrand_aug:
self.augs = [self.process, self.camera, self.noise, self.blur, self.weather, self.pattern, self.warp, self.geometry]
# semantic augment
elif self.opt.issemantic_aug:
self.geometry = [Rotate(), Perspective(), Shrink()]
self.noise = [GaussianNoise()]
self.blur = [MotionBlur()]
self.augs = [self.noise, self.blur, self.geometry]
self.isbaseline_aug = True
# pp-ocr augment
elif self.opt.islearning_aug:
self.geometry = [Rotate(), Perspective()]
self.noise = [GaussianNoise()]
self.blur = [MotionBlur()]
self.warp = [Distort()]
self.augs = [self.warp, self.noise, self.blur, self.geometry]
self.isbaseline_aug = True
# scatter augment
elif self.opt.isscatter_aug:
self.geometry = [Shrink()]
self.warp = [Distort()]
self.augs = [self.warp, self.geometry]
self.baseline_aug = True
# rotation augment
elif self.opt.isrotation_aug:
self.geometry = [Rotate()]
self.augs = [self.geometry]
self.isbaseline_aug = True
def __call__(self, img):
'''
Must call img.copy() if pattern, Rain or Shadow is used
'''
img = img.resize((self.opt.imgW, self.opt.imgH), Image.BICUBIC)
if self.opt.eval or isless(self.opt.intact_prob):
pass
elif self.opt.isrand_aug or self.isbaseline_aug:
img = self.rand_aug(img)
# individual augment can also be selected
elif self.opt.issel_aug:
img = self.sel_aug(img)
img = transforms.ToTensor()(img)
img = transforms.Normalize(0.5, 0.5)(img)
return img
def rand_aug(self, img):
augs = np.random.choice(self.augs, self.opt.augs_num, replace=False)
for aug in augs:
index = np.random.randint(0, len(aug))
op = aug[index]
mag = np.random.randint(0, 3) if self.opt.augs_mag is None else self.opt.augs_mag
if type(op).__name__ == "Rain" or type(op).__name__ == "Grid":
img = op(img.copy(), mag=mag)
else:
img = op(img, mag=mag)
return img
def sel_aug(self, img):
prob = 1.
if self.opt.process:
mag = np.random.randint(0, 3)
index = np.random.randint(0, len(self.process))
op = self.process[index]
img = op(img, mag=mag, prob=prob)
if self.opt.noise:
mag = np.random.randint(0, 3)
index = np.random.randint(0, len(self.noise))
op = self.noise[index]
img = op(img, mag=mag, prob=prob)
if self.opt.blur:
mag = np.random.randint(0, 3)
index = np.random.randint(0, len(self.blur))
op = self.blur[index]
img = op(img, mag=mag, prob=prob)
if self.opt.weather:
mag = np.random.randint(0, 3)
index = np.random.randint(0, len(self.weather))
op = self.weather[index]
if type(op).__name__ == "Rain": #or "Grid" in type(op).__name__ :
img = op(img.copy(), mag=mag, prob=prob)
else:
img = op(img, mag=mag, prob=prob)
if self.opt.camera:
mag = np.random.randint(0, 3)
index = np.random.randint(0, len(self.camera))
op = self.camera[index]
img = op(img, mag=mag, prob=prob)
if self.opt.pattern:
mag = np.random.randint(0, 3)
index = np.random.randint(0, len(self.pattern))
op = self.pattern[index]
img = op(img.copy(), mag=mag, prob=prob)
iscurve = False
if self.opt.warp:
mag = np.random.randint(0, 3)
index = np.random.randint(0, len(self.warp))
op = self.warp[index]
if type(op).__name__ == "Curve":
iscurve = True
img = op(img, mag=mag, prob=prob)
if self.opt.geometry:
mag = np.random.randint(0, 3)
index = np.random.randint(0, len(self.geometry))
op = self.geometry[index]
if type(op).__name__ == "Rotate":
img = op(img, iscurve=iscurve, mag=mag, prob=prob)
else:
img = op(img, mag=mag, prob=prob)
return img | EXA-1-master | exa/models/unilm-master/trocr/data_aug.py |
import task
import deit
import trocr_models
import torch
import fairseq
from fairseq import utils
from fairseq_cli import generate
from PIL import Image
import torchvision.transforms as transforms
def init(model_path, beam=5):
model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[model_path],
arg_overrides={"beam": beam, "task": "text_recognition", "data": "", "fp16": False})
device = "cuda" if torch.cuda.is_available() else "cpu"
model[0].to(device)
img_transform = transforms.Compose([
transforms.Resize((384, 384), interpolation=3),
transforms.ToTensor(),
transforms.Normalize(0.5, 0.5)
])
generator = task.build_generator(
model, cfg.generation, extra_gen_cls_kwargs={'lm_model': None, 'lm_weight': None}
)
bpe = task.build_bpe(cfg.bpe)
return model, cfg, task, generator, bpe, img_transform, device
def preprocess(img_path, img_transform, device):
im = Image.open(img_path).convert('RGB').resize((384, 384))
im = img_transform(im).unsqueeze(0).to(device).float()
sample = {
'net_input': {"imgs": im},
}
return sample
def get_text(cfg, task, generator, model, sample, bpe):
decoder_output = task.inference_step(generator, model, sample, prefix_tokens=None, constraints=None)
decoder_output = decoder_output[0][0] #top1
hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
hypo_tokens=decoder_output["tokens"].int().cpu(),
src_str="",
alignment=decoder_output["alignment"],
align_dict=None,
tgt_dict=model[0].decoder.dictionary,
remove_bpe=cfg.common_eval.post_process,
extra_symbols_to_ignore=generate.get_symbols_to_strip_from_output(generator),
)
detok_hypo_str = bpe.decode(hypo_str)
return detok_hypo_str
if __name__ == '__main__':
model_path = 'path/to/model'
jpg_path = "path/to/pic"
beam = 5
model, cfg, task, generator, bpe, img_transform, device = init(model_path, beam)
    sample = preprocess(jpg_path, img_transform, device)
    text = get_text(cfg, task, generator, model, sample, bpe)
print(text)
print('done')
| EXA-1-master | exa/models/unilm-master/trocr/pic_inference.py |
import glob
import logging
import os
import random
import torch
from fairseq.data import FairseqDataset, data_utils
from natsort import natsorted
from PIL import Image
from tqdm import tqdm
logger = logging.getLogger(__name__)
def default_collater(target_dict, samples, dataset=None):
if not samples:
return None
if any([sample is None for sample in samples]):
if not dataset:
return None
len_batch = len(samples)
while True:
samples.append(dataset[random.choice(range(len(dataset)))])
            samples = list(filter(lambda x: x is not None, samples))
if len(samples) == len_batch:
break
indices = []
imgs = [] # bs, c, h , w
target_samples = []
target_ntokens = 0
for sample in samples:
index = sample['id']
indices.append(index)
imgs.append(sample['tfm_img'])
target_samples.append(sample['label_ids'].long())
target_ntokens += len(sample['label_ids'])
num_sentences = len(samples)
target_batch = data_utils.collate_tokens(target_samples,
pad_idx=target_dict.pad(),
eos_idx=target_dict.eos(),
move_eos_to_beginning=False)
rotate_batch = data_utils.collate_tokens(target_samples,
pad_idx=target_dict.pad(),
eos_idx=target_dict.eos(),
move_eos_to_beginning=True)
indices = torch.tensor(indices, dtype=torch.long)
imgs = torch.stack(imgs, dim=0)
return {
'id': indices,
'net_input': {
'imgs': imgs,
'prev_output_tokens': rotate_batch
},
'ntokens': target_ntokens,
'nsentences': num_sentences,
'target': target_batch
}
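# Shape note (inferred from default_collater above): for a batch of B image crops
# and a maximum target length T in the batch,
#   batch['net_input']['imgs']               -> FloatTensor (B, C, H, W)
#   batch['net_input']['prev_output_tokens'] -> LongTensor  (B, T), eos moved to the front
#   batch['target']                          -> LongTensor  (B, T), padded with target_dict.pad()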
def read_txt_and_tokenize(txt_path: str, bpe, target_dict):
annotations = []
with open(txt_path, 'r', encoding='utf8') as fp:
for line in fp.readlines():
line = line.rstrip()
if not line:
continue
line_split = line.split(',', maxsplit=8)
quadrangle = list(map(int, line_split[:8]))
content = line_split[-1]
if bpe:
encoded_str = bpe.encode(content)
else:
encoded_str = content
xs = [quadrangle[i] for i in range(0, 8, 2)]
ys = [quadrangle[i] for i in range(1, 8, 2)]
bbox = [min(xs), min(ys), max(xs), max(ys)]
annotations.append({'bbox': bbox, 'encoded_str': encoded_str, 'category_id': 0, 'segmentation': [quadrangle]}) # 0 for text, 1 for background
return annotations
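# Assumption documented from the parser above: each SROIE Task-2 ground-truth line is
# "x1,y1,x2,y2,x3,y3,x4,y4,transcript" -- eight integer corner coordinates followed by
# the text (the transcript may itself contain commas, hence maxsplit=8).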
def SROIETask2(root_dir: str, bpe, target_dict, crop_img_output_dir=None):
data = []
img_id = -1
crop_data = []
crop_img_id = -1
image_paths = natsorted(list(glob.glob(os.path.join(root_dir, '*.jpg'))))
for jpg_path in tqdm(image_paths):
im = Image.open(jpg_path).convert('RGB')
img_w, img_h = im.size
img_id += 1
txt_path = jpg_path.replace('.jpg', '.txt')
annotations = read_txt_and_tokenize(txt_path, bpe, target_dict)
img_dict = {'file_name': jpg_path, 'width': img_w, 'height': img_h, 'image_id':img_id, 'annotations':annotations}
data.append(img_dict)
for ann in annotations:
crop_w = ann['bbox'][2] - ann['bbox'][0]
crop_h = ann['bbox'][3] - ann['bbox'][1]
if not (crop_w > 0 and crop_h > 0):
                logger.warning('Error during image cropping: {} has a zero-area bbox.'.format(os.path.basename(jpg_path)))
continue
crop_img_id += 1
crop_im = im.crop(ann['bbox'])
if crop_img_output_dir:
crop_im.save(os.path.join(crop_img_output_dir, '{:d}.jpg'.format(crop_img_id)))
crop_img_dict = {'img':crop_im, 'file_name': jpg_path, 'width': crop_w, 'height': crop_h, 'image_id':crop_img_id, 'encoded_str':ann['encoded_str']}
crop_data.append(crop_img_dict)
return data, crop_data
class SROIETextRecognitionDataset(FairseqDataset):
def __init__(self, root_dir, tfm, bpe_parser, target_dict, crop_img_output_dir=None):
self.root_dir = root_dir
self.tfm = tfm
self.target_dict = target_dict
# self.bpe_parser = bpe_parser
self.ori_data, self.data = SROIETask2(root_dir, bpe_parser, target_dict, crop_img_output_dir)
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
img_dict = self.data[idx]
image = img_dict['img']
encoded_str = img_dict['encoded_str']
input_ids = self.target_dict.encode_line(encoded_str, add_if_not_exist=False)
tfm_img = self.tfm(image) # h, w, c
return {'id': idx, 'tfm_img': tfm_img, 'label_ids': input_ids}
def size(self, idx):
img_dict = self.data[idx]
encoded_str = img_dict['encoded_str']
input_ids = self.target_dict.encode_line(encoded_str, add_if_not_exist=False)
return len(input_ids)
def num_tokens(self, idx):
return self.size(idx)
def collater(self, samples):
return default_collater(self.target_dict, samples)
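# Assumption documented from STR() below: the ground-truth file lists one
# "<image file>\t<transcript>" pair per line, and the images live in an
# "image/" folder next to the ground-truth file.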
def STR(gt_path, bpe_parser):
root_dir = os.path.dirname(gt_path)
data = []
img_id = 0
with open(gt_path, 'r') as fp:
for line in tqdm(list(fp.readlines()), desc='Loading STR:'):
line = line.rstrip()
temp = line.split('\t', 1)
img_file = temp[0]
text = temp[1]
img_path = os.path.join(root_dir, 'image', img_file)
if not bpe_parser:
encoded_str = text
else:
encoded_str = bpe_parser.encode(text)
data.append({'img_path': img_path, 'image_id':img_id, 'text':text, 'encoded_str':encoded_str})
img_id += 1
return data
class SyntheticTextRecognitionDataset(FairseqDataset):
def __init__(self, gt_path, tfm, bpe_parser, target_dict):
self.gt_path = gt_path
self.tfm = tfm
self.target_dict = target_dict
self.data = STR(gt_path, bpe_parser)
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
img_dict = self.data[idx]
image = Image.open(img_dict['img_path']).convert('RGB')
encoded_str = img_dict['encoded_str']
input_ids = self.target_dict.encode_line(encoded_str, add_if_not_exist=False)
tfm_img = self.tfm(image) # h, w, c
return {'id': idx, 'tfm_img': tfm_img, 'label_ids': input_ids}
def size(self, idx):
img_dict = self.data[idx]
encoded_str = img_dict['encoded_str']
input_ids = self.target_dict.encode_line(encoded_str, add_if_not_exist=False)
return len(input_ids)
def num_tokens(self, idx):
return self.size(idx)
def collater(self, samples):
return default_collater(self.target_dict, samples)
def Receipt53K(gt_path):
root_dir = os.path.dirname(gt_path)
data = []
with open(gt_path, 'r', encoding='utf8') as fp:
for line in tqdm(list(fp.readlines()), desc='Loading Receipt53K:'):
line = line.rstrip()
temp = line.split('\t', 1)
img_file = temp[0]
text = temp[1]
img_path = os.path.join(root_dir, img_file)
data.append({'img_path': img_path, 'text':text})
return data
class Receipt53KDataset(FairseqDataset):
def __init__(self, gt_path, tfm, bpe_parser, target_dict):
self.gt_path = gt_path
self.tfm = tfm
self.target_dict = target_dict
self.bpe_parser = bpe_parser
self.data = Receipt53K(gt_path)
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
img_dict = self.data[idx]
try:
image = Image.open(img_dict['img_path']).convert('RGB')
except Exception as e:
logger.warning('Failed to load image: {}, since {}'.format(img_dict['img_path'], str(e)))
return None
encoded_str = self.bpe_parser.encode(img_dict['text'])
input_ids = self.target_dict.encode_line(encoded_str, add_if_not_exist=False)
tfm_img = self.tfm(image) # h, w, c
return {'id': idx, 'tfm_img':tfm_img, 'label_ids':input_ids}
def size(self, idx):
img_dict = self.data[idx]
return len(img_dict['text'])
# item = self[idx]
# return len(item['label_ids'])
def num_tokens(self, idx):
return self.size(idx)
def collater(self, samples):
return default_collater(self.target_dict, samples) | EXA-1-master | exa/models/unilm-master/trocr/data.py |
from tempfile import tempdir
from fairseq.data.encoders.gpt2_bpe import GPT2BPE, GPT2BPEConfig
from fairseq.data.encoders import register_bpe
import logging
logger = logging.getLogger(__name__)
INSERT_OR_REPLACE = 0 # 1 for replace and 0 for insert
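# Worked example of the two modes (inferred from encode()/decode() below), for x = "AB CD":
#   replace (1): ["AB", "<s>", "CD"]  -> the space is dropped and decode() maps "<s>" back to " ";
#   insert  (0): ["AB", "<s>", " CD"] -> "<s>" marks the word boundary while the space survives
#                inside the BPE of " CD", so decode() simply strips "<s>".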
@register_bpe("gpt2es", dataclass=GPT2BPEConfig) # as stands for attention space
class GPT2BPEEnhancedSpace(GPT2BPE):
def __init__(self, cfg):
logger.info('Using the GPT2BPEEnhancedSpace.')
super().__init__(cfg)
def encode(self, x: str) -> str:
# only for sroie
assert not x.startswith(' ')
assert not x.endswith(' ')
if INSERT_OR_REPLACE == 1:
temp = []
word = ''
for ch in x:
if ch == ' ':
if word:
temp.append(word)
word = ''
temp.append('<s>')
else:
word += ch
if word:
temp.append(word)
for i in range(len(temp)):
if temp[i] != '<s>':
temp[i] = ' '.join(map(str, self.bpe.encode(temp[i])))
return ' '.join(temp)
elif INSERT_OR_REPLACE == 0:
temp = []
word = ''
for ch in x:
if ch == ' ':
if word:
temp.append(word)
word = ' '
temp.append('<s>')
else:
word += ch
if word:
temp.append(word)
for i in range(len(temp)):
if temp[i] != '<s>':
temp[i] = ' '.join(map(str, self.bpe.encode(temp[i])))
return ' '.join(temp)
def decode(self, x: str) -> str:
if INSERT_OR_REPLACE == 1:
return self.bpe.decode(
[int(tok) if tok not in {"<unk>", "<mask>", "<s>"} else tok for tok in x.split()]
).replace('<s>', ' ')
elif INSERT_OR_REPLACE == 0:
return self.bpe.decode(
[int(tok) if tok not in {"<unk>", "<mask>", "<s>"} else tok for tok in x.split()]
).replace('<s>', '')
def is_beginning_of_word(self, x: str) -> bool:
return self.decode(x).startswith(" ")
| EXA-1-master | exa/models/unilm-master/trocr/bpe.py |
import cv2
import numpy as np
import math
from PIL import Image, ImageOps, ImageDraw
from skimage import color
from scipy import interpolate
from pkg_resources import resource_filename
from io import BytesIO
from .ops import plasma_fractal, clipped_zoom, MotionImage
'''
PIL resize (W,H)
'''
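# Convention shared by the corruption classes below (inferred from their __call__
# signatures): `mag` selects one of three severity presets (out-of-range values fall
# back to a random or the mildest preset, depending on the class) and `prob` gates
# whether the corruption is applied at all.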
class Fog:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
W, H = img.size
c = [(1.5, 2), (2., 2), (2.5, 1.7)]
if mag<0 or mag>=len(c):
index = np.random.randint(0, len(c))
else:
index = mag
c = c[index]
n_channels = len(img.getbands())
isgray = n_channels == 1
img = np.array(img) / 255.
max_val = img.max()
fog = c[0] * plasma_fractal(wibbledecay=c[1])[:H, :W][..., np.newaxis]
#x += c[0] * plasma_fractal(wibbledecay=c[1])[:224, :224][..., np.newaxis]
#return np.clip(x * max_val / (max_val + c[0]), 0, 1) * 255
if isgray:
fog = np.squeeze(fog)
else:
fog = np.repeat(fog, 3, axis=2)
# print('img', img.shape)
# print('fog', fog.shape)
# print(H, W)
# exit(0)
fog = cv2.resize(fog, dsize=(H, W), interpolation=cv2.INTER_CUBIC)
img += fog
img = np.clip(img * max_val / (max_val + c[0]), 0, 1) * 255
return Image.fromarray(img.astype(np.uint8))
class Frost:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
W, H = img.size
c = [(1, 0.4), (0.8, 0.6), (0.7, 0.7)]
if mag<0 or mag>=len(c):
index = np.random.randint(0, len(c))
else:
index = mag
c = c[index]
filename = [resource_filename(__name__, 'frost/frost1.png'),
resource_filename(__name__, 'frost/frost2.png'),
resource_filename(__name__, 'frost/frost3.png'),
resource_filename(__name__, 'frost/frost4.jpg'),
resource_filename(__name__, 'frost/frost5.jpg'),
resource_filename(__name__, 'frost/frost6.jpg')
]
index = np.random.randint(0, len(filename))
filename = filename[index]
frost = cv2.imread(filename)
frost = cv2.resize(frost, dsize=(H, W), interpolation=cv2.INTER_CUBIC)
#randomly crop and convert to rgb
# x_start, y_start = np.random.randint(0, frost.shape[0] - H), np.random.randint(0, frost.shape[1] - W)
x_start = 0
y_start = 0
frost = frost[x_start:x_start + H, y_start:y_start + W][..., [2, 1, 0]]
n_channels = len(img.getbands())
isgray = n_channels == 1
img = np.array(img)
if isgray:
img = np.expand_dims(img, axis=2)
img = np.repeat(img, 3, axis=2)
img = img * c[0]
frost = frost * c[1]
img = np.clip(c[0] * img + c[1] * frost, 0, 255)
img = Image.fromarray(img.astype(np.uint8))
if isgray:
img = ImageOps.grayscale(img)
return img
class Snow:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
W, H = img.size
c = [(0.1, 0.3, 3, 0.5, 10, 4, 0.8),
(0.2, 0.3, 2, 0.5, 12, 4, 0.7),
(0.55, 0.3, 4, 0.9, 12, 8, 0.7)]
if mag<0 or mag>=len(c):
index = np.random.randint(0, len(c))
else:
index = mag
c = c[index]
n_channels = len(img.getbands())
isgray = n_channels == 1
img = np.array(img, dtype=np.float32) / 255.
if isgray:
img = np.expand_dims(img, axis=2)
img = np.repeat(img, 3, axis=2)
snow_layer = np.random.normal(size=img.shape[:2], loc=c[0], scale=c[1]) # [:2] for monochrome
#snow_layer = clipped_zoom(snow_layer[..., np.newaxis], c[2])
snow_layer[snow_layer < c[3]] = 0
snow_layer = Image.fromarray((np.clip(snow_layer.squeeze(), 0, 1) * 255).astype(np.uint8), mode='L')
output = BytesIO()
snow_layer.save(output, format='PNG')
snow_layer = MotionImage(blob=output.getvalue())
snow_layer.motion_blur(radius=c[4], sigma=c[5], angle=np.random.uniform(-135, -45))
        snow_layer = cv2.imdecode(np.frombuffer(snow_layer.make_blob(), np.uint8),
                                  cv2.IMREAD_UNCHANGED) / 255.
#snow_layer = cv2.cvtColor(snow_layer, cv2.COLOR_BGR2RGB)
snow_layer = snow_layer[..., np.newaxis]
img = c[6] * img
gray_img = (1 - c[6]) * np.maximum(img, cv2.cvtColor(img, cv2.COLOR_RGB2GRAY).reshape(H, W, 1) * 1.5 + 0.5)
img += gray_img
img = np.clip(img + snow_layer + np.rot90(snow_layer, k=2), 0, 1) * 255
img = Image.fromarray(img.astype(np.uint8))
if isgray:
img = ImageOps.grayscale(img)
return img
class Rain:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
img = img.copy()
W, H = img.size
n_channels = len(img.getbands())
isgray = n_channels == 1
line_width = np.random.randint(1, 2)
c =[50, 70, 90]
if mag<0 or mag>=len(c):
index = 0
else:
index = mag
c = c[index]
n_rains = np.random.randint(c, c+20)
slant = np.random.randint(-60, 60)
fillcolor = 200 if isgray else (200,200,200)
draw = ImageDraw.Draw(img)
for i in range(1, n_rains):
length = np.random.randint(5, 10)
x1 = np.random.randint(0, W-length)
y1 = np.random.randint(0, H-length)
x2 = x1 + length*math.sin(slant*math.pi/180.)
y2 = y1 + length*math.cos(slant*math.pi/180.)
x2 = int(x2)
y2 = int(y2)
draw.line([(x1,y1), (x2,y2)], width=line_width, fill=fillcolor)
return img
class Shadow:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
#img = img.copy()
W, H = img.size
n_channels = len(img.getbands())
isgray = n_channels == 1
c =[64, 96, 128]
if mag<0 or mag>=len(c):
index = 0
else:
index = mag
c = c[index]
img = img.convert('RGBA')
overlay = Image.new('RGBA', img.size, (255,255,255,0))
draw = ImageDraw.Draw(overlay)
transparency = np.random.randint(c, c+32)
x1 = np.random.randint(0, W//2)
y1 = 0
x2 = np.random.randint(W//2, W)
y2 = 0
x3 = np.random.randint(W//2, W)
y3 = H - 1
x4 = np.random.randint(0, W//2)
y4 = H - 1
draw.polygon([(x1,y1), (x2,y2), (x3,y3), (x4,y4)], fill=(0,0,0,transparency))
img = Image.alpha_composite(img, overlay)
img = img.convert("RGB")
if isgray:
img = ImageOps.grayscale(img)
return img
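
if __name__ == "__main__":
    # Minimal sketch of the size convention noted in the module docstring: PIL reports
    # size as (W, H), while the numpy arrays used inside these ops are shaped (H, W, C).
    # The blank 100x32 crop below is only for illustration.
    demo = Image.new("RGB", (100, 32), color=(255, 255, 255))
    print("PIL size (W, H):", demo.size)                    # (100, 32)
    print("numpy shape (H, W, C):", np.array(demo).shape)   # (32, 100, 3)
    print("after Rain:", Rain()(demo, mag=1).size)          # size is preserved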
| EXA-1-master | exa/models/unilm-master/trocr/augmentation/weather.py |
| EXA-1-master | exa/models/unilm-master/trocr/augmentation/__init__.py |
import cv2
import numpy as np
from PIL import Image, ImageOps, ImageDraw
'''
PIL resize (W,H)
Torch resize is (H,W)
'''
class VGrid:
def __init__(self):
pass
def __call__(self, img, copy=True, max_width=4, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
if copy:
img = img.copy()
W, H = img.size
if mag<0 or mag>max_width:
line_width = np.random.randint(1, max_width)
image_stripe = np.random.randint(1, max_width)
else:
line_width = 1
image_stripe = 3 - mag
n_lines = W // (line_width + image_stripe) + 1
draw = ImageDraw.Draw(img)
for i in range(1, n_lines):
x = image_stripe*i + line_width*(i-1)
draw.line([(x,0), (x,H)], width=line_width, fill='black')
return img
class HGrid:
def __init__(self):
pass
def __call__(self, img, copy=True, max_width=4, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
if copy:
img = img.copy()
W, H = img.size
if mag<0 or mag>max_width:
line_width = np.random.randint(1, max_width)
image_stripe = np.random.randint(1, max_width)
else:
line_width = 1
image_stripe = 3 - mag
n_lines = H // (line_width + image_stripe) + 1
draw = ImageDraw.Draw(img)
for i in range(1, n_lines):
y = image_stripe*i + line_width*(i-1)
draw.line([(0,y), (W, y)], width=line_width, fill='black')
return img
class Grid:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
img = VGrid()(img, copy=True, mag=mag)
img = HGrid()(img, copy=False, mag=mag)
return img
class RectGrid:
def __init__(self):
pass
def __call__(self, img, isellipse=False, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
img = img.copy()
W, H = img.size
line_width = 1
image_stripe = 3 - mag #np.random.randint(2, 6)
offset = 4 if isellipse else 1
n_lines = ((H//2) // (line_width + image_stripe)) + offset
draw = ImageDraw.Draw(img)
x_center = W // 2
y_center = H // 2
for i in range(1, n_lines):
dx = image_stripe*i + line_width*(i-1)
dy = image_stripe*i + line_width*(i-1)
x1 = x_center - (dx * W//H)
y1 = y_center - dy
x2 = x_center + (dx * W/H)
y2 = y_center + dy
if isellipse:
draw.ellipse([(x1,y1), (x2, y2)], width=line_width, outline='black')
else:
draw.rectangle([(x1,y1), (x2, y2)], width=line_width, outline='black')
return img
class EllipseGrid:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
img = RectGrid()(img, isellipse=True, mag=mag, prob=prob)
return img
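
if __name__ == "__main__":
    # Quick sketch: with mag=1 the stripe spacing above is 3 - mag = 2 px and the line
    # width is fixed at 1 px, so a grid on a blank white crop leaves exactly two colours.
    blank = Image.new("RGB", (100, 32), color="white")
    gridded = Grid()(blank, mag=1)
    print("colours after Grid:", [c for _, c in gridded.getcolors()])  # white and black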
| EXA-1-master | exa/models/unilm-master/trocr/augmentation/pattern.py |
import os
import cv2
from warp import Curve, Distort, Stretch
from geometry import Rotate, Perspective, Shrink, TranslateX, TranslateY
from pattern import VGrid, HGrid, Grid, RectGrid, EllipseGrid
from noise import GaussianNoise, ShotNoise, ImpulseNoise, SpeckleNoise
from blur import GaussianBlur, DefocusBlur, MotionBlur, GlassBlur, ZoomBlur
from camera import Contrast, Brightness, JpegCompression, Pixelate
from weather import Fog, Snow, Frost, Rain, Shadow
from process import Posterize, Solarize, Invert, Equalize, AutoContrast, Sharpness, Color
from PIL import Image
import PIL.ImageOps
import numpy as np
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--image', default="images/delivery.png", help='Load image file')
    parser.add_argument('--results', default="results", help='Output directory for augmented images')
parser.add_argument('--gray', action='store_true', help='Convert to grayscale 1st')
opt = parser.parse_args()
os.makedirs(opt.results, exist_ok=True)
img = Image.open(opt.image)
img = img.resize( (100,32) )
ops = [Curve(), Rotate(), Perspective(), Distort(), Stretch(), Shrink(), TranslateX(), TranslateY(), VGrid(), HGrid(), Grid(), RectGrid(), EllipseGrid()]
ops.extend([GaussianNoise(), ShotNoise(), ImpulseNoise(), SpeckleNoise()])
ops.extend([GaussianBlur(), DefocusBlur(), MotionBlur(), GlassBlur(), ZoomBlur()])
ops.extend([Contrast(), Brightness(), JpegCompression(), Pixelate()])
ops.extend([Fog(), Snow(), Frost(), Rain(), Shadow()])
ops.extend([Posterize(), Solarize(), Invert(), Equalize(), AutoContrast(), Sharpness(), Color()])
for op in ops:
for mag in range(3):
filename = type(op).__name__ + "-" + str(mag) + ".png"
out_img = op(img, mag=mag)
if opt.gray:
out_img = PIL.ImageOps.grayscale(out_img)
out_img.save(os.path.join(opt.results, filename))
| EXA-1-master | exa/models/unilm-master/trocr/augmentation/test.py |
import cv2
import numpy as np
from wand.image import Image as WandImage
from scipy.ndimage import zoom as scizoom
from wand.api import library as wandlibrary
class MotionImage(WandImage):
def motion_blur(self, radius=0.0, sigma=0.0, angle=0.0):
wandlibrary.MagickMotionBlurImage(self.wand, radius, sigma, angle)
def clipped_zoom(img, zoom_factor):
h = img.shape[1]
# ceil crop height(= crop width)
ch = int(np.ceil(h / float(zoom_factor)))
top = (h - ch) // 2
img = scizoom(img[top:top + ch, top:top + ch], (zoom_factor, zoom_factor, 1), order=1)
# trim off any extra pixels
trim_top = (img.shape[0] - h) // 2
return img[trim_top:trim_top + h, trim_top:trim_top + h]
def disk(radius, alias_blur=0.1, dtype=np.float32):
if radius <= 8:
L = np.arange(-8, 8 + 1)
ksize = (3, 3)
else:
L = np.arange(-radius, radius + 1)
ksize = (5, 5)
X, Y = np.meshgrid(L, L)
aliased_disk = np.array((X ** 2 + Y ** 2) <= radius ** 2, dtype=dtype)
aliased_disk /= np.sum(aliased_disk)
# supersample disk to antialias
return cv2.GaussianBlur(aliased_disk, ksize=ksize, sigmaX=alias_blur)
# modification of https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py
def plasma_fractal(mapsize=256, wibbledecay=3):
"""
Generate a heightmap using diamond-square algorithm.
    Return square 2d array, side length 'mapsize', of floats normalized to the range 0-1.
'mapsize' must be a power of two.
"""
assert (mapsize & (mapsize - 1) == 0)
    maparray = np.empty((mapsize, mapsize), dtype=np.float64)
maparray[0, 0] = 0
stepsize = mapsize
wibble = 100
def wibbledmean(array):
return array / 4 + wibble * np.random.uniform(-wibble, wibble, array.shape)
def fillsquares():
"""For each square of points stepsize apart,
calculate middle value as mean of points + wibble"""
cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0)
squareaccum += np.roll(squareaccum, shift=-1, axis=1)
maparray[stepsize // 2:mapsize:stepsize,
stepsize // 2:mapsize:stepsize] = wibbledmean(squareaccum)
def filldiamonds():
"""For each diamond of points stepsize apart,
calculate middle value as mean of points + wibble"""
mapsize = maparray.shape[0]
drgrid = maparray[stepsize // 2:mapsize:stepsize, stepsize // 2:mapsize:stepsize]
ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
ldrsum = drgrid + np.roll(drgrid, 1, axis=0)
lulsum = ulgrid + np.roll(ulgrid, -1, axis=1)
ltsum = ldrsum + lulsum
maparray[0:mapsize:stepsize, stepsize // 2:mapsize:stepsize] = wibbledmean(ltsum)
tdrsum = drgrid + np.roll(drgrid, 1, axis=1)
tulsum = ulgrid + np.roll(ulgrid, -1, axis=0)
ttsum = tdrsum + tulsum
maparray[stepsize // 2:mapsize:stepsize, 0:mapsize:stepsize] = wibbledmean(ttsum)
while stepsize >= 2:
fillsquares()
filldiamonds()
stepsize //= 2
wibble /= wibbledecay
maparray -= maparray.min()
return maparray / maparray.max()
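
if __name__ == "__main__":
    # Sanity sketch of the diamond-square heightmap: the result is square and normalized
    # to [0, 1]; Fog in weather.py then scales it by c[0] before blending it into the image.
    heightmap = plasma_fractal(mapsize=256, wibbledecay=3)
    print("shape:", heightmap.shape)                                        # (256, 256)
    print("range:", (float(heightmap.min()), float(heightmap.max())))       # (0.0, 1.0)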
| EXA-1-master | exa/models/unilm-master/trocr/augmentation/ops.py |
import cv2
import numpy as np
from PIL import Image, ImageOps
import torchvision.transforms as transforms
from wand.image import Image as WandImage
from scipy.ndimage import zoom as scizoom
from skimage.filters import gaussian
from wand.api import library as wandlibrary
from io import BytesIO
#from skimage import color
from .ops import MotionImage, clipped_zoom, disk, plasma_fractal
'''
PIL resize (W,H)
'''
class GaussianBlur:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
W, H = img.size
#kernel = [(31,31)] prev 1 level only
kernel = (31, 31)
sigmas = [.5, 1, 2]
        if mag<0 or mag>=len(sigmas):  # index into sigmas; the kernel size is fixed
index = np.random.randint(0, len(sigmas))
else:
index = mag
sigma = sigmas[index]
return transforms.GaussianBlur(kernel_size=kernel, sigma=sigma)(img)
class DefocusBlur:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
n_channels = len(img.getbands())
isgray = n_channels == 1
#c = [(3, 0.1), (4, 0.5), (6, 0.5), (8, 0.5), (10, 0.5)]
c = [(2, 0.1), (3, 0.1), (4, 0.1)] #, (6, 0.5)] #prev 2 levels only
if mag<0 or mag>=len(c):
index = np.random.randint(0, len(c))
else:
index = mag
c = c[index]
img = np.array(img) / 255.
if isgray:
img = np.expand_dims(img, axis=2)
img = np.repeat(img, 3, axis=2)
n_channels = 3
kernel = disk(radius=c[0], alias_blur=c[1])
channels = []
for d in range(n_channels):
channels.append(cv2.filter2D(img[:, :, d], -1, kernel))
channels = np.array(channels).transpose((1, 2, 0)) # 3x224x224 -> 224x224x3
#if isgray:
# img = img[:,:,0]
# img = np.squeeze(img)
img = np.clip(channels, 0, 1) * 255
img = Image.fromarray(img.astype(np.uint8))
if isgray:
img = ImageOps.grayscale(img)
return img
class MotionBlur:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
n_channels = len(img.getbands())
isgray = n_channels == 1
#c = [(10, 3), (15, 5), (15, 8), (15, 12), (20, 15)]
c = [(10, 3), (12, 4), (14, 5)]
if mag<0 or mag>=len(c):
index = np.random.randint(0, len(c))
else:
index = mag
c = c[index]
output = BytesIO()
img.save(output, format='PNG')
img = MotionImage(blob=output.getvalue())
img.motion_blur(radius=c[0], sigma=c[1], angle=np.random.uniform(-45, 45))
        img = cv2.imdecode(np.frombuffer(img.make_blob(), np.uint8), cv2.IMREAD_UNCHANGED)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = Image.fromarray(img.astype(np.uint8))
if isgray:
img = ImageOps.grayscale(img)
return img
class GlassBlur:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
W, H = img.size
#c = [(0.7, 1, 2), (0.9, 2, 1), (1, 2, 3), (1.1, 3, 2), (1.5, 4, 2)][severity - 1]
c = [(0.7, 1, 2), (0.75, 1, 2), (0.8, 1, 2)] #, (1, 2, 3)] #prev 2 levels only
if mag<0 or mag>=len(c):
index = np.random.randint(0, len(c))
else:
index = mag
c = c[index]
img = np.uint8(gaussian(np.array(img) / 255., sigma=c[0], multichannel=True) * 255)
# locally shuffle pixels
for i in range(c[2]):
for h in range(H - c[1], c[1], -1):
for w in range(W - c[1], c[1], -1):
dx, dy = np.random.randint(-c[1], c[1], size=(2,))
h_prime, w_prime = h + dy, w + dx
# swap
img[h, w], img[h_prime, w_prime] = img[h_prime, w_prime], img[h, w]
img = np.clip(gaussian(img / 255., sigma=c[0], multichannel=True), 0, 1) * 255
return Image.fromarray(img.astype(np.uint8))
class ZoomBlur:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
W, H = img.size
c = [np.arange(1, 1.11, .01),
np.arange(1, 1.16, .01),
np.arange(1, 1.21, .02)]
if mag<0 or mag>=len(c):
index = np.random.randint(0, len(c))
else:
index = mag
c = c[index]
n_channels = len(img.getbands())
isgray = n_channels == 1
uint8_img = img
img = (np.array(img) / 255.).astype(np.float32)
out = np.zeros_like(img)
for zoom_factor in c:
ZW = int(W*zoom_factor)
ZH = int(H*zoom_factor)
zoom_img = uint8_img.resize((ZW, ZH), Image.BICUBIC)
x1 = (ZW - W) // 2
y1 = (ZH - H) // 2
x2 = x1 + W
y2 = y1 + H
zoom_img = zoom_img.crop((x1,y1,x2,y2))
out += (np.array(zoom_img) / 255.).astype(np.float32)
img = (img + out) / (len(c) + 1)
img = np.clip(img, 0, 1) * 255
img = Image.fromarray(img.astype(np.uint8))
return img
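
if __name__ == "__main__":
    # Sketch: GaussianBlur above wraps torchvision's transform with a fixed 31x31 kernel
    # and a sigma drawn from [.5, 1, 2] via `mag`; the output stays a PIL image of the
    # same size. The blank 100x32 crop is only for illustration.
    demo = Image.new("RGB", (100, 32), color="white")
    print("after GaussianBlur:", GaussianBlur()(demo, mag=1).size)  # (100, 32)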
| EXA-1-master | exa/models/unilm-master/trocr/augmentation/blur.py |
import cv2
import numpy as np
import skimage as sk
from PIL import Image, ImageOps
from io import BytesIO
from skimage import color
'''
PIL resize (W,H)
cv2 image is BGR
PIL image is RGB
'''
class Contrast:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
#c = [0.4, .3, .2, .1, .05]
c = [0.4, .3, .2]
if mag<0 or mag>=len(c):
index = np.random.randint(0, len(c))
else:
index = mag
c = c[index]
img = np.array(img) / 255.
means = np.mean(img, axis=(0, 1), keepdims=True)
img = np.clip((img - means) * c + means, 0, 1) * 255
return Image.fromarray(img.astype(np.uint8))
class Brightness:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
#W, H = img.size
#c = [.1, .2, .3, .4, .5]
c = [.1, .2, .3]
if mag<0 or mag>=len(c):
index = np.random.randint(0, len(c))
else:
index = mag
c = c[index]
n_channels = len(img.getbands())
isgray = n_channels == 1
img = np.array(img) / 255.
if isgray:
img = np.expand_dims(img, axis=2)
img = np.repeat(img, 3, axis=2)
img = sk.color.rgb2hsv(img)
img[:, :, 2] = np.clip(img[:, :, 2] + c, 0, 1)
img = sk.color.hsv2rgb(img)
#if isgray:
# img = img[:,:,0]
# img = np.squeeze(img)
img = np.clip(img, 0, 1) * 255
img = Image.fromarray(img.astype(np.uint8))
if isgray:
img = ImageOps.grayscale(img)
return img
#if isgray:
#if isgray:
# img = color.rgb2gray(img)
#return Image.fromarray(img.astype(np.uint8))
class JpegCompression:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
#c = [25, 18, 15, 10, 7]
c = [25, 18, 15]
if mag<0 or mag>=len(c):
index = np.random.randint(0, len(c))
else:
index = mag
c = c[index]
output = BytesIO()
img.save(output, 'JPEG', quality=c)
return Image.open(output)
class Pixelate:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
W, H = img.size
#c = [0.6, 0.5, 0.4, 0.3, 0.25]
c = [0.6, 0.5, 0.4]
if mag<0 or mag>=len(c):
index = np.random.randint(0, len(c))
else:
index = mag
c = c[index]
img = img.resize((int(W* c), int(H * c)), Image.BOX)
return img.resize((W, H), Image.BOX)
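
if __name__ == "__main__":
    # Sketch of the channel-order note in the module docstring: np.array(pil_img) is RGB,
    # so data handed to OpenCV code that assumes BGR needs cv2.cvtColor(..., COLOR_RGB2BGR).
    demo = Image.new("RGB", (100, 32), color=(255, 0, 0))   # pure red
    rgb = np.array(demo)
    bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
    print("RGB first pixel:", rgb[0, 0])   # [255   0   0]
    print("BGR first pixel:", bgr[0, 0])   # [  0   0 255]
    print("JpegCompression keeps size:", JpegCompression()(demo, mag=1).size)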
| EXA-1-master | exa/models/unilm-master/trocr/augmentation/camera.py |
import numpy as np
import skimage as sk
from PIL import Image
'''
PIL resize (W,H)
'''
class GaussianNoise:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
W, H = img.size
#c = np.random.uniform(.08, .38)
b = [.08, 0.1, 0.12]
if mag<0 or mag>=len(b):
index = 0
else:
index = mag
a = b[index]
c = np.random.uniform(a, a+0.03)
img = np.array(img) / 255.
img = np.clip(img + np.random.normal(size=img.shape, scale=c), 0, 1) * 255
return Image.fromarray(img.astype(np.uint8))
class ShotNoise:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
W, H = img.size
#c = np.random.uniform(3, 60)
b = [13, 8, 3]
if mag<0 or mag>=len(b):
index = 2
else:
index = mag
a = b[index]
c = np.random.uniform(a, a+7)
img = np.array(img) / 255.
img = np.clip(np.random.poisson(img * c) / float(c), 0, 1) * 255
return Image.fromarray(img.astype(np.uint8))
class ImpulseNoise:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
W, H = img.size
#c = np.random.uniform(.03, .27)
b = [.03, .07, .11]
if mag<0 or mag>=len(b):
index = 0
else:
index = mag
a = b[index]
c = np.random.uniform(a, a+.04)
img = sk.util.random_noise(np.array(img) / 255., mode='s&p', amount=c) * 255
return Image.fromarray(img.astype(np.uint8))
class SpeckleNoise:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
W, H = img.size
# c = np.random.uniform(.15, .6)
b = [.15, .2, .25]
if mag<0 or mag>=len(b):
index = 0
else:
index = mag
a = b[index]
c = np.random.uniform(a, a+.05)
img = np.array(img) / 255.
img = np.clip(img + img * np.random.normal(size=img.shape, scale=c), 0, 1) * 255
return Image.fromarray(img.astype(np.uint8))
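
if __name__ == "__main__":
    # Side-by-side sketch of the noise models above on a flat grey crop: Gaussian is
    # additive, Shot is Poisson counting noise, Impulse is salt-and-pepper, Speckle is
    # multiplicative. Exact statistics vary because each op draws its own strength.
    flat = Image.new("L", (100, 32), color=128)
    for op in (GaussianNoise(), ShotNoise(), ImpulseNoise(), SpeckleNoise()):
        out = np.array(op(flat, mag=0), dtype=np.float32)
        print(f"{type(op).__name__:13s} mean={out.mean():6.1f} std={out.std():5.1f}")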
| EXA-1-master | exa/models/unilm-master/trocr/augmentation/noise.py |
import cv2
import numpy as np
from PIL import Image, ImageOps
'''
PIL resize (W,H)
Torch resize is (H,W)
'''
class Shrink:
def __init__(self):
self.tps = cv2.createThinPlateSplineShapeTransformer()
self.translateXAbs = TranslateXAbs()
self.translateYAbs = TranslateYAbs()
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
W, H = img.size
img = np.array(img)
srcpt = list()
dstpt = list()
W_33 = 0.33 * W
W_50 = 0.50 * W
W_66 = 0.66 * W
H_50 = 0.50 * H
P = 0
#frac = 0.4
b = [.2, .3, .4]
if mag<0 or mag>=len(b):
index = 0
else:
index = mag
frac = b[index]
# left-most
srcpt.append([P, P])
srcpt.append([P, H-P])
x = np.random.uniform(frac-.1, frac)*W_33
y = np.random.uniform(frac-.1, frac)*H_50
dstpt.append([P+x, P+y])
dstpt.append([P+x, H-P-y])
# 2nd left-most
srcpt.append([P+W_33, P])
srcpt.append([P+W_33, H-P])
dstpt.append([P+W_33, P+y])
dstpt.append([P+W_33, H-P-y])
# 3rd left-most
srcpt.append([P+W_66, P])
srcpt.append([P+W_66, H-P])
dstpt.append([P+W_66, P+y])
dstpt.append([P+W_66, H-P-y])
# right-most
srcpt.append([W-P, P])
srcpt.append([W-P, H-P])
dstpt.append([W-P-x, P+y])
dstpt.append([W-P-x, H-P-y])
N = len(dstpt)
matches = [cv2.DMatch(i, i, 0) for i in range(N)]
dst_shape = np.array(dstpt).reshape((-1, N, 2))
src_shape = np.array(srcpt).reshape((-1, N, 2))
self.tps.estimateTransformation(dst_shape, src_shape, matches)
img = self.tps.warpImage(img)
img = Image.fromarray(img)
if np.random.uniform(0, 1) < 0.5:
img = self.translateXAbs(img, val=x)
else:
img = self.translateYAbs(img, val=y)
return img
class Rotate:
def __init__(self, square_side=224):
self.side = square_side
def __call__(self, img, iscurve=False, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
W, H = img.size
if H!=self.side or W!=self.side:
img = img.resize((self.side, self.side), Image.BICUBIC)
b = [20., 40, 60]
if mag<0 or mag>=len(b):
index = 1
else:
index = mag
rotate_angle = b[index]
angle = np.random.uniform(rotate_angle-20, rotate_angle)
if np.random.uniform(0, 1) < 0.5:
angle = -angle
#angle = np.random.normal(loc=0., scale=rotate_angle)
#angle = min(angle, 2*rotate_angle)
#angle = max(angle, -2*rotate_angle)
expand = False if iscurve else True
img = img.rotate(angle=angle, resample=Image.BICUBIC, expand=expand)
img = img.resize((W, H), Image.BICUBIC)
return img
class Perspective:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
W, H = img.size
# upper-left, upper-right, lower-left, lower-right
src = np.float32([[0, 0], [W, 0], [0, H], [W, H]])
#low = 0.3
b = [.1, .2, .3]
if mag<0 or mag>=len(b):
index = 2
else:
index = mag
low = b[index]
high = 1 - low
if np.random.uniform(0, 1) > 0.5:
toprightY = np.random.uniform(low, low+.1)*H
bottomrightY = np.random.uniform(high-.1, high)*H
dest = np.float32([[0, 0], [W, toprightY], [0, H], [W, bottomrightY]])
else:
topleftY = np.random.uniform(low, low+.1)*H
bottomleftY = np.random.uniform(high-.1, high)*H
dest = np.float32([[0, topleftY], [W, 0], [0, bottomleftY], [W, H]])
M = cv2.getPerspectiveTransform(src, dest)
img = np.array(img)
img = cv2.warpPerspective(img, M, (W, H) )
img = Image.fromarray(img)
return img
class TranslateX:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
b = [.03, .06, .09]
if mag<0 or mag>=len(b):
index = 2
else:
index = mag
v = b[index]
v = np.random.uniform(v-0.03, v)
v = v * img.size[0]
if np.random.uniform(0,1) > 0.5:
v = -v
return img.transform(img.size, Image.AFFINE, (1, 0, v, 0, 1, 0))
class TranslateY:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
b = [.07, .14, .21]
if mag<0 or mag>=len(b):
index = 2
else:
index = mag
v = b[index]
v = np.random.uniform(v-0.07, v)
v = v * img.size[1]
if np.random.uniform(0,1) > 0.5:
v = -v
return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, v))
class TranslateXAbs:
def __init__(self):
pass
def __call__(self, img, val=0, prob=1.):
if np.random.uniform(0,1) > prob:
return img
v = np.random.uniform(0, val)
if np.random.uniform(0,1) > 0.5:
v = -v
return img.transform(img.size, Image.AFFINE, (1, 0, v, 0, 1, 0))
class TranslateYAbs:
def __init__(self):
pass
def __call__(self, img, val=0, prob=1.):
if np.random.uniform(0,1) > prob:
return img
v = np.random.uniform(0, val)
if np.random.uniform(0,1) > 0.5:
v = -v
return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, v))
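
if __name__ == "__main__":
    # Sketch of the four-corner mapping used by Perspective above: source corners are
    # (upper-left, upper-right, lower-left, lower-right) and only one side's Y values move.
    # The 100x32 size and the 0.1/0.9 offsets are chosen only for illustration.
    W, H = 100, 32
    src = np.float32([[0, 0], [W, 0], [0, H], [W, H]])
    dst = np.float32([[0, 0], [W, 0.1 * H], [0, H], [W, 0.9 * H]])  # squeeze the right edge
    M = cv2.getPerspectiveTransform(src, dst)
    print(M.round(3))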
| EXA-1-master | exa/models/unilm-master/trocr/augmentation/geometry.py |
import cv2
import numpy as np
from PIL import Image, ImageOps
'''
PIL resize (W,H)
Torch resize is (H,W)
'''
class Stretch:
def __init__(self):
self.tps = cv2.createThinPlateSplineShapeTransformer()
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
W, H = img.size
img = np.array(img)
srcpt = list()
dstpt = list()
W_33 = 0.33 * W
W_50 = 0.50 * W
W_66 = 0.66 * W
H_50 = 0.50 * H
P = 0
#frac = 0.4
b = [.2, .3, .4]
if mag<0 or mag>=len(b):
index = len(b)-1
else:
index = mag
frac = b[index]
# left-most
srcpt.append([P, P])
srcpt.append([P, H-P])
srcpt.append([P, H_50])
x = np.random.uniform(0, frac)*W_33 #if np.random.uniform(0,1) > 0.5 else 0
dstpt.append([P+x, P])
dstpt.append([P+x, H-P])
dstpt.append([P+x, H_50])
# 2nd left-most
srcpt.append([P+W_33, P])
srcpt.append([P+W_33, H-P])
x = np.random.uniform(-frac, frac)*W_33
dstpt.append([P+W_33+x, P])
dstpt.append([P+W_33+x, H-P])
# 3rd left-most
srcpt.append([P+W_66, P])
srcpt.append([P+W_66, H-P])
x = np.random.uniform(-frac, frac)*W_33
dstpt.append([P+W_66+x, P])
dstpt.append([P+W_66+x, H-P])
# right-most
srcpt.append([W-P, P])
srcpt.append([W-P, H-P])
srcpt.append([W-P, H_50])
x = np.random.uniform(-frac, 0)*W_33 #if np.random.uniform(0,1) > 0.5 else 0
dstpt.append([W-P+x, P])
dstpt.append([W-P+x, H-P])
dstpt.append([W-P+x, H_50])
N = len(dstpt)
matches = [cv2.DMatch(i, i, 0) for i in range(N)]
dst_shape = np.array(dstpt).reshape((-1, N, 2))
src_shape = np.array(srcpt).reshape((-1, N, 2))
self.tps.estimateTransformation(dst_shape, src_shape, matches)
img = self.tps.warpImage(img)
img = Image.fromarray(img)
return img
class Distort:
def __init__(self):
self.tps = cv2.createThinPlateSplineShapeTransformer()
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
W, H = img.size
img = np.array(img)
srcpt = list()
dstpt = list()
W_33 = 0.33 * W
W_50 = 0.50 * W
W_66 = 0.66 * W
H_50 = 0.50 * H
P = 0
#frac = 0.4
b = [.2, .3, .4]
if mag<0 or mag>=len(b):
index = len(b)-1
else:
index = mag
frac = b[index]
# top pts
srcpt.append([P, P])
x = np.random.uniform(0, frac)*W_33
y = np.random.uniform(0, frac)*H_50
dstpt.append([P+x, P+y])
srcpt.append([P+W_33, P])
x = np.random.uniform(-frac, frac)*W_33
y = np.random.uniform(0, frac)*H_50
dstpt.append([P+W_33+x, P+y])
srcpt.append([P+W_66, P])
x = np.random.uniform(-frac, frac)*W_33
y = np.random.uniform(0, frac)*H_50
dstpt.append([P+W_66+x, P+y])
srcpt.append([W-P, P])
x = np.random.uniform(-frac, 0)*W_33
y = np.random.uniform(0, frac)*H_50
dstpt.append([W-P+x, P+y])
# bottom pts
srcpt.append([P, H-P])
x = np.random.uniform(0, frac)*W_33
y = np.random.uniform(-frac, 0)*H_50
dstpt.append([P+x, H-P+y])
srcpt.append([P+W_33, H-P])
x = np.random.uniform(-frac, frac)*W_33
y = np.random.uniform(-frac, 0)*H_50
dstpt.append([P+W_33+x, H-P+y])
srcpt.append([P+W_66, H-P])
x = np.random.uniform(-frac, frac)*W_33
y = np.random.uniform(-frac, 0)*H_50
dstpt.append([P+W_66+x, H-P+y])
srcpt.append([W-P, H-P])
x = np.random.uniform(-frac, 0)*W_33
y = np.random.uniform(-frac, 0)*H_50
dstpt.append([W-P+x, H-P+y])
N = len(dstpt)
matches = [cv2.DMatch(i, i, 0) for i in range(N)]
dst_shape = np.array(dstpt).reshape((-1, N, 2))
src_shape = np.array(srcpt).reshape((-1, N, 2))
self.tps.estimateTransformation(dst_shape, src_shape, matches)
img = self.tps.warpImage(img)
img = Image.fromarray(img)
return img
class Curve:
def __init__(self, square_side=224):
self.tps = cv2.createThinPlateSplineShapeTransformer()
self.side = square_side
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
W, H = img.size
if H!=self.side or W!=self.side:
img = img.resize((self.side, self.side), Image.BICUBIC)
isflip = np.random.uniform(0,1) > 0.5
if isflip:
img = ImageOps.flip(img)
#img = TF.vflip(img)
img = np.array(img)
w = self.side
h = self.side
w_25 = 0.25 * w
w_50 = 0.50 * w
w_75 = 0.75 * w
b = [1.1, .95, .8]
if mag<0 or mag>=len(b):
index = 0
else:
index = mag
rmin = b[index]
r = np.random.uniform(rmin, rmin+.1)*h
x1 = (r**2 - w_50**2)**0.5
h1 = r - x1
t = np.random.uniform(0.4, 0.5)*h
w2 = w_50*t/r
hi = x1*t/r
h2 = h1 + hi
sinb_2 = ((1 - x1/r)/2)**0.5
cosb_2 = ((1 + x1/r)/2)**0.5
w3 = w_50 - r*sinb_2
h3 = r - r*cosb_2
w4 = w_50 - (r-t)*sinb_2
h4 = r - (r-t)*cosb_2
w5 = 0.5*w2
h5 = h1 + 0.5*hi
h_50 = 0.50*h
srcpt = [(0,0 ), (w,0 ), (w_50,0), (0,h ), (w,h ), (w_25,0), (w_75,0 ), (w_50,h), (w_25,h), (w_75,h ), (0,h_50), (w,h_50 )]
dstpt = [(0,h1), (w,h1), (w_50,0), (w2,h2), (w-w2,h2), (w3, h3), (w-w3,h3), (w_50,t), (w4,h4 ), (w-w4,h4), (w5,h5 ), (w-w5,h5)]
N = len(dstpt)
matches = [cv2.DMatch(i, i, 0) for i in range(N)]
dst_shape = np.array(dstpt).reshape((-1, N, 2))
src_shape = np.array(srcpt).reshape((-1, N, 2))
self.tps.estimateTransformation(dst_shape, src_shape, matches)
img = self.tps.warpImage(img)
img = Image.fromarray(img)
if isflip:
#img = TF.vflip(img)
img = ImageOps.flip(img)
rect = (0, self.side//2, self.side, self.side)
else:
rect = (0, 0, self.side, self.side//2)
img = img.crop(rect)
img = img.resize((W, H), Image.BICUBIC)
return img
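
if __name__ == "__main__":
    # Minimal thin-plate-spline sketch following the pattern used above: matched
    # source/destination control points (here 4 corners plus a nudged centre point,
    # chosen only for illustration) are passed to estimateTransformation(), then
    # warpImage() resamples the image.
    tps = cv2.createThinPlateSplineShapeTransformer()
    src = np.array([[0, 0], [99, 0], [0, 31], [99, 31], [50, 16]], dtype=np.float32)
    dst = src.copy()
    dst[-1] += (10.0, 5.0)                  # push the centre control point
    matches = [cv2.DMatch(i, i, 0) for i in range(len(src))]
    tps.estimateTransformation(dst.reshape(1, -1, 2), src.reshape(1, -1, 2), matches)
    canvas = np.full((32, 100, 3), 255, dtype=np.uint8)
    canvas[::4, :, :] = 0                   # stripes make the warp visible
    warped = tps.warpImage(canvas)
    print("warped shape:", warped.shape)    # (32, 100, 3)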
| EXA-1-master | exa/models/unilm-master/trocr/augmentation/warp.py |
from PIL import Image
import PIL.ImageOps, PIL.ImageEnhance
import numpy as np
class Posterize:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
c = [1, 3, 6]
if mag<0 or mag>=len(c):
index = np.random.randint(0, len(c))
else:
index = mag
c = c[index]
bit = np.random.randint(c, c+2)
img = PIL.ImageOps.posterize(img, bit)
return img
class Solarize:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
c = [64, 128, 192]
if mag<0 or mag>=len(c):
index = np.random.randint(0, len(c))
else:
index = mag
c = c[index]
thresh = np.random.randint(c, c+64)
img = PIL.ImageOps.solarize(img, thresh)
return img
class Invert:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
img = PIL.ImageOps.invert(img)
return img
class Equalize:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
        img = PIL.ImageOps.equalize(img)
return img
class AutoContrast:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
        img = PIL.ImageOps.autocontrast(img)
return img
class Sharpness:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
c = [.1, .7, 1.3]
if mag<0 or mag>=len(c):
index = np.random.randint(0, len(c))
else:
index = mag
c = c[index]
magnitude = np.random.uniform(c, c+.6)
img = PIL.ImageEnhance.Sharpness(img).enhance(magnitude)
return img
class Color:
def __init__(self):
pass
def __call__(self, img, mag=-1, prob=1.):
if np.random.uniform(0,1) > prob:
return img
c = [.1, .7, 1.3]
if mag<0 or mag>=len(c):
index = np.random.randint(0, len(c))
else:
index = mag
c = c[index]
magnitude = np.random.uniform(c, c+.6)
img = PIL.ImageEnhance.Color(img).enhance(magnitude)
return img
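
if __name__ == "__main__":
    # Quick sketch: Invert maps each channel value v to 255 - v, so applying it twice
    # (with the default prob=1) returns the original pixels. The colour is illustrative.
    demo = Image.new("RGB", (100, 32), color=(10, 200, 30))
    once = Invert()(demo)
    twice = Invert()(once)
    print(once.getpixel((0, 0)), twice.getpixel((0, 0)))  # (245, 55, 225) (10, 200, 30)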
| EXA-1-master | exa/models/unilm-master/trocr/augmentation/process.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, DINO and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit/
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import math
import sys
from typing import Iterable, Optional
import torch
from timm.data import Mixup
from timm.utils import accuracy, ModelEma
import utils
def train_class_batch(model, samples, target, criterion):
outputs = model(samples)
if not isinstance(outputs, torch.Tensor):
# assume that the model outputs a tuple of [outputs, outputs_kd]
outputs, outputs_kd = outputs
loss = criterion(outputs, target)
return loss, outputs
def get_loss_scale_for_deepspeed(model):
optimizer = model.optimizer
return optimizer.loss_scale if hasattr(optimizer, "loss_scale") else optimizer.cur_scale
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None, log_writer=None,
start_steps=None, lr_schedule_values=None, wd_schedule_values=None,
num_training_steps_per_epoch=None, update_freq=None):
model.train(True)
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 10
if loss_scaler is None:
model.zero_grad()
model.micro_steps = 0
else:
optimizer.zero_grad()
for data_iter_step, (samples, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
step = data_iter_step // update_freq
if step >= num_training_steps_per_epoch:
continue
it = start_steps + step # global training iteration
        # Update LR & WD once per accumulation window (i.e. at the first micro-step)
        if (lr_schedule_values is not None or wd_schedule_values is not None) and data_iter_step % update_freq == 0:
for i, param_group in enumerate(optimizer.param_groups):
if lr_schedule_values is not None:
param_group["lr"] = lr_schedule_values[it] * param_group["lr_scale"]
if wd_schedule_values is not None and param_group["weight_decay"] > 0:
param_group["weight_decay"] = wd_schedule_values[it]
samples = samples.to(device, non_blocking=True)
targets = targets.to(device, non_blocking=True)
if mixup_fn is not None:
samples, targets = mixup_fn(samples, targets)
if loss_scaler is None:
samples = samples.half()
loss, output = train_class_batch(
model, samples, targets, criterion)
else:
with torch.cuda.amp.autocast():
loss, output = train_class_batch(
model, samples, targets, criterion)
loss_value = loss.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
sys.exit(1)
if loss_scaler is None:
loss /= update_freq
model.backward(loss)
model.step()
if (data_iter_step + 1) % update_freq == 0:
# model.zero_grad()
# Deepspeed will call step() & model.zero_grad() automatic
if model_ema is not None:
model_ema.update(model)
grad_norm = None
loss_scale_value = get_loss_scale_for_deepspeed(model)
else:
# this attribute is added by timm on one optimizer (adahessian)
is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
loss /= update_freq
grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm,
parameters=model.parameters(), create_graph=is_second_order,
update_grad=(data_iter_step + 1) % update_freq == 0)
if (data_iter_step + 1) % update_freq == 0:
optimizer.zero_grad()
if model_ema is not None:
model_ema.update(model)
loss_scale_value = loss_scaler.state_dict()["scale"]
torch.cuda.synchronize()
if mixup_fn is None:
class_acc = (output.max(-1)[-1] == targets).float().mean()
else:
class_acc = None
metric_logger.update(loss=loss_value)
metric_logger.update(class_acc=class_acc)
metric_logger.update(loss_scale=loss_scale_value)
min_lr = 10.
max_lr = 0.
for group in optimizer.param_groups:
min_lr = min(min_lr, group["lr"])
max_lr = max(max_lr, group["lr"])
metric_logger.update(lr=max_lr)
metric_logger.update(min_lr=min_lr)
weight_decay_value = None
for group in optimizer.param_groups:
if group["weight_decay"] > 0:
weight_decay_value = group["weight_decay"]
metric_logger.update(weight_decay=weight_decay_value)
metric_logger.update(grad_norm=grad_norm)
if log_writer is not None:
log_writer.update(loss=loss_value, head="loss")
log_writer.update(class_acc=class_acc, head="loss")
log_writer.update(loss_scale=loss_scale_value, head="opt")
log_writer.update(lr=max_lr, head="opt")
log_writer.update(min_lr=min_lr, head="opt")
log_writer.update(weight_decay=weight_decay_value, head="opt")
log_writer.update(grad_norm=grad_norm, head="opt")
log_writer.set_step()
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(data_loader, model, device):
criterion = torch.nn.CrossEntropyLoss()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test:'
# switch to evaluation mode
model.eval()
for batch in metric_logger.log_every(data_loader, 10, header):
images = batch[0]
target = batch[-1]
images = images.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
# compute output
with torch.cuda.amp.autocast():
output = model(images)
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
batch_size = images.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
.format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
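
if __name__ == "__main__":
    # Tiny synthetic illustration of the top-1 / top-5 metric used in evaluate():
    # a prediction counts as correct@k when the target class is among the k largest
    # logits. The logits below are made up for the sketch.
    logits = torch.tensor([[0.05, 0.90, 0.40, 0.30, 0.20, 0.10],
                           [0.90, 0.40, 0.30, 0.20, 0.10, 0.05]])
    target = torch.tensor([1, 2])
    acc1, acc5 = accuracy(logits, target, topk=(1, 5))
    print(f"acc@1={acc1.item():.1f}%  acc@5={acc5.item():.1f}%")  # 50.0% and 100.0%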
| EXA-1-master | exa/models/unilm-master/dit/classification/engine_for_finetuning.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# --------------------------------------------------------'
import math
import random
import warnings
import torchvision.transforms.functional as F
from timm.data.transforms import interp_mode_to_str, _RANDOM_INTERPOLATION, str_to_interp_mode
class RandomResizedCropAndInterpolationWithTwoPic:
"""Crop the given PIL Image to random size and aspect ratio with random interpolation.
A crop of random size (default: of 0.08 to 1.0) of the original size and a random
aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
is finally resized to given size.
This is popularly used to train the Inception networks.
Args:
size: expected output size of each edge
scale: range of size of the origin size cropped
ratio: range of aspect ratio of the origin aspect ratio cropped
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, second_size=None, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.),
interpolation='bilinear', second_interpolation='lanczos'):
if isinstance(size, tuple):
self.size = size
else:
self.size = (size, size)
if second_size is not None:
if isinstance(second_size, tuple):
self.second_size = second_size
else:
self.second_size = (second_size, second_size)
else:
self.second_size = None
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
warnings.warn("range should be of kind (min, max)")
if interpolation == 'random':
self.interpolation = _RANDOM_INTERPOLATION
else:
self.interpolation = str_to_interp_mode(interpolation)
self.second_interpolation = str_to_interp_mode(second_interpolation)
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
area = img.size[0] * img.size[1]
for attempt in range(10):
target_area = random.uniform(*scale) * area
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if w <= img.size[0] and h <= img.size[1]:
i = random.randint(0, img.size[1] - h)
j = random.randint(0, img.size[0] - w)
return i, j, h, w
# Fallback to central crop
in_ratio = img.size[0] / img.size[1]
if in_ratio < min(ratio):
w = img.size[0]
h = int(round(w / min(ratio)))
elif in_ratio > max(ratio):
h = img.size[1]
w = int(round(h * max(ratio)))
else: # whole image
w = img.size[0]
h = img.size[1]
i = (img.size[1] - h) // 2
j = (img.size[0] - w) // 2
return i, j, h, w
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped and resized.
Returns:
PIL Image: Randomly cropped and resized image.
"""
i, j, h, w = self.get_params(img, self.scale, self.ratio)
if isinstance(self.interpolation, (tuple, list)):
interpolation = random.choice(self.interpolation)
else:
interpolation = self.interpolation
if self.second_size is None:
return F.resized_crop(img, i, j, h, w, self.size, interpolation)
else:
return F.resized_crop(img, i, j, h, w, self.size, interpolation), \
F.resized_crop(img, i, j, h, w, self.second_size, self.second_interpolation)
def __repr__(self):
if isinstance(self.interpolation, (tuple, list)):
interpolate_str = ' '.join([interp_mode_to_str(x) for x in self.interpolation])
else:
interpolate_str = interp_mode_to_str(self.interpolation)
format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))
format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))
format_string += ', interpolation={0}'.format(interpolate_str)
if self.second_size is not None:
format_string += ', second_size={0}'.format(self.second_size)
format_string += ', second_interpolation={0}'.format(interp_mode_to_str(self.second_interpolation))
format_string += ')'
        return format_string
| EXA-1-master | exa/models/unilm-master/dit/classification/transforms.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, DINO and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit/
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
from timm.data import create_transform
from timm.data.constants import \
IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.data.transforms import str_to_interp_mode
from torchvision import transforms
from dataset_folder import RvlcdipImageFolder
def build_dataset(is_train, args):
transform = build_transform(is_train, args)
print("Transform = ")
if isinstance(transform, tuple):
for trans in transform:
print(" - - - - - - - - - - ")
for t in trans.transforms:
print(t)
else:
for t in transform.transforms:
print(t)
print("---------------------------")
if args.data_set == 'rvlcdip':
root = args.data_path if is_train else args.eval_data_path
split = "train" if is_train else "test"
dataset = RvlcdipImageFolder(root, split=split, transform=transform)
nb_classes = args.nb_classes
assert len(dataset.class_to_idx) == nb_classes
else:
raise NotImplementedError()
assert nb_classes == args.nb_classes
print("Number of the class = %d" % args.nb_classes)
return dataset, nb_classes
def build_transform(is_train, args):
resize_im = args.input_size > 32
imagenet_default_mean_and_std = args.imagenet_default_mean_and_std
mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN
std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD
if is_train:
# this should always dispatch to transforms_imagenet_train
transform = create_transform(
input_size=args.input_size,
is_training=True,
color_jitter=args.color_jitter,
auto_augment=args.aa,
interpolation=args.train_interpolation,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
mean=mean,
std=std,
)
if not resize_im:
# replace RandomResizedCropAndInterpolation with
# RandomCrop
transform.transforms[0] = transforms.RandomCrop(
args.input_size, padding=4)
return transform
t = []
if resize_im:
if args.crop_pct is None:
if args.input_size < 384:
args.crop_pct = 224 / 256
else:
args.crop_pct = 1.0
size = int(args.input_size / args.crop_pct)
t.append(
transforms.Resize(size, interpolation=str_to_interp_mode("bicubic")),
# to maintain same ratio w.r.t. 224 images
)
t.append(transforms.CenterCrop(args.input_size))
t.append(transforms.ToTensor())
t.append(transforms.Normalize(mean, std))
return transforms.Compose(t)
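
if __name__ == "__main__":
    # Illustrative check of the eval-time sizing above (SimpleNamespace stands in for
    # the real argparse args): with input_size=224 the crop_pct defaults to 224/256,
    # so images are resized to 256 and then centre-cropped to 224.
    from types import SimpleNamespace
    eval_args = SimpleNamespace(input_size=224, crop_pct=None, imagenet_default_mean_and_std=True)
    print(build_transform(is_train=False, args=eval_args))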
| EXA-1-master | exa/models/unilm-master/dit/classification/datasets.py |