Dataset columns:
python_code: string, 0 to 4.04M characters
repo_name: string, 8 to 58 characters
file_path: string, 5 to 147 characters
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # """ Classes Node, Arc, DependencyTree providing functionality for syntactic dependency trees """ from __future__ import print_function, division import re from queue import Queue import conll_utils as conll_utils class Node(object): def __init__(self, index=None, word="", lemma="", head_id=None, pos="", dep_label="", morph="_", size=None, dep_label_new=None): """ :param index: int :param word: str :param head_id: int :param pos: str :param dep_label: str """ self.index = index self.word = word self.lemma = lemma self.head_id = head_id self.pos = pos self.dep_label = dep_label self.morph = morph if dep_label_new is None: self.dep_label_new = dep_label else: self.dep_label_new = dep_label_new # to assign after tree creation self.size = size self.dir = None def __str__(self): return "\t".join([str(self.index), self.word, self.pos, self.morph, str(self.head_id), str(self.dep_label)]) def __repr__(self): return "\t".join([str(v) for (a, v) in self.__dict__.items() if v]) @classmethod def from_str(cls, string): index, word, pos, head_id, dep_label = [None if x == "None" else x for x in string.split("\t")] return Node(index, word, head_id, pos, dep_label) def __eq__(self, other): return other is not None and \ self.index == other.index and \ self.word == other.word and \ self.head_id == other.head_id and \ self.pos == other.pos and \ self.dep_label == other.dep_label def __hash__(self): return hash(tuple(self.__dict__.values())) def is_root(self): generic_root = DependencyTree.generic_root(conll_utils.UD_CONLL_CONFIG) if self.word == generic_root.word and self.pos == generic_root.pos: return True return False class Arc(object): LEFT = "L" RIGHT = "R" def __init__(self, head, direction, child): self.head = head self.dir = direction self.child = child self.dep_label = child.dep_label def __str__(self): return str(self.head) + " " + self.dir + " " + str(self.child) def __repr__(self): return str(self) @classmethod def from_str(cls, string): head_str, dir, child_str = string.split(" ") return Arc(Node.from_str(head_str), dir, Node.from_str(child_str)) def __eq__(self, other): if type(other) is type(self): return self.__dict__ == other.__dict__ return False def __hash__(self): return hash(tuple(self.__dict__.values())) def length(self): # arcs to ROOT node have length 0 if self.head.is_root(): return 0 else: return abs(self.child.index - self.head.index) class DependencyTree(object): def __init__(self, nodes, arcs, config, fused_nodes): self.nodes = nodes self.arcs = arcs self.assign_sizes_to_nodes() self.config = config # for UD annotation to be able to recover original sentence (without split morphemes) self.fused_nodes = fused_nodes def __str__(self): return "\n".join([str(n) for n in self.nodes]) def __repr__(self): return str(self) def children(self, head): children = [] for arc in self.arcs: if arc.head == head: children.append(arc.child) return children def assign_sizes_to_nodes(self): for node in self.nodes: node.size = len(self.children(node)) + 1 def reindex(self, nodes, conll_config): """ After reordering 'nodes' list reflects the final order of nodes, however the indices of node objects do not correspond to this order. This function fixes it. 
""" new_positions = {} new_nodes = [] # in order for i in range(len(nodes)): new_positions[nodes[i].index] = i for i in range(len(nodes)): new_nodes.append(nodes[i]) if nodes[i].head_id == conll_config.ROOT_INDEX: nodes[i].index = i + conll_config.OFFSET else: nodes[i].index = i + conll_config.OFFSET nodes[i].head_id = new_positions[nodes[i].head_id] + conll_config.OFFSET self.nodes = new_nodes def remove_node(self, node_x): assert len(self.children(node_x)) == 0 self.nodes.remove(node_x) for node in self.nodes: if node.head_id > node_x.index: node.head_id = node.head_id - 1 if node.index > node_x.index: node.index = node.index - 1 for i in range(len(self.fused_nodes)): start, end, token = self.fused_nodes[i] if start > node_x.index: start = start - 1 if end > node_x.index: end = end - 1 self.fused_nodes[i] = (start, end, token) def subtree(self, head): elements = set() queue = Queue() queue.put(head) #head_ = Node(head.index, head.word, head.pos + "X") elements.add(head) visited = set() while not queue.empty(): next_node = queue.get() if next_node in visited: continue visited.add(next_node) for child in self.children(next_node): elements.add(child) queue.put(child) return sorted(elements, key=lambda element: int(element.index)) def is_projective_arc(self, arc): st = self.subtree(arc.head) # all nodes in subtree of the arc head st_idx = [node.index for node in st] # span between the child and the head indexes = range(arc.child.index + 1, arc.head.index) if arc.child.index < arc.head.index else range( arc.head.index + 1, arc.child.index) # each node/word between child and head should be part of the subtree # if not, than the child-head arc is crossed by some other arc and is non-projective for i in indexes: if i not in st_idx: return False return True def is_projective(self): return all(self.is_projective_arc(arc) for arc in self.arcs) def length(self): return sum(arc.length() for arc in self.arcs) def average_branching_factor(self): heads = [node.head_id for node in self.nodes] return len(self.nodes)/len(set(heads)) def root(self): return DependencyTree.generic_root(self.config) def remerge_segmented_morphemes(self): """ UD format only: Remove segmented words and morphemes and substitute them by the original word form - all children of the segments are attached to the merged word form - word form features are assigned heuristically (should work for Italian, not sure about other languages) - pos tag and morphology (zero?) comes from the first morpheme :return: """ for start, end, token in self.fused_nodes: # assert start + 1 == end, t self.nodes[start - 1].word = token for i in range(end - start): # print(i) if len(self.children(self.nodes[start])) != 0: for c in self.children(self.nodes[start]): c.head_id = self.nodes[start - 1].index self.arcs.remove(Arc(child=c, head=self.nodes[start], direction=c.dir)) self.arcs.append(Arc(child=c, head=self.nodes[start - 1], direction=c.dir)) assert len(self.children(self.nodes[start])) == 0, (self, start, end, token, i, self.arcs) self.remove_node(self.nodes[start]) # print(t) # print(t) self.fused_nodes = [] @classmethod def generic_root(cls, conll_config): return Node(conll_config.ROOT_INDEX, "ROOT", "ROOT", 0, "ROOT", size=0) @classmethod def from_sentence(cls, sentence, conll_config): nodes = [] fused_nodes = [] for i in range(len(sentence)): row = sentence[i] if conll_config.MORPH is not None: morph = row[conll_config.MORPH] else: morph = "_" # saving original word segments separated in UD (e.g. 
Italian darglielo -> dare + gli + lo) if conll_config == conll_utils.UD_CONLL_CONFIG: if re.match(r"[0-9]+-[0-9]+", row[0]): fused_nodes.append((int(row[0].split("-")[0]), int(row[0].split("-")[1]), row[1])) continue # empty elements (e.g. copula in Russian) if re.match(r"[0-9]+\.[0-9]+", row[0]): continue if conll_config.INDEX is not None: nodes.append( Node(int(row[conll_config.INDEX]), row[conll_config.WORD], row[conll_config.LEMMA], int(row[conll_config.HEAD_INDEX]), pos=row[conll_config.POS], dep_label=row[conll_config.DEP_LABEL], morph=morph)) else: nodes.append(Node(i, row[conll_config.WORD], row[conll_config.LEMMA], int(row[conll_config.HEAD_INDEX]), pos=row[conll_config.POS], dep_label=row[conll_config.DEP_LABEL], morph=morph)) arcs = [] for node in nodes: head_index = int(node.head_id) head_element = nodes[head_index - conll_config.OFFSET] if head_index == conll_config.ROOT_INDEX: arcs.append(Arc(cls.generic_root(conll_config), Arc.LEFT, node)) elif head_index < int(node.index): arcs.append(Arc(head_element, Arc.RIGHT, node)) node.dir = Arc.RIGHT else: arcs.append(Arc(head_element, Arc.LEFT, node)) node.dir = Arc.LEFT return cls(nodes, arcs, conll_config, fused_nodes) def pprint(self, conll_config, lower_case=False): # TODO: change the indices of heads in accordance with the config s = "" for node in self.nodes: row = ["_"] * conll_config.NCOLS if conll_config.INDEX is not None: row[conll_config.INDEX] = str(node.index) if node.word: if lower_case: row[conll_config.WORD] = node.word.lower() else: row[conll_config.WORD] = node.word if node.pos: row[conll_config.POS] = node.pos if node.morph: row[conll_config.MORPH] = node.morph if node.lemma: row[conll_config.LEMMA] = node.lemma row[conll_config.HEAD_INDEX] = str(node.head_id) if node.dep_label: row[conll_config.DEP_LABEL] = node.dep_label s = s + "\t".join(row) + "\n" return s #.encode("utf-8") def load_trees_from_conll(file_name, config=None): sentences = conll_utils.read_sentences_from_columns(open(file_name)) # config for the default cases, to facilitate handling of multiple formats at the same time # for guaranteed performance, config should be supplied if config is None: if len(sentences[0][0]) == conll_utils.ZGEN_CONLL_CONFIG.NCOLS: config = conll_utils.ZGEN_CONLL_CONFIG elif len(sentences[0][0]) == conll_utils.UD_CONLL_CONFIG.NCOLS: config = conll_utils.UD_CONLL_CONFIG else: print("Unrecognised format of ", file_name) return None trees = [] for s in sentences: trees.append(DependencyTree.from_sentence(s, config)) return trees
colorlessgreenRNNs-main
src/syntactic_testsets/tree_module.py
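A minimal usage sketch for the tree_module API above. The file name "sample.conllu" is a hypothetical UD treebank; the CoNLL config is inferred from the number of columns, as in load_trees_from_conll.

import tree_module as tm

# Parse every sentence of a (hypothetical) UD file into DependencyTree objects.
trees = tm.load_trees_from_conll("sample.conllu")

for tree in trees:
    # Merge UD multi-word tokens (e.g. Italian "darglielo") back into single word forms.
    tree.remerge_segmented_morphemes()
    # Basic statistics exposed by the class.
    print(tree.length(), tree.is_projective(), tree.average_branching_factor())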
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

import sys

from utils import read_paradigms, load_vocab, extract_sent_features, transform_gold, vocab_freqs

import pandas as pd

lang = sys.argv[1]

path_test_data = "/private/home/gulordava/colorlessgreen/data/agreement/" + lang + "/generated"
path_lm_data = "/private/home/gulordava/colorlessgreen/data/lm/" + lang

if lang == "English":
    path_paradigms = "/private/home/gulordava/edouard_data/enwiki/paradigms_UD.txt"
if lang == "Italian":
    path_paradigms = "/private/home/gulordava/edouard_data/itwiki/paradigms_UD.txt"
if lang == "Italian_srnn":
    path_paradigms = "/private/home/gulordava/edouard_data/itwiki/paradigms_UD.txt"
if lang == "Russian":
    path_paradigms = "/private/home/gulordava/edouard_data/ruwiki/paradigms_UD.txt"
if lang == "Hebrew":
    path_paradigms = "/private/home/gulordava/edouard_data/hewiki/p2"

gold = open(path_test_data + ".gold").readlines()
sents = open(path_test_data + ".text").readlines()

paradigms = read_paradigms(path_paradigms)

output = []
vocab = load_vocab(path_lm_data + "/vocab.txt")

data = transform_gold(gold)
data = pd.DataFrame(data, columns=["pattern_id", "constr_id", "sent_id", "correct_number", "form", "class"])

data.loc[data.sent_id == 0, "type"] = "original"
data.loc[data.sent_id > 0, "type"] = "generated"

# getting simpler pattern labels
patterns = {p: "__".join(p.split("!")[:2]) for p in set(data.pattern_id)}
data["pattern"] = data["pattern_id"].map(patterns)

df_sents = extract_sent_features(sents, gold, vocab, paradigms)
full_df = data.merge(df_sents, on=["pattern_id", "constr_id", "sent_id"])

freq_dict = vocab_freqs(path_lm_data + "/train.txt", vocab)
full_df["freq"] = full_df["form"].map(freq_dict)

fields = ["pattern", "constr_id", "sent_id", "correct_number", "form", "class", "type", "prefix",
          "n_attr", "punct", "freq", "len_context", "len_prefix", "sent"]

full_df[fields].to_csv(path_test_data + ".tab", sep="\t", index=False)
colorlessgreenRNNs-main
src/syntactic_testsets/_create_datatable.py
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
colorlessgreenRNNs-main
src/syntactic_testsets/__init__.py
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # from __future__ import print_function #!/usr/bin/env python import sys import re from collections import namedtuple ConllConfig = namedtuple('CONLL_config', ['INDEX', 'WORD', 'POS', 'LEMMA', 'MORPH', 'HEAD_INDEX', 'DEP_LABEL', 'OFFSET', 'ROOT_INDEX', 'NCOLS'], verbose=False) UD_CONLL_CONFIG = ConllConfig(INDEX=0, WORD=1, LEMMA=2, POS=3, MORPH=5, HEAD_INDEX=6, DEP_LABEL=7, OFFSET=1, ROOT_INDEX=0, NCOLS=10) UD_CONLL_FINE_POS_CONFIG = ConllConfig(INDEX=0, WORD=1, LEMMA=2, POS=4, MORPH=5, HEAD_INDEX=6, DEP_LABEL=7, OFFSET=1, ROOT_INDEX=0, NCOLS=10) CONLL09_CONFIG = ConllConfig(INDEX=0, WORD=1, LEMMA=2, POS=4, MORPH=6, #TODO check morph column id HEAD_INDEX=8, DEP_LABEL=10, OFFSET=1, ROOT_INDEX=0, NCOLS=12) ZGEN_CONLL_CONFIG = ConllConfig(INDEX=None, WORD=0, LEMMA=0, POS=1, MORPH=None, HEAD_INDEX=2, DEP_LABEL=3, OFFSET=0, ROOT_INDEX=-1, NCOLS=4) ARCS_CONLL_CONFIG = ConllConfig(INDEX=0, WORD=1, LEMMA=1, POS=2, MORPH=None, HEAD_INDEX=3, DEP_LABEL=6, OFFSET=1, ROOT_INDEX=0, NCOLS=7) DEP_LABEL_TYPES = { "core": "ccomp csubj csubjpass dobj iobj nsubj nsubjpass xcomp".split(), "non_core": """acl discourse nmod advcl dislocated nummod advmod expl parataxis amod foreign remnant appos goeswith reparandum compound list root -NONE- conj mwe vocative dep name""".split(), "func": "aux auxpass case cc cop det mark neg".split(), "other": "punct".split()} def get_config(name): if name == "UD": return UD_CONLL_CONFIG elif name == "ZGEN": return ZGEN_CONLL_CONFIG elif name == "CONLL09": return CONLL09_CONFIG elif name == "UD_fine_pos": return UD_CONLL_FINE_POS_CONFIG def read_blankline_block(stream): s = '' list = [] while True: line = stream.readline() # End of file: if not line: list.append(s) return list # Blank line: elif line and not line.strip(): list.append(s) s = '' # Other line: # in Google UD some lines can be commented and some can have multiword expressions/fused morphemes introduced by "^11-12 sss" # and not re.match("[0-9]+-[0-9]+",line) and not line.startswith("<") elif not line.startswith("#"): # and "_\t_\t_\t_\t_\t" in line): # and not line.startswith("<"): s += line def read_sentences_from_columns(stream): # grids are sentences in column format grids = [] for block in read_blankline_block(stream): block = block.strip() if not block: continue grid = [line.split('\t') for line in block.split('\n')] appendFlag = True # Check that the grid is consistent. 
for row in grid: if len(row) != len(grid[0]): print(grid) #raise ValueError('Inconsistent number of columns:\n%s'% block) sys.stderr.write('Inconsistent number of columns', block) appendFlag = False break if appendFlag: grids.append(grid) return grids def output_conll(sentences, prefix): f_gold = open(prefix + "_conll.gold", "w") f_guess = open(prefix + "_conll.guess", "w") for sentence in sentences: for (num, word, pos, correct_dep, guess_dep) in sentence: f_gold.write("\t".join([num, word, word, pos, pos, "_", correct_dep, "_", "_", "_"]) + "\n") f_guess.write("\t".join([num, word, word, pos, pos, "_", guess_dep, "_", "_", "_"]) + "\n") f_gold.write("\n") f_guess.write("\n") def pprint(column_sentence): for row in column_sentence: print("\t".join([word for word in row])) print("") def write_conll(sentences, file_out): for sentence in sentences: # print "\n".join("\t".join(word for word in row)for row in sentence) file_out.write("\n".join("\t".join(word for word in row) for row in sentence)) file_out.write("\n\n") def pseudo_rand_split(sentences): i = 0 train = [] test = [] for sentence in sentences: i += 1 if i < 10: train.append(sentence) else: test.append(sentence) i = 0 return train, test ''' def main(): s = conll_utils() s.read() s.output_short_sentences() #s.print_dep_length() class conll_utils(object): # num_sents[len(extract_arcs(tree))] += 1 # print "\n".join("%d\t%f\t%f" % (size, counts_real[size]/float(num_sents[size]), counts_rand[size]/float(num_sents[size])) for size in counts_real.keys()) #print dep_length(extract_arcs(tree)) def output_short_sentences(self): sentences = read_sentences_from_columns(open(self.input)) for sentence in sentences: if len(sentence) == 10: # and len(sentence) > 8: pprint(sentence) def read(self): #sys.stderr.write("Main..\n") self.sentences = read_sentences_from_columns(open(self.input)) #print self.sentences """ self.correct_trees = [] self.guess_trees = [] for sentence in self.sentences: self.correct_trees.append([row[:4] for row in sentence]) self.guess_trees.append([row[:3] + [row[4]] for row in sentence]) """ def __init__(self): optparser = optparse.OptionParser() optparser.add_option("-n", "--num_training_iterations", dest="iterations", default=5, type="int", help="Number of training iterations") optparser.add_option("-N", "--num_training_sentences", dest="num_sents", default=1000, type="int", help="Number of training sentences to use") optparser.add_option("-t", "--threshold", dest="threshold", default=0.5, type="float", help="Score threshold for alignment") optparser.add_option("-d", "--display_count", dest="display_count", default=5, type="int", help="Number of alignments to display") optparser.add_option("-i", "--input", dest="input", default="test", help="Input file name") optparser.add_option("-e", "--evaluation", dest="evaluation", default="undirected", help="Type of dependency evaluation") (opts, args) = optparser.parse_args() self.input = opts.input self.evaluation = opts.evaluation return def accuracy(): sum = 0 length = 0 print len(self.sentences) if (self.evaluation == "directed"): for sentence in self.sentences: sum += correct_dir(sentence) length += len(sentence) #undirected else: for (correct_tree, guess_tree) in zip(self.correct_trees, self.guess_trees): sum += correct_undir(correct_tree, guess_tree) print "\n".join(str(row) for row in zip(correct_tree, guess_tree)) print correct_undir(correct_tree, guess_tree) length += len(correct_tree) print sum / float(length) for c1, c2 in zip(collect_statistics(self.correct_trees), 
collect_statistics(self.guess_trees)): print ' '.join(str(i) for i in c1[0]) + "\t" + str(c1[1]) #output_conll(self.sentences, self.input) if __name__ == "__main__": main() '''
colorlessgreenRNNs-main
src/syntactic_testsets/conll_utils.py
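A short, hedged example of the conll_utils helpers defined above; "sample.conllu" is a hypothetical input file in UD column format.

import conll_utils

config = conll_utils.get_config("UD")  # same object as conll_utils.UD_CONLL_CONFIG

# Each sentence is a list of rows; each row is a list of column values.
sentences = conll_utils.read_sentences_from_columns(open("sample.conllu"))
for row in sentences[0]:
    print(row[config.WORD], row[config.POS], row[config.HEAD_INDEX])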
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # import pandas as pd from collections import defaultdict import string def read_paradigms(path): """ reads morphological paradigms from a file with token, lemma, tag, morph, freq fields returns a simple dict: token -> list of all its analyses and their frequencies """ d = defaultdict(list) with open(path, "r") as f: for line in f: token, lemma, tag, morph, freq = line.split("\t") s_m = morph.split("|") s_m.sort() morph = "|".join(s_m) d[token].append((lemma, tag, morph, int(freq))) return d def load_vocab(vocab_file): f_vocab = open(vocab_file, "r") vocab = {w: i for i, w in enumerate(f_vocab.read().split())} f_vocab.close() return vocab def ltm_to_word(paradigms): """ converts standard paradigms dict (token -> list of analyses) to a dict (l_emma, t_ag, m_orph -> word) (where word in the most frequent form, e.g. between capitalized and non-capitalized Fanno and fanno) """ #paradigms = read_paradigms("/private/home/gulordava/edouard_data/itwiki//paradigms_UD.txt") paradigms_lemmas = defaultdict(lambda: defaultdict(lambda: defaultdict(int))) for w in paradigms: for lemma, tag, morph, freq in paradigms[w]: paradigms_lemmas[(lemma, tag)][morph][w] = int(freq) best_paradigms_lemmas = defaultdict(lambda: defaultdict(lambda: defaultdict(str))) for l, t in paradigms_lemmas: for m in paradigms_lemmas[(l, t)]: word = sorted(paradigms_lemmas[(l, t)][m].items(), key=lambda x: -x[1])[0][0] best_paradigms_lemmas[l][t][m] = word return best_paradigms_lemmas def vocab_freqs(train_data_file, vocab): train_data = open(train_data_file).readlines() freq_dict = {} for w in vocab: freq_dict[w] = 0 for line in train_data: for w in line.split(): if w in vocab: freq_dict[w] += 1 return freq_dict """ def number_agreement_data(sents, gold, ltm_paradigms, vocab): data = [] sentence_parts = [] for sent, g in zip(sents, gold): pattern_id, constr_id, sent_id, idx, gold_pos, gold_morph, _, _, _ = g.split() if "Number=Plur" in gold_morph: correct_number = "plur" elif "Number=Sing" in gold_morph: correct_number = "sing" else: continue sent_part = sent.split()[:int(idx)] # print(sent_part, gold_pos, gold_morph) for lemma, form, form_alt in choose_random_forms(ltm_paradigms, vocab, gold_pos, gold_morph): sentence_parts.append(" ".join(sent_part) + " " + form + "\n") sentence_parts.append(" ".join(sent_part) + " " + form_alt + "\n") data.append((pattern_id, int(constr_id), int(sent_id), lemma, correct_number, form, "correct")) data.append((pattern_id, int(constr_id), int(sent_id), lemma, correct_number, form_alt, "wrong")) return data, sentence_parts """ def plurality(morph): if "Number=Plur" in morph: return "plur" elif "Number=Sing" in morph: return "sing" else: return "none" def transform_gold(gold): data = [] for g in gold: pattern_id, constr_id, sent_id, r_idx, r_pos, r_morph, form, form_alt, lemma, l_idx, l_pos, prefix = g.split( "\t") correct_number = plurality(r_morph) data.append((pattern_id, int(constr_id), int(sent_id), correct_number, form, "correct")) data.append((pattern_id, int(constr_id), int(sent_id), correct_number, form_alt, "wrong")) return data def is_attr(word, pos, number, paradigms): """ verify whether a word is attractor, that is of tag *pos* and of the number opposite of *number* """ if not paradigms[word]: return False max_freq = max([p[3] for p in paradigms[word]]) for lemma, tag, morph, freq in 
paradigms[word]: # a word can have different tags (be ambiguous) # we filter out tags which are very infrequent (including wrong tags for functional words) if freq < max_freq / 10: continue if tag == pos and plurality(morph) != "none" and plurality(morph) != number: return True return False def extract_sent_features(sents, gold, vocab, paradigms): """ Extracting some features of the construction and the sentence for data analysis """ paradigms_word_tag = defaultdict(list) for w in paradigms: for lemma, tag, morph, freq in paradigms[w]: paradigms_word_tag[w].append(tag) df_sents = [] constr_id_unk = [] n_attractors = [] punct = [] for s, g in zip(sents, gold): pattern_id, constr_id, sent_id, r_idx, r_pos, r_morph, form, form_alt, lemma, l_idx, l_pos, prefix = g.split("\t") sent_id = int(sent_id) r_idx = int(r_idx) l_idx = int(l_idx) s_lm = " ".join([w if w in vocab else "<unk>" for w in s.split()[:r_idx]]) n_unk = len([w for w in s.split()[:r_idx] if w not in vocab ]) if sent_id == 0: constr_id_unk.append((pattern_id, int(constr_id), n_unk)) number = plurality(r_morph) #print(r_morph, number) attrs = [w for w in s.split()[l_idx + 1:r_idx] if is_attr(w, l_pos, number, paradigms)] n_attractors.append((pattern_id, int(constr_id), len(attrs))) #punct.append((pattern_id, int(constr_id), "PUNCT" in pos_seq)) punct.append((pattern_id, int(constr_id), any(p in prefix.split() for p in string.punctuation))) #print(s_lm) #print(attrs) n_unk = s_lm.count("<unk>") len_prefix = len(s_lm.split()) len_context = r_idx - l_idx df_sents.append((pattern_id, int(constr_id), int(sent_id), s.strip(), s_lm, n_unk, len_context, len_prefix)) df_sents = pd.DataFrame(df_sents, columns = ["pattern_id","constr_id", "sent_id", "sent", "prefix", "n_unk","len_context","len_prefix"]) #print(constr_id_unk) unk = pd.DataFrame(constr_id_unk, columns=["pattern_id", "constr_id", "n_unk_original"]) attr = pd.DataFrame(n_attractors, columns=["pattern_id","constr_id","n_attr"]) punct = pd.DataFrame(punct, columns=["pattern_id","constr_id","punct"]) df_sents = df_sents.merge(unk, on=["pattern_id", "constr_id"]) df_sents = df_sents.merge(attr, on=["pattern_id", "constr_id"]) df_sents = df_sents.merge(punct, on=["pattern_id", "constr_id"]) return df_sents
colorlessgreenRNNs-main
src/syntactic_testsets/utils.py
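A hedged illustration of the paradigm utilities above; "paradigms_UD.txt" is a hypothetical tab-separated file with token, lemma, tag, morph, and frequency columns, as expected by read_paradigms.

from utils import read_paradigms, ltm_to_word, plurality

paradigms = read_paradigms("paradigms_UD.txt")   # token -> [(lemma, tag, morph, freq), ...]
ltm_paradigms = ltm_to_word(paradigms)           # lemma -> tag -> morph -> most frequent form
print(plurality("Gender=Fem|Number=Plur"))       # prints "plur"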
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # import argparse import random import pandas as pd import tree_module as tm from extract_dependency_patterns import grep_morph_pattern from generate_utils import is_good_form, get_alt_form, match_features, alt_numeral_morph from utils import read_paradigms, load_vocab, ltm_to_word, extract_sent_features, transform_gold, vocab_freqs def generate_morph_pattern_test(trees, pattern, paradigms, vocab, n_sentences=10): arc_dir, context = pattern.split("\t")[:2] context = tuple(context.split("_")) l_values = pattern.split("\t")[2:] pattern_id = pattern.replace("\t", "!") ltm_paradigms = ltm_to_word(paradigms) output = [] constr_id = 0 n_vocab_unk = 0 n_paradigms_unk = 0 # 'nodes' constitute Y, without X or Z included for context, l, r, t, nodes in grep_morph_pattern(trees, context, l_values, arc_dir): #pos_constr = "_".join(n.pos for n in t.nodes[l.index - 1: r.index]) # filter model sentences with unk and the choice word not in vocab if not all([n.word in vocab for n in nodes + [l, r]]): n_vocab_unk += 1 continue if not is_good_form(r.word, r.word, r.morph, r.lemma, r.pos, vocab, ltm_paradigms): n_paradigms_unk += 1 continue prefix = " ".join(n.word for n in t.nodes[:r.index]) for i in range(n_sentences): # sent_id = 0 - original sentence with good lexical items, other sentences are generated if i == 0: new_context = " ".join(n.word for n in t.nodes) form = r.word form_alt = get_alt_form(r.lemma,r.pos,r.morph,ltm_paradigms) lemma = r.lemma else: new_context = generate_context(t.nodes, paradigms, vocab) random_forms = choose_random_forms(ltm_paradigms,vocab, r.pos,r.morph, n_samples=1, gold_word=r.word) if len(random_forms) > 0: lemma, form, form_alt = random_forms[0] else: # in rare cases, there is no (form, form_alt) both in vocab # original form and its alternation are not found because e.g. 
one or the other is not in paradigms # (they should anyway be in the vocabulary) lemma, form = r.lemma, r.word form_alt = get_alt_form(r.lemma, r.pos, r.morph, ltm_paradigms) # constr_id sent_id Z_index Z_pos Z_gold_morph gold_str = "\t".join([pattern_id, str(constr_id), str(i), str(r.index - 1), r.pos, r.morph, form, form_alt, lemma, str(l.index - 1), l.pos, prefix]) + "\n" output.append((new_context + " <eos>\n", gold_str)) constr_id += 1 print("Problematic sentences vocab/paradigms", n_vocab_unk, n_paradigms_unk) return output def is_content_word(pos): return pos in ["ADJ", "NOUN", "VERB", "PROPN", "NUM", "ADV"] def generate_context(nodes, paradigms, vocab): output = [] for i in range(len(nodes)): substitutes = [] n = nodes[i] # substituting content words if is_content_word(n.pos): for word in paradigms: if word == n.word: continue # matching capitalization and vowel if not match_features(word, n.word): continue tag_set = set([p[1] for p in paradigms[word]]) # use words with unambiguous POS if len(tag_set) == 1 and tag_set.pop() == n.pos: for _, _, morph, freq in paradigms[word]: if n.morph == morph and int(freq) > 1 and word in vocab: substitutes.append(word) if len(substitutes) == 0: output.append(n.word) else: output.append(random.choice(substitutes)) else: output.append(n.word) return " ".join(output) def choose_random_forms(ltm_paradigms, vocab, gold_pos, morph, n_samples=10, gold_word=None): candidates = set() #lemma_tag_pairs = ltm_paradigms.keys() #test_lemmas = [l for l, t in lemma_tag_pairs] for lemma in ltm_paradigms: poses = list(ltm_paradigms[lemma].keys()) if len(set(poses)) == 1 and poses.pop() == gold_pos: form = ltm_paradigms[lemma][gold_pos][morph] _, morph_alt = alt_numeral_morph(morph) form_alt = ltm_paradigms[lemma][gold_pos][morph_alt] if not is_good_form(gold_word, form, morph, lemma, gold_pos, vocab, ltm_paradigms): continue candidates.add((lemma, form, form_alt)) if len(candidates) > n_samples: return random.sample(candidates, n_samples) else: return random.sample(candidates, len(candidates)) def main(): parser = argparse.ArgumentParser(description='Generating sentences based on patterns') parser.add_argument('--treebank', type=str, required=True, help='input file (in a CONLL column format)') parser.add_argument('--paradigms', type=str, required=True, help="the dictionary of tokens and their morphological annotations") parser.add_argument('--vocab', type=str, required=True,help='(LM) Vocabulary to generate words from') parser.add_argument('--patterns', type=str, required=True) parser.add_argument('--output', type=str, required=True, help="prefix for generated text and annotation data") parser.add_argument('--lm_data', type=str, required=False, help="path to LM data to estimate word frequencies") args = parser.parse_args() trees = tm.load_trees_from_conll(args.treebank) for t in trees: t.remerge_segmented_morphemes() paradigms = read_paradigms(args.paradigms) f_text = open(args.output + ".text", "w") f_gold = open(args.output + ".gold", "w") f_eval = open(args.output + ".eval", "w") output = [] vocab = load_vocab(args.vocab) for line in open(args.patterns, "r"): print("Generating sentences with pattern", line.strip()) #l_values = ('Gender=Fem|Number=Sing','Gender=Masc|Number=Plur') data = generate_morph_pattern_test(trees, line.strip(), paradigms, vocab) output.extend(data) print("Generated", len(data), "sentences") random.shuffle(output) sents, golds = zip(*output) f_text.writelines(sents) f_gold.writelines(golds) # save the index of the target word to evaluate 
f_eval.writelines([g.split("\t")[3] + "\n" for g in golds]) ############################################################## # Make a readable data table with fields useful for analysis # ############################################################## data = transform_gold(golds) data = pd.DataFrame(data, columns=["pattern_id", "constr_id", "sent_id", "correct_number", "form", "class"]) data.loc[data.sent_id == 0, "type"] = "original" data.loc[data.sent_id > 0, "type"] = "generated" # getting simpler pattern labels patterns = {p: "__".join(p.split("!")[:2]) for p in set(data.pattern_id)} data["pattern"] = data["pattern_id"].map(patterns) df_sents = extract_sent_features(sents, golds, vocab, paradigms) full_df = data.merge(df_sents, on=["pattern_id", "constr_id", "sent_id"]) if args.lm_data: freq_dict = vocab_freqs(args.lm_data + "/train.txt", vocab) full_df["freq"] = full_df["form"].map(freq_dict) fields = ["pattern", "constr_id", "sent_id", "correct_number", "form", "class", "type", "prefix", "n_attr", "punct","freq", "len_context", "len_prefix", "sent"] else: fields = ["pattern", "constr_id", "sent_id", "correct_number", "form", "class", "type", "prefix", "n_attr", "punct","len_context", "len_prefix", "sent"] full_df[fields].to_csv(args.output + ".tab", sep="\t", index=False) if __name__ == "__main__": main()
colorlessgreenRNNs-main
src/syntactic_testsets/generate_nonsense.py
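A hedged sketch of the sampling helper above, assuming the sibling modules of this repository are importable; the paradigm and vocabulary paths are hypothetical placeholders.

from utils import read_paradigms, load_vocab, ltm_to_word
from generate_nonsense import choose_random_forms

paradigms = read_paradigms("paradigms_UD.txt")
ltm_paradigms = ltm_to_word(paradigms)
vocab = load_vocab("vocab.txt")

# Sample up to 5 (lemma, form, alternate-number form) triples for feminine plural nouns.
for lemma, form, form_alt in choose_random_forms(ltm_paradigms, vocab, "NOUN",
                                                 "Gender=Fem|Number=Plur", n_samples=5):
    print(lemma, form, form_alt)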
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#


def is_vowel(c):
    return c in ["a", "o", "u", "e", "i", "A", "O", "U", "E", "I", "è"]


def alt_numeral_morph(morph):
    if "Number=Plur" in morph:
        morph_alt = morph.replace("Plur", "Sing")
        return "plur", morph_alt
    elif "Number=Sing" in morph:
        morph_alt = morph.replace("Sing", "Plur")
        return "sing", morph_alt


def is_good_form(gold_form, new_form, gold_morph, lemma, pos, vocab, ltm_paradigms):
    _, alt_morph = alt_numeral_morph(gold_morph)
    if not new_form in vocab:
        return False
    alt_form = ltm_paradigms[lemma][pos][alt_morph]
    if not alt_form in vocab:
        return False
    if gold_form is None:
        print(gold_form, gold_morph)
        return True
    if not match_features(new_form, gold_form):
        return False
    if not match_features(alt_form, gold_form):
        return False
    return True


def get_alt_form(lemma, pos, morph, ltm_paradigms):
    _, alt_morph = alt_numeral_morph(morph)
    return ltm_paradigms[lemma][pos][alt_morph]


def match_features(w1, w2):
    return w1[0].isupper() == w2[0].isupper() and is_vowel(w1[0]) == is_vowel(w2[0])
colorlessgreenRNNs-main
src/syntactic_testsets/generate_utils.py
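A brief sketch exercising the helpers above; these are pure functions, so no external data is needed. The example words are arbitrary Italian forms chosen for illustration.

from generate_utils import alt_numeral_morph, match_features, is_vowel

print(alt_numeral_morph("Gender=Masc|Number=Sing"))   # ("sing", "Gender=Masc|Number=Plur")
print(match_features("Albero", "Acqua"))              # True: both capitalized, both vowel-initial
print(is_vowel("b"))                                  # False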
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

import argparse
from collections import defaultdict

from data import data_utils

parser = argparse.ArgumentParser(description='Reading and processing a large gzip file')
parser.add_argument('--input', type=str, required=True, help='Input path (in a column CONLL UD format)')
parser.add_argument('--output', type=str, required=True, help="Output file name")
parser.add_argument('--nwords', type=int, default='100000000', required=False, help='How many words to process')
parser.add_argument('--min_freq', type=int, default='5', required=False,
                    help='Minimal frequency of paradigm to be included in the dictionary')

args = parser.parse_args()

nwords = 0
paradigms = defaultdict(int)

for line in data_utils.read(args.input):
    if line.strip() == "" or len(line.split("\t")) < 2:
        continue
    else:
        fields = line.split("\t")
        if fields[1].isalpha():
            paradigms[(fields[1], fields[2], fields[3], fields[5])] += 1
            nwords += 1
    if nwords > args.nwords:
        break

with open(args.output, 'w') as f:
    for p in paradigms:
        if paradigms[p] > args.min_freq:
            f.write("\t".join(el for el in p) + "\t" + str(paradigms[p]) + "\n")
    f.close()
colorlessgreenRNNs-main
src/data/collect_paradigms.py
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

import sys

file_name = sys.argv[1]

for l in open(file_name):
    fields = l.strip().split("\t")
    if len(fields) == 10:
        morph = fields[5]
        # annotate non-singular verbs in present as Plural
        if "Tense=Pres" in morph and "VerbForm=Fin" in morph and "Number=Sing" not in morph:
            morph = morph + "|Number=Plur"
            s_m = morph.split("|")
            s_m.sort()
            morph = "|".join(s_m)
        elif "Number=Sing" in morph:
            feats = morph.split("|")
            # remove Person=3 annotation (since we don't have it for non-singular cases)
            feats = [f for f in feats if "Person=3" not in f]
            morph = "|".join(feats)
        print("\t".join(fields[:5] + [morph, ] + fields[6:]))
    else:
        print(l.strip())
colorlessgreenRNNs-main
src/data/preprocess_EnglishUD_morph.py
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

import argparse
import logging
from collections import defaultdict
from random import shuffle

from data import data_utils

parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str, help='Input file path')
parser.add_argument('--output', type=str, help='Output file path')
parser.add_argument('--output_dir', type=str, help='Output path for training/valid/test sets')
parser.add_argument('--vocab', type=int, default=10000, help="The size of vocabulary, default = 10K")
args = parser.parse_args()

logging.basicConfig(level=logging.INFO)


def create_vocab(path, vocab_size):
    counter = defaultdict(int)
    for line in data_utils.read(path):
        for word in line.replace("\n", " <eos>").split():
            counter[word] += 1
    count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))[:vocab_size]
    words = [w for (w, v) in count_pairs]
    print(len(counter), count_pairs[vocab_size - 1])
    w2idx = dict(zip(words, range(len(words))))
    idx2w = dict(zip(range(len(words)), words))
    return w2idx, idx2w


def convert_text(input_path, output_path, vocab):
    with open(output_path, 'w') as output:
        for line in data_utils.read(input_path):
            words = [filter_word(word, vocab) for word in line.replace("\n", " <eos>").split()]
            output.write(" ".join(words) + "\n")
        output.close()


def convert_line(line, vocab):
    return [filter_word(word, vocab) for word in line.replace("\n", " <eos>").split()]


def word_to_idx(word, vocab):
    if word in vocab:
        return vocab[word]
    else:
        return vocab["<unk>"]


def filter_word(word, vocab):
    if word in vocab:
        return word
    else:
        return "<unk>"


def create_corpus(input_path, output_path, vocab):
    """ Split data to create training, validation and test corpus """
    nlines = 0
    f_train = open(output_path + "/train.txt", 'w')
    f_valid = open(output_path + "/valid.txt", 'w')
    f_test = open(output_path + "/test.txt", 'w')

    train = []
    for line in data_utils.read(input_path):
        if nlines % 10 == 0:
            f_valid.write(" ".join(convert_line(line, vocab)) + "\n")
        elif nlines % 10 == 1:
            f_test.write(" ".join(convert_line(line, vocab)) + "\n")
        else:
            train.append(" ".join(convert_line(line, vocab)) + "\n")
        nlines += 1

    shuffle(train)
    f_train.writelines(train)

    f_train.close()
    f_valid.close()
    f_test.close()


w2idx, idx2w = create_vocab(args.input, args.vocab)
#convert_text(args.input, args.output, w2idx)
create_corpus(args.input, args.output_dir, w2idx)
colorlessgreenRNNs-main
src/data/data_vocab_prep.py
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

import gzip
import logging


def read_gzip_stream(path):
    with gzip.open(path, 'rt', encoding="UTF-8") as f:
        for line in f:
            yield line


def read_text_stream(path):
    with open(path, 'r', encoding="UTF-8") as f:
        for line in f:
            yield line


def read(path):
    if path.endswith(".gz"):
        logging.info("Reading GZIP file")
        return read_gzip_stream(path)
    else:
        return read_text_stream(path)
colorlessgreenRNNs-main
src/data/data_utils.py
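A short, hedged example of data_utils.read, which transparently handles plain-text and gzip input; the corpus path is a hypothetical placeholder.

import itertools

from data import data_utils

# Print the first three lines; read() yields lines lazily from either a .gz
# archive or a plain-text file.
for line in itertools.islice(data_utils.read("corpus.txt.gz"), 3):
    print(line.rstrip("\n"))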
# Copyright (c) 2018-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # import conll_utils import tree_module as tm def remove_segmented_morphemes_hebrew(t): for start, end, token in t.fused_nodes: # assert start + 1 == end, t # don't need to change anything if all(not n.word.startswith("_") and not n.word.endswith("_") for n in t.nodes[start - 1:end]): # print(start, end, token) continue tokens_separated = "" for n in t.nodes[start - 1:end]: if not n.word.startswith("_") and not n.word.endswith("_"): start = start + 1 tokens_separated = tokens_separated + n.word else: break # print("tokens sep", tokens_separated) head = None for n in t.nodes[start - 1:end]: # print(start-1, end-1) # print(n.head_id) if n.head_id > end or n.head_id < start: # in two sentences two parts of a word had two different heads # in 20 cases several parts of a word had the same head - annotated with 'fixed' dependency # assert head is None, (t, t.fused_nodes, start, end, t.nodes[start]) # if head is not None and head.head_id == n.head_id: # print("fixed") head = n assert head is not None, (t, t.fused_nodes, start, end, t.nodes[start]) # print(start - 1, end) # print("head", head) merged_part = token[len(tokens_separated):] # print("merged part", ) if merged_part == "": start = start - 1 else: t.nodes[start - 1].word = token[len(tokens_separated):] t.nodes[start - 1].lemma = head.lemma t.nodes[start - 1].pos = head.pos t.nodes[start - 1].morph = head.morph t.nodes[start - 1].dep_label = head.dep_label # print(t.nodes[start - 1].head_id) for i in range(end - start): if t.nodes[start].dep_label == "nmod:poss": t.nodes[start - 1].morph = t.nodes[start - 1].morph + "|Poss=Yes" # print(i) if len(t.children(t.nodes[start])) != 0: for c in t.children(t.nodes[start]): c.head_id = t.nodes[start - 1].index t.arcs.remove(tm.Arc(child=c, head=t.nodes[start], direction=c.dir)) t.arcs.append(tm.Arc(child=c, head=t.nodes[start - 1], direction=c.dir)) assert len(t.children(t.nodes[start])) == 0, (t, start, end, token, i, t.arcs) t.remove_node(t.nodes[start]) # print(t) # important, after removal of other nodes so that their dependencies get attached first to the right head t.nodes[start - 1].head_id = head.head_id t.fused_nodes = [] path = "/private/home/gulordava/edouard_data/hewiki/hebrew.conllu" trees = tm.load_trees_from_conll(path) for i, t in enumerate(trees): # in place remove_segmented_morphemes_hebrew(t) f_trees_new = open(path + "_new", "w") for t in trees: f_trees_new.write(t.pprint(conll_utils.UD_CONLL_CONFIG) + "\n") #print(t.pprint(conll_utils.UD_CONLL_CONFIG))
colorlessgreenRNNs-main
src/data/hebrew/preprocess_HebrewUD_morph.py
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

import sys

file_name = sys.argv[1]

for l in open(file_name):
    fields = l.strip().split("\t")
    if len(fields) == 10:
        morph = fields[5]
        fine_tag = fields[4]
        if "NN+POS+PRP" in fine_tag:
            morph = morph + "|Poss=Yes"
        print("\t".join(fields[:5] + [morph, ] + fields[6:]))
    else:
        print(l.strip())
colorlessgreenRNNs-main
src/data/hebrew/add_poss_wiki_annotation.py
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

import sys

file_name = sys.argv[1]

for l in open(file_name):
    fields = l.strip().split("\t")
    if len(fields) == 10:
        morph = fields[5]
        feats = morph.split("|")
        feats = [f for f in feats if "HebBi" not in f and "HebCo" not in f and "Voice" not in f]
        morph = "|".join(feats)
        print("\t".join(fields[:5] + [morph, ] + fields[6:]))
    else:
        print(l.strip())
colorlessgreenRNNs-main
src/data/hebrew/remove_binyanim.py
# Copyright (c) Meta Platforms, Inc. and affiliates. import logging from dataclasses import dataclass, field from math import sqrt from typing import List, Optional, Union import torch import torch.nn as nn logger: logging.Logger = logging.getLogger(__name__) @dataclass class MtlConfigs: mtl_model: str = "att_sp" # consider using enum num_task_experts: int = 1 num_shared_experts: int = 1 expert_out_dims: List[List[int]] = field(default_factory=list) self_exp_res_connect: bool = False expert_archs: Optional[List[List[int]]] = None gate_archs: Optional[List[List[int]]] = None num_experts: Optional[int] = None @dataclass(frozen=True) class ArchInputs: num_task: int = 3 task_mlp: List[int] = field(default_factory=list) mtl_configs: Optional[MtlConfigs] = field(default=None) # Parameters related to activation function activation_type: str = "RELU" class AdaTTSp(nn.Module): """ paper title: "AdaTT: Adaptive Task-to-Task Fusion Network for Multitask Learning in Recommendations" paper link: https://doi.org/10.1145/3580305.3599769 Call Args: inputs: inputs is a tensor of dimension [batch_size, self.num_tasks, self.input_dim]. Experts in the same module share the same input. outputs dimensions: [B, T, D_out] Example:: AdaTTSp( input_dim=256, expert_out_dims=[[128, 128]], num_tasks=8, num_task_experts=2, self_exp_res_connect=True, ) """ def __init__( self, input_dim: int, expert_out_dims: List[List[int]], num_tasks: int, num_task_experts: int, self_exp_res_connect: bool = True, activation: str = "RELU", ) -> None: super().__init__() if len(expert_out_dims) == 0: logger.warning( "AdaTTSp is noop! size of expert_out_dims which is the number of " "extraction layers should be at least 1." ) return self.num_extraction_layers: int = len(expert_out_dims) self.num_tasks = num_tasks self.num_task_experts = num_task_experts self.total_experts_per_layer: int = num_task_experts * num_tasks self.self_exp_res_connect = self_exp_res_connect self.experts = torch.nn.ModuleList() self.gate_weights = torch.nn.ModuleList() self_exp_weight_list = [] layer_input_dim = input_dim for expert_out_dim in expert_out_dims: self.experts.append( torch.nn.ModuleList( [ MLP(layer_input_dim, expert_out_dim, activation) for i in range(self.total_experts_per_layer) ] ) ) self.gate_weights.append( torch.nn.ModuleList( [ torch.nn.Sequential( torch.nn.Linear( layer_input_dim, self.total_experts_per_layer ), torch.nn.Softmax(dim=-1), ) for _ in range(num_tasks) ] ) ) # self.gate_weights is of shape L X T, after we loop over all layers. if self_exp_res_connect and num_task_experts > 1: params = torch.empty(num_tasks, num_task_experts) scale = sqrt(1.0 / num_task_experts) torch.nn.init.uniform_(params, a=-scale, b=scale) self_exp_weight_list.append(torch.nn.Parameter(params)) layer_input_dim = expert_out_dim[-1] self.self_exp_weights = nn.ParameterList(self_exp_weight_list) def forward( self, inputs: torch.Tensor, ) -> torch.Tensor: for layer_i in range(self.num_extraction_layers): # all task expert outputs. 
experts_out = torch.stack( [ expert(inputs[:, expert_i // self.num_task_experts, :]) for expert_i, expert in enumerate(self.experts[layer_i]) ], dim=1, ) # [B * E (total experts) * D_out] gates = torch.stack( [ gate_weight( inputs[:, task_i, :] ) # W ([B, D]) * S ([D, E]) -> G, dim is [B, E] for task_i, gate_weight in enumerate(self.gate_weights[layer_i]) ], dim=1, ) # [B, T, E] fused_experts_out = torch.bmm( gates, experts_out, ) # [B, T, E] X [B * E (total experts) * D_out] -> [B, T, D_out] if self.self_exp_res_connect: if self.num_task_experts > 1: # residual from the linear combination of tasks' own experts. self_exp_weighted = torch.einsum( "te,bted->btd", self.self_exp_weights[layer_i], experts_out.view( experts_out.size(0), self.num_tasks, self.num_task_experts, -1, ), # [B * E (total experts) * D_out] -> [B * T * E_task * D_out] ) # bmm: [T * E_task] X [B * T * E_task * D_out] -> [B, T, D_out] fused_experts_out = ( fused_experts_out + self_exp_weighted ) # [B, T, D_out] else: fused_experts_out = fused_experts_out + experts_out inputs = fused_experts_out return inputs class AdaTTWSharedExps(nn.Module): """ paper title: "AdaTT: Adaptive Task-to-Task Fusion Network for Multitask Learning in Recommendations" paper link: https://doi.org/10.1145/3580305.3599769 Call Args: inputs: inputs is a tensor of dimension [batch_size, self.num_tasks, self.input_dim]. Experts in the same module share the same input. outputs dimensions: [B, T, D_out] Example:: AdaTTWSharedExps( input_dim=256, expert_out_dims=[[128, 128]], num_tasks=8, num_shared_experts=1, num_task_experts=2, self_exp_res_connect=True, ) """ def __init__( self, input_dim: int, expert_out_dims: List[List[int]], num_tasks: int, num_shared_experts: int, num_task_experts: Optional[int] = None, num_task_expert_list: Optional[List[int]] = None, # Set num_task_expert_list for experimenting with a flexible number of # experts for different task_specific units. self_exp_res_connect: bool = True, activation: str = "RELU", ) -> None: super().__init__() if len(expert_out_dims) == 0: logger.warning( "AdaTTWSharedExps is noop! size of expert_out_dims which is the number of " "extraction layers should be at least 1." ) return self.num_extraction_layers: int = len(expert_out_dims) self.num_tasks = num_tasks assert (num_task_experts is None) ^ (num_task_expert_list is None) if num_task_experts is not None: self.num_expert_list = [num_task_experts for _ in range(num_tasks)] else: # num_expert_list is guaranteed to be not None here. # pyre-ignore self.num_expert_list: List[int] = num_task_expert_list self.num_expert_list.append(num_shared_experts) self.total_experts_per_layer: int = sum(self.num_expert_list) self.self_exp_res_connect = self_exp_res_connect self.experts = torch.nn.ModuleList() self.gate_weights = torch.nn.ModuleList() layer_input_dim = input_dim for layer_i, expert_out_dim in enumerate(expert_out_dims): self.experts.append( torch.nn.ModuleList( [ MLP(layer_input_dim, expert_out_dim, activation) for i in range(self.total_experts_per_layer) ] ) ) num_full_active_modules = ( num_tasks if layer_i == self.num_extraction_layers - 1 else num_tasks + 1 ) self.gate_weights.append( torch.nn.ModuleList( [ torch.nn.Sequential( torch.nn.Linear( layer_input_dim, self.total_experts_per_layer ), torch.nn.Softmax(dim=-1), ) for _ in range(num_full_active_modules) ] ) ) # self.gate_weights is a 2d module list of shape L X T (+ 1), after we loop over all layers. 
layer_input_dim = expert_out_dim[-1] self_exp_weight_list = [] if self_exp_res_connect: # If any tasks have number of experts not equal to 1, we learn linear combinations of native experts. if any(num_experts != 1 for num_experts in self.num_expert_list): for i in range(num_tasks + 1): num_full_active_layer = ( self.num_extraction_layers - 1 if i == num_tasks else self.num_extraction_layers ) params = torch.empty( num_full_active_layer, self.num_expert_list[i], ) scale = sqrt(1.0 / self.num_expert_list[i]) torch.nn.init.uniform_(params, a=-scale, b=scale) self_exp_weight_list.append(torch.nn.Parameter(params)) self.self_exp_weights = nn.ParameterList(self_exp_weight_list) self.expert_input_idx: List[int] = [] for i in range(num_tasks + 1): self.expert_input_idx.extend([i for _ in range(self.num_expert_list[i])]) def forward( self, inputs: torch.Tensor, ) -> torch.Tensor: for layer_i in range(self.num_extraction_layers): num_full_active_modules = ( self.num_tasks if layer_i == self.num_extraction_layers - 1 else self.num_tasks + 1 ) # all task expert outputs. experts_out = torch.stack( [ expert(inputs[:, self.expert_input_idx[expert_i], :]) for expert_i, expert in enumerate(self.experts[layer_i]) ], dim=1, ) # [B * E (total experts) * D_out] # gate weights for fusing all experts. gates = torch.stack( [ gate_weight(inputs[:, i, :]) # [B, D] * [D, E] -> [B, E] for i, gate_weight in enumerate(self.gate_weights[layer_i]) ], dim=1, ) # [B, T (+ 1), E] # add all expert gate weights with native expert weights. if self.self_exp_res_connect: prev_idx = 0 use_unit_naive_weights = all( num_expert == 1 for num_expert in self.num_expert_list ) for module_i in range(num_full_active_modules): next_idx = self.num_expert_list[module_i] + prev_idx if use_unit_naive_weights: gates[:, module_i, prev_idx:next_idx] += torch.ones( 1, self.num_expert_list[module_i] ) else: gates[:, module_i, prev_idx:next_idx] += self.self_exp_weights[ module_i ][layer_i].unsqueeze(0) prev_idx = next_idx fused_experts_out = torch.bmm( gates, experts_out, ) # [B, T (+ 1), E (total)] X [B * E (total) * D_out] -> [B, T (+ 1), D_out] inputs = fused_experts_out return inputs class MLP(nn.Module): """ Args: input_dim (int): mlp_arch (List[int]): activation (str): Call Args: input (torch.Tensor): tensor of shape (B, I) Returns: output (torch.Tensor): MLP result Example:: mlp = MLP(100, [100]) """ def __init__( self, input_dim: int, mlp_arch: List[int], activation: str = "RELU", bias: bool = True, ) -> None: super().__init__() mlp_net = [] for mlp_dim in mlp_arch: mlp_net.append( nn.Linear(in_features=input_dim, out_features=mlp_dim, bias=bias) ) if activation == "RELU": mlp_net.append(nn.ReLU()) else: raise ValueError("only RELU is included currently") input_dim = mlp_dim self.mlp_net = nn.Sequential(*mlp_net) def forward( self, input: torch.Tensor, ) -> torch.Tensor: return self.mlp_net(input) class SharedBottom(nn.Module): def __init__( self, input_dim: int, hidden_dims: List[int], num_tasks: int, activation: str ) -> None: super().__init__() self.bottom_projection = MLP(input_dim, hidden_dims, activation) self.num_tasks: int = num_tasks def forward( self, input: torch.Tensor, ) -> torch.Tensor: # input dim [T, D_in] # output dim [B, T, D_out] return self.bottom_projection(input).unsqueeze(1).expand(-1, self.num_tasks, -1) class CrossStitch(torch.nn.Module): """ cross-stitch paper title: "Cross-stitch Networks for Multi-task Learning". 
paper link: https://openaccess.thecvf.com/content_cvpr_2016/papers/Misra_Cross-Stitch_Networks_for_CVPR_2016_paper.pdf """ def __init__( self, input_dim: int, expert_archs: List[List[int]], num_tasks: int, activation: str = "RELU", ) -> None: super().__init__() self.num_layers: int = len(expert_archs) self.num_tasks = num_tasks self.experts = torch.nn.ModuleList() self.stitchs = torch.nn.ModuleList() expert_input_dim = input_dim for layer_ind in range(self.num_layers): self.experts.append( torch.nn.ModuleList( [ MLP( expert_input_dim, expert_archs[layer_ind], activation, ) for _ in range(self.num_tasks) ] ) ) self.stitchs.append( torch.nn.Linear( self.num_tasks, self.num_tasks, bias=False, ) ) expert_input_dim = expert_archs[layer_ind][-1] def forward(self, input: torch.Tensor) -> torch.Tensor: """ input dim [B, T, D_in] output dim [B, T, D_out] """ x = input for layer_ind in range(self.num_layers): expert_out = torch.stack( [ expert(x[:, expert_ind, :]) # [B, D_out] for expert_ind, expert in enumerate(self.experts[layer_ind]) ], dim=1, ) # [B, T, D_out] stitch_out = self.stitchs[layer_ind](expert_out.transpose(1, 2)).transpose( 1, 2 ) # [B, T, D_out] x = stitch_out return x class MLMMoE(torch.nn.Module): """ Multi-level Multi-gate Mixture of Experts This code implements a multi-level extension of the MMoE model, as described in the paper titled "Modeling Task Relationships in Multi-task Learning with Multi-gate Mixture-of-Experts". Paper link: https://dl.acm.org/doi/10.1145/3219819.3220007 To run the original MMoE, use only one fusion level. For example, set expert_archs as [[96, 48]]. To configure multiple fusion levels, set expert_archs as something like [[96], [48]]. """ def __init__( self, input_dim: int, expert_archs: List[List[int]], gate_archs: List[List[int]], num_tasks: int, num_experts: int, activation: str = "RELU", ) -> None: super().__init__() self.num_layers: int = len(expert_archs) self.num_tasks: int = num_tasks self.num_experts = num_experts self.experts = torch.nn.ModuleList() self.gates = torch.nn.ModuleList() expert_input_dim = input_dim for layer_ind in range(self.num_layers): self.experts.append( torch.nn.ModuleList( [ MLP( expert_input_dim, expert_archs[layer_ind], activation, ) for _ in range(self.num_experts) ] ) ) self.gates.append( torch.nn.ModuleList( [ torch.nn.Sequential( MLP( input_dim, gate_archs[layer_ind], activation, ), torch.nn.Linear( gate_archs[layer_ind][-1] if gate_archs[layer_ind] else input_dim, self.num_experts, ), torch.nn.Softmax(dim=-1), ) for _ in range( self.num_experts if layer_ind < self.num_layers - 1 else self.num_tasks ) ] ) ) expert_input_dim = expert_archs[layer_ind][-1] def forward(self, input: torch.Tensor) -> torch.Tensor: """ input dim [B, D_in] output dim [B, T, D_out] """ x = input.unsqueeze(1).expand([-1, self.num_experts, -1]) # [B, E, D_in] for layer_ind in range(self.num_layers): expert_out = torch.stack( [ expert(x[:, expert_ind, :]) # [B, D_out] for expert_ind, expert in enumerate(self.experts[layer_ind]) ], dim=1, ) # [B, E, D_out] gate_out = torch.stack( [ gate(input) # [B, E] for gate_ind, gate in enumerate(self.gates[layer_ind]) ], dim=1, ) # [B, T, E] gated_out = torch.matmul(gate_out, expert_out) # [B, T, D_out] x = gated_out return x class PLE(nn.Module): """ PLE module is based on the paper "Progressive Layered Extraction (PLE): A Novel Multi-Task Learning (MTL) Model for Personalized Recommendations". 
Paper link: https://doi.org/10.1145/3383313.3412236 PLE aims to address negative transfer and seesaw phenomenon in multi-task learning. PLE distinguishes shared and task-specic experts explicitly and adopts a progressive routing mechanism to extract and separate deeper semantic knowledge gradually. When there is only one extraction layer, PLE falls back to CGC. Args: input_dim: input embedding dimension expert_out_dims (List[List[int]]): dimension of an expert's output at each layer. This list's length equals the number of extraction layers num_tasks: number of tasks num_task_experts: number of experts for each task module at each layer. * If the number of experts is the same for all tasks, use an integer here. * If the number of experts is different for different tasks, use a list of integers here. num_shared_experts: number of experts for shared module at each layer Call Args: inputs: inputs is a tensor of dimension [batch_size, self.num_tasks + 1, self.input_dim]. Task specific module inputs are placed first, followed by shared module input. (Experts in the same module share the same input) Returns: output: output of extraction layer to be feed into task-specific tower networks. It's a list of tensors, each of which is for one task. Example:: PLE( input_dim=256, expert_out_dims=[[128]], num_tasks=8, num_task_experts=2, num_shared_experts=2, ) """ def __init__( self, input_dim: int, expert_out_dims: List[List[int]], num_tasks: int, num_task_experts: Union[int, List[int]], num_shared_experts: int, activation: str = "RELU", ) -> None: super().__init__() if len(expert_out_dims) == 0: raise ValueError("Expert out dims cannot be empty list") self.num_extraction_layers: int = len(expert_out_dims) self.num_tasks = num_tasks self.num_task_experts = num_task_experts if type(num_task_experts) is int: self.total_experts_per_layer: int = ( num_task_experts * num_tasks + num_shared_experts ) else: self.total_experts_per_layer: int = ( sum(num_task_experts) + num_shared_experts ) assert len(num_task_experts) == num_tasks self.num_shared_experts = num_shared_experts self.experts = nn.ModuleList() expert_input_dim = input_dim for expert_out_dim in expert_out_dims: self.experts.append( nn.ModuleList( [ MLP(expert_input_dim, expert_out_dim, activation) for i in range(self.total_experts_per_layer) ] ) ) expert_input_dim = expert_out_dim[-1] self.gate_weights = nn.ModuleList() selector_dim = input_dim for i in range(self.num_extraction_layers): expert_out_dim = expert_out_dims[i] # task specific gates. if type(num_task_experts) is int: gate_weights_in_layer = nn.ModuleList( [ nn.Sequential( nn.Linear( selector_dim, num_task_experts + num_shared_experts ), nn.Softmax(dim=-1), ) for i in range(num_tasks) ] ) else: gate_weights_in_layer = nn.ModuleList( [ nn.Sequential( nn.Linear( selector_dim, num_task_experts[i] + num_shared_experts ), nn.Softmax(dim=-1), ) for i in range(num_tasks) ] ) # Shared module gates. Note last layer has only task specific module gates for task towers later. 
if i != self.num_extraction_layers - 1: gate_weights_in_layer.append( nn.Sequential( nn.Linear(selector_dim, self.total_experts_per_layer), nn.Softmax(dim=-1), ) ) self.gate_weights.append(gate_weights_in_layer) selector_dim = expert_out_dim[-1] if type(self.num_task_experts) is list: experts_idx_2_task_idx = [] for i in range(num_tasks): # pyre-ignore experts_idx_2_task_idx += [i] * self.num_task_experts[i] experts_idx_2_task_idx += [num_tasks] * num_shared_experts self.experts_idx_2_task_idx: List[int] = experts_idx_2_task_idx def forward( self, inputs: torch.Tensor, ) -> torch.Tensor: for layer_i in range(self.num_extraction_layers): # all task specific and shared experts' outputs. # Note first num_task_experts * num_tasks experts are task specific, # last num_shared_experts experts are shared. if type(self.num_task_experts) is int: experts_out = torch.stack( [ self.experts[layer_i][expert_i]( inputs[ :, # pyre-ignore min(expert_i // self.num_task_experts, self.num_tasks), :, ] ) for expert_i in range(self.total_experts_per_layer) ], dim=1, ) # [B * E (num experts) * D_out] else: experts_out = torch.stack( [ self.experts[layer_i][expert_i]( inputs[ :, self.experts_idx_2_task_idx[expert_i], :, ] ) for expert_i in range(self.total_experts_per_layer) ], dim=1, ) # [B * E (num experts) * D_out] gates_out = [] # Loop over all the gates in the layer. Note for the last layer, # there is no shared gating network. prev_idx = 0 for gate_i in range(len(self.gate_weights[layer_i])): # This is for shared gating network, which uses all the experts. if gate_i == self.num_tasks: selected_matrix = experts_out # S_share # This is for task gating network, which only uses shared and its own experts. else: if type(self.num_task_experts) is int: task_experts_out = experts_out[ :, # pyre-ignore (gate_i * self.num_task_experts) : (gate_i + 1) # pyre-ignore * self.num_task_experts, :, ] # task specific experts else: # pyre-ignore next_idx = prev_idx + self.num_task_experts[gate_i] task_experts_out = experts_out[ :, prev_idx:next_idx, :, ] # task specific experts prev_idx = next_idx shared_experts_out = experts_out[ :, -self.num_shared_experts :, :, ] # shared experts selected_matrix = torch.concat( [task_experts_out, shared_experts_out], dim=1 ) # S_k with dimension of [B * E_selected * D_out] gates_out.append( torch.bmm( self.gate_weights[layer_i][gate_i]( inputs[:, gate_i, :] ).unsqueeze(dim=1), selected_matrix, ) # W * S -> G # [B, 1, E_selected] X [B * E_selected * D_out] -> [B, 1, D_out] ) inputs = torch.cat(gates_out, dim=1) # [B, T, D_out] return inputs class CentralTaskArch(nn.Module): def __init__( self, mtl_configs: MtlConfigs, opts: ArchInputs, input_dim: int, ) -> None: super().__init__() self.opts = opts assert len(mtl_configs.expert_out_dims) > 0, "expert_out_dims is empty." self.num_tasks: int = opts.num_task self.mtl_model: str = mtl_configs.mtl_model logger.info(f"mtl_model is {mtl_configs.mtl_model}") expert_out_dims: List[List[int]] = mtl_configs.expert_out_dims # AdaTT-sp # consider consolidating the implementation of att_sp and att_g. 
if mtl_configs.mtl_model == "att_sp": self.mtl_arch: nn.Module = AdaTTSp( input_dim=input_dim, expert_out_dims=expert_out_dims, num_tasks=self.num_tasks, num_task_experts=mtl_configs.num_task_experts, self_exp_res_connect=mtl_configs.self_exp_res_connect, activation=opts.activation_type, ) # AdaTT-general elif mtl_configs.mtl_model == "att_g": self.mtl_arch: nn.Module = AdaTTWSharedExps( input_dim=input_dim, expert_out_dims=expert_out_dims, num_tasks=self.num_tasks, num_task_experts=mtl_configs.num_task_experts, num_shared_experts=mtl_configs.num_shared_experts, self_exp_res_connect=mtl_configs.self_exp_res_connect, activation=opts.activation_type, ) # PLE elif mtl_configs.mtl_model == "ple": self.mtl_arch: nn.Module = PLE( input_dim=input_dim, expert_out_dims=expert_out_dims, num_tasks=self.num_tasks, num_task_experts=mtl_configs.num_task_experts, num_shared_experts=mtl_configs.num_shared_experts, activation=opts.activation_type, ) # cross-stitch elif mtl_configs.mtl_model == "cross_st": self.mtl_arch: nn.Module = CrossStitch( input_dim=input_dim, expert_archs=mtl_configs.expert_out_dims, num_tasks=self.num_tasks, activation=opts.activation_type, ) # multi-layer MMoE or MMoE elif mtl_configs.mtl_model == "mmoe": self.mtl_arch: nn.Module = MLMMoE( input_dim=input_dim, expert_archs=mtl_configs.expert_out_dims, gate_archs=[[] for i in range(len(mtl_configs.expert_out_dims))], num_tasks=self.num_tasks, num_experts=mtl_configs.num_shared_experts, activation=opts.activation_type, ) # shared bottom elif mtl_configs.mtl_model == "share_bottom": self.mtl_arch: nn.Module = SharedBottom( input_dim, [dim for dims in expert_out_dims for dim in dims], self.num_tasks, opts.activation_type, ) else: raise ValueError("invalid model type") task_modules_input_dim = expert_out_dims[-1][-1] self.task_modules: nn.ModuleList = nn.ModuleList( [ nn.Sequential( MLP( task_modules_input_dim, self.opts.task_mlp, opts.activation_type ), torch.nn.Linear(self.opts.task_mlp[-1], 1), ) for i in range(self.num_tasks) ] ) def forward( self, task_arch_input: torch.Tensor, ) -> List[torch.Tensor]: if self.mtl_model in ["att_sp", "cross_st"]: task_arch_input = task_arch_input.unsqueeze(1).expand( -1, self.num_tasks, -1 ) elif self.mtl_model in ["att_g", "ple"]: task_arch_input = task_arch_input.unsqueeze(1).expand( -1, self.num_tasks + 1, -1 ) task_specific_outputs = self.mtl_arch(task_arch_input) task_arch_output = [ task_module(task_specific_outputs[:, i, :]) for i, task_module in enumerate(self.task_modules) ] return task_arch_output
AdaTT-main
mtl_lib.py
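# Usage sketch, not part of the original repository: a minimal forward-pass check of the
# PLE module defined in mtl_lib.py above, assuming PLE (and the MLP helper it uses) are
# importable from that file. Shapes follow the docstring: task-specific inputs come
# first, followed by the shared-module input.
import torch

ple = PLE(
    input_dim=256,
    expert_out_dims=[[128]],
    num_tasks=8,
    num_task_experts=2,
    num_shared_experts=2,
)
x = torch.randn(4, 8 + 1, 256)   # [B, T + 1, D_in]
out = ple(x)                     # [B, T, D_out]
assert out.shape == (4, 8, 128)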
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # import os import json import argparse import numpy as np class BisonEval: def __init__(self, anno, pred): if pred.getBisonIds() != anno.getBisonIds(): print('[Warning] The prediction does not' + 'cover the entire set of bison data.' + 'The evaluation is running on the {}'.format( len(pred.getBisonIds())) + 'subset from prediction file.') self.params = {'bison_ids': pred.getBisonIds()} self.anno = anno self.pred = pred def evaluate(self): accuracy = [] for bison_id in self.params['bison_ids']: accuracy.append(self.anno[bison_id]['true_image_id'] == self.pred[bison_id]) mean_accuracy = np.mean(accuracy) print("[Result] Mean BISON accuracy on {}: {:.2f}%".format( self.anno.dataset, mean_accuracy * 100) ) return mean_accuracy class Annotation: def __init__(self, anno_filepath): assert os.path.exists(anno_filepath), 'Annotation file does not exist' with open(anno_filepath) as fd: anno_results = json.load(fd) self._data = {res['bison_id']: res for res in anno_results['data']} self.dataset = "{}.{}".format(anno_results['info']['source'], anno_results['info']['split']) def getBisonIds(self): return self._data.keys() def __getitem__(self, key): return self._data[key] class Prediction: def __init__(self, pred_filepath): assert os.path.exists(pred_filepath), 'Prediction file does not exist' with open(pred_filepath) as fd: pred_results = json.load(fd) self._data = {result['bison_id']: result['predicted_image_id'] for result in pred_results} def getBisonIds(self): return self._data.keys() def __getitem__(self, key): return self._data[key] def _command_line_parser(): parser = argparse.ArgumentParser() default_anno = './annotations/bison_annotations.cocoval2014.json' default_pred = './predictions/fake_predictions.cocoval2014.json' parser.add_argument('--anno_path', default=default_anno, help='Path to the annotation file') parser.add_argument('--pred_path', default=default_pred, help='Path to the prediction file') return parser def main(args): anno = Annotation(args.anno_path) pred = Prediction(args.pred_path) bison = BisonEval(anno, pred) bison.evaluate() if __name__ == '__main__': parser = _command_line_parser() args = parser.parse_args() main(args)
binary-image-selection-main
bison_eval.py
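# Usage sketch, not part of the original repository: the JSON layouts that the
# Annotation and Prediction classes in bison_eval.py above expect, written to toy files
# (the file names and ids below are made up).
import json

anno = {
    "info": {"source": "cocoval2014", "split": "val"},
    "data": [
        {"bison_id": 0, "true_image_id": 11},
        {"bison_id": 1, "true_image_id": 22},
    ],
}
pred = [
    {"bison_id": 0, "predicted_image_id": 11},
    {"bison_id": 1, "predicted_image_id": 33},
]
with open("tiny_anno.json", "w") as f:
    json.dump(anno, f)
with open("tiny_pred.json", "w") as f:
    json.dump(pred, f)

# BisonEval(Annotation("tiny_anno.json"), Prediction("tiny_pred.json")).evaluate()
# would then report a mean BISON accuracy of 50% on this toy split.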
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import sys import hydra import torch from lib.ddp_trainer import SegmentationTrainer from lib.distributed import multi_proc_run def single_proc_run(config): if not torch.cuda.is_available(): raise Exception('No GPUs FOUND.') torch.manual_seed(config.misc.seed) torch.cuda.manual_seed(config.misc.seed) trainer = SegmentationTrainer(config) if config.train.is_train: trainer.train() else: trainer.test() @hydra.main(config_path='config', config_name='default.yaml') def main(config): # Convert to dict if config.misc.num_gpus > 1: multi_proc_run(config.misc.num_gpus, fun=single_proc_run, fun_args=(config,)) else: single_proc_run(config) if __name__ == '__main__': __spec__ = None os.environ['MKL_THREADING_LAYER'] = 'GNU' main()
ContrastiveSceneContexts-main
downstream/semseg/ddp_main.py
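# Usage sketch, not part of the original repository: ddp_main.py above is a Hydra entry
# point, so the config fields it reads (misc.num_gpus, misc.seed, train.is_train) can be
# overridden with Hydra's standard dotted syntax, assuming config/default.yaml defines
# them:
#
#   python ddp_main.py misc.num_gpus=1 train.is_train=True
#   python ddp_main.py misc.num_gpus=4 train.is_train=False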
import random import logging import numpy as np import scipy import scipy.ndimage import scipy.interpolate import torch # A sparse tensor consists of coordinates and associated features. # You must apply augmentation to both. # In 2D, flip, shear, scale, and rotation of images are coordinate transformation # color jitter, hue, etc., are feature transformations ############################## # Feature transformations ############################## class ChromaticTranslation(object): """Add random color to the image, input must be an array in [0,255] or a PIL image""" def __init__(self, trans_range_ratio=1e-1): """ trans_range_ratio: ratio of translation i.e. 255 * 2 * ratio * rand(-0.5, 0.5) """ self.trans_range_ratio = trans_range_ratio def __call__(self, coords, feats, labels): if random.random() < 0.95: tr = (np.random.rand(1, 3) - 0.5) * 255 * 2 * self.trans_range_ratio feats[:, :3] = np.clip(tr + feats[:, :3], 0, 255) return coords, feats, labels class ChromaticAutoContrast(object): def __init__(self, randomize_blend_factor=True, blend_factor=0.5): self.randomize_blend_factor = randomize_blend_factor self.blend_factor = blend_factor def __call__(self, coords, feats, labels): if random.random() < 0.2: # mean = np.mean(feats, 0, keepdims=True) # std = np.std(feats, 0, keepdims=True) # lo = mean - std # hi = mean + std lo = feats[:, :3].min(0, keepdims=True) hi = feats[:, :3].max(0, keepdims=True) assert hi.max() > 1, f"invalid color value. Color is supposed to be [0-255]" scale = 255 / (hi - lo) contrast_feats = (feats[:, :3] - lo) * scale blend_factor = random.random() if self.randomize_blend_factor else self.blend_factor feats[:, :3] = (1 - blend_factor) * feats + blend_factor * contrast_feats return coords, feats, labels class ChromaticJitter(object): def __init__(self, std=0.01): self.std = std def __call__(self, coords, feats, labels): if random.random() < 0.95: noise = np.random.randn(feats.shape[0], 3) noise *= self.std * 255 feats[:, :3] = np.clip(noise + feats[:, :3], 0, 255) return coords, feats, labels class HueSaturationTranslation(object): @staticmethod def rgb_to_hsv(rgb): # Translated from source of colorsys.rgb_to_hsv # r,g,b should be a numpy arrays with values between 0 and 255 # rgb_to_hsv returns an array of floats between 0.0 and 1.0. rgb = rgb.astype('float') hsv = np.zeros_like(rgb) # in case an RGBA array was passed, just copy the A channel hsv[..., 3:] = rgb[..., 3:] r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2] maxc = np.max(rgb[..., :3], axis=-1) minc = np.min(rgb[..., :3], axis=-1) hsv[..., 2] = maxc mask = maxc != minc hsv[mask, 1] = (maxc - minc)[mask] / maxc[mask] rc = np.zeros_like(r) gc = np.zeros_like(g) bc = np.zeros_like(b) rc[mask] = (maxc - r)[mask] / (maxc - minc)[mask] gc[mask] = (maxc - g)[mask] / (maxc - minc)[mask] bc[mask] = (maxc - b)[mask] / (maxc - minc)[mask] hsv[..., 0] = np.select([r == maxc, g == maxc], [bc - gc, 2.0 + rc - bc], default=4.0 + gc - rc) hsv[..., 0] = (hsv[..., 0] / 6.0) % 1.0 return hsv @staticmethod def hsv_to_rgb(hsv): # Translated from source of colorsys.hsv_to_rgb # h,s should be a numpy arrays with values between 0.0 and 1.0 # v should be a numpy array with values between 0.0 and 255.0 # hsv_to_rgb returns an array of uints between 0 and 255. 
rgb = np.empty_like(hsv) rgb[..., 3:] = hsv[..., 3:] h, s, v = hsv[..., 0], hsv[..., 1], hsv[..., 2] i = (h * 6.0).astype('uint8') f = (h * 6.0) - i p = v * (1.0 - s) q = v * (1.0 - s * f) t = v * (1.0 - s * (1.0 - f)) i = i % 6 conditions = [s == 0.0, i == 1, i == 2, i == 3, i == 4, i == 5] rgb[..., 0] = np.select(conditions, [v, q, p, p, t, v], default=v) rgb[..., 1] = np.select(conditions, [v, v, v, q, p, p], default=t) rgb[..., 2] = np.select(conditions, [v, p, t, v, v, q], default=p) return rgb.astype('uint8') def __init__(self, hue_max, saturation_max): self.hue_max = hue_max self.saturation_max = saturation_max def __call__(self, coords, feats, labels): # Assume feat[:, :3] is rgb hsv = HueSaturationTranslation.rgb_to_hsv(feats[:, :3]) hue_val = (random.random() - 0.5) * 2 * self.hue_max sat_ratio = 1 + (random.random() - 0.5) * 2 * self.saturation_max hsv[..., 0] = np.remainder(hue_val + hsv[..., 0] + 1, 1) hsv[..., 1] = np.clip(sat_ratio * hsv[..., 1], 0, 1) feats[:, :3] = np.clip(HueSaturationTranslation.hsv_to_rgb(hsv), 0, 255) return coords, feats, labels ############################## # Coordinate transformations ############################## class RandomDropout(object): def __init__(self, dropout_ratio=0.2, dropout_application_ratio=0.5): """ upright_axis: axis index among x,y,z, i.e. 2 for z """ self.dropout_ratio = dropout_ratio self.dropout_application_ratio = dropout_application_ratio def __call__(self, coords, feats, labels): if random.random() < self.dropout_ratio: N = len(coords) inds = np.random.choice(N, int(N * (1 - self.dropout_ratio)), replace=False) return coords[inds], feats[inds], labels[inds] return coords, feats, labels class RandomHorizontalFlip(object): def __init__(self, upright_axis, is_temporal): """ upright_axis: axis index among x,y,z, i.e. 2 for z """ self.is_temporal = is_temporal self.D = 4 if is_temporal else 3 self.upright_axis = {'x': 0, 'y': 1, 'z': 2}[upright_axis.lower()] # Use the rest of axes for flipping. self.horz_axes = set(range(self.D)) - set([self.upright_axis]) def __call__(self, coords, feats, labels): if random.random() < 0.95: for curr_ax in self.horz_axes: if random.random() < 0.5: coord_max = np.max(coords[:, curr_ax]) coords[:, curr_ax] = coord_max - coords[:, curr_ax] return coords, feats, labels class ElasticDistortion: def __init__(self, distortion_params): self.distortion_params = distortion_params def elastic_distortion(self, coords, feats, labels, granularity, magnitude): """Apply elastic distortion on sparse coordinate space. pointcloud: numpy array of (number of points, at least 3 spatial dims) granularity: size of the noise grid (in same scale[m/cm] as the voxel grid) magnitude: noise multiplier """ blurx = np.ones((3, 1, 1, 1)).astype('float32') / 3 blury = np.ones((1, 3, 1, 1)).astype('float32') / 3 blurz = np.ones((1, 1, 3, 1)).astype('float32') / 3 coords_min = coords.min(0) # Create Gaussian noise tensor of the size given by granularity. noise_dim = ((coords - coords_min).max(0) // granularity).astype(int) + 3 noise = np.random.randn(*noise_dim, 3).astype(np.float32) # Smoothing. for _ in range(2): noise = scipy.ndimage.filters.convolve(noise, blurx, mode='constant', cval=0) noise = scipy.ndimage.filters.convolve(noise, blury, mode='constant', cval=0) noise = scipy.ndimage.filters.convolve(noise, blurz, mode='constant', cval=0) # Trilinear interpolate noise filters for each spatial dimensions. 
ax = [ np.linspace(d_min, d_max, d) for d_min, d_max, d in zip(coords_min - granularity, coords_min + granularity * (noise_dim - 2), noise_dim) ] interp = scipy.interpolate.RegularGridInterpolator(ax, noise, bounds_error=0, fill_value=0) coords += interp(coords) * magnitude return coords, feats, labels def __call__(self, coords, feats, labels): if self.distortion_params is not None: if random.random() < 0.95: for granularity, magnitude in self.distortion_params: coords, feats, labels = self.elastic_distortion(coords, feats, labels, granularity, magnitude) return coords, feats, labels class Compose(object): """Composes several transforms together.""" def __init__(self, transforms): self.transforms = transforms def __call__(self, *args): for t in self.transforms: args = t(*args) return args class cfl_collate_fn_factory: """Generates collate function for coords, feats, labels. Args: limit_numpoints: If 0 or False, does not alter batch size. If positive integer, limits batch size so that the number of input coordinates is below limit_numpoints. """ def __init__(self, limit_numpoints): self.limit_numpoints = limit_numpoints def __call__(self, list_data): coords, feats, labels = list(zip(*list_data)) coords_batch, feats_batch, labels_batch = [], [], [] batch_id = 0 batch_num_points = 0 for batch_id, _ in enumerate(coords): num_points = coords[batch_id].shape[0] batch_num_points += num_points if self.limit_numpoints and batch_num_points > self.limit_numpoints: num_full_points = sum(len(c) for c in coords) num_full_batch_size = len(coords) logging.warning( f'\t\tCannot fit {num_full_points} points into {self.limit_numpoints} points ' f'limit. Truncating batch size at {batch_id} out of {num_full_batch_size} with {batch_num_points - num_points}.' ) break # coords_batch.append( # torch.cat((torch.from_numpy( # coords[batch_id]).int(), torch.ones(num_points, 1).int() * batch_id), 1)) coords_batch.append( torch.cat((torch.ones(num_points, 1).int() * batch_id, torch.from_numpy( coords[batch_id]).int()), 1)) feats_batch.append(torch.from_numpy(feats[batch_id])) labels_batch.append(torch.from_numpy(labels[batch_id]).int()) batch_id += 1 # Concatenate all lists coords_batch = torch.cat(coords_batch, 0).int() feats_batch = torch.cat(feats_batch, 0).float() labels_batch = torch.cat(labels_batch, 0).int() return coords_batch, feats_batch, labels_batch class cflt_collate_fn_factory: """Generates collate function for coords, feats, labels, point_clouds, transformations. Args: limit_numpoints: If 0 or False, does not alter batch size. If positive integer, limits batch size so that the number of input coordinates is below limit_numpoints. """ def __init__(self, limit_numpoints): self.limit_numpoints = limit_numpoints def __call__(self, list_data): coords, feats, labels, transformations = list(zip(*list_data)) cfl_collate_fn = cfl_collate_fn_factory(limit_numpoints=self.limit_numpoints) coords_batch, feats_batch, labels_batch = cfl_collate_fn(list(zip(coords, feats, labels))) num_truncated_batch = coords_batch[:, -1].max().item() + 1 batch_id = 0 transformations_batch = [] for transformation in transformations: if batch_id >= num_truncated_batch: break transformations_batch.append(torch.from_numpy(transformation).float()) batch_id += 1 transformations_batch = torch.stack(transformations_batch, 0) return coords_batch, feats_batch, labels_batch, transformations_batch
ContrastiveSceneContexts-main
downstream/semseg/datasets/transforms.py
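# Usage sketch, not part of the original repository: the transforms in transforms.py
# above all take and return (coords, feats, labels), so Compose chains them directly
# (dummy point cloud below, with RGB features in [0, 255]).
import numpy as np

coords = (np.random.rand(1000, 3) * 10.0).astype(np.float32)
feats = (np.random.rand(1000, 3) * 255).astype(np.float32)
labels = np.random.randint(0, 20, size=1000)

aug = Compose([
    RandomHorizontalFlip(upright_axis='z', is_temporal=False),
    ChromaticAutoContrast(),
    ChromaticJitter(std=0.01),
])
coords, feats, labels = aug(coords, feats, labels)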
#from lib.datasets import synthia #from lib.datasets import shapenet from datasets import stanford from datasets import scannet DATASETS = [] def add_datasets(module): DATASETS.extend([getattr(module, a) for a in dir(module) if 'Dataset' in a]) add_datasets(stanford) #add_datasets(synthia) add_datasets(scannet) #add_datasets(shapenet) def load_dataset(name): '''Creates and returns an instance of the datasets given its name. ''' # Find the model class from its name mdict = {dataset.__name__: dataset for dataset in DATASETS} if name not in mdict: print('Invalid dataset index. Options are:') # Display a list of valid dataset names for dataset in DATASETS: print('\t* {}'.format(dataset.__name__)) raise ValueError(f'Dataset {name} not defined') DatasetClass = mdict[name] return DatasetClass
ContrastiveSceneContexts-main
downstream/semseg/datasets/__init__.py
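# Usage sketch, not part of the original repository: load_dataset above resolves a
# registered dataset class by name and returns the class itself (not an instance); an
# unknown name prints the available options and raises ValueError.
#
#   DatasetClass = load_dataset('StanfordArea5Dataset')
#   DatasetClass = load_dataset('ScannetVoxelization2cmDataset')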
import logging import unittest import imageio import os import os.path as osp import pickle import numpy as np from collections import defaultdict from plyfile import PlyData from lib.pc_utils import Camera, read_plyfile from lib.dataset import DictDataset, VoxelizationDataset, TemporalVoxelizationDataset, \ str2datasetphase_type, DatasetPhase from lib.transforms import cfl_collate_fn_factory from lib.utils import read_txt, debug_on class SynthiaDataset(DictDataset): NUM_LABELS = 16 def __init__(self, data_path_file, input_transform=None, target_transform=None): with open(data_path_file, 'r') as f: data_paths = pickle.load(f) super(SynthiaDataset, self).__init__(data_paths, input_transform, target_transform) @staticmethod def load_extrinsics(extrinsics_file): """Load the camera extrinsics from a .txt file. """ lines = read_txt(extrinsics_file) params = [float(x) for x in lines[0].split(' ')] extrinsics_matrix = np.asarray(params).reshape([4, 4]) return extrinsics_matrix @staticmethod def load_intrinsics(intrinsics_file): """Load the camera intrinsics from a intrinsics.txt file. intrinsics.txt: a text file containing 4 values that represent (in this order) {focal length, principal-point-x, principal-point-y, baseline (m) with the corresponding right camera} """ lines = read_txt(intrinsics_file) assert len(lines) == 7 intrinsics = { 'focal_length': float(lines[0]), 'pp_x': float(lines[2]), 'pp_y': float(lines[4]), 'baseline': float(lines[6]), } return intrinsics @staticmethod def load_depth(depth_file): """Read a single depth map (.png) file. 1280x760 760 rows, 1280 columns. Depth is encoded in any of the 3 channels in centimetres as an ushort. """ img = np.asarray(imageio.imread(depth_file, format='PNG-FI')) # uint16 img = img.astype(np.int32) # Convert to int32 for torch compatibility return img @staticmethod def load_label(label_file): """Load the ground truth semantic segmentation label. Annotations are given in two channels. The first channel contains the class of that pixel (see the table below). The second channel contains the unique ID of the instance for those objects that are dynamic (cars, pedestrians, etc.). Class R G B ID Void 0 0 0 0 Sky 128 128 128 1 Building 128 0 0 2 Road 128 64 128 3 Sidewalk 0 0 192 4 Fence 64 64 128 5 Vegetation 128 128 0 6 Pole 192 192 128 7 Car 64 0 128 8 Traffic Sign 192 128 128 9 Pedestrian 64 64 0 10 Bicycle 0 128 192 11 Lanemarking 0 172 0 12 Reserved - - - 13 Reserved - - - 14 Traffic Light 0 128 128 15 """ img = np.asarray(imageio.imread(label_file, format='PNG-FI')) # uint16 img = img.astype(np.int32) # Convert to int32 for torch compatibility return img @staticmethod def load_rgb(rgb_file): """Load RGB images. 1280x760 RGB images used for training. 760 rows, 1280 columns. """ img = np.array(imageio.imread(rgb_file)) # uint8 return img class SynthiaVoxelizationDataset(VoxelizationDataset): """Load the ground truth semantic segmentation label. Annotations are given in two channels. The first channel contains the class of that pixel (see the table below). The second channel contains the unique ID of the instance for those objects that are dynamic (cars, pedestrians, etc.). 
Class R G B ID Void 0 0 0 0 Sky 128 128 128 1 Building 128 0 0 2 Road 128 64 128 3 Sidewalk 0 0 192 4 Fence 64 64 128 5 Vegetation 128 128 0 6 Pole 192 192 128 7 Car 64 0 128 8 Traffic Sign 192 128 128 9 Pedestrian 64 64 0 10 Bicycle 0 128 192 11 Lanemarking 0 172 0 12 Reserved - - - 13 Reserved - - - 14 Traffic Light 0 128 128 15 """ CLASS_LABELS = ('building', 'road', 'sidewalk', 'fence', 'vegetation', 'pole', 'car', 'sign', 'pedestrian', 'cyclist', 'lanemarking', 'traffic light') VALID_CLASS_IDS = (2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 15) # Voxelization arguments CLIP_BOUND = ((-1800, 1800), (-1800, 1800), (-1800, 1800)) TEST_CLIP_BOUND = ((-2500, 2500), (-2500, 2500), (-2500, 2500)) VOXEL_SIZE = 15 # cm PREVOXELIZATION_VOXEL_SIZE = 7.5 # Elastic distortion, (granularity, magitude) pairs # ELASTIC_DISTORT_PARAMS = ((80, 300),) # Augmentation arguments ROTATION_AUGMENTATION_BOUND = ((0, 0), (-np.pi, np.pi), (0, 0)) TRANSLATION_AUGMENTATION_RATIO_BOUND = ((-0.1, 0.1), (0, 0), (-0.1, 0.1)) ROTATION_AXIS = 'y' LOCFEAT_IDX = 1 NUM_LABELS = 16 # Automatically subtract ignore labels after processed IGNORE_LABELS = (0, 1, 13, 14) # void, sky, reserved, reserved # Split used in the Minkowski ConvNet, CVPR'19 DATA_PATH_FILE = { DatasetPhase.Train: 'train_cvpr19.txt', DatasetPhase.Val: 'val_cvpr19.txt', DatasetPhase.Test: 'test_cvpr19.txt' } def __init__(self, config, prevoxel_transform=None, input_transform=None, target_transform=None, augment_data=True, elastic_distortion=False, cache=False, phase=DatasetPhase.Train): if isinstance(phase, str): phase = str2datasetphase_type(phase) if phase not in [DatasetPhase.Train, DatasetPhase.TrainVal]: self.CLIP_BOUND = self.TEST_CLIP_BOUND data_root = config.data.synthia_path data_paths = read_txt(osp.join('/checkpoint/jihou/data/synthia4d/splits', self.DATA_PATH_FILE[phase])) if phase == DatasetPhase.Train: data_paths = data_paths[:int(len(data_paths)*config.data.data_ratio)] data_paths = [d.split()[0] for d in data_paths] logging.info('Loading {}: {}'.format(self.__class__.__name__, self.DATA_PATH_FILE[phase])) super().__init__( data_paths, data_root=data_root, input_transform=input_transform, target_transform=target_transform, ignore_label=config.data.ignore_label, return_transformation=config.data.return_transformation, augment_data=augment_data, elastic_distortion=elastic_distortion, config=config) def load_data(self, index): filepath = self.data_root / self.data_paths[index] plydata = PlyData.read(filepath) data = plydata.elements[0].data coords = np.array([data['x'], data['y'], data['z']], dtype=np.float32).T feats = np.array([data['r'], data['g'], data['b']], dtype=np.float32).T labels = np.array(data['l'], dtype=np.int32) instances = np.zeros_like(labels) return coords, feats, labels, instances class SynthiaCVPR15cmVoxelizationDataset(SynthiaVoxelizationDataset): pass class SynthiaCVPR30cmVoxelizationDataset(SynthiaVoxelizationDataset): VOXEL_SIZE = 30 class SynthiaAllSequencesVoxelizationDataset(SynthiaVoxelizationDataset): DATA_PATH_FILE = { DatasetPhase.Train: 'train_raw.txt', DatasetPhase.Val: 'val_raw.txt', DatasetPhase.Test: 'test_raw.txt' } class TestSynthia(unittest.TestCase): @debug_on() def test(self): from torch.utils.data import DataLoader from lib.utils import Timer from config import get_config config = get_config() dataset = SynthiaVoxelizationDataset(config) timer = Timer() data_loader = DataLoader( dataset=dataset, collate_fn=cfl_collate_fn_factory(limit_numpoints=False), num_workers=0, batch_size=4, shuffle=True) # Start from 
index 1 # for i, batch in enumerate(data_loader, 1): iter = data_loader.__iter__() for i in range(100): timer.tic() batch = iter.next() print(batch, timer.toc()) if __name__ == '__main__': unittest.main()
ContrastiveSceneContexts-main
downstream/semseg/datasets/synthia.py
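# Consistency sketch, not part of the original repository: the Synthia label table in
# the docstring above and the class constants agree, i.e. the 12 valid class ids plus
# the 4 ignored ids (void, sky, and the two reserved slots) cover all 16 labels exactly
# once.
VALID_CLASS_IDS = (2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 15)
IGNORE_LABELS = (0, 1, 13, 14)
assert sorted(VALID_CLASS_IDS + IGNORE_LABELS) == list(range(16))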
from abc import ABC from pathlib import Path from collections import defaultdict import random import numpy as np from enum import Enum import torch from torch.utils.data import Dataset, DataLoader import MinkowskiEngine as ME from plyfile import PlyData import datasets.transforms as t from datasets.dataloader import InfSampler, DistributedInfSampler from datasets.voxelizer import Voxelizer from lib.distributed import get_world_size class DatasetPhase(Enum): Train = 0 Val = 1 Val2 = 2 TrainVal = 3 Test = 4 Debug = 5 def datasetphase_2str(arg): if arg == DatasetPhase.Train: return 'train' elif arg == DatasetPhase.Val: return 'val' elif arg == DatasetPhase.Val2: return 'val2' elif arg == DatasetPhase.TrainVal: return 'trainval' elif arg == DatasetPhase.Test: return 'test' elif arg == DatasetPhase.Debug: return 'debug' else: raise ValueError('phase must be one of dataset enum.') def str2datasetphase_type(arg): if arg.upper() == 'TRAIN': return DatasetPhase.Train elif arg.upper() == 'VAL': return DatasetPhase.Val elif arg.upper() == 'VAL2': return DatasetPhase.Val2 elif arg.upper() == 'TRAINVAL': return DatasetPhase.TrainVal elif arg.upper() == 'TEST': return DatasetPhase.Test elif arg.upper() == 'DEBUG': return DatasetPhase.Debug else: raise ValueError('phase must be one of train/val/test') def cache(func): def wrapper(self, *args, **kwargs): # Assume that args[0] is index index = args[0] if self.cache: if index not in self.cache_dict[func.__name__]: results = func(self, *args, **kwargs) self.cache_dict[func.__name__][index] = results return self.cache_dict[func.__name__][index] else: return func(self, *args, **kwargs) return wrapper class DictDataset(Dataset, ABC): IS_FULL_POINTCLOUD_EVAL = False def __init__(self, data_paths, prevoxel_transform=None, input_transform=None, target_transform=None, cache=False, data_root='/'): """ data_paths: list of lists, [[str_path_to_input, str_path_to_label], [...]] """ Dataset.__init__(self) # Allows easier path concatenation if not isinstance(data_root, Path): data_root = Path(data_root) self.data_root = data_root self.data_paths = sorted(data_paths) self.prevoxel_transform = prevoxel_transform self.input_transform = input_transform self.target_transform = target_transform # dictionary of input self.data_loader_dict = { 'input': (self.load_input, self.input_transform), 'target': (self.load_target, self.target_transform) } # For large dataset, do not cache self.cache = cache self.cache_dict = defaultdict(dict) self.loading_key_order = ['input', 'target'] def load_input(self, index): raise NotImplementedError def load_target(self, index): raise NotImplementedError def get_classnames(self): pass def reorder_result(self, result): return result def __getitem__(self, index): out_array = [] for k in self.loading_key_order: loader, transformer = self.data_loader_dict[k] v = loader(index) if transformer: v = transformer(v) out_array.append(v) return out_array def __len__(self): return len(self.data_paths) class VoxelizationDatasetBase(DictDataset, ABC): IS_TEMPORAL = False CLIP_BOUND = (-1000, -1000, -1000, 1000, 1000, 1000) ROTATION_AXIS = None NUM_IN_CHANNEL = None NUM_LABELS = -1 # Number of labels in the dataset, including all ignore classes IGNORE_LABELS = None # labels that are not evaluated def __init__(self, data_paths, prevoxel_transform=None, input_transform=None, target_transform=None, cache=False, data_root='/', ignore_mask=255, return_transformation=False, **kwargs): """ ignore_mask: label value for ignore class. 
It will not be used as a class in the loss or evaluation. """ DictDataset.__init__( self, data_paths, prevoxel_transform=prevoxel_transform, input_transform=input_transform, target_transform=target_transform, cache=cache, data_root=data_root) self.ignore_mask = ignore_mask self.return_transformation = return_transformation def __getitem__(self, index): raise NotImplementedError def load_ply(self, index): filepath = self.data_root / self.data_paths[index] plydata = PlyData.read(filepath) data = plydata.elements[0].data coords = np.array([data['x'], data['y'], data['z']], dtype=np.float32).T feats = np.array([data['red'], data['green'], data['blue']], dtype=np.float32).T labels = np.array(data['label'], dtype=np.int32) return coords, feats, labels, None def load_data(self, index): raise NotImplementedError def __len__(self): num_data = len(self.data_paths) return num_data class VoxelizationDataset(VoxelizationDatasetBase): """This dataset loads RGB point clouds and their labels as a list of points and voxelizes the pointcloud with sufficient data augmentation. """ # Voxelization arguments VOXEL_SIZE = 0.05 # 5cm # Coordinate Augmentation Arguments: Unlike feature augmentation, coordinate # augmentation has to be done before voxelization SCALE_AUGMENTATION_BOUND = (0.9, 1.1) ROTATION_AUGMENTATION_BOUND = ((-np.pi / 6, np.pi / 6), (-np.pi, np.pi), (-np.pi / 6, np.pi / 6)) TRANSLATION_AUGMENTATION_RATIO_BOUND = ((-0.2, 0.2), (-0.05, 0.05), (-0.2, 0.2)) ELASTIC_DISTORT_PARAMS = None # MISC. PREVOXELIZATION_VOXEL_SIZE = None # Augment coords to feats AUGMENT_COORDS_TO_FEATS = False def __init__(self, data_paths, prevoxel_transform=None, input_transform=None, target_transform=None, data_root='/', ignore_label=255, return_transformation=False, augment_data=False, config=None, **kwargs): self.augment_data = augment_data self.config = config VoxelizationDatasetBase.__init__( self, data_paths, prevoxel_transform=prevoxel_transform, input_transform=input_transform, target_transform=target_transform, cache=cache, data_root=data_root, ignore_mask=ignore_label, return_transformation=return_transformation) # Prevoxel transformations self.voxelizer = Voxelizer( voxel_size=self.VOXEL_SIZE, clip_bound=self.CLIP_BOUND, use_augmentation=augment_data, scale_augmentation_bound=self.SCALE_AUGMENTATION_BOUND, rotation_augmentation_bound=self.ROTATION_AUGMENTATION_BOUND, translation_augmentation_ratio_bound=self.TRANSLATION_AUGMENTATION_RATIO_BOUND, ignore_label=ignore_label) # map labels not evaluated to ignore_label label_map = {} n_used = 0 for l in range(self.NUM_LABELS): if l in self.IGNORE_LABELS: label_map[l] = self.ignore_mask else: label_map[l] = n_used n_used += 1 label_map[self.ignore_mask] = self.ignore_mask self.label_map = label_map self.NUM_LABELS -= len(self.IGNORE_LABELS) def _augment_coords_to_feats(self, coords, feats, labels=None): norm_coords = coords - coords.mean(0) # color must come first. 
if isinstance(coords, np.ndarray): feats = np.concatenate((feats, norm_coords), 1) else: feats = torch.cat((feats, norm_coords), 1) return coords, feats, labels def convert_mat2cfl(self, mat): # Generally, xyz,rgb,label return mat[:, :3], mat[:, 3:-1], mat[:, -1] def get_instance_info(self, xyz, instance_ids): ''' :param xyz: (n, 3) :param instance_ids: (n), int, (1~nInst, -1) :return: instance_num, dict ''' centers = np.ones((xyz.shape[0], 3), dtype=np.float32) * -1 # (n, 9), float, (cx, cy, cz, minx, miny, minz, maxx, maxy, maxz, occ, num_instances) occupancy = {} # (nInst), int bbox = {} unique_ids = np.unique(instance_ids) for id_ in unique_ids: if id_ == -1: continue mask = (instance_ids == id_) xyz_ = xyz[mask] bbox_min = xyz_.min(0) bbox_max = xyz_.max(0) center = xyz_.mean(0) centers[mask] = center occupancy[id_] = mask.sum() bbox[id_] = np.concatenate([bbox_min, bbox_max]) return {"ids": instance_ids, "center": centers, "occupancy": occupancy, "bbox": bbox} def __getitem__(self, index): coords, feats, labels = self.load_data(index) # Downsample the pointcloud with finer voxel size before transformation for memory and speed if self.PREVOXELIZATION_VOXEL_SIZE is not None: inds = ME.utils.sparse_quantize( coords / self.PREVOXELIZATION_VOXEL_SIZE, return_index=True) coords = coords[inds] feats = feats[inds] labels = labels[inds] # Prevoxel transformations if self.prevoxel_transform is not None: coords, feats, labels = self.prevoxel_transform(coords, feats, labels) coords, feats, labels, transformation = self.voxelizer.voxelize( coords, feats, labels) # map labels not used for evaluation to ignore_label if self.input_transform is not None: coords, feats, labels = self.input_transform(coords, feats, labels) if self.target_transform is not None: coords, feats, labels = self.target_transform(coords, feats, labels) if self.augment_data: # For some networks, making the network invariant to even, odd coords is important coords += (torch.rand(3) * 100).int().numpy() # ------------- label mapping -------------------- if self.IGNORE_LABELS is not None: labels = np.array([self.label_map[x] for x in labels], dtype=np.int) # Use coordinate features if config is set if self.AUGMENT_COORDS_TO_FEATS: coords, feats, labels = self._augment_coords_to_feats(coords, feats, labels) return_args = [coords, feats, labels] if self.return_transformation: return_args.append(transformation.astype(np.float32)) return tuple(return_args) def initialize_data_loader(DatasetClass, config, phase, num_workers, shuffle, repeat, augment_data, batch_size, limit_numpoints, input_transform=None, target_transform=None): if isinstance(phase, str): phase = str2datasetphase_type(phase) if config.data.return_transformation: collate_fn = t.cflt_collate_fn_factory(limit_numpoints) else: collate_fn = t.cfl_collate_fn_factory(limit_numpoints) prevoxel_transform_train = [] if augment_data: prevoxel_transform_train.append(t.ElasticDistortion(DatasetClass.ELASTIC_DISTORT_PARAMS)) if len(prevoxel_transform_train) > 0: prevoxel_transforms = t.Compose(prevoxel_transform_train) else: prevoxel_transforms = None input_transforms = [] if input_transform is not None: input_transforms += input_transform if augment_data: input_transforms += [ t.RandomDropout(0.2), t.RandomHorizontalFlip(DatasetClass.ROTATION_AXIS, DatasetClass.IS_TEMPORAL), t.ChromaticAutoContrast(), t.ChromaticTranslation(config.augmentation.data_aug_color_trans_ratio), t.ChromaticJitter(config.augmentation.data_aug_color_jitter_std), # 
t.HueSaturationTranslation(config.data_aug_hue_max, config.data_aug_saturation_max), ] if len(input_transforms) > 0: input_transforms = t.Compose(input_transforms) else: input_transforms = None dataset = DatasetClass( config, prevoxel_transform=prevoxel_transforms, input_transform=input_transforms, target_transform=target_transform, cache=config.data.cache_data, augment_data=augment_data, phase=phase) data_args = { 'dataset': dataset, 'num_workers': num_workers, 'batch_size': batch_size, 'collate_fn': collate_fn, } if repeat: if get_world_size() > 1: data_args['sampler'] = DistributedInfSampler(dataset, shuffle=shuffle) # torch.utils.data.distributed.DistributedSampler(dataset) else: data_args['sampler'] = InfSampler(dataset, shuffle) else: data_args['shuffle'] = shuffle data_loader = DataLoader(**data_args) return data_loader
ContrastiveSceneContexts-main
downstream/semseg/datasets/dataset.py
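# Standalone sketch, not part of the original repository, of the label-remapping logic
# in VoxelizationDataset.__init__ above: ids listed in IGNORE_LABELS collapse onto the
# ignore mask and the remaining ids are renumbered densely from 0 (toy numbers below).
NUM_LABELS = 6
IGNORE_LABELS = (0, 3)
ignore_mask = 255

label_map, n_used = {}, 0
for l in range(NUM_LABELS):
    if l in IGNORE_LABELS:
        label_map[l] = ignore_mask
    else:
        label_map[l] = n_used
        n_used += 1
label_map[ignore_mask] = ignore_mask
# label_map == {0: 255, 1: 0, 2: 1, 3: 255, 4: 2, 5: 3, 255: 255}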
import logging import os import sys import numpy as np from collections import defaultdict from scipy import spatial import torch from plyfile import PlyData from lib.utils import read_txt, fast_hist, per_class_iu from datasets.dataset import VoxelizationDataset, DatasetPhase, str2datasetphase_type, cache import datasets.transforms as t class StanfordVoxelizationDatasetBase: # added NUM_LABELS = 14 CLASS_LABELS = ('clutter', 'beam', 'board', 'bookcase', 'ceiling', 'chair', 'column', 'door', 'floor', 'sofa', 'table', 'wall', 'window') VALID_CLASS_IDS = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13) IGNORE_LABELS = tuple(set(range(14)) - set(VALID_CLASS_IDS)) CLASS_LABELS_INSTANCE = ('clutter', 'beam', 'board', 'bookcase', 'chair', 'column', 'door', 'sofa', 'table', 'window') VALID_CLASS_IDS_INSTANCE = (0, 1, 2, 3, 5, 6, 7, 9, 11, 13) IGNORE_LABELS_INSTANCE = tuple(set(range(14)) - set(VALID_CLASS_IDS_INSTANCE)) #--------- CLIP_SIZE = None CLIP_BOUND = None LOCFEAT_IDX = 2 ROTATION_AXIS = 'z' #IGNORE_LABELS = (10,) # remove stairs, following SegCloud # CLASSES = [ # 'clutter', 'beam', 'board', 'bookcase', 'ceiling', 'chair', 'column', 'door', 'floor', 'sofa', # 'table', 'wall', 'window' # ] IS_FULL_POINTCLOUD_EVAL = True DATA_PATH_FILE = { DatasetPhase.Train: 'train.txt', DatasetPhase.Val: 'val.txt', DatasetPhase.TrainVal: 'trainval.txt', DatasetPhase.Test: 'test.txt' } def test_pointcloud(self, pred_dir): print('Running full pointcloud evaluation.') # Join room by their area and room id. room_dict = defaultdict(list) for i, data_path in enumerate(self.data_paths): area, room = data_path.split(os.sep) room, _ = os.path.splitext(room) room_id = '_'.join(room.split('_')[:-1]) room_dict[(area, room_id)].append(i) # Test independently for each room. sys.setrecursionlimit(100000) # Increase recursion limit for k-d tree. pred_list = sorted(os.listdir(pred_dir)) hist = np.zeros((self.NUM_LABELS, self.NUM_LABELS)) for room_idx, room_list in enumerate(room_dict.values()): print(f'Evaluating room {room_idx} / {len(room_dict)}.') # Join all predictions and query pointclouds of split data. pred = np.zeros((0, 4)) pointcloud = np.zeros((0, 7)) for i in room_list: pred = np.vstack((pred, np.load(os.path.join(pred_dir, pred_list[i])))) pointcloud = np.vstack((pointcloud, self.load_ply(i)[0])) # Deduplicate all query pointclouds of split data. pointcloud = np.array(list(set(tuple(l) for l in pointcloud.tolist()))) # Run test for each room. pred_tree = spatial.KDTree(pred[:, :3], leafsize=500) _, result = pred_tree.query(pointcloud[:, :3]) ptc_pred = pred[result, 3].astype(int) ptc_gt = pointcloud[:, -1].astype(int) if self.IGNORE_LABELS: ptc_pred = self.label2masked[ptc_pred] ptc_gt = self.label2masked[ptc_gt] hist += fast_hist(ptc_pred, ptc_gt, self.NUM_LABELS) # Print results. ious = [] print('Per class IoU:') for i, iou in enumerate(per_class_iu(hist) * 100): result_str = '' if hist.sum(1)[i]: result_str += f'{iou}' ious.append(iou) else: result_str += 'N/A' # Do not print if data not in ground truth. 
print(result_str) print(f'Average IoU: {np.nanmean(ious)}') def _augment_coords_to_feats(self, coords, feats, labels=None): # Center x,y coords_center = coords.mean(0, keepdims=True) coords_center[0, 2] = 0 norm_coords = coords - coords_center feats = np.concatenate((feats, norm_coords), 1) return coords, feats, labels class StanfordDataset(StanfordVoxelizationDatasetBase, VoxelizationDataset): # Voxelization arguments VOXEL_SIZE = 0.05 # 5cm CLIP_BOUND = 4 # [-N, N] TEST_CLIP_BOUND = None # Augmentation arguments ROTATION_AUGMENTATION_BOUND = \ ((-np.pi / 32, np.pi / 32), (-np.pi / 32, np.pi / 32), (-np.pi, np.pi)) TRANSLATION_AUGMENTATION_RATIO_BOUND = ((-0.2, 0.2), (-0.2, 0.2), (-0.05, 0.05)) # AUGMENT_COORDS_TO_FEATS = True # NUM_IN_CHANNEL = 6 AUGMENT_COORDS_TO_FEATS = False NUM_IN_CHANNEL = 3 def __init__(self, config, prevoxel_transform=None, input_transform=None, target_transform=None, cache=False, augment_data=True, elastic_distortion=False, phase=DatasetPhase.Train): if isinstance(phase, str): phase = str2datasetphase_type(phase) if phase not in [DatasetPhase.Train, DatasetPhase.TrainVal]: self.CLIP_BOUND = self.TEST_CLIP_BOUND data_root = config.data.stanford3d_path if isinstance(self.DATA_PATH_FILE[phase], (list, tuple)): data_paths = [] for split in self.DATA_PATH_FILE[phase]: data_paths += read_txt(os.path.join(data_root, 'splits', split)) else: data_paths = read_txt(os.path.join(data_root, 'splits', self.DATA_PATH_FILE[phase])) if config.data.voxel_size: self.VOXEL_SIZE = config.data.voxel_size logging.info('voxel size: {}'.format(self.VOXEL_SIZE)) logging.info('Loading {} {}: {}'.format(self.__class__.__name__, phase, self.DATA_PATH_FILE[phase])) VoxelizationDataset.__init__( self, data_paths, data_root=data_root, prevoxel_transform=prevoxel_transform, input_transform=input_transform, target_transform=target_transform, ignore_label=config.data.ignore_label, return_transformation=config.data.return_transformation, augment_data=augment_data, elastic_distortion=elastic_distortion, config=config) @cache def load_ply(self, index): filepath = self.data_root / self.data_paths[index] plydata = PlyData.read(filepath) data = plydata.elements[0].data coords = np.array([data['x'], data['y'], data['z']], dtype=np.float32).T feats = np.array([data['red'], data['green'], data['blue']], dtype=np.float32).T labels = np.array(data['label'], dtype=np.int32) return coords, feats, labels, None @cache def load_data(self, index): filepath = self.data_root / self.data_paths[index] pointcloud = torch.load(filepath) coords = pointcloud[:,:3].astype(np.float32) feats = pointcloud[:,3:6].astype(np.float32) labels = pointcloud[:,6].astype(np.int32) return coords, feats, labels class StanfordArea5Dataset(StanfordDataset): DATA_PATH_FILE = { DatasetPhase.Train: ['area1.txt', 'area2.txt', 'area3.txt', 'area4.txt', 'area6.txt'], DatasetPhase.Val: 'area5.txt', DatasetPhase.Test: 'area5.txt' } class StanfordArea53cmDataset(StanfordArea5Dataset): CLIP_BOUND = 3.2 VOXEL_SIZE = 0.03 class StanfordArea57d5cmDataset(StanfordArea5Dataset): VOXEL_SIZE = 0.075 class StanfordArea510cmDataset(StanfordArea5Dataset): VOXEL_SIZE = 0.1 def test(config): """Test point cloud data loader. 
""" from torch.utils.data import DataLoader from lib.utils import Timer import open3d as o3d def make_pcd(coords, feats): pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(coords[:, :3].float().numpy()) pcd.colors = o3d.utility.Vector3dVector(feats[:, :3].numpy() / 255) return pcd timer = Timer() DatasetClass = StanfordArea5Dataset transformations = [ t.RandomHorizontalFlip(DatasetClass.ROTATION_AXIS, DatasetClass.IS_TEMPORAL), t.ChromaticAutoContrast(), t.ChromaticTranslation(config.data_aug_color_trans_ratio), t.ChromaticJitter(config.data_aug_color_jitter_std), ] dataset = DatasetClass( config, prevoxel_transform=t.ElasticDistortion(DatasetClass.ELASTIC_DISTORT_PARAMS), input_transform=t.Compose(transformations), augment_data=True, cache=True, elastic_distortion=True) data_loader = DataLoader( dataset=dataset, collate_fn=t.cfl_collate_fn_factory(limit_numpoints=False), batch_size=1, shuffle=True) # Start from index 1 iter = data_loader.__iter__() for i in range(100): timer.tic() coords, feats, labels = iter.next() pcd = make_pcd(coords, feats) o3d.visualization.draw_geometries([pcd]) print(timer.toc()) if __name__ == '__main__': from config import get_config config = get_config() test(config)
ContrastiveSceneContexts-main
downstream/semseg/datasets/stanford.py
import collections import numpy as np import MinkowskiEngine as ME from scipy.linalg import expm, norm # Rotation matrix along axis with angle theta def M(axis, theta): return expm(np.cross(np.eye(3), axis / norm(axis) * theta)) class Voxelizer: def __init__(self, voxel_size=1, clip_bound=None, use_augmentation=False, scale_augmentation_bound=None, rotation_augmentation_bound=None, translation_augmentation_ratio_bound=None, ignore_label=255): """ Args: voxel_size: side length of a voxel clip_bound: boundary of the voxelizer. Points outside the bound will be deleted expects either None or an array like ((-100, 100), (-100, 100), (-100, 100)). scale_augmentation_bound: None or (0.9, 1.1) rotation_augmentation_bound: None or ((np.pi / 6, np.pi / 6), None, None) for 3 axis. Use random order of x, y, z to prevent bias. translation_augmentation_bound: ((-5, 5), (0, 0), (-10, 10)) ignore_label: label assigned for ignore (not a training label). """ self.voxel_size = voxel_size self.clip_bound = clip_bound self.ignore_label = ignore_label # Augmentation self.use_augmentation = use_augmentation self.scale_augmentation_bound = scale_augmentation_bound self.rotation_augmentation_bound = rotation_augmentation_bound self.translation_augmentation_ratio_bound = translation_augmentation_ratio_bound def get_transformation_matrix(self): voxelization_matrix, rotation_matrix = np.eye(4), np.eye(4) # Get clip boundary from config or pointcloud. # Get inner clip bound to crop from. # Transform pointcloud coordinate to voxel coordinate. # 1. Random rotation rot_mat = np.eye(3) if self.use_augmentation and self.rotation_augmentation_bound is not None: if isinstance(self.rotation_augmentation_bound, collections.Iterable): rot_mats = [] for axis_ind, rot_bound in enumerate(self.rotation_augmentation_bound): theta = 0 axis = np.zeros(3) axis[axis_ind] = 1 if rot_bound is not None: theta = np.random.uniform(*rot_bound) rot_mats.append(M(axis, theta)) # Use random order np.random.shuffle(rot_mats) rot_mat = rot_mats[0] @ rot_mats[1] @ rot_mats[2] else: raise ValueError() rotation_matrix[:3, :3] = rot_mat # 2. Scale and translate to the voxel space. scale = 1 / self.voxel_size if self.use_augmentation and self.scale_augmentation_bound is not None: scale *= np.random.uniform(*self.scale_augmentation_bound) np.fill_diagonal(voxelization_matrix[:3, :3], scale) # Get final transformation matrix. 
return voxelization_matrix, rotation_matrix def clip(self, coords, center=None, trans_aug_ratio=None): bound_min = np.min(coords, 0).astype(float) bound_max = np.max(coords, 0).astype(float) bound_size = bound_max - bound_min if center is None: center = bound_min + bound_size * 0.5 if trans_aug_ratio is not None: trans = np.multiply(trans_aug_ratio, bound_size) center += trans lim = self.clip_bound if isinstance(self.clip_bound, (int, float)): if bound_size.max() < self.clip_bound: return None else: clip_inds = ((coords[:, 0] >= (-lim + center[0])) & \ (coords[:, 0] < (lim + center[0])) & \ (coords[:, 1] >= (-lim + center[1])) & \ (coords[:, 1] < (lim + center[1])) & \ (coords[:, 2] >= (-lim + center[2])) & \ (coords[:, 2] < (lim + center[2]))) return clip_inds # Clip points outside the limit clip_inds = ((coords[:, 0] >= (lim[0][0] + center[0])) & \ (coords[:, 0] < (lim[0][1] + center[0])) & \ (coords[:, 1] >= (lim[1][0] + center[1])) & \ (coords[:, 1] < (lim[1][1] + center[1])) & \ (coords[:, 2] >= (lim[2][0] + center[2])) & \ (coords[:, 2] < (lim[2][1] + center[2]))) return clip_inds def voxelize(self, coords, feats, labels, center=None): assert coords.shape[1] == 3 and coords.shape[0] == feats.shape[0] and coords.shape[0] if self.clip_bound is not None: trans_aug_ratio = np.zeros(3) if self.use_augmentation and self.translation_augmentation_ratio_bound is not None: for axis_ind, trans_ratio_bound in enumerate(self.translation_augmentation_ratio_bound): trans_aug_ratio[axis_ind] = np.random.uniform(*trans_ratio_bound) clip_inds = self.clip(coords, center, trans_aug_ratio) if clip_inds is not None: coords, feats = coords[clip_inds], feats[clip_inds] if labels is not None: labels = labels[clip_inds] # Get rotation and scale M_v, M_r = self.get_transformation_matrix() # Apply transformations rigid_transformation = M_v if self.use_augmentation: rigid_transformation = M_r @ rigid_transformation homo_coords = np.hstack((coords, np.ones((coords.shape[0], 1), dtype=coords.dtype))) coords_aug = np.floor(homo_coords @ rigid_transformation.T[:, :3]) # Align all coordinates to the origin. 
min_coords = coords_aug.min(0) M_t = np.eye(4) M_t[:3, -1] = -min_coords rigid_transformation = M_t @ rigid_transformation coords_aug = np.floor(coords_aug - min_coords) # key = self.hash(coords_aug) # floor happens by astype(np.uint64) mapping, colabels = ME.utils.sparse_quantize( coords_aug, feats, labels=labels, return_index=True, ignore_label=self.ignore_label) coords_aug = coords_aug[mapping] feats = feats[mapping] labels = colabels return coords_aug, feats, labels, rigid_transformation.flatten() def voxelize_temporal(self, coords_t, feats_t, labels_t, centers=None, return_transformation=False): # Legacy code, remove if centers is None: centers = [ None, ] * len(coords_t) coords_tc, feats_tc, labels_tc, transformation_tc = [], [], [], [] # ######################### Data Augmentation ############################# # Get rotation and scale M_v, M_r = self.get_transformation_matrix() # Apply transformations rigid_transformation = M_v if self.use_augmentation: rigid_transformation = M_r @ rigid_transformation # ######################### Voxelization ############################# # Voxelize coords for coords, feats, labels, center in zip(coords_t, feats_t, labels_t, centers): ################################### # Clip the data if bound exists if self.clip_bound is not None: trans_aug_ratio = np.zeros(3) if self.use_augmentation and self.translation_augmentation_ratio_bound is not None: for axis_ind, trans_ratio_bound in enumerate(self.translation_augmentation_ratio_bound): trans_aug_ratio[axis_ind] = np.random.uniform(*trans_ratio_bound) clip_inds = self.clip(coords, center, trans_aug_ratio) if clip_inds is not None: coords, feats = coords[clip_inds], feats[clip_inds] if labels is not None: labels = labels[clip_inds] ################################### homo_coords = np.hstack((coords, np.ones((coords.shape[0], 1), dtype=coords.dtype))) coords_aug = np.floor(homo_coords @ rigid_transformation.T)[:, :3] coords_aug, feats, labels = ME.utils.sparse_quantize( coords_aug, feats, labels=labels, ignore_label=self.ignore_label) coords_tc.append(coords_aug) feats_tc.append(feats) labels_tc.append(labels) transformation_tc.append(rigid_transformation.flatten()) return_args = [coords_tc, feats_tc, labels_tc] if return_transformation: return_args.append(transformation_tc) return tuple(return_args) def test(): N = 16575 coords = np.random.rand(N, 3) * 10 feats = np.random.rand(N, 4) labels = np.floor(np.random.rand(N) * 3) coords[:3] = 0 labels[:3] = 2 voxelizer = Voxelizer() print(voxelizer.voxelize(coords, feats, labels)) if __name__ == '__main__': test()
ContrastiveSceneContexts-main
downstream/semseg/datasets/voxelizer.py
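# Quick check, not part of the original repository, of the axis-angle helper M() in
# voxelizer.py above: rotating by pi/2 about the z axis sends the x unit vector to the
# y unit vector (up to floating-point error).
import numpy as np

R = M(np.array([0.0, 0.0, 1.0]), np.pi / 2)
print(np.round(R @ np.array([1.0, 0.0, 0.0]), 6))   # -> [0. 1. 0.]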
import math import torch import torch.distributed as dist from torch.utils.data.sampler import Sampler class InfSampler(Sampler): """Samples elements randomly, without replacement. Arguments: data_source (Dataset): dataset to sample from """ def __init__(self, data_source, shuffle=False): self.data_source = data_source self.shuffle = shuffle self.reset_permutation() def reset_permutation(self): perm = len(self.data_source) if self.shuffle: perm = torch.randperm(perm) self._perm = perm.tolist() def __iter__(self): return self def __next__(self): if len(self._perm) == 0: self.reset_permutation() return self._perm.pop() def __len__(self): return len(self.data_source) next = __next__ # Python 2 compatibility class DistributedInfSampler(InfSampler): def __init__(self, data_source, num_replicas=None, rank=None, shuffle=True): if num_replicas is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") num_replicas = dist.get_world_size() if rank is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") rank = dist.get_rank() self.data_source = data_source self.num_replicas = num_replicas self.rank = rank self.epoch = 0 self.it = 0 self.num_samples = int(math.ceil(len(self.data_source) * 1.0 / self.num_replicas)) self.total_size = self.num_samples * self.num_replicas self.shuffle = shuffle self.reset_permutation() def __next__(self): it = self.it * self.num_replicas + self.rank value = self._perm[it % len(self._perm)] self.it = self.it + 1 if (self.it * self.num_replicas) >= len(self._perm): self.reset_permutation() self.it = 0 return value def __len__(self): return self.num_samples
ContrastiveSceneContexts-main
downstream/semseg/datasets/dataloader.py
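# Usage sketch, not part of the original repository (toy dataset below): InfSampler
# never raises StopIteration, so a DataLoader built on it can be drawn from
# indefinitely; the permutation silently restarts once the dataset is exhausted.
import torch
from torch.utils.data import DataLoader, TensorDataset

ds = TensorDataset(torch.arange(10).float())
loader = DataLoader(ds, batch_size=4, sampler=InfSampler(ds, shuffle=True))
it = iter(loader)
for _ in range(5):          # more batches than one pass over the 10 samples provides
    (batch,) = next(it)
    print(batch.shape)      # torch.Size([4]) every time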
import logging import os import sys from pathlib import Path import torch import numpy as np from scipy import spatial from datasets.dataset import VoxelizationDataset, DatasetPhase, str2datasetphase_type from lib.pc_utils import read_plyfile, save_point_cloud from lib.utils import read_txt, fast_hist, per_class_iu from lib.io3d import write_triangle_mesh, create_color_palette class ScannetVoxelizationDataset(VoxelizationDataset): # added NUM_LABELS = 41 # Will be converted to 20 as defined in IGNORE_LABELS. NUM_IN_CHANNEL = 3 CLASS_LABELS = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture') VALID_CLASS_IDS = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39) IGNORE_LABELS = tuple(set(range(NUM_LABELS)) - set(VALID_CLASS_IDS)) CLASS_LABELS_INSTANCE = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture'] VALID_CLASS_IDS_INSTANCE = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]) IGNORE_LABELS_INSTANCE = tuple(set(range(NUM_LABELS)) - set(VALID_CLASS_IDS_INSTANCE)) # Voxelization arguments CLIP_BOUND = None TEST_CLIP_BOUND = None VOXEL_SIZE = 0.05 # Augmentation arguments ROTATION_AUGMENTATION_BOUND = ((-np.pi / 64, np.pi / 64), (-np.pi / 64, np.pi / 64), (-np.pi, np.pi)) TRANSLATION_AUGMENTATION_RATIO_BOUND = ((-0.2, 0.2), (-0.2, 0.2), (0, 0)) ELASTIC_DISTORT_PARAMS = ((0.2, 0.4), (0.8, 1.6)) ROTATION_AXIS = 'z' LOCFEAT_IDX = 2 IS_FULL_POINTCLOUD_EVAL = True # If trainval.txt does not exist, copy train.txt and add contents from val.txt DATA_PATH_FILE = { DatasetPhase.Train: 'scannetv2_train.txt', DatasetPhase.Val: 'scannetv2_val.txt', DatasetPhase.TrainVal: 'scannetv2_trainval.txt', DatasetPhase.Test: 'scannetv2_test.txt', } def __init__(self, config, prevoxel_transform=None, input_transform=None, target_transform=None, augment_data=True, elastic_distortion=False, cache=False, phase=DatasetPhase.Train): if isinstance(phase, str): phase = str2datasetphase_type(phase) # Use cropped rooms for train/val data_root = config.data.scannet_path if phase not in [DatasetPhase.Train, DatasetPhase.TrainVal]: self.CLIP_BOUND = self.TEST_CLIP_BOUND data_paths = read_txt(os.path.join(data_root, 'splits', self.DATA_PATH_FILE[phase])) if phase == DatasetPhase.Train and config.data.train_file: data_paths = read_txt(config.data.train_file) # data efficiency by sampling points self.sampled_inds = {} if config.data.sampled_inds and phase == DatasetPhase.Train: self.sampled_inds = torch.load(config.data.sampled_inds) data_paths = [data_path + '.pth' for data_path in data_paths] logging.info('Loading {}: {}'.format(self.__class__.__name__, self.DATA_PATH_FILE[phase])) super().__init__( data_paths, data_root=data_root, prevoxel_transform=prevoxel_transform, input_transform=input_transform, target_transform=target_transform, ignore_label=config.data.ignore_label, return_transformation=config.data.return_transformation, augment_data=augment_data, elastic_distortion=elastic_distortion, config=config) def get_output_id(self, iteration): return '_'.join(Path(self.data_paths[iteration]).stem.split('_')[:2]) def _augment_locfeat(self, pointcloud): # Assuming that pointcloud is xyzrgb(...), append location feat. 
pointcloud = np.hstack( (pointcloud[:, :6], 100 * np.expand_dims(pointcloud[:, self.LOCFEAT_IDX], 1), pointcloud[:, 6:])) return pointcloud def load_data(self, index): filepath = self.data_root / self.data_paths[index] pointcloud = torch.load(filepath) coords = pointcloud[0].astype(np.float32) feats = pointcloud[1].astype(np.float32) labels = pointcloud[2].astype(np.int32) if self.sampled_inds: scene_name = self.get_output_id(index) mask = np.ones_like(labels).astype(np.bool) sampled_inds = self.sampled_inds[scene_name] mask[sampled_inds] = False labels[mask] = 0 return coords, feats, labels def save_features(self, coords, upsampled_features, transformation, iteration, save_dir): inds_mapping, xyz = self.get_original_pointcloud(coords, transformation, iteration) ptc_feats = upsampled_features.cpu().numpy()[inds_mapping] room_id = self.get_output_id(iteration) torch.save(ptc_feats, f'{save_dir}/{room_id}') def get_original_pointcloud(self, coords, transformation, iteration): logging.info('===> Start testing on original pointcloud space.') data_path = self.data_paths[iteration] fullply_f = self.data_root / data_path query_xyz, _, query_label, _ = torch.load(fullply_f) coords = coords[:, 1:].numpy() + 0.5 curr_transformation = transformation[0, :16].numpy().reshape(4, 4) coords = np.hstack((coords, np.ones((coords.shape[0], 1)))) coords = (np.linalg.inv(curr_transformation) @ coords.T).T # Run test for each room. from pykeops.numpy import LazyTensor from pykeops.numpy.utils import IsGpuAvailable query_xyz = np.array(query_xyz) x_i = LazyTensor( query_xyz[:,None,:] ) # x_i.shape = (1e6, 1, 3) y_j = LazyTensor( coords[:,:3][None,:,:] ) # y_j.shape = ( 1, 2e6,3) D_ij = ((x_i - y_j) ** 2).sum(-1) # (M**2, N) symbolic matrix of squared distances indKNN = D_ij.argKmin(1, dim=1) # Grid <-> Samples, (M**2, K) integer tensor inds = indKNN[:,0] return inds, query_xyz class ScannetVoxelization2cmDataset(ScannetVoxelizationDataset): VOXEL_SIZE = 0.02
ContrastiveSceneContexts-main
downstream/semseg/datasets/scannet.py
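The data-efficiency branch of load_data above keeps annotations only for a sampled subset of points and resets every other label to 0. A minimal standalone sketch of that masking step, with invented inputs rather than the repository's config and torch-saved index files (the sketch uses plain bool where the repository uses the deprecated np.bool alias):

import numpy as np

def mask_unsampled_labels(labels: np.ndarray, sampled_inds: np.ndarray) -> np.ndarray:
    """Zero out labels for all points except the sampled indices."""
    mask = np.ones_like(labels, dtype=bool)   # True = label will be reset
    mask[sampled_inds] = False                # keep annotations at sampled points
    out = labels.copy()
    out[mask] = 0
    return out

# Hypothetical example: 6 points, only indices 1 and 4 keep their annotation.
labels = np.array([3, 5, 7, 9, 4, 2], dtype=np.int32)
print(mask_unsampled_labels(labels, np.array([1, 4])))   # -> [0 5 0 0 4 0]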
# Evaluates semantic label task # Input: # - path to .txt prediction files # - path to .txt ground truth files # - output file to write results to # Note that only the valid classes are used for evaluation, # i.e., any ground truth label not in the valid label set # is ignored in the evaluation. # # example usage: evaluate_semantic_label.py --scan_path [path to scan data] --output_file [output file] # python imports import math import logging import os, sys, argparse import inspect try: import numpy as np except: print("Failed to import numpy package.") sys.exit(-1) try: from itertools import izip except ImportError: izip = zip #currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) #parentdir = os.path.dirname(currentdir) #sys.path.insert(0,parentdir) from datasets.evaluation.scannet_benchmark_utils import util_3d from datasets.evaluation.scannet_benchmark_utils import util class Evaluator: def __init__(self, CLASS_LABELS, VALID_CLASS_IDS): #CLASS_LABELS = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', # 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', # 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture'] #VALID_CLASS_IDS = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]) self.CLASS_LABELS = CLASS_LABELS self.VALID_CLASS_IDS = VALID_CLASS_IDS self.UNKNOWN_ID = np.max(VALID_CLASS_IDS) + 1 self.gt = {} self.pred = {} max_id = self.UNKNOWN_ID self.confusion = np.zeros((max_id+1, max_id+1), dtype=np.ulonglong) def update_confusion(self, pred_ids, gt_ids, sceneId=None): # sanity checks if not pred_ids.shape == gt_ids.shape: util.print_error('%s: number of predicted values does not match number of vertices' % pred_file, user_fault=True) n = self.confusion.shape[0] k = (gt_ids >= 0) & (gt_ids < n) temporal = np.bincount(n * gt_ids[k].astype(int) + pred_ids[k], minlength=n**2).reshape(n, n) for valid_class_row in self.VALID_CLASS_IDS: for valid_class_col in self.VALID_CLASS_IDS: self.confusion[valid_class_row][valid_class_col] += temporal[valid_class_row][valid_class_col] @staticmethod def write_to_benchmark(base='benchmark_segmentation', scene_id=None, pred_ids=None): os.makedirs(base, exist_ok=True) util_3d.export_ids('{}.txt'.format(os.path.join(base, scene_id)), pred_ids) def get_iou(self, label_id, confusion): if not label_id in self.VALID_CLASS_IDS: return float('nan') # #true positives tp = np.longlong(confusion[label_id, label_id]) # #false negatives fn = np.longlong(confusion[label_id, :].sum()) - tp # #false positives not_ignored = [l for l in self.VALID_CLASS_IDS if not l == label_id] fp = np.longlong(confusion[not_ignored, label_id].sum()) denom = (tp + fp + fn) if denom == 0: return float('nan') return (float(tp) / denom, tp, denom) def write_result_file(self, confusion, ious, filename): with open(filename, 'w') as f: f.write('iou scores\n') for i in range(len(self.VALID_CLASS_IDS)): label_id = self.VALID_CLASS_IDS[i] label_name = self.CLASS_LABELS[i] iou = ious[label_name][0] f.write('{0:<14s}({1:<2d}): {2:>5.3f}\n'.format(label_name, label_id, iou)) f.write("{0:<14s}: {1:>5.3f}".format('mean', np.array([ious[k][0] for k in ious]).mean())) f.write('\nconfusion matrix\n') f.write('\t\t\t') for i in range(len(self.VALID_CLASS_IDS)): #f.write('\t{0:<14s}({1:<2d})'.format(CLASS_LABELS[i], VALID_CLASS_IDS[i])) f.write('{0:<8d}'.format(self.VALID_CLASS_IDS[i])) f.write('\n') for r in range(len(self.VALID_CLASS_IDS)): 
f.write('{0:<14s}({1:<2d})'.format(self.CLASS_LABELS[r], self.VALID_CLASS_IDS[r])) for c in range(len(self.VALID_CLASS_IDS)): f.write('\t{0:>5.3f}'.format(confusion[self.VALID_CLASS_IDS[r],self.VALID_CLASS_IDS[c]])) f.write('\n') print('wrote results to', filename) def evaluate_confusion(self, output_file=None): class_ious = {} counter = 0 summation = 0 for i in range(len(self.VALID_CLASS_IDS)): label_name = self.CLASS_LABELS[i] label_id = self.VALID_CLASS_IDS[i] class_ious[label_name] = self.get_iou(label_id, self.confusion) # print logging.info('classes IoU') logging.info('----------------------------') for i in range(len(self.VALID_CLASS_IDS)): label_name = self.CLASS_LABELS[i] try: logging.info('{0:<14s}: {1:>5.3f} ({2:>6d}/{3:<6d})'.format(label_name, class_ious[label_name][0], class_ious[label_name][1], class_ious[label_name][2])) summation += class_ious[label_name][0] counter += 1 except: logging.info('{0:<14s}: nan ( nan/nan )'.format(label_name)) logging.info("{0:<14s}: {1:>5.3f}".format('mean', summation / counter)) if output_file: self.write_result_file(self.confusion, class_ious, output_file) return summation / counter def config(): parser = argparse.ArgumentParser() parser.add_argument('--pred_path', required=True, help='path to directory of predicted .txt files') parser.add_argument('--gt_path', required=True, help='path to gt files') parser.add_argument('--output_file', type=str, default='./semantic_label_evaluation.txt') opt = parser.parse_args() return opt def main(): opt = config() #------------------------- ScanNet -------------------------- CLASS_LABELS = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture'] VALID_CLASS_IDS = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]) evaluator = Evaluator(CLASS_LABELS=CLASS_LABELS, VALID_CLASS_IDS=VALID_CLASS_IDS) print('reading', len(os.listdir(opt.pred_path))-1, 'scans...') for i, pred_file in enumerate(os.listdir(opt.pred_path)): if pred_file == 'semantic_label_evaluation.txt': continue gt_file = os.path.join(opt.gt_path, pred_file) if not os.path.isfile(gt_file): util.print_error('Result file {} does not match any gt file'.format(pred_file), user_fault=True) gt_ids = util_3d.load_ids(gt_file) pred_file = os.path.join(opt.pred_path, pred_file) pred_ids = util_3d.load_ids(pred_file) evaluator.update_confusion(pred_ids, gt_ids, pred_file.split('.')[0]) sys.stdout.write("\rscans processed: {}".format(i+1)) sys.stdout.flush() # evaluate evaluator.evaluate_confusion(opt.output_file) if __name__ == '__main__': main()
ContrastiveSceneContexts-main
downstream/semseg/datasets/evaluation/evaluate_semantic_label.py
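get_iou above reads per-class IoU off the accumulated confusion matrix: the diagonal entry is the true positives, the rest of the row the false negatives, and the column restricted to the other valid ids the false positives. A toy, self-contained version with a made-up 3-class confusion matrix (the real script uses the 20 ScanNet benchmark classes and also returns tp and the denominator):

import numpy as np

def class_iou(confusion: np.ndarray, label_id: int, valid_ids) -> float:
    tp = np.longlong(confusion[label_id, label_id])
    fn = np.longlong(confusion[label_id, :].sum()) - tp
    not_ignored = [l for l in valid_ids if l != label_id]
    fp = np.longlong(confusion[not_ignored, label_id].sum())
    denom = tp + fp + fn
    return float('nan') if denom == 0 else float(tp) / denom

# Toy 3x3 confusion matrix over valid ids {0, 1, 2}; rows = ground truth, cols = prediction.
conf = np.array([[8, 1, 1],
                 [2, 6, 0],
                 [0, 1, 9]], dtype=np.int64)
print(class_iou(conf, 1, valid_ids=[0, 1, 2]))   # 6 / (6 + 2 + 2) = 0.6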
# Evaluates semantic instance task # Adapted from the CityScapes evaluation: https://github.com/mcordts/cityscapesScripts/tree/master/cityscapesscripts/evaluation # Input: # - path to .txt prediction files # - path to .txt ground truth files # - output file to write results to # Each .txt prediction file look like: # [(pred0) rel. path to pred. mask over verts as .txt] [(pred0) label id] [(pred0) confidence] # [(pred1) rel. path to pred. mask over verts as .txt] [(pred1) label id] [(pred1) confidence] # [(pred2) rel. path to pred. mask over verts as .txt] [(pred2) label id] [(pred2) confidence] # ... # # NOTE: The prediction files must live in the root of the given prediction path. # Predicted mask .txt files must live in a subfolder. # Additionally, filenames must not contain spaces. # The relative paths to predicted masks must contain one integer per line, # where each line corresponds to vertices in the *_vh_clean_2.ply (in that order). # Non-zero integers indicate part of the predicted instance. # The label ids specify the class of the corresponding mask. # Confidence is a float confidence score of the mask. # # Note that only the valid classes are used for evaluation, # i.e., any ground truth label not in the valid label set # is ignored in the evaluation. # # example usage: evaluate_semantic_instance.py --scan_path [path to scan data] --output_file [output file] # python imports import logging import math import os, sys, argparse import inspect from copy import deepcopy import argparse import numpy as np #currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) #parentdir = os.path.dirname(currentdir) #sys.path.insert(0,parentdir) from datasets.evaluation.scannet_benchmark_utils import util_3d from datasets.evaluation.scannet_benchmark_utils import util def setup_logging(): ch = logging.StreamHandler(sys.stdout) logging.getLogger().setLevel(logging.INFO) logging.basicConfig( format=os.uname()[1].split('.')[0] + ' %(asctime)s %(message)s', datefmt='%m/%d %H:%M:%S', handlers=[ch]) class Evaluator: # ---------- Evaluation params ---------- # # overlaps for evaluation overlaps = np.append(np.arange(0.5,0.95,0.05), 0.25) # minimum region size for evaluation [verts] min_region_sizes = np.array( [ 10 ] ) # distance thresholds [m] distance_threshes = np.array( [ float('inf') ] ) # distance confidences distance_confs = np.array( [ -float('inf') ] ) def __init__(self, CLASS_LABELS, VALID_CLASS_IDS, benchmark=False): # ---------- Label info ---------- # #CLASS_LABELS = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door', # 'window', 'bookshelf', 'picture', 'counter', # 'desk', 'curtain', 'refrigerator', 'shower curtain', # 'toilet', 'sink', 'bathtub', 'otherfurniture'] #VALID_CLASS_IDS = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]) self.CLASS_LABELS = CLASS_LABELS self.VALID_CLASS_IDS = VALID_CLASS_IDS self.ID_TO_LABEL = {} self.LABEL_TO_ID = {} for i in range(len(VALID_CLASS_IDS)): self.LABEL_TO_ID[CLASS_LABELS[i]] = VALID_CLASS_IDS[i] self.ID_TO_LABEL[VALID_CLASS_IDS[i]] = CLASS_LABELS[i] self.pred_instances = {} self.gt_instances = {} self.benchmark = benchmark def evaluate_matches(self, matches): # results: class x overlap ap = np.zeros( (len(self.distance_threshes) , len(self.CLASS_LABELS) , len(self.overlaps)) , np.float ) for di, (min_region_size, distance_thresh, distance_conf) in enumerate(zip(self.min_region_sizes, self.distance_threshes, self.distance_confs)): for oi, overlap_th in enumerate(self.overlaps): pred_visited = 
{} for m in matches: for p in matches[m]['pred']: for label_name in self.CLASS_LABELS: for p in matches[m]['pred'][label_name]: if 'filename' in p: pred_visited[p['filename']] = False for li, label_name in enumerate(self.CLASS_LABELS): y_true = np.empty(0) y_score = np.empty(0) hard_false_negatives = 0 has_gt = False has_pred = False for m in matches: pred_instances = matches[m]['pred'][label_name] gt_instances = matches[m]['gt'][label_name] # filter groups in ground truth gt_instances = [ gt for gt in gt_instances if gt['instance_id']>=1000 and gt['vert_count']>=min_region_size and gt['med_dist']<=distance_thresh and gt['dist_conf']>=distance_conf ] if gt_instances: has_gt = True if pred_instances: has_pred = True cur_true = np.ones ( len(gt_instances) ) cur_score = np.ones ( len(gt_instances) ) * (-float("inf")) cur_match = np.zeros( len(gt_instances) , dtype=np.bool ) # collect matches for (gti,gt) in enumerate(gt_instances): found_match = False num_pred = len(gt['matched_pred']) for pred in gt['matched_pred']: # greedy assignments if pred_visited[pred['filename']]: continue overlap = float(pred['intersection']) / (gt['vert_count']+pred['vert_count']-pred['intersection']) if overlap > overlap_th: confidence = pred['confidence'] # if already have a prediction for this gt, # the prediction with the lower score is automatically a false positive if cur_match[gti]: max_score = max( cur_score[gti] , confidence ) min_score = min( cur_score[gti] , confidence ) cur_score[gti] = max_score # append false positive cur_true = np.append(cur_true,0) cur_score = np.append(cur_score,min_score) cur_match = np.append(cur_match,True) # otherwise set score else: found_match = True cur_match[gti] = True cur_score[gti] = confidence pred_visited[pred['filename']] = True if not found_match: hard_false_negatives += 1 # remove non-matched ground truth instances cur_true = cur_true [ cur_match==True ] cur_score = cur_score[ cur_match==True ] # collect non-matched predictions as false positive for pred in pred_instances: found_gt = False for gt in pred['matched_gt']: overlap = float(gt['intersection']) / (gt['vert_count']+pred['vert_count']-gt['intersection']) if overlap > overlap_th: found_gt = True break if not found_gt: num_ignore = pred['void_intersection'] for gt in pred['matched_gt']: # group? 
if gt['instance_id'] < 1000: num_ignore += gt['intersection'] # small ground truth instances if gt['vert_count'] < min_region_size or gt['med_dist']>distance_thresh or gt['dist_conf']<distance_conf: num_ignore += gt['intersection'] proportion_ignore = float(num_ignore)/pred['vert_count'] # if not ignored append false positive if proportion_ignore <= overlap_th: cur_true = np.append(cur_true,0) confidence = pred["confidence"] cur_score = np.append(cur_score,confidence) # append to overall results y_true = np.append(y_true,cur_true) y_score = np.append(y_score,cur_score) # compute average precision if has_gt and has_pred: # compute precision recall curve first # sorting and cumsum score_arg_sort = np.argsort(y_score) y_score_sorted = y_score[score_arg_sort] y_true_sorted = y_true[score_arg_sort] y_true_sorted_cumsum = np.cumsum(y_true_sorted) # unique thresholds (thresholds,unique_indices) = np.unique( y_score_sorted , return_index=True ) num_prec_recall = len(unique_indices) + 1 # prepare precision recall num_examples = len(y_score_sorted) try: num_true_examples = y_true_sorted_cumsum[-1] except: num_true_examples = 0 precision = np.zeros(num_prec_recall) recall = np.zeros(num_prec_recall) # deal with the first point y_true_sorted_cumsum = np.append( y_true_sorted_cumsum , 0 ) # deal with remaining for idx_res,idx_scores in enumerate(unique_indices): cumsum = y_true_sorted_cumsum[idx_scores-1] tp = num_true_examples - cumsum fp = num_examples - idx_scores - tp fn = cumsum + hard_false_negatives p = float(tp)/(tp+fp) r = float(tp)/(tp+fn) precision[idx_res] = p recall [idx_res] = r # first point in curve is artificial precision[-1] = 1. recall [-1] = 0. # compute average of precision-recall curve recall_for_conv = np.copy(recall) recall_for_conv = np.append(recall_for_conv[0], recall_for_conv) recall_for_conv = np.append(recall_for_conv, 0.) 
stepWidths = np.convolve(recall_for_conv,[-0.5,0,0.5],'valid') # integrate is now simply a dot product ap_current = np.dot(precision, stepWidths) elif has_gt: ap_current = 0.0 else: ap_current = float('nan') ap[di,li,oi] = ap_current return ap def compute_averages(self, aps): d_inf = 0 o50 = np.where(np.isclose(self.overlaps,0.5)) o25 = np.where(np.isclose(self.overlaps,0.25)) oAllBut25 = np.where(np.logical_not(np.isclose(self.overlaps,0.25))) avg_dict = {} #avg_dict['all_ap'] = np.nanmean(aps[ d_inf,:,: ]) avg_dict['all_ap'] = np.nanmean(aps[ d_inf,:,oAllBut25]) avg_dict['all_ap_50%'] = np.nanmean(aps[ d_inf,:,o50]) avg_dict['all_ap_25%'] = np.nanmean(aps[ d_inf,:,o25]) avg_dict["classes"] = {} for (li,label_name) in enumerate(self.CLASS_LABELS): avg_dict["classes"][label_name] = {} #avg_dict["classes"][label_name]["ap"] = np.average(aps[ d_inf,li, :]) avg_dict["classes"][label_name]["ap"] = np.average(aps[ d_inf,li,oAllBut25]) avg_dict["classes"][label_name]["ap50%"] = np.average(aps[ d_inf,li,o50]) avg_dict["classes"][label_name]["ap25%"] = np.average(aps[ d_inf,li,o25]) return avg_dict def assign_instances_for_scan(self, scene_id): # get gt instances gt_ids = self.gt_instances[scene_id] gt_instances = util_3d.get_instances(gt_ids, self.VALID_CLASS_IDS, self.CLASS_LABELS, self.ID_TO_LABEL) # associate gt2pred = deepcopy(gt_instances) for label in gt2pred: for gt in gt2pred[label]: gt['matched_pred'] = [] pred2gt = {} for label in self.CLASS_LABELS: pred2gt[label] = [] num_pred_instances = 0 # mask of void labels in the groundtruth bool_void = np.logical_not(np.in1d(gt_ids//1000, self.VALID_CLASS_IDS)) # go thru all prediction masks for instance_id in self.pred_instances[scene_id]: label_id = int(self.pred_instances[scene_id][instance_id]['label_id']) conf = self.pred_instances[scene_id][instance_id]['conf'] if not label_id in self.ID_TO_LABEL: continue label_name = self.ID_TO_LABEL[label_id] # read the mask pred_mask = self.pred_instances[scene_id][instance_id]['pred_mask'] # convert to binary num = np.count_nonzero(pred_mask) if num < self.min_region_sizes[0]: continue # skip if empty pred_instance = {} pred_instance['filename'] = str(scene_id) + '/' + str(instance_id) pred_instance['pred_id'] = num_pred_instances pred_instance['label_id'] = label_id pred_instance['vert_count'] = num pred_instance['confidence'] = conf pred_instance['void_intersection'] = np.count_nonzero(np.logical_and(bool_void, pred_mask)) # matched gt instances matched_gt = [] # go thru all gt instances with matching label for (gt_num, gt_inst) in enumerate(gt2pred[label_name]): intersection = np.count_nonzero(np.logical_and(gt_ids == gt_inst['instance_id'], pred_mask)) if intersection > 0: gt_copy = gt_inst.copy() pred_copy = pred_instance.copy() gt_copy['intersection'] = intersection pred_copy['intersection'] = intersection matched_gt.append(gt_copy) gt2pred[label_name][gt_num]['matched_pred'].append(pred_copy) pred_instance['matched_gt'] = matched_gt num_pred_instances += 1 pred2gt[label_name].append(pred_instance) return gt2pred, pred2gt def print_results(self, avgs): sep = "" col1 = ":" lineLen = 64 logging.info("") logging.info("#"*lineLen) line = "" line += "{:<15}".format("what" ) + sep + col1 line += "{:>15}".format("AP" ) + sep line += "{:>15}".format("AP_50%" ) + sep line += "{:>15}".format("AP_25%" ) + sep logging.info(line) logging.info("#"*lineLen) for (li,label_name) in enumerate(self.CLASS_LABELS): ap_avg = avgs["classes"][label_name]["ap"] ap_50o = avgs["classes"][label_name]["ap50%"] ap_25o = 
avgs["classes"][label_name]["ap25%"] line = "{:<15}".format(label_name) + sep + col1 line += sep + "{:>15.3f}".format(ap_avg ) + sep line += sep + "{:>15.3f}".format(ap_50o ) + sep line += sep + "{:>15.3f}".format(ap_25o ) + sep logging.info(line) all_ap_avg = avgs["all_ap"] all_ap_50o = avgs["all_ap_50%"] all_ap_25o = avgs["all_ap_25%"] logging.info("-"*lineLen) line = "{:<15}".format("average") + sep + col1 line += "{:>15.3f}".format(all_ap_avg) + sep line += "{:>15.3f}".format(all_ap_50o) + sep line += "{:>15.3f}".format(all_ap_25o) + sep logging.info(line) logging.info("") @staticmethod def write_to_benchmark(output_path='benchmark_instance', scene_id=None, pred_inst={}): os.makedirs(output_path, exist_ok=True) os.makedirs(os.path.join(output_path, 'predicted_masks'), exist_ok=True) f = open(os.path.join(output_path, scene_id + '.txt'), 'w') for instance_id in pred_inst: # for pred instance id starts from 0; in gt valid instance id starts from 1 score = pred_inst[instance_id]['conf'] label = pred_inst[instance_id]['label_id'] mask = pred_inst[instance_id]['pred_mask'] f.write('predicted_masks/{}_{:03d}.txt {} {:.4f}'.format(scene_id, instance_id, label, score)) if instance_id < len(pred_inst) - 1: f.write('\n') util_3d.export_ids(os.path.join(output_path, 'predicted_masks', scene_id + '_%03d.txt' % (instance_id)), mask) f.close() def add_prediction(self, instance_info, id): self.pred_instances[id] = instance_info def add_gt(self, instance_info, id): self.gt_instances[id] = instance_info def add_gt_from_benchmark(self, scene_id): try: gt_file_path = '/rhome/jhou/.gt/gt_insts/' gt_file = os.path.join(gt_file_path, scene_id + '.txt') gt_ids = util_3d.load_ids(gt_file) except: gt_file_path = '/rhome/jhou/data/dataset/scannet/scannet_benchmark/gt_instance/' gt_file = os.path.join(gt_file_path, scene_id + '.txt') gt_ids = util_3d.load_ids(gt_file) self.add_gt(gt_ids, scene_id) def evaluate(self): print('evaluating', len(self.pred_instances), 'scans...') matches = {} for i, scene_id in enumerate(self.pred_instances): gt2pred, pred2gt = self.assign_instances_for_scan(scene_id) matches[scene_id] = {} matches[scene_id]['gt'] = gt2pred matches[scene_id]['pred'] = pred2gt sys.stdout.write("\rscans processed: {}".format(i+1)) sys.stdout.flush() print('') ap_scores = self.evaluate_matches(matches) avgs = self.compute_averages(ap_scores) # print self.print_results(avgs) return avgs['all_ap'], avgs['all_ap_50%'], avgs['all_ap_25%'] def write_result_file(avgs, filename): _SPLITTER = ',' with open(filename, 'w') as f: f.write(_SPLITTER.join(['class', 'class id', 'ap', 'ap50', 'ap25']) + '\n') for i in range(len(VALID_CLASS_IDS)): class_name = CLASS_LABELS[i] class_id = VALID_CLASS_IDS[i] ap = avgs["classes"][class_name]["ap"] ap50 = avgs["classes"][class_name]["ap50%"] ap25 = avgs["classes"][class_name]["ap25%"] f.write(_SPLITTER.join([str(x) for x in [class_name, class_id, ap, ap50, ap25]]) + '\n') def config(): parser = argparse.ArgumentParser() parser.add_argument('--pred_path', required=True, help='path to directory of predicted .txt files') parser.add_argument('--gt_path', required=True, help='path to directory of gt .txt files') parser.add_argument('--output_file', default='semantic_instance_evaluation.txt', help='output file [default: semantic_instance_evaluation.txt]') opt = parser.parse_args() return opt if __name__ == '__main__': opt = config() setup_logging() #-----------------scannet---------------------- CLASS_LABELS = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 
'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture'] VALID_CLASS_IDS = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]) evaluator = Evaluator(CLASS_LABELS=CLASS_LABELS, VALID_CLASS_IDS=VALID_CLASS_IDS) print('reading', len(os.listdir(opt.pred_path))-1, 'scans...') for i, pred_file in enumerate(os.listdir(opt.pred_path)): if os.path.isdir(os.path.join(opt.pred_path, pred_file)): continue scene_id = pred_file[:12] sys.stdout.write("\rscans read: {}".format(i+1)) sys.stdout.flush() gt_file = os.path.join(opt.gt_path, pred_file) gt_ids = util_3d.load_ids(gt_file) evaluator.add_gt(gt_ids, scene_id) instances = util_3d.read_instance_prediction_file(os.path.join(opt.pred_path,pred_file), opt.pred_path) for pred_mask_file in instances: # read the mask pred_mask = util_3d.load_ids(pred_mask_file) instances[pred_mask_file]['pred_mask'] = pred_mask evaluator.add_prediction(instances, scene_id) print('') _, _, _ = evaluator.evaluate()
ContrastiveSceneContexts-main
downstream/semseg/datasets/evaluation/evaluate_semantic_instance.py
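The tail of evaluate_matches above integrates each precision/recall curve with a centered step-width trick: recall is padded at the front with its first value and at the back with 0, convolved with [-0.5, 0, 0.5], and the result is dotted with precision. A small sketch of that step on an invented curve (the curve values and the function name are illustrative only):

import numpy as np

def average_precision(precision: np.ndarray, recall: np.ndarray) -> float:
    """Integrate a precision/recall curve as evaluate_matches does: each precision
    sample is weighted by half the recall span between its two neighbours."""
    r = np.copy(recall)
    r = np.append(r[0], r)      # duplicate the first recall value on the left
    r = np.append(r, 0.0)       # close the curve at recall 0 on the right
    step_widths = np.convolve(r, [-0.5, 0, 0.5], 'valid')
    return float(np.dot(precision, step_widths))

# Hypothetical curve at three score thresholds plus the artificial
# (precision=1, recall=0) end point that the evaluator appends.
precision = np.array([0.5, 0.75, 0.8, 1.0])
recall    = np.array([1.0, 0.66, 0.4, 0.0])
print(average_precision(precision, recall))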
import os, sys
import csv
try:
    import numpy as np
except:
    print("Failed to import numpy package.")
    sys.exit(-1)
try:
    import imageio
except:
    print("Please install the module 'imageio' for image processing, e.g.")
    print("pip install imageio")
    sys.exit(-1)


# print an error message and quit
def print_error(message, user_fault=False):
    sys.stderr.write('ERROR: ' + str(message) + '\n')
    if user_fault:
        sys.exit(2)
    sys.exit(-1)


# if string s represents an int
def represents_int(s):
    try:
        int(s)
        return True
    except ValueError:
        return False


def read_label_mapping(filename, label_from='raw_category', label_to='nyu40id'):
    assert os.path.isfile(filename)
    mapping = dict()
    with open(filename) as csvfile:
        reader = csv.DictReader(csvfile, delimiter='\t')
        for row in reader:
            mapping[row[label_from]] = int(row[label_to])
    # if ints convert
    if represents_int([key for key in mapping.keys()][0]):
        mapping = {int(k): v for k, v in mapping.items()}
    return mapping


# input: scene_types.txt or scene_types_all.txt
def read_scene_types_mapping(filename, remove_spaces=True):
    assert os.path.isfile(filename)
    mapping = dict()
    lines = open(filename).read().splitlines()
    lines = [line.split('\t') for line in lines]
    if remove_spaces:
        mapping = {x[1].strip(): int(x[0]) for x in lines}
    else:
        mapping = {x[1]: int(x[0]) for x in lines}
    return mapping


# color by label
def visualize_label_image(filename, image):
    height = image.shape[0]
    width = image.shape[1]
    vis_image = np.zeros([height, width, 3], dtype=np.uint8)
    color_palette = create_color_palette()
    for idx, color in enumerate(color_palette):
        vis_image[image == idx] = color
    imageio.imwrite(filename, vis_image)


# color by different instances (mod length of color palette)
def visualize_instance_image(filename, image):
    height = image.shape[0]
    width = image.shape[1]
    vis_image = np.zeros([height, width, 3], dtype=np.uint8)
    color_palette = create_color_palette()
    instances = np.unique(image)
    for idx, inst in enumerate(instances):
        vis_image[image == inst] = color_palette[inst % len(color_palette)]
    imageio.imwrite(filename, vis_image)
ContrastiveSceneContexts-main
downstream/semseg/datasets/evaluation/scannet_benchmark_utils/util.py
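A hypothetical usage of read_label_mapping above, assuming a tab-separated file in the style of ScanNet's label-mapping TSV; the file name and rows below are invented for illustration:

import csv

# Write a tiny two-column TSV with the header names read_label_mapping expects by default.
with open('labels_example.tsv', 'w', newline='') as f:
    writer = csv.writer(f, delimiter='\t')
    writer.writerow(['raw_category', 'nyu40id'])
    writer.writerow(['office chair', '5'])
    writer.writerow(['coffee table', '7'])

# mapping = read_label_mapping('labels_example.tsv')
# -> {'office chair': 5, 'coffee table': 7}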
import os, sys import json try: import numpy as np except: print("Failed to import numpy package.") sys.exit(-1) try: from plyfile import PlyData, PlyElement except: print("Please install the module 'plyfile' for PLY i/o, e.g.") print("pip install plyfile") sys.exit(-1) from . import util # matrix: 4x4 np array # points Nx3 np array def transform_points(matrix, points): assert len(points.shape) == 2 and points.shape[1] == 3 num_points = points.shape[0] p = np.concatenate([points, np.ones((num_points, 1))], axis=1) p = np.matmul(matrix, np.transpose(p)) p = np.transpose(p) p[:,:3] /= p[:,3,None] return p[:,:3] def export_ids(filename, ids): with open(filename, 'w') as f: for id in ids: f.write('%d\n' % id) def load_ids(filename): ids = open(filename).read().splitlines() ids = np.array(ids, dtype=np.int64) return ids def read_mesh_vertices(filename): assert os.path.isfile(filename) with open(filename, 'rb') as f: plydata = PlyData.read(f) num_verts = plydata['vertex'].count vertices = np.zeros(shape=[num_verts, 3], dtype=np.float32) vertices[:,0] = plydata['vertex'].data['x'] vertices[:,1] = plydata['vertex'].data['y'] vertices[:,2] = plydata['vertex'].data['z'] return vertices # export 3d instance labels for instance evaluation def export_instance_ids_for_eval(filename, label_ids, instance_ids): assert label_ids.shape[0] == instance_ids.shape[0] output_mask_path_relative = 'predicted_masks' name = os.path.splitext(os.path.basename(filename))[0] output_mask_path = os.path.join(os.path.dirname(filename), output_mask_path_relative) if not os.path.isdir(output_mask_path): os.mkdir(output_mask_path) insts = np.unique(instance_ids) zero_mask = np.zeros(shape=(instance_ids.shape[0]), dtype=np.int32) with open(filename, 'w') as f: for idx, inst_id in enumerate(insts): if inst_id == 0: # 0 -> no instance for this vertex continue loc = np.where(instance_ids == inst_id) label_id = label_ids[loc[0][0]] # write mask indexing output_mask_file_relavtive = os.path.join(output_mask_path_relative, name + '_' + str(idx) + '.txt') f.write('%s %d %f\n' % (output_mask_file_relavtive, label_id, 1.0)) # write mask mask = np.copy(zero_mask) mask[loc[0]] = 1 output_mask_file = os.path.join(output_mask_path, name + '_' + str(idx) + '.txt') export_ids(output_mask_file, mask) # ------------ Instance Utils ------------ # class Instance(object): instance_id = 0 label_id = 0 vert_count = 0 med_dist = -1 dist_conf = 0.0 def __init__(self, mesh_vert_instances, instance_id): if (instance_id == -1): return self.instance_id = int(instance_id) self.label_id = int(self.get_label_id(instance_id)) self.vert_count = int(self.get_instance_verts(mesh_vert_instances, instance_id)) def get_label_id(self, instance_id): return int(instance_id // 1000) def get_instance_verts(self, mesh_vert_instances, instance_id): return (mesh_vert_instances == instance_id).sum() def to_json(self): return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4) def to_dict(self): dict = {} dict["instance_id"] = self.instance_id dict["label_id"] = self.label_id dict["vert_count"] = self.vert_count dict["med_dist"] = self.med_dist dict["dist_conf"] = self.dist_conf return dict def from_json(self, data): self.instance_id = int(data["instance_id"]) self.label_id = int(data["label_id"]) self.vert_count = int(data["vert_count"]) if ("med_dist" in data): self.med_dist = float(data["med_dist"]) self.dist_conf = float(data["dist_conf"]) def __str__(self): return "("+str(self.instance_id)+")" def read_instance_prediction_file(filename, pred_path): 
lines = open(filename).read().splitlines() instance_info = {} abs_pred_path = os.path.abspath(pred_path) for line in lines: parts = line.split(' ') if len(parts) != 3: util.print_error('invalid instance prediction file. Expected (per line): [rel path prediction] [label id prediction] [confidence prediction]') if os.path.isabs(parts[0]): util.print_error('invalid instance prediction file. First entry in line must be a relative path') mask_file = os.path.join(os.path.dirname(filename), parts[0]) mask_file = os.path.abspath(mask_file) # check that mask_file lives inside prediction path if os.path.commonprefix([mask_file, abs_pred_path]) != abs_pred_path: util.print_error('predicted mask {} in prediction text file {} points outside of prediction path.'.format(mask_file,filename)) info = {} info["label_id"] = int(float(parts[1])) info["conf"] = float(parts[2]) instance_info[mask_file] = info return instance_info def get_instances(ids, class_ids, class_labels, id2label): instances = {} for label in class_labels: instances[label] = [] instance_ids = np.unique(ids) for id in instance_ids: if id == 0: continue inst = Instance(ids, id) if inst.label_id in class_ids: instances[id2label[inst.label_id]].append(inst.to_dict()) return instances
ContrastiveSceneContexts-main
downstream/semseg/datasets/evaluation/scannet_benchmark_utils/util_3d.py
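The instance utilities above assume ScanNet's packed instance ids: a ground-truth id encodes the semantic label and a per-scene instance index as label_id * 1000 + index, which is why Instance.get_label_id divides by 1000 and why ids below 1000 are treated as groups or void in the evaluator. A short sketch of that convention (the packing helpers are invented names, not part of the repository):

import numpy as np

def pack_instance_id(label_id: int, instance_index: int) -> int:
    return label_id * 1000 + instance_index

def unpack_label_id(instance_id: np.ndarray) -> np.ndarray:
    return instance_id // 1000          # same rule as Instance.get_label_id

gt_ids = np.array([pack_instance_id(5, 1), pack_instance_id(5, 2), pack_instance_id(39, 1), 0])
print(unpack_label_id(gt_ids))          # -> [ 5  5 39  0]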
# Evaluates semantic label task # Input: # - path to .txt prediction files # - path to .txt ground truth files # - output file to write results to # Note that only the valid classes are used for evaluation, # i.e., any ground truth label not in the valid label set # is ignored in the evaluation. # # example usage: evaluate_semantic_label.py --scan_path [path to scan data] --output_file [output file] # python imports import math import logging import os, sys, argparse import inspect try: import numpy as np except: print("Failed to import numpy package.") sys.exit(-1) try: from itertools import izip except ImportError: izip = zip #currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) #parentdir = os.path.dirname(currentdir) #sys.path.insert(0,parentdir) import util_3d import util class Evaluator: def __init__(self, CLASS_LABELS, VALID_CLASS_IDS): self.CLASS_LABELS = CLASS_LABELS self.VALID_CLASS_IDS = VALID_CLASS_IDS self.UNKNOWN_ID = np.max(VALID_CLASS_IDS) + 1 self.gt = {} self.pred = {} max_id = self.UNKNOWN_ID self.confusion = np.zeros((max_id+1, max_id+1), dtype=np.ulonglong) def update_confusion(self, pred_ids, gt_ids, sceneId=None): # sanity checks if not pred_ids.shape == gt_ids.shape: util.print_error('%s: number of predicted values does not match number of vertices' % pred_file, user_fault=True) n = self.confusion.shape[0] k = (gt_ids >= 0) & (gt_ids < n) temporal = np.bincount(n * gt_ids[k].astype(int) + pred_ids[k], minlength=n**2).reshape(n, n) for valid_class_row in self.VALID_CLASS_IDS: for valid_class_col in self.VALID_CLASS_IDS: self.confusion[valid_class_row][valid_class_col] += temporal[valid_class_row][valid_class_col] @staticmethod def write_to_benchmark(base='benchmark_segmentation', sceneId=None, pred_ids=None): os.makedirs(base, exist_ok=True) util_3d.export_ids('{}.txt'.format(os.path.join(base, sceneId)), pred_ids) def get_iou(self, label_id, confusion): if not label_id in self.VALID_CLASS_IDS: return float('nan') # #true positives tp = np.longlong(confusion[label_id, label_id]) # #false negatives fn = np.longlong(confusion[label_id, :].sum()) - tp # #false positives not_ignored = [l for l in self.VALID_CLASS_IDS if not l == label_id] fp = np.longlong(confusion[not_ignored, label_id].sum()) denom = (tp + fp + fn) if denom == 0: return float('nan') return (float(tp) / denom, tp, denom) def write_result_file(self, confusion, ious, filename): with open(filename, 'w') as f: f.write('iou scores\n') for i in range(len(self.VALID_CLASS_IDS)): label_id = self.VALID_CLASS_IDS[i] label_name = self.CLASS_LABELS[i] iou = ious[label_name][0] f.write('{0:<14s}({1:<2d}): {2:>5.3f}\n'.format(label_name, label_id, iou)) f.write("{0:<14s}: {1:>5.3f}".format('mean', np.array([ious[k][0] for k in ious]).mean())) f.write('\nconfusion matrix\n') f.write('\t\t\t') for i in range(len(self.VALID_CLASS_IDS)): #f.write('\t{0:<14s}({1:<2d})'.format(CLASS_LABELS[i], VALID_CLASS_IDS[i])) f.write('{0:<8d}'.format(self.VALID_CLASS_IDS[i])) f.write('\n') for r in range(len(self.VALID_CLASS_IDS)): f.write('{0:<14s}({1:<2d})'.format(self.CLASS_LABELS[r], self.VALID_CLASS_IDS[r])) for c in range(len(self.VALID_CLASS_IDS)): f.write('\t{0:>5.3f}'.format(confusion[self.VALID_CLASS_IDS[r],self.VALID_CLASS_IDS[c]])) f.write('\n') print('wrote results to', filename) def evaluate_confusion(self, output_file=None): class_ious = {} counter = 0 summation = 0 for i in range(len(self.VALID_CLASS_IDS)): label_name = self.CLASS_LABELS[i] label_id = self.VALID_CLASS_IDS[i] 
class_ious[label_name] = self.get_iou(label_id, self.confusion) # print logging.info('classes IoU') logging.info('----------------------------') for i in range(len(self.VALID_CLASS_IDS)): label_name = self.CLASS_LABELS[i] try: logging.info('{0:<14s}: {1:>5.3f} ({2:>6d}/{3:<6d})'.format(label_name, class_ious[label_name][0], class_ious[label_name][1], class_ious[label_name][2])) summation += class_ious[label_name][0] counter += 1 except: logging.info('{0:<14s}: nan ( nan/nan )'.format(label_name)) logging.info("{0:<14s}: {1:>5.3f}".format('mean', summation / counter)) if output_file: self.write_result_file(self.confusion, class_ious, output_file) return summation / counter def config(): parser = argparse.ArgumentParser() parser.add_argument('--pred_path', required=True, help='path to directory of predicted .txt files') parser.add_argument('--gt_path', required=True, help='path to gt files') parser.add_argument('--output_file', type=str, default='./semantic_label_evaluation.txt') opt = parser.parse_args() return opt def main(): opt = config() ch = logging.StreamHandler(sys.stdout) logging.getLogger().setLevel(logging.INFO) logging.basicConfig( format=os.uname()[1].split('.')[0] + ' %(asctime)s %(message)s', datefmt='%m/%d %H:%M:%S', handlers=[ch]) #------------------------- ScanNet -------------------------- CLASS_LABELS = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture'] VALID_CLASS_IDS = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]) evaluator = Evaluator(CLASS_LABELS=CLASS_LABELS, VALID_CLASS_IDS=VALID_CLASS_IDS) print('reading', len(os.listdir(opt.pred_path))-1, 'scans...') for i, pred_file in enumerate(os.listdir(opt.pred_path)): if pred_file == 'semantic_label_evaluation.txt': continue gt_file = os.path.join(opt.gt_path, pred_file) if not os.path.isfile(gt_file): util.print_error('Result file {} does not match any gt file'.format(pred_file), user_fault=True) gt_ids = util_3d.load_ids(gt_file) pred_file = os.path.join(opt.pred_path, pred_file) pred_ids = util_3d.load_ids(pred_file) evaluator.update_confusion(pred_ids, gt_ids, pred_file.split('.')[0]) sys.stdout.write("\rscans processed: {}".format(i+1)) sys.stdout.flush() # evaluate evaluator.evaluate_confusion(opt.output_file) if __name__ == '__main__': main()
ContrastiveSceneContexts-main
downstream/semseg/datasets/evaluation/scannet_benchmark_utils/scripts/evaluate_semantic_label.py
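update_confusion above accumulates the confusion matrix with a bincount trick: each (gt, pred) pair is flattened to the single index n * gt + pred, counted, and reshaped to n x n. A toy version below; it omits the extra step in the script that copies only the rows and columns belonging to the valid ScanNet class ids:

import numpy as np

def update_confusion(confusion: np.ndarray, gt_ids: np.ndarray, pred_ids: np.ndarray) -> None:
    """Accumulate gt/pred label pairs into a dense confusion matrix in place."""
    n = confusion.shape[0]
    k = (gt_ids >= 0) & (gt_ids < n)                       # drop out-of-range ground truth
    confusion += np.bincount(n * gt_ids[k].astype(int) + pred_ids[k],
                             minlength=n ** 2).reshape(n, n)

conf = np.zeros((4, 4), dtype=np.int64)
update_confusion(conf, gt_ids=np.array([1, 1, 2, 3]), pred_ids=np.array([1, 2, 2, 3]))
print(conf)   # rows = ground truth, cols = prediction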
import os, sys
import csv
try:
    import numpy as np
except:
    print("Failed to import numpy package.")
    sys.exit(-1)
try:
    import imageio
except:
    print("Please install the module 'imageio' for image processing, e.g.")
    print("pip install imageio")
    sys.exit(-1)


# print an error message and quit
def print_error(message, user_fault=False):
    sys.stderr.write('ERROR: ' + str(message) + '\n')
    if user_fault:
        sys.exit(2)
    sys.exit(-1)


# if string s represents an int
def represents_int(s):
    try:
        int(s)
        return True
    except ValueError:
        return False


def read_label_mapping(filename, label_from='raw_category', label_to='nyu40id'):
    assert os.path.isfile(filename)
    mapping = dict()
    with open(filename) as csvfile:
        reader = csv.DictReader(csvfile, delimiter='\t')
        for row in reader:
            mapping[row[label_from]] = int(row[label_to])
    # if ints convert
    if represents_int([key for key in mapping.keys()][0]):
        mapping = {int(k): v for k, v in mapping.items()}
    return mapping


# input: scene_types.txt or scene_types_all.txt
def read_scene_types_mapping(filename, remove_spaces=True):
    assert os.path.isfile(filename)
    mapping = dict()
    lines = open(filename).read().splitlines()
    lines = [line.split('\t') for line in lines]
    if remove_spaces:
        mapping = {x[1].strip(): int(x[0]) for x in lines}
    else:
        mapping = {x[1]: int(x[0]) for x in lines}
    return mapping


# color by label
def visualize_label_image(filename, image):
    height = image.shape[0]
    width = image.shape[1]
    vis_image = np.zeros([height, width, 3], dtype=np.uint8)
    color_palette = create_color_palette()
    for idx, color in enumerate(color_palette):
        vis_image[image == idx] = color
    imageio.imwrite(filename, vis_image)


# color by different instances (mod length of color palette)
def visualize_instance_image(filename, image):
    height = image.shape[0]
    width = image.shape[1]
    vis_image = np.zeros([height, width, 3], dtype=np.uint8)
    color_palette = create_color_palette()
    instances = np.unique(image)
    for idx, inst in enumerate(instances):
        vis_image[image == inst] = color_palette[inst % len(color_palette)]
    imageio.imwrite(filename, vis_image)
ContrastiveSceneContexts-main
downstream/semseg/datasets/evaluation/scannet_benchmark_utils/scripts/util.py
import os, sys import json try: import numpy as np except: print("Failed to import numpy package.") sys.exit(-1) try: from plyfile import PlyData, PlyElement except: print("Please install the module 'plyfile' for PLY i/o, e.g.") print("pip install plyfile") sys.exit(-1) import util # matrix: 4x4 np array # points Nx3 np array def transform_points(matrix, points): assert len(points.shape) == 2 and points.shape[1] == 3 num_points = points.shape[0] p = np.concatenate([points, np.ones((num_points, 1))], axis=1) p = np.matmul(matrix, np.transpose(p)) p = np.transpose(p) p[:,:3] /= p[:,3,None] return p[:,:3] def export_ids(filename, ids): with open(filename, 'w') as f: for id in ids: f.write('%d\n' % id) def load_ids(filename): ids = open(filename).read().splitlines() ids = np.array(ids, dtype=np.int64) return ids def read_mesh_vertices(filename): assert os.path.isfile(filename) with open(filename, 'rb') as f: plydata = PlyData.read(f) num_verts = plydata['vertex'].count vertices = np.zeros(shape=[num_verts, 3], dtype=np.float32) vertices[:,0] = plydata['vertex'].data['x'] vertices[:,1] = plydata['vertex'].data['y'] vertices[:,2] = plydata['vertex'].data['z'] return vertices # export 3d instance labels for instance evaluation def export_instance_ids_for_eval(filename, label_ids, instance_ids): assert label_ids.shape[0] == instance_ids.shape[0] output_mask_path_relative = 'predicted_masks' name = os.path.splitext(os.path.basename(filename))[0] output_mask_path = os.path.join(os.path.dirname(filename), output_mask_path_relative) if not os.path.isdir(output_mask_path): os.mkdir(output_mask_path) insts = np.unique(instance_ids) zero_mask = np.zeros(shape=(instance_ids.shape[0]), dtype=np.int32) with open(filename, 'w') as f: for idx, inst_id in enumerate(insts): if inst_id == 0: # 0 -> no instance for this vertex continue loc = np.where(instance_ids == inst_id) label_id = label_ids[loc[0][0]] # write mask indexing output_mask_file_relavtive = os.path.join(output_mask_path_relative, name + '_' + str(idx) + '.txt') f.write('%s %d %f\n' % (output_mask_file_relavtive, label_id, 1.0)) # write mask mask = np.copy(zero_mask) mask[loc[0]] = 1 output_mask_file = os.path.join(output_mask_path, name + '_' + str(idx) + '.txt') export_ids(output_mask_file, mask) # ------------ Instance Utils ------------ # class Instance(object): instance_id = 0 label_id = 0 vert_count = 0 med_dist = -1 dist_conf = 0.0 def __init__(self, mesh_vert_instances, instance_id): if (instance_id == -1): return self.instance_id = int(instance_id) self.label_id = int(self.get_label_id(instance_id)) self.vert_count = int(self.get_instance_verts(mesh_vert_instances, instance_id)) def get_label_id(self, instance_id): return int(instance_id // 1000) def get_instance_verts(self, mesh_vert_instances, instance_id): return (mesh_vert_instances == instance_id).sum() def to_json(self): return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4) def to_dict(self): dict = {} dict["instance_id"] = self.instance_id dict["label_id"] = self.label_id dict["vert_count"] = self.vert_count dict["med_dist"] = self.med_dist dict["dist_conf"] = self.dist_conf return dict def from_json(self, data): self.instance_id = int(data["instance_id"]) self.label_id = int(data["label_id"]) self.vert_count = int(data["vert_count"]) if ("med_dist" in data): self.med_dist = float(data["med_dist"]) self.dist_conf = float(data["dist_conf"]) def __str__(self): return "("+str(self.instance_id)+")" def read_instance_prediction_file(filename, pred_path): lines = 
open(filename).read().splitlines() instance_info = {} abs_pred_path = os.path.abspath(pred_path) for line in lines: parts = line.split(' ') if len(parts) != 3: util.print_error('invalid instance prediction file. Expected (per line): [rel path prediction] [label id prediction] [confidence prediction]') if os.path.isabs(parts[0]): util.print_error('invalid instance prediction file. First entry in line must be a relative path') mask_file = os.path.join(os.path.dirname(filename), parts[0]) mask_file = os.path.abspath(mask_file) # check that mask_file lives inside prediction path if os.path.commonprefix([mask_file, abs_pred_path]) != abs_pred_path: util.print_error('predicted mask {} in prediction text file {} points outside of prediction path.'.format(mask_file,filename)) info = {} info["label_id"] = int(float(parts[1])) info["conf"] = float(parts[2]) instance_info[mask_file] = info return instance_info def get_instances(ids, class_ids, class_labels, id2label): instances = {} for label in class_labels: instances[label] = [] instance_ids = np.unique(ids) for id in instance_ids: if id == 0: continue inst = Instance(ids, id) if inst.label_id in class_ids: instances[id2label[inst.label_id]].append(inst.to_dict()) return instances
ContrastiveSceneContexts-main
downstream/semseg/datasets/evaluation/scannet_benchmark_utils/scripts/util_3d.py
# Evaluates semantic instance task # Adapted from the CityScapes evaluation: https://github.com/mcordts/cityscapesScripts/tree/master/cityscapesscripts/evaluation # Input: # - path to .txt prediction files # - path to .txt ground truth files # - output file to write results to # Each .txt prediction file look like: # [(pred0) rel. path to pred. mask over verts as .txt] [(pred0) label id] [(pred0) confidence] # [(pred1) rel. path to pred. mask over verts as .txt] [(pred1) label id] [(pred1) confidence] # [(pred2) rel. path to pred. mask over verts as .txt] [(pred2) label id] [(pred2) confidence] # ... # # NOTE: The prediction files must live in the root of the given prediction path. # Predicted mask .txt files must live in a subfolder. # Additionally, filenames must not contain spaces. # The relative paths to predicted masks must contain one integer per line, # where each line corresponds to vertices in the *_vh_clean_2.ply (in that order). # Non-zero integers indicate part of the predicted instance. # The label ids specify the class of the corresponding mask. # Confidence is a float confidence score of the mask. # # Note that only the valid classes are used for evaluation, # i.e., any ground truth label not in the valid label set # is ignored in the evaluation. # # example usage: evaluate_semantic_instance.py --scan_path [path to scan data] --output_file [output file] # python imports import logging import math import os, sys, argparse import inspect from copy import deepcopy import argparse import numpy as np #currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) #parentdir = os.path.dirname(currentdir) #sys.path.insert(0,parentdir) import util_3d import util def setup_logging(): ch = logging.StreamHandler(sys.stdout) logging.getLogger().setLevel(logging.INFO) logging.basicConfig( format=os.uname()[1].split('.')[0] + ' %(asctime)s %(message)s', datefmt='%m/%d %H:%M:%S', handlers=[ch]) class Evaluator: # ---------- Evaluation params ---------- # # overlaps for evaluation overlaps = np.append(np.arange(0.5,0.95,0.05), 0.25) # minimum region size for evaluation [verts] min_region_sizes = np.array( [ 100 ] ) # distance thresholds [m] distance_threshes = np.array( [ float('inf') ] ) # distance confidences distance_confs = np.array( [ -float('inf') ] ) def __init__(self, CLASS_LABELS, VALID_CLASS_IDS, benchmark=False): # ---------- Label info ---------- # #CLASS_LABELS = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door', # 'window', 'bookshelf', 'picture', 'counter', # 'desk', 'curtain', 'refrigerator', 'shower curtain', # 'toilet', 'sink', 'bathtub', 'otherfurniture'] #VALID_CLASS_IDS = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]) self.CLASS_LABELS = CLASS_LABELS self.VALID_CLASS_IDS = VALID_CLASS_IDS self.ID_TO_LABEL = {} self.LABEL_TO_ID = {} for i in range(len(VALID_CLASS_IDS)): self.LABEL_TO_ID[CLASS_LABELS[i]] = VALID_CLASS_IDS[i] self.ID_TO_LABEL[VALID_CLASS_IDS[i]] = CLASS_LABELS[i] self.pred_instances = {} self.gt_instances = {} self.benchmark = benchmark def evaluate_matches(self, matches): # results: class x overlap ap = np.zeros( (len(self.distance_threshes) , len(self.CLASS_LABELS) , len(self.overlaps)) , np.float ) for di, (min_region_size, distance_thresh, distance_conf) in enumerate(zip(self.min_region_sizes, self.distance_threshes, self.distance_confs)): for oi, overlap_th in enumerate(self.overlaps): pred_visited = {} for m in matches: for p in matches[m]['pred']: for label_name in self.CLASS_LABELS: for p in 
matches[m]['pred'][label_name]: if 'filename' in p: pred_visited[p['filename']] = False for li, label_name in enumerate(self.CLASS_LABELS): y_true = np.empty(0) y_score = np.empty(0) hard_false_negatives = 0 has_gt = False has_pred = False for m in matches: pred_instances = matches[m]['pred'][label_name] gt_instances = matches[m]['gt'][label_name] # filter groups in ground truth gt_instances = [ gt for gt in gt_instances if gt['instance_id']>=1000 and gt['vert_count']>=min_region_size and gt['med_dist']<=distance_thresh and gt['dist_conf']>=distance_conf ] if gt_instances: has_gt = True if pred_instances: has_pred = True cur_true = np.ones ( len(gt_instances) ) cur_score = np.ones ( len(gt_instances) ) * (-float("inf")) cur_match = np.zeros( len(gt_instances) , dtype=np.bool ) # collect matches for (gti,gt) in enumerate(gt_instances): found_match = False num_pred = len(gt['matched_pred']) for pred in gt['matched_pred']: # greedy assignments if pred_visited[pred['filename']]: continue overlap = float(pred['intersection']) / (gt['vert_count']+pred['vert_count']-pred['intersection']) if overlap > overlap_th: confidence = pred['confidence'] # if already have a prediction for this gt, # the prediction with the lower score is automatically a false positive if cur_match[gti]: max_score = max( cur_score[gti] , confidence ) min_score = min( cur_score[gti] , confidence ) cur_score[gti] = max_score # append false positive cur_true = np.append(cur_true,0) cur_score = np.append(cur_score,min_score) cur_match = np.append(cur_match,True) # otherwise set score else: found_match = True cur_match[gti] = True cur_score[gti] = confidence pred_visited[pred['filename']] = True if not found_match: hard_false_negatives += 1 # remove non-matched ground truth instances cur_true = cur_true [ cur_match==True ] cur_score = cur_score[ cur_match==True ] # collect non-matched predictions as false positive for pred in pred_instances: found_gt = False for gt in pred['matched_gt']: overlap = float(gt['intersection']) / (gt['vert_count']+pred['vert_count']-gt['intersection']) if overlap > overlap_th: found_gt = True break if not found_gt: num_ignore = pred['void_intersection'] for gt in pred['matched_gt']: # group? 
if gt['instance_id'] < 1000: num_ignore += gt['intersection'] # small ground truth instances if gt['vert_count'] < min_region_size or gt['med_dist']>distance_thresh or gt['dist_conf']<distance_conf: num_ignore += gt['intersection'] proportion_ignore = float(num_ignore)/pred['vert_count'] # if not ignored append false positive if proportion_ignore <= overlap_th: cur_true = np.append(cur_true,0) confidence = pred["confidence"] cur_score = np.append(cur_score,confidence) # append to overall results y_true = np.append(y_true,cur_true) y_score = np.append(y_score,cur_score) # compute average precision if has_gt and has_pred: # compute precision recall curve first # sorting and cumsum score_arg_sort = np.argsort(y_score) y_score_sorted = y_score[score_arg_sort] y_true_sorted = y_true[score_arg_sort] y_true_sorted_cumsum = np.cumsum(y_true_sorted) # unique thresholds (thresholds,unique_indices) = np.unique( y_score_sorted , return_index=True ) num_prec_recall = len(unique_indices) + 1 # prepare precision recall num_examples = len(y_score_sorted) try: num_true_examples = y_true_sorted_cumsum[-1] except: num_true_examples = 0 precision = np.zeros(num_prec_recall) recall = np.zeros(num_prec_recall) # deal with the first point y_true_sorted_cumsum = np.append( y_true_sorted_cumsum , 0 ) # deal with remaining for idx_res,idx_scores in enumerate(unique_indices): cumsum = y_true_sorted_cumsum[idx_scores-1] tp = num_true_examples - cumsum fp = num_examples - idx_scores - tp fn = cumsum + hard_false_negatives p = float(tp)/(tp+fp) r = float(tp)/(tp+fn) precision[idx_res] = p recall [idx_res] = r # first point in curve is artificial precision[-1] = 1. recall [-1] = 0. # compute average of precision-recall curve recall_for_conv = np.copy(recall) recall_for_conv = np.append(recall_for_conv[0], recall_for_conv) recall_for_conv = np.append(recall_for_conv, 0.) 
stepWidths = np.convolve(recall_for_conv,[-0.5,0,0.5],'valid') # integrate is now simply a dot product ap_current = np.dot(precision, stepWidths) elif has_gt: ap_current = 0.0 else: ap_current = float('nan') ap[di,li,oi] = ap_current return ap def compute_averages(self, aps): d_inf = 0 o50 = np.where(np.isclose(self.overlaps,0.5)) o25 = np.where(np.isclose(self.overlaps,0.25)) oAllBut25 = np.where(np.logical_not(np.isclose(self.overlaps,0.25))) avg_dict = {} #avg_dict['all_ap'] = np.nanmean(aps[ d_inf,:,: ]) avg_dict['all_ap'] = np.nanmean(aps[ d_inf,:,oAllBut25]) avg_dict['all_ap_50%'] = np.nanmean(aps[ d_inf,:,o50]) avg_dict['all_ap_25%'] = np.nanmean(aps[ d_inf,:,o25]) avg_dict["classes"] = {} for (li,label_name) in enumerate(self.CLASS_LABELS): avg_dict["classes"][label_name] = {} #avg_dict["classes"][label_name]["ap"] = np.average(aps[ d_inf,li, :]) avg_dict["classes"][label_name]["ap"] = np.average(aps[ d_inf,li,oAllBut25]) avg_dict["classes"][label_name]["ap50%"] = np.average(aps[ d_inf,li,o50]) avg_dict["classes"][label_name]["ap25%"] = np.average(aps[ d_inf,li,o25]) return avg_dict def assign_instances_for_scan(self, scene_id): # get gt instances gt_ids = self.gt_instances[scene_id] gt_instances = util_3d.get_instances(gt_ids, self.VALID_CLASS_IDS, self.CLASS_LABELS, self.ID_TO_LABEL) # associate gt2pred = deepcopy(gt_instances) for label in gt2pred: for gt in gt2pred[label]: gt['matched_pred'] = [] pred2gt = {} for label in self.CLASS_LABELS: pred2gt[label] = [] num_pred_instances = 0 # mask of void labels in the groundtruth bool_void = np.logical_not(np.in1d(gt_ids//1000, self.VALID_CLASS_IDS)) # go thru all prediction masks for instance_id in self.pred_instances[scene_id]: label_id = int(self.pred_instances[scene_id][instance_id]['label_id']) conf = self.pred_instances[scene_id][instance_id]['conf'] if not label_id in self.ID_TO_LABEL: continue label_name = self.ID_TO_LABEL[label_id] # read the mask pred_mask = self.pred_instances[scene_id][instance_id]['pred_mask'] # convert to binary num = np.count_nonzero(pred_mask) if num < self.min_region_sizes[0]: continue # skip if empty pred_instance = {} pred_instance['filename'] = str(scene_id) + '/' + str(instance_id) pred_instance['pred_id'] = num_pred_instances pred_instance['label_id'] = label_id pred_instance['vert_count'] = num pred_instance['confidence'] = conf pred_instance['void_intersection'] = np.count_nonzero(np.logical_and(bool_void, pred_mask)) # matched gt instances matched_gt = [] # go thru all gt instances with matching label for (gt_num, gt_inst) in enumerate(gt2pred[label_name]): intersection = np.count_nonzero(np.logical_and(gt_ids == gt_inst['instance_id'], pred_mask)) if intersection > 0: gt_copy = gt_inst.copy() pred_copy = pred_instance.copy() gt_copy['intersection'] = intersection pred_copy['intersection'] = intersection matched_gt.append(gt_copy) gt2pred[label_name][gt_num]['matched_pred'].append(pred_copy) pred_instance['matched_gt'] = matched_gt num_pred_instances += 1 pred2gt[label_name].append(pred_instance) return gt2pred, pred2gt def print_results(self, avgs): sep = "" col1 = ":" lineLen = 64 logging.info("") logging.info("#"*lineLen) line = "" line += "{:<15}".format("what" ) + sep + col1 line += "{:>15}".format("AP" ) + sep line += "{:>15}".format("AP_50%" ) + sep line += "{:>15}".format("AP_25%" ) + sep logging.info(line) logging.info("#"*lineLen) for (li,label_name) in enumerate(self.CLASS_LABELS): ap_avg = avgs["classes"][label_name]["ap"] ap_50o = avgs["classes"][label_name]["ap50%"] ap_25o = 
avgs["classes"][label_name]["ap25%"] line = "{:<15}".format(label_name) + sep + col1 line += sep + "{:>15.3f}".format(ap_avg ) + sep line += sep + "{:>15.3f}".format(ap_50o ) + sep line += sep + "{:>15.3f}".format(ap_25o ) + sep logging.info(line) all_ap_avg = avgs["all_ap"] all_ap_50o = avgs["all_ap_50%"] all_ap_25o = avgs["all_ap_25%"] logging.info("-"*lineLen) line = "{:<15}".format("average") + sep + col1 line += "{:>15.3f}".format(all_ap_avg) + sep line += "{:>15.3f}".format(all_ap_50o) + sep line += "{:>15.3f}".format(all_ap_25o) + sep logging.info(line) logging.info("") @staticmethod def write_to_benchmark(output_path='benchmark_instance', scene_id=None, pred_inst={}): os.makedirs(output_path, exist_ok=True) os.makedirs(os.path.join(output_path, 'predicted_masks'), exist_ok=True) f = open(os.path.join(output_path, scene_id + '.txt'), 'w') for instance_id in pred_inst: # for pred instance id starts from 0; in gt valid instance id starts from 1 score = pred_inst[instance_id]['conf'] label = pred_inst[instance_id]['label_id'] mask = pred_inst[instance_id]['pred_mask'] f.write('predicted_masks/{}_{:03d}.txt {} {:.4f}'.format(scene_id, instance_id, label, score)) if instance_id < len(pred_inst) - 1: f.write('\n') util_3d.export_ids(os.path.join(output_path, 'predicted_masks', scene_id + '_%03d.txt' % (instance_id)), mask) f.close() def add_prediction(self, instance_info, id): self.pred_instances[id] = instance_info def add_gt(self, instance_info, id): self.gt_instances[id] = instance_info def evaluate(self): print('evaluating', len(self.pred_instances), 'scans...') matches = {} for i, scene_id in enumerate(self.pred_instances): gt2pred, pred2gt = self.assign_instances_for_scan(scene_id) matches[scene_id] = {} matches[scene_id]['gt'] = gt2pred matches[scene_id]['pred'] = pred2gt sys.stdout.write("\rscans processed: {}".format(i+1)) sys.stdout.flush() print('') ap_scores = self.evaluate_matches(matches) avgs = self.compute_averages(ap_scores) # print self.print_results(avgs) return avgs['all_ap'], avgs['all_ap_50%'], avgs['all_ap_25%'] def write_result_file(avgs, filename): _SPLITTER = ',' with open(filename, 'w') as f: f.write(_SPLITTER.join(['class', 'class id', 'ap', 'ap50', 'ap25']) + '\n') for i in range(len(VALID_CLASS_IDS)): class_name = CLASS_LABELS[i] class_id = VALID_CLASS_IDS[i] ap = avgs["classes"][class_name]["ap"] ap50 = avgs["classes"][class_name]["ap50%"] ap25 = avgs["classes"][class_name]["ap25%"] f.write(_SPLITTER.join([str(x) for x in [class_name, class_id, ap, ap50, ap25]]) + '\n') def config(): parser = argparse.ArgumentParser() parser.add_argument('--pred_path', required=True, help='path to directory of predicted .txt files') parser.add_argument('--gt_path', required=True, help='path to directory of gt .txt files') parser.add_argument('--output_file', default='semantic_instance_evaluation.txt', help='output file [default: semantic_instance_evaluation.txt]') opt = parser.parse_args() return opt if __name__ == '__main__': opt = config() setup_logging() #-----------------scannet---------------------- CLASS_LABELS = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture'] VALID_CLASS_IDS = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]) evaluator = Evaluator(CLASS_LABELS=CLASS_LABELS, VALID_CLASS_IDS=VALID_CLASS_IDS) print('reading', len(os.listdir(opt.pred_path))-1, 'scans...') for i, pred_file in 
enumerate(os.listdir(opt.pred_path)): if os.path.isdir(os.path.join(opt.pred_path, pred_file)): continue scene_id = pred_file[:12] sys.stdout.write("\rscans read: {}".format(i+1)) sys.stdout.flush() gt_file = os.path.join(opt.gt_path, pred_file) gt_ids = util_3d.load_ids(gt_file) evaluator.add_gt(gt_ids, scene_id) instances = util_3d.read_instance_prediction_file(os.path.join(opt.pred_path,pred_file), opt.pred_path) for pred_mask_file in instances: # read the mask pred_mask = util_3d.load_ids(pred_mask_file) instances[pred_mask_file]['pred_mask'] = pred_mask evaluator.add_prediction(instances, scene_id) print('') _, _, _ = evaluator.evaluate()
ContrastiveSceneContexts-main
downstream/semseg/datasets/evaluation/scannet_benchmark_utils/scripts/evaluate_semantic_instance.py
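The core of the evaluator above is the AP integration step: np.convolve(recall_for_conv, [-0.5, 0, 0.5], 'valid') turns the padded recall curve into per-sample step widths, and the dot product with the precision array is then the area under the precision-recall curve. Below is a minimal NumPy sketch of just that step; the precision/recall values are made up, and the padding convention (first recall value prepended, 0 appended) is an assumption, since the lines that build recall_for_conv sit above this excerpt.

import numpy as np

# Hypothetical precision/recall samples, ordered the way the evaluator
# orders them (recall decreasing, ending at the artificial point with
# precision 1 and recall 0).  These numbers are made up.
precision = np.array([0.5, 0.6, 0.8, 0.9, 1.0])
recall    = np.array([1.0, 0.8, 0.5, 0.2, 0.0])

# Assumed padding: repeat the first recall value in front and append 0,
# so the centred kernel is defined at both ends of the curve.
recall_for_conv = np.concatenate(([recall[0]], recall, [0.0]))

# np.convolve flips the kernel, so with [-0.5, 0, 0.5] each 'valid' output
# is 0.5 * (recall left of sample i - recall right of sample i): the width
# of the recall interval attributed to sample i.  The widths sum to 1 here.
step_widths = np.convolve(recall_for_conv, [-0.5, 0, 0.5], 'valid')

# AP is then just the dot product: precision integrated over recall.
ap = float(np.dot(precision, step_widths))
print(step_widths, ap)   # widths sum to 1.0, ap = 0.765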
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import glob import numpy as np import os import torch from tqdm import tqdm from lib.utils import mkdir_p from lib.pc_utils import save_point_cloud, read_plyfile import MinkowskiEngine as ME STANFORD_3D_IN_PATH = '/checkpoint/jihou/data/stanford3d/Stanford3dDataset_v1.2/' STANFORD_3D_OUT_PATH = '/checkpoint/jihou/data/stanford3d/pointcloud_pth' STANFORD_3D_TO_SEGCLOUD_LABEL = { 4: 0, 8: 1, 12: 2, 1: 3, 6: 4, 13: 5, 7: 6, 5: 7, 11: 8, 3: 9, 9: 10, 2: 11, 0: 12, } class Stanford3DDatasetConverter: CLASSES = [ 'clutter', 'beam', 'board', 'bookcase', 'ceiling', 'chair', 'column', 'door', 'floor', 'sofa', 'stairs', 'table', 'wall', 'window' ] TRAIN_TEXT = 'train' VAL_TEXT = 'val' TEST_TEXT = 'test' @classmethod def read_txt(cls, txtfile): # Read txt file and parse its content. with open(txtfile) as f: pointcloud = [] for l in f: try: pointcloud += [[float(li) for li in l.split()]] except Exception as e: print(e, txtfile) continue # pointcloud = [l.split() for l in f] # Load point cloud to named numpy array. pointcloud = np.array(pointcloud).astype(np.float32) assert pointcloud.shape[1] == 6 xyz = pointcloud[:, :3].astype(np.float32) rgb = pointcloud[:, 3:].astype(np.uint8) return xyz, rgb @classmethod def convert_to_ply(cls, root_path, out_path, save_pth=False): """Convert Stanford3DDataset to PLY format that is compatible with Synthia dataset. Assumes file structure as given by the dataset. Outputs the processed PLY files to `STANFORD_3D_OUT_PATH`. """ txtfiles = glob.glob(os.path.join(root_path, '*/*/*.txt')) for txtfile in tqdm(txtfiles): file_sp = os.path.normpath(txtfile).split(os.path.sep) target_path = os.path.join(out_path, file_sp[-3]) out_file = os.path.join(target_path, file_sp[-2] + '.ply') if save_pth: out_file = os.path.join(target_path, file_sp[-2] + '.pth') if os.path.exists(out_file): print(out_file, ' exists') continue annotation, _ = os.path.split(txtfile) subclouds = glob.glob(os.path.join(annotation, 'Annotations/*.txt')) coords, feats, labels, instances = [], [], [], [] for inst, subcloud in enumerate(subclouds): # Read ply file and parse its rgb values. xyz, rgb = cls.read_txt(subcloud) _, annotation_subfile = os.path.split(subcloud) clsidx = cls.CLASSES.index(annotation_subfile.split('_')[0]) coords.append(xyz) feats.append(rgb) labels.append(np.ones((len(xyz), 1), dtype=np.int32) * clsidx) instances.append(np.ones((len(xyz), 1), dtype=np.int32) * inst) if len(coords) == 0: print(txtfile, ' has 0 files.') else: # Concat coords = np.concatenate(coords, 0) feats = np.concatenate(feats, 0) labels = np.concatenate(labels, 0) instances = np.concatenate(instances, 0) inds, collabels = ME.utils.sparse_quantize( coords, feats, labels, return_index=True, ignore_label=255, quantization_size=0.01 # 1cm ) pointcloud = np.concatenate((coords[inds], feats[inds], collabels[:, None]), axis=1) if save_pth: pointcloud = np.concatenate((coords[inds], feats[inds], collabels[:, None], instances[inds]), axis=1) # Write ply file. 
mkdir_p(target_path) if save_pth: torch.save(pointcloud, out_file) continue save_point_cloud(pointcloud, out_file, with_label=True, verbose=False) def generate_splits(stanford_out_path, suffix='ply'): """Takes preprocessed out path and generate txt files""" split_path = './splits/stanford' mkdir_p(split_path) for i in range(1, 7): curr_path = os.path.join(stanford_out_path, f'Area_{i}') files = glob.glob(os.path.join(curr_path, '*.{}'.format(suffix))) files = [os.path.relpath(full_path, stanford_out_path) for full_path in files] out_txt = os.path.join(split_path, f'area{i}.txt') with open(out_txt, 'w') as f: f.write('\n'.join(files)) if __name__ == '__main__': Stanford3DDatasetConverter.convert_to_ply(STANFORD_3D_IN_PATH, STANFORD_3D_OUT_PATH, save_pth=True) generate_splits(STANFORD_3D_OUT_PATH, 'pth')
ContrastiveSceneContexts-main
downstream/semseg/datasets/preprocessing/stanford/stanford.py
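The Stanford converter above relies on ME.utils.sparse_quantize with quantization_size=0.01 to keep one sample per 1 cm voxel. The following is the same deduplication idea in plain NumPy, for illustration only; it is not the MinkowskiEngine call (whose exact signature and return values differ between versions), and the toy coordinates are invented.

import numpy as np

def voxel_downsample(coords, voxel_size=0.01):
    """Keep one representative point per voxel (first occurrence).

    Plain-NumPy stand-in for the deduplication done by sparse_quantize;
    it returns the kept indices so colours and labels can be sliced the
    same way the converter slices its feats and labels arrays.
    """
    voxels = np.floor(coords / voxel_size).astype(np.int64)
    # np.unique over rows returns the first index of each distinct voxel.
    _, keep = np.unique(voxels, axis=0, return_index=True)
    return np.sort(keep)

# Toy cloud: the first two points share a 1 cm voxel, so one is dropped.
pts = np.array([[0.001, 0.002, 0.003],
                [0.004, 0.006, 0.002],
                [0.031, 0.010, 0.000]], dtype=np.float32)
print(voxel_downsample(pts))   # [0 2]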
import os import sys import plyfile import json import time import torch import argparse import numpy as np def get_raw2scannet_label_map(): lines = [line.rstrip() for line in open('scannetv2-labels.combined.tsv')] lines = lines[1:] raw2scannet = {} for i in range(len(lines)): elements = lines[i].split('\t') # raw_name = elements[0] # nyu40_name = elements[6] raw_name = elements[1] nyu40_id = elements[4] nyu40_name = elements[7] raw2scannet[raw_name] = nyu40_id return raw2scannet g_raw2scannet = get_raw2scannet_label_map() RAW2SCANNET = g_raw2scannet def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('--input', default='/canis/Datasets/ScanNet/public/v2/scans/') parser.add_argument('--output', default='./output') opt = parser.parse_args() return opt def main(config): for scene_name in os.listdir(config.input): print(scene_name) # Over-segmented segments: maps from segment to vertex/point IDs segid_to_pointid = {} segfile = os.path.join(config.input, scene_name, '%s_vh_clean_2.0.010000.segs.json'%(scene_name)) with open(segfile) as jsondata: d = json.load(jsondata) seg = d['segIndices'] for i in range(len(seg)): if seg[i] not in segid_to_pointid: segid_to_pointid[seg[i]] = [] segid_to_pointid[seg[i]].append(i) # Raw points in XYZRGBA ply_filename = os.path.join(config.input, scene_name, '%s_vh_clean_2.ply' % (scene_name)) f = plyfile.PlyData().read(ply_filename) points = np.array([list(x) for x in f.elements[0]]) # Instances over-segmented segment IDs: annotation on segments instance_segids = [] labels = [] annotation_filename = os.path.join(config.input, scene_name, '%s.aggregation.json'%(scene_name)) with open(annotation_filename) as jsondata: d = json.load(jsondata) for x in d['segGroups']: instance_segids.append(x['segments']) labels.append(x['label']) # Each instance's points instance_labels = np.zeros(points.shape[0]) semantic_labels = np.zeros(points.shape[0]) for i in range(len(instance_segids)): segids = instance_segids[i] pointids = [] for segid in segids: pointids += segid_to_pointid[segid] pointids = np.array(pointids) instance_labels[pointids] = i+1 semantic_labels[pointids] = RAW2SCANNET[labels[i]] colors = points[:,3:6] points = points[:,0:3] # XYZ+RGB+NORMAL torch.save((points, colors, semantic_labels, instance_labels), os.path.join(config.output, scene_name+'.pth')) if __name__=='__main__': config = parse_args() os.makedirs(config.output, exist_ok=True) main(config)
ContrastiveSceneContexts-main
downstream/semseg/datasets/preprocessing/scannet/collect_indoor3d_data.py
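The ScanNet preprocessing script above never assigns per-point labels directly: it inverts the over-segmentation into a segment-to-point map, then paints instance and semantic labels over the union of the point ids of each annotated segment group. Here is a toy sketch of that aggregation; the segment ids, groups, and label-id map are invented.

import numpy as np

# Per-point over-segmentation: point i belongs to segment seg[i].
seg = [7, 7, 3, 3, 3, 9]

# Invert it: segment id -> list of point indices.
segid_to_pointid = {}
for i, s in enumerate(seg):
    segid_to_pointid.setdefault(s, []).append(i)

# Annotated instances reference segments, not points (toy groups below).
seg_groups = [{'segments': [7], 'label': 'chair'},
              {'segments': [3, 9], 'label': 'table'}]
label_to_id = {'chair': 5, 'table': 7}   # hypothetical nyu40-style ids

instance_labels = np.zeros(len(seg), dtype=np.int64)
semantic_labels = np.zeros(len(seg), dtype=np.int64)
for inst_idx, group in enumerate(seg_groups):
    point_ids = np.concatenate([segid_to_pointid[s] for s in group['segments']])
    instance_labels[point_ids] = inst_idx + 1          # instances are 1-based
    semantic_labels[point_ids] = label_to_id[group['label']]

print(instance_labels)   # [1 1 2 2 2 2]
print(semantic_labels)   # [5 5 7 7 7 7]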
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import random from torch.nn import Module from MinkowskiEngine import SparseTensor class Wrapper(Module): """ Wrapper for the segmentation networks. """ OUT_PIXEL_DIST = -1 def __init__(self, NetClass, in_nchannel, out_nchannel, config): super(Wrapper, self).__init__() self.initialize_filter(NetClass, in_nchannel, out_nchannel, config) def initialize_filter(self, NetClass, in_nchannel, out_nchannel, config): raise NotImplementedError('Must initialize a model and a filter') def forward(self, x, coords, colors=None): soutput = self.model(x) # During training, make the network invariant to the filter if not self.training or random.random() < 0.5: # Filter requires the model to finish the forward pass wrapper_coords = self.filter.initialize_coords(self.model, coords, colors) finput = SparseTensor(soutput.F, wrapper_coords) soutput = self.filter(finput) return soutput
ContrastiveSceneContexts-main
downstream/semseg/models/wrapper.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from models.resnet import ResNetBase, get_norm from models.modules.common import ConvType, NormType, conv, conv_tr from models.modules.resnet_block import BasicBlock, BasicBlockINBN, Bottleneck import torch.nn as nn import MinkowskiEngine as ME from MinkowskiEngine import MinkowskiReLU import MinkowskiEngine.MinkowskiOps as me class MinkUNetBase(ResNetBase): BLOCK = None PLANES = (64, 128, 256, 512, 256, 128, 128) DILATIONS = (1, 1, 1, 1, 1, 1) LAYERS = (2, 2, 2, 2, 2, 2) INIT_DIM = 64 OUT_PIXEL_DIST = 1 NORM_TYPE = NormType.BATCH_NORM NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS # To use the model, must call initialize_coords before forward pass. # Once data is processed, call clear to reset the model before calling initialize_coords def __init__(self, in_channels, out_channels, config, D=3, **kwargs): super(MinkUNetBase, self).__init__(in_channels, out_channels, config, D) def network_initialization(self, in_channels, out_channels, config, D): # Setup net_metadata dilations = self.DILATIONS bn_momentum = config.bn_momentum def space_n_time_m(n, m): return n if D == 3 else [n, n, n, m] if D == 4: self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1) # Output of the first conv concated to conv6 self.inplanes = self.INIT_DIM self.conv1p1s1 = conv( in_channels, self.inplanes, kernel_size=space_n_time_m(config.conv1_kernel_size, 1), stride=1, dilation=1, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bn1 = get_norm(self.NORM_TYPE, self.PLANES[0], D, bn_momentum=bn_momentum) self.block1 = self._make_layer( self.BLOCK, self.PLANES[0], self.LAYERS[0], dilation=dilations[0], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.conv2p1s2 = conv( self.inplanes, self.inplanes, kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), dilation=1, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bn2 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum) self.block2 = self._make_layer( self.BLOCK, self.PLANES[1], self.LAYERS[1], dilation=dilations[1], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.conv3p2s2 = conv( self.inplanes, self.inplanes, kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), dilation=1, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bn3 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum) self.block3 = self._make_layer( self.BLOCK, self.PLANES[2], self.LAYERS[2], dilation=dilations[2], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.conv4p4s2 = conv( self.inplanes, self.inplanes, kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), dilation=1, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bn4 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum) self.block4 = self._make_layer( self.BLOCK, self.PLANES[3], self.LAYERS[3], dilation=dilations[3], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.convtr4p8s2 = conv_tr( self.inplanes, self.PLANES[4], kernel_size=space_n_time_m(2, 1), upsample_stride=space_n_time_m(2, 1), dilation=1, bias=False, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bntr4 = get_norm(self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum) self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion self.block5 = self._make_layer( self.BLOCK, self.PLANES[4], self.LAYERS[4], dilation=dilations[4], 
norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.convtr5p4s2 = conv_tr( self.inplanes, self.PLANES[5], kernel_size=space_n_time_m(2, 1), upsample_stride=space_n_time_m(2, 1), dilation=1, bias=False, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bntr5 = get_norm(self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum) self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion self.block6 = self._make_layer( self.BLOCK, self.PLANES[5], self.LAYERS[5], dilation=dilations[5], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.convtr6p2s2 = conv_tr( self.inplanes, self.PLANES[6], kernel_size=space_n_time_m(2, 1), upsample_stride=space_n_time_m(2, 1), dilation=1, bias=False, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bntr6 = get_norm(self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum) self.relu = MinkowskiReLU(inplace=True) self.final = nn.Sequential( conv( self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion, 512, kernel_size=1, stride=1, dilation=1, bias=False, D=D), ME.MinkowskiBatchNorm(512), ME.MinkowskiReLU(), conv(512, out_channels, kernel_size=1, stride=1, dilation=1, bias=True, D=D)) def forward(self, x): out = self.conv1p1s1(x) out = self.bn1(out) out = self.relu(out) out_b1p1 = self.block1(out) out = self.conv2p1s2(out_b1p1) out = self.bn2(out) out = self.relu(out) out_b2p2 = self.block2(out) out = self.conv3p2s2(out_b2p2) out = self.bn3(out) out = self.relu(out) out_b3p4 = self.block3(out) out = self.conv4p4s2(out_b3p4) out = self.bn4(out) out = self.relu(out) # pixel_dist=8 out = self.block4(out) out = self.convtr4p8s2(out) out = self.bntr4(out) out = self.relu(out) out = me.cat(out, out_b3p4) out = self.block5(out) out = self.convtr5p4s2(out) out = self.bntr5(out) out = self.relu(out) out = me.cat(out, out_b2p2) out = self.block6(out) out = self.convtr6p2s2(out) out = self.bntr6(out) out = self.relu(out) out = me.cat(out, out_b1p1) return self.final(out) class ResUNet14(MinkUNetBase): BLOCK = BasicBlock LAYERS = (1, 1, 1, 1, 1, 1) class ResUNet18(MinkUNetBase): BLOCK = BasicBlock LAYERS = (2, 2, 2, 2, 2, 2) class ResUNet18INBN(ResUNet18): NORM_TYPE = NormType.INSTANCE_BATCH_NORM BLOCK = BasicBlockINBN class ResUNet34(MinkUNetBase): BLOCK = BasicBlock LAYERS = (3, 4, 6, 3, 2, 2) class ResUNet50(MinkUNetBase): BLOCK = Bottleneck LAYERS = (3, 4, 6, 3, 2, 2) class ResUNet101(MinkUNetBase): BLOCK = Bottleneck LAYERS = (3, 4, 23, 3, 2, 2) class ResUNet14D(ResUNet14): PLANES = (64, 128, 256, 512, 512, 512, 512) class ResUNet18D(ResUNet18): PLANES = (64, 128, 256, 512, 512, 512, 512) class ResUNet34D(ResUNet34): PLANES = (64, 128, 256, 512, 512, 512, 512) class ResUNet34E(ResUNet34): INIT_DIM = 32 PLANES = (32, 64, 128, 256, 128, 64, 64) class ResUNet34F(ResUNet34): INIT_DIM = 32 PLANES = (32, 64, 128, 256, 128, 64, 32) class MinkUNetHyper(MinkUNetBase): BLOCK = None PLANES = (64, 128, 256, 512, 256, 128, 128) DILATIONS = (1, 1, 1, 1, 1, 1) LAYERS = (2, 2, 2, 2, 2, 2) INIT_DIM = 64 OUT_PIXEL_DIST = 1 NORM_TYPE = NormType.BATCH_NORM NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS # To use the model, must call initialize_coords before forward pass. 
# Once data is processed, call clear to reset the model before calling initialize_coords def __init__(self, in_channels, out_channels, config, D=3, **kwargs): super(MinkUNetBase, self).__init__(in_channels, out_channels, config, D) def network_initialization(self, in_channels, out_channels, config, D): # Setup net_metadata dilations = self.DILATIONS bn_momentum = config.bn_momentum def space_n_time_m(n, m): return n if D == 3 else [n, n, n, m] if D == 4: self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1) # Output of the first conv concated to conv6 self.inplanes = self.INIT_DIM self.conv1p1s1 = conv( in_channels, self.inplanes, kernel_size=space_n_time_m(config.conv1_kernel_size, 1), stride=1, dilation=1, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bn1 = get_norm(self.NORM_TYPE, self.PLANES[0], D, bn_momentum=bn_momentum) self.block1 = self._make_layer( self.BLOCK, self.PLANES[0], self.LAYERS[0], dilation=dilations[0], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.conv2p1s2 = conv( self.inplanes, self.inplanes, kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), dilation=1, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bn2 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum) self.block2 = self._make_layer( self.BLOCK, self.PLANES[1], self.LAYERS[1], dilation=dilations[1], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.conv3p2s2 = conv( self.inplanes, self.inplanes, kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), dilation=1, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bn3 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum) self.block3 = self._make_layer( self.BLOCK, self.PLANES[2], self.LAYERS[2], dilation=dilations[2], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.conv4p4s2 = conv( self.inplanes, self.inplanes, kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), dilation=1, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bn4 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum) self.block4 = self._make_layer( self.BLOCK, self.PLANES[3], self.LAYERS[3], dilation=dilations[3], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.pool_tr4 = ME.MinkowskiPoolingTranspose(kernel_size=8, stride=8, dimension=D) out_pool4 = self.inplanes self.convtr4p8s2 = conv_tr( self.inplanes, self.PLANES[4], kernel_size=space_n_time_m(2, 1), upsample_stride=space_n_time_m(2, 1), dilation=1, bias=False, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bntr4 = get_norm(self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum) self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion self.block5 = self._make_layer( self.BLOCK, self.PLANES[4], self.LAYERS[4], dilation=dilations[4], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.pool_tr5 = ME.MinkowskiPoolingTranspose(kernel_size=4, stride=4, dimension=D) out_pool5 = self.inplanes self.convtr5p4s2 = conv_tr( self.inplanes, self.PLANES[5], kernel_size=space_n_time_m(2, 1), upsample_stride=space_n_time_m(2, 1), dilation=1, bias=False, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bntr5 = get_norm(self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum) self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion self.block6 = self._make_layer( self.BLOCK, self.PLANES[5], self.LAYERS[5], dilation=dilations[5], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.pool_tr6 = ME.MinkowskiPoolingTranspose(kernel_size=2, stride=2, dimension=D) out_pool6 = self.inplanes self.convtr6p2s2 = conv_tr( 
self.inplanes, self.PLANES[6], kernel_size=space_n_time_m(2, 1), upsample_stride=space_n_time_m(2, 1), dilation=1, bias=False, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bntr6 = get_norm(self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum) self.relu = MinkowskiReLU(inplace=True) self.final = nn.Sequential( conv( out_pool5 + out_pool6 + self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion, 512, kernel_size=1, bias=False, D=D), ME.MinkowskiBatchNorm(512), ME.MinkowskiReLU(), conv(512, out_channels, kernel_size=1, bias=True, D=D)) def forward(self, x): out = self.conv1p1s1(x) out = self.bn1(out) out = self.relu(out) out_b1p1 = self.block1(out) out = self.conv2p1s2(out_b1p1) out = self.bn2(out) out = self.relu(out) out_b2p2 = self.block2(out) out = self.conv3p2s2(out_b2p2) out = self.bn3(out) out = self.relu(out) out_b3p4 = self.block3(out) out = self.conv4p4s2(out_b3p4) out = self.bn4(out) out = self.relu(out) # pixel_dist=8 out = self.block4(out) out = self.convtr4p8s2(out) out = self.bntr4(out) out = self.relu(out) out = me.cat(out, out_b3p4) out = self.block5(out) out_5 = self.pool_tr5(out) out = self.convtr5p4s2(out) out = self.bntr5(out) out = self.relu(out) out = me.cat(out, out_b2p2) out = self.block6(out) out_6 = self.pool_tr6(out) out = self.convtr6p2s2(out) out = self.bntr6(out) out = self.relu(out) out = me.cat(out, out_b1p1, out_6, out_5) return self.final(out) class MinkUNetHyper14INBN(MinkUNetHyper): NORM_TYPE = NormType.INSTANCE_BATCH_NORM BLOCK = BasicBlockINBN class STMinkUNetBase(MinkUNetBase): CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS def __init__(self, in_channels, out_channels, config, D=4, **kwargs): super(STMinkUNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs) class STResUNet14(STMinkUNetBase, ResUNet14): pass class STResUNet18(STMinkUNetBase, ResUNet18): pass class STResUNet34(STMinkUNetBase, ResUNet34): pass class STResUNet50(STMinkUNetBase, ResUNet50): pass class STResUNet101(STMinkUNetBase, ResUNet101): pass class STResTesseractUNetBase(STMinkUNetBase): CONV_TYPE = ConvType.HYPERCUBE class STResTesseractUNet14(STResTesseractUNetBase, ResUNet14): pass class STResTesseractUNet18(STResTesseractUNetBase, ResUNet18): pass class STResTesseractUNet34(STResTesseractUNetBase, ResUNet34): pass class STResTesseractUNet50(STResTesseractUNetBase, ResUNet50): pass class STResTesseractUNet101(STResTesseractUNetBase, ResUNet101): pass
ContrastiveSceneContexts-main
downstream/semseg/models/resunet.py
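In the U-Net above, self.inplanes is reassigned before each decoder block because every upsampling stage concatenates the transposed-convolution output with an encoder skip connection, so the next block's input width is the sum of the two. The small sketch below only mirrors the arithmetic in the MinkUNetBase constructor for BasicBlock (expansion 1) and its default PLANES.

# Channel bookkeeping for the decoder of MinkUNetBase with BasicBlock
# (expansion = 1) and PLANES = (64, 128, 256, 512, 256, 128, 128).
PLANES = (64, 128, 256, 512, 256, 128, 128)
expansion = 1

# block5 sees the convtr4 output (PLANES[4]) concatenated with the block3 skip.
in_block5 = PLANES[4] + PLANES[2] * expansion          # 256 + 256 = 512
# block6 sees the convtr5 output (PLANES[5]) concatenated with the block2 skip.
in_block6 = PLANES[5] + PLANES[1] * expansion          # 128 + 128 = 256
# the final 1x1 conv sees the convtr6 output plus the block1 skip.
in_final = PLANES[6] + PLANES[0] * expansion           # 128 + 64  = 192
print(in_block5, in_block6, in_final)                  # 512 256 192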
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import models.resunet as resunet import models.res16unet as res16unet MODELS = [] def add_models(module): MODELS.extend([getattr(module, a) for a in dir(module) if 'Net' in a]) add_models(resunet) add_models(res16unet) def get_models(): '''Returns a tuple of sample models.''' return MODELS def load_model(name): '''Creates and returns an instance of the model given its class name. ''' # Find the model class from its name all_models = get_models() mdict = {model.__name__: model for model in all_models} if name not in mdict: print('Invalid model index. Options are:') # Display a list of valid model names for model in all_models: print('\t* {}'.format(model.__name__)) return None NetClass = mdict[name] return NetClass
ContrastiveSceneContexts-main
downstream/semseg/models/__init__.py
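load_model above returns the class registered under the given name (any attribute whose name contains 'Net'), not an instance. A hedged usage sketch follows; the config namespace, channel counts, and chosen architecture are placeholders rather than values taken from this repository (the Res16UNet variants read config.net.conv1_kernel_size and config.optimizer.bn_momentum, which is what the stand-in config provides).

from types import SimpleNamespace

from models import load_model

# 'Res16UNet34C' is one of the classes registered above; everything else
# here is a placeholder for illustration.
NetClass = load_model('Res16UNet34C')      # returns the class, not an instance
if NetClass is not None:
    config = SimpleNamespace(
        net=SimpleNamespace(conv1_kernel_size=3),
        optimizer=SimpleNamespace(bn_momentum=0.02),
    )
    model = NetClass(in_channels=3, out_channels=20, config=config, D=3)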
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from models.resnet import ResNetBase, get_norm from models.modules.common import ConvType, NormType, conv, conv_tr from models.modules.resnet_block import BasicBlock, Bottleneck from MinkowskiEngine import MinkowskiReLU, SparseTensor import MinkowskiEngine.MinkowskiOps as me class Res16UNetBase(ResNetBase): BLOCK = None PLANES = (32, 64, 128, 256, 256, 256, 256, 256) DILATIONS = (1, 1, 1, 1, 1, 1, 1, 1) LAYERS = (2, 2, 2, 2, 2, 2, 2, 2) INIT_DIM = 32 OUT_PIXEL_DIST = 1 NORM_TYPE = NormType.BATCH_NORM NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS # To use the model, must call initialize_coords before forward pass. # Once data is processed, call clear to reset the model before calling initialize_coords def __init__(self, in_channels, out_channels, config, D=3, **kwargs): super(Res16UNetBase, self).__init__(in_channels, out_channels, config, D) def network_initialization(self, in_channels, out_channels, config, D): # Setup net_metadata dilations = self.DILATIONS bn_momentum = config.optimizer.bn_momentum def space_n_time_m(n, m): return n if D == 3 else [n, n, n, m] if D == 4: self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1) # Output of the first conv concated to conv6 self.inplanes = self.INIT_DIM print("building model, ", in_channels) self.conv0p1s1 = conv( in_channels, self.inplanes, kernel_size=space_n_time_m(config.net.conv1_kernel_size, 1), stride=1, dilation=1, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bn0 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum) self.conv1p1s2 = conv( self.inplanes, self.inplanes, kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), dilation=1, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bn1 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum) self.block1 = self._make_layer( self.BLOCK, self.PLANES[0], self.LAYERS[0], dilation=dilations[0], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.conv2p2s2 = conv( self.inplanes, self.inplanes, kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), dilation=1, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bn2 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum) self.block2 = self._make_layer( self.BLOCK, self.PLANES[1], self.LAYERS[1], dilation=dilations[1], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.conv3p4s2 = conv( self.inplanes, self.inplanes, kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), dilation=1, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bn3 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum) self.block3 = self._make_layer( self.BLOCK, self.PLANES[2], self.LAYERS[2], dilation=dilations[2], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.conv4p8s2 = conv( self.inplanes, self.inplanes, kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), dilation=1, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bn4 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum) self.block4 = self._make_layer( self.BLOCK, self.PLANES[3], self.LAYERS[3], dilation=dilations[3], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.convtr4p16s2 = conv_tr( self.inplanes, self.PLANES[4], kernel_size=space_n_time_m(2, 1), upsample_stride=space_n_time_m(2, 1), dilation=1, bias=False, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) 
self.bntr4 = get_norm(self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum) self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion self.block5 = self._make_layer( self.BLOCK, self.PLANES[4], self.LAYERS[4], dilation=dilations[4], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.convtr5p8s2 = conv_tr( self.inplanes, self.PLANES[5], kernel_size=space_n_time_m(2, 1), upsample_stride=space_n_time_m(2, 1), dilation=1, bias=False, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bntr5 = get_norm(self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum) self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion self.block6 = self._make_layer( self.BLOCK, self.PLANES[5], self.LAYERS[5], dilation=dilations[5], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.convtr6p4s2 = conv_tr( self.inplanes, self.PLANES[6], kernel_size=space_n_time_m(2, 1), upsample_stride=space_n_time_m(2, 1), dilation=1, bias=False, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bntr6 = get_norm(self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum) self.inplanes = self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion self.block7 = self._make_layer( self.BLOCK, self.PLANES[6], self.LAYERS[6], dilation=dilations[6], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.convtr7p2s2 = conv_tr( self.inplanes, self.PLANES[7], kernel_size=space_n_time_m(2, 1), upsample_stride=space_n_time_m(2, 1), dilation=1, bias=False, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bntr7 = get_norm(self.NORM_TYPE, self.PLANES[7], D, bn_momentum=bn_momentum) self.inplanes = self.PLANES[7] + self.INIT_DIM self.block8 = self._make_layer( self.BLOCK, self.PLANES[7], self.LAYERS[7], dilation=dilations[7], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.final = conv(self.PLANES[7], out_channels, kernel_size=1, stride=1, bias=True, D=D) self.relu = MinkowskiReLU(inplace=True) def forward(self, x): out = self.conv0p1s1(x) out = self.bn0(out) out_p1 = self.relu(out) out = self.conv1p1s2(out_p1) out = self.bn1(out) out = self.relu(out) out_b1p2 = self.block1(out) out = self.conv2p2s2(out_b1p2) out = self.bn2(out) out = self.relu(out) out_b2p4 = self.block2(out) out = self.conv3p4s2(out_b2p4) out = self.bn3(out) out = self.relu(out) out_b3p8 = self.block3(out) # pixel_dist=16 out = self.conv4p8s2(out_b3p8) out = self.bn4(out) out = self.relu(out) out = self.block4(out) # pixel_dist=8 out = self.convtr4p16s2(out) out = self.bntr4(out) out = self.relu(out) out = me.cat(out, out_b3p8) out = self.block5(out) # pixel_dist=4 out = self.convtr5p8s2(out) out = self.bntr5(out) out = self.relu(out) out = me.cat(out, out_b2p4) out = self.block6(out) # pixel_dist=2 out = self.convtr6p4s2(out) out = self.bntr6(out) out = self.relu(out) out = me.cat(out, out_b1p2) out = self.block7(out) # pixel_dist=1 out = self.convtr7p2s2(out) out = self.bntr7(out) out = self.relu(out) out = me.cat(out, out_p1) out = self.block8(out) return self.final(out), out class Res16UNet14(Res16UNetBase): BLOCK = BasicBlock LAYERS = (1, 1, 1, 1, 1, 1, 1, 1) class Res16UNet18(Res16UNetBase): BLOCK = BasicBlock LAYERS = (2, 2, 2, 2, 2, 2, 2, 2) class Res16UNet34(Res16UNetBase): BLOCK = BasicBlock LAYERS = (2, 3, 4, 6, 2, 2, 2, 2) class Res16UNet50(Res16UNetBase): BLOCK = Bottleneck LAYERS = (2, 3, 4, 6, 2, 2, 2, 2) class Res16UNet101(Res16UNetBase): BLOCK = Bottleneck LAYERS = (2, 3, 4, 23, 2, 2, 2, 2) class Res16UNet14A(Res16UNet14): PLANES = (32, 64, 128, 256, 128, 128, 96, 96) class Res16UNet14A2(Res16UNet14A): LAYERS = (1, 1, 1, 1, 2, 
2, 2, 2) class Res16UNet14B(Res16UNet14): PLANES = (32, 64, 128, 256, 128, 128, 128, 128) class Res16UNet14B2(Res16UNet14B): LAYERS = (1, 1, 1, 1, 2, 2, 2, 2) class Res16UNet14B3(Res16UNet14B): LAYERS = (2, 2, 2, 2, 1, 1, 1, 1) class Res16UNet14C(Res16UNet14): PLANES = (32, 64, 128, 256, 192, 192, 128, 128) class Res16UNet14D(Res16UNet14): PLANES = (32, 64, 128, 256, 384, 384, 384, 384) class Res16UNet18A(Res16UNet18): PLANES = (32, 64, 128, 256, 128, 128, 96, 96) class Res16UNet18B(Res16UNet18): PLANES = (32, 64, 128, 256, 128, 128, 128, 128) class Res16UNet18D(Res16UNet18): PLANES = (32, 64, 128, 256, 384, 384, 384, 384) class Res16UNet34A(Res16UNet34): PLANES = (32, 64, 128, 256, 256, 128, 64, 64) class Res16UNet34B(Res16UNet34): PLANES = (32, 64, 128, 256, 256, 128, 64, 32) class Res16UNet34C(Res16UNet34): PLANES = (32, 64, 128, 256, 256, 128, 96, 96) class STRes16UNetBase(Res16UNetBase): CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS def __init__(self, in_channels, out_channels, config, D=4, **kwargs): super(STRes16UNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs) class STRes16UNet14(STRes16UNetBase, Res16UNet14): pass class STRes16UNet14A(STRes16UNetBase, Res16UNet14A): pass class STRes16UNet18(STRes16UNetBase, Res16UNet18): pass class STRes16UNet34(STRes16UNetBase, Res16UNet34): pass class STRes16UNet50(STRes16UNetBase, Res16UNet50): pass class STRes16UNet101(STRes16UNetBase, Res16UNet101): pass class STRes16UNet18A(STRes16UNet18): PLANES = (32, 64, 128, 256, 128, 128, 96, 96) class STResTesseract16UNetBase(STRes16UNetBase): CONV_TYPE = ConvType.HYPERCUBE class STResTesseract16UNet18A(STRes16UNet18A, STResTesseract16UNetBase): pass
ContrastiveSceneContexts-main
downstream/semseg/models/res16unet.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from MinkowskiEngine import MinkowskiNetwork class Model(MinkowskiNetwork): """ Base network for all sparse convnet By default, all networks are segmentation networks. """ OUT_PIXEL_DIST = -1 def __init__(self, in_channels, out_channels, config, D, **kwargs): super(Model, self).__init__(D) self.in_channels = in_channels self.out_channels = out_channels self.config = config class HighDimensionalModel(Model): """ Base network for all spatio (temporal) chromatic sparse convnet """ def __init__(self, in_channels, out_channels, config, D, **kwargs): assert D > 4, "Num dimension smaller than 5" super(HighDimensionalModel, self).__init__(in_channels, out_channels, config, D, **kwargs)
ContrastiveSceneContexts-main
downstream/semseg/models/model.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn from models.common import get_norm import MinkowskiEngine as ME import MinkowskiEngine.MinkowskiFunctional as MEF class BasicBlockBase(nn.Module): expansion = 1 NORM_TYPE = 'BN' def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, bn_momentum=0.1, D=3): super(BasicBlockBase, self).__init__() self.conv1 = ME.MinkowskiConvolution( inplanes, planes, kernel_size=3, stride=stride, dimension=D) self.norm1 = get_norm(self.NORM_TYPE, planes, bn_momentum=bn_momentum, D=D) self.conv2 = ME.MinkowskiConvolution( planes, planes, kernel_size=3, stride=1, dilation=dilation, has_bias=False, dimension=D) self.norm2 = get_norm(self.NORM_TYPE, planes, bn_momentum=bn_momentum, D=D) self.downsample = downsample def forward(self, x): residual = x out = self.conv1(x) out = self.norm1(out) out = MEF.relu(out) out = self.conv2(out) out = self.norm2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = MEF.relu(out) return out class BasicBlockBN(BasicBlockBase): NORM_TYPE = 'BN' class BasicBlockIN(BasicBlockBase): NORM_TYPE = 'IN' def get_block(norm_type, inplanes, planes, stride=1, dilation=1, downsample=None, bn_momentum=0.1, D=3): if norm_type == 'BN': return BasicBlockBN(inplanes, planes, stride, dilation, downsample, bn_momentum, D) elif norm_type == 'IN': return BasicBlockIN(inplanes, planes, stride, dilation, downsample, bn_momentum, D) else: raise ValueError(f'Type {norm_type}, not defined')
ContrastiveSceneContexts-main
downstream/semseg/models/residual_block.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import MinkowskiEngine as ME def get_norm(norm_type, num_feats, bn_momentum=0.05, D=-1): if norm_type == 'BN': return ME.MinkowskiBatchNorm(num_feats, momentum=bn_momentum) elif norm_type == 'IN': return ME.MinkowskiInstanceNorm(num_feats, dimension=D) else: raise ValueError(f'Type {norm_type}, not defined')
ContrastiveSceneContexts-main
downstream/semseg/models/common.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn import MinkowskiEngine as ME from models.model import Model from models.modules.common import ConvType, NormType, get_norm, conv, sum_pool from models.modules.resnet_block import BasicBlock, Bottleneck class ResNetBase(Model): BLOCK = None LAYERS = () INIT_DIM = 64 PLANES = (64, 128, 256, 512) OUT_PIXEL_DIST = 32 HAS_LAST_BLOCK = False CONV_TYPE = ConvType.HYPERCUBE def __init__(self, in_channels, out_channels, config, D=3, **kwargs): assert self.BLOCK is not None assert self.OUT_PIXEL_DIST > 0 super(ResNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs) self.network_initialization(in_channels, out_channels, config, D) self.weight_initialization() def network_initialization(self, in_channels, out_channels, config, D): def space_n_time_m(n, m): return n if D == 3 else [n, n, n, m] if D == 4: self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1) dilations = config.dilations bn_momentum = config.bn_momentum self.inplanes = self.INIT_DIM self.conv1 = conv( in_channels, self.inplanes, kernel_size=space_n_time_m(config.conv1_kernel_size, 1), stride=1, D=D) self.bn1 = get_norm(NormType.BATCH_NORM, self.inplanes, D=self.D, bn_momentum=bn_momentum) self.relu = ME.MinkowskiReLU(inplace=True) self.pool = sum_pool(kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), D=D) self.layer1 = self._make_layer( self.BLOCK, self.PLANES[0], self.LAYERS[0], stride=space_n_time_m(2, 1), dilation=space_n_time_m(dilations[0], 1)) self.layer2 = self._make_layer( self.BLOCK, self.PLANES[1], self.LAYERS[1], stride=space_n_time_m(2, 1), dilation=space_n_time_m(dilations[1], 1)) self.layer3 = self._make_layer( self.BLOCK, self.PLANES[2], self.LAYERS[2], stride=space_n_time_m(2, 1), dilation=space_n_time_m(dilations[2], 1)) self.layer4 = self._make_layer( self.BLOCK, self.PLANES[3], self.LAYERS[3], stride=space_n_time_m(2, 1), dilation=space_n_time_m(dilations[3], 1)) self.final = conv( self.PLANES[3] * self.BLOCK.expansion, out_channels, kernel_size=1, bias=True, D=D) def weight_initialization(self): for m in self.modules(): if isinstance(m, ME.MinkowskiBatchNorm): nn.init.constant_(m.bn.weight, 1) nn.init.constant_(m.bn.bias, 0) def _make_layer(self, block, planes, blocks, stride=1, dilation=1, norm_type=NormType.BATCH_NORM, bn_momentum=0.1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( conv( self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False, D=self.D), get_norm(norm_type, planes * block.expansion, D=self.D, bn_momentum=bn_momentum), ) layers = [] layers.append( block( self.inplanes, planes, stride=stride, dilation=dilation, downsample=downsample, conv_type=self.CONV_TYPE, D=self.D)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append( block( self.inplanes, planes, stride=1, dilation=dilation, conv_type=self.CONV_TYPE, D=self.D)) return nn.Sequential(*layers) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.pool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = self.final(x) return x class ResNet14(ResNetBase): BLOCK = BasicBlock LAYERS = (1, 1, 1, 1) class ResNet18(ResNetBase): BLOCK = BasicBlock LAYERS = (2, 2, 2, 2) class ResNet34(ResNetBase): BLOCK = BasicBlock LAYERS = (3, 4, 6, 3) class 
ResNet50(ResNetBase): BLOCK = Bottleneck LAYERS = (3, 4, 6, 3) class ResNet101(ResNetBase): BLOCK = Bottleneck LAYERS = (3, 4, 23, 3) class STResNetBase(ResNetBase): CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS def __init__(self, in_channels, out_channels, config, D=4, **kwargs): super(STResNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs) class STResNet14(STResNetBase, ResNet14): pass class STResNet18(STResNetBase, ResNet18): pass class STResNet34(STResNetBase, ResNet34): pass class STResNet50(STResNetBase, ResNet50): pass class STResNet101(STResNetBase, ResNet101): pass class STResTesseractNetBase(STResNetBase): CONV_TYPE = ConvType.HYPERCUBE class STResTesseractNet14(STResTesseractNetBase, STResNet14): pass class STResTesseractNet18(STResTesseractNetBase, STResNet18): pass class STResTesseractNet34(STResTesseractNetBase, STResNet34): pass class STResTesseractNet50(STResTesseractNetBase, STResNet50): pass class STResTesseractNet101(STResTesseractNetBase, STResNet101): pass
ContrastiveSceneContexts-main
downstream/semseg/models/resnet.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn from models.modules.common import ConvType, NormType, get_norm, conv from MinkowskiEngine import MinkowskiReLU class BasicBlockBase(nn.Module): expansion = 1 NORM_TYPE = NormType.BATCH_NORM def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, conv_type=ConvType.HYPERCUBE, bn_momentum=0.1, D=3): super(BasicBlockBase, self).__init__() self.conv1 = conv( inplanes, planes, kernel_size=3, stride=stride, dilation=dilation, conv_type=conv_type, D=D) self.norm1 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum) self.conv2 = conv( planes, planes, kernel_size=3, stride=1, dilation=dilation, bias=False, conv_type=conv_type, D=D) self.norm2 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum) self.relu = MinkowskiReLU(inplace=True) self.downsample = downsample def forward(self, x): residual = x out = self.conv1(x) out = self.norm1(out) out = self.relu(out) out = self.conv2(out) out = self.norm2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class BasicBlock(BasicBlockBase): NORM_TYPE = NormType.BATCH_NORM class BasicBlockIN(BasicBlockBase): NORM_TYPE = NormType.INSTANCE_NORM class BasicBlockINBN(BasicBlockBase): NORM_TYPE = NormType.INSTANCE_BATCH_NORM class BottleneckBase(nn.Module): expansion = 4 NORM_TYPE = NormType.BATCH_NORM def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, conv_type=ConvType.HYPERCUBE, bn_momentum=0.1, D=3): super(BottleneckBase, self).__init__() self.conv1 = conv(inplanes, planes, kernel_size=1, D=D) self.norm1 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum) self.conv2 = conv( planes, planes, kernel_size=3, stride=stride, dilation=dilation, conv_type=conv_type, D=D) self.norm2 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum) self.conv3 = conv(planes, planes * self.expansion, kernel_size=1, D=D) self.norm3 = get_norm(self.NORM_TYPE, planes * self.expansion, D, bn_momentum=bn_momentum) self.relu = MinkowskiReLU(inplace=True) self.downsample = downsample def forward(self, x): residual = x out = self.conv1(x) out = self.norm1(out) out = self.relu(out) out = self.conv2(out) out = self.norm2(out) out = self.relu(out) out = self.conv3(out) out = self.norm3(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class Bottleneck(BottleneckBase): NORM_TYPE = NormType.BATCH_NORM class BottleneckIN(BottleneckBase): NORM_TYPE = NormType.INSTANCE_NORM class BottleneckINBN(BottleneckBase): NORM_TYPE = NormType.INSTANCE_BATCH_NORM
ContrastiveSceneContexts-main
downstream/semseg/models/modules/resnet_block.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn import MinkowskiEngine as ME from models.modules.common import ConvType, NormType from models.modules.resnet_block import BasicBlock, Bottleneck class SELayer(nn.Module): def __init__(self, channel, reduction=16, D=-1): # Global coords does not require coords_key super(SELayer, self).__init__() self.fc = nn.Sequential( ME.MinkowskiLinear(channel, channel // reduction), ME.MinkowskiReLU(inplace=True), ME.MinkowskiLinear(channel // reduction, channel), ME.MinkowskiSigmoid()) self.pooling = ME.MinkowskiGlobalPooling(dimension=D) self.broadcast_mul = ME.MinkowskiBroadcastMultiplication(dimension=D) def forward(self, x): y = self.pooling(x) y = self.fc(y) return self.broadcast_mul(x, y) class SEBasicBlock(BasicBlock): def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, conv_type=ConvType.HYPERCUBE, reduction=16, D=-1): super(SEBasicBlock, self).__init__( inplanes, planes, stride=stride, dilation=dilation, downsample=downsample, conv_type=conv_type, D=D) self.se = SELayer(planes, reduction=reduction, D=D) def forward(self, x): residual = x out = self.conv1(x) out = self.norm1(out) out = self.relu(out) out = self.conv2(out) out = self.norm2(out) out = self.se(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class SEBasicBlockSN(SEBasicBlock): NORM_TYPE = NormType.SPARSE_SWITCH_NORM class SEBasicBlockIN(SEBasicBlock): NORM_TYPE = NormType.SPARSE_INSTANCE_NORM class SEBasicBlockLN(SEBasicBlock): NORM_TYPE = NormType.SPARSE_LAYER_NORM class SEBottleneck(Bottleneck): def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, conv_type=ConvType.HYPERCUBE, D=3, reduction=16): super(SEBottleneck, self).__init__( inplanes, planes, stride=stride, dilation=dilation, downsample=downsample, conv_type=conv_type, D=D) self.se = SELayer(planes * self.expansion, reduction=reduction, D=D) def forward(self, x): residual = x out = self.conv1(x) out = self.norm1(out) out = self.relu(out) out = self.conv2(out) out = self.norm2(out) out = self.relu(out) out = self.conv3(out) out = self.norm3(out) out = self.se(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class SEBottleneckSN(SEBottleneck): NORM_TYPE = NormType.SPARSE_SWITCH_NORM class SEBottleneckIN(SEBottleneck): NORM_TYPE = NormType.SPARSE_INSTANCE_NORM class SEBottleneckLN(SEBottleneck): NORM_TYPE = NormType.SPARSE_LAYER_NORM
ContrastiveSceneContexts-main
downstream/semseg/models/modules/senet_block.py
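SELayer above is squeeze-and-excitation for sparse tensors: global pooling produces one channel descriptor per point cloud, a bottleneck MLP with a sigmoid turns it into per-channel gates, and a broadcast multiplication rescales every voxel's features. The same recalibration written for dense tensors is shown below, purely as an illustration; the shapes and reduction factor are arbitrary.

import torch
import torch.nn as nn

class DenseSELayer(nn.Module):
    """Dense analogue of the sparse SELayer above (illustrative only)."""

    def __init__(self, channels, reduction=16):
        super().__init__()
        self.fc = nn.Sequential(
            nn.Linear(channels, channels // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels),
            nn.Sigmoid(),
        )

    def forward(self, x):                 # x: (batch, channels, npoints)
        squeeze = x.mean(dim=2)           # global pooling over points
        scale = self.fc(squeeze)          # per-channel gates in (0, 1)
        return x * scale.unsqueeze(2)     # broadcast multiply

feats = torch.randn(2, 64, 1024)
print(DenseSELayer(64)(feats).shape)      # torch.Size([2, 64, 1024])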
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree.
ContrastiveSceneContexts-main
downstream/semseg/models/modules/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import collections from enum import Enum import torch.nn as nn import MinkowskiEngine as ME class NormType(Enum): BATCH_NORM = 0 INSTANCE_NORM = 1 INSTANCE_BATCH_NORM = 2 def get_norm(norm_type, n_channels, D, bn_momentum=0.1): if norm_type == NormType.BATCH_NORM: return ME.MinkowskiBatchNorm(n_channels, momentum=bn_momentum) elif norm_type == NormType.INSTANCE_NORM: return ME.MinkowskiInstanceNorm(n_channels) elif norm_type == NormType.INSTANCE_BATCH_NORM: return nn.Sequential( ME.MinkowskiInstanceNorm(n_channels), ME.MinkowskiBatchNorm(n_channels, momentum=bn_momentum)) else: raise ValueError(f'Norm type: {norm_type} not supported') class ConvType(Enum): """ Define the kernel region type """ HYPERCUBE = 0, 'HYPERCUBE' SPATIAL_HYPERCUBE = 1, 'SPATIAL_HYPERCUBE' SPATIO_TEMPORAL_HYPERCUBE = 2, 'SPATIO_TEMPORAL_HYPERCUBE' HYPERCROSS = 3, 'HYPERCROSS' SPATIAL_HYPERCROSS = 4, 'SPATIAL_HYPERCROSS' SPATIO_TEMPORAL_HYPERCROSS = 5, 'SPATIO_TEMPORAL_HYPERCROSS' SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS = 6, 'SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS ' def __new__(cls, value, name): member = object.__new__(cls) member._value_ = value member.fullname = name return member def __int__(self): return self.value # Covert the ConvType var to a RegionType var conv_to_region_type = { # kernel_size = [k, k, k, 1] ConvType.HYPERCUBE: ME.RegionType.HYPERCUBE, ConvType.SPATIAL_HYPERCUBE: ME.RegionType.HYPERCUBE, ConvType.SPATIO_TEMPORAL_HYPERCUBE: ME.RegionType.HYPERCUBE, ConvType.HYPERCROSS: ME.RegionType.HYPERCROSS, ConvType.SPATIAL_HYPERCROSS: ME.RegionType.HYPERCROSS, ConvType.SPATIO_TEMPORAL_HYPERCROSS: ME.RegionType.HYPERCROSS, ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS: ME.RegionType.HYBRID } int_to_region_type = {m.value: m for m in ME.RegionType} def convert_region_type(region_type): """ Convert the integer region_type to the corresponding RegionType enum object. 
""" return int_to_region_type[region_type] def convert_conv_type(conv_type, kernel_size, D): assert isinstance(conv_type, ConvType), "conv_type must be of ConvType" region_type = conv_to_region_type[conv_type] axis_types = None if conv_type == ConvType.SPATIAL_HYPERCUBE: # No temporal convolution if isinstance(kernel_size, collections.Sequence): kernel_size = kernel_size[:3] else: kernel_size = [ kernel_size, ] * 3 if D == 4: kernel_size.append(1) elif conv_type == ConvType.SPATIO_TEMPORAL_HYPERCUBE: # conv_type conversion already handled assert D == 4 elif conv_type == ConvType.HYPERCUBE: # conv_type conversion already handled pass elif conv_type == ConvType.SPATIAL_HYPERCROSS: if isinstance(kernel_size, collections.Sequence): kernel_size = kernel_size[:3] else: kernel_size = [ kernel_size, ] * 3 if D == 4: kernel_size.append(1) elif conv_type == ConvType.HYPERCROSS: # conv_type conversion already handled pass elif conv_type == ConvType.SPATIO_TEMPORAL_HYPERCROSS: # conv_type conversion already handled assert D == 4 elif conv_type == ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS: # Define the CUBIC conv kernel for spatial dims and CROSS conv for temp dim axis_types = [ ME.RegionType.HYPERCUBE, ] * 3 if D == 4: axis_types.append(ME.RegionType.HYPERCROSS) return region_type, axis_types, kernel_size def conv(in_planes, out_planes, kernel_size, stride=1, dilation=1, bias=False, conv_type=ConvType.HYPERCUBE, D=-1): assert D > 0, 'Dimension must be a positive integer' region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D) kernel_generator = ME.KernelGenerator( kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D) return ME.MinkowskiConvolution( in_channels=in_planes, out_channels=out_planes, kernel_size=kernel_size, stride=stride, dilation=dilation, has_bias=bias, kernel_generator=kernel_generator, dimension=D) def conv_tr(in_planes, out_planes, kernel_size, upsample_stride=1, dilation=1, bias=False, conv_type=ConvType.HYPERCUBE, D=-1): assert D > 0, 'Dimension must be a positive integer' region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D) kernel_generator = ME.KernelGenerator( kernel_size, upsample_stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D) return ME.MinkowskiConvolutionTranspose( in_channels=in_planes, out_channels=out_planes, kernel_size=kernel_size, stride=upsample_stride, dilation=dilation, has_bias=bias, kernel_generator=kernel_generator, dimension=D) def avg_pool(kernel_size, stride=1, dilation=1, conv_type=ConvType.HYPERCUBE, in_coords_key=None, D=-1): assert D > 0, 'Dimension must be a positive integer' region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D) kernel_generator = ME.KernelGenerator( kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D) return ME.MinkowskiAvgPooling( kernel_size=kernel_size, stride=stride, dilation=dilation, kernel_generator=kernel_generator, dimension=D) def avg_unpool(kernel_size, stride=1, dilation=1, conv_type=ConvType.HYPERCUBE, D=-1): assert D > 0, 'Dimension must be a positive integer' region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D) kernel_generator = ME.KernelGenerator( kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D) return ME.MinkowskiAvgUnpooling( kernel_size=kernel_size, stride=stride, dilation=dilation, kernel_generator=kernel_generator, dimension=D) def 
sum_pool(kernel_size, stride=1, dilation=1, conv_type=ConvType.HYPERCUBE, D=-1): assert D > 0, 'Dimension must be a positive integer' region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D) kernel_generator = ME.KernelGenerator( kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D) return ME.MinkowskiSumPooling( kernel_size=kernel_size, stride=stride, dilation=dilation, kernel_generator=kernel_generator, dimension=D)
ContrastiveSceneContexts-main
downstream/semseg/models/modules/common.py
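convert_conv_type above mainly decides how the kernel treats the temporal axis: SPATIAL_HYPERCUBE (and SPATIAL_HYPERCROSS) restrict the kernel size to the three spatial axes and, in 4D, force a kernel size of 1 along time, while SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS keeps cubic spatial kernels and marks the temporal axis as cross-shaped. Below is a pure-Python sketch of just the kernel-size expansion rule; it deliberately leaves out the MinkowskiEngine region and axis types.

from collections.abc import Sequence

def spatial_kernel_size(kernel_size, D):
    """Expand a kernel size so it covers only the 3 spatial axes.

    Pure-Python illustration of the SPATIAL_HYPERCUBE branch of
    convert_conv_type: in 4D the temporal axis gets kernel size 1.
    """
    if isinstance(kernel_size, Sequence):
        kernel_size = list(kernel_size[:3])
    else:
        kernel_size = [kernel_size] * 3
    if D == 4:
        kernel_size.append(1)
    return kernel_size

print(spatial_kernel_size(3, D=3))   # [3, 3, 3]
print(spatial_kernel_size(3, D=4))   # [3, 3, 3, 1]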
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import torch import numpy as np import glob import time import argparse import pykeops from pykeops.torch import LazyTensor pykeops.clean_pykeops() def parse_args(): """parse input arguments""" parser = argparse.ArgumentParser(description='data_efficient3d') parser.add_argument('--point_data', type=str, default='/checkpoint/jihou/data/scannet/pointcloud/') parser.add_argument('--feat_data', type=str, default='/checkpoint/jihou/checkpoint/scannet/pretrain/partition8_4096_60k/1/outputs/feat') parser.add_argument('--num_points', type=int, default=100) parser.add_argument('--num_iters', type=int, default=50) parser.add_argument('--output', type=str, default='./output') return parser.parse_args() def kmeans(pointcloud, k=10, iterations=10, verbose=True): n, dim = pointcloud.shape # Number of samples, dimension of the ambient space start = time.time() clusters = pointcloud[:k, :].clone() # Simplistic random initialization pointcloud_cuda = LazyTensor(pointcloud[:, None, :]) # (Npoints, 1, D) # K-means loop: for _ in range(iterations): clusters_previous = clusters.clone() clusters_gpu = LazyTensor(clusters[None, :, :]) # (1, Nclusters, D) distance_matrix = ((pointcloud_cuda - clusters_gpu) ** 2).sum(-1) # (Npoints, Nclusters) symbolic matrix of squared distances closest_clusters = distance_matrix.argmin(dim=1).long().view(-1) # Points -> Nearest cluster # #points for each cluster clusters_count = torch.bincount(closest_clusters, minlength=k).float() # Class weights for d in range(dim): # Compute the cluster centroids with torch.bincount: clusters[:, d] = torch.bincount(closest_clusters, weights=pointcloud[:, d], minlength=k) / clusters_count # for clusters that have no points assigned mask = clusters_count == 0 clusters[mask] = clusters_previous[mask] end = time.time() if verbose: print("K-means example with {:,} points in dimension {:,}, K = {:,}:".format(n, dim, k)) print('Timing for {} iterations: {:.5f}s = {} x {:.5f}s\n'.format( iterations, end - start, iterations, (end-start) / iterations)) # nearest neighbouring search for each cluster closest_points_to_centers = distance_matrix.argmin(dim=0).long().view(-1) return closest_points_to_centers def kmeans_sampling(args): pointcloud_names = glob.glob(os.path.join(args.point_data, "*.pth")) sampled_inds = {} for idx, pointcloud_name in enumerate(pointcloud_names): print('{}/{}: {}'.format(idx, len(pointcloud_names), pointcloud_name)) pointcloud = torch.load(pointcloud_name) scene_name = os.path.basename(pointcloud_name).split('.')[0] coords = pointcloud[0].astype(np.float32) colors = pointcloud[1].astype(np.int32) candidates = [] candidates.append(coords) candidates.append(colors) feats = torch.load(os.path.join(args.feat_data, scene_name)) candidates.append(feats) candidates = torch.from_numpy(np.concatenate(candidates,1)).cuda().float() K = args.num_points sampled_inds_per_scene = kmeans(candidates, K, args.num_iters).cpu().numpy() sampled_inds[scene_name] = sampled_inds_per_scene return sampled_inds if __name__ == "__main__": args = parse_args() sampled_inds = kmeans_sampling(args) torch.save(sampled_inds, args.output)
ContrastiveSceneContexts-main
downstream/semseg/lib/sampling_points.py
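The sampler above keeps both the point cloud and the cluster centres as pykeops LazyTensors, so the (N points x k clusters) squared-distance matrix stays symbolic; centroids are updated per dimension with torch.bincount, and the index of the point nearest to each final centre is what gets stored per scene. Below is a plain-PyTorch version of the same loop with a dense distance matrix, meant only as a small-scale illustration; the toy data is random.

import torch

def kmeans_dense(points, k=3, iterations=10):
    """Plain-PyTorch version of the centroid update used above.

    Uses a dense (N, k) distance matrix instead of pykeops LazyTensors,
    so it is only suitable as an illustration on small point sets.
    """
    clusters = points[:k].clone()                            # simplistic init
    for _ in range(iterations):
        previous = clusters.clone()
        dists = torch.cdist(points, clusters) ** 2           # (N, k)
        assign = dists.argmin(dim=1)                          # point -> cluster
        counts = torch.bincount(assign, minlength=k).float()
        for d in range(points.shape[1]):
            clusters[:, d] = torch.bincount(
                assign, weights=points[:, d], minlength=k) / counts
        clusters[counts == 0] = previous[counts == 0]         # keep empty clusters
    # index of the point nearest to each final centre (what the sampler stores)
    return torch.cdist(points, clusters).argmin(dim=0)

pts = torch.randn(200, 3)
print(kmeans_dense(pts, k=4))    # tensor of 4 point indices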
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree.
ContrastiveSceneContexts-main
downstream/semseg/lib/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import matplotlib.pyplot as plt import matplotlib import numpy as np from matplotlib.pyplot import * from PIL import Image colors = [ 'xkcd:blue', 'xkcd:red', 'xkcd:purple', 'xkcd:orchid', 'xkcd:orange', 'xkcd:grey', 'xkcd:teal', 'xkcd:sienna', 'xkcd:azure', 'xkcd:green', 'xkcd:black', 'xkcd:goldenrod'] def bar_plot_insseg(image_name='bar_insseg.png'): labels = ['20 Points', '50 Points', '100 Points', '200 points'] RAND = [14.6, 21.6, 34.0, 43.5] Kmeans = [15.6, 24.3, 35.7, 42.3] OURS_I = [26.3, 32.6, 39.9, 48.9] OURS_S = [25.8, 32.5, 44.2, 48.3] OURS_IS = [27.2, 35.7, 43.6, 50.4] x = np.array([0,2,4,6]) # the label locations width = 1.7 # the width of the bars font = {'family' : 'Times New Roman', 'size' : 11} matplotlib.rc('font', **font) fig, ax = plt.subplots(1,1) fig.set_size_inches(5.5, 4.5) rects1 = ax.bar(x - width*2/5, RAND, width/5, label='RAND') rects2 = ax.bar(x - width*1/5, Kmeans, width/5, label='Kmeans') rects3 = ax.bar(x , OURS_I, width/5, label='OURS_I') rects4 = ax.bar(x + width*1/5, OURS_S, width/5, label='OURS_S') rects5 = ax.bar(x + width*2/5, OURS_IS, width/5, label='OURS_IS') #rects1 = ax.bar(x - width*2/4, points20, width/4, label='20') #rects2 = ax.bar(x - width/4, points50, width/4, label='50') #rects3 = ax.bar(x + width/4, points100, width/4, label='100') #rects4 = ax.bar(x + width*2/4, points200, width/4, label='200') ax.plot(np.arange(len(labels)+15)-2, [56.9]*(len(x)+15), '--', linewidth=2.25, color=colors[-1]) # Add some text for labels, title and custom x-axis tick labels, etc. ax.set_ylabel('mAP@0.5') ax.set_xlabel('Number of Annotated Points Per Scene') ax.set(xlim=[-1, 7], ylim=[0, 61]) #ax.set_title('') ax.set_xticks(x) ax.set_xticklabels(labels) ax.text(1.5, 58, '150,000 Annotated Points Per Scene', fontsize=8) ax.legend(loc=2) def autolabel(rects): """Attach a text label above each bar in *rects*, displaying its height.""" for rect in rects: height = rect.get_height() ax.annotate('{}'.format(height), xy=(rect.get_x() + rect.get_width() / 2, height), xytext=(0, 3), # 3 points vertical offset textcoords="offset points",fontsize=6, ha='center', va='bottom') autolabel(rects1) autolabel(rects2) autolabel(rects3) autolabel(rects4) autolabel(rects5) fig.tight_layout() plt.show() fig.savefig(image_name, dpi=600) image = Image.open(image_name) image.save(image_name) def bar_plot_sem(image_name='bar_semseg.png'): #labels = ['RAND', 'KMEANS', 'OURS_I', 'OURS_S', 'OURS_IS'] #points20 = [41.9, 45.9, 53.6, 55.5, 53.8] #points50 = [53.9, 55.4, 60.7, 60.5, 62.9] #points100 = [62.2, 60.6, 65.7, 65.9, 66.9] #points200 = [65.5, 64.3, 68.2, 68.2, 69.0] labels = ['20 Points', '50 Points', '100 Points', '200 points'] RAND = [41.9, 53.9, 62.2, 65.5] Kmeans = [45.9, 55.4, 60.6, 64.3] OURS_I = [55.5, 60.5, 65.9, 68.2] OURS_S = [53.6, 60.7, 65.7, 68.2] OURS_IS = [53.8, 62.9, 66.9, 69.0] x = np.array([0,2,4,6]) # the label locations width = 1.7 # the width of the bars font = {'family' : 'Times New Roman', 'size' : 11} matplotlib.rc('font', **font) fig, ax = plt.subplots(1,1) fig.set_size_inches(5.5, 4.5) rects1 = ax.bar(x - width*2/5, RAND, width/5, label='RAND') rects2 = ax.bar(x - width*1/5, Kmeans, width/5, label='Kmeans') rects3 = ax.bar(x , OURS_I, width/5, label='OURS_I') rects4 = ax.bar(x + width*1/5, OURS_S, width/5, label='OURS_S') rects5 = ax.bar(x + width*2/5, OURS_IS, width/5,
label='OURS_IS') #rects1 = ax.bar(x - width*2/4, points20, width/4, label='20') #rects2 = ax.bar(x - width/4, points50, width/4, label='50') #rects3 = ax.bar(x + width/4, points100, width/4, label='100') #rects4 = ax.bar(x + width*2/4, points200, width/4, label='200') ax.plot(np.arange(len(labels)+15)-2, [72.2]*(len(x)+15), '--', linewidth=2.25, color=colors[-1]) # Add some text for labels, title and custom x-axis tick labels, etc. ax.set_ylabel('mIoU') ax.set_xlabel('Number of Annotated Points Per Scene') ax.set(xlim=[-1, 7], ylim=[40, 75]) #ax.set_title('') ax.set_xticks(x) ax.set_xticklabels(labels) ax.text(1.5, 73, '150,000 Annotated Points Per Scene', fontsize=8) ax.legend() def autolabel(rects): """Attach a text label above each bar in *rects*, displaying its height.""" for rect in rects: height = rect.get_height() ax.annotate('{}'.format(height), xy=(rect.get_x() + rect.get_width() / 2, height), xytext=(0, 3), # 3 points vertical offset textcoords="offset points",fontsize=6, ha='center', va='bottom') autolabel(rects1) autolabel(rects2) autolabel(rects3) autolabel(rects4) autolabel(rects5) fig.tight_layout() plt.show() fig.savefig(image_name, dpi=600) image = Image.open(image_name) image.save(image_name) def plot_curves(curves, xlabel='% Dataset Labeled\n(ScanNet-5-Recon)', xlim=[4, 36], xticks=np.arange(5, 35, 5), xticklabels=None, ylabel='mIoU', ylim=[0.2, 0.65], yticks=np.arange(0.2, 0.65, 0.05), if_grid=True, image_name='test.png'): font = {'family' : 'Times New Roman', 'size' : 11} matplotlib.rc('font', **font) fig, subplot = plt.subplots(1,1) fig.set_size_inches(8.0, 4.0) subplot.set(xlim=xlim, ylim=ylim, xlabel=xlabel, ylabel=ylabel) subplot.set(xticks=xticks, yticks=yticks) if xticklabels: subplot.axes.set_xticklabels(xticklabels) subplot.grid(if_grid) for idx, curve in enumerate(curves): name = '' fmt='' marker = '' markersize = 10 linewidth=4.0 color = colors[idx%len(colors)] if 'name' in curve: name = curve['name'] if 'marker' in curve: marker = curve['marker'] if 'markersize' in curve: marker_size = curve['markersize'] if 'color' in curve: color = curve['color'] if 'linewidth' in curve: linewidth = curve['linewidth'] if 'fmt' in curve: fmt = curve['fmt'] x = curve['x'] y = curve['y'] subplot.plot(x, y, fmt, label=name, marker=marker, markersize=markersize, linewidth=linewidth, color=color) subplot.legend(loc='best') fig.tight_layout() plt.show() fig.savefig(image_name, dpi=600) image = Image.open(image_name) w, h = image.size image.crop((75, 75, w - 75, h - 60)).save(image_name) def shape_contexts_ablation(): ''' Variants & 1024 & 2048 & 4096 \\ \hline 1 & 59.7 & 60.7 & 60.1 \\ 2 & 61.4 & 61.6 & 61.9 \\ 4 & 61.7 & 61.8 & 63.0 \\ 8 & 61.2 & 62.1 & 63.4 \\ ''' data = [ {'name': 'No scene contexts', 'x': [1, 2, 3, 4], 'y': [60.5, 60.7, 60.1, 60.6], 'marker': 'o'}, {'name': '2 Partitions', 'x': [2, 4, 6, 8], 'y': [61.4, 61.6, 61.9, 61.9], 'marker': 'o'}, {'name': '4 Partitions', 'x': [2, 4, 6, 8], 'y': [61.7, 61.8, 63.0, 62.9], 'marker': '^'}, {'name': '8 Partitions', 'x': [2, 4, 6, 8], 'y': [61.2, 62.1, 63.4, 63.5], 'marker': 's'}, {'name': '16 Partitions', 'x': [2, 4, 6], 'y': [61.1, 61.9, 62.6], 'marker': 'p'}, {'name': '32 Partitions', 'x': [2, 4, 6], 'y': [60.9, 61.7, 62.1], 'marker': '*'}, ] plot_curves(curves=data, xlabel='Number of Points', ylabel='[email protected]', xlim=[1.9,8.1], xticks=[2,4,6,8], xticklabels=[1024, 2048, 4096, 8192], ylim=[60.9, 63.5], yticks=[60.9, 61.5, 62.5, 63.5], if_grid=True, image_name='shape_context_ablation.jpg') def 
bar_plot_active(image_name='bar_active.png'): labels = ['20 Points', '50 Points', '100 Points', '200 points'] #kmeans = [734, 1034, 1386, 1688] #act = [1151, 1726, 2153, 2456] #total 2873 kmeans = [0.255, 0.36, 0.482, 0.588] act = [0.401, 0.601, 0.749, 0.855] x = np.array([0,2,4,6]) # the label locations width = 1.7 # the width of the bars font = {'family' : 'Times New Roman', 'size' : 11} matplotlib.rc('font', **font) fig, ax = plt.subplots(1,1) fig.set_size_inches(8, 4) rects1 = ax.bar(x , kmeans, width/2, label='kmeans sampling (xyz+rgb)') rects2 = ax.bar(x + width*1/2, act, width/2, label='act. labeling') # Add some text for labels, title and custom x-axis tick labels, etc. ax.set_ylabel('Percentage of Distinct Objects') ax.set_xlabel('Number of Annotated Points Per Scene') ax.set(xlim=[-1, 8], ylim=[0.2, 0.9]) # manipulate vals = ax.get_yticks() ax.set_yticklabels(['{:.0%}'.format(x) for x in vals]) ax.set_xticks(x) ax.set_xticklabels(labels) ax.grid(False) ax.legend(loc=2) def autolabel(rects): """Attach a text label above each bar in *rects*, displaying its height.""" for rect in rects: height = rect.get_height() ax.annotate('{:.1%}'.format(height), xy=(rect.get_x() + rect.get_width() / 2, height), xytext=(0, 3), # 3 points vertical offset textcoords="offset points",fontsize=10, ha='center', va='bottom') autolabel(rects1) autolabel(rects2) fig.tight_layout() plt.show() fig.savefig(image_name, dpi=600) image = Image.open(image_name) image.save(image_name) if __name__=='__main__': #shape_contexts_ablation() #bar_plot_insseg() #bar_plot_semseg() #bar_plot_active()
ContrastiveSceneContexts-main
downstream/semseg/lib/plot_graph.py
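bar_plot_insseg and bar_plot_sem build grouped bar charts by offsetting each series from its group centre by multiples of width/5; the y-axis label that appears here as '[email protected]' is presumably mAP@0.5, and note that plot_curves assigns curve['markersize'] to an unused marker_size variable, so per-curve marker sizes are silently ignored. Below is a minimal standalone matplotlib sketch of the same grouped-bar layout, using two of the series from bar_plot_insseg; Axes.bar_label (matplotlib >= 3.4) stands in for the manual autolabel helper, and the output file name is arbitrary.

import numpy as np
import matplotlib.pyplot as plt

labels = ['20 Points', '50 Points', '100 Points', '200 Points']
series = {
    'RAND':    [14.6, 21.6, 34.0, 43.5],   # values taken from bar_plot_insseg
    'OURS_IS': [27.2, 35.7, 43.6, 50.4],
}
x = np.arange(len(labels)) * 2.0   # group centres, spaced like the original (0, 2, 4, 6)
width = 1.7                        # total group width, split evenly among the series

fig, ax = plt.subplots(figsize=(5.5, 4.5))
n = len(series)
for i, (name, vals) in enumerate(series.items()):
    # offset each series so its bars sit side by side inside the group
    offset = (i - (n - 1) / 2) * width / n
    rects = ax.bar(x + offset, vals, width / n, label=name)
    ax.bar_label(rects, fontsize=6, padding=3)   # replaces the manual autolabel helper

ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.set_ylabel('mAP@0.5')
ax.set_xlabel('Number of Annotated Points Per Scene')
ax.legend(loc=2)
fig.tight_layout()
fig.savefig('bar_sketch.png', dpi=600)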
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from torch.optim import SGD, Adam from torch.optim.lr_scheduler import LambdaLR, StepLR class LambdaStepLR(LambdaLR): def __init__(self, optimizer, lr_lambda, last_step=-1): super(LambdaStepLR, self).__init__(optimizer, lr_lambda, last_step) @property def last_step(self): """Use last_epoch for the step counter""" return self.last_epoch @last_step.setter def last_step(self, v): self.last_epoch = v class PolyLR(LambdaStepLR): """DeepLab learning rate policy""" def __init__(self, optimizer, max_iter, power=0.9, last_step=-1): super(PolyLR, self).__init__(optimizer, lambda s: (1 - s / (max_iter + 1))**power, last_step) class SquaredLR(LambdaStepLR): """ Used for SGD Lars""" def __init__(self, optimizer, max_iter, last_step=-1): super(SquaredLR, self).__init__(optimizer, lambda s: (1 - s / (max_iter + 1))**2, last_step) class ExpLR(LambdaStepLR): def __init__(self, optimizer, step_size, gamma=0.9, last_step=-1): # (0.9 ** 21.854) = 0.1, (0.95 ** 44.8906) = 0.1 # To get 0.1 every N using gamma 0.9, N * log(0.9)/log(0.1) = 0.04575749 N # To get 0.1 every N using gamma g, g ** N = 0.1 -> N * log(g) = log(0.1) -> g = np.exp(log(0.1) / N) super(ExpLR, self).__init__(optimizer, lambda s: gamma**(s / step_size), last_step) def initialize_optimizer(params, config): assert config.optimizer in ['SGD', 'Adagrad', 'Adam', 'RMSProp', 'Rprop', 'SGDLars'] if config.optimizer == 'SGD': return SGD( params, lr=config.lr, momentum=config.sgd_momentum, dampening=config.sgd_dampening, weight_decay=config.weight_decay) elif config.optimizer == 'Adam': return Adam( params, lr=config.lr, betas=(config.adam_beta1, config.adam_beta2), weight_decay=config.weight_decay) else: logging.error('Optimizer type not supported') raise ValueError('Optimizer type not supported') def initialize_scheduler(optimizer, config, last_step=-1): if config.scheduler == 'StepLR': return StepLR( optimizer, step_size=config.step_size, gamma=config.step_gamma, last_epoch=last_step) elif config.scheduler == 'PolyLR': return PolyLR(optimizer, max_iter=config.max_iter, power=config.poly_power, last_step=last_step) elif config.scheduler == 'SquaredLR': return SquaredLR(optimizer, max_iter=config.max_iter, last_step=last_step) elif config.scheduler == 'ExpLR': return ExpLR( optimizer, step_size=config.exp_step_size, gamma=config.exp_gamma, last_step=last_step) else: logging.error('Scheduler not supported')
ContrastiveSceneContexts-main
downstream/semseg/lib/solvers.py
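PolyLR, SquaredLR, and ExpLR are thin LambdaLR wrappers whose step counter is the training iteration rather than the epoch. A standalone sketch of the same poly(0.9) decay rule expressed directly with torch.optim.lr_scheduler.LambdaLR; max_iter and the base learning rate are made-up values for illustration.

import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import LambdaLR

max_iter = 100
params = [torch.nn.Parameter(torch.zeros(1))]
optimizer = SGD(params, lr=0.1)
# same rule as PolyLR: lr(s) = base_lr * (1 - s / (max_iter + 1)) ** 0.9
scheduler = LambdaLR(optimizer, lambda s: (1 - s / (max_iter + 1)) ** 0.9)

for step in range(max_iter):
    optimizer.step()       # one training iteration (loss/backward omitted)
    scheduler.step()       # advance the per-iteration schedule
    if step % 25 == 0:
        print(step, scheduler.get_last_lr()[0])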
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import shutil import tempfile import warnings import numpy as np import torch import torch.nn as nn from sklearn.metrics import average_precision_score from sklearn.preprocessing import label_binarize from datasets.evaluation.evaluate_semantic_label import Evaluator from lib.utils import Timer, AverageMeter, precision_at_one, fast_hist, per_class_iu, \ get_prediction, get_torch_device, visualize_results, \ permute_pointcloud, save_rotation_pred from MinkowskiEngine import SparseTensor def print_info(iteration, max_iteration, data_time, iter_time, losses=None, scores=None, ious=None, hist=None, ap_class=None, class_names=None): debug_str = "{}/{}: ".format(iteration + 1, max_iteration) debug_str += "Data time: {:.4f}, Iter time: {:.4f}".format(data_time, iter_time) acc = hist.diagonal() / hist.sum(1) * 100 debug_str += "\tLoss {loss.val:.3f} (AVG: {loss.avg:.3f})\t" \ "Score {top1.val:.3f} (AVG: {top1.avg:.3f})\t" \ "mIOU {mIOU:.3f} mAP {mAP:.3f} mAcc {mAcc:.3f}\n".format( loss=losses, top1=scores, mIOU=np.nanmean(ious), mAP=np.nanmean(ap_class), mAcc=np.nanmean(acc)) if class_names is not None: debug_str += "\nClasses: " + " ".join(class_names) + '\n' debug_str += 'IOU: ' + ' '.join('{:.03f}'.format(i) for i in ious) + '\n' debug_str += 'mAP: ' + ' '.join('{:.03f}'.format(i) for i in ap_class) + '\n' debug_str += 'mAcc: ' + ' '.join('{:.03f}'.format(i) for i in acc) + '\n' logging.info(debug_str) def average_precision(prob_np, target_np): num_class = prob_np.shape[1] label = label_binarize(target_np, classes=list(range(num_class))) with np.errstate(divide='ignore', invalid='ignore'): return average_precision_score(label, prob_np, average=None) def test(model, data_loader, config): device = get_torch_device(config.misc.is_cuda) dataset = data_loader.dataset num_labels = dataset.NUM_LABELS global_timer, data_timer, iter_timer = Timer(), Timer(), Timer() criterion = nn.CrossEntropyLoss(ignore_index=config.data.ignore_label) losses, scores, ious = AverageMeter(), AverageMeter(), 0 aps = np.zeros((0, num_labels)) hist = np.zeros((num_labels, num_labels)) logging.info('===> Start testing') global_timer.tic() data_iter = data_loader.__iter__() max_iter = len(data_loader) max_iter_unique = max_iter #------------------------------- add ------------------------------------- VALID_CLASS_IDS = torch.FloatTensor(dataset.VALID_CLASS_IDS).long() # Fix batch normalization running mean and std model.eval() # Clear cache (when run in val mode, cleanup training cache) torch.cuda.empty_cache() if config.test.save_features: save_feat_dir = config.test.save_feat_dir os.makedirs(save_feat_dir, exist_ok=True) with torch.no_grad(): for iteration in range(max_iter): data_timer.tic() if config.data.return_transformation: coords, input, target, transformation = data_iter.next() else: coords, input, target = data_iter.next() transformation = None data_time = data_timer.toc(False) # Preprocess input iter_timer.tic() if config.net.wrapper_type != None: color = input[:, :3].int() if config.augmentation.normalize_color: input[:, :3] = input[:, :3] / 255. 
- 0.5 sinput = SparseTensor(input, coords).to(device) # Feed forward inputs = (sinput,) if config.net.wrapper_type == None else (sinput, coords, color) soutput, out_feats = model(*inputs) output = soutput.F pred = get_prediction(dataset, output, target).int() if config.test.evaluate_benchmark: # ---------------- point level ------------------- scene_id = dataset.get_output_id(iteration) inverse_mapping = dataset.get_original_pointcloud(coords, transformation, iteration) CLASS_MAP = np.array(dataset.VALID_CLASS_IDS) pred_points = CLASS_MAP[pred.cpu().numpy()][inverse_mapping[0]] # for benchmark Evaluator.write_to_benchmark(scene_id=scene_id, pred_ids=pred_points) iter_time = iter_timer.toc(False) if config.test.save_features: dataset.save_features(coords, out_feats.F, transformation, iteration, save_feat_dir) target_np = target.numpy() num_sample = target_np.shape[0] target = target.to(device) cross_ent = criterion(output, target.long()) losses.update(float(cross_ent), num_sample) scores.update(precision_at_one(pred, target), num_sample) hist += fast_hist(pred.cpu().numpy().flatten(), target_np.flatten(), num_labels) ious = per_class_iu(hist) * 100 prob = torch.nn.functional.softmax(output, dim=1) ap = average_precision(prob.cpu().detach().numpy(), target_np) aps = np.vstack((aps, ap)) # Due to heavy bias in class, there exists class with no test label at all with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) ap_class = np.nanmean(aps, 0) * 100. if iteration % config.test.test_stat_freq == 0 and iteration > 0: reordered_ious = dataset.reorder_result(ious) reordered_ap_class = dataset.reorder_result(ap_class) class_names = dataset.get_classnames() print_info( iteration, max_iter_unique, data_time, iter_time, losses, scores, reordered_ious, hist, reordered_ap_class, class_names=class_names) if iteration % config.train.empty_cache_freq == 0: # Clear cache torch.cuda.empty_cache() global_time = global_timer.toc(False) reordered_ious = dataset.reorder_result(ious) reordered_ap_class = dataset.reorder_result(ap_class) class_names = dataset.get_classnames() print_info( iteration, max_iter_unique, data_time, iter_time, losses, scores, reordered_ious, hist, reordered_ap_class, class_names=class_names) logging.info("Finished test. Elapsed time: {:.4f}".format(global_time)) return losses.avg, scores.avg, np.nanmean(ap_class), np.nanmean(per_class_iu(hist)) * 100
ContrastiveSceneContexts-main
downstream/semseg/lib/test.py
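The test() loop never computes mIoU directly from predictions; it accumulates a confusion matrix with fast_hist and reads per-class IoU off its rows, columns, and diagonal (both helpers are defined in lib/utils.py below). A self-contained sketch of that bookkeeping with toy labels:

import numpy as np

def fast_hist(pred, label, n):
    # flatten (label, pred) pairs into a single index, then count them
    k = (label >= 0) & (label < n)
    return np.bincount(n * label[k].astype(int) + pred[k], minlength=n ** 2).reshape(n, n)

def per_class_iu(hist):
    # IoU per class = diagonal / (row sum + column sum - diagonal)
    with np.errstate(divide='ignore', invalid='ignore'):
        return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))

num_labels = 3
hist = np.zeros((num_labels, num_labels))
pred = np.array([0, 1, 2, 2, 1])    # toy predictions
label = np.array([0, 1, 2, 1, 1])   # toy ground truth
hist += fast_hist(pred, label, num_labels)

print('per-class IoU:', per_class_iu(hist) * 100)
print('mIoU:', np.nanmean(per_class_iu(hist)) * 100)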
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import logging import os import sys import torch import logging import torch.nn.functional as F from torch import nn from torch.serialization import default_restore_location from tensorboardX import SummaryWriter from MinkowskiEngine import SparseTensor from omegaconf import OmegaConf from lib.distributed import get_world_size, all_gather, is_master_proc from models import load_model from lib.test import test as test_ from lib.solvers import initialize_optimizer, initialize_scheduler from datasets import load_dataset from datasets.dataset import initialize_data_loader from lib.utils import checkpoint, precision_at_one, Timer, AverageMeter, get_prediction, load_state_with_same_shape, count_parameters class SegmentationTrainer: def __init__(self, config): self.is_master = is_master_proc(config.misc.num_gpus) if config.misc.num_gpus > 1 else True self.cur_device = torch.cuda.current_device() # load the configurations self.setup_logging() if os.path.exists('config.yaml'): logging.info('===> Loading exsiting config file') config = OmegaConf.load('config.yaml') logging.info('===> Loaded exsiting config file') logging.info('===> Configurations') logging.info(config.pretty()) # dataloader DatasetClass = load_dataset(config.data.dataset) logging.info('===> Initializing dataloader') self.train_data_loader = initialize_data_loader( DatasetClass, config, phase=config.train.train_phase, num_workers=config.data.num_workers, augment_data=True, shuffle=True, repeat=True, batch_size=config.data.batch_size // config.misc.num_gpus, limit_numpoints=config.data.train_limit_numpoints) self.val_data_loader = initialize_data_loader( DatasetClass, config, phase=config.train.val_phase, num_workers=config.data.num_val_workers, augment_data=False, shuffle=False, repeat=False, batch_size=1, limit_numpoints=False) self.test_data_loader = initialize_data_loader( DatasetClass, config, phase=config.test.test_phase, num_workers=config.data.num_workers, augment_data=False, shuffle=False, repeat=False, batch_size=1, limit_numpoints=False) # Model initialization logging.info('===> Building model') num_in_channel = self.train_data_loader.dataset.NUM_IN_CHANNEL num_labels = self.train_data_loader.dataset.NUM_LABELS NetClass = load_model(config.net.model) model = NetClass(num_in_channel, num_labels, config) logging.info('===> Number of trainable parameters: {}: {}'.format(NetClass.__name__, count_parameters(model))) logging.info(model) # Load weights if specified by the parameter. 
if config.net.weights != '': logging.info('===> Loading weights: ' + config.net.weights) state = torch.load(config.net.weights, map_location=lambda s, l: default_restore_location(s, 'cpu')) matched_weights = load_state_with_same_shape(model, state['state_dict']) model_dict = model.state_dict() model_dict.update(matched_weights) model.load_state_dict(model_dict) model = model.cuda() if config.misc.num_gpus > 1: model = torch.nn.parallel.DistributedDataParallel( module=model, device_ids=[self.cur_device], output_device=self.cur_device, broadcast_buffers=False ) self.config = config self.model = model if self.is_master: self.writer = SummaryWriter(log_dir='tensorboard') self.optimizer = initialize_optimizer(model.parameters(), config.optimizer) self.scheduler = initialize_scheduler(self.optimizer, config.optimizer) self.criterion = nn.CrossEntropyLoss(ignore_index=config.data.ignore_label) checkpoint_fn = 'weights/weights.pth' self.best_val_miou, self.best_val_miou_iter = 0,1 self.curr_iter, self.epoch, self.is_training = 1, 1, True if os.path.isfile(checkpoint_fn): logging.info("=> loading checkpoint '{}'".format(checkpoint_fn)) state = torch.load(checkpoint_fn, map_location=lambda s, l: default_restore_location(s, 'cpu')) self.load_state(state['state_dict']) self.curr_iter = state['iteration'] + 1 self.epoch = state['epoch'] self.scheduler = initialize_scheduler(self.optimizer, config.optimizer, last_step=self.curr_iter) self.optimizer.load_state_dict(state['optimizer']) if 'best_val_miou' in state: self.best_val_miou = state['best_val_miou'] logging.info("=> loaded checkpoint '{}' (epoch {})".format(checkpoint_fn, state['epoch'])) else: logging.info("=> no weights.pth") def setup_logging(self): ch = logging.StreamHandler(sys.stdout) logging.getLogger().setLevel(logging.WARN) if self.is_master: logging.getLogger().setLevel(logging.INFO) logging.basicConfig( format=os.uname()[1].split('.')[0] + ' %(asctime)s %(message)s', datefmt='%m/%d %H:%M:%S', handlers=[ch]) def load_state(self, state): if get_world_size() > 1: _model = self.model.module else: _model = self.model _model.load_state_dict(state) def set_seed(self): # Set seed based on args.seed and the update number so that we get # reproducible results when resuming from checkpoints seed = self.config.misc.seed + self.curr_iter torch.manual_seed(seed) torch.cuda.manual_seed(seed) def test(self): return test_(self.model, self.test_data_loader, self.config) def validate(self): val_loss, val_score, _, val_miou = test_(self.model, self.val_data_loader, self.config) self.writer.add_scalar('val/miou', val_miou, self.curr_iter) self.writer.add_scalar('val/loss', val_loss, self.curr_iter) self.writer.add_scalar('val/precision_at_1', val_score, self.curr_iter) if val_miou > self.best_val_miou: self.best_val_miou = val_miou self.best_val_iou_iter = self.curr_iter checkpoint(self.model, self.optimizer, self.epoch, self.curr_iter, self.config, self.best_val_miou, "miou") logging.info("Current best mIoU: {:.3f} at iter {}".format(self.best_val_miou, self.best_val_miou_iter)) checkpoint(self.model, self.optimizer, self.epoch, self.curr_iter, self.config, self.best_val_miou) def train(self): # Set up the train flag for batch normalization self.model.train() # Configuration data_timer, iter_timer = Timer(), Timer() fw_timer, bw_timer, ddp_timer = Timer(), Timer(), Timer() data_time_avg, iter_time_avg = AverageMeter(), AverageMeter() fw_time_avg, bw_time_avg, ddp_time_avg = AverageMeter(), AverageMeter(), AverageMeter() scores = AverageMeter() losses = { 
'semantic_loss': AverageMeter(), 'total_loss': AverageMeter() } # Train the network logging.info('===> Start training on {} GPUs, batch-size={}'.format( get_world_size(), self.config.data.batch_size)) data_iter = self.train_data_loader.__iter__() # (distributed) infinite sampler while self.is_training: for _ in range(len(self.train_data_loader) // self.config.optimizer.iter_size): self.optimizer.zero_grad() data_time, batch_score = 0, 0 batch_losses = { 'semantic_loss': 0.0, 'offset_dir_loss': 0.0, 'offset_norm_loss': 0.0, 'total_loss': 0.0} iter_timer.tic() # set random seed for every iteration for trackability self.set_seed() for sub_iter in range(self.config.optimizer.iter_size): # Get training data data_timer.tic() if self.config.data.return_transformation: coords, input, target, _ = data_iter.next() else: coords, input, target = data_iter.next() # Preprocess input color = input[:, :3].int() if self.config.augmentation.normalize_color: input[:, :3] = input[:, :3] / 255. - 0.5 sinput = SparseTensor(input, coords).to(self.cur_device) data_time += data_timer.toc(False) # Feed forward fw_timer.tic() inputs = (sinput,) soutput, _ = self.model(*inputs) # The output of the network is not sorted target = target.long().to(self.cur_device) semantic_loss = self.criterion(soutput.F, target.long()) total_loss = semantic_loss # Compute and accumulate gradient total_loss /= self.config.optimizer.iter_size pred = get_prediction(self.train_data_loader.dataset, soutput.F, target) score = precision_at_one(pred, target) # bp the loss fw_timer.toc(False) bw_timer.tic() total_loss.backward() bw_timer.toc(False) # gather information logging_output = {'total_loss': total_loss.item(), 'semantic_loss': semantic_loss.item(), 'score': score / self.config.optimizer.iter_size} ddp_timer.tic() if self.config.misc.num_gpus > 1: logging_output = all_gather(logging_output) logging_output = {w: np.mean([ a[w] for a in logging_output] ) for w in logging_output[0]} batch_losses['total_loss'] += logging_output['total_loss'] batch_losses['semantic_loss'] += logging_output['semantic_loss'] batch_score += logging_output['score'] ddp_timer.toc(False) # Update number of steps self.optimizer.step() self.scheduler.step() data_time_avg.update(data_time) iter_time_avg.update(iter_timer.toc(False)) fw_time_avg.update(fw_timer.diff) bw_time_avg.update(bw_timer.diff) ddp_time_avg.update(ddp_timer.diff) losses['total_loss'].update(batch_losses['total_loss'], target.size(0)) losses['semantic_loss'].update(batch_losses['semantic_loss'], target.size(0)) scores.update(batch_score, target.size(0)) if self.curr_iter >= self.config.optimizer.max_iter: self.is_training = False break if self.curr_iter % self.config.train.stat_freq == 0 or self.curr_iter == 1: lrs = ', '.join(['{:.3e}'.format(x) for x in self.scheduler.get_last_lr()]) debug_str = "===> Epoch[{}]({}/{}): Loss {:.4f}, Sem {:.4f} \tLR: {}\t".format( self.epoch, self.curr_iter, len(self.train_data_loader) // self.config.optimizer.iter_size, losses['total_loss'].avg, losses['semantic_loss'].avg, lrs) debug_str += "Score {:.3f}\tData time: {:.4f}, Forward time: {:.4f}, Backward time: {:.4f}, DDP time: {:.4f}, Total iter time: {:.4f}".format( scores.avg, data_time_avg.avg, fw_time_avg.avg, bw_time_avg.avg, ddp_time_avg.avg, iter_time_avg.avg) logging.info(debug_str) # Reset timers data_time_avg.reset() iter_time_avg.reset() # Write logs if self.is_master: self.writer.add_scalar('train/loss', losses['total_loss'].avg, self.curr_iter) self.writer.add_scalar('train/semantic_loss', 
losses['semantic_loss'].avg, self.curr_iter) self.writer.add_scalar('train/precision_at_1', scores.avg, self.curr_iter) self.writer.add_scalar('train/learning_rate', self.scheduler.get_last_lr()[0], self.curr_iter) # clear loss losses['total_loss'].reset() losses['semantic_loss'].reset() scores.reset() # Validation if self.curr_iter % self.config.train.val_freq == 0 and self.is_master: self.validate() self.model.train() if self.curr_iter % self.config.train.empty_cache_freq == 0: # Clear cache torch.cuda.empty_cache() # End of iteration self.curr_iter += 1 self.epoch += 1 # Explicit memory cleanup if hasattr(data_iter, 'cleanup'): data_iter.cleanup() # Save the final model if self.is_master: self.validate()
ContrastiveSceneContexts-main
downstream/semseg/lib/ddp_trainer.py
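SegmentationTrainer.train() spreads each optimizer update over config.optimizer.iter_size forward/backward passes, dividing the loss so the accumulated gradients average correctly (note also that validate() stores the best iteration in self.best_val_iou_iter while the log message reads self.best_val_miou_iter, so the reported iteration never advances). A minimal standalone sketch of that accumulation pattern, using a toy linear model instead of the MinkowskiEngine network:

import torch
import torch.nn as nn

model = nn.Linear(8, 4)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
criterion = nn.CrossEntropyLoss()
iter_size = 4                       # sub-iterations per optimizer step

optimizer.zero_grad()
for sub_iter in range(iter_size):
    x = torch.randn(16, 8)
    target = torch.randint(0, 4, (16,))
    loss = criterion(model(x), target) / iter_size   # scale so gradients average
    loss.backward()                                  # gradients accumulate in .grad
optimizer.step()                                     # single parameter update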
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import trimesh # color palette for nyu40 labels def create_color_palette(): return [ (0, 0, 0), (174, 199, 232), # wall (152, 223, 138), # floor (31, 119, 180), # cabinet (255, 187, 120), # bed (188, 189, 34), # chair (140, 86, 75), # sofa (255, 152, 150), # table (214, 39, 40), # door (197, 176, 213), # window (148, 103, 189), # bookshelf (196, 156, 148), # picture (23, 190, 207), # counter (178, 76, 76), (247, 182, 210), # desk (66, 188, 102), (219, 219, 141), # curtain (140, 57, 197), (202, 185, 52), (51, 176, 203), (200, 54, 131), (92, 193, 61), (78, 71, 183), (172, 114, 82), (255, 127, 14), # refrigerator (91, 163, 138), (153, 98, 156), (140, 153, 101), (158, 218, 229), # shower curtain (100, 125, 154), (178, 127, 135), (120, 185, 128), (146, 111, 194), (44, 160, 44), # toilet (112, 128, 144), # sink (96, 207, 209), (227, 119, 194), # bathtub (213, 92, 176), (94, 106, 211), (82, 84, 163), # otherfurn (100, 85, 144), ] def write_triangle_mesh(vertices, colors, faces, outputFile): mesh = trimesh.Trimesh(vertices=vertices, vertex_colors=colors, faces=faces, process=False) mesh.export(outputFile) def read_triangle_mesh(filename): mesh = trimesh.load_mesh(filename, process=False) if isinstance(mesh, trimesh.PointCloud): vertices = mesh.vertices colors = mesh.colors faces = None elif isinstance(mesh, trimesh.Trimesh): vertices = mesh.vertices colors = mesh.visual.vertex_colors faces = mesh.faces return vertices, colors, faces
ContrastiveSceneContexts-main
downstream/semseg/lib/io3d.py
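write_triangle_mesh and read_triangle_mesh are thin wrappers around trimesh. A small standalone round trip with a single colored triangle, assuming the same trimesh calls as the file above; the output file name is arbitrary.

import numpy as np
import trimesh

vertices = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
colors = np.array([[255, 0, 0], [0, 255, 0], [0, 0, 255]], dtype=np.uint8)
faces = np.array([[0, 1, 2]])

# same call pattern as write_triangle_mesh
mesh = trimesh.Trimesh(vertices=vertices, vertex_colors=colors, faces=faces, process=False)
mesh.export('triangle.ply')

# same call pattern as read_triangle_mesh for a Trimesh result
loaded = trimesh.load_mesh('triangle.ply', process=False)
print(loaded.vertices.shape, loaded.visual.vertex_colors.shape, loaded.faces.shape)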
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. #!/usr/bin/env python3 import os import time import torch import signal import pickle import threading import functools import traceback import torch.nn as nn import torch.distributed as dist import multiprocessing as mp """Multiprocessing error handler.""" class ChildException(Exception): """Wraps an exception from a child process.""" def __init__(self, child_trace): super(ChildException, self).__init__(child_trace) class ErrorHandler(object): """Multiprocessing error handler (based on fairseq's). Listens for errors in child processes and propagates the tracebacks to the parent process. """ def __init__(self, error_queue): # Shared error queue self.error_queue = error_queue # Children processes sharing the error queue self.children_pids = [] # Start a thread listening to errors self.error_listener = threading.Thread(target=self.listen, daemon=True) self.error_listener.start() # Register the signal handler signal.signal(signal.SIGUSR1, self.signal_handler) def add_child(self, pid): """Registers a child process.""" self.children_pids.append(pid) def listen(self): """Listens for errors in the error queue.""" # Wait until there is an error in the queue child_trace = self.error_queue.get() # Put the error back for the signal handler self.error_queue.put(child_trace) # Invoke the signal handler os.kill(os.getpid(), signal.SIGUSR1) def signal_handler(self, sig_num, stack_frame): """Signal handler.""" # Kill children processes for pid in self.children_pids: os.kill(pid, signal.SIGINT) # Propagate the error from the child process raise ChildException(self.error_queue.get()) """Multiprocessing helpers.""" def run(proc_rank, world_size, error_queue, fun, fun_args, fun_kwargs): """Runs a function from a child process.""" try: # Initialize the process group init_process_group(proc_rank, world_size) # Run the function fun(*fun_args, **fun_kwargs) except: # Propagate exception to the parent process error_queue.put(traceback.format_exc()) finally: destroy_process_group() def multi_proc_run(num_proc, fun, fun_args=(), fun_kwargs={}): """Runs a function in a multi-proc setting.""" # Handle errors from training subprocesses error_queue = mp.SimpleQueue() error_handler = ErrorHandler(error_queue) # Run each training subprocess ps = [] for i in range(num_proc): p_i = mp.Process( target=run, args=(i, num_proc, error_queue, fun, fun_args, fun_kwargs) ) ps.append(p_i) p_i.start() error_handler.add_child(p_i.pid) # Wait for each subprocess to finish for p in ps: p.join() """Distributed helpers.""" def is_master_proc(num_gpus): """Determines if the current process is the master process. Master process is responsible for logging, writing and loading checkpoints. In the multi GPU setting, we assign the master role to the rank 0 process. When training using a single GPU, there is only one training processes which is considered the master processes. 
""" return num_gpus == 1 or torch.distributed.get_rank() == 0 def get_world_size(): if not dist.is_available(): return 1 if not dist.is_initialized(): return 1 return dist.get_world_size() def get_rank(): if not dist.is_available(): return 0 if not dist.is_initialized(): return 0 return dist.get_rank() def synchronize(): """ Helper function to synchronize (barrier) among all processes when using distributed training """ if not dist.is_available(): return if not dist.is_initialized(): return world_size = dist.get_world_size() if world_size == 1: return dist.barrier() def all_gather_differentiable(tensor): """ Run differentiable gather function for SparseConv features with variable number of points. tensor: [num_points, feature_dim] """ world_size = get_world_size() if world_size == 1: return [tensor] num_points, f_dim = tensor.size() local_np = torch.LongTensor([num_points]).to("cuda") np_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)] dist.all_gather(np_list, local_np) np_list = [int(np.item()) for np in np_list] max_np = max(np_list) tensor_list = [] for _ in np_list: tensor_list.append(torch.FloatTensor(size=(max_np, f_dim)).to("cuda")) if local_np != max_np: padding = torch.zeros(size=(max_np-local_np, f_dim)).to("cuda").float() tensor = torch.cat((tensor, padding), dim=0) assert tensor.size() == (max_np, f_dim) dist.all_gather(tensor_list, tensor) data_list = [] for gather_np, gather_tensor in zip(np_list, tensor_list): gather_tensor = gather_tensor[:gather_np] assert gather_tensor.size() == (gather_np, f_dim) data_list.append(gather_tensor) return data_list def all_gather(data): """ Run all_gather on arbitrary picklable data (not necessarily tensors) Args: data: any picklable object Returns: list[data]: list of data gathered from each rank """ world_size = get_world_size() if world_size == 1: return [data] # serialized to a Tensor buffer = pickle.dumps(data) storage = torch.ByteStorage.from_buffer(buffer) tensor = torch.ByteTensor(storage).to("cuda") # obtain Tensor size of each rank local_size = torch.LongTensor([tensor.numel()]).to("cuda") size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)] dist.all_gather(size_list, local_size) size_list = [int(size.item()) for size in size_list] max_size = max(size_list) # receiving Tensor from all ranks # we pad the tensor because torch all_gather does not support # gathering tensors of different shapes tensor_list = [] for _ in size_list: tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda")) if local_size != max_size: padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda") tensor = torch.cat((tensor, padding), dim=0) dist.all_gather(tensor_list, tensor) data_list = [] for size, tensor in zip(size_list, tensor_list): buffer = tensor.cpu().numpy().tobytes()[:size] data_list.append(pickle.loads(buffer)) return data_list def init_process_group(proc_rank, world_size): """Initializes the default process group.""" # Set the GPU to use torch.cuda.set_device(proc_rank) # Initialize the process group torch.distributed.init_process_group( backend="nccl", init_method="tcp://{}:{}".format("localhost", "10001"), world_size=world_size, rank=proc_rank ) def destroy_process_group(): """Destroys the default process group.""" torch.distributed.destroy_process_group()
ContrastiveSceneContexts-main
downstream/semseg/lib/distributed.py
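all_gather() works around torch.distributed.all_gather's fixed-shape requirement by pickling each object into a byte tensor, padding every tensor to the per-rank maximum length, and trimming the padding before unpickling. A single-process sketch of just that serialization and padding logic (no process group needed; it mirrors the file's own, older ByteStorage API):

import pickle
import torch

objects = [{'rank': 0, 'loss': 0.5}, {'rank': 1, 'scores': [1, 2, 3]}]

# serialize each object into a 1-D uint8 tensor, as each rank does locally
buffers = [torch.ByteTensor(torch.ByteStorage.from_buffer(pickle.dumps(o))) for o in objects]
sizes = [int(b.numel()) for b in buffers]
max_size = max(sizes)

# pad every tensor to max_size, as each rank does before dist.all_gather
padded = [torch.cat((b, torch.zeros(max_size - b.numel(), dtype=torch.uint8)))
          for b in buffers]

# after gathering, strip the padding and unpickle
recovered = [pickle.loads(p[:s].numpy().tobytes()) for p, s in zip(padded, sizes)]
print(recovered)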
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import numpy as np from numpy.linalg import matrix_rank, inv from plyfile import PlyData, PlyElement import pandas as pd COLOR_MAP_RGB = ( (241, 255, 82), (102, 168, 226), (0, 255, 0), (113, 143, 65), (89, 173, 163), (254, 158, 137), (190, 123, 75), (100, 22, 116), (0, 18, 141), (84, 84, 84), (85, 116, 127), (255, 31, 33), (228, 228, 228), (0, 255, 0), (70, 145, 150), (237, 239, 94), ) IGNORE_COLOR = (0, 0, 0) def read_plyfile(filepath): """Read ply file and return it as numpy array. Returns None if emtpy.""" with open(filepath, 'rb') as f: plydata = PlyData.read(f) if plydata.elements: return pd.DataFrame(plydata.elements[0].data).values def save_point_cloud(points_3d, filename, binary=True, with_label=False, verbose=True): """Save an RGB point cloud as a PLY file. Args: points_3d: Nx6 matrix where points_3d[:, :3] are the XYZ coordinates and points_3d[:, 4:] are the RGB values. If Nx3 matrix, save all points with [128, 128, 128] (gray) color. """ assert points_3d.ndim == 2 if with_label: assert points_3d.shape[1] == 7 python_types = (float, float, float, int, int, int, int) npy_types = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1'), ('label', 'u1')] else: if points_3d.shape[1] == 3: gray_concat = np.tile(np.array([128], dtype=np.uint8), (points_3d.shape[0], 3)) points_3d = np.hstack((points_3d, gray_concat)) assert points_3d.shape[1] == 6 python_types = (float, float, float, int, int, int) npy_types = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')] if binary is True: # Format into NumPy structured array vertices = [] for row_idx in range(points_3d.shape[0]): cur_point = points_3d[row_idx] vertices.append(tuple(dtype(point) for dtype, point in zip(python_types, cur_point))) vertices_array = np.array(vertices, dtype=npy_types) el = PlyElement.describe(vertices_array, 'vertex') # Write PlyData([el]).write(filename) else: # PlyData([el], text=True).write(filename) with open(filename, 'w') as f: f.write('ply\n' 'format ascii 1.0\n' 'element vertex %d\n' 'property float x\n' 'property float y\n' 'property float z\n' 'property uchar red\n' 'property uchar green\n' 'property uchar blue\n' 'property uchar alpha\n' 'end_header\n' % points_3d.shape[0]) for row_idx in range(points_3d.shape[0]): X, Y, Z, R, G, B = points_3d[row_idx] f.write('%f %f %f %d %d %d 0\n' % (X, Y, Z, R, G, B)) if verbose is True: print('Saved point cloud to: %s' % filename) class Camera(object): def __init__(self, intrinsics): self._intrinsics = intrinsics self._camera_matrix = self.build_camera_matrix(self.intrinsics) self._K_inv = inv(self.camera_matrix) @staticmethod def build_camera_matrix(intrinsics): """Build the 3x3 camera matrix K using the given intrinsics. Equation 6.10 from HZ. """ f = intrinsics['focal_length'] pp_x = intrinsics['pp_x'] pp_y = intrinsics['pp_y'] K = np.array([[f, 0, pp_x], [0, f, pp_y], [0, 0, 1]], dtype=np.float32) # K[:, 0] *= -1. # Step 1 of Kyle assert matrix_rank(K) == 3 return K @staticmethod def extrinsics2RT(extrinsics): """Convert extrinsics matrix to separate rotation matrix R and translation vector T. """ assert extrinsics.shape == (4, 4) R = extrinsics[:3, :3] T = extrinsics[3, :3] R = np.copy(R) T = np.copy(T) T = T.reshape(3, 1) R[0, :] *= -1. # Step 1 of Kyle T *= 100. 
# Convert from m to cm return R, T def project(self, points_3d, extrinsics=None): """Project a 3D point in camera coordinates into the camera/image plane. Args: point_3d: """ if extrinsics is not None: # Map points to camera coordinates points_3d = self.world2camera(extrinsics, points_3d) # TODO: Make sure to handle homogeneous AND non-homogeneous coordinate points # TODO: Consider handling a set of points raise NotImplementedError def backproject(self, depth_map, labels=None, max_depth=None, max_height=None, min_height=None, rgb_img=None, extrinsics=None, prune=True): """Backproject a depth map into 3D points (camera coordinate system). Attach color if RGB image is provided, otherwise use gray [128 128 128] color. Does not show points at Z = 0 or maximum Z = 65535 depth. Args: labels: Tensor with the same shape as depth map (but can be 1-channel or 3-channel). max_depth: Maximum depth in cm. All pts with depth greater than max_depth will be ignored. max_height: Maximum height in cm. All pts with height greater than max_height will be ignored. Returns: points_3d: Numpy array of size Nx3 (XYZ) or Nx6 (XYZRGB). """ if labels is not None: assert depth_map.shape[:2] == labels.shape[:2] if (labels.ndim == 2) or ((labels.ndim == 3) and (labels.shape[2] == 1)): n_label_channels = 1 elif (labels.ndim == 3) and (labels.shape[2] == 3): n_label_channels = 3 if rgb_img is not None: assert depth_map.shape[:2] == rgb_img.shape[:2] else: rgb_img = np.ones_like(depth_map, dtype=np.uint8) * 255 # Convert from 1-channel to 3-channel if (rgb_img.ndim == 3) and (rgb_img.shape[2] == 1): rgb_img = np.tile(rgb_img, [1, 1, 3]) # Convert depth map to single channel if it is multichannel if (depth_map.ndim == 3) and depth_map.shape[2] == 3: depth_map = np.squeeze(depth_map[:, :, 0]) depth_map = depth_map.astype(np.float32) # Get image dimensions H, W = depth_map.shape # Create meshgrid (pixel coordinates) Z = depth_map A, B = np.meshgrid(range(W), range(H)) ones = np.ones_like(A) grid = np.concatenate((A[:, :, np.newaxis], B[:, :, np.newaxis], ones[:, :, np.newaxis]), axis=2) grid = grid.astype(np.float32) * Z[:, :, np.newaxis] # Nx3 where each row is (a*Z, b*Z, Z) grid_flattened = grid.reshape((-1, 3)) grid_flattened = grid_flattened.T # 3xN where each col is (a*Z, b*Z, Z) prod = np.dot(self.K_inv, grid_flattened) XYZ = np.concatenate((prod[:2, :].T, Z.flatten()[:, np.newaxis]), axis=1) # Nx3 XYZRGB = np.hstack((XYZ, rgb_img.reshape((-1, 3)))) points_3d = XYZRGB if labels is not None: labels_reshaped = labels.reshape((-1, n_label_channels)) # Prune points if prune is True: valid = [] for idx in range(points_3d.shape[0]): cur_y = points_3d[idx, 1] cur_z = points_3d[idx, 2] if (cur_z == 0) or (cur_z == 65535): # Don't show things at 0 distance or max distance continue elif (max_depth is not None) and (cur_z > max_depth): continue elif (max_height is not None) and (cur_y > max_height): continue elif (min_height is not None) and (cur_y < min_height): continue else: valid.append(idx) points_3d = points_3d[np.asarray(valid)] if labels is not None: labels_reshaped = labels_reshaped[np.asarray(valid)] if extrinsics is not None: points_3d = self.camera2world(extrinsics, points_3d) if labels is not None: points_3d_labels = np.hstack((points_3d[:, :3], labels_reshaped)) return points_3d, points_3d_labels else: return points_3d @staticmethod def _camera2world_transform(no_rgb_points_3d, R, T): points_3d_world = (np.dot(R.T, no_rgb_points_3d.T) - T).T # Nx3 return points_3d_world @staticmethod def 
_world2camera_transform(no_rgb_points_3d, R, T): points_3d_world = (np.dot(R, no_rgb_points_3d.T + T)).T # Nx3 return points_3d_world def _transform_points(self, points_3d, extrinsics, transform): """Base/wrapper method for transforming points using R and T. """ assert points_3d.ndim == 2 orig_points_3d = points_3d points_3d = np.copy(orig_points_3d) if points_3d.shape[1] == 6: # XYZRGB points_3d = points_3d[:, :3] elif points_3d.shape[1] == 3: # XYZ points_3d = points_3d else: raise ValueError('3D points need to be XYZ or XYZRGB.') R, T = self.extrinsics2RT(extrinsics) points_3d_world = transform(points_3d, R, T) # Add color again (if appropriate) if orig_points_3d.shape[1] == 6: # XYZRGB points_3d_world = np.hstack((points_3d_world, orig_points_3d[:, -3:])) return points_3d_world def camera2world(self, extrinsics, points_3d): """Transform from camera coordinates (3D) to world coordinates (3D). Args: points_3d: Nx3 or Nx6 matrix of N points with XYZ or XYZRGB values. """ return self._transform_points(points_3d, extrinsics, self._camera2world_transform) def world2camera(self, extrinsics, points_3d): """Transform from world coordinates (3D) to camera coordinates (3D). """ return self._transform_points(points_3d, extrinsics, self._world2camera_transform) @property def intrinsics(self): return self._intrinsics @property def camera_matrix(self): return self._camera_matrix @property def K_inv(self): return self._K_inv def colorize_pointcloud(xyz, label, ignore_label=255): assert label[label != ignore_label].max() < len(COLOR_MAP_RGB), 'Not enough colors.' label_rgb = np.array([COLOR_MAP_RGB[i] if i != ignore_label else IGNORE_COLOR for i in label]) return np.hstack((xyz, label_rgb)) class PlyWriter(object): POINTCLOUD_DTYPE = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')] @classmethod def read_txt(cls, txtfile): # Read txt file and parse its content. with open(txtfile) as f: pointcloud = [l.split() for l in f] # Load point cloud to named numpy array. pointcloud = np.array(pointcloud).astype(np.float32) assert pointcloud.shape[1] == 6 xyz = pointcloud[:, :3].astype(np.float32) rgb = pointcloud[:, 3:].astype(np.uint8) return xyz, rgb @staticmethod def write_ply(array, filepath): ply_el = PlyElement.describe(array, 'vertex') target_path, _ = os.path.split(filepath) if target_path != '' and not os.path.exists(target_path): os.makedirs(target_path) PlyData([ply_el]).write(filepath) @classmethod def write_vertex_only_ply(cls, vertices, filepath): # assume that points are N x 3 np array for vertex locations color = 255 * np.ones((len(vertices), 3)) pc_points = np.array([tuple(p) for p in np.concatenate((vertices, color), axis=1)], dtype=cls.POINTCLOUD_DTYPE) cls.write_ply(pc_points, filepath) @classmethod def write_ply_vert_color(cls, vertices, colors, filepath): # assume that points are N x 3 np array for vertex locations pc_points = np.array([tuple(p) for p in np.concatenate((vertices, colors), axis=1)], dtype=cls.POINTCLOUD_DTYPE) cls.write_ply(pc_points, filepath) @classmethod def concat_label(cls, target, xyz, label): subpointcloud = np.concatenate([xyz, label], axis=1) subpointcloud = np.array([tuple(l) for l in subpointcloud], dtype=cls.POINTCLOUD_DTYPE) return np.concatenate([target, subpointcloud], axis=0)
ContrastiveSceneContexts-main
downstream/semseg/lib/pc_utils.py
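save_point_cloud writes binary PLY files by packing the Nx6 XYZRGB array into a NumPy structured array and handing it to plyfile. A minimal standalone sketch of that path for two points; the output file name is arbitrary.

import numpy as np
from plyfile import PlyData, PlyElement

points_3d = np.array([[0.0, 0.0, 0.0, 255, 0, 0],
                      [1.0, 0.0, 0.0, 0, 255, 0]])

# field layout matching the PLY vertex element
npy_types = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
             ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
vertices = np.array([tuple(row) for row in points_3d], dtype=npy_types)

el = PlyElement.describe(vertices, 'vertex')
PlyData([el]).write('cloud.ply')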
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import json import logging import os import errno import time import torch import numpy as np from omegaconf import OmegaConf from lib.pc_utils import colorize_pointcloud, save_point_cloud from lib.distributed import get_world_size def load_state_with_same_shape(model, weights): # weights['conv1.kernel'] = weights['conv1.kernel'].repeat([1,3,1])/3.0 model_state = model.state_dict() if list(weights.keys())[0].startswith('module.'): logging.info("Loading multigpu weights with module. prefix...") weights = {k.partition('module.')[2]:weights[k] for k in weights.keys()} if list(weights.keys())[0].startswith('encoder.'): logging.info("Loading multigpu weights with encoder. prefix...") weights = {k.partition('encoder.')[2]:weights[k] for k in weights.keys()} # print(weights.items()) # print("===================") # print("===================") # print("===================") # print("===================") # print("===================") # print(model_state) filtered_weights = { k: v for k, v in weights.items() if k in model_state and v.size() == model_state[k].size() } logging.info("Loading weights:" + ', '.join(filtered_weights.keys())) return filtered_weights def checkpoint(model, optimizer, epoch, iteration, config, best_val_miou=None, postfix=None): mkdir_p('weights') filename = f"checkpoint_{config.net.model}_iter{iteration}.pth" if config.train.overwrite_weights: filename = f"checkpoint_{config.net.model}.pth" if postfix is not None: filename = f"checkpoint_{config.net.model}_{postfix}.pth" checkpoint_file = 'weights/' + filename _model = model.module if get_world_size() > 1 else model state = { 'iteration': iteration, 'epoch': epoch, 'arch': config.net.model, 'state_dict': _model.state_dict(), 'optimizer': optimizer.state_dict() } if best_val_miou is not None: state['best_val_miou'] = best_val_miou state['best_val_iter'] = iteration # save config OmegaConf.save(config, 'config.yaml') torch.save(state, checkpoint_file) logging.info(f"Checkpoint saved to {checkpoint_file}") if postfix == None: # Delete symlink if it exists if os.path.exists('weights/weights.pth'): os.remove('weights/weights.pth') # Create symlink os.system('ln -s {} weights/weights.pth'.format(filename)) def precision_at_one(pred, target, ignore_label=255): """Computes the precision@k for the specified values of k""" # batch_size = target.size(0) * target.size(1) * target.size(2) pred = pred.view(1, -1) target = target.view(1, -1) correct = pred.eq(target) correct = correct[target != ignore_label] correct = correct.view(-1) if correct.nelement(): return correct.float().sum(0).mul(100.0 / correct.size(0)).item() else: return float('nan') def fast_hist(pred, label, n): k = (label >= 0) & (label < n) return np.bincount(n * label[k].astype(int) + pred[k], minlength=n**2).reshape(n, n) def per_class_iu(hist): with np.errstate(divide='ignore', invalid='ignore'): return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist)) class WithTimer(object): """Timer for with statement.""" def __init__(self, name=None): self.name = name def __enter__(self): self.tstart = time.time() def __exit__(self, type, value, traceback): out_str = 'Elapsed: %s' % (time.time() - self.tstart) if self.name: logging.info('[{self.name}]') logging.info(out_str) class Timer(object): """A simple timer.""" def __init__(self): self.total_time = 0. self.calls = 0 self.start_time = 0. 
self.diff = 0. self.average_time = 0. def reset(self): self.total_time = 0 self.calls = 0 self.start_time = 0 self.diff = 0 self.averate_time = 0 def tic(self): # using time.time instead of time.clock because time time.clock # does not normalize for multithreading self.start_time = time.time() def toc(self, average=True): self.diff = time.time() - self.start_time self.total_time += self.diff self.calls += 1 self.average_time = self.total_time / self.calls if average: return self.average_time else: return self.diff class ExpTimer(Timer): """ Exponential Moving Average Timer """ def __init__(self, alpha=0.5): super(ExpTimer, self).__init__() self.alpha = alpha def toc(self): self.diff = time.time() - self.start_time self.average_time = self.alpha * self.diff + \ (1 - self.alpha) * self.average_time return self.average_time class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def mkdir_p(path): try: os.makedirs(path) except OSError as exc: if exc.errno == errno.EEXIST and os.path.isdir(path): pass else: raise def read_txt(path): """Read txt file into lines. """ with open(path) as f: lines = f.readlines() lines = [x.strip() for x in lines] return lines def debug_on(): import sys import pdb import functools import traceback def decorator(f): @functools.wraps(f) def wrapper(*args, **kwargs): try: return f(*args, **kwargs) except Exception: info = sys.exc_info() traceback.print_exception(*info) pdb.post_mortem(info[2]) return wrapper return decorator def get_prediction(dataset, output, target): return output.max(1)[1] def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) def get_torch_device(is_cuda): return torch.device('cuda' if is_cuda else 'cpu') class HashTimeBatch(object): def __init__(self, prime=5279): self.prime = prime def __call__(self, time, batch): return self.hash(time, batch) def hash(self, time, batch): return self.prime * batch + time def dehash(self, key): time = key % self.prime batch = key / self.prime return time, batch def save_rotation_pred(iteration, pred, dataset, save_pred_dir): """Save prediction results in original pointcloud scale.""" decode_label_map = {} for k, v in dataset.label_map.items(): decode_label_map[v] = k pred = np.array([decode_label_map[x] for x in pred], dtype=np.int) out_rotation_txt = dataset.get_output_id(iteration) + '.txt' out_rotation_path = save_pred_dir + '/' + out_rotation_txt np.savetxt(out_rotation_path, pred, fmt='%i') def visualize_results(coords, input, target, upsampled_pred, config, iteration): # Get filter for valid predictions in the first batch. target_batch = coords[:, 3].numpy() == 0 input_xyz = coords[:, :3].numpy() target_valid = target.numpy() != 255 target_pred = np.logical_and(target_batch, target_valid) target_nonpred = np.logical_and(target_batch, ~target_valid) ptc_nonpred = np.hstack((input_xyz[target_nonpred], np.zeros((np.sum(target_nonpred), 3)))) # Unwrap file index if tested with rotation. file_iter = iteration if config.test_rotation >= 1: file_iter = iteration // config.test.test_rotation # Create directory to save visualization results. os.makedirs(config.test.visualize_path, exist_ok=True) # Label visualization in RGB. 
xyzlabel = colorize_pointcloud(input_xyz[target_pred], upsampled_pred[target_pred]) xyzlabel = np.vstack((xyzlabel, ptc_nonpred)) filename = '_'.join([config.dataset, config.model, 'pred', '%04d.ply' % file_iter]) save_point_cloud(xyzlabel, os.path.join(config.test.visualize_path, filename), verbose=False) # RGB input values visualization. xyzrgb = np.hstack((input_xyz[target_batch], input[:, :3].cpu().numpy()[target_batch])) filename = '_'.join([config.dataset, config.model, 'rgb', '%04d.ply' % file_iter]) save_point_cloud(xyzrgb, os.path.join(config.test.visualize_path, filename), verbose=False) # Ground-truth visualization in RGB. xyzgt = colorize_pointcloud(input_xyz[target_pred], target.numpy()[target_pred]) xyzgt = np.vstack((xyzgt, ptc_nonpred)) filename = '_'.join([config.dataset, config.model, 'gt', '%04d.ply' % file_iter]) save_point_cloud(xyzgt, os.path.join(config.test.visualize_path, filename), verbose=False) def permute_pointcloud(input_coords, pointcloud, transformation, label_map, voxel_output, voxel_pred): """Get permutation from pointcloud to input voxel coords.""" def _hash_coords(coords, coords_min, coords_dim): return np.ravel_multi_index((coords - coords_min).T, coords_dim) # Validate input. input_batch_size = input_coords[:, -1].max().item() pointcloud_batch_size = pointcloud[:, -1].max().int().item() transformation_batch_size = transformation[:, -1].max().int().item() assert input_batch_size == pointcloud_batch_size == transformation_batch_size pointcloud_permutation, pointcloud_target = [], [] # Process each batch. for i in range(input_batch_size + 1): # Filter batch from the data. input_coords_mask_b = input_coords[:, -1] == i input_coords_b = (input_coords[input_coords_mask_b])[:, :-1].numpy() pointcloud_b = pointcloud[pointcloud[:, -1] == i, :-1].numpy() transformation_b = transformation[i, :-1].reshape(4, 4).numpy() # Transform original pointcloud to voxel space. original_coords1 = np.hstack((pointcloud_b[:, :3], np.ones((pointcloud_b.shape[0], 1)))) original_vcoords = np.floor(original_coords1 @ transformation_b.T)[:, :3].astype(int) # Hash input and voxel coordinates to flat coordinate. vcoords_all = np.vstack((input_coords_b, original_vcoords)) vcoords_min = vcoords_all.min(0) vcoords_dims = vcoords_all.max(0) - vcoords_all.min(0) + 1 input_coords_key = _hash_coords(input_coords_b, vcoords_min, vcoords_dims) original_vcoords_key = _hash_coords(original_vcoords, vcoords_min, vcoords_dims) # Query voxel predictions from original pointcloud. key_to_idx = dict(zip(input_coords_key, range(len(input_coords_key)))) pointcloud_permutation.append( np.array([key_to_idx.get(i, -1) for i in original_vcoords_key])) pointcloud_target.append(pointcloud_b[:, -1].astype(int)) pointcloud_permutation = np.concatenate(pointcloud_permutation) # Prepare pointcloud permutation array. pointcloud_permutation = torch.from_numpy(pointcloud_permutation) permutation_mask = pointcloud_permutation >= 0 permutation_valid = pointcloud_permutation[permutation_mask] # Permuate voxel output to pointcloud. pointcloud_output = torch.zeros(pointcloud.shape[0], voxel_output.shape[1]).to(voxel_output) pointcloud_output[permutation_mask] = voxel_output[permutation_valid] # Permuate voxel prediction to pointcloud. # NOTE: Invalid points (points found in pointcloud but not in the voxel) are mapped to 0. 
pointcloud_pred = torch.ones(pointcloud.shape[0]).int().to(voxel_pred) * 0 pointcloud_pred[permutation_mask] = voxel_pred[permutation_valid] # Map pointcloud target to respect dataset IGNORE_LABELS pointcloud_target = torch.from_numpy( np.array([label_map[i] for i in np.concatenate(pointcloud_target)])).int() return pointcloud_output, pointcloud_pred, pointcloud_target
ContrastiveSceneContexts-main
downstream/semseg/lib/utils.py
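load_state_with_same_shape keeps only checkpoint entries whose name and tensor shape match the freshly built model, so pretraining weights can be reused even when some layers differ. A standalone sketch of that filter with a toy two-layer model and one deliberately mismatched checkpoint entry:

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(8, 16), nn.Linear(16, 4))
checkpoint = {'0.weight': torch.zeros(16, 8),     # name and shape match -> loaded
              '0.bias':   torch.zeros(16),        # matches -> loaded
              '1.weight': torch.zeros(10, 16)}    # wrong shape -> skipped

model_state = model.state_dict()
filtered = {k: v for k, v in checkpoint.items()
            if k in model_state and v.size() == model_state[k].size()}

# merge the surviving weights back, keeping fresh init for everything else
model_state.update(filtered)
model.load_state_dict(model_state)
print('loaded:', sorted(filtered.keys()))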
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from scipy.sparse import csr_matrix import torch class SparseMM(torch.autograd.Function): """ Sparse x dense matrix multiplication with autograd support. Implementation by Soumith Chintala: https://discuss.pytorch.org/t/ does-pytorch-support-autograd-on-sparse-matrix/6156/7 """ def forward(self, matrix1, matrix2): self.save_for_backward(matrix1, matrix2) return torch.mm(matrix1, matrix2) def backward(self, grad_output): matrix1, matrix2 = self.saved_tensors grad_matrix1 = grad_matrix2 = None if self.needs_input_grad[0]: grad_matrix1 = torch.mm(grad_output, matrix2.t()) if self.needs_input_grad[1]: grad_matrix2 = torch.mm(matrix1.t(), grad_output) return grad_matrix1, grad_matrix2 def sparse_float_tensor(values, indices, size=None): """ Return a torch sparse matrix give values and indices (row_ind, col_ind). If the size is an integer, return a square matrix with side size. If the size is a torch.Size, use it to initialize the out tensor. If none, the size is inferred. """ indices = torch.stack(indices).int() sargs = [indices, values.float()] if size is not None: # Use the provided size if isinstance(size, int): size = torch.Size((size, size)) sargs.append(size) if values.is_cuda: return torch.cuda.sparse.FloatTensor(*sargs) else: return torch.sparse.FloatTensor(*sargs) def diags(values, size=None): values = values.view(-1) n = values.nelement() size = torch.Size((n, n)) indices = (torch.arange(0, n), torch.arange(0, n)) return sparse_float_tensor(values, indices, size) def sparse_to_csr_matrix(tensor): tensor = tensor.cpu() inds = tensor._indices().numpy() vals = tensor._values().numpy() return csr_matrix((vals, (inds[0], inds[1])), shape=[s for s in tensor.shape]) def csr_matrix_to_sparse(mat): row_ind, col_ind = mat.nonzero() return sparse_float_tensor( torch.from_numpy(mat.data), (torch.from_numpy(row_ind), torch.from_numpy(col_ind)), size=torch.Size(mat.shape))
ContrastiveSceneContexts-main
downstream/semseg/lib/math_functions.py
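SparseMM is written in the legacy instance-method torch.autograd.Function style (forward/backward on self), which newer PyTorch releases reject. A sketch of the same sparse-by-dense product against the current static-method API; the toy tensors at the end are illustrative only.

import torch

class SparseMMFn(torch.autograd.Function):
    """Sparse x dense matmul with manual gradients, static-method style."""

    @staticmethod
    def forward(ctx, matrix1, matrix2):
        ctx.save_for_backward(matrix1, matrix2)
        return torch.mm(matrix1, matrix2)

    @staticmethod
    def backward(ctx, grad_output):
        matrix1, matrix2 = ctx.saved_tensors
        grad1 = grad2 = None
        if ctx.needs_input_grad[0]:
            grad1 = torch.mm(grad_output, matrix2.t())
        if ctx.needs_input_grad[1]:
            grad2 = torch.mm(matrix1.t(), grad_output)
        return grad1, grad2

sparse = torch.eye(4, 3).to_sparse()                # toy sparse operand
dense = torch.randn(3, 2, requires_grad=True)
out = SparseMMFn.apply(sparse, dense)
out.sum().backward()
print(out.shape, dense.grad.shape)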
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn from MinkowskiEngine import MinkowskiGlobalPooling, MinkowskiBroadcastAddition, MinkowskiBroadcastMultiplication class MinkowskiLayerNorm(nn.Module): def __init__(self, num_features, eps=1e-5, D=-1): super(MinkowskiLayerNorm, self).__init__() self.num_features = num_features self.eps = eps self.weight = nn.Parameter(torch.ones(1, num_features)) self.bias = nn.Parameter(torch.zeros(1, num_features)) self.mean_in = MinkowskiGlobalPooling(dimension=D) self.glob_sum = MinkowskiBroadcastAddition(dimension=D) self.glob_sum2 = MinkowskiBroadcastAddition(dimension=D) self.glob_mean = MinkowskiGlobalPooling(dimension=D) self.glob_times = MinkowskiBroadcastMultiplication(dimension=D) self.D = D self.reset_parameters() def __repr__(self): s = f'(D={self.D})' return self.__class__.__name__ + s def reset_parameters(self): self.weight.data.fill_(1) self.bias.data.zero_() def _check_input_dim(self, input): if input.F.dim() != 2: raise ValueError('expected 2D input (got {}D input)'.format(input.dim())) def forward(self, x): self._check_input_dim(x) mean = self.mean_in(x).F.mean(-1, keepdim=True) mean = mean + torch.zeros(mean.size(0), self.num_features).type_as(mean) temp = self.glob_sum(x.F, -mean)**2 var = self.glob_mean(temp.data).mean(-1, keepdim=True) var = var + torch.zeros(var.size(0), self.num_features).type_as(var) instd = 1 / (var + self.eps).sqrt() x = self.glob_times(self.glob_sum2(x, -mean), instd) return x * self.weight + self.bias class MinkowskiInstanceNorm(nn.Module): def __init__(self, num_features, eps=1e-5, D=-1): super(MinkowskiInstanceNorm, self).__init__() self.eps = eps self.weight = nn.Parameter(torch.ones(1, num_features)) self.bias = nn.Parameter(torch.zeros(1, num_features)) self.mean_in = MinkowskiGlobalPooling(dimension=D) self.glob_sum = MinkowskiBroadcastAddition(dimension=D) self.glob_sum2 = MinkowskiBroadcastAddition(dimension=D) self.glob_mean = MinkowskiGlobalPooling(dimension=D) self.glob_times = MinkowskiBroadcastMultiplication(dimension=D) self.D = D self.reset_parameters() def __repr__(self): s = f'(pixel_dist={self.pixel_dist}, D={self.D})' return self.__class__.__name__ + s def reset_parameters(self): self.weight.data.fill_(1) self.bias.data.zero_() def _check_input_dim(self, input): if input.dim() != 2: raise ValueError('expected 2D input (got {}D input)'.format(input.dim())) def forward(self, x): self._check_input_dim(x) mean_in = self.mean_in(x) temp = self.glob_sum(x, -mean_in)**2 var_in = self.glob_mean(temp.data) instd_in = 1 / (var_in + self.eps).sqrt() x = self.glob_times(self.glob_sum2(x, -mean_in), instd_in) return x * self.weight + self.bias
ContrastiveSceneContexts-main
downstream/semseg/lib/layers.py
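A minimal dense-tensor sketch of the normalization MinkowskiLayerNorm performs on the feature matrix of a single sample: subtract a global mean (pooled over all points, then averaged over channels), divide by the matching standard deviation, and apply the learned per-channel scale and shift. The Minkowski version expresses the same computation through global pooling and broadcast ops on sparse tensors; the tensor below is a plain stand-in, not a MinkowskiEngine SparseTensor.

import torch

num_features, eps = 8, 1e-5
weight = torch.ones(1, num_features)   # learned scale, as in the module
bias = torch.zeros(1, num_features)    # learned shift

F = torch.randn(100, num_features)     # stand-in for x.F, the N x C feature matrix
mean = F.mean()                        # global mean over points and channels
var = ((F - mean) ** 2).mean()
out = (F - mean) / (var + eps).sqrt() * weight + bias
print(out.shape)                       # torch.Size([100, 8])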
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import sys import hydra import torch import numpy as np from lib.ddp_trainer import SegmentationTrainer from lib.distributed import multi_proc_run def single_proc_run(config): if not torch.cuda.is_available(): raise Exception('No GPUs FOUND.') trainer = SegmentationTrainer(config) if config.train.is_train: trainer.train() else: trainer.test() @hydra.main(config_path='config', config_name='default.yaml') def main(config): # fix seed np.random.seed(config.misc.seed) torch.manual_seed(config.misc.seed) torch.cuda.manual_seed(config.misc.seed) # Convert to dict if config.misc.num_gpus > 1: multi_proc_run(config.misc.num_gpus, fun=single_proc_run, fun_args=(config,)) else: single_proc_run(config) if __name__ == '__main__': __spec__ = None os.environ['MKL_THREADING_LAYER'] = 'GNU' main()
ContrastiveSceneContexts-main
downstream/insseg/ddp_main.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import random import logging import numpy as np import scipy import scipy.ndimage import scipy.interpolate import torch # A sparse tensor consists of coordinates and associated features. # You must apply augmentation to both. # In 2D, flip, shear, scale, and rotation of images are coordinate transformation # color jitter, hue, etc., are feature transformations ############################## # Feature transformations ############################## class ChromaticTranslation(object): """Add random color to the image, input must be an array in [0,255] or a PIL image""" def __init__(self, trans_range_ratio=1e-1): """ trans_range_ratio: ratio of translation i.e. 255 * 2 * ratio * rand(-0.5, 0.5) """ self.trans_range_ratio = trans_range_ratio def __call__(self, coords, feats, labels, instances): if random.random() < 0.95: tr = (np.random.rand(1, 3) - 0.5) * 255 * 2 * self.trans_range_ratio feats[:, :3] = np.clip(tr + feats[:, :3], 0, 255) return coords, feats, labels, instances class ChromaticAutoContrast(object): def __init__(self, randomize_blend_factor=True, blend_factor=0.5): self.randomize_blend_factor = randomize_blend_factor self.blend_factor = blend_factor def __call__(self, coords, feats, labels, instances): if random.random() < 0.2: # mean = np.mean(feats, 0, keepdims=True) # std = np.std(feats, 0, keepdims=True) # lo = mean - std # hi = mean + std lo = feats[:, :3].min(0, keepdims=True) hi = feats[:, :3].max(0, keepdims=True) assert hi.max() > 1, f"invalid color value. Color is supposed to be [0-255]" scale = 255 / (hi - lo) contrast_feats = (feats[:, :3] - lo) * scale blend_factor = random.random() if self.randomize_blend_factor else self.blend_factor feats[:, :3] = (1 - blend_factor) * feats + blend_factor * contrast_feats return coords, feats, labels, instances class ChromaticJitter(object): def __init__(self, std=0.01): self.std = std def __call__(self, coords, feats, labels, instances): if random.random() < 0.95: noise = np.random.randn(feats.shape[0], 3) noise *= self.std * 255 feats[:, :3] = np.clip(noise + feats[:, :3], 0, 255) return coords, feats, labels, instances class HueSaturationTranslation(object): @staticmethod def rgb_to_hsv(rgb): # Translated from source of colorsys.rgb_to_hsv # r,g,b should be a numpy arrays with values between 0 and 255 # rgb_to_hsv returns an array of floats between 0.0 and 1.0. rgb = rgb.astype('float') hsv = np.zeros_like(rgb) # in case an RGBA array was passed, just copy the A channel hsv[..., 3:] = rgb[..., 3:] r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2] maxc = np.max(rgb[..., :3], axis=-1) minc = np.min(rgb[..., :3], axis=-1) hsv[..., 2] = maxc mask = maxc != minc hsv[mask, 1] = (maxc - minc)[mask] / maxc[mask] rc = np.zeros_like(r) gc = np.zeros_like(g) bc = np.zeros_like(b) rc[mask] = (maxc - r)[mask] / (maxc - minc)[mask] gc[mask] = (maxc - g)[mask] / (maxc - minc)[mask] bc[mask] = (maxc - b)[mask] / (maxc - minc)[mask] hsv[..., 0] = np.select([r == maxc, g == maxc], [bc - gc, 2.0 + rc - bc], default=4.0 + gc - rc) hsv[..., 0] = (hsv[..., 0] / 6.0) % 1.0 return hsv @staticmethod def hsv_to_rgb(hsv): # Translated from source of colorsys.hsv_to_rgb # h,s should be a numpy arrays with values between 0.0 and 1.0 # v should be a numpy array with values between 0.0 and 255.0 # hsv_to_rgb returns an array of uints between 0 and 255. 
rgb = np.empty_like(hsv) rgb[..., 3:] = hsv[..., 3:] h, s, v = hsv[..., 0], hsv[..., 1], hsv[..., 2] i = (h * 6.0).astype('uint8') f = (h * 6.0) - i p = v * (1.0 - s) q = v * (1.0 - s * f) t = v * (1.0 - s * (1.0 - f)) i = i % 6 conditions = [s == 0.0, i == 1, i == 2, i == 3, i == 4, i == 5] rgb[..., 0] = np.select(conditions, [v, q, p, p, t, v], default=v) rgb[..., 1] = np.select(conditions, [v, v, v, q, p, p], default=t) rgb[..., 2] = np.select(conditions, [v, p, t, v, v, q], default=p) return rgb.astype('uint8') def __init__(self, hue_max, saturation_max): self.hue_max = hue_max self.saturation_max = saturation_max def __call__(self, coords, feats, labels, instances): # Assume feat[:, :3] is rgb hsv = HueSaturationTranslation.rgb_to_hsv(feats[:, :3]) hue_val = (random.random() - 0.5) * 2 * self.hue_max sat_ratio = 1 + (random.random() - 0.5) * 2 * self.saturation_max hsv[..., 0] = np.remainder(hue_val + hsv[..., 0] + 1, 1) hsv[..., 1] = np.clip(sat_ratio * hsv[..., 1], 0, 1) feats[:, :3] = np.clip(HueSaturationTranslation.hsv_to_rgb(hsv), 0, 255) return coords, feats, labels, instances ############################## # Coordinate transformations ############################## class RandomDropout(object): def __init__(self, dropout_ratio=0.2, dropout_application_ratio=0.5): """ upright_axis: axis index among x,y,z, i.e. 2 for z """ self.dropout_ratio = dropout_ratio self.dropout_application_ratio = dropout_application_ratio def __call__(self, coords, feats, labels, instances): if random.random() < self.dropout_ratio: N = len(coords) inds = np.random.choice(N, int(N * (1 - self.dropout_ratio)), replace=False) return coords[inds], feats[inds], labels[inds], instances[inds] return coords, feats, labels, instances class RandomHorizontalFlip(object): def __init__(self, upright_axis, is_temporal): """ upright_axis: axis index among x,y,z, i.e. 2 for z """ self.is_temporal = is_temporal self.D = 4 if is_temporal else 3 self.upright_axis = {'x': 0, 'y': 1, 'z': 2}[upright_axis.lower()] # Use the rest of axes for flipping. self.horz_axes = set(range(self.D)) - set([self.upright_axis]) def __call__(self, coords, feats, labels, instances): if random.random() < 0.95: for curr_ax in self.horz_axes: if random.random() < 0.5: coord_max = np.max(coords[:, curr_ax]) coords[:, curr_ax] = coord_max - coords[:, curr_ax] return coords, feats, labels, instances class ElasticDistortion: def __init__(self, distortion_params): self.distortion_params = distortion_params def elastic_distortion(self, coords, feats, labels, granularity, magnitude): """Apply elastic distortion on sparse coordinate space. pointcloud: numpy array of (number of points, at least 3 spatial dims) granularity: size of the noise grid (in same scale[m/cm] as the voxel grid) magnitude: noise multiplier """ blurx = np.ones((3, 1, 1, 1)).astype('float32') / 3 blury = np.ones((1, 3, 1, 1)).astype('float32') / 3 blurz = np.ones((1, 1, 3, 1)).astype('float32') / 3 coords_min = coords.min(0) # Create Gaussian noise tensor of the size given by granularity. noise_dim = ((coords - coords_min).max(0) // granularity).astype(int) + 3 noise = np.random.randn(*noise_dim, 3).astype(np.float32) # Smoothing. for _ in range(2): noise = scipy.ndimage.filters.convolve(noise, blurx, mode='constant', cval=0) noise = scipy.ndimage.filters.convolve(noise, blury, mode='constant', cval=0) noise = scipy.ndimage.filters.convolve(noise, blurz, mode='constant', cval=0) # Trilinear interpolate noise filters for each spatial dimensions. 
ax = [ np.linspace(d_min, d_max, d) for d_min, d_max, d in zip(coords_min - granularity, coords_min + granularity * (noise_dim - 2), noise_dim) ] interp = scipy.interpolate.RegularGridInterpolator(ax, noise, bounds_error=0, fill_value=0) coords += interp(coords) * magnitude return coords, feats, labels def __call__(self, coords, feats, labels): if self.distortion_params is not None: if random.random() < 0.95: for granularity, magnitude in self.distortion_params: coords, feats, labels = self.elastic_distortion(coords, feats, labels, granularity, magnitude) return coords, feats, labels class Compose(object): """Composes several transforms together.""" def __init__(self, transforms): self.transforms = transforms def __call__(self, *args): for t in self.transforms: args = t(*args) return args class cfl_collate_fn_factory: """Generates collate function for coords, feats, labels. Args: limit_numpoints: If 0 or False, does not alter batch size. If positive integer, limits batch size so that the number of input coordinates is below limit_numpoints. """ def __init__(self, limit_numpoints): self.limit_numpoints = limit_numpoints def __call__(self, list_data): coords, feats, labels, instances = list(zip(*list_data)) coords_batch, feats_batch, labels_batch, instances_batch = [], [], [], [] batch_id = 0 batch_num_points = 0 for batch_id, _ in enumerate(coords): num_points = coords[batch_id].shape[0] batch_num_points += num_points if self.limit_numpoints and batch_num_points > self.limit_numpoints: num_full_points = sum(len(c) for c in coords) num_full_batch_size = len(coords) logging.warning( f'\t\tCannot fit {num_full_points} points into {self.limit_numpoints} points ' f'limit. Truncating batch size at {batch_id} out of {num_full_batch_size} with {batch_num_points - num_points}.' ) break # coords_batch.append( # torch.cat((torch.from_numpy( # coords[batch_id]).int(), torch.ones(num_points, 1).int() * batch_id), 1)) coords_batch.append( torch.cat((torch.ones(num_points, 1).int() * batch_id, torch.from_numpy( coords[batch_id]).int()), 1)) feats_batch.append(torch.from_numpy(feats[batch_id])) labels_batch.append(torch.from_numpy(labels[batch_id]).int()) instances_batch.append(instances[batch_id]) batch_id += 1 # Concatenate all lists coords_batch = torch.cat(coords_batch, 0).int() feats_batch = torch.cat(feats_batch, 0).float() labels_batch = torch.cat(labels_batch, 0).int() return coords_batch, feats_batch, labels_batch, instances_batch class cflt_collate_fn_factory: """Generates collate function for coords, feats, labels, point_clouds, transformations. Args: limit_numpoints: If 0 or False, does not alter batch size. If positive integer, limits batch size so that the number of input coordinates is below limit_numpoints. """ def __init__(self, limit_numpoints): self.limit_numpoints = limit_numpoints def __call__(self, list_data): coords, feats, labels, instances, transformations = list(zip(*list_data)) cfl_collate_fn = cfl_collate_fn_factory(limit_numpoints=self.limit_numpoints) coords_batch, feats_batch, labels_batch, instances_batch = cfl_collate_fn(list(zip(coords, feats, labels, instances))) num_truncated_batch = coords_batch[:, -1].max().item() + 1 batch_id = 0 transformations_batch = [] for transformation in transformations: if batch_id >= num_truncated_batch: break transformations_batch.append(torch.from_numpy(transformation).float()) batch_id += 1 transformations_batch = torch.stack(transformations_batch, 0) return coords_batch, feats_batch, labels_batch, instances_batch, transformations_batch
ContrastiveSceneContexts-main
downstream/insseg/datasets/transforms.py
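A minimal usage sketch for the feature transforms and the collate factory above, run on a synthetic point cloud. It assumes the insseg package is importable as datasets.transforms; the array names and sizes are arbitrary stand-ins.

import numpy as np
from datasets.transforms import (Compose, ChromaticAutoContrast, ChromaticTranslation,
                                 ChromaticJitter, cfl_collate_fn_factory)

# Synthetic cloud: xyz coords, RGB feats in [0, 255], per-point labels and instance ids.
N = 1000
coords = (np.random.rand(N, 3) * 5).astype(np.float32)
feats = (np.random.rand(N, 3) * 255).astype(np.float32)
labels = np.random.randint(0, 20, N).astype(np.int32)
instances = np.random.randint(-1, 5, N).astype(np.int32)

aug = Compose([
    ChromaticAutoContrast(),
    ChromaticTranslation(trans_range_ratio=0.1),
    ChromaticJitter(std=0.01),
])
coords, feats, labels, instances = aug(coords, feats, labels, instances)

# Batch two copies of the cloud; limit_numpoints=0 disables truncation.
collate = cfl_collate_fn_factory(limit_numpoints=0)
coords_b, feats_b, labels_b, instances_b = collate([(coords, feats, labels, instances)] * 2)
print(coords_b.shape)   # (2N, 4): batch index prepended to the integer xyz coords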
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. #from lib.datasets import synthia from datasets import stanford from datasets import scannet #from lib.datasets import shapenet DATASETS = [] def add_datasets(module): DATASETS.extend([getattr(module, a) for a in dir(module) if 'Dataset' in a]) add_datasets(stanford) #add_datasets(synthia) add_datasets(scannet) #add_datasets(shapenet) def load_dataset(name): '''Returns the dataset class registered under the given name. ''' # Find the dataset class from its name mdict = {dataset.__name__: dataset for dataset in DATASETS} if name not in mdict: print('Invalid dataset name. Options are:') # Display a list of valid dataset names for dataset in DATASETS: print('\t* {}'.format(dataset.__name__)) raise ValueError(f'Dataset {name} not defined') DatasetClass = mdict[name] return DatasetClass
ContrastiveSceneContexts-main
downstream/insseg/datasets/__init__.py
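A minimal usage sketch for load_dataset, assuming the insseg package and its dependencies (MinkowskiEngine, plyfile, etc.) are importable; the class name is one of those registered from datasets.stanford above.

from datasets import load_dataset

DatasetClass = load_dataset('StanfordArea5Dataset')
print(DatasetClass.__name__, DatasetClass.VOXEL_SIZE)   # the class itself is returned, not an instance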
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import unittest import imageio import os import os.path as osp import pickle import numpy as np from collections import defaultdict from plyfile import PlyData from lib.pc_utils import Camera, read_plyfile from lib.dataset import DictDataset, VoxelizationDataset, TemporalVoxelizationDataset, \ str2datasetphase_type, DatasetPhase from lib.transforms import cfl_collate_fn_factory from lib.utils import read_txt, debug_on class SynthiaDataset(DictDataset): NUM_LABELS = 16 def __init__(self, data_path_file, input_transform=None, target_transform=None): with open(data_path_file, 'r') as f: data_paths = pickle.load(f) super(SynthiaDataset, self).__init__(data_paths, input_transform, target_transform) @staticmethod def load_extrinsics(extrinsics_file): """Load the camera extrinsics from a .txt file. """ lines = read_txt(extrinsics_file) params = [float(x) for x in lines[0].split(' ')] extrinsics_matrix = np.asarray(params).reshape([4, 4]) return extrinsics_matrix @staticmethod def load_intrinsics(intrinsics_file): """Load the camera intrinsics from a intrinsics.txt file. intrinsics.txt: a text file containing 4 values that represent (in this order) {focal length, principal-point-x, principal-point-y, baseline (m) with the corresponding right camera} """ lines = read_txt(intrinsics_file) assert len(lines) == 7 intrinsics = { 'focal_length': float(lines[0]), 'pp_x': float(lines[2]), 'pp_y': float(lines[4]), 'baseline': float(lines[6]), } return intrinsics @staticmethod def load_depth(depth_file): """Read a single depth map (.png) file. 1280x760 760 rows, 1280 columns. Depth is encoded in any of the 3 channels in centimetres as an ushort. """ img = np.asarray(imageio.imread(depth_file, format='PNG-FI')) # uint16 img = img.astype(np.int32) # Convert to int32 for torch compatibility return img @staticmethod def load_label(label_file): """Load the ground truth semantic segmentation label. Annotations are given in two channels. The first channel contains the class of that pixel (see the table below). The second channel contains the unique ID of the instance for those objects that are dynamic (cars, pedestrians, etc.). Class R G B ID Void 0 0 0 0 Sky 128 128 128 1 Building 128 0 0 2 Road 128 64 128 3 Sidewalk 0 0 192 4 Fence 64 64 128 5 Vegetation 128 128 0 6 Pole 192 192 128 7 Car 64 0 128 8 Traffic Sign 192 128 128 9 Pedestrian 64 64 0 10 Bicycle 0 128 192 11 Lanemarking 0 172 0 12 Reserved - - - 13 Reserved - - - 14 Traffic Light 0 128 128 15 """ img = np.asarray(imageio.imread(label_file, format='PNG-FI')) # uint16 img = img.astype(np.int32) # Convert to int32 for torch compatibility return img @staticmethod def load_rgb(rgb_file): """Load RGB images. 1280x760 RGB images used for training. 760 rows, 1280 columns. """ img = np.array(imageio.imread(rgb_file)) # uint8 return img class SynthiaVoxelizationDataset(VoxelizationDataset): """Load the ground truth semantic segmentation label. Annotations are given in two channels. The first channel contains the class of that pixel (see the table below). The second channel contains the unique ID of the instance for those objects that are dynamic (cars, pedestrians, etc.). 
Class R G B ID Void 0 0 0 0 Sky 128 128 128 1 Building 128 0 0 2 Road 128 64 128 3 Sidewalk 0 0 192 4 Fence 64 64 128 5 Vegetation 128 128 0 6 Pole 192 192 128 7 Car 64 0 128 8 Traffic Sign 192 128 128 9 Pedestrian 64 64 0 10 Bicycle 0 128 192 11 Lanemarking 0 172 0 12 Reserved - - - 13 Reserved - - - 14 Traffic Light 0 128 128 15 """ CLASS_LABELS = ('building', 'road', 'sidewalk', 'fence', 'vegetation', 'pole', 'car', 'sign', 'pedestrian', 'cyclist', 'lanemarking', 'traffic light') VALID_CLASS_IDS = (2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 15) # Voxelization arguments CLIP_BOUND = ((-1800, 1800), (-1800, 1800), (-1800, 1800)) TEST_CLIP_BOUND = ((-2500, 2500), (-2500, 2500), (-2500, 2500)) VOXEL_SIZE = 15 # cm PREVOXELIZATION_VOXEL_SIZE = 7.5 # Elastic distortion, (granularity, magitude) pairs # ELASTIC_DISTORT_PARAMS = ((80, 300),) # Augmentation arguments ROTATION_AUGMENTATION_BOUND = ((0, 0), (-np.pi, np.pi), (0, 0)) TRANSLATION_AUGMENTATION_RATIO_BOUND = ((-0.1, 0.1), (0, 0), (-0.1, 0.1)) ROTATION_AXIS = 'y' LOCFEAT_IDX = 1 NUM_LABELS = 16 # Automatically subtract ignore labels after processed IGNORE_LABELS = (0, 1, 13, 14) # void, sky, reserved, reserved # Split used in the Minkowski ConvNet, CVPR'19 DATA_PATH_FILE = { DatasetPhase.Train: 'train_cvpr19.txt', DatasetPhase.Val: 'val_cvpr19.txt', DatasetPhase.Test: 'test_cvpr19.txt' } def __init__(self, config, prevoxel_transform=None, input_transform=None, target_transform=None, augment_data=True, elastic_distortion=False, cache=False, phase=DatasetPhase.Train): if isinstance(phase, str): phase = str2datasetphase_type(phase) if phase not in [DatasetPhase.Train, DatasetPhase.TrainVal]: self.CLIP_BOUND = self.TEST_CLIP_BOUND data_root = config.data.synthia_path data_paths = read_txt(osp.join('/checkpoint/jihou/data/synthia4d/splits', self.DATA_PATH_FILE[phase])) if phase == DatasetPhase.Train: data_paths = data_paths[:int(len(data_paths)*config.data.data_ratio)] data_paths = [d.split()[0] for d in data_paths] logging.info('Loading {}: {}'.format(self.__class__.__name__, self.DATA_PATH_FILE[phase])) super().__init__( data_paths, data_root=data_root, input_transform=input_transform, target_transform=target_transform, ignore_label=config.data.ignore_label, return_transformation=config.data.return_transformation, augment_data=augment_data, elastic_distortion=elastic_distortion, config=config) def load_data(self, index): filepath = self.data_root / self.data_paths[index] plydata = PlyData.read(filepath) data = plydata.elements[0].data coords = np.array([data['x'], data['y'], data['z']], dtype=np.float32).T feats = np.array([data['r'], data['g'], data['b']], dtype=np.float32).T labels = np.array(data['l'], dtype=np.int32) instances = np.zeros_like(labels) return coords, feats, labels, instances class SynthiaCVPR15cmVoxelizationDataset(SynthiaVoxelizationDataset): pass class SynthiaCVPR30cmVoxelizationDataset(SynthiaVoxelizationDataset): VOXEL_SIZE = 30 class SynthiaAllSequencesVoxelizationDataset(SynthiaVoxelizationDataset): DATA_PATH_FILE = { DatasetPhase.Train: 'train_raw.txt', DatasetPhase.Val: 'val_raw.txt', DatasetPhase.Test: 'test_raw.txt' } class TestSynthia(unittest.TestCase): @debug_on() def test(self): from torch.utils.data import DataLoader from lib.utils import Timer from config import get_config config = get_config() dataset = SynthiaVoxelizationDataset(config) timer = Timer() data_loader = DataLoader( dataset=dataset, collate_fn=cfl_collate_fn_factory(limit_numpoints=False), num_workers=0, batch_size=4, shuffle=True) # Start from 
index 1 # for i, batch in enumerate(data_loader, 1): data_iter = iter(data_loader) for i in range(100): timer.tic() batch = next(data_iter) print(batch, timer.toc()) if __name__ == '__main__': unittest.main()
ContrastiveSceneContexts-main
downstream/insseg/datasets/synthia.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from abc import ABC from pathlib import Path from collections import defaultdict import random import numpy as np from enum import Enum import torch from torch.utils.data import Dataset, DataLoader import MinkowskiEngine as ME from plyfile import PlyData import datasets.transforms as t from datasets.dataloader import InfSampler, DistributedInfSampler from datasets.voxelizer import Voxelizer from lib.distributed import get_world_size class DatasetPhase(Enum): Train = 0 Val = 1 Val2 = 2 TrainVal = 3 Test = 4 Debug = 5 def datasetphase_2str(arg): if arg == DatasetPhase.Train: return 'train' elif arg == DatasetPhase.Val: return 'val' elif arg == DatasetPhase.Val2: return 'val2' elif arg == DatasetPhase.TrainVal: return 'trainval' elif arg == DatasetPhase.Test: return 'test' else: raise ValueError('phase must be one of dataset enum.') def str2datasetphase_type(arg): if arg.upper() == 'TRAIN': return DatasetPhase.Train elif arg.upper() == 'VAL': return DatasetPhase.Val elif arg.upper() == 'VAL2': return DatasetPhase.Val2 elif arg.upper() == 'TRAINVAL': return DatasetPhase.TrainVal elif arg.upper() == 'TEST': return DatasetPhase.Test else: raise ValueError('phase must be one of train/val/test') def cache(func): def wrapper(self, *args, **kwargs): # Assume that args[0] is index index = args[0] if self.cache: if index not in self.cache_dict[func.__name__]: results = func(self, *args, **kwargs) self.cache_dict[func.__name__][index] = results return self.cache_dict[func.__name__][index] else: return func(self, *args, **kwargs) return wrapper class DictDataset(Dataset, ABC): IS_FULL_POINTCLOUD_EVAL = False def __init__(self, data_paths, prevoxel_transform=None, input_transform=None, target_transform=None, cache=False, data_root='/'): """ data_paths: list of lists, [[str_path_to_input, str_path_to_label], [...]] """ Dataset.__init__(self) # Allows easier path concatenation if not isinstance(data_root, Path): data_root = Path(data_root) self.data_root = data_root self.data_paths = sorted(data_paths) self.prevoxel_transform = prevoxel_transform self.input_transform = input_transform self.target_transform = target_transform # dictionary of input self.data_loader_dict = { 'input': (self.load_input, self.input_transform), 'target': (self.load_target, self.target_transform) } # For large dataset, do not cache self.cache = cache self.cache_dict = defaultdict(dict) self.loading_key_order = ['input', 'target'] def load_input(self, index): raise NotImplementedError def load_target(self, index): raise NotImplementedError def get_classnames(self): pass def reorder_result(self, result): return result def __getitem__(self, index): out_array = [] for k in self.loading_key_order: loader, transformer = self.data_loader_dict[k] v = loader(index) if transformer: v = transformer(v) out_array.append(v) return out_array def __len__(self): return len(self.data_paths) class VoxelizationDatasetBase(DictDataset, ABC): IS_TEMPORAL = False CLIP_BOUND = (-1000, -1000, -1000, 1000, 1000, 1000) ROTATION_AXIS = None NUM_IN_CHANNEL = None NUM_LABELS = -1 # Number of labels in the dataset, including all ignore classes IGNORE_LABELS = None # labels that are not evaluated def __init__(self, data_paths, prevoxel_transform=None, input_transform=None, target_transform=None, cache=False, data_root='/', ignore_mask=255, return_transformation=False, **kwargs): """ 
ignore_mask: label value for ignore class. It will not be used as a class in the loss or evaluation. """ DictDataset.__init__( self, data_paths, prevoxel_transform=prevoxel_transform, input_transform=input_transform, target_transform=target_transform, cache=cache, data_root=data_root) self.ignore_mask = ignore_mask self.return_transformation = return_transformation def __getitem__(self, index): raise NotImplementedError def load_ply(self, index): filepath = self.data_root / self.data_paths[index] plydata = PlyData.read(filepath) data = plydata.elements[0].data coords = np.array([data['x'], data['y'], data['z']], dtype=np.float32).T feats = np.array([data['red'], data['green'], data['blue']], dtype=np.float32).T labels = np.array(data['label'], dtype=np.int32) return coords, feats, labels, None def load_data(self, index): raise NotImplementedError def __len__(self): num_data = len(self.data_paths) return num_data class VoxelizationDataset(VoxelizationDatasetBase): """This dataset loads RGB point clouds and their labels as a list of points and voxelizes the pointcloud with sufficient data augmentation. """ # Voxelization arguments VOXEL_SIZE = 0.05 # 5cm # Coordinate Augmentation Arguments: Unlike feature augmentation, coordinate # augmentation has to be done before voxelization SCALE_AUGMENTATION_BOUND = (0.9, 1.1) ROTATION_AUGMENTATION_BOUND = ((-np.pi / 6, np.pi / 6), (-np.pi, np.pi), (-np.pi / 6, np.pi / 6)) TRANSLATION_AUGMENTATION_RATIO_BOUND = ((-0.2, 0.2), (-0.05, 0.05), (-0.2, 0.2)) ELASTIC_DISTORT_PARAMS = None # MISC. PREVOXELIZATION_VOXEL_SIZE = None # Augment coords to feats AUGMENT_COORDS_TO_FEATS = False def __init__(self, data_paths, prevoxel_transform=None, input_transform=None, target_transform=None, data_root='/', ignore_label=255, return_transformation=False, augment_data=False, config=None, **kwargs): self.augment_data = augment_data self.config = config VoxelizationDatasetBase.__init__( self, data_paths, prevoxel_transform=prevoxel_transform, input_transform=input_transform, target_transform=target_transform, cache=cache, data_root=data_root, ignore_mask=ignore_label, return_transformation=return_transformation) # Prevoxel transformations self.voxelizer = Voxelizer( voxel_size=self.VOXEL_SIZE, clip_bound=self.CLIP_BOUND, use_augmentation=augment_data, scale_augmentation_bound=self.SCALE_AUGMENTATION_BOUND, rotation_augmentation_bound=self.ROTATION_AUGMENTATION_BOUND, translation_augmentation_ratio_bound=self.TRANSLATION_AUGMENTATION_RATIO_BOUND, ignore_label=ignore_label) # map labels not evaluated to ignore_label label_map = {} n_used = 0 for l in range(self.NUM_LABELS): if l in self.IGNORE_LABELS: label_map[l] = self.ignore_mask else: label_map[l] = n_used n_used += 1 label_map[self.ignore_mask] = self.ignore_mask self.label_map = label_map self.NUM_LABELS -= len(self.IGNORE_LABELS) def _augment_coords_to_feats(self, coords, feats, labels=None): norm_coords = coords - coords.mean(0) # color must come first. 
if isinstance(coords, np.ndarray): feats = np.concatenate((feats, norm_coords), 1) else: feats = torch.cat((feats, norm_coords), 1) return coords, feats, labels def convert_mat2cfl(self, mat): # Generally, xyz,rgb,label return mat[:, :3], mat[:, 3:-1], mat[:, -1] def get_instance_info(self, xyz, instance_ids): ''' :param xyz: (n, 3) :param instance_ids: (n), int, (1~nInst, -1) :return: instance_num, dict ''' centers = np.ones((xyz.shape[0], 3), dtype=np.float32) * -1 # (n, 9), float, (cx, cy, cz, minx, miny, minz, maxx, maxy, maxz, occ, num_instances) occupancy = {} # (nInst), int bbox = {} unique_ids = np.unique(instance_ids) for id_ in unique_ids: if id_ == -1: continue mask = (instance_ids == id_) xyz_ = xyz[mask] bbox_min = xyz_.min(0) bbox_max = xyz_.max(0) center = xyz_.mean(0) centers[mask] = center occupancy[id_] = mask.sum() bbox[id_] = np.concatenate([bbox_min, bbox_max]) return {"ids": instance_ids, "center": centers, "occupancy": occupancy, "bbox": bbox} def __getitem__(self, index): coords, feats, labels, instances = self.load_data(index) # Downsample the pointcloud with finer voxel size before transformation for memory and speed if self.PREVOXELIZATION_VOXEL_SIZE is not None: inds = ME.utils.sparse_quantize( coords / self.PREVOXELIZATION_VOXEL_SIZE, return_index=True) coords = coords[inds] feats = feats[inds] labels = labels[inds] instances = instances[inds] # Prevoxel transformations if self.prevoxel_transform is not None: coords, feats, labels = self.prevoxel_transform(coords, feats, labels) coords, feats, labels, instances, transformation = self.voxelizer.voxelize( coords, feats, labels, instances) #import ipdb; ipdb.set_trace() #from lib.pc_utils import save_point_cloud #save_point_cloud(coords, 'test.ply') # map labels not used for evaluation to ignore_label if self.input_transform is not None: coords, feats, labels, instances = self.input_transform(coords, feats, labels, instances) if self.target_transform is not None: coords, feats, labels, instances = self.target_transform(coords, feats, labels, instances) if self.augment_data: # For some networks, making the network invariant to even, odd coords is important coords += (torch.rand(3) * 100).int().numpy() #----------------Instances------------------------- instance_info = instances condition = (labels == self.ignore_mask) instances[condition] = -1 IGNORE_LABELS_INSTANCE = self.IGNORE_LABELS if self.config.misc.train_stuff else self.IGNORE_LABELS_INSTANCE for ignore_id in IGNORE_LABELS_INSTANCE: condition = (labels == ignore_id) instances[condition] = -1 instance_info = self.get_instance_info(coords, instances) # ------------- label mapping -------------------- if self.IGNORE_LABELS is not None: labels = np.array([self.label_map[x] for x in labels], dtype=np.int) # Use coordinate features if config is set if self.AUGMENT_COORDS_TO_FEATS: coords, feats, labels = self._augment_coords_to_feats(coords, feats, labels) return_args = [coords, feats, labels, instance_info] if self.return_transformation: return_args.append(transformation.astype(np.float32)) return tuple(return_args) class TemporalVoxelizationDataset(VoxelizationDataset): IS_TEMPORAL = True def __init__(self, data_paths, prevoxel_transform=None, input_transform=None, target_transform=None, data_root='/', ignore_label=255, temporal_dilation=1, temporal_numseq=3, return_transformation=False, augment_data=False, config=None, **kwargs): VoxelizationDataset.__init__( self, data_paths, prevoxel_transform=prevoxel_transform, input_transform=input_transform, 
target_transform=target_transform, data_root=data_root, ignore_label=ignore_label, return_transformation=return_transformation, augment_data=augment_data, config=config, **kwargs) self.temporal_dilation = temporal_dilation self.temporal_numseq = temporal_numseq temporal_window = temporal_dilation * (temporal_numseq - 1) + 1 self.numels = [len(p) - temporal_window + 1 for p in self.data_paths] if any([numel <= 0 for numel in self.numels]): raise ValueError('Your temporal window configuration is too wide for ' 'this dataset. Please change the configuration.') def load_world_pointcloud(self, filename): raise NotImplementedError def __getitem__(self, index): for seq_idx, numel in enumerate(self.numels): if index >= numel: index -= numel else: break numseq = self.temporal_numseq if self.augment_data and self.config.data.temporal_rand_numseq: numseq = random.randrange(1, self.temporal_numseq + 1) dilations = [self.temporal_dilation for i in range(numseq - 1)] if self.augment_data and self.config.data.temporal_rand_dilation: dilations = [random.randrange(1, self.temporal_dilation + 1) for i in range(numseq - 1)] files = [self.data_paths[seq_idx][index + sum(dilations[:i])] for i in range(numseq)] world_pointclouds = [self.load_world_pointcloud(f) for f in files] ptcs, centers = zip(*world_pointclouds) # Downsample pointcloud for speed and memory if self.PREVOXELIZATION_VOXEL_SIZE is not None: new_ptcs = [] for ptc in ptcs: inds = ME.utils.sparse_quantize( ptc[:, :3] / self.PREVOXELIZATION_VOXEL_SIZE, return_index=True) new_ptcs.append(ptc[inds]) ptcs = new_ptcs # Apply prevoxel transformations ptcs = [self.prevoxel_transform(ptc) for ptc in ptcs] coords, feats, labels = zip(*ptcs) outs = self.voxelizer.voxelize_temporal( coords, feats, labels, centers=centers, return_transformation=self.return_transformation) if self.return_transformation: coords_t, feats_t, labels_t, transformation_t = outs else: coords_t, feats_t, labels_t = outs joint_coords = np.vstack([ np.hstack((coords, np.ones((coords.shape[0], 1)) * i)) for i, coords in enumerate(coords_t) ]) joint_feats = np.vstack(feats_t) joint_labels = np.hstack(labels_t) # map labels not used for evaluation to ignore_label if self.input_transform is not None: joint_coords, joint_feats, joint_labels = self.input_transform(joint_coords, joint_feats, joint_labels) if self.target_transform is not None: joint_coords, joint_feats, joint_labels = self.target_transform(joint_coords, joint_feats, joint_labels) if self.IGNORE_LABELS is not None: joint_labels = np.array([self.label_map[x] for x in joint_labels], dtype=np.int) return_args = [joint_coords, joint_feats, joint_labels] if self.return_transformation: pointclouds = np.vstack([ np.hstack((pointcloud[0][:, :6], np.ones((pointcloud[0].shape[0], 1)) * i)) for i, pointcloud in enumerate(world_pointclouds) ]) transformations = np.vstack( [np.hstack((transformation, [i])) for i, transformation in enumerate(transformation_t)]) return_args.extend([pointclouds.astype(np.float32), transformations.astype(np.float32)]) return tuple(return_args) def __len__(self): num_data = sum(self.numels) return num_data def initialize_data_loader(DatasetClass, config, phase, num_workers, shuffle, repeat, augment_data, batch_size, limit_numpoints, input_transform=None, target_transform=None): if isinstance(phase, str): phase = str2datasetphase_type(phase) if config.data.return_transformation: collate_fn = t.cflt_collate_fn_factory(limit_numpoints) else: collate_fn = t.cfl_collate_fn_factory(limit_numpoints) 
prevoxel_transform_train = [] if augment_data: prevoxel_transform_train.append(t.ElasticDistortion(DatasetClass.ELASTIC_DISTORT_PARAMS)) if len(prevoxel_transform_train) > 0: prevoxel_transforms = t.Compose(prevoxel_transform_train) else: prevoxel_transforms = None input_transforms = [] if input_transform is not None: input_transforms += input_transform if augment_data: input_transforms += [ t.RandomDropout(0.2), t.RandomHorizontalFlip(DatasetClass.ROTATION_AXIS, DatasetClass.IS_TEMPORAL), t.ChromaticAutoContrast(), t.ChromaticTranslation(config.augmentation.data_aug_color_trans_ratio), t.ChromaticJitter(config.augmentation.data_aug_color_jitter_std), # t.HueSaturationTranslation(config.data_aug_hue_max, config.data_aug_saturation_max), ] if len(input_transforms) > 0: input_transforms = t.Compose(input_transforms) else: input_transforms = None dataset = DatasetClass( config, prevoxel_transform=prevoxel_transforms, input_transform=input_transforms, target_transform=target_transform, cache=config.data.cache_data, augment_data=augment_data, phase=phase) data_args = { 'dataset': dataset, 'num_workers': num_workers, 'batch_size': batch_size, 'collate_fn': collate_fn, } if repeat: if get_world_size() > 1: data_args['sampler'] = DistributedInfSampler(dataset, shuffle=shuffle) # torch.utils.data.distributed.DistributedSampler(dataset) else: data_args['sampler'] = InfSampler(dataset, shuffle) else: data_args['shuffle'] = shuffle data_loader = DataLoader(**data_args) return data_loader
ContrastiveSceneContexts-main
downstream/insseg/datasets/dataset.py
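A standalone sketch of the label remapping built in VoxelizationDataset.__init__: raw dataset labels are compressed to a contiguous range of training ids, and every label listed in IGNORE_LABELS is sent to the ignore value. The constants below are illustrative stand-ins, not the values of any particular dataset.

import numpy as np

NUM_LABELS = 10          # stand-in for the dataset's raw label count
IGNORE_LABELS = (0, 3)   # stand-in ignore set
ignore_mask = 255

label_map = {}
n_used = 0
for l in range(NUM_LABELS):
    if l in IGNORE_LABELS:
        label_map[l] = ignore_mask
    else:
        label_map[l] = n_used
        n_used += 1
label_map[ignore_mask] = ignore_mask

raw = np.array([0, 1, 2, 3, 4, 9])
print(np.array([label_map[x] for x in raw]))   # [255   0   1 255   2   7]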
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import sys import numpy as np from collections import defaultdict from scipy import spatial import torch from plyfile import PlyData from lib.utils import read_txt, fast_hist, per_class_iu from datasets.dataset import VoxelizationDataset, DatasetPhase, str2datasetphase_type, cache import datasets.transforms as t class StanfordVoxelizationDatasetBase: # added NUM_LABELS = 14 CLASS_LABELS = ('clutter', 'beam', 'board', 'bookcase', 'ceiling', 'chair', 'column', 'door', 'floor', 'sofa', 'table', 'wall', 'window') VALID_CLASS_IDS = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13) IGNORE_LABELS = tuple(set(range(14)) - set(VALID_CLASS_IDS)) CLASS_LABELS_INSTANCE = ('clutter', 'beam', 'board', 'bookcase', 'chair', 'column', 'door', 'sofa', 'table', 'window') VALID_CLASS_IDS_INSTANCE = (0, 1, 2, 3, 5, 6, 7, 9, 11, 13) IGNORE_LABELS_INSTANCE = tuple(set(range(14)) - set(VALID_CLASS_IDS_INSTANCE)) #--------- CLIP_SIZE = None CLIP_BOUND = None LOCFEAT_IDX = 2 ROTATION_AXIS = 'z' #IGNORE_LABELS = (10,) # remove stairs, following SegCloud # CLASSES = [ # 'clutter', 'beam', 'board', 'bookcase', 'ceiling', 'chair', 'column', 'door', 'floor', 'sofa', # 'table', 'wall', 'window' # ] IS_FULL_POINTCLOUD_EVAL = True DATA_PATH_FILE = { DatasetPhase.Train: 'train.txt', DatasetPhase.Val: 'val.txt', DatasetPhase.TrainVal: 'trainval.txt', DatasetPhase.Test: 'test.txt' } def test_pointcloud(self, pred_dir): print('Running full pointcloud evaluation.') # Join room by their area and room id. room_dict = defaultdict(list) for i, data_path in enumerate(self.data_paths): area, room = data_path.split(os.sep) room, _ = os.path.splitext(room) room_id = '_'.join(room.split('_')[:-1]) room_dict[(area, room_id)].append(i) # Test independently for each room. sys.setrecursionlimit(100000) # Increase recursion limit for k-d tree. pred_list = sorted(os.listdir(pred_dir)) hist = np.zeros((self.NUM_LABELS, self.NUM_LABELS)) for room_idx, room_list in enumerate(room_dict.values()): print(f'Evaluating room {room_idx} / {len(room_dict)}.') # Join all predictions and query pointclouds of split data. pred = np.zeros((0, 4)) pointcloud = np.zeros((0, 7)) for i in room_list: pred = np.vstack((pred, np.load(os.path.join(pred_dir, pred_list[i])))) pointcloud = np.vstack((pointcloud, self.load_ply(i)[0])) # Deduplicate all query pointclouds of split data. pointcloud = np.array(list(set(tuple(l) for l in pointcloud.tolist()))) # Run test for each room. pred_tree = spatial.KDTree(pred[:, :3], leafsize=500) _, result = pred_tree.query(pointcloud[:, :3]) ptc_pred = pred[result, 3].astype(int) ptc_gt = pointcloud[:, -1].astype(int) if self.IGNORE_LABELS: ptc_pred = self.label2masked[ptc_pred] ptc_gt = self.label2masked[ptc_gt] hist += fast_hist(ptc_pred, ptc_gt, self.NUM_LABELS) # Print results. ious = [] print('Per class IoU:') for i, iou in enumerate(per_class_iu(hist) * 100): result_str = '' if hist.sum(1)[i]: result_str += f'{iou}' ious.append(iou) else: result_str += 'N/A' # Do not print if data not in ground truth. 
print(result_str) print(f'Average IoU: {np.nanmean(ious)}') def _augment_coords_to_feats(self, coords, feats, labels=None): # Center x,y coords_center = coords.mean(0, keepdims=True) coords_center[0, 2] = 0 norm_coords = coords - coords_center feats = np.concatenate((feats, norm_coords), 1) return coords, feats, labels class StanfordDataset(StanfordVoxelizationDatasetBase, VoxelizationDataset): # Voxelization arguments VOXEL_SIZE = 0.05 # 5cm CLIP_BOUND = 4 # [-N, N] TEST_CLIP_BOUND = None # Augmentation arguments ROTATION_AUGMENTATION_BOUND = \ ((-np.pi / 32, np.pi / 32), (-np.pi / 32, np.pi / 32), (-np.pi, np.pi)) TRANSLATION_AUGMENTATION_RATIO_BOUND = ((-0.2, 0.2), (-0.2, 0.2), (-0.05, 0.05)) # AUGMENT_COORDS_TO_FEATS = True # NUM_IN_CHANNEL = 6 AUGMENT_COORDS_TO_FEATS = False NUM_IN_CHANNEL = 3 def __init__(self, config, prevoxel_transform=None, input_transform=None, target_transform=None, cache=False, augment_data=True, elastic_distortion=False, phase=DatasetPhase.Train): if isinstance(phase, str): phase = str2datasetphase_type(phase) if phase not in [DatasetPhase.Train, DatasetPhase.TrainVal]: self.CLIP_BOUND = self.TEST_CLIP_BOUND data_root = config.data.stanford3d_path if isinstance(self.DATA_PATH_FILE[phase], (list, tuple)): data_paths = [] for split in self.DATA_PATH_FILE[phase]: data_paths += read_txt(os.path.join(data_root, 'splits', split)) else: data_paths = read_txt(os.path.join(data_root, 'splits', self.DATA_PATH_FILE[phase])) if config.data.voxel_size: self.VOXEL_SIZE = config.data.voxel_size logging.info('voxel size: {}'.format(self.VOXEL_SIZE)) logging.info('Loading {} {}: {}'.format(self.__class__.__name__, phase, self.DATA_PATH_FILE[phase])) VoxelizationDataset.__init__( self, data_paths, data_root=data_root, prevoxel_transform=prevoxel_transform, input_transform=input_transform, target_transform=target_transform, ignore_label=config.data.ignore_label, return_transformation=config.data.return_transformation, augment_data=augment_data, elastic_distortion=elastic_distortion, config=config) @cache def load_ply(self, index): filepath = self.data_root / self.data_paths[index] plydata = PlyData.read(filepath) data = plydata.elements[0].data coords = np.array([data['x'], data['y'], data['z']], dtype=np.float32).T feats = np.array([data['red'], data['green'], data['blue']], dtype=np.float32).T labels = np.array(data['label'], dtype=np.int32) return coords, feats, labels, None @cache def load_data(self, index): filepath = self.data_root / self.data_paths[index] pointcloud = torch.load(filepath) coords = pointcloud[:,:3].astype(np.float32) feats = pointcloud[:,3:6].astype(np.float32) labels = pointcloud[:,6].astype(np.int32) instances = pointcloud[:,7].astype(np.int32) return coords, feats, labels, instances class StanfordArea5Dataset(StanfordDataset): DATA_PATH_FILE = { DatasetPhase.Train: ['area1.txt', 'area2.txt', 'area3.txt', 'area4.txt', 'area6.txt'], DatasetPhase.Val: 'area5.txt', DatasetPhase.Test: 'area5.txt' } class StanfordArea53cmDataset(StanfordArea5Dataset): CLIP_BOUND = 3.2 VOXEL_SIZE = 0.03 class StanfordArea57d5cmDataset(StanfordArea5Dataset): VOXEL_SIZE = 0.075 class StanfordArea510cmDataset(StanfordArea5Dataset): VOXEL_SIZE = 0.1 def test(config): """Test point cloud data loader. 
""" from torch.utils.data import DataLoader from lib.utils import Timer import open3d as o3d def make_pcd(coords, feats): pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(coords[:, :3].float().numpy()) pcd.colors = o3d.utility.Vector3dVector(feats[:, :3].numpy() / 255) return pcd timer = Timer() DatasetClass = StanfordArea5Dataset transformations = [ t.RandomHorizontalFlip(DatasetClass.ROTATION_AXIS, DatasetClass.IS_TEMPORAL), t.ChromaticAutoContrast(), t.ChromaticTranslation(config.data_aug_color_trans_ratio), t.ChromaticJitter(config.data_aug_color_jitter_std), ] dataset = DatasetClass( config, prevoxel_transform=t.ElasticDistortion(DatasetClass.ELASTIC_DISTORT_PARAMS), input_transform=t.Compose(transformations), augment_data=True, cache=True, elastic_distortion=True) data_loader = DataLoader( dataset=dataset, collate_fn=t.cfl_collate_fn_factory(limit_numpoints=False), batch_size=1, shuffle=True) # Start from index 1 iter = data_loader.__iter__() for i in range(100): timer.tic() coords, feats, labels = iter.next() pcd = make_pcd(coords, feats) o3d.visualization.draw_geometries([pcd]) print(timer.toc()) if __name__ == '__main__': from config import get_config config = get_config() test(config)
ContrastiveSceneContexts-main
downstream/insseg/datasets/stanford.py
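A toy sketch of the nearest-neighbour label transfer used in test_pointcloud: voxel-level predictions are mapped back onto the full-resolution point cloud with a k-d tree before the confusion statistics are accumulated. The arrays are random stand-ins with the same column layout as in the method above.

import numpy as np
from scipy import spatial

pred = np.hstack([np.random.rand(100, 3), np.random.randint(0, 13, (100, 1))])          # xyz + predicted label
pointcloud = np.hstack([np.random.rand(1000, 6), np.random.randint(0, 13, (1000, 1))])  # xyzrgb + ground-truth label

pred_tree = spatial.KDTree(pred[:, :3], leafsize=500)
_, nn = pred_tree.query(pointcloud[:, :3])   # nearest predicted point for every full-resolution point
ptc_pred = pred[nn, 3].astype(int)
ptc_gt = pointcloud[:, -1].astype(int)
print((ptc_pred == ptc_gt).mean())           # point-level accuracy on the toy data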
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import collections import numpy as np import MinkowskiEngine as ME from scipy.linalg import expm, norm # Rotation matrix along axis with angle theta def M(axis, theta): return expm(np.cross(np.eye(3), axis / norm(axis) * theta)) class Voxelizer: def __init__(self, voxel_size=1, clip_bound=None, use_augmentation=False, scale_augmentation_bound=None, rotation_augmentation_bound=None, translation_augmentation_ratio_bound=None, ignore_label=255): """ Args: voxel_size: side length of a voxel clip_bound: boundary of the voxelizer. Points outside the bound will be deleted expects either None or an array like ((-100, 100), (-100, 100), (-100, 100)). scale_augmentation_bound: None or (0.9, 1.1) rotation_augmentation_bound: None or ((np.pi / 6, np.pi / 6), None, None) for 3 axis. Use random order of x, y, z to prevent bias. translation_augmentation_bound: ((-5, 5), (0, 0), (-10, 10)) ignore_label: label assigned for ignore (not a training label). """ self.voxel_size = voxel_size self.clip_bound = clip_bound self.ignore_label = ignore_label # Augmentation self.use_augmentation = use_augmentation self.scale_augmentation_bound = scale_augmentation_bound self.rotation_augmentation_bound = rotation_augmentation_bound self.translation_augmentation_ratio_bound = translation_augmentation_ratio_bound def get_transformation_matrix(self): voxelization_matrix, rotation_matrix = np.eye(4), np.eye(4) # Get clip boundary from config or pointcloud. # Get inner clip bound to crop from. # Transform pointcloud coordinate to voxel coordinate. # 1. Random rotation rot_mat = np.eye(3) if self.use_augmentation and self.rotation_augmentation_bound is not None: if isinstance(self.rotation_augmentation_bound, collections.Iterable): rot_mats = [] for axis_ind, rot_bound in enumerate(self.rotation_augmentation_bound): theta = 0 axis = np.zeros(3) axis[axis_ind] = 1 if rot_bound is not None: theta = np.random.uniform(*rot_bound) rot_mats.append(M(axis, theta)) # Use random order np.random.shuffle(rot_mats) rot_mat = rot_mats[0] @ rot_mats[1] @ rot_mats[2] else: raise ValueError() rotation_matrix[:3, :3] = rot_mat # 2. Scale and translate to the voxel space. scale = 1 / self.voxel_size if self.use_augmentation and self.scale_augmentation_bound is not None: scale *= np.random.uniform(*self.scale_augmentation_bound) np.fill_diagonal(voxelization_matrix[:3, :3], scale) # Get final transformation matrix. 
return voxelization_matrix, rotation_matrix def clip(self, coords, center=None, trans_aug_ratio=None): bound_min = np.min(coords, 0).astype(float) bound_max = np.max(coords, 0).astype(float) bound_size = bound_max - bound_min if center is None: center = bound_min + bound_size * 0.5 if trans_aug_ratio is not None: trans = np.multiply(trans_aug_ratio, bound_size) center += trans lim = self.clip_bound if isinstance(self.clip_bound, (int, float)): if bound_size.max() < self.clip_bound: return None else: clip_inds = ((coords[:, 0] >= (-lim + center[0])) & \ (coords[:, 0] < (lim + center[0])) & \ (coords[:, 1] >= (-lim + center[1])) & \ (coords[:, 1] < (lim + center[1])) & \ (coords[:, 2] >= (-lim + center[2])) & \ (coords[:, 2] < (lim + center[2]))) return clip_inds # Clip points outside the limit clip_inds = ((coords[:, 0] >= (lim[0][0] + center[0])) & \ (coords[:, 0] < (lim[0][1] + center[0])) & \ (coords[:, 1] >= (lim[1][0] + center[1])) & \ (coords[:, 1] < (lim[1][1] + center[1])) & \ (coords[:, 2] >= (lim[2][0] + center[2])) & \ (coords[:, 2] < (lim[2][1] + center[2]))) return clip_inds def voxelize(self, coords, feats, labels, instances, center=None): assert coords.shape[1] == 3 and coords.shape[0] == feats.shape[0] and coords.shape[0] if self.clip_bound is not None: trans_aug_ratio = np.zeros(3) if self.use_augmentation and self.translation_augmentation_ratio_bound is not None: for axis_ind, trans_ratio_bound in enumerate(self.translation_augmentation_ratio_bound): trans_aug_ratio[axis_ind] = np.random.uniform(*trans_ratio_bound) clip_inds = self.clip(coords, center, trans_aug_ratio) if clip_inds is not None: coords, feats = coords[clip_inds], feats[clip_inds] if labels is not None: labels = labels[clip_inds] if instances is not None: instances = instances[clip_inds] # Get rotation and scale M_v, M_r = self.get_transformation_matrix() # Apply transformations rigid_transformation = M_v if self.use_augmentation: rigid_transformation = M_r @ rigid_transformation homo_coords = np.hstack((coords, np.ones((coords.shape[0], 1), dtype=coords.dtype))) coords_aug = np.floor(homo_coords @ rigid_transformation.T[:, :3]) # Align all coordinates to the origin. 
min_coords = coords_aug.min(0) M_t = np.eye(4) M_t[:3, -1] = -min_coords rigid_transformation = M_t @ rigid_transformation coords_aug = np.floor(coords_aug - min_coords) # key = self.hash(coords_aug) # floor happens by astype(np.uint64) mapping, colabels = ME.utils.sparse_quantize( coords_aug, feats, labels=labels, return_index=True, ignore_label=self.ignore_label) coords_aug = coords_aug[mapping] feats = feats[mapping] labels = colabels instances = instances[mapping] return coords_aug, feats, labels, instances, rigid_transformation.flatten() def voxelize_temporal(self, coords_t, feats_t, labels_t, centers=None, return_transformation=False): # Legacy code, remove if centers is None: centers = [ None, ] * len(coords_t) coords_tc, feats_tc, labels_tc, transformation_tc = [], [], [], [] # ######################### Data Augmentation ############################# # Get rotation and scale M_v, M_r = self.get_transformation_matrix() # Apply transformations rigid_transformation = M_v if self.use_augmentation: rigid_transformation = M_r @ rigid_transformation # ######################### Voxelization ############################# # Voxelize coords for coords, feats, labels, center in zip(coords_t, feats_t, labels_t, centers): ################################### # Clip the data if bound exists if self.clip_bound is not None: trans_aug_ratio = np.zeros(3) if self.use_augmentation and self.translation_augmentation_ratio_bound is not None: for axis_ind, trans_ratio_bound in enumerate(self.translation_augmentation_ratio_bound): trans_aug_ratio[axis_ind] = np.random.uniform(*trans_ratio_bound) clip_inds = self.clip(coords, center, trans_aug_ratio) if clip_inds is not None: coords, feats = coords[clip_inds], feats[clip_inds] if labels is not None: labels = labels[clip_inds] ################################### homo_coords = np.hstack((coords, np.ones((coords.shape[0], 1), dtype=coords.dtype))) coords_aug = np.floor(homo_coords @ rigid_transformation.T)[:, :3] coords_aug, feats, labels = ME.utils.sparse_quantize( coords_aug, feats, labels=labels, ignore_label=self.ignore_label) coords_tc.append(coords_aug) feats_tc.append(feats) labels_tc.append(labels) transformation_tc.append(rigid_transformation.flatten()) return_args = [coords_tc, feats_tc, labels_tc] if return_transformation: return_args.append(transformation_tc) return tuple(return_args) def test(): N = 16575 coords = np.random.rand(N, 3) * 10 feats = np.random.rand(N, 4) labels = np.floor(np.random.rand(N) * 3) coords[:3] = 0 labels[:3] = 2 voxelizer = Voxelizer() print(voxelizer.voxelize(coords, feats, labels)) if __name__ == '__main__': test()
ContrastiveSceneContexts-main
downstream/insseg/datasets/voxelizer.py
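A quick check of the axis-angle helper M() above (matrix exponential of the skew matrix of axis * theta): a 90-degree rotation about z should map the x unit vector onto y. Assumes the module is importable as datasets.voxelizer.

import numpy as np
from datasets.voxelizer import M

R = M(np.array([0., 0., 1.]), np.pi / 2)        # rotation by pi/2 about the z axis
print(np.round(R @ np.array([1., 0., 0.]), 6))  # approximately [0. 1. 0.]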
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.distributed as dist from torch.utils.data.sampler import Sampler class InfSampler(Sampler): """Samples elements randomly, without replacement. Arguments: data_source (Dataset): dataset to sample from """ def __init__(self, data_source, shuffle=False): self.data_source = data_source self.shuffle = shuffle self.reset_permutation() def reset_permutation(self): perm = len(self.data_source) if self.shuffle: perm = torch.randperm(perm) self._perm = perm.tolist() def __iter__(self): return self def __next__(self): if len(self._perm) == 0: self.reset_permutation() return self._perm.pop() def __len__(self): return len(self.data_source) next = __next__ # Python 2 compatibility class DistributedInfSampler(InfSampler): def __init__(self, data_source, num_replicas=None, rank=None, shuffle=True): if num_replicas is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") num_replicas = dist.get_world_size() if rank is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") rank = dist.get_rank() self.data_source = data_source self.num_replicas = num_replicas self.rank = rank self.epoch = 0 self.it = 0 self.num_samples = int(math.ceil(len(self.data_source) * 1.0 / self.num_replicas)) self.total_size = self.num_samples * self.num_replicas self.shuffle = shuffle self.reset_permutation() def __next__(self): it = self.it * self.num_replicas + self.rank value = self._perm[it % len(self._perm)] self.it = self.it + 1 if (self.it * self.num_replicas) >= len(self._perm): self.reset_permutation() self.it = 0 return value def __len__(self): return self.num_samples
ContrastiveSceneContexts-main
downstream/insseg/datasets/dataloader.py
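A minimal sketch showing how InfSampler keeps a DataLoader iterator alive past one pass over the data: the sampler re-draws a permutation whenever it runs out, so the consumer decides how many batches to take. The toy dataset below is a stand-in.

import torch
from torch.utils.data import DataLoader, TensorDataset
from datasets.dataloader import InfSampler   # assumes the insseg package is importable

ds = TensorDataset(torch.arange(10).float())
loader = DataLoader(ds, batch_size=4, sampler=InfSampler(ds, shuffle=True))

it = iter(loader)
for _ in range(5):        # more batches than one epoch holds; the sampler simply reshuffles
    (batch,) = next(it)
    print(batch)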
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import sys from pathlib import Path import torch import numpy as np from scipy import spatial from datasets.dataset import VoxelizationDataset, DatasetPhase, str2datasetphase_type from lib.pc_utils import read_plyfile, save_point_cloud from lib.utils import read_txt, fast_hist, per_class_iu from lib.io3d import write_triangle_mesh, create_color_palette class ScannetVoxelizationDataset(VoxelizationDataset): # added NUM_LABELS = 41 # Will be converted to 20 as defined in IGNORE_LABELS. NUM_IN_CHANNEL = 3 CLASS_LABELS = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture') VALID_CLASS_IDS = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39) IGNORE_LABELS = tuple(set(range(NUM_LABELS)) - set(VALID_CLASS_IDS)) CLASS_LABELS_INSTANCE = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture'] VALID_CLASS_IDS_INSTANCE = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]) IGNORE_LABELS_INSTANCE = tuple(set(range(NUM_LABELS)) - set(VALID_CLASS_IDS_INSTANCE)) # Voxelization arguments CLIP_BOUND = None TEST_CLIP_BOUND = None VOXEL_SIZE = 0.05 # Augmentation arguments ROTATION_AUGMENTATION_BOUND = ((-np.pi / 64, np.pi / 64), (-np.pi / 64, np.pi / 64), (-np.pi, np.pi)) TRANSLATION_AUGMENTATION_RATIO_BOUND = ((-0.2, 0.2), (-0.2, 0.2), (0, 0)) ELASTIC_DISTORT_PARAMS = ((0.2, 0.4), (0.8, 1.6)) ROTATION_AXIS = 'z' LOCFEAT_IDX = 2 IS_FULL_POINTCLOUD_EVAL = True # If trainval.txt does not exist, copy train.txt and add contents from val.txt DATA_PATH_FILE = { DatasetPhase.Train: 'scannetv2_train.txt', DatasetPhase.Val: 'scannetv2_val.txt', DatasetPhase.TrainVal: 'scannetv2_trainval.txt', DatasetPhase.Test: 'scannetv2_test.txt', DatasetPhase.Debug: 'debug.txt' } def __init__(self, config, prevoxel_transform=None, input_transform=None, target_transform=None, augment_data=True, elastic_distortion=False, cache=False, phase=DatasetPhase.Train): if isinstance(phase, str): phase = str2datasetphase_type(phase) # Use cropped rooms for train/val data_root = config.data.scannet_path if phase not in [DatasetPhase.Train, DatasetPhase.TrainVal]: self.CLIP_BOUND = self.TEST_CLIP_BOUND data_paths = read_txt(os.path.join(data_root, 'splits', self.DATA_PATH_FILE[phase])) if phase == DatasetPhase.Train and config.data.train_file: data_paths = read_txt(os.path.join(data_root, 'splits', config.data.train_file)) # data efficiency by sampling points self.sampled_inds = {} if config.data.sampled_inds and phase == DatasetPhase.Train: self.sampled_inds = torch.load(config.data.sampled_inds) data_paths = [data_path + '.pth' for data_path in data_paths] logging.info('Loading {}: {}'.format(self.__class__.__name__, self.DATA_PATH_FILE[phase])) super().__init__( data_paths, data_root=data_root, prevoxel_transform=prevoxel_transform, input_transform=input_transform, target_transform=target_transform, ignore_label=config.data.ignore_label, return_transformation=config.data.return_transformation, augment_data=augment_data, elastic_distortion=elastic_distortion, config=config) def get_output_id(self, 
iteration): return '_'.join(Path(self.data_paths[iteration]).stem.split('_')[:2]) def _augment_locfeat(self, pointcloud): # Assuming that pointcloud is xyzrgb(...), append location feat. pointcloud = np.hstack( (pointcloud[:, :6], 100 * np.expand_dims(pointcloud[:, self.LOCFEAT_IDX], 1), pointcloud[:, 6:])) return pointcloud def load_data(self, index): filepath = self.data_root / self.data_paths[index] pointcloud = torch.load(filepath) coords = pointcloud[0].astype(np.float32) feats = pointcloud[1].astype(np.float32) labels = pointcloud[2].astype(np.int32) instances = pointcloud[3].astype(np.int32) if self.sampled_inds: scene_name = self.get_output_id(index) mask = np.ones_like(labels).astype(np.bool) sampled_inds = self.sampled_inds[scene_name] mask[sampled_inds] = False labels[mask] = 0 instances[mask] = 0 return coords, feats, labels, instances def get_original_pointcloud(self, coords, transformation, iteration): logging.info('===> Start testing on original pointcloud space.') data_path = self.data_paths[iteration] fullply_f = self.data_root / data_path query_xyz, _, query_label, _ = torch.load(fullply_f) coords = coords[:, 1:].numpy() + 0.5 curr_transformation = transformation[0, :16].numpy().reshape(4, 4) coords = np.hstack((coords, np.ones((coords.shape[0], 1)))) coords = (np.linalg.inv(curr_transformation) @ coords.T).T # Run test for each room. from pykeops.numpy import LazyTensor from pykeops.numpy.utils import IsGpuAvailable query_xyz = np.array(query_xyz) x_i = LazyTensor( query_xyz[:,None,:] ) # x_i.shape = (1e6, 1, 3) y_j = LazyTensor( coords[:,:3][None,:,:] ) # y_j.shape = ( 1, 2e6,3) D_ij = ((x_i - y_j) ** 2).sum(-1) # (M**2, N) symbolic matrix of squared distances indKNN = D_ij.argKmin(1, dim=1) # Grid <-> Samples, (M**2, K) integer tensor inds = indKNN[:,0] return inds, query_xyz def save_prediction(self, coords, pred, transformation, iteration, save_dir): print('Running full pointcloud evaluation.') #if dataset.IGNORE_LABELS: # decode_label_map = {} # for k, v in dataset.label_map.items(): # decode_label_map[v] = k # orig_pred = np.array([decode_label_map[x.item()] for x in orig_pred.cpu()], dtype=np.int) inds_mapping, xyz = self.get_original_pointcloud(coords, transformation, iteration) save = {'points': coords, 'mapping': inds_mapping, 'labels': pred} # Save prediciton in txt format for submission. room_id = self.get_output_id(iteration) torch.save(save, os.path.join(save_dir, room_id)) #np.savetxt(f'{save_dir}/{room_id}.txt', ptc_pred, fmt='%i') def save_groundtruth(self, coords, gt, transformation, iteration, save_dir): save = {'points': coords, 'labels': gt} # Save prediciton in txt format for submission. room_id = self.get_output_id(iteration) torch.save(save, os.path.join(save_dir, room_id)) class ScannetVoxelization2cmDataset(ScannetVoxelizationDataset): VOXEL_SIZE = 0.02
ContrastiveSceneContexts-main
downstream/insseg/datasets/scannet.py
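The dataset above loads 41 raw ScanNet/NYU40 label ids but trains on the 20 classes in VALID_CLASS_IDS; everything else is folded into IGNORE_LABELS. A minimal, standalone sketch of that remapping (the ignore value 255 is an assumption here; the real value comes from config.data.ignore_label):

import numpy as np

NUM_LABELS = 41
VALID_CLASS_IDS = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39)
IGNORE_LABEL = 255  # assumed ignore value for illustration

# Build raw-id -> train-id lookup; ids outside the valid set map to the ignore value.
label_map = np.full(NUM_LABELS, IGNORE_LABEL, dtype=np.int64)
for train_id, class_id in enumerate(VALID_CLASS_IDS):
    label_map[class_id] = train_id

raw_labels = np.array([1, 2, 13, 39, 0])  # 13 and 0 are not in the valid set
print(label_map[raw_labels])              # [  0   1 255  19 255]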
# Evaluates semantic label task # Input: # - path to .txt prediction files # - path to .txt ground truth files # - output file to write results to # Note that only the valid classes are used for evaluation, # i.e., any ground truth label not in the valid label set # is ignored in the evaluation. # # example usage: evaluate_semantic_label.py --scan_path [path to scan data] --output_file [output file] # python imports import math import logging import os, sys, argparse import inspect try: import numpy as np except: print("Failed to import numpy package.") sys.exit(-1) try: from itertools import izip except ImportError: izip = zip #currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) #parentdir = os.path.dirname(currentdir) #sys.path.insert(0,parentdir) from datasets.evaluation.scannet_benchmark_utils import util_3d from datasets.evaluation.scannet_benchmark_utils import util class Evaluator: def __init__(self, CLASS_LABELS, VALID_CLASS_IDS): #CLASS_LABELS = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', # 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', # 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture'] #VALID_CLASS_IDS = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]) self.CLASS_LABELS = CLASS_LABELS self.VALID_CLASS_IDS = VALID_CLASS_IDS self.UNKNOWN_ID = np.max(VALID_CLASS_IDS) + 1 self.gt = {} self.pred = {} max_id = self.UNKNOWN_ID self.confusion = np.zeros((max_id+1, max_id+1), dtype=np.ulonglong) def update_confusion(self, pred_ids, gt_ids, sceneId=None): # sanity checks if not pred_ids.shape == gt_ids.shape: util.print_error('%s: number of predicted values does not match number of vertices' % pred_file, user_fault=True) n = self.confusion.shape[0] k = (gt_ids >= 0) & (gt_ids < n) temporal = np.bincount(n * gt_ids[k].astype(int) + pred_ids[k], minlength=n**2).reshape(n, n) for valid_class_row in self.VALID_CLASS_IDS: for valid_class_col in self.VALID_CLASS_IDS: self.confusion[valid_class_row][valid_class_col] += temporal[valid_class_row][valid_class_col] @staticmethod def write_to_benchmark(base='benchmark_segmentation', sceneId=None, pred_ids=None): os.makedirs(base, exist_ok=True) util_3d.export_ids('{}.txt'.format(os.path.join(base, sceneId)), pred_ids) def get_iou(self, label_id, confusion): if not label_id in self.VALID_CLASS_IDS: return float('nan') # #true positives tp = np.longlong(confusion[label_id, label_id]) # #false negatives fn = np.longlong(confusion[label_id, :].sum()) - tp # #false positives not_ignored = [l for l in self.VALID_CLASS_IDS if not l == label_id] fp = np.longlong(confusion[not_ignored, label_id].sum()) denom = (tp + fp + fn) if denom == 0: return float('nan') return (float(tp) / denom, tp, denom) def write_result_file(self, confusion, ious, filename): with open(filename, 'w') as f: f.write('iou scores\n') for i in range(len(self.VALID_CLASS_IDS)): label_id = self.VALID_CLASS_IDS[i] label_name = self.CLASS_LABELS[i] iou = ious[label_name][0] f.write('{0:<14s}({1:<2d}): {2:>5.3f}\n'.format(label_name, label_id, iou)) f.write("{0:<14s}: {1:>5.3f}".format('mean', np.array([ious[k][0] for k in ious]).mean())) f.write('\nconfusion matrix\n') f.write('\t\t\t') for i in range(len(self.VALID_CLASS_IDS)): #f.write('\t{0:<14s}({1:<2d})'.format(CLASS_LABELS[i], VALID_CLASS_IDS[i])) f.write('{0:<8d}'.format(self.VALID_CLASS_IDS[i])) f.write('\n') for r in range(len(self.VALID_CLASS_IDS)): 
f.write('{0:<14s}({1:<2d})'.format(self.CLASS_LABELS[r], self.VALID_CLASS_IDS[r])) for c in range(len(self.VALID_CLASS_IDS)): f.write('\t{0:>5.3f}'.format(confusion[self.VALID_CLASS_IDS[r],self.VALID_CLASS_IDS[c]])) f.write('\n') print('wrote results to', filename) def evaluate_confusion(self, output_file=None): class_ious = {} counter = 0 summation = 0 for i in range(len(self.VALID_CLASS_IDS)): label_name = self.CLASS_LABELS[i] label_id = self.VALID_CLASS_IDS[i] class_ious[label_name] = self.get_iou(label_id, self.confusion) # print logging.info('classes IoU') logging.info('----------------------------') for i in range(len(self.VALID_CLASS_IDS)): label_name = self.CLASS_LABELS[i] try: logging.info('{0:<14s}: {1:>5.3f} ({2:>6d}/{3:<6d})'.format(label_name, class_ious[label_name][0], class_ious[label_name][1], class_ious[label_name][2])) summation += class_ious[label_name][0] counter += 1 except: logging.info('{0:<14s}: nan ( nan/nan )'.format(label_name)) logging.info("{0:<14s}: {1:>5.3f}".format('mean', summation / counter)) if output_file: self.write_result_file(self.confusion, class_ious, output_file) return summation / counter def config(): parser = argparse.ArgumentParser() parser.add_argument('--pred_path', required=True, help='path to directory of predicted .txt files') parser.add_argument('--gt_path', required=True, help='path to gt files') parser.add_argument('--output_file', type=str, default='./semantic_label_evaluation.txt') opt = parser.parse_args() return opt def main(): opt = config() #------------------------- ScanNet -------------------------- CLASS_LABELS = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture'] VALID_CLASS_IDS = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]) evaluator = Evaluator(CLASS_LABELS=CLASS_LABELS, VALID_CLASS_IDS=VALID_CLASS_IDS) print('reading', len(os.listdir(opt.pred_path))-1, 'scans...') for i, pred_file in enumerate(os.listdir(opt.pred_path)): if pred_file == 'semantic_label_evaluation.txt': continue gt_file = os.path.join(opt.gt_path, pred_file) if not os.path.isfile(gt_file): util.print_error('Result file {} does not match any gt file'.format(pred_file), user_fault=True) gt_ids = util_3d.load_ids(gt_file) pred_file = os.path.join(opt.pred_path, pred_file) pred_ids = util_3d.load_ids(pred_file) evaluator.update_confusion(pred_ids, gt_ids, pred_file.split('.')[0]) sys.stdout.write("\rscans processed: {}".format(i+1)) sys.stdout.flush() # evaluate evaluator.evaluate_confusion(opt.output_file) if __name__ == '__main__': main()
ContrastiveSceneContexts-main
downstream/insseg/datasets/evaluation/evaluate_semantic_label.py
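As a quick reference for the convention get_iou implements (true positives over tp + fp + fn, restricted to the valid-class set), here is a tiny standalone recomputation on made-up labels; it is not part of the benchmark script:

import numpy as np

VALID_CLASS_IDS = np.array([1, 2])
gt_ids   = np.array([1, 1, 2, 2, 2])
pred_ids = np.array([1, 2, 2, 2, 1])

# rows: ground truth, cols: prediction
confusion = np.zeros((3, 3), dtype=np.int64)
np.add.at(confusion, (gt_ids, pred_ids), 1)

def iou(label_id, confusion, valid_ids):
    tp = confusion[label_id, label_id]
    fn = confusion[label_id, :].sum() - tp
    fp = confusion[[l for l in valid_ids if l != label_id], label_id].sum()
    denom = tp + fp + fn
    return float(tp) / denom if denom else float('nan')

print(iou(1, confusion, VALID_CLASS_IDS))  # 1 / (1 + 1 + 1) = 0.333...
print(iou(2, confusion, VALID_CLASS_IDS))  # 2 / (2 + 1 + 1) = 0.5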
# Evaluates semantic instance task # Adapted from the CityScapes evaluation: https://github.com/mcordts/cityscapesScripts/tree/master/cityscapesscripts/evaluation # Input: # - path to .txt prediction files # - path to .txt ground truth files # - output file to write results to # Each .txt prediction file look like: # [(pred0) rel. path to pred. mask over verts as .txt] [(pred0) label id] [(pred0) confidence] # [(pred1) rel. path to pred. mask over verts as .txt] [(pred1) label id] [(pred1) confidence] # [(pred2) rel. path to pred. mask over verts as .txt] [(pred2) label id] [(pred2) confidence] # ... # # NOTE: The prediction files must live in the root of the given prediction path. # Predicted mask .txt files must live in a subfolder. # Additionally, filenames must not contain spaces. # The relative paths to predicted masks must contain one integer per line, # where each line corresponds to vertices in the *_vh_clean_2.ply (in that order). # Non-zero integers indicate part of the predicted instance. # The label ids specify the class of the corresponding mask. # Confidence is a float confidence score of the mask. # # Note that only the valid classes are used for evaluation, # i.e., any ground truth label not in the valid label set # is ignored in the evaluation. # # example usage: evaluate_semantic_instance.py --scan_path [path to scan data] --output_file [output file] # python imports import logging import math import os, sys, argparse import inspect from copy import deepcopy import argparse import numpy as np #currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) #parentdir = os.path.dirname(currentdir) #sys.path.insert(0,parentdir) from datasets.evaluation.scannet_benchmark_utils import util_3d from datasets.evaluation.scannet_benchmark_utils import util def setup_logging(): ch = logging.StreamHandler(sys.stdout) logging.getLogger().setLevel(logging.INFO) logging.basicConfig( format=os.uname()[1].split('.')[0] + ' %(asctime)s %(message)s', datefmt='%m/%d %H:%M:%S', handlers=[ch]) class Evaluator: # ---------- Evaluation params ---------- # # overlaps for evaluation overlaps = np.append(np.arange(0.5,0.95,0.05), 0.25) # minimum region size for evaluation [verts] min_region_sizes = np.array( [ 10 ] ) # distance thresholds [m] distance_threshes = np.array( [ float('inf') ] ) # distance confidences distance_confs = np.array( [ -float('inf') ] ) def __init__(self, CLASS_LABELS, VALID_CLASS_IDS, benchmark=False): # ---------- Label info ---------- # #CLASS_LABELS = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door', # 'window', 'bookshelf', 'picture', 'counter', # 'desk', 'curtain', 'refrigerator', 'shower curtain', # 'toilet', 'sink', 'bathtub', 'otherfurniture'] #VALID_CLASS_IDS = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]) self.CLASS_LABELS = CLASS_LABELS self.VALID_CLASS_IDS = VALID_CLASS_IDS self.ID_TO_LABEL = {} self.LABEL_TO_ID = {} for i in range(len(VALID_CLASS_IDS)): self.LABEL_TO_ID[CLASS_LABELS[i]] = VALID_CLASS_IDS[i] self.ID_TO_LABEL[VALID_CLASS_IDS[i]] = CLASS_LABELS[i] self.pred_instances = {} self.gt_instances = {} self.benchmark = benchmark def evaluate_matches(self, matches): # results: class x overlap ap = np.zeros( (len(self.distance_threshes) , len(self.CLASS_LABELS) , len(self.overlaps)) , np.float ) for di, (min_region_size, distance_thresh, distance_conf) in enumerate(zip(self.min_region_sizes, self.distance_threshes, self.distance_confs)): for oi, overlap_th in enumerate(self.overlaps): pred_visited = 
{} for m in matches: for p in matches[m]['pred']: for label_name in self.CLASS_LABELS: for p in matches[m]['pred'][label_name]: if 'filename' in p: pred_visited[p['filename']] = False for li, label_name in enumerate(self.CLASS_LABELS): y_true = np.empty(0) y_score = np.empty(0) hard_false_negatives = 0 has_gt = False has_pred = False for m in matches: pred_instances = matches[m]['pred'][label_name] gt_instances = matches[m]['gt'][label_name] # filter groups in ground truth gt_instances = [ gt for gt in gt_instances if gt['instance_id']>=1000 and gt['vert_count']>=min_region_size and gt['med_dist']<=distance_thresh and gt['dist_conf']>=distance_conf ] if gt_instances: has_gt = True if pred_instances: has_pred = True cur_true = np.ones ( len(gt_instances) ) cur_score = np.ones ( len(gt_instances) ) * (-float("inf")) cur_match = np.zeros( len(gt_instances) , dtype=np.bool ) # collect matches for (gti,gt) in enumerate(gt_instances): found_match = False num_pred = len(gt['matched_pred']) for pred in gt['matched_pred']: # greedy assignments if pred_visited[pred['filename']]: continue overlap = float(pred['intersection']) / (gt['vert_count']+pred['vert_count']-pred['intersection']) if overlap > overlap_th: confidence = pred['confidence'] # if already have a prediction for this gt, # the prediction with the lower score is automatically a false positive if cur_match[gti]: max_score = max( cur_score[gti] , confidence ) min_score = min( cur_score[gti] , confidence ) cur_score[gti] = max_score # append false positive cur_true = np.append(cur_true,0) cur_score = np.append(cur_score,min_score) cur_match = np.append(cur_match,True) # otherwise set score else: found_match = True cur_match[gti] = True cur_score[gti] = confidence pred_visited[pred['filename']] = True if not found_match: hard_false_negatives += 1 # remove non-matched ground truth instances cur_true = cur_true [ cur_match==True ] cur_score = cur_score[ cur_match==True ] # collect non-matched predictions as false positive for pred in pred_instances: found_gt = False for gt in pred['matched_gt']: overlap = float(gt['intersection']) / (gt['vert_count']+pred['vert_count']-gt['intersection']) if overlap > overlap_th: found_gt = True break if not found_gt: num_ignore = pred['void_intersection'] for gt in pred['matched_gt']: # group? 
if gt['instance_id'] < 1000: num_ignore += gt['intersection'] # small ground truth instances if gt['vert_count'] < min_region_size or gt['med_dist']>distance_thresh or gt['dist_conf']<distance_conf: num_ignore += gt['intersection'] proportion_ignore = float(num_ignore)/pred['vert_count'] # if not ignored append false positive if proportion_ignore <= overlap_th: cur_true = np.append(cur_true,0) confidence = pred["confidence"] cur_score = np.append(cur_score,confidence) # append to overall results y_true = np.append(y_true,cur_true) y_score = np.append(y_score,cur_score) # compute average precision if has_gt and has_pred: # compute precision recall curve first # sorting and cumsum score_arg_sort = np.argsort(y_score) y_score_sorted = y_score[score_arg_sort] y_true_sorted = y_true[score_arg_sort] y_true_sorted_cumsum = np.cumsum(y_true_sorted) # unique thresholds (thresholds,unique_indices) = np.unique( y_score_sorted , return_index=True ) num_prec_recall = len(unique_indices) + 1 # prepare precision recall num_examples = len(y_score_sorted) try: num_true_examples = y_true_sorted_cumsum[-1] except: num_true_examples = 0 precision = np.zeros(num_prec_recall) recall = np.zeros(num_prec_recall) # deal with the first point y_true_sorted_cumsum = np.append( y_true_sorted_cumsum , 0 ) # deal with remaining for idx_res,idx_scores in enumerate(unique_indices): cumsum = y_true_sorted_cumsum[idx_scores-1] tp = num_true_examples - cumsum fp = num_examples - idx_scores - tp fn = cumsum + hard_false_negatives p = float(tp)/(tp+fp) r = float(tp)/(tp+fn) precision[idx_res] = p recall [idx_res] = r # first point in curve is artificial precision[-1] = 1. recall [-1] = 0. # compute average of precision-recall curve recall_for_conv = np.copy(recall) recall_for_conv = np.append(recall_for_conv[0], recall_for_conv) recall_for_conv = np.append(recall_for_conv, 0.) 
stepWidths = np.convolve(recall_for_conv,[-0.5,0,0.5],'valid') # integrate is now simply a dot product ap_current = np.dot(precision, stepWidths) elif has_gt: ap_current = 0.0 else: ap_current = float('nan') ap[di,li,oi] = ap_current return ap def compute_averages(self, aps): d_inf = 0 o50 = np.where(np.isclose(self.overlaps,0.5)) o25 = np.where(np.isclose(self.overlaps,0.25)) oAllBut25 = np.where(np.logical_not(np.isclose(self.overlaps,0.25))) avg_dict = {} #avg_dict['all_ap'] = np.nanmean(aps[ d_inf,:,: ]) avg_dict['all_ap'] = np.nanmean(aps[ d_inf,:,oAllBut25]) avg_dict['all_ap_50%'] = np.nanmean(aps[ d_inf,:,o50]) avg_dict['all_ap_25%'] = np.nanmean(aps[ d_inf,:,o25]) avg_dict["classes"] = {} for (li,label_name) in enumerate(self.CLASS_LABELS): avg_dict["classes"][label_name] = {} #avg_dict["classes"][label_name]["ap"] = np.average(aps[ d_inf,li, :]) avg_dict["classes"][label_name]["ap"] = np.average(aps[ d_inf,li,oAllBut25]) avg_dict["classes"][label_name]["ap50%"] = np.average(aps[ d_inf,li,o50]) avg_dict["classes"][label_name]["ap25%"] = np.average(aps[ d_inf,li,o25]) return avg_dict def assign_instances_for_scan(self, scene_id): # get gt instances gt_ids = self.gt_instances[scene_id] gt_instances = util_3d.get_instances(gt_ids, self.VALID_CLASS_IDS, self.CLASS_LABELS, self.ID_TO_LABEL) # associate gt2pred = deepcopy(gt_instances) for label in gt2pred: for gt in gt2pred[label]: gt['matched_pred'] = [] pred2gt = {} for label in self.CLASS_LABELS: pred2gt[label] = [] num_pred_instances = 0 # mask of void labels in the groundtruth bool_void = np.logical_not(np.in1d(gt_ids//1000, self.VALID_CLASS_IDS)) # go thru all prediction masks for instance_id in self.pred_instances[scene_id]: label_id = int(self.pred_instances[scene_id][instance_id]['label_id']) conf = self.pred_instances[scene_id][instance_id]['conf'] if not label_id in self.ID_TO_LABEL: continue label_name = self.ID_TO_LABEL[label_id] # read the mask pred_mask = self.pred_instances[scene_id][instance_id]['pred_mask'] # convert to binary num = np.count_nonzero(pred_mask) if num < self.min_region_sizes[0]: continue # skip if empty pred_instance = {} pred_instance['filename'] = str(scene_id) + '/' + str(instance_id) pred_instance['pred_id'] = num_pred_instances pred_instance['label_id'] = label_id pred_instance['vert_count'] = num pred_instance['confidence'] = conf pred_instance['void_intersection'] = np.count_nonzero(np.logical_and(bool_void, pred_mask)) # matched gt instances matched_gt = [] # go thru all gt instances with matching label for (gt_num, gt_inst) in enumerate(gt2pred[label_name]): intersection = np.count_nonzero(np.logical_and(gt_ids == gt_inst['instance_id'], pred_mask)) if intersection > 0: gt_copy = gt_inst.copy() pred_copy = pred_instance.copy() gt_copy['intersection'] = intersection pred_copy['intersection'] = intersection matched_gt.append(gt_copy) gt2pred[label_name][gt_num]['matched_pred'].append(pred_copy) pred_instance['matched_gt'] = matched_gt num_pred_instances += 1 pred2gt[label_name].append(pred_instance) return gt2pred, pred2gt def print_results(self, avgs): sep = "" col1 = ":" lineLen = 64 logging.info("") logging.info("#"*lineLen) line = "" line += "{:<15}".format("what" ) + sep + col1 line += "{:>15}".format("AP" ) + sep line += "{:>15}".format("AP_50%" ) + sep line += "{:>15}".format("AP_25%" ) + sep logging.info(line) logging.info("#"*lineLen) for (li,label_name) in enumerate(self.CLASS_LABELS): ap_avg = avgs["classes"][label_name]["ap"] ap_50o = avgs["classes"][label_name]["ap50%"] ap_25o = 
avgs["classes"][label_name]["ap25%"] line = "{:<15}".format(label_name) + sep + col1 line += sep + "{:>15.3f}".format(ap_avg ) + sep line += sep + "{:>15.3f}".format(ap_50o ) + sep line += sep + "{:>15.3f}".format(ap_25o ) + sep logging.info(line) all_ap_avg = avgs["all_ap"] all_ap_50o = avgs["all_ap_50%"] all_ap_25o = avgs["all_ap_25%"] logging.info("-"*lineLen) line = "{:<15}".format("average") + sep + col1 line += "{:>15.3f}".format(all_ap_avg) + sep line += "{:>15.3f}".format(all_ap_50o) + sep line += "{:>15.3f}".format(all_ap_25o) + sep logging.info(line) logging.info("") @staticmethod def write_to_benchmark(output_path='benchmark_instance', scene_id=None, pred_inst={}): os.makedirs(output_path, exist_ok=True) os.makedirs(os.path.join(output_path, 'predicted_masks'), exist_ok=True) f = open(os.path.join(output_path, scene_id + '.txt'), 'w') for instance_id in pred_inst: # for pred instance id starts from 0; in gt valid instance id starts from 1 score = pred_inst[instance_id]['conf'] label = pred_inst[instance_id]['label_id'] mask = pred_inst[instance_id]['pred_mask'] f.write('predicted_masks/{}_{:03d}.txt {} {:.4f}'.format(scene_id, instance_id, label, score)) if instance_id < len(pred_inst) - 1: f.write('\n') util_3d.export_ids(os.path.join(output_path, 'predicted_masks', scene_id + '_%03d.txt' % (instance_id)), mask) f.close() def add_prediction(self, instance_info, id): self.pred_instances[id] = instance_info def add_gt(self, instance_info, id): self.gt_instances[id] = instance_info # see scannet repo for generating gt data for val in benchmark format def add_gt_in_benchmark_format(self, scene_id): gt_file_path = '/rhome/jhou/data/dataset/scannet/scannet_benchmark/gt_instance/' gt_file = os.path.join(gt_file_path, scene_id + '.txt') gt_ids = util_3d.load_ids(gt_file) self.add_gt(gt_ids, scene_id) def evaluate(self): print('evaluating', len(self.pred_instances), 'scans...') matches = {} for i, scene_id in enumerate(self.pred_instances): gt2pred, pred2gt = self.assign_instances_for_scan(scene_id) matches[scene_id] = {} matches[scene_id]['gt'] = gt2pred matches[scene_id]['pred'] = pred2gt sys.stdout.write("\rscans processed: {}".format(i+1)) sys.stdout.flush() print('') ap_scores = self.evaluate_matches(matches) avgs = self.compute_averages(ap_scores) # print self.print_results(avgs) return avgs['all_ap'], avgs['all_ap_50%'], avgs['all_ap_25%'] def write_result_file(avgs, filename): _SPLITTER = ',' with open(filename, 'w') as f: f.write(_SPLITTER.join(['class', 'class id', 'ap', 'ap50', 'ap25']) + '\n') for i in range(len(VALID_CLASS_IDS)): class_name = CLASS_LABELS[i] class_id = VALID_CLASS_IDS[i] ap = avgs["classes"][class_name]["ap"] ap50 = avgs["classes"][class_name]["ap50%"] ap25 = avgs["classes"][class_name]["ap25%"] f.write(_SPLITTER.join([str(x) for x in [class_name, class_id, ap, ap50, ap25]]) + '\n') def config(): parser = argparse.ArgumentParser() parser.add_argument('--pred_path', required=True, help='path to directory of predicted .txt files') parser.add_argument('--gt_path', required=True, help='path to directory of gt .txt files') parser.add_argument('--output_file', default='semantic_instance_evaluation.txt', help='output file [default: semantic_instance_evaluation.txt]') opt = parser.parse_args() return opt if __name__ == '__main__': opt = config() setup_logging() #-----------------scannet---------------------- CLASS_LABELS = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower 
curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture'] VALID_CLASS_IDS = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]) evaluator = Evaluator(CLASS_LABELS=CLASS_LABELS, VALID_CLASS_IDS=VALID_CLASS_IDS) print('reading', len(os.listdir(opt.pred_path))-1, 'scans...') for i, pred_file in enumerate(os.listdir(opt.pred_path)): if os.path.isdir(os.path.join(opt.pred_path, pred_file)): continue scene_id = pred_file[:12] sys.stdout.write("\rscans read: {}".format(i+1)) sys.stdout.flush() gt_file = os.path.join(opt.gt_path, pred_file) gt_ids = util_3d.load_ids(gt_file) evaluator.add_gt(gt_ids, scene_id) instances = util_3d.read_instance_prediction_file(os.path.join(opt.pred_path,pred_file), opt.pred_path) for pred_mask_file in instances: # read the mask pred_mask = util_3d.load_ids(pred_mask_file) instances[pred_mask_file]['pred_mask'] = pred_mask evaluator.add_prediction(instances, scene_id) print('') _, _, _ = evaluator.evaluate()
ContrastiveSceneContexts-main
downstream/insseg/datasets/evaluation/evaluate_semantic_instance.py
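The header comment above fixes the submission layout: one per-scene text file listing "relative_mask_path label_id confidence", with 0/1 per-vertex masks in a subfolder. A hedged sketch that writes one such prediction (scene id, directory names, and score are illustrative only):

import os

scene_id = 'scene0000_00'  # made-up scene id
os.makedirs('pred/predicted_masks', exist_ok=True)

# One 0/1 entry per vertex of the scene's *_vh_clean_2.ply, in vertex order.
mask = [0, 1, 1, 0]
with open('pred/predicted_masks/%s_000.txt' % scene_id, 'w') as f:
    f.write('\n'.join(str(v) for v in mask) + '\n')

# Top-level file: one "relative_mask_path label_id confidence" line per instance.
with open('pred/%s.txt' % scene_id, 'w') as f:
    f.write('predicted_masks/%s_000.txt 36 0.9210\n' % scene_id)  # 36 = bathtub above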
import os, sys import csv try: import numpy as np except: print("Failed to import numpy package.") sys.exit(-1) try: import imageio except: print("Please install the module 'imageio' for image processing, e.g.") print("pip install imageio") sys.exit(-1) # print an error message and quit def print_error(message, user_fault=False): sys.stderr.write('ERROR: ' + str(message) + '\n') if user_fault: sys.exit(2) sys.exit(-1) # if string s represents an int def represents_int(s): try: int(s) return True except ValueError: return False def read_label_mapping(filename, label_from='raw_category', label_to='nyu40id'): assert os.path.isfile(filename) mapping = dict() with open(filename) as csvfile: reader = csv.DictReader(csvfile, delimiter='\t') for row in reader: mapping[row[label_from]] = int(row[label_to]) # if ints convert if represents_int([key for key in mapping.keys()][0]): mapping = {int(k):v for k,v in mapping.items()} return mapping # input: scene_types.txt or scene_types_all.txt def read_scene_types_mapping(filename, remove_spaces=True): assert os.path.isfile(filename) mapping = dict() lines = open(filename).read().splitlines() lines = [line.split('\t') for line in lines] if remove_spaces: mapping = { x[1].strip():int(x[0]) for x in lines } else: mapping = { x[1]:int(x[0]) for x in lines } return mapping # color by label def visualize_label_image(filename, image): height = image.shape[0] width = image.shape[1] vis_image = np.zeros([height, width, 3], dtype=np.uint8) color_palette = create_color_palette() for idx, color in enumerate(color_palette): vis_image[image==idx] = color imageio.imwrite(filename, vis_image) # color by different instances (mod length of color palette) def visualize_instance_image(filename, image): height = image.shape[0] width = image.shape[1] vis_image = np.zeros([height, width, 3], dtype=np.uint8) color_palette = create_color_palette() instances = np.unique(image) for idx, inst in enumerate(instances): vis_image[image==inst] = color_palette[inst%len(color_palette)] imageio.imwrite(filename, vis_image)
ContrastiveSceneContexts-main
downstream/insseg/datasets/evaluation/scannet_benchmark_utils/util.py
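read_label_mapping above expects a tab-separated file whose header contains the label_from and label_to columns (by default 'raw_category' and 'nyu40id', e.g. ScanNet's combined label TSV). A standalone illustration using in-memory data instead of a file:

import csv
import io

tsv = "raw_category\tnyu40id\nchair\t5\noffice chair\t5\nbathtub\t36\n"
mapping = {row['raw_category']: int(row['nyu40id'])
           for row in csv.DictReader(io.StringIO(tsv), delimiter='\t')}
print(mapping)  # {'chair': 5, 'office chair': 5, 'bathtub': 36}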
import os, sys import json try: import numpy as np except: print("Failed to import numpy package.") sys.exit(-1) try: from plyfile import PlyData, PlyElement except: print("Please install the module 'plyfile' for PLY i/o, e.g.") print("pip install plyfile") sys.exit(-1) from . import util # matrix: 4x4 np array # points Nx3 np array def transform_points(matrix, points): assert len(points.shape) == 2 and points.shape[1] == 3 num_points = points.shape[0] p = np.concatenate([points, np.ones((num_points, 1))], axis=1) p = np.matmul(matrix, np.transpose(p)) p = np.transpose(p) p[:,:3] /= p[:,3,None] return p[:,:3] def export_ids(filename, ids): with open(filename, 'w') as f: for id in ids: f.write('%d\n' % id) def load_ids(filename): ids = open(filename).read().splitlines() ids = np.array(ids, dtype=np.int64) return ids def read_mesh_vertices(filename): assert os.path.isfile(filename) with open(filename, 'rb') as f: plydata = PlyData.read(f) num_verts = plydata['vertex'].count vertices = np.zeros(shape=[num_verts, 3], dtype=np.float32) vertices[:,0] = plydata['vertex'].data['x'] vertices[:,1] = plydata['vertex'].data['y'] vertices[:,2] = plydata['vertex'].data['z'] return vertices # export 3d instance labels for instance evaluation def export_instance_ids_for_eval(filename, label_ids, instance_ids): assert label_ids.shape[0] == instance_ids.shape[0] output_mask_path_relative = 'predicted_masks' name = os.path.splitext(os.path.basename(filename))[0] output_mask_path = os.path.join(os.path.dirname(filename), output_mask_path_relative) if not os.path.isdir(output_mask_path): os.mkdir(output_mask_path) insts = np.unique(instance_ids) zero_mask = np.zeros(shape=(instance_ids.shape[0]), dtype=np.int32) with open(filename, 'w') as f: for idx, inst_id in enumerate(insts): if inst_id == 0: # 0 -> no instance for this vertex continue loc = np.where(instance_ids == inst_id) label_id = label_ids[loc[0][0]] # write mask indexing output_mask_file_relavtive = os.path.join(output_mask_path_relative, name + '_' + str(idx) + '.txt') f.write('%s %d %f\n' % (output_mask_file_relavtive, label_id, 1.0)) # write mask mask = np.copy(zero_mask) mask[loc[0]] = 1 output_mask_file = os.path.join(output_mask_path, name + '_' + str(idx) + '.txt') export_ids(output_mask_file, mask) # ------------ Instance Utils ------------ # class Instance(object): instance_id = 0 label_id = 0 vert_count = 0 med_dist = -1 dist_conf = 0.0 def __init__(self, mesh_vert_instances, instance_id): if (instance_id == -1): return self.instance_id = int(instance_id) self.label_id = int(self.get_label_id(instance_id)) self.vert_count = int(self.get_instance_verts(mesh_vert_instances, instance_id)) def get_label_id(self, instance_id): return int(instance_id // 1000) def get_instance_verts(self, mesh_vert_instances, instance_id): return (mesh_vert_instances == instance_id).sum() def to_json(self): return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4) def to_dict(self): dict = {} dict["instance_id"] = self.instance_id dict["label_id"] = self.label_id dict["vert_count"] = self.vert_count dict["med_dist"] = self.med_dist dict["dist_conf"] = self.dist_conf return dict def from_json(self, data): self.instance_id = int(data["instance_id"]) self.label_id = int(data["label_id"]) self.vert_count = int(data["vert_count"]) if ("med_dist" in data): self.med_dist = float(data["med_dist"]) self.dist_conf = float(data["dist_conf"]) def __str__(self): return "("+str(self.instance_id)+")" def read_instance_prediction_file(filename, pred_path): 
lines = open(filename).read().splitlines() instance_info = {} abs_pred_path = os.path.abspath(pred_path) for line in lines: parts = line.split(' ') if len(parts) != 3: util.print_error('invalid instance prediction file. Expected (per line): [rel path prediction] [label id prediction] [confidence prediction]') if os.path.isabs(parts[0]): util.print_error('invalid instance prediction file. First entry in line must be a relative path') mask_file = os.path.join(os.path.dirname(filename), parts[0]) mask_file = os.path.abspath(mask_file) # check that mask_file lives inside prediction path if os.path.commonprefix([mask_file, abs_pred_path]) != abs_pred_path: util.print_error('predicted mask {} in prediction text file {} points outside of prediction path.'.format(mask_file,filename)) info = {} info["label_id"] = int(float(parts[1])) info["conf"] = float(parts[2]) instance_info[mask_file] = info return instance_info def get_instances(ids, class_ids, class_labels, id2label): instances = {} for label in class_labels: instances[label] = [] instance_ids = np.unique(ids) for id in instance_ids: if id == 0: continue inst = Instance(ids, id) if inst.label_id in class_ids: instances[id2label[inst.label_id]].append(inst.to_dict()) return instances
ContrastiveSceneContexts-main
downstream/insseg/datasets/evaluation/scannet_benchmark_utils/util_3d.py
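util_3d assumes the ScanNet benchmark encoding in which each ground-truth vertex id packs semantic label and instance index as label_id * 1000 + instance_index, which is why Instance recovers the class with id // 1000 and the evaluator filters out ids below 1000. A short, self-contained decoding example with made-up ids:

import numpy as np

gt_ids = np.array([5001, 5001, 5002, 8001, 0])  # two chairs (5), one door (8), one unlabeled vertex
print(gt_ids // 1000)                  # [5 5 5 8 0] -> semantic label per vertex
print(np.unique(gt_ids[gt_ids != 0]))  # [5001 5002 8001] -> three distinct instances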
# Evaluates semantic label task # Input: # - path to .txt prediction files # - path to .txt ground truth files # - output file to write results to # Note that only the valid classes are used for evaluation, # i.e., any ground truth label not in the valid label set # is ignored in the evaluation. # # example usage: evaluate_semantic_label.py --scan_path [path to scan data] --output_file [output file] # python imports import math import logging import os, sys, argparse import inspect try: import numpy as np except: print("Failed to import numpy package.") sys.exit(-1) try: from itertools import izip except ImportError: izip = zip #currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) #parentdir = os.path.dirname(currentdir) #sys.path.insert(0,parentdir) import util_3d import util class Evaluator: def __init__(self, CLASS_LABELS, VALID_CLASS_IDS): self.CLASS_LABELS = CLASS_LABELS self.VALID_CLASS_IDS = VALID_CLASS_IDS self.UNKNOWN_ID = np.max(VALID_CLASS_IDS) + 1 self.gt = {} self.pred = {} max_id = self.UNKNOWN_ID self.confusion = np.zeros((max_id+1, max_id+1), dtype=np.ulonglong) def update_confusion(self, pred_ids, gt_ids, sceneId=None): # sanity checks if not pred_ids.shape == gt_ids.shape: util.print_error('%s: number of predicted values does not match number of vertices' % pred_file, user_fault=True) n = self.confusion.shape[0] k = (gt_ids >= 0) & (gt_ids < n) temporal = np.bincount(n * gt_ids[k].astype(int) + pred_ids[k], minlength=n**2).reshape(n, n) for valid_class_row in self.VALID_CLASS_IDS: for valid_class_col in self.VALID_CLASS_IDS: self.confusion[valid_class_row][valid_class_col] += temporal[valid_class_row][valid_class_col] @staticmethod def write_to_benchmark(base='benchmark_segmentation', sceneId=None, pred_ids=None): os.makedirs(base, exist_ok=True) util_3d.export_ids('{}.txt'.format(os.path.join(base, sceneId)), pred_ids) def get_iou(self, label_id, confusion): if not label_id in self.VALID_CLASS_IDS: return float('nan') # #true positives tp = np.longlong(confusion[label_id, label_id]) # #false negatives fn = np.longlong(confusion[label_id, :].sum()) - tp # #false positives not_ignored = [l for l in self.VALID_CLASS_IDS if not l == label_id] fp = np.longlong(confusion[not_ignored, label_id].sum()) denom = (tp + fp + fn) if denom == 0: return float('nan') return (float(tp) / denom, tp, denom) def write_result_file(self, confusion, ious, filename): with open(filename, 'w') as f: f.write('iou scores\n') for i in range(len(self.VALID_CLASS_IDS)): label_id = self.VALID_CLASS_IDS[i] label_name = self.CLASS_LABELS[i] iou = ious[label_name][0] f.write('{0:<14s}({1:<2d}): {2:>5.3f}\n'.format(label_name, label_id, iou)) f.write("{0:<14s}: {1:>5.3f}".format('mean', np.array([ious[k][0] for k in ious]).mean())) f.write('\nconfusion matrix\n') f.write('\t\t\t') for i in range(len(self.VALID_CLASS_IDS)): #f.write('\t{0:<14s}({1:<2d})'.format(CLASS_LABELS[i], VALID_CLASS_IDS[i])) f.write('{0:<8d}'.format(self.VALID_CLASS_IDS[i])) f.write('\n') for r in range(len(self.VALID_CLASS_IDS)): f.write('{0:<14s}({1:<2d})'.format(self.CLASS_LABELS[r], self.VALID_CLASS_IDS[r])) for c in range(len(self.VALID_CLASS_IDS)): f.write('\t{0:>5.3f}'.format(confusion[self.VALID_CLASS_IDS[r],self.VALID_CLASS_IDS[c]])) f.write('\n') print('wrote results to', filename) def evaluate_confusion(self, output_file=None): class_ious = {} counter = 0 summation = 0 for i in range(len(self.VALID_CLASS_IDS)): label_name = self.CLASS_LABELS[i] label_id = self.VALID_CLASS_IDS[i] 
class_ious[label_name] = self.get_iou(label_id, self.confusion) # print logging.info('classes IoU') logging.info('----------------------------') for i in range(len(self.VALID_CLASS_IDS)): label_name = self.CLASS_LABELS[i] try: logging.info('{0:<14s}: {1:>5.3f} ({2:>6d}/{3:<6d})'.format(label_name, class_ious[label_name][0], class_ious[label_name][1], class_ious[label_name][2])) summation += class_ious[label_name][0] counter += 1 except: logging.info('{0:<14s}: nan ( nan/nan )'.format(label_name)) logging.info("{0:<14s}: {1:>5.3f}".format('mean', summation / counter)) if output_file: self.write_result_file(self.confusion, class_ious, output_file) return summation / counter def config(): parser = argparse.ArgumentParser() parser.add_argument('--pred_path', required=True, help='path to directory of predicted .txt files') parser.add_argument('--gt_path', required=True, help='path to gt files') parser.add_argument('--output_file', type=str, default='./semantic_label_evaluation.txt') opt = parser.parse_args() return opt def main(): opt = config() ch = logging.StreamHandler(sys.stdout) logging.getLogger().setLevel(logging.INFO) logging.basicConfig( format=os.uname()[1].split('.')[0] + ' %(asctime)s %(message)s', datefmt='%m/%d %H:%M:%S', handlers=[ch]) #------------------------- ScanNet -------------------------- CLASS_LABELS = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture'] VALID_CLASS_IDS = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]) evaluator = Evaluator(CLASS_LABELS=CLASS_LABELS, VALID_CLASS_IDS=VALID_CLASS_IDS) print('reading', len(os.listdir(opt.pred_path))-1, 'scans...') for i, pred_file in enumerate(os.listdir(opt.pred_path)): if pred_file == 'semantic_label_evaluation.txt': continue gt_file = os.path.join(opt.gt_path, pred_file) if not os.path.isfile(gt_file): util.print_error('Result file {} does not match any gt file'.format(pred_file), user_fault=True) gt_ids = util_3d.load_ids(gt_file) pred_file = os.path.join(opt.pred_path, pred_file) pred_ids = util_3d.load_ids(pred_file) evaluator.update_confusion(pred_ids, gt_ids, pred_file.split('.')[0]) sys.stdout.write("\rscans processed: {}".format(i+1)) sys.stdout.flush() # evaluate evaluator.evaluate_confusion(opt.output_file) if __name__ == '__main__': main()
ContrastiveSceneContexts-main
downstream/insseg/datasets/evaluation/scannet_benchmark_utils/scripts/evaluate_semantic_label.py
import os, sys import csv try: import numpy as np except: print("Failed to import numpy package.") sys.exit(-1) try: import imageio except: print("Please install the module 'imageio' for image processing, e.g.") print("pip install imageio") sys.exit(-1) # print an error message and quit def print_error(message, user_fault=False): sys.stderr.write('ERROR: ' + str(message) + '\n') if user_fault: sys.exit(2) sys.exit(-1) # if string s represents an int def represents_int(s): try: int(s) return True except ValueError: return False def read_label_mapping(filename, label_from='raw_category', label_to='nyu40id'): assert os.path.isfile(filename) mapping = dict() with open(filename) as csvfile: reader = csv.DictReader(csvfile, delimiter='\t') for row in reader: mapping[row[label_from]] = int(row[label_to]) # if ints convert if represents_int([key for key in mapping.keys()][0]): mapping = {int(k):v for k,v in mapping.items()} return mapping # input: scene_types.txt or scene_types_all.txt def read_scene_types_mapping(filename, remove_spaces=True): assert os.path.isfile(filename) mapping = dict() lines = open(filename).read().splitlines() lines = [line.split('\t') for line in lines] if remove_spaces: mapping = { x[1].strip():int(x[0]) for x in lines } else: mapping = { x[1]:int(x[0]) for x in lines } return mapping # color by label def visualize_label_image(filename, image): height = image.shape[0] width = image.shape[1] vis_image = np.zeros([height, width, 3], dtype=np.uint8) color_palette = create_color_palette() for idx, color in enumerate(color_palette): vis_image[image==idx] = color imageio.imwrite(filename, vis_image) # color by different instances (mod length of color palette) def visualize_instance_image(filename, image): height = image.shape[0] width = image.shape[1] vis_image = np.zeros([height, width, 3], dtype=np.uint8) color_palette = create_color_palette() instances = np.unique(image) for idx, inst in enumerate(instances): vis_image[image==inst] = color_palette[inst%len(color_palette)] imageio.imwrite(filename, vis_image)
ContrastiveSceneContexts-main
downstream/insseg/datasets/evaluation/scannet_benchmark_utils/scripts/util.py
import os, sys import json try: import numpy as np except: print("Failed to import numpy package.") sys.exit(-1) try: from plyfile import PlyData, PlyElement except: print("Please install the module 'plyfile' for PLY i/o, e.g.") print("pip install plyfile") sys.exit(-1) import util # matrix: 4x4 np array # points Nx3 np array def transform_points(matrix, points): assert len(points.shape) == 2 and points.shape[1] == 3 num_points = points.shape[0] p = np.concatenate([points, np.ones((num_points, 1))], axis=1) p = np.matmul(matrix, np.transpose(p)) p = np.transpose(p) p[:,:3] /= p[:,3,None] return p[:,:3] def export_ids(filename, ids): with open(filename, 'w') as f: for id in ids: f.write('%d\n' % id) def load_ids(filename): ids = open(filename).read().splitlines() ids = np.array(ids, dtype=np.int64) return ids def read_mesh_vertices(filename): assert os.path.isfile(filename) with open(filename, 'rb') as f: plydata = PlyData.read(f) num_verts = plydata['vertex'].count vertices = np.zeros(shape=[num_verts, 3], dtype=np.float32) vertices[:,0] = plydata['vertex'].data['x'] vertices[:,1] = plydata['vertex'].data['y'] vertices[:,2] = plydata['vertex'].data['z'] return vertices # export 3d instance labels for instance evaluation def export_instance_ids_for_eval(filename, label_ids, instance_ids): assert label_ids.shape[0] == instance_ids.shape[0] output_mask_path_relative = 'predicted_masks' name = os.path.splitext(os.path.basename(filename))[0] output_mask_path = os.path.join(os.path.dirname(filename), output_mask_path_relative) if not os.path.isdir(output_mask_path): os.mkdir(output_mask_path) insts = np.unique(instance_ids) zero_mask = np.zeros(shape=(instance_ids.shape[0]), dtype=np.int32) with open(filename, 'w') as f: for idx, inst_id in enumerate(insts): if inst_id == 0: # 0 -> no instance for this vertex continue loc = np.where(instance_ids == inst_id) label_id = label_ids[loc[0][0]] # write mask indexing output_mask_file_relavtive = os.path.join(output_mask_path_relative, name + '_' + str(idx) + '.txt') f.write('%s %d %f\n' % (output_mask_file_relavtive, label_id, 1.0)) # write mask mask = np.copy(zero_mask) mask[loc[0]] = 1 output_mask_file = os.path.join(output_mask_path, name + '_' + str(idx) + '.txt') export_ids(output_mask_file, mask) # ------------ Instance Utils ------------ # class Instance(object): instance_id = 0 label_id = 0 vert_count = 0 med_dist = -1 dist_conf = 0.0 def __init__(self, mesh_vert_instances, instance_id): if (instance_id == -1): return self.instance_id = int(instance_id) self.label_id = int(self.get_label_id(instance_id)) self.vert_count = int(self.get_instance_verts(mesh_vert_instances, instance_id)) def get_label_id(self, instance_id): return int(instance_id // 1000) def get_instance_verts(self, mesh_vert_instances, instance_id): return (mesh_vert_instances == instance_id).sum() def to_json(self): return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4) def to_dict(self): dict = {} dict["instance_id"] = self.instance_id dict["label_id"] = self.label_id dict["vert_count"] = self.vert_count dict["med_dist"] = self.med_dist dict["dist_conf"] = self.dist_conf return dict def from_json(self, data): self.instance_id = int(data["instance_id"]) self.label_id = int(data["label_id"]) self.vert_count = int(data["vert_count"]) if ("med_dist" in data): self.med_dist = float(data["med_dist"]) self.dist_conf = float(data["dist_conf"]) def __str__(self): return "("+str(self.instance_id)+")" def read_instance_prediction_file(filename, pred_path): lines = 
open(filename).read().splitlines() instance_info = {} abs_pred_path = os.path.abspath(pred_path) for line in lines: parts = line.split(' ') if len(parts) != 3: util.print_error('invalid instance prediction file. Expected (per line): [rel path prediction] [label id prediction] [confidence prediction]') if os.path.isabs(parts[0]): util.print_error('invalid instance prediction file. First entry in line must be a relative path') mask_file = os.path.join(os.path.dirname(filename), parts[0]) mask_file = os.path.abspath(mask_file) # check that mask_file lives inside prediction path if os.path.commonprefix([mask_file, abs_pred_path]) != abs_pred_path: util.print_error('predicted mask {} in prediction text file {} points outside of prediction path.'.format(mask_file,filename)) info = {} info["label_id"] = int(float(parts[1])) info["conf"] = float(parts[2]) instance_info[mask_file] = info return instance_info def get_instances(ids, class_ids, class_labels, id2label): instances = {} for label in class_labels: instances[label] = [] instance_ids = np.unique(ids) for id in instance_ids: if id == 0: continue inst = Instance(ids, id) if inst.label_id in class_ids: instances[id2label[inst.label_id]].append(inst.to_dict()) return instances
ContrastiveSceneContexts-main
downstream/insseg/datasets/evaluation/scannet_benchmark_utils/scripts/util_3d.py
# Evaluates semantic instance task # Adapted from the CityScapes evaluation: https://github.com/mcordts/cityscapesScripts/tree/master/cityscapesscripts/evaluation # Input: # - path to .txt prediction files # - path to .txt ground truth files # - output file to write results to # Each .txt prediction file look like: # [(pred0) rel. path to pred. mask over verts as .txt] [(pred0) label id] [(pred0) confidence] # [(pred1) rel. path to pred. mask over verts as .txt] [(pred1) label id] [(pred1) confidence] # [(pred2) rel. path to pred. mask over verts as .txt] [(pred2) label id] [(pred2) confidence] # ... # # NOTE: The prediction files must live in the root of the given prediction path. # Predicted mask .txt files must live in a subfolder. # Additionally, filenames must not contain spaces. # The relative paths to predicted masks must contain one integer per line, # where each line corresponds to vertices in the *_vh_clean_2.ply (in that order). # Non-zero integers indicate part of the predicted instance. # The label ids specify the class of the corresponding mask. # Confidence is a float confidence score of the mask. # # Note that only the valid classes are used for evaluation, # i.e., any ground truth label not in the valid label set # is ignored in the evaluation. # # example usage: evaluate_semantic_instance.py --scan_path [path to scan data] --output_file [output file] # python imports import logging import math import os, sys, argparse import inspect from copy import deepcopy import argparse import numpy as np #currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) #parentdir = os.path.dirname(currentdir) #sys.path.insert(0,parentdir) import util_3d import util def setup_logging(): ch = logging.StreamHandler(sys.stdout) logging.getLogger().setLevel(logging.INFO) logging.basicConfig( format=os.uname()[1].split('.')[0] + ' %(asctime)s %(message)s', datefmt='%m/%d %H:%M:%S', handlers=[ch]) class Evaluator: # ---------- Evaluation params ---------- # # overlaps for evaluation overlaps = np.append(np.arange(0.5,0.95,0.05), 0.25) # minimum region size for evaluation [verts] min_region_sizes = np.array( [ 100 ] ) # distance thresholds [m] distance_threshes = np.array( [ float('inf') ] ) # distance confidences distance_confs = np.array( [ -float('inf') ] ) def __init__(self, CLASS_LABELS, VALID_CLASS_IDS, benchmark=False): # ---------- Label info ---------- # #CLASS_LABELS = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door', # 'window', 'bookshelf', 'picture', 'counter', # 'desk', 'curtain', 'refrigerator', 'shower curtain', # 'toilet', 'sink', 'bathtub', 'otherfurniture'] #VALID_CLASS_IDS = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]) self.CLASS_LABELS = CLASS_LABELS self.VALID_CLASS_IDS = VALID_CLASS_IDS self.ID_TO_LABEL = {} self.LABEL_TO_ID = {} for i in range(len(VALID_CLASS_IDS)): self.LABEL_TO_ID[CLASS_LABELS[i]] = VALID_CLASS_IDS[i] self.ID_TO_LABEL[VALID_CLASS_IDS[i]] = CLASS_LABELS[i] self.pred_instances = {} self.gt_instances = {} self.benchmark = benchmark def evaluate_matches(self, matches): # results: class x overlap ap = np.zeros( (len(self.distance_threshes) , len(self.CLASS_LABELS) , len(self.overlaps)) , np.float ) for di, (min_region_size, distance_thresh, distance_conf) in enumerate(zip(self.min_region_sizes, self.distance_threshes, self.distance_confs)): for oi, overlap_th in enumerate(self.overlaps): pred_visited = {} for m in matches: for p in matches[m]['pred']: for label_name in self.CLASS_LABELS: for p in 
matches[m]['pred'][label_name]: if 'filename' in p: pred_visited[p['filename']] = False for li, label_name in enumerate(self.CLASS_LABELS): y_true = np.empty(0) y_score = np.empty(0) hard_false_negatives = 0 has_gt = False has_pred = False for m in matches: pred_instances = matches[m]['pred'][label_name] gt_instances = matches[m]['gt'][label_name] # filter groups in ground truth gt_instances = [ gt for gt in gt_instances if gt['instance_id']>=1000 and gt['vert_count']>=min_region_size and gt['med_dist']<=distance_thresh and gt['dist_conf']>=distance_conf ] if gt_instances: has_gt = True if pred_instances: has_pred = True cur_true = np.ones ( len(gt_instances) ) cur_score = np.ones ( len(gt_instances) ) * (-float("inf")) cur_match = np.zeros( len(gt_instances) , dtype=np.bool ) # collect matches for (gti,gt) in enumerate(gt_instances): found_match = False num_pred = len(gt['matched_pred']) for pred in gt['matched_pred']: # greedy assignments if pred_visited[pred['filename']]: continue overlap = float(pred['intersection']) / (gt['vert_count']+pred['vert_count']-pred['intersection']) if overlap > overlap_th: confidence = pred['confidence'] # if already have a prediction for this gt, # the prediction with the lower score is automatically a false positive if cur_match[gti]: max_score = max( cur_score[gti] , confidence ) min_score = min( cur_score[gti] , confidence ) cur_score[gti] = max_score # append false positive cur_true = np.append(cur_true,0) cur_score = np.append(cur_score,min_score) cur_match = np.append(cur_match,True) # otherwise set score else: found_match = True cur_match[gti] = True cur_score[gti] = confidence pred_visited[pred['filename']] = True if not found_match: hard_false_negatives += 1 # remove non-matched ground truth instances cur_true = cur_true [ cur_match==True ] cur_score = cur_score[ cur_match==True ] # collect non-matched predictions as false positive for pred in pred_instances: found_gt = False for gt in pred['matched_gt']: overlap = float(gt['intersection']) / (gt['vert_count']+pred['vert_count']-gt['intersection']) if overlap > overlap_th: found_gt = True break if not found_gt: num_ignore = pred['void_intersection'] for gt in pred['matched_gt']: # group? 
if gt['instance_id'] < 1000: num_ignore += gt['intersection'] # small ground truth instances if gt['vert_count'] < min_region_size or gt['med_dist']>distance_thresh or gt['dist_conf']<distance_conf: num_ignore += gt['intersection'] proportion_ignore = float(num_ignore)/pred['vert_count'] # if not ignored append false positive if proportion_ignore <= overlap_th: cur_true = np.append(cur_true,0) confidence = pred["confidence"] cur_score = np.append(cur_score,confidence) # append to overall results y_true = np.append(y_true,cur_true) y_score = np.append(y_score,cur_score) # compute average precision if has_gt and has_pred: # compute precision recall curve first # sorting and cumsum score_arg_sort = np.argsort(y_score) y_score_sorted = y_score[score_arg_sort] y_true_sorted = y_true[score_arg_sort] y_true_sorted_cumsum = np.cumsum(y_true_sorted) # unique thresholds (thresholds,unique_indices) = np.unique( y_score_sorted , return_index=True ) num_prec_recall = len(unique_indices) + 1 # prepare precision recall num_examples = len(y_score_sorted) try: num_true_examples = y_true_sorted_cumsum[-1] except: num_true_examples = 0 precision = np.zeros(num_prec_recall) recall = np.zeros(num_prec_recall) # deal with the first point y_true_sorted_cumsum = np.append( y_true_sorted_cumsum , 0 ) # deal with remaining for idx_res,idx_scores in enumerate(unique_indices): cumsum = y_true_sorted_cumsum[idx_scores-1] tp = num_true_examples - cumsum fp = num_examples - idx_scores - tp fn = cumsum + hard_false_negatives p = float(tp)/(tp+fp) r = float(tp)/(tp+fn) precision[idx_res] = p recall [idx_res] = r # first point in curve is artificial precision[-1] = 1. recall [-1] = 0. # compute average of precision-recall curve recall_for_conv = np.copy(recall) recall_for_conv = np.append(recall_for_conv[0], recall_for_conv) recall_for_conv = np.append(recall_for_conv, 0.) 
stepWidths = np.convolve(recall_for_conv,[-0.5,0,0.5],'valid') # integrate is now simply a dot product ap_current = np.dot(precision, stepWidths) elif has_gt: ap_current = 0.0 else: ap_current = float('nan') ap[di,li,oi] = ap_current return ap def compute_averages(self, aps): d_inf = 0 o50 = np.where(np.isclose(self.overlaps,0.5)) o25 = np.where(np.isclose(self.overlaps,0.25)) oAllBut25 = np.where(np.logical_not(np.isclose(self.overlaps,0.25))) avg_dict = {} #avg_dict['all_ap'] = np.nanmean(aps[ d_inf,:,: ]) avg_dict['all_ap'] = np.nanmean(aps[ d_inf,:,oAllBut25]) avg_dict['all_ap_50%'] = np.nanmean(aps[ d_inf,:,o50]) avg_dict['all_ap_25%'] = np.nanmean(aps[ d_inf,:,o25]) avg_dict["classes"] = {} for (li,label_name) in enumerate(self.CLASS_LABELS): avg_dict["classes"][label_name] = {} #avg_dict["classes"][label_name]["ap"] = np.average(aps[ d_inf,li, :]) avg_dict["classes"][label_name]["ap"] = np.average(aps[ d_inf,li,oAllBut25]) avg_dict["classes"][label_name]["ap50%"] = np.average(aps[ d_inf,li,o50]) avg_dict["classes"][label_name]["ap25%"] = np.average(aps[ d_inf,li,o25]) return avg_dict def assign_instances_for_scan(self, scene_id): # get gt instances gt_ids = self.gt_instances[scene_id] gt_instances = util_3d.get_instances(gt_ids, self.VALID_CLASS_IDS, self.CLASS_LABELS, self.ID_TO_LABEL) # associate gt2pred = deepcopy(gt_instances) for label in gt2pred: for gt in gt2pred[label]: gt['matched_pred'] = [] pred2gt = {} for label in self.CLASS_LABELS: pred2gt[label] = [] num_pred_instances = 0 # mask of void labels in the groundtruth bool_void = np.logical_not(np.in1d(gt_ids//1000, self.VALID_CLASS_IDS)) # go thru all prediction masks for instance_id in self.pred_instances[scene_id]: label_id = int(self.pred_instances[scene_id][instance_id]['label_id']) conf = self.pred_instances[scene_id][instance_id]['conf'] if not label_id in self.ID_TO_LABEL: continue label_name = self.ID_TO_LABEL[label_id] # read the mask pred_mask = self.pred_instances[scene_id][instance_id]['pred_mask'] # convert to binary num = np.count_nonzero(pred_mask) if num < self.min_region_sizes[0]: continue # skip if empty pred_instance = {} pred_instance['filename'] = str(scene_id) + '/' + str(instance_id) pred_instance['pred_id'] = num_pred_instances pred_instance['label_id'] = label_id pred_instance['vert_count'] = num pred_instance['confidence'] = conf pred_instance['void_intersection'] = np.count_nonzero(np.logical_and(bool_void, pred_mask)) # matched gt instances matched_gt = [] # go thru all gt instances with matching label for (gt_num, gt_inst) in enumerate(gt2pred[label_name]): intersection = np.count_nonzero(np.logical_and(gt_ids == gt_inst['instance_id'], pred_mask)) if intersection > 0: gt_copy = gt_inst.copy() pred_copy = pred_instance.copy() gt_copy['intersection'] = intersection pred_copy['intersection'] = intersection matched_gt.append(gt_copy) gt2pred[label_name][gt_num]['matched_pred'].append(pred_copy) pred_instance['matched_gt'] = matched_gt num_pred_instances += 1 pred2gt[label_name].append(pred_instance) return gt2pred, pred2gt def print_results(self, avgs): sep = "" col1 = ":" lineLen = 64 logging.info("") logging.info("#"*lineLen) line = "" line += "{:<15}".format("what" ) + sep + col1 line += "{:>15}".format("AP" ) + sep line += "{:>15}".format("AP_50%" ) + sep line += "{:>15}".format("AP_25%" ) + sep logging.info(line) logging.info("#"*lineLen) for (li,label_name) in enumerate(self.CLASS_LABELS): ap_avg = avgs["classes"][label_name]["ap"] ap_50o = avgs["classes"][label_name]["ap50%"] ap_25o = 
avgs["classes"][label_name]["ap25%"] line = "{:<15}".format(label_name) + sep + col1 line += sep + "{:>15.3f}".format(ap_avg ) + sep line += sep + "{:>15.3f}".format(ap_50o ) + sep line += sep + "{:>15.3f}".format(ap_25o ) + sep logging.info(line) all_ap_avg = avgs["all_ap"] all_ap_50o = avgs["all_ap_50%"] all_ap_25o = avgs["all_ap_25%"] logging.info("-"*lineLen) line = "{:<15}".format("average") + sep + col1 line += "{:>15.3f}".format(all_ap_avg) + sep line += "{:>15.3f}".format(all_ap_50o) + sep line += "{:>15.3f}".format(all_ap_25o) + sep logging.info(line) logging.info("") @staticmethod def write_to_benchmark(output_path='benchmark_instance', scene_id=None, pred_inst={}): os.makedirs(output_path, exist_ok=True) os.makedirs(os.path.join(output_path, 'predicted_masks'), exist_ok=True) f = open(os.path.join(output_path, scene_id + '.txt'), 'w') for instance_id in pred_inst: # for pred instance id starts from 0; in gt valid instance id starts from 1 score = pred_inst[instance_id]['conf'] label = pred_inst[instance_id]['label_id'] mask = pred_inst[instance_id]['pred_mask'] f.write('predicted_masks/{}_{:03d}.txt {} {:.4f}'.format(scene_id, instance_id, label, score)) if instance_id < len(pred_inst) - 1: f.write('\n') util_3d.export_ids(os.path.join(output_path, 'predicted_masks', scene_id + '_%03d.txt' % (instance_id)), mask) f.close() def add_prediction(self, instance_info, id): self.pred_instances[id] = instance_info def add_gt(self, instance_info, id): self.gt_instances[id] = instance_info def evaluate(self): print('evaluating', len(self.pred_instances), 'scans...') matches = {} for i, scene_id in enumerate(self.pred_instances): gt2pred, pred2gt = self.assign_instances_for_scan(scene_id) matches[scene_id] = {} matches[scene_id]['gt'] = gt2pred matches[scene_id]['pred'] = pred2gt sys.stdout.write("\rscans processed: {}".format(i+1)) sys.stdout.flush() print('') ap_scores = self.evaluate_matches(matches) avgs = self.compute_averages(ap_scores) # print self.print_results(avgs) return avgs['all_ap'], avgs['all_ap_50%'], avgs['all_ap_25%'] def write_result_file(avgs, filename): _SPLITTER = ',' with open(filename, 'w') as f: f.write(_SPLITTER.join(['class', 'class id', 'ap', 'ap50', 'ap25']) + '\n') for i in range(len(VALID_CLASS_IDS)): class_name = CLASS_LABELS[i] class_id = VALID_CLASS_IDS[i] ap = avgs["classes"][class_name]["ap"] ap50 = avgs["classes"][class_name]["ap50%"] ap25 = avgs["classes"][class_name]["ap25%"] f.write(_SPLITTER.join([str(x) for x in [class_name, class_id, ap, ap50, ap25]]) + '\n') def config(): parser = argparse.ArgumentParser() parser.add_argument('--pred_path', required=True, help='path to directory of predicted .txt files') parser.add_argument('--gt_path', required=True, help='path to directory of gt .txt files') parser.add_argument('--output_file', default='semantic_instance_evaluation.txt', help='output file [default: semantic_instance_evaluation.txt]') opt = parser.parse_args() return opt if __name__ == '__main__': opt = config() setup_logging() #-----------------scannet---------------------- CLASS_LABELS = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture'] VALID_CLASS_IDS = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]) evaluator = Evaluator(CLASS_LABELS=CLASS_LABELS, VALID_CLASS_IDS=VALID_CLASS_IDS) print('reading', len(os.listdir(opt.pred_path))-1, 'scans...') for i, pred_file in 
enumerate(os.listdir(opt.pred_path)): if os.path.isdir(os.path.join(opt.pred_path, pred_file)): continue scene_id = pred_file[:12] sys.stdout.write("\rscans read: {}".format(i+1)) sys.stdout.flush() gt_file = os.path.join(opt.gt_path, pred_file) gt_ids = util_3d.load_ids(gt_file) evaluator.add_gt(gt_ids, scene_id) instances = util_3d.read_instance_prediction_file(os.path.join(opt.pred_path,pred_file), opt.pred_path) for pred_mask_file in instances: # read the mask pred_mask = util_3d.load_ids(pred_mask_file) instances[pred_mask_file]['pred_mask'] = pred_mask evaluator.add_prediction(instances, scene_id) print('') _, _, _ = evaluator.evaluate()
ContrastiveSceneContexts-main
downstream/insseg/datasets/evaluation/scannet_benchmark_utils/scripts/evaluate_semantic_instance.py
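The AP integration at the top of the evaluation script above reduces a precision/recall curve to a dot product between precision values and centered recall step widths. Below is a minimal numpy sketch of that step on a toy curve; the padding of `recall_for_conv` (repeat the first recall value, append a trailing zero) is an assumption chosen to mirror what the `[-0.5, 0, 0.5]` convolution implies, not copied from the script.

import numpy as np

# Toy precision/recall curve in the same orientation the script uses:
# recall decreases to 0 and the artificial last point has precision 1.
recall = np.array([1.0, 0.8, 0.5, 0.2, 0.0])
precision = np.array([0.6, 0.7, 0.8, 1.0, 1.0])

# Pad so every precision value gets a centered recall step width (assumed padding).
recall_for_conv = np.concatenate(([recall[0]], recall, [0.0]))
step_widths = np.convolve(recall_for_conv, [-0.5, 0, 0.5], 'valid')  # 0.5*(r[i-1] - r[i+1])

# Integration is now simply a dot product, exactly as in the script.
ap = np.dot(precision, step_widths)
print(step_widths.sum(), ap)  # step widths sum to 1.0; ap == 0.825 for this toy curve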
import random from torch.nn import Module from MinkowskiEngine import SparseTensor class Wrapper(Module): """ Wrapper for the segmentation networks. """ OUT_PIXEL_DIST = -1 def __init__(self, NetClass, in_nchannel, out_nchannel, config): super(Wrapper, self).__init__() self.initialize_filter(NetClass, in_nchannel, out_nchannel, config) def initialize_filter(self, NetClass, in_nchannel, out_nchannel, config): raise NotImplementedError('Must initialize a model and a filter') def forward(self, x, coords, colors=None): soutput = self.model(x) # During training, make the network invariant to the filter if not self.training or random.random() < 0.5: # Filter requires the model to finish the forward pass wrapper_coords = self.filter.initialize_coords(self.model, coords, colors) finput = SparseTensor(soutput.F, wrapper_coords) soutput = self.filter(finput) return soutput
ContrastiveSceneContexts-main
downstream/insseg/models/wrapper.py
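Wrapper above leaves self.model and self.filter to be created by a subclass's initialize_filter. A minimal, hypothetical subclass sketch follows; FilteredSegNet and IdentityFilter are illustrative names, not classes from this repository, and the import path is assumed from the file location above.

import torch.nn as nn
from models.wrapper import Wrapper


class IdentityFilter(nn.Module):
    """Placeholder filter: keeps the incoming coordinates and passes features through."""

    def initialize_coords(self, model, coords, colors):
        # Wrapper.forward feeds whatever this returns into SparseTensor(soutput.F, ...)
        return coords

    def forward(self, x):
        return x


class FilteredSegNet(Wrapper):
    def initialize_filter(self, NetClass, in_nchannel, out_nchannel, config):
        # Wrapper.forward expects exactly these two attributes to exist.
        self.model = NetClass(in_nchannel, out_nchannel, config)
        self.filter = IdentityFilter()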
from models.resnet import ResNetBase, get_norm from models.modules.common import ConvType, NormType, conv, conv_tr from models.modules.resnet_block import BasicBlock, BasicBlockINBN, Bottleneck import torch.nn as nn import MinkowskiEngine as ME from MinkowskiEngine import MinkowskiReLU import MinkowskiEngine.MinkowskiOps as me class MinkUNetBase(ResNetBase): BLOCK = None PLANES = (64, 128, 256, 512, 256, 128, 128) DILATIONS = (1, 1, 1, 1, 1, 1) LAYERS = (2, 2, 2, 2, 2, 2) INIT_DIM = 64 OUT_PIXEL_DIST = 1 NORM_TYPE = NormType.BATCH_NORM NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS # To use the model, must call initialize_coords before forward pass. # Once data is processed, call clear to reset the model before calling initialize_coords def __init__(self, in_channels, out_channels, config, D=3, **kwargs): super(MinkUNetBase, self).__init__(in_channels, out_channels, config, D) def network_initialization(self, in_channels, out_channels, config, D): # Setup net_metadata dilations = self.DILATIONS bn_momentum = config.bn_momentum def space_n_time_m(n, m): return n if D == 3 else [n, n, n, m] if D == 4: self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1) # Output of the first conv concated to conv6 self.inplanes = self.INIT_DIM self.conv1p1s1 = conv( in_channels, self.inplanes, kernel_size=space_n_time_m(config.conv1_kernel_size, 1), stride=1, dilation=1, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bn1 = get_norm(self.NORM_TYPE, self.PLANES[0], D, bn_momentum=bn_momentum) self.block1 = self._make_layer( self.BLOCK, self.PLANES[0], self.LAYERS[0], dilation=dilations[0], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.conv2p1s2 = conv( self.inplanes, self.inplanes, kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), dilation=1, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bn2 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum) self.block2 = self._make_layer( self.BLOCK, self.PLANES[1], self.LAYERS[1], dilation=dilations[1], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.conv3p2s2 = conv( self.inplanes, self.inplanes, kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), dilation=1, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bn3 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum) self.block3 = self._make_layer( self.BLOCK, self.PLANES[2], self.LAYERS[2], dilation=dilations[2], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.conv4p4s2 = conv( self.inplanes, self.inplanes, kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), dilation=1, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bn4 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum) self.block4 = self._make_layer( self.BLOCK, self.PLANES[3], self.LAYERS[3], dilation=dilations[3], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.convtr4p8s2 = conv_tr( self.inplanes, self.PLANES[4], kernel_size=space_n_time_m(2, 1), upsample_stride=space_n_time_m(2, 1), dilation=1, bias=False, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bntr4 = get_norm(self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum) self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion self.block5 = self._make_layer( self.BLOCK, self.PLANES[4], self.LAYERS[4], dilation=dilations[4], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.convtr5p4s2 = conv_tr( self.inplanes, self.PLANES[5], kernel_size=space_n_time_m(2, 1), upsample_stride=space_n_time_m(2, 1), dilation=1, 
bias=False, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bntr5 = get_norm(self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum) self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion self.block6 = self._make_layer( self.BLOCK, self.PLANES[5], self.LAYERS[5], dilation=dilations[5], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.convtr6p2s2 = conv_tr( self.inplanes, self.PLANES[6], kernel_size=space_n_time_m(2, 1), upsample_stride=space_n_time_m(2, 1), dilation=1, bias=False, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bntr6 = get_norm(self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum) self.relu = MinkowskiReLU(inplace=True) self.final = nn.Sequential( conv( self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion, 512, kernel_size=1, stride=1, dilation=1, bias=False, D=D), ME.MinkowskiBatchNorm(512), ME.MinkowskiReLU(), conv(512, out_channels, kernel_size=1, stride=1, dilation=1, bias=True, D=D)) def forward(self, x): out = self.conv1p1s1(x) out = self.bn1(out) out = self.relu(out) out_b1p1 = self.block1(out) out = self.conv2p1s2(out_b1p1) out = self.bn2(out) out = self.relu(out) out_b2p2 = self.block2(out) out = self.conv3p2s2(out_b2p2) out = self.bn3(out) out = self.relu(out) out_b3p4 = self.block3(out) out = self.conv4p4s2(out_b3p4) out = self.bn4(out) out = self.relu(out) # pixel_dist=8 out = self.block4(out) out = self.convtr4p8s2(out) out = self.bntr4(out) out = self.relu(out) out = me.cat(out, out_b3p4) out = self.block5(out) out = self.convtr5p4s2(out) out = self.bntr5(out) out = self.relu(out) out = me.cat(out, out_b2p2) out = self.block6(out) out = self.convtr6p2s2(out) out = self.bntr6(out) out = self.relu(out) out = me.cat(out, out_b1p1) return self.final(out) class ResUNet14(MinkUNetBase): BLOCK = BasicBlock LAYERS = (1, 1, 1, 1, 1, 1) class ResUNet18(MinkUNetBase): BLOCK = BasicBlock LAYERS = (2, 2, 2, 2, 2, 2) class ResUNet18INBN(ResUNet18): NORM_TYPE = NormType.INSTANCE_BATCH_NORM BLOCK = BasicBlockINBN class ResUNet34(MinkUNetBase): BLOCK = BasicBlock LAYERS = (3, 4, 6, 3, 2, 2) class ResUNet50(MinkUNetBase): BLOCK = Bottleneck LAYERS = (3, 4, 6, 3, 2, 2) class ResUNet101(MinkUNetBase): BLOCK = Bottleneck LAYERS = (3, 4, 23, 3, 2, 2) class ResUNet14D(ResUNet14): PLANES = (64, 128, 256, 512, 512, 512, 512) class ResUNet18D(ResUNet18): PLANES = (64, 128, 256, 512, 512, 512, 512) class ResUNet34D(ResUNet34): PLANES = (64, 128, 256, 512, 512, 512, 512) class ResUNet34E(ResUNet34): INIT_DIM = 32 PLANES = (32, 64, 128, 256, 128, 64, 64) class ResUNet34F(ResUNet34): INIT_DIM = 32 PLANES = (32, 64, 128, 256, 128, 64, 32) class MinkUNetHyper(MinkUNetBase): BLOCK = None PLANES = (64, 128, 256, 512, 256, 128, 128) DILATIONS = (1, 1, 1, 1, 1, 1) LAYERS = (2, 2, 2, 2, 2, 2) INIT_DIM = 64 OUT_PIXEL_DIST = 1 NORM_TYPE = NormType.BATCH_NORM NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS # To use the model, must call initialize_coords before forward pass. 
# Once data is processed, call clear to reset the model before calling initialize_coords def __init__(self, in_channels, out_channels, config, D=3, **kwargs): super(MinkUNetBase, self).__init__(in_channels, out_channels, config, D) def network_initialization(self, in_channels, out_channels, config, D): # Setup net_metadata dilations = self.DILATIONS bn_momentum = config.bn_momentum def space_n_time_m(n, m): return n if D == 3 else [n, n, n, m] if D == 4: self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1) # Output of the first conv concated to conv6 self.inplanes = self.INIT_DIM self.conv1p1s1 = conv( in_channels, self.inplanes, kernel_size=space_n_time_m(config.conv1_kernel_size, 1), stride=1, dilation=1, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bn1 = get_norm(self.NORM_TYPE, self.PLANES[0], D, bn_momentum=bn_momentum) self.block1 = self._make_layer( self.BLOCK, self.PLANES[0], self.LAYERS[0], dilation=dilations[0], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.conv2p1s2 = conv( self.inplanes, self.inplanes, kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), dilation=1, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bn2 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum) self.block2 = self._make_layer( self.BLOCK, self.PLANES[1], self.LAYERS[1], dilation=dilations[1], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.conv3p2s2 = conv( self.inplanes, self.inplanes, kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), dilation=1, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bn3 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum) self.block3 = self._make_layer( self.BLOCK, self.PLANES[2], self.LAYERS[2], dilation=dilations[2], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.conv4p4s2 = conv( self.inplanes, self.inplanes, kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), dilation=1, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bn4 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum) self.block4 = self._make_layer( self.BLOCK, self.PLANES[3], self.LAYERS[3], dilation=dilations[3], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.pool_tr4 = ME.MinkowskiPoolingTranspose(kernel_size=8, stride=8, dimension=D) out_pool4 = self.inplanes self.convtr4p8s2 = conv_tr( self.inplanes, self.PLANES[4], kernel_size=space_n_time_m(2, 1), upsample_stride=space_n_time_m(2, 1), dilation=1, bias=False, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bntr4 = get_norm(self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum) self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion self.block5 = self._make_layer( self.BLOCK, self.PLANES[4], self.LAYERS[4], dilation=dilations[4], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.pool_tr5 = ME.MinkowskiPoolingTranspose(kernel_size=4, stride=4, dimension=D) out_pool5 = self.inplanes self.convtr5p4s2 = conv_tr( self.inplanes, self.PLANES[5], kernel_size=space_n_time_m(2, 1), upsample_stride=space_n_time_m(2, 1), dilation=1, bias=False, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bntr5 = get_norm(self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum) self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion self.block6 = self._make_layer( self.BLOCK, self.PLANES[5], self.LAYERS[5], dilation=dilations[5], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.pool_tr6 = ME.MinkowskiPoolingTranspose(kernel_size=2, stride=2, dimension=D) out_pool6 = self.inplanes self.convtr6p2s2 = conv_tr( 
self.inplanes, self.PLANES[6], kernel_size=space_n_time_m(2, 1), upsample_stride=space_n_time_m(2, 1), dilation=1, bias=False, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bntr6 = get_norm(self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum) self.relu = MinkowskiReLU(inplace=True) self.final = nn.Sequential( conv( out_pool5 + out_pool6 + self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion, 512, kernel_size=1, bias=False, D=D), ME.MinkowskiBatchNorm(512), ME.MinkowskiReLU(), conv(512, out_channels, kernel_size=1, bias=True, D=D)) def forward(self, x): out = self.conv1p1s1(x) out = self.bn1(out) out = self.relu(out) out_b1p1 = self.block1(out) out = self.conv2p1s2(out_b1p1) out = self.bn2(out) out = self.relu(out) out_b2p2 = self.block2(out) out = self.conv3p2s2(out_b2p2) out = self.bn3(out) out = self.relu(out) out_b3p4 = self.block3(out) out = self.conv4p4s2(out_b3p4) out = self.bn4(out) out = self.relu(out) # pixel_dist=8 out = self.block4(out) out = self.convtr4p8s2(out) out = self.bntr4(out) out = self.relu(out) out = me.cat(out, out_b3p4) out = self.block5(out) out_5 = self.pool_tr5(out) out = self.convtr5p4s2(out) out = self.bntr5(out) out = self.relu(out) out = me.cat(out, out_b2p2) out = self.block6(out) out_6 = self.pool_tr6(out) out = self.convtr6p2s2(out) out = self.bntr6(out) out = self.relu(out) out = me.cat(out, out_b1p1, out_6, out_5) return self.final(out) class MinkUNetHyper14INBN(MinkUNetHyper): NORM_TYPE = NormType.INSTANCE_BATCH_NORM BLOCK = BasicBlockINBN class STMinkUNetBase(MinkUNetBase): CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS def __init__(self, in_channels, out_channels, config, D=4, **kwargs): super(STMinkUNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs) class STResUNet14(STMinkUNetBase, ResUNet14): pass class STResUNet18(STMinkUNetBase, ResUNet18): pass class STResUNet34(STMinkUNetBase, ResUNet34): pass class STResUNet50(STMinkUNetBase, ResUNet50): pass class STResUNet101(STMinkUNetBase, ResUNet101): pass class STResTesseractUNetBase(STMinkUNetBase): CONV_TYPE = ConvType.HYPERCUBE class STResTesseractUNet14(STResTesseractUNetBase, ResUNet14): pass class STResTesseractUNet18(STResTesseractUNetBase, ResUNet18): pass class STResTesseractUNet34(STResTesseractUNetBase, ResUNet34): pass class STResTesseractUNet50(STResTesseractUNetBase, ResUNet50): pass class STResTesseractUNet101(STResTesseractUNetBase, ResUNet101): pass
ContrastiveSceneContexts-main
downstream/insseg/models/resunet.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import models.resunet as resunet
import models.res16unet as res16unet

MODELS = []


def add_models(module):
    MODELS.extend([getattr(module, a) for a in dir(module) if 'Net' in a])


add_models(resunet)
add_models(res16unet)


def get_models():
    '''Returns the list of registered model classes.'''
    return MODELS


def load_model(name):
    '''Returns the model class (not an instance) given its class name.'''
    # Find the model class from its name
    all_models = get_models()
    mdict = {model.__name__: model for model in all_models}
    if name not in mdict:
        print('Invalid model name. Options are:')
        # Display a list of valid model names
        for model in all_models:
            print('\t* {}'.format(model.__name__))
        return None
    NetClass = mdict[name]
    return NetClass
ContrastiveSceneContexts-main
downstream/insseg/models/__init__.py
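load_model above returns the class itself rather than an instance; instantiation is left to the caller (the trainer later calls NetClass(num_in_channel, num_labels, config)). A short usage sketch, using one of the classes registered from res16unet.py:

from models import load_model

NetClass = load_model('Res16UNet34C')   # -> the class, or None (valid names are printed)
# the caller then instantiates it, e.g. model = NetClass(num_in_channel, num_labels, config)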
from models.resnet import ResNetBase, get_norm from models.modules.common import ConvType, NormType, conv, conv_tr from models.modules.resnet_block import BasicBlock, Bottleneck from MinkowskiEngine import MinkowskiReLU, SparseTensor import MinkowskiEngine.MinkowskiOps as me class Res16UNetBase(ResNetBase): BLOCK = None PLANES = (32, 64, 128, 256, 256, 256, 256, 256) DILATIONS = (1, 1, 1, 1, 1, 1, 1, 1) LAYERS = (2, 2, 2, 2, 2, 2, 2, 2) INIT_DIM = 32 OUT_PIXEL_DIST = 1 NORM_TYPE = NormType.BATCH_NORM NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS # To use the model, must call initialize_coords before forward pass. # Once data is processed, call clear to reset the model before calling initialize_coords def __init__(self, in_channels, out_channels, config, D=3, **kwargs): super(Res16UNetBase, self).__init__(in_channels, out_channels, config, D) def network_initialization(self, in_channels, out_channels, config, D): # Setup net_metadata dilations = self.DILATIONS bn_momentum = config.optimizer.bn_momentum def space_n_time_m(n, m): return n if D == 3 else [n, n, n, m] if D == 4: self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1) # Output of the first conv concated to conv6 self.inplanes = self.INIT_DIM print("building model, ", in_channels) self.conv0p1s1 = conv( in_channels, self.inplanes, kernel_size=space_n_time_m(config.net.conv1_kernel_size, 1), stride=1, dilation=1, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bn0 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum) self.conv1p1s2 = conv( self.inplanes, self.inplanes, kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), dilation=1, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bn1 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum) self.block1 = self._make_layer( self.BLOCK, self.PLANES[0], self.LAYERS[0], dilation=dilations[0], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.conv2p2s2 = conv( self.inplanes, self.inplanes, kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), dilation=1, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bn2 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum) self.block2 = self._make_layer( self.BLOCK, self.PLANES[1], self.LAYERS[1], dilation=dilations[1], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.conv3p4s2 = conv( self.inplanes, self.inplanes, kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), dilation=1, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bn3 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum) self.block3 = self._make_layer( self.BLOCK, self.PLANES[2], self.LAYERS[2], dilation=dilations[2], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.conv4p8s2 = conv( self.inplanes, self.inplanes, kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), dilation=1, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bn4 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum) self.block4 = self._make_layer( self.BLOCK, self.PLANES[3], self.LAYERS[3], dilation=dilations[3], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.convtr4p16s2 = conv_tr( self.inplanes, self.PLANES[4], kernel_size=space_n_time_m(2, 1), upsample_stride=space_n_time_m(2, 1), dilation=1, bias=False, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bntr4 = get_norm(self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum) self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion self.block5 = 
self._make_layer( self.BLOCK, self.PLANES[4], self.LAYERS[4], dilation=dilations[4], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.convtr5p8s2 = conv_tr( self.inplanes, self.PLANES[5], kernel_size=space_n_time_m(2, 1), upsample_stride=space_n_time_m(2, 1), dilation=1, bias=False, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bntr5 = get_norm(self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum) self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion self.block6 = self._make_layer( self.BLOCK, self.PLANES[5], self.LAYERS[5], dilation=dilations[5], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.convtr6p4s2 = conv_tr( self.inplanes, self.PLANES[6], kernel_size=space_n_time_m(2, 1), upsample_stride=space_n_time_m(2, 1), dilation=1, bias=False, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bntr6 = get_norm(self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum) self.inplanes = self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion self.block7 = self._make_layer( self.BLOCK, self.PLANES[6], self.LAYERS[6], dilation=dilations[6], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.convtr7p2s2 = conv_tr( self.inplanes, self.PLANES[7], kernel_size=space_n_time_m(2, 1), upsample_stride=space_n_time_m(2, 1), dilation=1, bias=False, conv_type=self.NON_BLOCK_CONV_TYPE, D=D) self.bntr7 = get_norm(self.NORM_TYPE, self.PLANES[7], D, bn_momentum=bn_momentum) self.inplanes = self.PLANES[7] + self.INIT_DIM self.block8 = self._make_layer( self.BLOCK, self.PLANES[7], self.LAYERS[7], dilation=dilations[7], norm_type=self.NORM_TYPE, bn_momentum=bn_momentum) self.final = conv(self.PLANES[7], out_channels, kernel_size=1, stride=1, bias=True, D=D) self.relu = MinkowskiReLU(inplace=True) self.offsets_pre = conv(self.inplanes, self.inplanes, kernel_size=1, stride=1, bias=True, D=D) self.bntr_offset = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum) self.offsets = conv(self.inplanes, 3, kernel_size=1, stride=1, bias=True, D=D) def forward(self, x, detach=False): out = self.conv0p1s1(x) out = self.bn0(out) out_p1 = self.relu(out) out = self.conv1p1s2(out_p1) out = self.bn1(out) out = self.relu(out) out_b1p2 = self.block1(out) out = self.conv2p2s2(out_b1p2) out = self.bn2(out) out = self.relu(out) out_b2p4 = self.block2(out) out = self.conv3p4s2(out_b2p4) out = self.bn3(out) out = self.relu(out) out_b3p8 = self.block3(out) # pixel_dist=16 out = self.conv4p8s2(out_b3p8) out = self.bn4(out) out = self.relu(out) out = self.block4(out) # pixel_dist=8 out = self.convtr4p16s2(out) out = self.bntr4(out) out = self.relu(out) out = me.cat(out, out_b3p8) out = self.block5(out) # pixel_dist=4 out = self.convtr5p8s2(out) out = self.bntr5(out) out = self.relu(out) out = me.cat(out, out_b2p4) out = self.block6(out) # pixel_dist=2 out = self.convtr6p4s2(out) out = self.bntr6(out) out = self.relu(out) out = me.cat(out, out_b1p2) out = self.block7(out) # pixel_dist=1 out = self.convtr7p2s2(out) out = self.bntr7(out) out = self.relu(out) out = me.cat(out, out_p1) out = self.block8(out) offsets = self.offsets_pre(out) offsets = self.bntr_offset(offsets) offsets = self.relu(offsets) offsets = self.offsets(offsets) return offsets, self.final(out), out class Res16UNet14(Res16UNetBase): BLOCK = BasicBlock LAYERS = (1, 1, 1, 1, 1, 1, 1, 1) class Res16UNet18(Res16UNetBase): BLOCK = BasicBlock LAYERS = (2, 2, 2, 2, 2, 2, 2, 2) class Res16UNet34(Res16UNetBase): BLOCK = BasicBlock LAYERS = (2, 3, 4, 6, 2, 2, 2, 2) class Res16UNet50(Res16UNetBase): BLOCK = Bottleneck LAYERS = (2, 3, 
4, 6, 2, 2, 2, 2) class Res16UNet101(Res16UNetBase): BLOCK = Bottleneck LAYERS = (2, 3, 4, 23, 2, 2, 2, 2) class Res16UNet14A(Res16UNet14): PLANES = (32, 64, 128, 256, 128, 128, 96, 96) class Res16UNet14A2(Res16UNet14A): LAYERS = (1, 1, 1, 1, 2, 2, 2, 2) class Res16UNet14B(Res16UNet14): PLANES = (32, 64, 128, 256, 128, 128, 128, 128) class Res16UNet14B2(Res16UNet14B): LAYERS = (1, 1, 1, 1, 2, 2, 2, 2) class Res16UNet14B3(Res16UNet14B): LAYERS = (2, 2, 2, 2, 1, 1, 1, 1) class Res16UNet14C(Res16UNet14): PLANES = (32, 64, 128, 256, 192, 192, 128, 128) class Res16UNet14D(Res16UNet14): PLANES = (32, 64, 128, 256, 384, 384, 384, 384) class Res16UNet18A(Res16UNet18): PLANES = (32, 64, 128, 256, 128, 128, 96, 96) class Res16UNet18B(Res16UNet18): PLANES = (32, 64, 128, 256, 128, 128, 128, 128) class Res16UNet18D(Res16UNet18): PLANES = (32, 64, 128, 256, 384, 384, 384, 384) class Res16UNet34A(Res16UNet34): PLANES = (32, 64, 128, 256, 256, 128, 64, 64) class Res16UNet34B(Res16UNet34): PLANES = (32, 64, 128, 256, 256, 128, 64, 32) class Res16UNet34C(Res16UNet34): PLANES = (32, 64, 128, 256, 256, 128, 96, 96) class STRes16UNetBase(Res16UNetBase): CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS def __init__(self, in_channels, out_channels, config, D=4, **kwargs): super(STRes16UNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs) class STRes16UNet14(STRes16UNetBase, Res16UNet14): pass class STRes16UNet14A(STRes16UNetBase, Res16UNet14A): pass class STRes16UNet18(STRes16UNetBase, Res16UNet18): pass class STRes16UNet34(STRes16UNetBase, Res16UNet34): pass class STRes16UNet50(STRes16UNetBase, Res16UNet50): pass class STRes16UNet101(STRes16UNetBase, Res16UNet101): pass class STRes16UNet18A(STRes16UNet18): PLANES = (32, 64, 128, 256, 128, 128, 96, 96) class STResTesseract16UNetBase(STRes16UNetBase): CONV_TYPE = ConvType.HYPERCUBE class STResTesseract16UNet18A(STRes16UNet18A, STResTesseract16UNetBase): pass
ContrastiveSceneContexts-main
downstream/insseg/models/res16unet.py
from MinkowskiEngine import MinkowskiNetwork


class Model(MinkowskiNetwork):
    """
    Base network for all sparse convnets.

    By default, all networks are segmentation networks.
    """
    OUT_PIXEL_DIST = -1

    def __init__(self, in_channels, out_channels, config, D, **kwargs):
        super(Model, self).__init__(D)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.config = config


class HighDimensionalModel(Model):
    """
    Base network for all spatio(-temporal)-chromatic sparse convnets.
    """

    def __init__(self, in_channels, out_channels, config, D, **kwargs):
        assert D > 4, "Num dimension must be at least 5"
        super(HighDimensionalModel, self).__init__(in_channels, out_channels, config, D, **kwargs)
ContrastiveSceneContexts-main
downstream/insseg/models/model.py
import torch.nn as nn from models.common import get_norm import MinkowskiEngine as ME import MinkowskiEngine.MinkowskiFunctional as MEF class BasicBlockBase(nn.Module): expansion = 1 NORM_TYPE = 'BN' def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, bn_momentum=0.1, D=3): super(BasicBlockBase, self).__init__() self.conv1 = ME.MinkowskiConvolution( inplanes, planes, kernel_size=3, stride=stride, dimension=D) self.norm1 = get_norm(self.NORM_TYPE, planes, bn_momentum=bn_momentum, D=D) self.conv2 = ME.MinkowskiConvolution( planes, planes, kernel_size=3, stride=1, dilation=dilation, has_bias=False, dimension=D) self.norm2 = get_norm(self.NORM_TYPE, planes, bn_momentum=bn_momentum, D=D) self.downsample = downsample def forward(self, x): residual = x out = self.conv1(x) out = self.norm1(out) out = MEF.relu(out) out = self.conv2(out) out = self.norm2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = MEF.relu(out) return out class BasicBlockBN(BasicBlockBase): NORM_TYPE = 'BN' class BasicBlockIN(BasicBlockBase): NORM_TYPE = 'IN' def get_block(norm_type, inplanes, planes, stride=1, dilation=1, downsample=None, bn_momentum=0.1, D=3): if norm_type == 'BN': return BasicBlockBN(inplanes, planes, stride, dilation, downsample, bn_momentum, D) elif norm_type == 'IN': return BasicBlockIN(inplanes, planes, stride, dilation, downsample, bn_momentum, D) else: raise ValueError(f'Type {norm_type}, not defined')
ContrastiveSceneContexts-main
downstream/insseg/models/residual_block.py
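get_block above is a small string-keyed factory over the two norm flavours. A usage sketch; channel sizes are arbitrary and the import path is assumed from the file location:

from models.residual_block import get_block

bn_block = get_block('BN', inplanes=32, planes=32, D=3)   # BasicBlockBN
in_block = get_block('IN', inplanes=32, planes=32, D=3)   # BasicBlockIN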
import MinkowskiEngine as ME


def get_norm(norm_type, num_feats, bn_momentum=0.05, D=-1):
    if norm_type == 'BN':
        return ME.MinkowskiBatchNorm(num_feats, momentum=bn_momentum)
    elif norm_type == 'IN':
        return ME.MinkowskiInstanceNorm(num_feats, dimension=D)
    else:
        raise ValueError(f'Norm type {norm_type} not defined')
ContrastiveSceneContexts-main
downstream/insseg/models/common.py
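The same pattern applies to the norm factory in models/common.py above; note that D is only consulted for the instance-norm branch. A short usage sketch with arbitrary channel counts:

from models.common import get_norm

bn = get_norm('BN', 64)           # MinkowskiBatchNorm(64, momentum=0.05)
inorm = get_norm('IN', 64, D=3)   # MinkowskiInstanceNorm(64, dimension=3)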
import torch.nn as nn import MinkowskiEngine as ME from models.model import Model from models.modules.common import ConvType, NormType, get_norm, conv, sum_pool from models.modules.resnet_block import BasicBlock, Bottleneck class ResNetBase(Model): BLOCK = None LAYERS = () INIT_DIM = 64 PLANES = (64, 128, 256, 512) OUT_PIXEL_DIST = 32 HAS_LAST_BLOCK = False CONV_TYPE = ConvType.HYPERCUBE def __init__(self, in_channels, out_channels, config, D=3, **kwargs): assert self.BLOCK is not None assert self.OUT_PIXEL_DIST > 0 super(ResNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs) self.network_initialization(in_channels, out_channels, config, D) self.weight_initialization() def network_initialization(self, in_channels, out_channels, config, D): def space_n_time_m(n, m): return n if D == 3 else [n, n, n, m] if D == 4: self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1) dilations = config.dilations bn_momentum = config.bn_momentum self.inplanes = self.INIT_DIM self.conv1 = conv( in_channels, self.inplanes, kernel_size=space_n_time_m(config.conv1_kernel_size, 1), stride=1, D=D) self.bn1 = get_norm(NormType.BATCH_NORM, self.inplanes, D=self.D, bn_momentum=bn_momentum) self.relu = ME.MinkowskiReLU(inplace=True) self.pool = sum_pool(kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), D=D) self.layer1 = self._make_layer( self.BLOCK, self.PLANES[0], self.LAYERS[0], stride=space_n_time_m(2, 1), dilation=space_n_time_m(dilations[0], 1)) self.layer2 = self._make_layer( self.BLOCK, self.PLANES[1], self.LAYERS[1], stride=space_n_time_m(2, 1), dilation=space_n_time_m(dilations[1], 1)) self.layer3 = self._make_layer( self.BLOCK, self.PLANES[2], self.LAYERS[2], stride=space_n_time_m(2, 1), dilation=space_n_time_m(dilations[2], 1)) self.layer4 = self._make_layer( self.BLOCK, self.PLANES[3], self.LAYERS[3], stride=space_n_time_m(2, 1), dilation=space_n_time_m(dilations[3], 1)) self.final = conv( self.PLANES[3] * self.BLOCK.expansion, out_channels, kernel_size=1, bias=True, D=D) def weight_initialization(self): for m in self.modules(): if isinstance(m, ME.MinkowskiBatchNorm): nn.init.constant_(m.bn.weight, 1) nn.init.constant_(m.bn.bias, 0) def _make_layer(self, block, planes, blocks, stride=1, dilation=1, norm_type=NormType.BATCH_NORM, bn_momentum=0.1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( conv( self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False, D=self.D), get_norm(norm_type, planes * block.expansion, D=self.D, bn_momentum=bn_momentum), ) layers = [] layers.append( block( self.inplanes, planes, stride=stride, dilation=dilation, downsample=downsample, conv_type=self.CONV_TYPE, D=self.D)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append( block( self.inplanes, planes, stride=1, dilation=dilation, conv_type=self.CONV_TYPE, D=self.D)) return nn.Sequential(*layers) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.pool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = self.final(x) return x class ResNet14(ResNetBase): BLOCK = BasicBlock LAYERS = (1, 1, 1, 1) class ResNet18(ResNetBase): BLOCK = BasicBlock LAYERS = (2, 2, 2, 2) class ResNet34(ResNetBase): BLOCK = BasicBlock LAYERS = (3, 4, 6, 3) class ResNet50(ResNetBase): BLOCK = Bottleneck LAYERS = (3, 4, 6, 3) class ResNet101(ResNetBase): BLOCK = Bottleneck LAYERS = (3, 4, 23, 3) class STResNetBase(ResNetBase): CONV_TYPE = 
ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS def __init__(self, in_channels, out_channels, config, D=4, **kwargs): super(STResNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs) class STResNet14(STResNetBase, ResNet14): pass class STResNet18(STResNetBase, ResNet18): pass class STResNet34(STResNetBase, ResNet34): pass class STResNet50(STResNetBase, ResNet50): pass class STResNet101(STResNetBase, ResNet101): pass class STResTesseractNetBase(STResNetBase): CONV_TYPE = ConvType.HYPERCUBE class STResTesseractNet14(STResTesseractNetBase, STResNet14): pass class STResTesseractNet18(STResTesseractNetBase, STResNet18): pass class STResTesseractNet34(STResTesseractNetBase, STResNet34): pass class STResTesseractNet50(STResTesseractNetBase, STResNet50): pass class STResTesseractNet101(STResTesseractNetBase, STResNet101): pass
ContrastiveSceneContexts-main
downstream/insseg/models/resnet.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn from models.modules.common import ConvType, NormType, get_norm, conv from MinkowskiEngine import MinkowskiReLU class BasicBlockBase(nn.Module): expansion = 1 NORM_TYPE = NormType.BATCH_NORM def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, conv_type=ConvType.HYPERCUBE, bn_momentum=0.1, D=3): super(BasicBlockBase, self).__init__() self.conv1 = conv( inplanes, planes, kernel_size=3, stride=stride, dilation=dilation, conv_type=conv_type, D=D) self.norm1 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum) self.conv2 = conv( planes, planes, kernel_size=3, stride=1, dilation=dilation, bias=False, conv_type=conv_type, D=D) self.norm2 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum) self.relu = MinkowskiReLU(inplace=True) self.downsample = downsample def forward(self, x): residual = x out = self.conv1(x) out = self.norm1(out) out = self.relu(out) out = self.conv2(out) out = self.norm2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class BasicBlock(BasicBlockBase): NORM_TYPE = NormType.BATCH_NORM class BasicBlockIN(BasicBlockBase): NORM_TYPE = NormType.INSTANCE_NORM class BasicBlockINBN(BasicBlockBase): NORM_TYPE = NormType.INSTANCE_BATCH_NORM class BottleneckBase(nn.Module): expansion = 4 NORM_TYPE = NormType.BATCH_NORM def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, conv_type=ConvType.HYPERCUBE, bn_momentum=0.1, D=3): super(BottleneckBase, self).__init__() self.conv1 = conv(inplanes, planes, kernel_size=1, D=D) self.norm1 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum) self.conv2 = conv( planes, planes, kernel_size=3, stride=stride, dilation=dilation, conv_type=conv_type, D=D) self.norm2 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum) self.conv3 = conv(planes, planes * self.expansion, kernel_size=1, D=D) self.norm3 = get_norm(self.NORM_TYPE, planes * self.expansion, D, bn_momentum=bn_momentum) self.relu = MinkowskiReLU(inplace=True) self.downsample = downsample def forward(self, x): residual = x out = self.conv1(x) out = self.norm1(out) out = self.relu(out) out = self.conv2(out) out = self.norm2(out) out = self.relu(out) out = self.conv3(out) out = self.norm3(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class Bottleneck(BottleneckBase): NORM_TYPE = NormType.BATCH_NORM class BottleneckIN(BottleneckBase): NORM_TYPE = NormType.INSTANCE_NORM class BottleneckINBN(BottleneckBase): NORM_TYPE = NormType.INSTANCE_BATCH_NORM
ContrastiveSceneContexts-main
downstream/insseg/models/modules/resnet_block.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn import MinkowskiEngine as ME from models.modules.common import ConvType, NormType from models.modules.resnet_block import BasicBlock, Bottleneck class SELayer(nn.Module): def __init__(self, channel, reduction=16, D=-1): # Global coords does not require coords_key super(SELayer, self).__init__() self.fc = nn.Sequential( ME.MinkowskiLinear(channel, channel // reduction), ME.MinkowskiReLU(inplace=True), ME.MinkowskiLinear(channel // reduction, channel), ME.MinkowskiSigmoid()) self.pooling = ME.MinkowskiGlobalPooling(dimension=D) self.broadcast_mul = ME.MinkowskiBroadcastMultiplication(dimension=D) def forward(self, x): y = self.pooling(x) y = self.fc(y) return self.broadcast_mul(x, y) class SEBasicBlock(BasicBlock): def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, conv_type=ConvType.HYPERCUBE, reduction=16, D=-1): super(SEBasicBlock, self).__init__( inplanes, planes, stride=stride, dilation=dilation, downsample=downsample, conv_type=conv_type, D=D) self.se = SELayer(planes, reduction=reduction, D=D) def forward(self, x): residual = x out = self.conv1(x) out = self.norm1(out) out = self.relu(out) out = self.conv2(out) out = self.norm2(out) out = self.se(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class SEBasicBlockSN(SEBasicBlock): NORM_TYPE = NormType.SPARSE_SWITCH_NORM class SEBasicBlockIN(SEBasicBlock): NORM_TYPE = NormType.SPARSE_INSTANCE_NORM class SEBasicBlockLN(SEBasicBlock): NORM_TYPE = NormType.SPARSE_LAYER_NORM class SEBottleneck(Bottleneck): def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, conv_type=ConvType.HYPERCUBE, D=3, reduction=16): super(SEBottleneck, self).__init__( inplanes, planes, stride=stride, dilation=dilation, downsample=downsample, conv_type=conv_type, D=D) self.se = SELayer(planes * self.expansion, reduction=reduction, D=D) def forward(self, x): residual = x out = self.conv1(x) out = self.norm1(out) out = self.relu(out) out = self.conv2(out) out = self.norm2(out) out = self.relu(out) out = self.conv3(out) out = self.norm3(out) out = self.se(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class SEBottleneckSN(SEBottleneck): NORM_TYPE = NormType.SPARSE_SWITCH_NORM class SEBottleneckIN(SEBottleneck): NORM_TYPE = NormType.SPARSE_INSTANCE_NORM class SEBottleneckLN(SEBottleneck): NORM_TYPE = NormType.SPARSE_LAYER_NORM
ContrastiveSceneContexts-main
downstream/insseg/models/modules/senet_block.py
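SELayer above implements squeeze-and-excitation on sparse tensors: global pooling, a two-layer bottleneck with a sigmoid, and a broadcast multiplication. The dense-tensor analogue below makes that recipe explicit; it is a self-contained sketch, not code from this repository.

import torch
import torch.nn as nn


class DenseSELayer(nn.Module):
    """Dense analogue of SELayer: the sparse version swaps mean pooling and the
    elementwise product for MinkowskiGlobalPooling / MinkowskiBroadcastMultiplication."""

    def __init__(self, channel, reduction=16):
        super().__init__()
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel),
            nn.Sigmoid())

    def forward(self, x):                      # x: (N, C) point features of one sample
        y = x.mean(dim=0, keepdim=True)        # "squeeze": global average over points
        y = self.fc(y)                         # "excitation": per-channel gates in (0, 1)
        return x * y                           # reweight every point's channels


reweighted = DenseSELayer(32)(torch.randn(100, 32))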
ContrastiveSceneContexts-main
downstream/insseg/models/modules/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import collections from enum import Enum import torch.nn as nn import MinkowskiEngine as ME class NormType(Enum): BATCH_NORM = 0 INSTANCE_NORM = 1 INSTANCE_BATCH_NORM = 2 def get_norm(norm_type, n_channels, D, bn_momentum=0.1): if norm_type == NormType.BATCH_NORM: return ME.MinkowskiBatchNorm(n_channels, momentum=bn_momentum) elif norm_type == NormType.INSTANCE_NORM: return ME.MinkowskiInstanceNorm(n_channels) elif norm_type == NormType.INSTANCE_BATCH_NORM: return nn.Sequential( ME.MinkowskiInstanceNorm(n_channels), ME.MinkowskiBatchNorm(n_channels, momentum=bn_momentum)) else: raise ValueError(f'Norm type: {norm_type} not supported') class ConvType(Enum): """ Define the kernel region type """ HYPERCUBE = 0, 'HYPERCUBE' SPATIAL_HYPERCUBE = 1, 'SPATIAL_HYPERCUBE' SPATIO_TEMPORAL_HYPERCUBE = 2, 'SPATIO_TEMPORAL_HYPERCUBE' HYPERCROSS = 3, 'HYPERCROSS' SPATIAL_HYPERCROSS = 4, 'SPATIAL_HYPERCROSS' SPATIO_TEMPORAL_HYPERCROSS = 5, 'SPATIO_TEMPORAL_HYPERCROSS' SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS = 6, 'SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS ' def __new__(cls, value, name): member = object.__new__(cls) member._value_ = value member.fullname = name return member def __int__(self): return self.value # Covert the ConvType var to a RegionType var conv_to_region_type = { # kernel_size = [k, k, k, 1] ConvType.HYPERCUBE: ME.RegionType.HYPERCUBE, ConvType.SPATIAL_HYPERCUBE: ME.RegionType.HYPERCUBE, ConvType.SPATIO_TEMPORAL_HYPERCUBE: ME.RegionType.HYPERCUBE, ConvType.HYPERCROSS: ME.RegionType.HYPERCROSS, ConvType.SPATIAL_HYPERCROSS: ME.RegionType.HYPERCROSS, ConvType.SPATIO_TEMPORAL_HYPERCROSS: ME.RegionType.HYPERCROSS, ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS: ME.RegionType.HYBRID } int_to_region_type = {m.value: m for m in ME.RegionType} def convert_region_type(region_type): """ Convert the integer region_type to the corresponding RegionType enum object. 
""" return int_to_region_type[region_type] def convert_conv_type(conv_type, kernel_size, D): assert isinstance(conv_type, ConvType), "conv_type must be of ConvType" region_type = conv_to_region_type[conv_type] axis_types = None if conv_type == ConvType.SPATIAL_HYPERCUBE: # No temporal convolution if isinstance(kernel_size, collections.Sequence): kernel_size = kernel_size[:3] else: kernel_size = [ kernel_size, ] * 3 if D == 4: kernel_size.append(1) elif conv_type == ConvType.SPATIO_TEMPORAL_HYPERCUBE: # conv_type conversion already handled assert D == 4 elif conv_type == ConvType.HYPERCUBE: # conv_type conversion already handled pass elif conv_type == ConvType.SPATIAL_HYPERCROSS: if isinstance(kernel_size, collections.Sequence): kernel_size = kernel_size[:3] else: kernel_size = [ kernel_size, ] * 3 if D == 4: kernel_size.append(1) elif conv_type == ConvType.HYPERCROSS: # conv_type conversion already handled pass elif conv_type == ConvType.SPATIO_TEMPORAL_HYPERCROSS: # conv_type conversion already handled assert D == 4 elif conv_type == ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS: # Define the CUBIC conv kernel for spatial dims and CROSS conv for temp dim axis_types = [ ME.RegionType.HYPERCUBE, ] * 3 if D == 4: axis_types.append(ME.RegionType.HYPERCROSS) return region_type, axis_types, kernel_size def conv(in_planes, out_planes, kernel_size, stride=1, dilation=1, bias=False, conv_type=ConvType.HYPERCUBE, D=-1): assert D > 0, 'Dimension must be a positive integer' region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D) kernel_generator = ME.KernelGenerator( kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D) return ME.MinkowskiConvolution( in_channels=in_planes, out_channels=out_planes, kernel_size=kernel_size, stride=stride, dilation=dilation, has_bias=bias, kernel_generator=kernel_generator, dimension=D) def conv_tr(in_planes, out_planes, kernel_size, upsample_stride=1, dilation=1, bias=False, conv_type=ConvType.HYPERCUBE, D=-1): assert D > 0, 'Dimension must be a positive integer' region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D) kernel_generator = ME.KernelGenerator( kernel_size, upsample_stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D) return ME.MinkowskiConvolutionTranspose( in_channels=in_planes, out_channels=out_planes, kernel_size=kernel_size, stride=upsample_stride, dilation=dilation, has_bias=bias, kernel_generator=kernel_generator, dimension=D) def avg_pool(kernel_size, stride=1, dilation=1, conv_type=ConvType.HYPERCUBE, in_coords_key=None, D=-1): assert D > 0, 'Dimension must be a positive integer' region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D) kernel_generator = ME.KernelGenerator( kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D) return ME.MinkowskiAvgPooling( kernel_size=kernel_size, stride=stride, dilation=dilation, kernel_generator=kernel_generator, dimension=D) def avg_unpool(kernel_size, stride=1, dilation=1, conv_type=ConvType.HYPERCUBE, D=-1): assert D > 0, 'Dimension must be a positive integer' region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D) kernel_generator = ME.KernelGenerator( kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D) return ME.MinkowskiAvgUnpooling( kernel_size=kernel_size, stride=stride, dilation=dilation, kernel_generator=kernel_generator, dimension=D) def 
sum_pool(kernel_size, stride=1, dilation=1, conv_type=ConvType.HYPERCUBE, D=-1): assert D > 0, 'Dimension must be a positive integer' region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D) kernel_generator = ME.KernelGenerator( kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D) return ME.MinkowskiSumPooling( kernel_size=kernel_size, stride=stride, dilation=dilation, kernel_generator=kernel_generator, dimension=D)
ContrastiveSceneContexts-main
downstream/insseg/models/modules/common.py
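conv and conv_tr above wrap MinkowskiConvolution / MinkowskiConvolutionTranspose behind a ConvType that fixes the kernel region. A usage sketch with arbitrary channel sizes, mirroring how the ResUNet encoder/decoder call them:

from models.modules.common import ConvType, conv, conv_tr

# 3x3x3 cubic kernel on the encoder side
c1 = conv(32, 64, kernel_size=3, stride=1, dilation=1,
          conv_type=ConvType.SPATIAL_HYPERCUBE, D=3)

# 2x strided transposed convolution on the decoder side
up = conv_tr(64, 32, kernel_size=2, upsample_stride=2, dilation=1, bias=False,
             conv_type=ConvType.SPATIAL_HYPERCUBE, D=3)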
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
ContrastiveSceneContexts-main
downstream/insseg/lib/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from torch.optim import SGD, Adam from torch.optim.lr_scheduler import LambdaLR, StepLR class LambdaStepLR(LambdaLR): def __init__(self, optimizer, lr_lambda, last_step=-1): super(LambdaStepLR, self).__init__(optimizer, lr_lambda, last_step) @property def last_step(self): """Use last_epoch for the step counter""" return self.last_epoch @last_step.setter def last_step(self, v): self.last_epoch = v class PolyLR(LambdaStepLR): """DeepLab learning rate policy""" def __init__(self, optimizer, max_iter, power=0.9, last_step=-1): super(PolyLR, self).__init__(optimizer, lambda s: (1 - s / (max_iter + 1))**power, last_step) class SquaredLR(LambdaStepLR): """ Used for SGD Lars""" def __init__(self, optimizer, max_iter, last_step=-1): super(SquaredLR, self).__init__(optimizer, lambda s: (1 - s / (max_iter + 1))**2, last_step) class ExpLR(LambdaStepLR): def __init__(self, optimizer, step_size, gamma=0.9, last_step=-1): # (0.9 ** 21.854) = 0.1, (0.95 ** 44.8906) = 0.1 # To get 0.1 every N using gamma 0.9, N * log(0.9)/log(0.1) = 0.04575749 N # To get 0.1 every N using gamma g, g ** N = 0.1 -> N * log(g) = log(0.1) -> g = np.exp(log(0.1) / N) super(ExpLR, self).__init__(optimizer, lambda s: gamma**(s / step_size), last_step) def initialize_optimizer(params, config): assert config.optimizer in ['SGD', 'Adagrad', 'Adam', 'RMSProp', 'Rprop', 'SGDLars'] if config.optimizer == 'SGD': return SGD( params, lr=config.lr, momentum=config.sgd_momentum, dampening=config.sgd_dampening, weight_decay=config.weight_decay) elif config.optimizer == 'Adam': return Adam( params, lr=config.lr, betas=(config.adam_beta1, config.adam_beta2), weight_decay=config.weight_decay) else: logging.error('Optimizer type not supported') raise ValueError('Optimizer type not supported') def initialize_scheduler(optimizer, config, last_step=-1): if config.scheduler == 'StepLR': return StepLR( optimizer, step_size=config.step_size, gamma=config.step_gamma, last_epoch=last_step) elif config.scheduler == 'PolyLR': return PolyLR(optimizer, max_iter=config.max_iter, power=config.poly_power, last_step=last_step) elif config.scheduler == 'SquaredLR': return SquaredLR(optimizer, max_iter=config.max_iter, last_step=last_step) elif config.scheduler == 'ExpLR': return ExpLR( optimizer, step_size=config.exp_step_size, gamma=config.exp_gamma, last_step=last_step) else: logging.error('Scheduler not supported')
ContrastiveSceneContexts-main
downstream/insseg/lib/solvers.py
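The scheduler lambdas above are simple enough to check by hand; the arithmetic sketch below shows the decay factors they multiply onto the base learning rate (the max_iter and step_size values are arbitrary, not taken from any config in this repo):

max_iter, power = 60000, 0.9
poly_factor = lambda s: (1 - s / (max_iter + 1)) ** power      # PolyLR
print(poly_factor(0), poly_factor(30000), poly_factor(60000))  # 1.0, ~0.54, ~5e-5

step_size, gamma = 2000, 0.9
exp_factor = lambda s: gamma ** (s / step_size)                # ExpLR
print(exp_factor(0), exp_factor(2000), exp_factor(4000))       # 1.0, 0.9, 0.81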
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import shutil import tempfile import warnings import numpy as np import torch import torch.nn as nn from sklearn.metrics import average_precision_score from sklearn.preprocessing import label_binarize from lib.utils import Timer, AverageMeter, precision_at_one, fast_hist, per_class_iu, \ get_prediction, get_torch_device, visualize_results, \ permute_pointcloud, save_rotation_pred from MinkowskiEngine import SparseTensor from lib.bfs.bfs import Clustering from datasets.evaluation.evaluate_semantic_instance import Evaluator as InstanceEvaluator from datasets.evaluation.evaluate_semantic_label import Evaluator as SemanticEvaluator def print_info(iteration, max_iteration, data_time, iter_time, losses=None, scores=None, ious=None, hist=None, ap_class=None, class_names=None): debug_str = "{}/{}: ".format(iteration + 1, max_iteration) debug_str += "Data time: {:.4f}, Iter time: {:.4f}".format(data_time, iter_time) acc = hist.diagonal() / hist.sum(1) * 100 debug_str += "\tLoss {loss.val:.3f} (AVG: {loss.avg:.3f})\t" \ "Score {top1.val:.3f} (AVG: {top1.avg:.3f})\t" \ "mIOU {mIOU:.3f} mAP {mAP:.3f} mAcc {mAcc:.3f}\n".format( loss=losses, top1=scores, mIOU=np.nanmean(ious), mAP=np.nanmean(ap_class), mAcc=np.nanmean(acc)) if class_names is not None: debug_str += "\nClasses: " + " ".join(class_names) + '\n' debug_str += 'IOU: ' + ' '.join('{:.03f}'.format(i) for i in ious) + '\n' debug_str += 'mAP: ' + ' '.join('{:.03f}'.format(i) for i in ap_class) + '\n' debug_str += 'mAcc: ' + ' '.join('{:.03f}'.format(i) for i in acc) + '\n' logging.info(debug_str) def average_precision(prob_np, target_np): num_class = prob_np.shape[1] label = label_binarize(target_np, classes=list(range(num_class))) with np.errstate(divide='ignore', invalid='ignore'): return average_precision_score(label, prob_np, average=None) def nms(instances, instances_): instances_return = {} counter = 0 for key in instances: label = instances[key]['label_id'].item() if label in [10, 12, 16]: continue instances_return[counter] = instances[key] counter += 1 # dual set clustering, for some classes, w/o voting loss is better for key_ in instances_: label_ = instances_[key_]['label_id'].item() if label_ in [10, 12, 16]: instances_return[counter] = instances_[key_] counter += 1 return instances_return def test(model, data_loader, config): device = get_torch_device(config.misc.is_cuda) dataset = data_loader.dataset num_labels = dataset.NUM_LABELS global_timer, data_timer, iter_timer = Timer(), Timer(), Timer() criterion = nn.CrossEntropyLoss(ignore_index=config.data.ignore_label) losses, scores, ious = AverageMeter(), AverageMeter(), 0 aps = np.zeros((0, num_labels)) hist = np.zeros((num_labels, num_labels)) logging.info('===> Start testing') global_timer.tic() data_iter = data_loader.__iter__() max_iter = len(data_loader) max_iter_unique = max_iter ###################################################################################### # Added for Instance Segmentation ###################################################################################### VALID_CLASS_IDS = torch.FloatTensor(dataset.VALID_CLASS_IDS).long() CLASS_LABELS_INSTANCE = dataset.CLASS_LABELS if config.misc.train_stuff else dataset.CLASS_LABELS_INSTANCE VALID_CLASS_IDS_INSTANCE = dataset.VALID_CLASS_IDS if config.misc.train_stuff else dataset.VALID_CLASS_IDS_INSTANCE 
IGNORE_LABELS_INSTANCE = dataset.IGNORE_LABELS if config.misc.train_stuff else dataset.IGNORE_LABELS_INSTANCE evaluator = InstanceEvaluator(CLASS_LABELS_INSTANCE, VALID_CLASS_IDS_INSTANCE) cluster_thresh = 1.5 propose_points = 100 score_func = torch.mean if config.test.evaluate_benchmark: cluster_thresh = 0.02 propose_points = 250 score_func = torch.median cluster = Clustering(ignored_labels=IGNORE_LABELS_INSTANCE, class_mapping=VALID_CLASS_IDS, thresh=cluster_thresh, score_func=score_func, propose_points=propose_points, closed_points=300, min_points=50) if config.test.dual_set_cluster : # dual set clustering when submit to benchmark cluster_ = Clustering(ignored_labels=IGNORE_LABELS_INSTANCE, class_mapping=VALID_CLASS_IDS, thresh=0.05, score_func=torch.mean, propose_points=250, closed_points=300, min_points=50) ###################################################################################### # Fix batch normalization running mean and std model.eval() # Clear cache (when run in val mode, cleanup training cache) torch.cuda.empty_cache() with torch.no_grad(): for iteration in range(max_iter): data_timer.tic() if config.data.return_transformation: coords, input, target, instances, transformation = data_iter.next() else: coords, input, target, instances = data_iter.next() transformation = None data_time = data_timer.toc(False) # Preprocess input iter_timer.tic() if config.net.wrapper_type != None: color = input[:, :3].int() if config.augmentation.normalize_color: input[:, :3] = input[:, :3] / 255. - 0.5 sinput = SparseTensor(input, coords).to(device) # Feed forward inputs = (sinput,) if config.net.wrapper_type == None else (sinput, coords, color) pt_offsets, soutput, out_feats = model(*inputs) output = soutput.F pred = get_prediction(dataset, output, target).int() iter_time = iter_timer.toc(False) ##################################################################################### # Added for Instance Segmentation ###################################################################################### if config.test.evaluate_benchmark: # ---------------- point level ------------------- # voting loss for dual set clustering, w/o using ScoreNet scene_id = dataset.get_output_id(iteration) inverse_mapping = dataset.get_original_pointcloud(coords, transformation, iteration) vertices = inverse_mapping[1] + pt_offsets.feats[inverse_mapping[0]].cpu().numpy() features = output[inverse_mapping[0]] instances = cluster.get_instances(vertices, features) if config.test.dual_set_cluster: instances_ = cluster_.get_instances(inverse_mapping[1], features) instances = nms(instances, instances_) evaluator.add_prediction(instances, scene_id) # comment out when evaluate on benchmark format # evaluator.add_gt_in_benchmark_format(scene_id) evaluator.write_to_benchmark(scene_id=scene_id, pred_inst=instances) else: # --------------- voxel level------------------ vertices = coords.cpu().numpy()[:,1:] + pt_offsets.F.cpu().numpy() / dataset.VOXEL_SIZE clusterred_result = cluster.get_instances(vertices, output.clone().cpu()) instance_ids = instances[0]['ids'] gt_labels = target.clone() gt_labels[instance_ids == -1] = IGNORE_LABELS_INSTANCE[0] #invalid instance id is -1, map 0,1,255 labels to 0 gt_labels = VALID_CLASS_IDS[gt_labels.long()] evaluator.add_gt((gt_labels*1000 + instance_ids).numpy(), iteration) # map invalid to invalid label, which is ignored anyway evaluator.add_prediction(clusterred_result, iteration) ###################################################################################### target_np = 
target.numpy() num_sample = target_np.shape[0] target = target.to(device) cross_ent = criterion(output, target.long()) losses.update(float(cross_ent), num_sample) scores.update(precision_at_one(pred, target), num_sample) hist += fast_hist(pred.cpu().numpy().flatten(), target_np.flatten(), num_labels) ious = per_class_iu(hist) * 100 prob = torch.nn.functional.softmax(output, dim=1) ap = average_precision(prob.cpu().detach().numpy(), target_np) aps = np.vstack((aps, ap)) # Due to heavy bias in class, there exists class with no test label at all with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) ap_class = np.nanmean(aps, 0) * 100. if iteration % config.test.test_stat_freq == 0 and iteration > 0: reordered_ious = dataset.reorder_result(ious) reordered_ap_class = dataset.reorder_result(ap_class) class_names = dataset.get_classnames() print_info( iteration, max_iter_unique, data_time, iter_time, losses, scores, reordered_ious, hist, reordered_ap_class, class_names=class_names) if iteration % config.train.empty_cache_freq == 0: # Clear cache torch.cuda.empty_cache() global_time = global_timer.toc(False) reordered_ious = dataset.reorder_result(ious) reordered_ap_class = dataset.reorder_result(ap_class) class_names = dataset.get_classnames() print_info( iteration, max_iter_unique, data_time, iter_time, losses, scores, reordered_ious, hist, reordered_ap_class, class_names=class_names) logging.info("Finished test. Elapsed time: {:.4f}".format(global_time)) mAP50 = 0.0 #if not config.test.evaluate_benchmark: _, mAP50, _ = evaluator.evaluate() return losses.avg, scores.avg, np.nanmean(ap_class), np.nanmean(per_class_iu(hist)) * 100, mAP50
ContrastiveSceneContexts-main
downstream/insseg/lib/test.py
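In test() above, the predicted per-point offsets are used to shift voxel coordinates toward their instance centres before clustering (vertices = coords[:, 1:] + pt_offsets.F / dataset.VOXEL_SIZE). A self-contained toy version of that shift; the voxel size and arrays below are made-up stand-ins:

import numpy as np

voxel_size = 0.02   # metres per voxel (assumed value)
voxel_coords = np.array([[0, 0, 0], [4, 0, 0], [50, 50, 0], [54, 50, 0]], dtype=np.float32)
# offsets are predicted in metres, pointing toward each object's centre
pred_offsets = np.array([[0.04, 0, 0], [-0.04, 0, 0], [0.04, 0, 0], [-0.04, 0, 0]],
                        dtype=np.float32)

# dividing by the voxel size expresses the shift in voxel units; after the shift,
# voxels belonging to the same object collapse together and are easy to cluster
vertices = voxel_coords + pred_offsets / voxel_size
print(vertices)   # first pair meets at x=2, second pair at x=52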
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import logging import os import sys import torch import logging import torch.nn.functional as F from torch import nn from torch.serialization import default_restore_location from tensorboardX import SummaryWriter from MinkowskiEngine import SparseTensor from omegaconf import OmegaConf from lib.distributed import get_world_size, all_gather, is_master_proc from models import load_model from lib.test import test as test_ from lib.solvers import initialize_optimizer, initialize_scheduler from datasets import load_dataset from datasets.dataset import initialize_data_loader from lib.utils import checkpoint, precision_at_one, Timer, AverageMeter, get_prediction, load_state_with_same_shape, count_parameters class SegmentationTrainer: def __init__(self, config): self.is_master = is_master_proc(config.misc.num_gpus) if config.misc.num_gpus > 1 else True self.cur_device = torch.cuda.current_device() # load the configurations self.setup_logging() if os.path.exists('config.yaml'): logging.info('===> Loading exsiting config file') config = OmegaConf.load('config.yaml') logging.info('===> Loaded exsiting config file') logging.info('===> Configurations') logging.info(config.pretty()) # dataloader DatasetClass = load_dataset(config.data.dataset) logging.info('===> Initializing dataloader') self.train_data_loader = initialize_data_loader( DatasetClass, config, phase=config.train.train_phase, num_workers=config.data.num_workers, augment_data=True, shuffle=True, repeat=True, batch_size=config.data.batch_size // config.misc.num_gpus, limit_numpoints=config.data.train_limit_numpoints) self.val_data_loader = initialize_data_loader( DatasetClass, config, phase=config.train.val_phase, num_workers=1, augment_data=False, shuffle=True, repeat=False, batch_size=1, limit_numpoints=False) self.test_data_loader = initialize_data_loader( DatasetClass, config, phase=config.test.test_phase, num_workers=config.data.num_workers, augment_data=False, shuffle=False, repeat=False, batch_size=config.data.test_batch_size, limit_numpoints=False) # Model initialization logging.info('===> Building model') num_in_channel = self.train_data_loader.dataset.NUM_IN_CHANNEL num_labels = self.train_data_loader.dataset.NUM_LABELS NetClass = load_model(config.net.model) model = NetClass(num_in_channel, num_labels, config) logging.info('===> Number of trainable parameters: {}: {}'.format(NetClass.__name__, count_parameters(model))) logging.info(model) # Load weights if specified by the parameter. 
if config.net.weights != '': logging.info('===> Loading weights: ' + config.net.weights) state = torch.load(config.net.weights, map_location=lambda s, l: default_restore_location(s, 'cpu')) matched_weights = load_state_with_same_shape(model, state['state_dict']) model_dict = model.state_dict() model_dict.update(matched_weights) model.load_state_dict(model_dict) model = model.cuda() if config.misc.num_gpus > 1: model = torch.nn.parallel.DistributedDataParallel( module=model, device_ids=[self.cur_device], output_device=self.cur_device, broadcast_buffers=False ) self.config = config self.model = model if self.is_master: self.writer = SummaryWriter(log_dir='tensorboard') self.optimizer = initialize_optimizer(model.parameters(), config.optimizer) self.scheduler = initialize_scheduler(self.optimizer, config.optimizer) self.criterion = nn.CrossEntropyLoss(ignore_index=config.data.ignore_label) checkpoint_fn = 'weights/weights.pth' self.best_val_miou, self.best_val_miou_iter = -1,1 self.best_val_mAP, self.best_val_mAP_iter = -1,1 self.curr_iter, self.epoch, self.is_training = 1, 1, True if os.path.isfile(checkpoint_fn): logging.info("=> loading checkpoint '{}'".format(checkpoint_fn)) state = torch.load(checkpoint_fn, map_location=lambda s, l: default_restore_location(s, 'cpu')) self.load_state(state['state_dict']) self.curr_iter = state['iteration'] + 1 self.epoch = state['epoch'] self.scheduler = initialize_scheduler(self.optimizer, config.optimizer, last_step=self.curr_iter) self.optimizer.load_state_dict(state['optimizer']) if 'best_val_miou' in state: self.best_val_miou = state['best_val_miou'] if 'best_val_mAP' in state: self.best_val_mAP = state['best_val_mAP'] logging.info("=> loaded checkpoint '{}' (epoch {})".format(checkpoint_fn, state['epoch'])) else: logging.info("=> no weights.pth") def setup_logging(self): ch = logging.StreamHandler(sys.stdout) logging.getLogger().setLevel(logging.WARN) if self.is_master: logging.getLogger().setLevel(logging.INFO) logging.basicConfig( format=os.uname()[1].split('.')[0] + ' %(asctime)s %(message)s', datefmt='%m/%d %H:%M:%S', handlers=[ch]) def load_state(self, state): if get_world_size() > 1: _model = self.model.module else: _model = self.model _model.load_state_dict(state) def set_seed(self): # Set seed based on args.seed and the update number so that we get # reproducible results when resuming from checkpoints seed = self.config.misc.seed + self.curr_iter torch.manual_seed(seed) torch.cuda.manual_seed(seed) def test(self): return test_(self.model, self.test_data_loader, self.config) def validate(self): val_loss, val_score, _, val_miou, val_mAP = test_(self.model, self.val_data_loader, self.config) self.writer.add_scalar('val/miou', val_miou, self.curr_iter) self.writer.add_scalar('val/loss', val_loss, self.curr_iter) self.writer.add_scalar('val/precision_at_1', val_score, self.curr_iter) self.writer.add_scalar('val/[email protected]', val_mAP, self.curr_iter) if val_miou > self.best_val_miou: self.best_val_miou = val_miou self.best_val_iou_iter = self.curr_iter checkpoint(self.model, self.optimizer, self.epoch, self.curr_iter, self.config, self.best_val_miou, self.best_val_mAP, "miou") logging.info("Current best mIoU: {:.3f} at iter {}".format(self.best_val_miou, self.best_val_miou_iter)) if val_mAP > self.best_val_mAP: self.best_val_mAP = val_mAP self.best_val_mAP_iter = self.curr_iter checkpoint(self.model, self.optimizer, self.epoch, self.curr_iter, self.config, self.best_val_miou, self.best_val_mAP, "mAP") logging.info("Current best [email 
protected]: {:.3f} at iter {}".format(self.best_val_mAP, self.best_val_mAP_iter)) checkpoint(self.model, self.optimizer, self.epoch, self.curr_iter, self.config, self.best_val_miou, self.best_val_mAP) def train(self): # Set up the train flag for batch normalization self.model.train() # Configuration data_timer, iter_timer = Timer(), Timer() fw_timer, bw_timer, ddp_timer = Timer(), Timer(), Timer() data_time_avg, iter_time_avg = AverageMeter(), AverageMeter() fw_time_avg, bw_time_avg, ddp_time_avg = AverageMeter(), AverageMeter(), AverageMeter() scores = AverageMeter() losses = { 'semantic_loss': AverageMeter(), 'offset_dir_loss': AverageMeter(), 'offset_norm_loss': AverageMeter(), 'total_loss': AverageMeter() } # Train the network logging.info('===> Start training on {} GPUs, batch-size={}'.format( get_world_size(), self.config.data.batch_size)) data_iter = self.train_data_loader.__iter__() # (distributed) infinite sampler while self.is_training: for _ in range(len(self.train_data_loader) // self.config.optimizer.iter_size): self.optimizer.zero_grad() data_time, batch_score = 0, 0 batch_losses = { 'semantic_loss': 0.0, 'offset_dir_loss': 0.0, 'offset_norm_loss': 0.0, 'total_loss': 0.0} iter_timer.tic() # set random seed for every iteration for trackability self.set_seed() for sub_iter in range(self.config.optimizer.iter_size): # Get training data data_timer.tic() if self.config.data.return_transformation: coords, input, target, instances, _ = data_iter.next() else: coords, input, target, instances = data_iter.next() # Preprocess input color = input[:, :3].int() if self.config.augmentation.normalize_color: input[:, :3] = input[:, :3] / 255. - 0.5 sinput = SparseTensor(input, coords).to(self.cur_device) data_time += data_timer.toc(False) # Feed forward fw_timer.tic() inputs = (sinput,) pt_offsets, soutput, _ = self.model(*inputs) # The output of the network is not sorted target = target.long().to(self.cur_device) semantic_loss = self.criterion(soutput.F, target.long()) total_loss = semantic_loss #-----------------offset loss---------------------- ## pt_offsets: (N, 3), float, cuda ## coords: (N, 3), float32 ## centers: (N, 3), float32 tensor ## instance_ids: (N), long centers = np.concatenate([instance['center'] for instance in instances]) instance_ids = np.concatenate([instance['ids'] for instance in instances]) centers = torch.from_numpy(centers).cuda() instance_ids = torch.from_numpy(instance_ids).cuda().long() gt_offsets = centers - coords[:,1:].cuda() # (N, 3) gt_offsets *= self.train_data_loader.dataset.VOXEL_SIZE pt_diff = pt_offsets.F - gt_offsets # (N, 3) pt_dist = torch.sum(torch.abs(pt_diff), dim=-1) # (N) valid = (instance_ids != -1).float() offset_norm_loss = torch.sum(pt_dist * valid) / (torch.sum(valid) + 1e-6) gt_offsets_norm = torch.norm(gt_offsets, p=2, dim=1) # (N), float gt_offsets_ = gt_offsets / (gt_offsets_norm.unsqueeze(-1) + 1e-8) pt_offsets_norm = torch.norm(pt_offsets.F, p=2, dim=1) pt_offsets_ = pt_offsets.F / (pt_offsets_norm.unsqueeze(-1) + 1e-8) direction_diff = - (gt_offsets_ * pt_offsets_).sum(-1) # (N) offset_dir_loss = torch.sum(direction_diff * valid) / (torch.sum(valid) + 1e-6) total_loss += offset_norm_loss + offset_dir_loss # Compute and accumulate gradient total_loss /= self.config.optimizer.iter_size pred = get_prediction(self.train_data_loader.dataset, soutput.F, target) score = precision_at_one(pred, target) # bp the loss fw_timer.toc(False) bw_timer.tic() total_loss.backward() bw_timer.toc(False) # gather information logging_output = {'total_loss': 
total_loss.item(), 'semantic_loss': semantic_loss.item(), 'score': score / self.config.optimizer.iter_size} logging_output['offset_dir_loss'] = offset_dir_loss.item() logging_output['offset_norm_loss'] = offset_norm_loss.item() ddp_timer.tic() if self.config.misc.num_gpus > 1: logging_output = all_gather(logging_output) logging_output = {w: np.mean([ a[w] for a in logging_output] ) for w in logging_output[0]} batch_losses['total_loss'] += logging_output['total_loss'] batch_losses['semantic_loss'] += logging_output['semantic_loss'] batch_losses['offset_dir_loss'] += logging_output['offset_dir_loss'] batch_losses['offset_norm_loss'] += logging_output['offset_norm_loss'] batch_score += logging_output['score'] ddp_timer.toc(False) # Update number of steps self.optimizer.step() self.scheduler.step() data_time_avg.update(data_time) iter_time_avg.update(iter_timer.toc(False)) fw_time_avg.update(fw_timer.diff) bw_time_avg.update(bw_timer.diff) ddp_time_avg.update(ddp_timer.diff) losses['total_loss'].update(batch_losses['total_loss'], target.size(0)) losses['semantic_loss'].update(batch_losses['semantic_loss'], target.size(0)) losses['offset_dir_loss'].update(batch_losses['offset_dir_loss'], target.size(0)) losses['offset_norm_loss'].update(batch_losses['offset_norm_loss'], target.size(0)) scores.update(batch_score, target.size(0)) if self.curr_iter >= self.config.optimizer.max_iter: self.is_training = False break if self.curr_iter % self.config.train.stat_freq == 0 or self.curr_iter == 1: lrs = ', '.join(['{:.3e}'.format(x) for x in self.scheduler.get_last_lr()]) debug_str = "===> Epoch[{}]({}/{}): Loss {:.4f}, Sem {:.4f}, dir {:.4f}, norm {:.4f}\tLR: {}\t".format( self.epoch, self.curr_iter, len(self.train_data_loader) // self.config.optimizer.iter_size, losses['total_loss'].avg, losses['semantic_loss'].avg, losses['offset_dir_loss'].avg, losses['offset_norm_loss'].avg, lrs) debug_str += "Score {:.3f}\tData time: {:.4f}, Forward time: {:.4f}, Backward time: {:.4f}, DDP time: {:.4f}, Total iter time: {:.4f}".format( scores.avg, data_time_avg.avg, fw_time_avg.avg, bw_time_avg.avg, ddp_time_avg.avg, iter_time_avg.avg) logging.info(debug_str) # Reset timers data_time_avg.reset() iter_time_avg.reset() # Write logs if self.is_master: self.writer.add_scalar('train/loss', losses['total_loss'].avg, self.curr_iter) self.writer.add_scalar('train/semantic_loss', losses['semantic_loss'].avg, self.curr_iter) self.writer.add_scalar('train/offset_dir_loss', losses['offset_dir_loss'].avg, self.curr_iter) self.writer.add_scalar('train/offset_norm_loss', losses['offset_norm_loss'].avg, self.curr_iter) self.writer.add_scalar('train/precision_at_1', scores.avg, self.curr_iter) self.writer.add_scalar('train/learning_rate', self.scheduler.get_last_lr()[0], self.curr_iter) # clear loss losses['total_loss'].reset() losses['semantic_loss'].reset() losses['offset_dir_loss'].reset() losses['offset_norm_loss'].reset() scores.reset() # Validation if self.curr_iter % self.config.train.val_freq == 0 and self.is_master: self.validate() self.model.train() if self.curr_iter % self.config.train.empty_cache_freq == 0: # Clear cache torch.cuda.empty_cache() # End of iteration self.curr_iter += 1 self.epoch += 1 # Explicit memory cleanup if hasattr(data_iter, 'cleanup'): data_iter.cleanup() # Save the final model if self.is_master: self.validate()
ContrastiveSceneContexts-main
downstream/insseg/lib/ddp_trainer.py
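The offset branch in SegmentationTrainer.train() above is supervised with an L1 term on the offset magnitude and a negative-cosine term on its direction, both averaged over points that belong to a valid instance. A small self-contained sketch of that loss, with random stand-in tensors instead of network outputs and dataloader targets:

# Sketch of the offset regression loss from the trainer above.
# All tensors here are random stand-ins; in the trainer, pt_offsets comes from the
# network, gt_offsets from instance centers, and instance_ids from the dataloader.
import torch

N = 1024
pt_offsets = torch.randn(N, 3)               # predicted per-point offsets
gt_offsets = torch.randn(N, 3)               # point-to-instance-center offsets
instance_ids = torch.randint(-1, 10, (N,))   # -1 marks points without an instance

valid = (instance_ids != -1).float()

# L1 distance between predicted and ground-truth offsets, masked by validity
pt_dist = torch.sum(torch.abs(pt_offsets - gt_offsets), dim=-1)
offset_norm_loss = torch.sum(pt_dist * valid) / (torch.sum(valid) + 1e-6)

# Negative cosine similarity between predicted and ground-truth offset directions
gt_dir = gt_offsets / (gt_offsets.norm(p=2, dim=1, keepdim=True) + 1e-8)
pt_dir = pt_offsets / (pt_offsets.norm(p=2, dim=1, keepdim=True) + 1e-8)
direction_diff = -(gt_dir * pt_dir).sum(-1)
offset_dir_loss = torch.sum(direction_diff * valid) / (torch.sum(valid) + 1e-6)

total_offset_loss = offset_norm_loss + offset_dir_loss
print(float(offset_norm_loss), float(offset_dir_loss), float(total_offset_loss))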
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import trimesh # color palette for nyu40 labels def create_color_palette(): return [ (0, 0, 0), (174, 199, 232), # wall (152, 223, 138), # floor (31, 119, 180), # cabinet (255, 187, 120), # bed (188, 189, 34), # chair (140, 86, 75), # sofa (255, 152, 150), # table (214, 39, 40), # door (197, 176, 213), # window (148, 103, 189), # bookshelf (196, 156, 148), # picture (23, 190, 207), # counter (178, 76, 76), (247, 182, 210), # desk (66, 188, 102), (219, 219, 141), # curtain (140, 57, 197), (202, 185, 52), (51, 176, 203), (200, 54, 131), (92, 193, 61), (78, 71, 183), (172, 114, 82), (255, 127, 14), # refrigerator (91, 163, 138), (153, 98, 156), (140, 153, 101), (158, 218, 229), # shower curtain (100, 125, 154), (178, 127, 135), (120, 185, 128), (146, 111, 194), (44, 160, 44), # toilet (112, 128, 144), # sink (96, 207, 209), (227, 119, 194), # bathtub (213, 92, 176), (94, 106, 211), (82, 84, 163), # otherfurn (100, 85, 144), ] def write_triangle_mesh(vertices, colors, faces, outputFile): mesh = trimesh.Trimesh(vertices=vertices, vertex_colors=colors, faces=faces, process=False) mesh.export(outputFile) def read_triangle_mesh(filename): mesh = trimesh.load_mesh(filename, process=False) if isinstance(mesh, trimesh.PointCloud): vertices = mesh.vertices colors = mesh.colors faces = None elif isinstance(mesh, trimesh.Trimesh): vertices = mesh.vertices colors = mesh.visual.vertex_colors faces = mesh.faces return vertices, colors, faces
ContrastiveSceneContexts-main
downstream/insseg/lib/io3d.py
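A short round-trip usage sketch for the two mesh helpers above, assuming the module is importable as lib.io3d (its path in this repo); the tiny mesh and the output filename are made up:

# Write a small colored triangle mesh to PLY and read it back.
import numpy as np
from lib.io3d import write_triangle_mesh, read_triangle_mesh  # import path assumed

vertices = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
colors = np.array([[255, 0, 0], [0, 255, 0], [0, 0, 255], [255, 255, 0]], dtype=np.uint8)
faces = np.array([[0, 1, 2], [0, 2, 3]])

write_triangle_mesh(vertices, colors, faces, 'example.ply')
verts, cols, tris = read_triangle_mesh('example.ply')
print(verts.shape, cols.shape, tris.shape)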
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import time import torch import signal import pickle import threading import functools import traceback import torch.nn as nn import torch.distributed as dist import multiprocessing as mp """Multiprocessing error handler.""" class ChildException(Exception): """Wraps an exception from a child process.""" def __init__(self, child_trace): super(ChildException, self).__init__(child_trace) class ErrorHandler(object): """Multiprocessing error handler (based on fairseq's). Listens for errors in child processes and propagates the tracebacks to the parent process. """ def __init__(self, error_queue): # Shared error queue self.error_queue = error_queue # Children processes sharing the error queue self.children_pids = [] # Start a thread listening to errors self.error_listener = threading.Thread(target=self.listen, daemon=True) self.error_listener.start() # Register the signal handler signal.signal(signal.SIGUSR1, self.signal_handler) def add_child(self, pid): """Registers a child process.""" self.children_pids.append(pid) def listen(self): """Listens for errors in the error queue.""" # Wait until there is an error in the queue child_trace = self.error_queue.get() # Put the error back for the signal handler self.error_queue.put(child_trace) # Invoke the signal handler os.kill(os.getpid(), signal.SIGUSR1) def signal_handler(self, sig_num, stack_frame): """Signal handler.""" # Kill children processes for pid in self.children_pids: os.kill(pid, signal.SIGINT) # Propagate the error from the child process raise ChildException(self.error_queue.get()) """Multiprocessing helpers.""" def run(proc_rank, world_size, error_queue, fun, fun_args, fun_kwargs): """Runs a function from a child process.""" try: # Initialize the process group init_process_group(proc_rank, world_size) # Run the function fun(*fun_args, **fun_kwargs) except: # Propagate exception to the parent process error_queue.put(traceback.format_exc()) finally: destroy_process_group() def multi_proc_run(num_proc, fun, fun_args=(), fun_kwargs={}): """Runs a function in a multi-proc setting.""" # Handle errors from training subprocesses error_queue = mp.SimpleQueue() error_handler = ErrorHandler(error_queue) # Run each training subprocess ps = [] for i in range(num_proc): p_i = mp.Process( target=run, args=(i, num_proc, error_queue, fun, fun_args, fun_kwargs) ) ps.append(p_i) p_i.start() error_handler.add_child(p_i.pid) # Wait for each subprocess to finish for p in ps: p.join() """Distributed helpers.""" def is_master_proc(num_gpus): """Determines if the current process is the master process. Master process is responsible for logging, writing and loading checkpoints. In the multi GPU setting, we assign the master role to the rank 0 process. When training using a single GPU, there is only one training processes which is considered the master processes. 
""" return num_gpus == 1 or torch.distributed.get_rank() == 0 def get_world_size(): if not dist.is_available(): return 1 if not dist.is_initialized(): return 1 return dist.get_world_size() def get_rank(): if not dist.is_available(): return 0 if not dist.is_initialized(): return 0 return dist.get_rank() def synchronize(): """ Helper function to synchronize (barrier) among all processes when using distributed training """ if not dist.is_available(): return if not dist.is_initialized(): return world_size = dist.get_world_size() if world_size == 1: return dist.barrier() def all_gather_differentiable(tensor): """ Run differentiable gather function for SparseConv features with variable number of points. tensor: [num_points, feature_dim] """ world_size = get_world_size() if world_size == 1: return [tensor] num_points, f_dim = tensor.size() local_np = torch.LongTensor([num_points]).to("cuda") np_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)] dist.all_gather(np_list, local_np) np_list = [int(np.item()) for np in np_list] max_np = max(np_list) tensor_list = [] for _ in np_list: tensor_list.append(torch.FloatTensor(size=(max_np, f_dim)).to("cuda")) if local_np != max_np: padding = torch.zeros(size=(max_np-local_np, f_dim)).to("cuda").float() tensor = torch.cat((tensor, padding), dim=0) assert tensor.size() == (max_np, f_dim) dist.all_gather(tensor_list, tensor) data_list = [] for gather_np, gather_tensor in zip(np_list, tensor_list): gather_tensor = gather_tensor[:gather_np] assert gather_tensor.size() == (gather_np, f_dim) data_list.append(gather_tensor) return data_list def all_gather(data): """ Run all_gather on arbitrary picklable data (not necessarily tensors) Args: data: any picklable object Returns: list[data]: list of data gathered from each rank """ world_size = get_world_size() if world_size == 1: return [data] # serialized to a Tensor buffer = pickle.dumps(data) storage = torch.ByteStorage.from_buffer(buffer) tensor = torch.ByteTensor(storage).to("cuda") # obtain Tensor size of each rank local_size = torch.LongTensor([tensor.numel()]).to("cuda") size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)] dist.all_gather(size_list, local_size) size_list = [int(size.item()) for size in size_list] max_size = max(size_list) # receiving Tensor from all ranks # we pad the tensor because torch all_gather does not support # gathering tensors of different shapes tensor_list = [] for _ in size_list: tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda")) if local_size != max_size: padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda") tensor = torch.cat((tensor, padding), dim=0) dist.all_gather(tensor_list, tensor) data_list = [] for size, tensor in zip(size_list, tensor_list): buffer = tensor.cpu().numpy().tobytes()[:size] data_list.append(pickle.loads(buffer)) return data_list def init_process_group(proc_rank, world_size): """Initializes the default process group.""" # Set the GPU to use torch.cuda.set_device(proc_rank) # Initialize the process group torch.distributed.init_process_group( backend="nccl", init_method="tcp://{}:{}".format("localhost", "10001"), world_size=world_size, rank=proc_rank ) def destroy_process_group(): """Destroys the default process group.""" torch.distributed.destroy_process_group()
ContrastiveSceneContexts-main
downstream/insseg/lib/distributed.py
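The generic all_gather above works by pickling the payload into a byte tensor, padding every rank's tensor to the common maximum length, gathering, and truncating before unpickling. The core serialize/pad/truncate cycle can be exercised on a single process, with no process group, as in this sketch (the "other rank" is simulated by an arbitrary extra padding length):

# Single-process sketch of the serialize -> pad -> truncate -> deserialize cycle
# that all_gather() builds on. No distributed setup is required here.
import pickle
import torch

data = {'total_loss': 1.23, 'score': 0.9}

buffer = pickle.dumps(data)
tensor = torch.ByteTensor(torch.ByteStorage.from_buffer(buffer))
local_size = tensor.numel()

# Pretend another rank sent a longer message, so pad to the maximum size.
max_size = local_size + 16
padding = torch.zeros(max_size - local_size, dtype=torch.uint8)
padded = torch.cat((tensor, padding), dim=0)

# Receiving side: truncate to the sender's true size and unpickle.
restored = pickle.loads(padded.numpy().tobytes()[:local_size])
assert restored == data
print(restored)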
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import numpy as np from numpy.linalg import matrix_rank, inv from plyfile import PlyData, PlyElement import pandas as pd COLOR_MAP_RGB = ( (241, 255, 82), (102, 168, 226), (0, 255, 0), (113, 143, 65), (89, 173, 163), (254, 158, 137), (190, 123, 75), (100, 22, 116), (0, 18, 141), (84, 84, 84), (85, 116, 127), (255, 31, 33), (228, 228, 228), (0, 255, 0), (70, 145, 150), (237, 239, 94), ) IGNORE_COLOR = (0, 0, 0) def read_plyfile(filepath): """Read ply file and return it as numpy array. Returns None if emtpy.""" with open(filepath, 'rb') as f: plydata = PlyData.read(f) if plydata.elements: return pd.DataFrame(plydata.elements[0].data).values def save_point_cloud(points_3d, filename, binary=True, with_label=False, verbose=True): """Save an RGB point cloud as a PLY file. Args: points_3d: Nx6 matrix where points_3d[:, :3] are the XYZ coordinates and points_3d[:, 4:] are the RGB values. If Nx3 matrix, save all points with [128, 128, 128] (gray) color. """ assert points_3d.ndim == 2 if with_label: assert points_3d.shape[1] == 7 python_types = (float, float, float, int, int, int, int) npy_types = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1'), ('label', 'u1')] else: if points_3d.shape[1] == 3: gray_concat = np.tile(np.array([128], dtype=np.uint8), (points_3d.shape[0], 3)) points_3d = np.hstack((points_3d, gray_concat)) assert points_3d.shape[1] == 6 python_types = (float, float, float, int, int, int) npy_types = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')] if binary is True: # Format into NumPy structured array vertices = [] for row_idx in range(points_3d.shape[0]): cur_point = points_3d[row_idx] vertices.append(tuple(dtype(point) for dtype, point in zip(python_types, cur_point))) vertices_array = np.array(vertices, dtype=npy_types) el = PlyElement.describe(vertices_array, 'vertex') # Write PlyData([el]).write(filename) else: # PlyData([el], text=True).write(filename) with open(filename, 'w') as f: f.write('ply\n' 'format ascii 1.0\n' 'element vertex %d\n' 'property float x\n' 'property float y\n' 'property float z\n' 'property uchar red\n' 'property uchar green\n' 'property uchar blue\n' 'property uchar alpha\n' 'end_header\n' % points_3d.shape[0]) for row_idx in range(points_3d.shape[0]): X, Y, Z, R, G, B = points_3d[row_idx] f.write('%f %f %f %d %d %d 0\n' % (X, Y, Z, R, G, B)) if verbose is True: print('Saved point cloud to: %s' % filename) class Camera(object): def __init__(self, intrinsics): self._intrinsics = intrinsics self._camera_matrix = self.build_camera_matrix(self.intrinsics) self._K_inv = inv(self.camera_matrix) @staticmethod def build_camera_matrix(intrinsics): """Build the 3x3 camera matrix K using the given intrinsics. Equation 6.10 from HZ. """ f = intrinsics['focal_length'] pp_x = intrinsics['pp_x'] pp_y = intrinsics['pp_y'] K = np.array([[f, 0, pp_x], [0, f, pp_y], [0, 0, 1]], dtype=np.float32) # K[:, 0] *= -1. # Step 1 of Kyle assert matrix_rank(K) == 3 return K @staticmethod def extrinsics2RT(extrinsics): """Convert extrinsics matrix to separate rotation matrix R and translation vector T. """ assert extrinsics.shape == (4, 4) R = extrinsics[:3, :3] T = extrinsics[3, :3] R = np.copy(R) T = np.copy(T) T = T.reshape(3, 1) R[0, :] *= -1. # Step 1 of Kyle T *= 100. 
# Convert from m to cm return R, T def project(self, points_3d, extrinsics=None): """Project a 3D point in camera coordinates into the camera/image plane. Args: point_3d: """ if extrinsics is not None: # Map points to camera coordinates points_3d = self.world2camera(extrinsics, points_3d) # TODO: Make sure to handle homogeneous AND non-homogeneous coordinate points # TODO: Consider handling a set of points raise NotImplementedError def backproject(self, depth_map, labels=None, max_depth=None, max_height=None, min_height=None, rgb_img=None, extrinsics=None, prune=True): """Backproject a depth map into 3D points (camera coordinate system). Attach color if RGB image is provided, otherwise use gray [128 128 128] color. Does not show points at Z = 0 or maximum Z = 65535 depth. Args: labels: Tensor with the same shape as depth map (but can be 1-channel or 3-channel). max_depth: Maximum depth in cm. All pts with depth greater than max_depth will be ignored. max_height: Maximum height in cm. All pts with height greater than max_height will be ignored. Returns: points_3d: Numpy array of size Nx3 (XYZ) or Nx6 (XYZRGB). """ if labels is not None: assert depth_map.shape[:2] == labels.shape[:2] if (labels.ndim == 2) or ((labels.ndim == 3) and (labels.shape[2] == 1)): n_label_channels = 1 elif (labels.ndim == 3) and (labels.shape[2] == 3): n_label_channels = 3 if rgb_img is not None: assert depth_map.shape[:2] == rgb_img.shape[:2] else: rgb_img = np.ones_like(depth_map, dtype=np.uint8) * 255 # Convert from 1-channel to 3-channel if (rgb_img.ndim == 3) and (rgb_img.shape[2] == 1): rgb_img = np.tile(rgb_img, [1, 1, 3]) # Convert depth map to single channel if it is multichannel if (depth_map.ndim == 3) and depth_map.shape[2] == 3: depth_map = np.squeeze(depth_map[:, :, 0]) depth_map = depth_map.astype(np.float32) # Get image dimensions H, W = depth_map.shape # Create meshgrid (pixel coordinates) Z = depth_map A, B = np.meshgrid(range(W), range(H)) ones = np.ones_like(A) grid = np.concatenate((A[:, :, np.newaxis], B[:, :, np.newaxis], ones[:, :, np.newaxis]), axis=2) grid = grid.astype(np.float32) * Z[:, :, np.newaxis] # Nx3 where each row is (a*Z, b*Z, Z) grid_flattened = grid.reshape((-1, 3)) grid_flattened = grid_flattened.T # 3xN where each col is (a*Z, b*Z, Z) prod = np.dot(self.K_inv, grid_flattened) XYZ = np.concatenate((prod[:2, :].T, Z.flatten()[:, np.newaxis]), axis=1) # Nx3 XYZRGB = np.hstack((XYZ, rgb_img.reshape((-1, 3)))) points_3d = XYZRGB if labels is not None: labels_reshaped = labels.reshape((-1, n_label_channels)) # Prune points if prune is True: valid = [] for idx in range(points_3d.shape[0]): cur_y = points_3d[idx, 1] cur_z = points_3d[idx, 2] if (cur_z == 0) or (cur_z == 65535): # Don't show things at 0 distance or max distance continue elif (max_depth is not None) and (cur_z > max_depth): continue elif (max_height is not None) and (cur_y > max_height): continue elif (min_height is not None) and (cur_y < min_height): continue else: valid.append(idx) points_3d = points_3d[np.asarray(valid)] if labels is not None: labels_reshaped = labels_reshaped[np.asarray(valid)] if extrinsics is not None: points_3d = self.camera2world(extrinsics, points_3d) if labels is not None: points_3d_labels = np.hstack((points_3d[:, :3], labels_reshaped)) return points_3d, points_3d_labels else: return points_3d @staticmethod def _camera2world_transform(no_rgb_points_3d, R, T): points_3d_world = (np.dot(R.T, no_rgb_points_3d.T) - T).T # Nx3 return points_3d_world @staticmethod def 
_world2camera_transform(no_rgb_points_3d, R, T): points_3d_world = (np.dot(R, no_rgb_points_3d.T + T)).T # Nx3 return points_3d_world def _transform_points(self, points_3d, extrinsics, transform): """Base/wrapper method for transforming points using R and T. """ assert points_3d.ndim == 2 orig_points_3d = points_3d points_3d = np.copy(orig_points_3d) if points_3d.shape[1] == 6: # XYZRGB points_3d = points_3d[:, :3] elif points_3d.shape[1] == 3: # XYZ points_3d = points_3d else: raise ValueError('3D points need to be XYZ or XYZRGB.') R, T = self.extrinsics2RT(extrinsics) points_3d_world = transform(points_3d, R, T) # Add color again (if appropriate) if orig_points_3d.shape[1] == 6: # XYZRGB points_3d_world = np.hstack((points_3d_world, orig_points_3d[:, -3:])) return points_3d_world def camera2world(self, extrinsics, points_3d): """Transform from camera coordinates (3D) to world coordinates (3D). Args: points_3d: Nx3 or Nx6 matrix of N points with XYZ or XYZRGB values. """ return self._transform_points(points_3d, extrinsics, self._camera2world_transform) def world2camera(self, extrinsics, points_3d): """Transform from world coordinates (3D) to camera coordinates (3D). """ return self._transform_points(points_3d, extrinsics, self._world2camera_transform) @property def intrinsics(self): return self._intrinsics @property def camera_matrix(self): return self._camera_matrix @property def K_inv(self): return self._K_inv def colorize_pointcloud(xyz, label, ignore_label=255): assert label[label != ignore_label].max() < len(COLOR_MAP_RGB), 'Not enough colors.' label_rgb = np.array([COLOR_MAP_RGB[i] if i != ignore_label else IGNORE_COLOR for i in label]) return np.hstack((xyz, label_rgb)) class PlyWriter(object): POINTCLOUD_DTYPE = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')] @classmethod def read_txt(cls, txtfile): # Read txt file and parse its content. with open(txtfile) as f: pointcloud = [l.split() for l in f] # Load point cloud to named numpy array. pointcloud = np.array(pointcloud).astype(np.float32) assert pointcloud.shape[1] == 6 xyz = pointcloud[:, :3].astype(np.float32) rgb = pointcloud[:, 3:].astype(np.uint8) return xyz, rgb @staticmethod def write_ply(array, filepath): ply_el = PlyElement.describe(array, 'vertex') target_path, _ = os.path.split(filepath) if target_path != '' and not os.path.exists(target_path): os.makedirs(target_path) PlyData([ply_el]).write(filepath) @classmethod def write_vertex_only_ply(cls, vertices, filepath): # assume that points are N x 3 np array for vertex locations color = 255 * np.ones((len(vertices), 3)) pc_points = np.array([tuple(p) for p in np.concatenate((vertices, color), axis=1)], dtype=cls.POINTCLOUD_DTYPE) cls.write_ply(pc_points, filepath) @classmethod def write_ply_vert_color(cls, vertices, colors, filepath): # assume that points are N x 3 np array for vertex locations pc_points = np.array([tuple(p) for p in np.concatenate((vertices, colors), axis=1)], dtype=cls.POINTCLOUD_DTYPE) cls.write_ply(pc_points, filepath) @classmethod def concat_label(cls, target, xyz, label): subpointcloud = np.concatenate([xyz, label], axis=1) subpointcloud = np.array([tuple(l) for l in subpointcloud], dtype=cls.POINTCLOUD_DTYPE) return np.concatenate([target, subpointcloud], axis=0)
ContrastiveSceneContexts-main
downstream/insseg/lib/pc_utils.py
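A usage sketch for the Camera class above, back-projecting a small synthetic depth map into XYZRGB points. The intrinsic values and image size are made up, an explicit RGB image is passed alongside the depth map, and the import path is assumed from this repo layout:

# Back-project a tiny synthetic depth map into an Nx6 XYZRGB point array.
import numpy as np
from lib.pc_utils import Camera  # import path assumed

intrinsics = {'focal_length': 525.0, 'pp_x': 4.0, 'pp_y': 4.0}  # made-up values
cam = Camera(intrinsics)

H, W = 8, 8
depth = (np.random.rand(H, W).astype(np.float32) * 100.0) + 50.0  # depths in cm
rgb = (np.random.rand(H, W, 3) * 255).astype(np.uint8)

points = cam.backproject(depth, rgb_img=rgb)  # Nx6 rows of XYZRGB
print(points.shape)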
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import json import logging import os import errno import time import torch import numpy as np from omegaconf import OmegaConf from lib.pc_utils import colorize_pointcloud, save_point_cloud from lib.distributed import get_world_size def load_state_with_same_shape(model, weights): # weights['conv1.kernel'] = weights['conv1.kernel'].repeat([1,3,1])/3.0 model_state = model.state_dict() if list(weights.keys())[0].startswith('module.'): logging.info("Loading multigpu weights with module. prefix...") weights = {k.partition('module.')[2]:weights[k] for k in weights.keys()} if list(weights.keys())[0].startswith('encoder.'): logging.info("Loading multigpu weights with encoder. prefix...") weights = {k.partition('encoder.')[2]:weights[k] for k in weights.keys()} # print(weights.items()) # print("===================") # print("===================") # print("===================") # print("===================") # print("===================") # print(model_state) filtered_weights = { k: v for k, v in weights.items() if k in model_state and v.size() == model_state[k].size() } logging.info("Loading weights:" + ', '.join(filtered_weights.keys())) return filtered_weights def checkpoint(model, optimizer, epoch, iteration, config, best_val_miou=None, best_val_mAP=None, postfix=None): mkdir_p('weights') filename = f"checkpoint_{config.net.model}_iter{iteration}.pth" if config.train.overwrite_weights: filename = f"checkpoint_{config.net.model}.pth" if postfix is not None: filename = f"checkpoint_{config.net.model}_{postfix}.pth" checkpoint_file = 'weights/' + filename _model = model.module if get_world_size() > 1 else model state = { 'iteration': iteration, 'epoch': epoch, 'arch': config.net.model, 'state_dict': _model.state_dict(), 'optimizer': optimizer.state_dict() } if best_val_miou is not None: state['best_val_miou'] = best_val_miou state['best_val_iter'] = iteration if best_val_mAP is not None: state['best_val_mAP'] = best_val_mAP state['best_val_iter'] = iteration # save config OmegaConf.save(config, 'config.yaml') torch.save(state, checkpoint_file) logging.info(f"Checkpoint saved to {checkpoint_file}") if postfix == None: # Delete symlink if it exists if os.path.exists('weights/weights.pth'): os.remove('weights/weights.pth') # Create symlink os.system('ln -s {} weights/weights.pth'.format(filename)) def precision_at_one(pred, target, ignore_label=255): """Computes the precision@k for the specified values of k""" # batch_size = target.size(0) * target.size(1) * target.size(2) pred = pred.view(1, -1) target = target.view(1, -1) correct = pred.eq(target) correct = correct[target != ignore_label] correct = correct.view(-1) if correct.nelement(): return correct.float().sum(0).mul(100.0 / correct.size(0)).item() else: return float('nan') def fast_hist(pred, label, n): k = (label >= 0) & (label < n) return np.bincount(n * label[k].astype(int) + pred[k], minlength=n**2).reshape(n, n) def per_class_iu(hist): with np.errstate(divide='ignore', invalid='ignore'): return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist)) class WithTimer(object): """Timer for with statement.""" def __init__(self, name=None): self.name = name def __enter__(self): self.tstart = time.time() def __exit__(self, type, value, traceback): out_str = 'Elapsed: %s' % (time.time() - self.tstart) if self.name: logging.info('[{self.name}]') logging.info(out_str) 
class Timer(object): """A simple timer.""" def __init__(self): self.total_time = 0. self.calls = 0 self.start_time = 0. self.diff = 0. self.average_time = 0. def reset(self): self.total_time = 0 self.calls = 0 self.start_time = 0 self.diff = 0 self.averate_time = 0 def tic(self): # using time.time instead of time.clock because time time.clock # does not normalize for multithreading self.start_time = time.time() def toc(self, average=True): self.diff = time.time() - self.start_time self.total_time += self.diff self.calls += 1 self.average_time = self.total_time / self.calls if average: return self.average_time else: return self.diff class ExpTimer(Timer): """ Exponential Moving Average Timer """ def __init__(self, alpha=0.5): super(ExpTimer, self).__init__() self.alpha = alpha def toc(self): self.diff = time.time() - self.start_time self.average_time = self.alpha * self.diff + \ (1 - self.alpha) * self.average_time return self.average_time class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def mkdir_p(path): try: os.makedirs(path) except OSError as exc: if exc.errno == errno.EEXIST and os.path.isdir(path): pass else: raise def read_txt(path): """Read txt file into lines. """ with open(path) as f: lines = f.readlines() lines = [x.strip() for x in lines] return lines def debug_on(): import sys import pdb import functools import traceback def decorator(f): @functools.wraps(f) def wrapper(*args, **kwargs): try: return f(*args, **kwargs) except Exception: info = sys.exc_info() traceback.print_exception(*info) pdb.post_mortem(info[2]) return wrapper return decorator def get_prediction(dataset, output, target): return output.max(1)[1] def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) def get_torch_device(is_cuda): return torch.device('cuda' if is_cuda else 'cpu') class HashTimeBatch(object): def __init__(self, prime=5279): self.prime = prime def __call__(self, time, batch): return self.hash(time, batch) def hash(self, time, batch): return self.prime * batch + time def dehash(self, key): time = key % self.prime batch = key / self.prime return time, batch def save_rotation_pred(iteration, pred, dataset, save_pred_dir): """Save prediction results in original pointcloud scale.""" decode_label_map = {} for k, v in dataset.label_map.items(): decode_label_map[v] = k pred = np.array([decode_label_map[x] for x in pred], dtype=np.int) out_rotation_txt = dataset.get_output_id(iteration) + '.txt' out_rotation_path = save_pred_dir + '/' + out_rotation_txt np.savetxt(out_rotation_path, pred, fmt='%i') def visualize_results(coords, input, target, upsampled_pred, config, iteration): # Get filter for valid predictions in the first batch. target_batch = coords[:, 3].numpy() == 0 input_xyz = coords[:, :3].numpy() target_valid = target.numpy() != 255 target_pred = np.logical_and(target_batch, target_valid) target_nonpred = np.logical_and(target_batch, ~target_valid) ptc_nonpred = np.hstack((input_xyz[target_nonpred], np.zeros((np.sum(target_nonpred), 3)))) # Unwrap file index if tested with rotation. file_iter = iteration if config.test_rotation >= 1: file_iter = iteration // config.test.test_rotation # Create directory to save visualization results. 
os.makedirs(config.test.visualize_path, exist_ok=True) # Label visualization in RGB. xyzlabel = colorize_pointcloud(input_xyz[target_pred], upsampled_pred[target_pred]) xyzlabel = np.vstack((xyzlabel, ptc_nonpred)) filename = '_'.join([config.dataset, config.model, 'pred', '%04d.ply' % file_iter]) save_point_cloud(xyzlabel, os.path.join(config.test.visualize_path, filename), verbose=False) # RGB input values visualization. xyzrgb = np.hstack((input_xyz[target_batch], input[:, :3].cpu().numpy()[target_batch])) filename = '_'.join([config.dataset, config.model, 'rgb', '%04d.ply' % file_iter]) save_point_cloud(xyzrgb, os.path.join(config.test.visualize_path, filename), verbose=False) # Ground-truth visualization in RGB. xyzgt = colorize_pointcloud(input_xyz[target_pred], target.numpy()[target_pred]) xyzgt = np.vstack((xyzgt, ptc_nonpred)) filename = '_'.join([config.dataset, config.model, 'gt', '%04d.ply' % file_iter]) save_point_cloud(xyzgt, os.path.join(config.test.visualize_path, filename), verbose=False) def permute_pointcloud(input_coords, pointcloud, transformation, label_map, voxel_output, voxel_pred): """Get permutation from pointcloud to input voxel coords.""" def _hash_coords(coords, coords_min, coords_dim): return np.ravel_multi_index((coords - coords_min).T, coords_dim) # Validate input. input_batch_size = input_coords[:, -1].max().item() pointcloud_batch_size = pointcloud[:, -1].max().int().item() transformation_batch_size = transformation[:, -1].max().int().item() assert input_batch_size == pointcloud_batch_size == transformation_batch_size pointcloud_permutation, pointcloud_target = [], [] # Process each batch. for i in range(input_batch_size + 1): # Filter batch from the data. input_coords_mask_b = input_coords[:, -1] == i input_coords_b = (input_coords[input_coords_mask_b])[:, :-1].numpy() pointcloud_b = pointcloud[pointcloud[:, -1] == i, :-1].numpy() transformation_b = transformation[i, :-1].reshape(4, 4).numpy() # Transform original pointcloud to voxel space. original_coords1 = np.hstack((pointcloud_b[:, :3], np.ones((pointcloud_b.shape[0], 1)))) original_vcoords = np.floor(original_coords1 @ transformation_b.T)[:, :3].astype(int) # Hash input and voxel coordinates to flat coordinate. vcoords_all = np.vstack((input_coords_b, original_vcoords)) vcoords_min = vcoords_all.min(0) vcoords_dims = vcoords_all.max(0) - vcoords_all.min(0) + 1 input_coords_key = _hash_coords(input_coords_b, vcoords_min, vcoords_dims) original_vcoords_key = _hash_coords(original_vcoords, vcoords_min, vcoords_dims) # Query voxel predictions from original pointcloud. key_to_idx = dict(zip(input_coords_key, range(len(input_coords_key)))) pointcloud_permutation.append( np.array([key_to_idx.get(i, -1) for i in original_vcoords_key])) pointcloud_target.append(pointcloud_b[:, -1].astype(int)) pointcloud_permutation = np.concatenate(pointcloud_permutation) # Prepare pointcloud permutation array. pointcloud_permutation = torch.from_numpy(pointcloud_permutation) permutation_mask = pointcloud_permutation >= 0 permutation_valid = pointcloud_permutation[permutation_mask] # Permuate voxel output to pointcloud. pointcloud_output = torch.zeros(pointcloud.shape[0], voxel_output.shape[1]).to(voxel_output) pointcloud_output[permutation_mask] = voxel_output[permutation_valid] # Permuate voxel prediction to pointcloud. # NOTE: Invalid points (points found in pointcloud but not in the voxel) are mapped to 0. 
pointcloud_pred = torch.ones(pointcloud.shape[0]).int().to(voxel_pred) * 0 pointcloud_pred[permutation_mask] = voxel_pred[permutation_valid] # Map pointcloud target to respect dataset IGNORE_LABELS pointcloud_target = torch.from_numpy( np.array([label_map[i] for i in np.concatenate(pointcloud_target)])).int() return pointcloud_output, pointcloud_pred, pointcloud_target
ContrastiveSceneContexts-main
downstream/insseg/lib/utils.py
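lib/utils.py above provides precision_at_one (accuracy over non-ignored targets) and AverageMeter (a running weighted mean). A small sketch combining the two over a few fake batches, with the import path assumed from this repo layout:

# Track precision@1 across several random stand-in batches.
import torch
from lib.utils import precision_at_one, AverageMeter  # import path assumed

scores = AverageMeter()
for _ in range(3):
    pred = torch.randint(0, 20, (4096,))
    target = torch.randint(0, 20, (4096,))
    target[:100] = 255  # entries with the ignore label are excluded from the score
    acc = precision_at_one(pred, target, ignore_label=255)
    scores.update(acc, target.size(0))

print("running precision@1: {:.2f}%".format(scores.avg))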
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from scipy.sparse import csr_matrix import torch class SparseMM(torch.autograd.Function): """ Sparse x dense matrix multiplication with autograd support. Implementation by Soumith Chintala: https://discuss.pytorch.org/t/ does-pytorch-support-autograd-on-sparse-matrix/6156/7 """ def forward(self, matrix1, matrix2): self.save_for_backward(matrix1, matrix2) return torch.mm(matrix1, matrix2) def backward(self, grad_output): matrix1, matrix2 = self.saved_tensors grad_matrix1 = grad_matrix2 = None if self.needs_input_grad[0]: grad_matrix1 = torch.mm(grad_output, matrix2.t()) if self.needs_input_grad[1]: grad_matrix2 = torch.mm(matrix1.t(), grad_output) return grad_matrix1, grad_matrix2 def sparse_float_tensor(values, indices, size=None): """ Return a torch sparse matrix give values and indices (row_ind, col_ind). If the size is an integer, return a square matrix with side size. If the size is a torch.Size, use it to initialize the out tensor. If none, the size is inferred. """ indices = torch.stack(indices).int() sargs = [indices, values.float()] if size is not None: # Use the provided size if isinstance(size, int): size = torch.Size((size, size)) sargs.append(size) if values.is_cuda: return torch.cuda.sparse.FloatTensor(*sargs) else: return torch.sparse.FloatTensor(*sargs) def diags(values, size=None): values = values.view(-1) n = values.nelement() size = torch.Size((n, n)) indices = (torch.arange(0, n), torch.arange(0, n)) return sparse_float_tensor(values, indices, size) def sparse_to_csr_matrix(tensor): tensor = tensor.cpu() inds = tensor._indices().numpy() vals = tensor._values().numpy() return csr_matrix((vals, (inds[0], inds[1])), shape=[s for s in tensor.shape]) def csr_matrix_to_sparse(mat): row_ind, col_ind = mat.nonzero() return sparse_float_tensor( torch.from_numpy(mat.data), (torch.from_numpy(row_ind), torch.from_numpy(col_ind)), size=torch.Size(mat.shape))
ContrastiveSceneContexts-main
downstream/insseg/lib/math_functions.py
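The helpers above convert between scipy CSR matrices and torch sparse tensors. A standalone sketch of the same round trip, written against torch.sparse_coo_tensor (which expects int64 indices) so it runs on current PyTorch and uses nothing from the repo:

# scipy CSR -> torch sparse COO -> scipy CSR round trip, plus a sparse x dense product.
import numpy as np
import torch
from scipy.sparse import csr_matrix

mat = csr_matrix(np.array([[0., 2., 0.],
                           [1., 0., 3.]]))

# CSR -> torch sparse COO
row, col = mat.nonzero()
indices = torch.stack((torch.from_numpy(row), torch.from_numpy(col))).long()
values = torch.from_numpy(mat.data).float()
sparse = torch.sparse_coo_tensor(indices, values, size=mat.shape)

# torch sparse COO -> CSR
sparse = sparse.coalesce()
inds = sparse.indices().numpy()
vals = sparse.values().numpy()
back = csr_matrix((vals, (inds[0], inds[1])), shape=tuple(sparse.shape))

print(torch.sparse.mm(sparse, torch.eye(3)))          # dense result of sparse x dense
print(np.allclose(back.toarray(), mat.toarray()))     # round trip preserves the matrix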
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn from MinkowskiEngine import MinkowskiGlobalPooling, MinkowskiBroadcastAddition, MinkowskiBroadcastMultiplication class MinkowskiLayerNorm(nn.Module): def __init__(self, num_features, eps=1e-5, D=-1): super(MinkowskiLayerNorm, self).__init__() self.num_features = num_features self.eps = eps self.weight = nn.Parameter(torch.ones(1, num_features)) self.bias = nn.Parameter(torch.zeros(1, num_features)) self.mean_in = MinkowskiGlobalPooling(dimension=D) self.glob_sum = MinkowskiBroadcastAddition(dimension=D) self.glob_sum2 = MinkowskiBroadcastAddition(dimension=D) self.glob_mean = MinkowskiGlobalPooling(dimension=D) self.glob_times = MinkowskiBroadcastMultiplication(dimension=D) self.D = D self.reset_parameters() def __repr__(self): s = f'(D={self.D})' return self.__class__.__name__ + s def reset_parameters(self): self.weight.data.fill_(1) self.bias.data.zero_() def _check_input_dim(self, input): if input.F.dim() != 2: raise ValueError('expected 2D input (got {}D input)'.format(input.dim())) def forward(self, x): self._check_input_dim(x) mean = self.mean_in(x).F.mean(-1, keepdim=True) mean = mean + torch.zeros(mean.size(0), self.num_features).type_as(mean) temp = self.glob_sum(x.F, -mean)**2 var = self.glob_mean(temp.data).mean(-1, keepdim=True) var = var + torch.zeros(var.size(0), self.num_features).type_as(var) instd = 1 / (var + self.eps).sqrt() x = self.glob_times(self.glob_sum2(x, -mean), instd) return x * self.weight + self.bias class MinkowskiInstanceNorm(nn.Module): def __init__(self, num_features, eps=1e-5, D=-1): super(MinkowskiInstanceNorm, self).__init__() self.eps = eps self.weight = nn.Parameter(torch.ones(1, num_features)) self.bias = nn.Parameter(torch.zeros(1, num_features)) self.mean_in = MinkowskiGlobalPooling(dimension=D) self.glob_sum = MinkowskiBroadcastAddition(dimension=D) self.glob_sum2 = MinkowskiBroadcastAddition(dimension=D) self.glob_mean = MinkowskiGlobalPooling(dimension=D) self.glob_times = MinkowskiBroadcastMultiplication(dimension=D) self.D = D self.reset_parameters() def __repr__(self): s = f'(pixel_dist={self.pixel_dist}, D={self.D})' return self.__class__.__name__ + s def reset_parameters(self): self.weight.data.fill_(1) self.bias.data.zero_() def _check_input_dim(self, input): if input.dim() != 2: raise ValueError('expected 2D input (got {}D input)'.format(input.dim())) def forward(self, x): self._check_input_dim(x) mean_in = self.mean_in(x) temp = self.glob_sum(x, -mean_in)**2 var_in = self.glob_mean(temp.data) instd_in = 1 / (var_in + self.eps).sqrt() x = self.glob_times(self.glob_sum2(x, -mean_in), instd_in) return x * self.weight + self.bias
ContrastiveSceneContexts-main
downstream/insseg/lib/layers.py
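Per sample, the Minkowski normalization layers above roughly correspond to shifting and scaling the features by a single mean and standard deviation computed over all of a sample's points and channels, followed by a learnable per-channel scale and bias. A dense-tensor sketch of that computation for one sample (shapes and values are made up; this is an illustration, not the sparse implementation):

# Dense sketch of the per-sample normalization the layers above implement.
import torch

N, C = 100, 32              # points and feature channels of one sample
feats = torch.randn(N, C)
weight = torch.ones(1, C)   # learnable per-channel scale (initialised to 1)
bias = torch.zeros(1, C)    # learnable per-channel shift (initialised to 0)
eps = 1e-5

mean = feats.mean()                     # one scalar over all points and channels
var = feats.var(unbiased=False)
normalized = (feats - mean) / torch.sqrt(var + eps)
out = normalized * weight + bias
print(out.shape, float(out.mean()), float(out.std()))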
import os import torch import numpy as np from torch.autograd import Function import argparse #from lib.datasets.scannet.datagen.export_ids_per_vertex import read_segmentation, write_triangle_mesh #from lib.utils.io import read_triangle_mesh, create_color_palette, write_triangle_mesh #from lib.utils.scannet_benchmark_utils import util_3d import PG_OP class BallQueryBatchP(Function): @staticmethod def forward(ctx, coords, batch_idxs, batch_offsets, radius, meanActive): ''' :param ctx: :param coords: (n, 3) float :param batch_idxs: (n) int :param batch_offsets: (B+1) int :param radius: float :param meanActive: int :return: idx (nActive), int :return: start_len (n, 2), int ''' n = coords.size(0) assert coords.is_contiguous() and coords.is_cuda assert batch_idxs.is_contiguous() and batch_idxs.is_cuda assert batch_offsets.is_contiguous() and batch_offsets.is_cuda while True: idx = torch.cuda.IntTensor(n * meanActive).zero_() start_len = torch.cuda.IntTensor(n, 2).zero_() nActive = PG_OP.ballquery_batch_p(coords, batch_idxs, batch_offsets, idx, start_len, n, meanActive, radius) if nActive <= n * meanActive: break meanActive = int(nActive // n + 1) idx = idx[:nActive] return idx, start_len @staticmethod def backward(ctx, a=None, b=None): return None, None, None ballquery_batch_p = BallQueryBatchP.apply class BFSCluster(Function): @staticmethod def forward(ctx, semantic_label, ball_query_idxs, start_len, threshold): ''' :param ctx: :param semantic_label: (N), int :param ball_query_idxs: (nActive), int :param start_len: (N, 2), int :return: cluster_idxs: int (sumNPoint, 2), dim 0 for cluster_id, dim 1 for corresponding point idxs in N :return: cluster_offsets: int (nCluster + 1) ''' N = start_len.size(0) assert semantic_label.is_contiguous() assert ball_query_idxs.is_contiguous() assert start_len.is_contiguous() cluster_idxs = semantic_label.new() cluster_offsets = semantic_label.new() PG_OP.bfs_cluster(semantic_label, ball_query_idxs, start_len, cluster_idxs, cluster_offsets, N, threshold) return cluster_idxs, cluster_offsets @staticmethod def backward(ctx, a=None): return None bfs_cluster = BFSCluster.apply class Clustering: def __init__(self, ignored_labels, class_mapping, thresh=0.03, closed_points=300, min_points=50, propose_points=100, score_func=torch.max) -> None: self.ignored_labels = ignored_labels self.thresh = thresh self.closed_points = closed_points self.min_points = min_points self.class_mapping = class_mapping.cuda() self.propose_points = propose_points self.score_func=score_func def cluster(self, vertices, scores): labels = torch.max(scores, 1)[1] # (N) long, cuda proposals_idx, proposals_offset = self.cluster_(vertices, labels.cuda()) ## debug #import ipdb; ipdb.set_trace() #colors = np.array(create_color_palette())[labels.cpu()] #write_triangle_mesh(vertices, colors, None, 'semantics.ply') # scatter proposals_pred = torch.zeros((proposals_offset.shape[0] - 1, vertices.shape[0]), dtype=torch.int) # (nProposal, N), int, cuda proposals_pred[proposals_idx[:, 0].long(), proposals_idx[:, 1].long()] = 1 labels = labels[proposals_idx[:, 1][proposals_offset[:-1].long()].long()] proposals_pointnum = proposals_pred.sum(1) npoint_mask = (proposals_pointnum > self.propose_points) proposals_pred = proposals_pred[npoint_mask] labels = labels[npoint_mask] return proposals_pred, labels def cluster_(self, vertices, labels): ''' :param batch_idxs: (N), int, cuda :labels: 0-19 ''' batch_idxs = torch.zeros_like(labels) mask_non_ignored = torch.ones_like(labels).bool() for ignored_label in 
self.ignored_labels: mask_non_ignored = mask_non_ignored & (self.class_mapping[labels] != ignored_label) object_idxs = mask_non_ignored.nonzero().view(-1) vertices_ = torch.from_numpy(vertices)[object_idxs].float().cuda() labels_ = labels[object_idxs].int().cuda() if vertices_.numel() == 0: return torch.zeros((0,2)).int(), torch.zeros(1).int() batch_idxs_ = batch_idxs[object_idxs].int().cuda() batch_offsets_ = torch.FloatTensor([0, object_idxs.shape[0]]).int().cuda() idx, start_len = ballquery_batch_p(vertices_, batch_idxs_, batch_offsets_, self.thresh, self.closed_points) proposals_idx, proposals_offset = bfs_cluster(labels_.cpu(), idx.cpu(), start_len.cpu(), self.min_points) proposals_idx[:, 1] = object_idxs[proposals_idx[:, 1].long()].int() return proposals_idx, proposals_offset def get_instances(self, vertices, scores): proposals_pred, labels = self.cluster(vertices, scores) instances = {} for proposal_id in range(len(proposals_pred)): clusters_i = proposals_pred[proposal_id] score = scores[clusters_i.bool(), labels[proposal_id]] score = self.score_func(score) instances[proposal_id] = {} instances[proposal_id]['conf'] = score.cpu().numpy() instances[proposal_id]['label_id'] = self.class_mapping.cpu()[labels[proposal_id]] instances[proposal_id]['pred_mask'] = clusters_i.cpu().numpy() return instances
ContrastiveSceneContexts-main
downstream/insseg/lib/bfs/bfs.py
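A usage sketch for the Clustering helper above. It needs a CUDA device and the compiled PG_OP extension, so this is illustrative rather than a standalone script; the class counts, ignored labels, and thresholds are stand-in values, and the import path is assumed:

# Cluster per-point class scores into instance proposals (requires CUDA + PG_OP).
import numpy as np
import torch
from lib.bfs.bfs import Clustering  # import path assumed

num_classes = 20
vertices = np.random.rand(5000, 3).astype(np.float32)            # stand-in point cloud
scores = torch.rand(5000, num_classes).softmax(dim=1).cuda()     # stand-in class scores

clusterer = Clustering(
    ignored_labels=[0, 1],                        # e.g. wall/floor do not form instances
    class_mapping=torch.arange(num_classes),      # identity mapping, for illustration only
    thresh=0.03, closed_points=300, min_points=50, propose_points=100)

instances = clusterer.get_instances(vertices, scores)
for inst_id, inst in instances.items():
    print(inst_id, int(inst['label_id']), float(inst['conf']), inst['pred_mask'].sum())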
'''
PointGroup operations
Written by Li Jiang
'''
ContrastiveSceneContexts-main
downstream/insseg/lib/bfs/ops/ops.py