from stanfordnlp.models.common.pretrain import Pretrain
from stanfordnlp.models.depparse.data import DataLoader
from stanfordnlp.models.depparse.trainer import Trainer
from stanfordnlp.pipeline.processor import UDProcessor
class DepparseProcessor(UDProcessor):
def __init__(self, config, use_gpu):
# set up configurations
# get pretrained word vectors
self.pretrain = Pretrain(config['pretrain_path'])
# set up trainer
self.trainer = Trainer(pretrain=self.pretrain, model_file=config['model_path'], use_cuda=use_gpu)
self.build_final_config(config)
def process(self, doc):
batch = DataLoader(
doc, self.config['batch_size'], self.config, self.pretrain, vocab=self.vocab, evaluation=True)
preds = []
for i, b in enumerate(batch):
preds += self.trainer.predict(b)
batch.conll.set(['head', 'deprel'], [y for x in preds for y in x])
return batch.conll
| stanfordnlp-master | stanfordnlp/pipeline/depparse_processor.py |
| stanfordnlp-master | stanfordnlp/pipeline/__init__.py |
"""
Pipeline that runs tokenize,mwt,pos,lemma,depparse
"""
import itertools
import torch
from distutils.util import strtobool
from stanfordnlp.pipeline.doc import Document
from stanfordnlp.pipeline.tokenize_processor import TokenizeProcessor
from stanfordnlp.pipeline.mwt_processor import MWTProcessor
from stanfordnlp.pipeline.pos_processor import POSProcessor
from stanfordnlp.pipeline.lemma_processor import LemmaProcessor
from stanfordnlp.pipeline.depparse_processor import DepparseProcessor
from stanfordnlp.utils.resources import DEFAULT_MODEL_DIR, default_treebanks, mwt_languages, build_default_config
DEFAULT_PROCESSORS_LIST = 'tokenize,mwt,pos,lemma,depparse'
NAME_TO_PROCESSOR_CLASS = {'tokenize': TokenizeProcessor, 'mwt': MWTProcessor, 'pos': POSProcessor,
'lemma': LemmaProcessor, 'depparse': DepparseProcessor}
PIPELINE_SETTINGS = ['lang', 'shorthand', 'mode']
# list of settings for each processor
PROCESSOR_SETTINGS = {
'tokenize': ['anneal', 'anneal_after', 'batch_size', 'conv_filters', 'conv_res', 'dropout', 'emb_dim', 'feat_dim',
'feat_funcs', 'hidden_dim', 'hier_invtemp', 'hierarchical', 'input_dropout', 'lr0', 'max_grad_norm',
'max_seqlen', 'pretokenized', 'report_steps', 'residual', 'rnn_layers', 'seed', 'shuffle_steps',
'steps', 'tok_noise', 'unit_dropout', 'vocab_size', 'weight_decay'],
'mwt': ['attn_type', 'batch_size', 'beam_size', 'decay_epoch', 'dict_only', 'dropout', 'emb_dim', 'emb_dropout',
'ensemble_dict', 'ensemble_early_stop', 'hidden_dim', 'log_step', 'lr', 'lr_decay', 'max_dec_len',
'max_grad_norm', 'num_epoch', 'num_layers', 'optim', 'seed', 'vocab_size'],
'pos': ['adapt_eval_interval', 'batch_size', 'beta2', 'char', 'char_emb_dim', 'char_hidden_dim', 'char_num_layers',
'char_rec_dropout', 'composite_deep_biaff_hidden_dim', 'deep_biaff_hidden_dim', 'dropout', 'eval_interval',
'hidden_dim', 'log_step', 'lr', 'max_grad_norm', 'max_steps', 'max_steps_before_stop', 'num_layers',
'optim', 'pretrain', 'rec_dropout', 'seed', 'share_hid', 'tag_emb_dim', 'transformed_dim', 'word_dropout',
'word_emb_dim', 'wordvec_dir'],
'lemma': ['alpha', 'attn_type', 'batch_size', 'beam_size', 'decay_epoch', 'dict_only', 'dropout', 'edit', 'emb_dim',
'emb_dropout', 'ensemble_dict', 'hidden_dim', 'log_step', 'lr', 'lr_decay', 'max_dec_len',
'max_grad_norm', 'num_edit', 'num_epoch', 'num_layers', 'optim', 'pos', 'pos_dim', 'pos_dropout',
'pos_vocab_size', 'seed', 'use_identity', 'vocab_size'],
'depparse': ['batch_size', 'beta2', 'char', 'char_emb_dim', 'char_hidden_dim', 'char_num_layers',
'char_rec_dropout', 'composite_deep_biaff_hidden_dim', 'deep_biaff_hidden_dim', 'distance', 'dropout',
'eval_interval', 'hidden_dim', 'linearization', 'log_step', 'lr', 'max_grad_norm', 'max_steps',
'max_steps_before_stop', 'num_layers', 'optim', 'pretrain', 'rec_dropout', 'sample_train', 'seed',
'shorthand', 'tag_emb_dim', 'transformed_dim', 'word_dropout', 'word_emb_dim', 'wordvec_dir']
}
PROCESSOR_SETTINGS_LIST = \
['_'.join(psp) for k, v in PROCESSOR_SETTINGS.items() for psp in itertools.product([k], v)]
BOOLEAN_PROCESSOR_SETTINGS = {
'tokenize': ['pretokenized'],
'mwt': ['dict_only'],
'lemma': ['dict_only', 'edit', 'ensemble_dict', 'pos', 'use_identity']
}
BOOLEAN_PROCESSOR_SETTINGS_LIST = \
['_'.join(psp) for k, v in BOOLEAN_PROCESSOR_SETTINGS.items() for psp in itertools.product([k], v)]
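# e.g. PROCESSOR_SETTINGS_LIST contains prefixed names such as 'tokenize_batch_size'
# and 'depparse_pretrain'; BOOLEAN_PROCESSOR_SETTINGS_LIST contains
# 'tokenize_pretokenized', 'lemma_use_identity', etc.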
class Pipeline:
def __init__(self, processors=DEFAULT_PROCESSORS_LIST, lang='en', models_dir=DEFAULT_MODEL_DIR, treebank=None,
use_gpu=True, **kwargs):
shorthand = default_treebanks[lang] if treebank is None else treebank
config = build_default_config(shorthand, models_dir)
config.update(kwargs)
self.config = config
self.config['processors'] = processors
self.config['lang'] = lang
self.config['shorthand'] = shorthand
self.config['models_dir'] = models_dir
self.processor_names = self.config['processors'].split(',')
self.processors = {'tokenize': None, 'mwt': None, 'lemma': None, 'pos': None, 'depparse': None}
# always use GPU if a GPU device can be found, unless use_gpu is explicitly set to be False
self.use_gpu = torch.cuda.is_available() and use_gpu
print("Use device: {}".format("gpu" if self.use_gpu else "cpu"))
# configs that are the same for all processors
pipeline_level_configs = {'lang': self.config['lang'], 'shorthand': self.config['shorthand'], 'mode': 'predict'}
self.standardize_config_values()
# set up processors
for processor_name in self.processor_names:
if processor_name == 'mwt' and self.config['shorthand'] not in mwt_languages:
continue
print('---')
print('Loading: ' + processor_name)
curr_processor_config = self.filter_config(processor_name, self.config)
curr_processor_config.update(pipeline_level_configs)
print('With settings: ')
print(curr_processor_config)
self.processors[processor_name] = NAME_TO_PROCESSOR_CLASS[processor_name](config=curr_processor_config,
use_gpu=self.use_gpu)
print("Done loading processors!")
print('---')
def filter_config(self, prefix, config_dict):
filtered_dict = {}
for key in config_dict.keys():
if key.split('_')[0] == prefix:
filtered_dict['_'.join(key.split('_')[1:])] = config_dict[key]
return filtered_dict
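# Illustrative example (values hypothetical): with prefix 'tokenize' and
# config_dict = {'tokenize_pretokenized': True, 'pos_batch_size': 1000},
# filter_config returns {'pretokenized': True}: the processor prefix is stripped
# and settings that belong to other processors are dropped.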
def standardize_config_values(self):
"""
Standardize config settings
1.) for boolean settings, convert string values to booleans via distutils.util.strtobool (which returns 1/0)
"""
standardized_entries = {}
for key, val in self.config.items():
if key in BOOLEAN_PROCESSOR_SETTINGS_LIST and isinstance(val, str):
standardized_entries[key] = strtobool(val)
self.config.update(standardized_entries)
def process(self, doc):
# run the pipeline
for processor_name in self.processor_names:
if self.processors[processor_name] is not None:
self.processors[processor_name].process(doc)
doc.load_annotations()
def __call__(self, doc):
if isinstance(doc, str):
doc = Document(doc)
self.process(doc)
return doc
| stanfordnlp-master | stanfordnlp/pipeline/core.py |
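A minimal usage sketch for the Pipeline class defined above. It assumes the English (en_ewt) models have already been downloaded to the default stanfordnlp_resources directory; the example text is hypothetical.

from stanfordnlp.pipeline.core import Pipeline

nlp = Pipeline(lang='en', use_gpu=False)       # loads tokenize,pos,lemma,depparse (mwt is skipped for en_ewt)
doc = nlp("Barack Obama was born in Hawaii.")  # __call__ wraps the string in a Document and runs process()
for sentence in doc.sentences:
    sentence.print_dependencies()              # prints (word, governor index, relation) triples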
"""
base classes for processors
"""
from abc import ABC, abstractmethod
# base class for all processors
class Processor(ABC):
@abstractmethod
def process(self, doc):
pass
# base class for UD processors
class UDProcessor(Processor):
@abstractmethod
def process(self, doc):
pass
def build_final_config(self, config):
# set configurations from loaded model
if self.trainer is not None:
loaded_args, self.vocab = self.trainer.args, self.trainer.vocab
# filter out unneeded args from model
loaded_args = {k: v for k, v in loaded_args.items() if not UDProcessor.filter_out_option(k)}
else:
loaded_args = {}
loaded_args.update(config)
self.config = loaded_args
@staticmethod
def filter_out_option(option):
options_to_filter = ['cpu', 'cuda', 'dev_conll_gold', 'epochs', 'lang', 'mode', 'save_name', 'shorthand']
if option.endswith('_file') or option.endswith('_dir'):
return True
elif option in options_to_filter:
return True
else:
return False
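# Illustrative examples: filter_out_option('model_file') and filter_out_option('save_dir')
# return True (suffix match), filter_out_option('mode') returns True (listed above),
# while filter_out_option('batch_size') returns False, so that option is kept.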
| stanfordnlp-master | stanfordnlp/pipeline/processor.py |
"""
Basic data structures
"""
import io
import re
from stanfordnlp.models.common.conll import FIELD_TO_IDX as CONLLU_FIELD_TO_IDX
multi_word_token_line = re.compile("([0-9]+)\-([0-9]+)")
class Document:
def __init__(self, text):
self._text = text
self._conll_file = None
self._sentences = []
@property
def conll_file(self):
""" Access the CoNLLFile of this document. """
return self._conll_file
@conll_file.setter
def conll_file(self, value):
""" Set the document's CoNLLFile value. """
self._conll_file = value
@property
def text(self):
""" Access text of this document. Example: 'This is a sentence.'"""
return self._text
@text.setter
def text(self, value):
""" Set the document's text value. Example: 'This is a sentence.'"""
self._text = value
@property
def sentences(self):
""" Access list of sentences for this document. """
return self._sentences
@sentences.setter
def sentences(self, value):
""" Set the list of tokens for this document. """
self._sentences = value
def load_annotations(self):
""" Integrate info from the CoNLLFile instance. """
self._sentences = [Sentence(token_list) for token_list in self.conll_file.sents]
def write_conll_to_file(self, file_path):
""" Write conll contents to file. """
self.conll_file.write_conll(file_path)
class Sentence:
def __init__(self, tokens):
self._tokens = []
self._words = []
self._process_tokens(tokens)
self._dependencies = []
# check if there is dependency info
if self.words[0].dependency_relation is not None:
self.build_dependencies()
def _process_tokens(self, tokens):
st, en = -1, -1
for tok in tokens:
m = multi_word_token_line.match(tok[CONLLU_FIELD_TO_IDX['id']])
if m:
st, en = int(m.group(1)), int(m.group(2))
self._tokens.append(Token(tok))
else:
new_word = Word(tok)
self._words.append(new_word)
idx = int(tok[CONLLU_FIELD_TO_IDX['id']])
if idx <= en:
self._tokens[-1].words.append(new_word)
new_word.parent_token = self._tokens[-1]
else:
self.tokens.append(Token(tok, words=[new_word]))
@property
def dependencies(self):
""" Access list of dependencies for this sentence. """
return self._dependencies
@dependencies.setter
def dependencies(self, value):
""" Set the list of dependencies for this sentence. """
self._dependencies = value
@property
def tokens(self):
""" Access list of tokens for this sentence. """
return self._tokens
@tokens.setter
def tokens(self, value):
""" Set the list of tokens for this sentence. """
self._tokens = value
@property
def words(self):
""" Access list of words for this sentence. """
return self._words
@words.setter
def words(self, value):
""" Set the list of words for this sentence. """
self._words = value
def build_dependencies(self):
for word in self.words:
if word.governor == 0:
# make a word for the ROOT
governor = Word(["0", "ROOT", "_", "_", "_", "_", "-1", "_", "_", "_", "_", "_"])
else:
# id is index in words list + 1
governor = self.words[word.governor-1]
self.dependencies.append((governor, word.dependency_relation, word))
def print_dependencies(self, file=None):
for dep_edge in self.dependencies:
print((dep_edge[2].text, dep_edge[0].index, dep_edge[1]), file=file)
def dependencies_string(self):
dep_string = io.StringIO()
self.print_dependencies(file=dep_string)
return dep_string.getvalue().strip()
def print_tokens(self, file=None):
for tok in self.tokens:
print(tok, file=file)
def tokens_string(self):
toks_string = io.StringIO()
self.print_tokens(file=toks_string)
return toks_string.getvalue().strip()
def print_words(self, file=None):
for word in self.words:
print(word, file=file)
def words_string(self):
wrds_string = io.StringIO()
self.print_words(file=wrds_string)
return wrds_string.getvalue().strip()
class Token:
def __init__(self, token_entry, words=None):
self._index = token_entry[CONLLU_FIELD_TO_IDX['id']]
self._text = token_entry[CONLLU_FIELD_TO_IDX['word']]
if words is None:
self.words = []
else:
self.words = words
@property
def words(self):
""" Access the list of syntactic words underlying this token. """
return self._words
@words.setter
def words(self, value):
""" Set this token's list of underlying syntactic words. """
self._words = value
for w in self._words:
w.parent_token = self
@property
def index(self):
""" Access index of this token. """
return self._index
@index.setter
def index(self, value):
""" Set the token's index value. """
self._index = value
@property
def text(self):
""" Access text of this token. Example: 'The'"""
return self._text
@text.setter
def text(self, value):
""" Set the token's text value. Example: 'The'"""
self._text = value
def __repr__(self):
return f"<{self.__class__.__name__} index={self.index};words={self.words}>"
class Word:
def __init__(self, word_entry):
self._index = word_entry[CONLLU_FIELD_TO_IDX['id']]
self._text = word_entry[CONLLU_FIELD_TO_IDX['word']]
self._lemma = word_entry[CONLLU_FIELD_TO_IDX['lemma']]
if self._lemma == '_':
self._lemma = None
self._upos = word_entry[CONLLU_FIELD_TO_IDX['upos']]
self._xpos = word_entry[CONLLU_FIELD_TO_IDX['xpos']]
self._feats = word_entry[CONLLU_FIELD_TO_IDX['feats']]
if self._upos == '_':
self._upos = None
self._xpos = None
self._feats = None
self._governor = word_entry[CONLLU_FIELD_TO_IDX['head']]
self._dependency_relation = word_entry[CONLLU_FIELD_TO_IDX['deprel']]
self._parent_token = None
# check if there is dependency information
if self._dependency_relation != '_':
self._governor = int(self._governor)
else:
self._governor = None
self._dependency_relation = None
@property
def dependency_relation(self):
""" Access dependency relation of this word. Example: 'nmod'"""
return self._dependency_relation
@dependency_relation.setter
def dependency_relation(self, value):
""" Set the word's dependency relation value. Example: 'nmod'"""
self._dependency_relation = value
@property
def lemma(self):
""" Access lemma of this word. """
return self._lemma
@lemma.setter
def lemma(self, value):
""" Set the word's lemma value. """
self._lemma = value
@property
def governor(self):
""" Access governor of this word. """
return self._governor
@governor.setter
def governor(self, value):
""" Set the word's governor value. """
self._governor = value
@property
def pos(self):
""" Access (treebank-specific) part-of-speech of this word. Example: 'NNP'"""
return self._xpos
@pos.setter
def pos(self, value):
""" Set the word's (treebank-specific) part-of-speech value. Example: 'NNP'"""
self._xpos = value
@property
def text(self):
""" Access text of this word. Example: 'The'"""
return self._text
@text.setter
def text(self, value):
""" Set the word's text value. Example: 'The'"""
self._text = value
@property
def xpos(self):
""" Access treebank-specific part-of-speech of this word. Example: 'NNP'"""
return self._xpos
@xpos.setter
def xpos(self, value):
""" Set the word's treebank-specific part-of-speech value. Example: 'NNP'"""
self._xpos = value
@property
def upos(self):
""" Access universal part-of-speech of this word. Example: 'DET'"""
return self._upos
@upos.setter
def upos(self, value):
""" Set the word's universal part-of-speech value. Example: 'DET'"""
self._upos = value
@property
def feats(self):
""" Access morphological features of this word. Example: 'Gender=Fem'"""
return self._feats
@feats.setter
def feats(self, value):
""" Set this word's morphological features. Example: 'Gender=Fem'"""
self._feats = value
@property
def parent_token(self):
""" Access the parent token of this word. """
return self._parent_token
@parent_token.setter
def parent_token(self, value):
""" Set this word's parent token. """
self._parent_token = value
@property
def index(self):
""" Access index of this word. """
return self._index
@index.setter
def index(self, value):
""" Set the word's index value. """
self._index = value
def __repr__(self):
features = ['index', 'text', 'lemma', 'upos', 'xpos', 'feats', 'governor', 'dependency_relation']
feature_str = ";".join(["{}={}".format(k, getattr(self, k)) for k in features if getattr(self, k) is not None])
return f"<{self.__class__.__name__} {feature_str}>"
| stanfordnlp-master | stanfordnlp/pipeline/doc.py |
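A small self-contained sketch of how Sentence, Token and Word fit together. The token rows are hypothetical and assume the standard 10-column CoNLL-U field order used by FIELD_TO_IDX (id, word, lemma, upos, xpos, feats, head, deprel, deps, misc).

from stanfordnlp.pipeline.doc import Sentence

rows = [
    ["1", "Dogs", "dog", "NOUN", "NNS", "Number=Plur", "2", "nsubj", "_", "_"],
    ["2", "run", "run", "VERB", "VBP", "_", "0", "root", "_", "_"],
]
sent = Sentence(rows)
print(sent.words[0].lemma)   # dog
sent.print_dependencies()    # ('Dogs', '2', 'nsubj') then ('run', '0', 'root')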
from stanfordnlp.models.common.conll import FIELD_TO_IDX
from stanfordnlp.models.lemma.data import DataLoader
from stanfordnlp.models.lemma.trainer import Trainer
from stanfordnlp.pipeline.processor import UDProcessor
class LemmaProcessor(UDProcessor):
def __init__(self, config, use_gpu):
# check if in identity mode
if config.get('use_identity') in ['True', True]:
self.use_identity = True
self.config = config
else:
self.use_identity = False
self.trainer = Trainer(model_file=config['model_path'], use_cuda=use_gpu)
self.build_final_config(config)
def process(self, doc):
if not self.use_identity:
batch = DataLoader(doc, self.config['batch_size'], self.config, vocab=self.vocab, evaluation=True)
else:
batch = DataLoader(doc, self.config['batch_size'], self.config, evaluation=True, conll_only=True)
if self.use_identity:
preds = [ln[FIELD_TO_IDX['word']] for sent in batch.conll.sents for ln in sent if '-' not in ln[0]]
elif self.config.get('dict_only', False):
preds = self.trainer.predict_dict(batch.conll.get(['word', 'upos']))
else:
preds = []
edits = []
for i, b in enumerate(batch):
ps, es = self.trainer.predict(b, self.config['beam_size'])
preds += ps
if es is not None:
edits += es
preds = self.trainer.postprocess(batch.conll.get(['word']), preds, edits=edits)
if self.config.get('ensemble_dict', False):
preds = self.trainer.ensemble(batch.conll.get(['word', 'upos']), preds)
# map empty string lemmas to '_'
preds = [max([(len(x),x), (0, '_')])[1] for x in preds]
batch.conll.set(['lemma'], preds)
| stanfordnlp-master | stanfordnlp/pipeline/lemma_processor.py |
import io
from stanfordnlp.models.common import conll
from stanfordnlp.models.mwt.data import DataLoader
from stanfordnlp.models.mwt.trainer import Trainer
from stanfordnlp.pipeline.processor import UDProcessor
class MWTProcessor(UDProcessor):
def __init__(self, config, use_gpu):
# set up configurations
self.trainer = Trainer(model_file=config['model_path'], use_cuda=use_gpu)
self.build_final_config(config)
def process(self, doc):
batch = DataLoader(doc, self.config['batch_size'], self.config, vocab=self.vocab, evaluation=True)
if len(batch) > 0:
dict_preds = self.trainer.predict_dict(batch.conll.get_mwt_expansion_cands())
# decide trainer type and run eval
if self.config['dict_only']:
preds = dict_preds
else:
preds = []
for i, b in enumerate(batch):
preds += self.trainer.predict(b)
if self.config.get('ensemble_dict', False):
preds = self.trainer.ensemble(batch.conll.get_mwt_expansion_cands(), preds)
else:
# no multi-word tokens in this batch, so there is nothing to expand
preds = []
with io.StringIO() as conll_with_mwt:
batch.conll.write_conll_with_mwt_expansions(preds, conll_with_mwt)
doc.conll_file = conll.CoNLLFile(input_str=conll_with_mwt.getvalue())
| stanfordnlp-master | stanfordnlp/pipeline/mwt_processor.py |
r"""
Python CoreNLP: a server based interface to Java CoreNLP.
"""
import io
import os
import logging
import json
import shlex
import subprocess
import time
import sys
from six.moves.urllib.parse import urlparse
import requests
from stanfordnlp.protobuf import Document, parseFromDelimitedString, writeToDelimitedString, to_text
__author__ = 'arunchaganty, kelvinguu, vzhong, wmonroe4'
logger = logging.getLogger(__name__)
class AnnotationException(Exception):
"""
Exception raised when there was an error communicating with the CoreNLP server.
"""
pass
class TimeoutException(AnnotationException):
"""
Exception raised when the CoreNLP server timed out.
"""
pass
class ShouldRetryException(Exception):
"""
Exception raised if the service should retry the request.
"""
pass
class PermanentlyFailedException(Exception):
"""
Exception raised if the service has permanently failed.
"""
pass
class RobustService(object):
"""
Service that resuscitates itself if it is not available.
"""
TIMEOUT = 15
def __init__(self, start_cmd, stop_cmd, endpoint, stdout=sys.stdout,
stderr=sys.stderr, be_quiet=False):
self.start_cmd = start_cmd and shlex.split(start_cmd)
self.stop_cmd = stop_cmd and shlex.split(stop_cmd)
self.endpoint = endpoint
self.stdout = stdout
self.stderr = stderr
self.server = None
self.is_active = False
self.be_quiet = be_quiet
def is_alive(self):
try:
return requests.get(self.endpoint + "/ping").ok
except requests.exceptions.ConnectionError as e:
raise ShouldRetryException(e)
def start(self):
if self.start_cmd:
if self.be_quiet:
# Issue #26: subprocess.DEVNULL isn't supported in python 2.7.
stderr = open(os.devnull, 'w')
else:
stderr = self.stderr
self.server = subprocess.Popen(self.start_cmd,
stderr=stderr,
stdout=stderr)
def stop(self):
if self.server:
self.server.kill()
if self.stop_cmd:
subprocess.run(self.stop_cmd, check=True)
self.is_active = False
def __enter__(self):
self.start()
return self
def __exit__(self, _, __, ___):
self.stop()
def ensure_alive(self):
# Check if the service is active and alive
if self.is_active:
try:
return self.is_alive()
except ShouldRetryException:
pass
# If not, try to start up the service.
if self.server is None:
self.start()
# Wait for the service to start up.
start_time = time.time()
while True:
try:
if self.is_alive():
break
except ShouldRetryException:
pass
if time.time() - start_time < self.TIMEOUT:
time.sleep(1)
else:
raise PermanentlyFailedException("Timed out waiting for service to come alive.")
# At this point we are guaranteed that the service is alive.
self.is_active = True
class CoreNLPClient(RobustService):
"""
A CoreNLP client to the Stanford CoreNLP server.
"""
DEFAULT_ANNOTATORS = "tokenize ssplit lemma pos ner depparse".split()
DEFAULT_PROPERTIES = {}
DEFAULT_OUTPUT_FORMAT = "serialized"
def __init__(self, start_server=True,
endpoint="http://localhost:9000",
timeout=30000,
threads=5,
annotators=None,
properties=None,
output_format=None,
stdout=sys.stdout,
stderr=sys.stderr,
memory="4G",
be_quiet=True,
max_char_length=100000
):
if isinstance(annotators, str):
annotators = annotators.split()
if start_server:
host, port = urlparse(endpoint).netloc.split(":")
assert host == "localhost", "If starting a server, endpoint must be localhost"
assert os.getenv("CORENLP_HOME") is not None, "Please define $CORENLP_HOME where your CoreNLP Java checkout is"
start_cmd = "java -Xmx{memory} -cp '{corenlp_home}/*' edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port {port} -timeout {timeout} -threads {threads} -maxCharLength {max_char_length}".format(
corenlp_home=os.getenv("CORENLP_HOME"),
port=port,
memory=memory,
timeout=timeout,
threads=threads,
max_char_length=max_char_length)
stop_cmd = None
else:
start_cmd = stop_cmd = None
super(CoreNLPClient, self).__init__(start_cmd, stop_cmd, endpoint,
stdout, stderr, be_quiet)
self.timeout = timeout
self.default_annotators = annotators or self.DEFAULT_ANNOTATORS
self.default_properties = properties or self.DEFAULT_PROPERTIES
self.default_output_format = output_format or self.DEFAULT_OUTPUT_FORMAT
def _request(self, buf, properties):
"""Send a request to the CoreNLP server.
:param (str | unicode) text: raw text for the CoreNLPServer to parse
:param (dict) properties: properties that the server expects
:return: request result
"""
self.ensure_alive()
try:
input_format = properties.get("inputFormat", "text")
if input_format == "text":
ctype = "text/plain; charset=utf-8"
elif input_format == "serialized":
ctype = "application/x-protobuf"
else:
raise ValueError("Unrecognized inputFormat " + input_format)
r = requests.post(self.endpoint,
params={'properties': str(properties)},
data=buf, headers={'content-type': ctype},
timeout=(self.timeout*2)/1000)
r.raise_for_status()
return r
except requests.HTTPError as e:
if r.text == "CoreNLP request timed out. Your document may be too long.":
raise TimeoutException(r.text)
else:
raise AnnotationException(r.text)
def annotate(self, text, annotators=None, output_format=None, properties=None):
"""Send a request to the CoreNLP server.
:param (str | unicode) text: raw text for the CoreNLPServer to parse
:param (list | string) annotators: list of annotators to use
:param (str) output_format: output type from server: serialized, json, text, conll, conllu, or xml
:param (dict) properties: properties that the server expects
:return: request result
"""
# set properties for server call
if properties is None:
properties = self.default_properties
properties.update({
'annotators': ','.join(annotators or self.default_annotators),
'inputFormat': 'text',
'outputFormat': self.default_output_format,
'serializer': 'edu.stanford.nlp.pipeline.ProtobufAnnotationSerializer'
})
elif "annotators" not in properties:
properties.update({'annotators': ','.join(annotators or self.default_annotators)})
# if an output_format is specified, use that to override
if output_format is not None:
properties["outputFormat"] = output_format
# make the request
r = self._request(text.encode('utf-8'), properties)
# customize what is returned based outputFormat
if properties["outputFormat"] == "serialized":
doc = Document()
parseFromDelimitedString(doc, r.content)
return doc
elif properties["outputFormat"] == "json":
return r.json()
elif properties["outputFormat"] in ["text", "conllu", "conll", "xml"]:
return r.text
else:
return r
def update(self, doc, annotators=None, properties=None):
if properties is None:
properties = self.default_properties
properties.update({
'annotators': ','.join(annotators or self.default_annotators),
'inputFormat': 'serialized',
'outputFormat': 'serialized',
'serializer': 'edu.stanford.nlp.pipeline.ProtobufAnnotationSerializer'
})
with io.BytesIO() as stream:
writeToDelimitedString(doc, stream)
msg = stream.getvalue()
r = self._request(msg, properties)
doc = Document()
parseFromDelimitedString(doc, r.content)
return doc
def tokensregex(self, text, pattern, filter=False, to_words=False, annotators=None, properties=None):
# this is required for some reason
matches = self.__regex('/tokensregex', text, pattern, filter, annotators, properties)
if to_words:
matches = regex_matches_to_indexed_words(matches)
return matches
def semgrex(self, text, pattern, filter=False, to_words=False, annotators=None, properties=None):
matches = self.__regex('/semgrex', text, pattern, filter, annotators, properties)
if to_words:
matches = regex_matches_to_indexed_words(matches)
return matches
def tregrex(self, text, pattern, filter=False, annotators=None, properties=None):
return self.__regex('/tregex', text, pattern, filter, annotators, properties)
def __regex(self, path, text, pattern, filter, annotators=None, properties=None):
"""Send a regex-related request to the CoreNLP server.
:param (str | unicode) path: the path for the regex endpoint
:param text: raw text for the CoreNLPServer to apply the regex
:param (str | unicode) pattern: regex pattern
:param (bool) filter: option to filter sentences that contain matches, if false returns matches
:param properties: additional request properties that the server expects
:return: request result
"""
self.ensure_alive()
if properties is None:
properties = self.default_properties
properties.update({
'annotators': ','.join(annotators or self.default_annotators),
'inputFormat': 'text',
'outputFormat': self.default_output_format,
'serializer': 'edu.stanford.nlp.pipeline.ProtobufAnnotationSerializer'
})
elif "annotators" not in properties:
properties.update({'annotators': ','.join(annotators or self.default_annotators)})
# HACK: For some stupid reason, CoreNLPServer will timeout if we
# need to annotate something from scratch. So, we need to call
# this to ensure that the _regex call doesn't timeout.
self.annotate(text, properties=properties)
try:
# Error occurs unless put properties in params
input_format = properties.get("inputFormat", "text")
if input_format == "text":
ctype = "text/plain; charset=utf-8"
elif input_format == "serialized":
ctype = "application/x-protobuf"
else:
raise ValueError("Unrecognized inputFormat " + input_format)
# change request method from `get` to `post` as required by CoreNLP
r = requests.post(
self.endpoint + path, params={
'pattern': pattern,
'filter': filter,
'properties': str(properties)
}, data=text,
headers={'content-type': ctype},
timeout=(self.timeout*2)/1000,
)
r.raise_for_status()
return json.loads(r.text)
except requests.HTTPError as e:
if r.text.startswith("Timeout"):
raise TimeoutException(r.text)
else:
raise AnnotationException(r.text)
except json.JSONDecodeError:
raise AnnotationException(r.text)
def regex_matches_to_indexed_words(matches):
"""Transforms tokensregex and semgrex matches to indexed words.
:param matches: unprocessed regex matches
:return: flat array of indexed words
"""
words = [dict(v, **dict([('sentence', i)]))
for i, s in enumerate(matches['sentences'])
for k, v in s.items() if k != 'length']
return words
__all__ = ["CoreNLPClient", "AnnotationException", "TimeoutException", "to_text"]
| stanfordnlp-master | stanfordnlp/server/client.py |
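A minimal usage sketch for CoreNLPClient. It assumes a local Java CoreNLP distribution with the CORENLP_HOME environment variable pointing at it; field access on the returned protobuf Document follows the CoreNLP.proto schema, and the example text is hypothetical.

from stanfordnlp.server.client import CoreNLPClient

text = "Chris wrote a simple sentence that he parsed with Stanford CoreNLP."
with CoreNLPClient(annotators=['tokenize', 'ssplit', 'pos', 'lemma'], timeout=30000, memory='4G') as client:
    ann = client.annotate(text)            # a protobuf Document with the default "serialized" output format
    token = ann.sentence[0].token[0]
    print(token.word, token.pos, token.lemma)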
from stanfordnlp.protobuf import to_text
from stanfordnlp.protobuf import Document, Sentence, Token, IndexedWord, Span
from stanfordnlp.protobuf import ParseTree, DependencyGraph, CorefChain
from stanfordnlp.protobuf import Mention, NERMention, Entity, Relation, RelationTriple, Timex
from stanfordnlp.protobuf import Quote, SpeakerInfo
from stanfordnlp.protobuf import Operator, Polarity
from stanfordnlp.protobuf import SentenceFragment, TokenLocation
from stanfordnlp.protobuf import MapStringString, MapIntString
from .client import CoreNLPClient, AnnotationException, TimeoutException
from .annotator import Annotator
| stanfordnlp-master | stanfordnlp/server/__init__.py |
"""
Defines a base class that can be used to annotate.
"""
import io
from multiprocessing import Process
from six.moves.BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from six.moves import http_client as HTTPStatus
from stanfordnlp.protobuf import Document, parseFromDelimitedString, writeToDelimitedString
class Annotator(Process):
"""
This annotator base class hosts a lightweight server that accepts
annotation requests from CoreNLP.
Each annotator simply defines 3 functions: requires, provides and annotate.
This class takes care of defining appropriate endpoints to interface
with CoreNLP.
"""
@property
def name(self):
"""
Name of the annotator (used by CoreNLP)
"""
raise NotImplementedError()
@property
def requires(self):
"""
The set of annotations that must already be present before this
annotator is called.
"""
raise NotImplementedError()
@property
def provides(self):
"""
The set of annotations guaranteed to be provided when we are done.
NOTE: these annotations are either fully qualified Java
class names or refer to nested classes of
edu.stanford.nlp.ling.CoreAnnotations (as is the case below).
"""
raise NotImplementedError()
def annotate(self, ann):
"""
@ann: a protobuf annotation (Document) object.
Populate @ann in place with this annotator's output.
"""
raise NotImplementedError()
@property
def properties(self):
"""
Defines the Java properties needed to register this annotator with CoreNLP.
"""
return {
"customAnnotatorClass.{}".format(self.name): "edu.stanford.nlp.pipeline.GenericWebServiceAnnotator",
"generic.endpoint": "http://{}:{}".format(self.host, self.port),
"generic.requires": ",".join(self.requires),
"generic.provides": ",".join(self.provides),
}
class _Handler(BaseHTTPRequestHandler):
annotator = None
def __init__(self, request, client_address, server):
BaseHTTPRequestHandler.__init__(self, request, client_address, server)
def do_GET(self):
"""
Handle a ping request
"""
if not self.path.endswith("/"): self.path += "/"
if self.path == "/ping/":
msg = "pong".encode("UTF-8")
self.send_response(HTTPStatus.OK)
self.send_header("Content-Type", "text/application")
self.send_header("Content-Length", len(msg))
self.end_headers()
self.wfile.write(msg)
else:
self.send_response(HTTPStatus.BAD_REQUEST)
self.end_headers()
def do_POST(self):
"""
Handle an annotate request
"""
if not self.path.endswith("/"): self.path += "/"
if self.path == "/annotate/":
# Read message
length = int(self.headers.get('content-length'))
msg = self.rfile.read(length)
# Do the annotation
doc = Document()
parseFromDelimitedString(doc, msg)
self.annotator.annotate(doc)
with io.BytesIO() as stream:
writeToDelimitedString(doc, stream)
msg = stream.getvalue()
# write message
self.send_response(HTTPStatus.OK)
self.send_header("Content-Type", "application/x-protobuf")
self.send_header("Content-Length", len(msg))
self.end_headers()
self.wfile.write(msg)
else:
self.send_response(HTTPStatus.BAD_REQUEST)
self.end_headers()
def __init__(self, host="", port=8432):
"""
Launches a server endpoint to communicate with CoreNLP
"""
Process.__init__(self)
self.host, self.port = host, port
self._Handler.annotator = self
def run(self):
"""
Runs the server using Python's simple HTTPServer.
TODO: make this multithreaded.
"""
httpd = HTTPServer((self.host, self.port), self._Handler)
sa = httpd.socket.getsockname()
serve_message = "Serving HTTP on {host} port {port} (http://{host}:{port}/) ..."
print(serve_message.format(host=sa[0], port=sa[1]))
try:
httpd.serve_forever()
except KeyboardInterrupt:
print("\nKeyboard interrupt received, exiting.")
httpd.shutdown()
| stanfordnlp-master | stanfordnlp/server/annotator.py |
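A minimal sketch of a custom annotator built on the Annotator base class above. The class name, annotator name, and provides list are hypothetical; annotate is left as a stub because the exact protobuf fields to populate depend on the annotations being provided.

from stanfordnlp.server.annotator import Annotator

class WhitespaceTokenizer(Annotator):
    @property
    def name(self):
        return "whitespace_tokenizer"

    @property
    def requires(self):
        return []

    @property
    def provides(self):
        return ["TokensAnnotation"]

    def annotate(self, ann):
        # 'ann' is a CoreNLP protobuf Document; populate it in place here,
        # e.g. by splitting ann.text on whitespace and appending Token messages.
        pass

annotator = WhitespaceTokenizer(port=8432)
annotator.start()   # Process.start() serves the /ping and /annotate endpoints in a child process
# annotator.properties can then be passed to a CoreNLP server so it calls back into this endpoint.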
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Simple shell program that pipes documents (one per line) from an input file or stdin through a CoreNLP server and writes the annotations to stdout or a file.
"""
import corenlp
import json
import re
import csv
import sys
from collections import namedtuple, OrderedDict
FLOAT_RE = re.compile(r"\d*\.\d+")
INT_RE = re.compile(r"\d+")
def dictstr(arg):
"""
Parse a key=value string as a tuple (key, value) that can be provided as an argument to dict()
"""
key, value = arg.split("=")
if value.lower() == "true" or value.lower() == "false":
value = value.lower() == "true"  # bool("false") would be True, so compare explicitly
elif INT_RE.match(value):
value = int(value)
elif FLOAT_RE.match(value):
value = float(value)
return (key, value)
def do_annotate(args):
args.props = dict(args.props) if args.props else {}
if args.sentence_mode:
args.props["ssplit.isOneSentence"] = True
with corenlp.CoreNLPClient(annotators=args.annotators, properties=args.props, be_quiet=not args.verbose_server) as client:
for line in args.input:
if line.startswith("#"): continue
ann = client.annotate(line.strip(), output_format=args.format)
if args.format == "json":
if args.sentence_mode:
ann = ann["sentences"][0]
args.output.write(json.dumps(ann))
args.output.write("\n")
def main():
import argparse
parser = argparse.ArgumentParser(description='Annotate data')
parser.add_argument('-i', '--input', type=argparse.FileType('r'), default=sys.stdin, help="Input file to process; each line contains one document (default: stdin)")
parser.add_argument('-o', '--output', type=argparse.FileType('w'), default=sys.stdout, help="File to write annotations to (default: stdout)")
parser.add_argument('-f', '--format', choices=["json",], default="json", help="Output format")
parser.add_argument('-a', '--annotators', nargs="+", type=str, default=["tokenize ssplit lemma pos"], help="A list of annotators")
parser.add_argument('-s', '--sentence-mode', action="store_true",help="Assume each line of input is a sentence.")
parser.add_argument('-v', '--verbose-server', action="store_true",help="Server is made verbose")
parser.add_argument('-m', '--memory', type=str, default="4G", help="Memory to use for the server")
parser.add_argument('-p', '--props', nargs="+", type=dictstr, help="Properties as a list of key=value pairs")
parser.set_defaults(func=do_annotate)
ARGS = parser.parse_args()
if ARGS.func is None:
parser.print_help()
sys.exit(1)
else:
ARGS.func(ARGS)
if __name__ == "__main__":
main()
| stanfordnlp-master | stanfordnlp/server/main.py |
import sys
import json
with open(sys.argv[1]) as f:
d = json.load(f)
l = max([0] + [len(" ".join(x[0][1])) for x in d])
with open(sys.argv[2]) as f:
d = json.load(f)
l = max([l] + [len(" ".join(x[0][1])) for x in d])
print(l)
| stanfordnlp-master | stanfordnlp/utils/max_mwt_length.py |
import sys
input_file = sys.argv[1]
output_file = sys.argv[2]
# fix up multi-word token
lines = []
for line in sys.stdin:
line = line.strip()
lines += [line]
input_lines = []
with open(input_file) as f:
for line in f:
line = line.strip()
input_lines += [line]
with open(output_file, 'w') as outf:
i = 0
for line in input_lines:
if len(line) == 0:
print(lines[i], file=outf)
i += 1
continue
if line[0] == '#':
continue
line = line.split('\t')
if '.' in line[0]:
continue
if '-' in line[0]:
line[6] = '_'
line[9] = '_'
print('\t'.join(line), file=outf)
continue
print(lines[i], file=outf)
i += 1
| stanfordnlp-master | stanfordnlp/utils/post_insert_mwt.py |
import sys
backoff_models = { "UD_Breton-KEB": "ga_idt",
"UD_Czech-PUD": "cs_pdt",
"UD_English-PUD": "en_ewt",
"UD_Faroese-OFT": "no_nynorsk",
"UD_Finnish-PUD": "fi_tdt",
"UD_Japanese-Modern": "ja_gsd",
"UD_Naija-NSC": "en_ewt",
"UD_Swedish-PUD": "sv_talbanken"
}
print(backoff_models[sys.argv[1]])
| stanfordnlp-master | stanfordnlp/utils/select_backoff.py |
import sys
import json
toklabels = sys.argv[1]
if toklabels.endswith('.json'):
with open(toklabels, 'r') as f:
l = json.load(f)
l = [''.join([str(x[1]) for x in para]) for para in l]
else:
with open(toklabels, 'r') as f:
l = ''.join(f.readlines())
l = l.split('\n\n')
sentlen = [len(x) + 1 for para in l for x in para.split('2')]
print(sum(sentlen) / len(sentlen))
| stanfordnlp-master | stanfordnlp/utils/avg_sent_len.py |
| stanfordnlp-master | stanfordnlp/utils/__init__.py |
import argparse
import re
import sys
parser = argparse.ArgumentParser()
parser.add_argument('plaintext_file', type=str, help="Plaintext file containing the raw input")
parser.add_argument('conllu_file', type=str, help="CoNLL-U file containing tokens and sentence breaks")
parser.add_argument('-o', '--output', default=None, type=str, help="Output file name; output to the console if not specified (the default)")
parser.add_argument('-m', '--mwt_output', default=None, type=str, help="Output file name for MWT expansions; output to the console if not specified (the default)")
args = parser.parse_args()
with open(args.plaintext_file, 'r') as f:
text = ''.join(f.readlines())
textlen = len(text)
output = sys.stdout if args.output is None else open(args.output, 'w')
index = 0 # character offset in rawtext
def find_next_word(index, text, word, output):
idx = 0
word_sofar = ''
while index < len(text) and idx < len(word):
if text[index] == '\n' and index+1 < len(text) and text[index+1] == '\n':
# paragraph break
if len(word_sofar) > 0:
assert re.match(r'^\s+$', word_sofar), 'Found non-empty string at the end of a paragraph that doesn\'t match any token: |{}|'.format(word_sofar)
word_sofar = ''
output.write('\n\n')
index += 1
elif re.match(r'^\s$', text[index]) and not re.match(r'^\s$', word[idx]):
word_sofar += text[index]
else:
word_sofar += text[index]
assert text[index].replace('\n', ' ') == word[idx], "character mismatch: raw text contains |%s| but the next word is |%s|." % (word_sofar, word)
idx += 1
index += 1
return index, word_sofar
mwt_expansions = []
with open(args.conllu_file, 'r') as f:
buf = ''
mwtbegin = 0
mwtend = -1
expanded = []
last_comments = ""
for line in f:
line = line.strip()
if len(line):
if line[0] == "#":
# comment, don't do anything
if len(last_comments) == 0:
last_comments = line
continue
line = line.split('\t')
if '.' in line[0]:
# the tokenizer doesn't deal with ellipsis
continue
word = line[1]
if '-' in line[0]:
# multiword token
mwtbegin, mwtend = [int(x) for x in line[0].split('-')]
lastmwt = word
expanded = []
elif mwtbegin <= int(line[0]) < mwtend:
expanded += [word]
continue
elif int(line[0]) == mwtend:
expanded += [word]
expanded = [x.lower() for x in expanded] # evaluation doesn't care about case
mwt_expansions += [(lastmwt, tuple(expanded))]
if lastmwt[0].islower() and not expanded[0][0].islower():
print('Sentence ID with potential wrong MWT expansion: ', last_comments, file=sys.stderr)
mwtbegin = 0
mwtend = -1
lastmwt = None
continue
if len(buf):
output.write(buf)
index, word_found = find_next_word(index, text, word, output)
buf = '0' * (len(word_found)-1) + ('1' if '-' not in line[0] else '3')
else:
# sentence break found
if len(buf):
assert int(buf[-1]) >= 1
output.write(buf[:-1] + '{}'.format(int(buf[-1]) + 1))
buf = ''
last_comments = ''
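# Label scheme inferred from the construction of `buf` above (not documented in the
# original source): 0 = non-final character of a token, 1 = final character of a
# regular token, 3 = final character of a multi-word token; a sentence boundary
# increments the final label by one (1 -> 2, 3 -> 4).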
output.close()
from collections import Counter
mwts = Counter(mwt_expansions)
if args.mwt_output is None:
print('MWTs:', mwts)
else:
import json
with open(args.mwt_output, 'w') as f:
json.dump(list(mwts.items()), f)
print('{} unique MWTs found in data'.format(len(mwts)))
| stanfordnlp-master | stanfordnlp/utils/prepare_tokenizer_data.py |
import argparse
import re
import sys
from collections import Counter
import json
def para_to_chunks(text, char_level_pred):
chunks = []
preds = []
lastchunk = ''
lastpred = ''
for idx in range(len(text)):
if re.match('^\w$', text[idx], flags=re.UNICODE):
lastchunk += text[idx]
else:
if len(lastchunk) > 0 and not re.match('^\W+$', lastchunk, flags=re.UNICODE):
chunks += [lastchunk]
assert len(lastpred) > 0
preds += [int(lastpred)]
lastchunk = ''
if not re.match('^\s$', text[idx], flags=re.UNICODE):
# punctuation
chunks += [text[idx]]
assert len(lastpred) > 0
preds += [int(char_level_pred[idx])]
else:
# prepend leading white spaces to chunks so we can tell the difference between "2 , 2" and "2,2"
lastchunk += text[idx]
lastpred = char_level_pred[idx]
if len(lastchunk) > 0:
chunks += [lastchunk]
preds += [int(lastpred)]
return list(zip(chunks, preds))
def paras_to_chunks(text, char_level_pred):
return [para_to_chunks(re.sub('\s', ' ', pt.rstrip()), pc) for pt, pc in zip(text.split('\n\n'), char_level_pred.split('\n\n'))]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('plaintext_file', type=str, help="Plaintext file containing the raw input")
parser.add_argument('--char_level_pred', type=str, default=None, help="Plaintext file containing character-level predictions")
parser.add_argument('-o', '--output', default=None, type=str, help="Output file name; output to the console if not specified (the default)")
args = parser.parse_args()
with open(args.plaintext_file, 'r') as f:
text = ''.join(f.readlines()).rstrip()
text = '\n\n'.join([x for x in text.split('\n\n')])
if args.char_level_pred is not None:
with open(args.char_level_pred, 'r') as f:
char_level_pred = ''.join(f.readlines())
else:
char_level_pred = '\n\n'.join(['0' * len(x) for x in text.split('\n\n')])
assert len(text) == len(char_level_pred), 'Text has {} characters but there are {} char-level labels!'.format(len(text), len(char_level_pred))
output = sys.stdout if args.output is None else open(args.output, 'w')
json.dump(paras_to_chunks(text, char_level_pred), output)
output.close()
| stanfordnlp-master | stanfordnlp/utils/postprocess_vietnamese_tokenizer_data.py |
import sys
with open(sys.argv[2], 'w') as fout:
with open(sys.argv[1], 'r') as fin:
idx = 0
mwt_begin = 0
mwt_end = -1
for line in fin:
line = line.strip()
if line.startswith('#'):
print(line, file=fout)
continue
elif len(line) <= 0:
print(line, file=fout)
idx = 0
mwt_begin = 0
mwt_end = -1
continue
idx += 1
line = line.split('\t')
if '-' in line[0]:
mwt_begin, mwt_end = [int(x) for x in line[0].split('-')]
print("{}\t{}\t{}".format(idx, "\t".join(line[1:-1]), "MWT=Yes" if line[-1] == '_' else line[-1] + ",MWT=Yes"), file=fout)
idx -= 1
elif mwt_begin <= idx <= mwt_end:
continue
else:
print("{}\t{}".format(idx, "\t".join(line[1:])), file=fout)
| stanfordnlp-master | stanfordnlp/utils/contract_mwt.py |
from collections import OrderedDict
from functools import reduce
import json
import numpy as np
from operator import mul
import os
import pickle
from pprint import pprint
import random
import sys
import subprocess
config_file, sweep_progress, command = sys.argv[1], sys.argv[2], sys.argv[3:]
with open(config_file, 'r') as f:
loaded = ''.join([x.strip() for x in f.readlines()])
config = json.loads(loaded, object_pairs_hook=OrderedDict)
SAVED_PROGRESS = sweep_progress
PRIOR_STRENGTH = .01
BINARY_PRIOR_STRENGTH = 1
unitary = {k: [[0.0, PRIOR_STRENGTH] for _ in range(len(config[k])-1)] for k in config}
binary_keys = [k for k in config.keys() if len(config[k]) > 2]
binary = {"{}<>{}".format(k1, k2):[[0.0, BINARY_PRIOR_STRENGTH] for _ in range((len(config[k1]) - 2) * (len(config[k2]) - 2))] for i, k1 in enumerate(binary_keys[:-1]) for k2 in binary_keys[i+1:]}
overall = [0, PRIOR_STRENGTH]
def estimate_params(progress, unitary=unitary, overall=overall, config=config, binary=binary, binary_keys=binary_keys):
print("Estimating hyperparameter optimizer parameters...")
print(" > Generating features...")
D = sum([len(unitary[k]) for k in unitary])
D2 = sum([len(binary[k]) for k in binary])
# build prior
SQRT_PRIOR = np.sqrt(PRIOR_STRENGTH)
SQRT_BPRIOR = np.sqrt(BINARY_PRIOR_STRENGTH)
A = [] # features are organized as follows [overall bias, unitary features, binary interaction features]
b = []
for i in range(D+D2):
A += [[0] + [(SQRT_PRIOR if i < D else SQRT_BPRIOR) if j == i else 0 for j in range(D+D2)]]
b += [0]
#for i in range(D):
# A += [[SQRT_PRIOR] + [SQRT_PRIOR if j == i else 0 for j in range(D)] + [0] * D2]
# b += [0]
#for i, k1 in enumerate(binary_keys[:-1]):
# for k2 in binary_keys[i+1:]:
# for x in range(2, len(config[k1])):
# for y in range(2, len(config[k2])):
# cur = [SQRT_PRIOR] + [SQRT_PRIOR if (k == k1 and j == x) or (k == k2 and j == y) else 0 for k in config.keys() for j in range(1, len(config[k]))]
# cur += [SQRT_PRIOR if (k1_ == k1 and x_ == x and k2_ == k2 and y_ == y) else 0 for i_, k1_ in enumerate(binary_keys[:-1]) for k2_ in binary_keys[i_+1:] for x_ in range(2, len(config[k1_])) for y_ in range(2, len(config[k2_]))]
# A += [cur]
# b += [0]
# convert actual data
for proposal, res in progress:
cur = [1]
try:
for k in config:
idx = config[k].index(proposal.get(k, config[k][0])) - 1
cur += [1 if idx == j else 0 for j in range(len(config[k]) - 1)]
except ValueError:
continue
for i, k1 in enumerate(binary_keys[:-1]):
idx1 = config[k1].index(proposal.get(k1, config[k1][0]))
for k2 in binary_keys[i+1:]:
idx2 = config[k2].index(proposal.get(k2, config[k2][0]))
cur += [1 if a == idx1 and b == idx2 else 0 for a in range(2, len(config[k1])) for b in range(2, len(config[k2]))]
A += [cur]
b += [res - 100]
A = np.array(A)
b = np.array(b)
print(" > Solving for parameters...")
params = np.linalg.lstsq(A, b, rcond=None)[0]
print(" > Unpacking parameters...")
overall[0] = params[0]
overall[1] = A.shape[0] - (D+D2) + PRIOR_STRENGTH
counts = A[(D+D2):].sum(0)
idx = 1
for k in config:
for j in range(len(unitary[k])):
unitary[k][j] = params[idx], counts[idx] + PRIOR_STRENGTH
idx += 1
for i, k1 in enumerate(binary_keys[:-1]):
for k2 in binary_keys[i+1:]:
k = "{}<>{}".format(k1, k2)
for j in range(len(binary[k])):
binary[k][j] = params[idx], counts[idx] + BINARY_PRIOR_STRENGTH
idx += 1
assert idx == len(params)
print(overall)
pprint(unitary)
#pprint(binary)
def get_proposal(invtemp=1, unitary=unitary, config=config, binary=binary, binary_keys=binary_keys):
res = OrderedDict()
for k in config:
if np.random.random() < .05:
# epsilon-greedy
res[k] = config[k][np.random.randint(len(unitary[k])+1)]
continue
p = np.array([0] + [x[0] + np.random.randn() / np.sqrt(x[1]) / invtemp for x in unitary[k]], dtype=np.float64)
if k in binary_keys:
for k1 in binary_keys:
if k1 == k: break
idx1 = config[k1].index(res[k1])
if idx1 < 2:
continue
key = "{}<>{}".format(k1, k)
for j in range(2, len(config[k])):
cand = binary[key][(idx1 - 2) * (len(config[k]) - 2) + j - 2]
p[j] += cand[0] + np.random.randn() / np.sqrt(cand[1]) / invtemp
p += np.random.randn(*p.shape) / invtemp / np.sqrt(overall[1])
# p = p - np.max(p)
# p = np.exp(p * invtemp)
# p /= np.sum(p)
# res[k] = config[k][np.random.choice(np.arange(len(config[k])), p=p)]
res[k] = config[k][np.argmax(p, axis=0)]
return res
def evaluate_proposal(proposal, command=command, config=config):
cmd = ['bash'] + command
is_conv = False
conv_str = ''
for k in config:
if not k.startswith('conv_filters'):
if proposal[k] != False or not isinstance(proposal[k], bool):
cmd += ["--{}".format(k)]
if proposal[k] != True or not isinstance(proposal[k], bool):
cmd += [str(proposal[k])]
else:
if not is_conv:
cmd += ['--conv_filters']
conv_str += proposal[k]
is_conv = True
elif proposal[k] != False:
conv_str += ',,' + proposal[k]
else:
break
cmd += [conv_str]
res = subprocess.run(cmd, stderr=subprocess.PIPE)
try:
return float(res.stderr)
except Exception as e:
print(res.stderr.decode('utf-8'))
raise e
def save_load_progress(progress, update=[], filename=SAVED_PROGRESS):
print('Saving sweep progress to "{}", please be patient...'.format(filename))
if os.path.exists(filename):
with open(filename, 'rb') as f:
progress = pickle.load(f)
progress += update
with open(filename, 'wb') as f:
pickle.dump(progress, f)
print('Done!')
return progress
progress = []
if os.path.exists(SAVED_PROGRESS):
with open(SAVED_PROGRESS, 'rb') as f:
progress = pickle.load(f)
estimate_params(progress)
try:
while True:
#invtemp = min(1, .001 * (1+len(progress)))
invtemp = 1
print('Inv Temp = {}'.format(invtemp))
print('Grid size = {}'.format(reduce(mul, [len(config[k]) for k in config], 1)))
proposal = get_proposal(invtemp=invtemp)
res = evaluate_proposal(proposal)
progress = save_load_progress(progress, [[proposal, res]])
estimate_params(progress)
except:
import traceback
traceback.print_last()
save_load_progress(progress)
| stanfordnlp-master | stanfordnlp/utils/sweep.py |
import os
import re
import sys
name_map = {'af_afribooms': 'UD_Afrikaans-AfriBooms', 'grc_perseus': 'UD_Ancient_Greek-Perseus', 'grc_proiel': 'UD_Ancient_Greek-PROIEL', 'ar_padt': 'UD_Arabic-PADT', 'hy_armtdp': 'UD_Armenian-ArmTDP', 'eu_bdt': 'UD_Basque-BDT', 'br_keb': 'UD_Breton-KEB', 'bg_btb': 'UD_Bulgarian-BTB', 'bxr_bdt': 'UD_Buryat-BDT', 'bxr_bdt_xv': 'UD_Buryat-BDT_XV', 'ca_ancora': 'UD_Catalan-AnCora', 'zh_gsd': 'UD_Chinese-GSD', 'hr_set': 'UD_Croatian-SET', 'cs_cac': 'UD_Czech-CAC', 'cs_fictree': 'UD_Czech-FicTree', 'cs_pdt': 'UD_Czech-PDT', 'cs_pud': 'UD_Czech-PUD', 'da_ddt': 'UD_Danish-DDT', 'nl_alpino': 'UD_Dutch-Alpino', 'nl_lassysmall': 'UD_Dutch-LassySmall', 'en_ewt': 'UD_English-EWT', 'en_gum': 'UD_English-GUM', 'en_lines': 'UD_English-LinES', 'en_pud': 'UD_English-PUD', 'et_edt': 'UD_Estonian-EDT', 'fo_oft': 'UD_Faroese-OFT', 'fi_ftb': 'UD_Finnish-FTB', 'fi_pud': 'UD_Finnish-PUD', 'fi_tdt': 'UD_Finnish-TDT', 'fr_gsd': 'UD_French-GSD', 'fr_sequoia': 'UD_French-Sequoia', 'fr_spoken': 'UD_French-Spoken', 'gl_ctg': 'UD_Galician-CTG', 'gl_treegal': 'UD_Galician-TreeGal', 'de_gsd': 'UD_German-GSD', 'got_proiel': 'UD_Gothic-PROIEL', 'el_gdt': 'UD_Greek-GDT', 'he_htb': 'UD_Hebrew-HTB', 'hi_hdtb': 'UD_Hindi-HDTB', 'hu_szeged': 'UD_Hungarian-Szeged', 'id_gsd': 'UD_Indonesian-GSD', 'ga_idt': 'UD_Irish-IDT', 'ga_idt_xv': 'UD_Irish-IDT_XV', 'it_isdt': 'UD_Italian-ISDT', 'it_postwita': 'UD_Italian-PoSTWITA', 'ja_gsd': 'UD_Japanese-GSD', 'ja_modern': 'UD_Japanese-Modern', 'kk_ktb': 'UD_Kazakh-KTB', 'ko_gsd': 'UD_Korean-GSD', 'ko_kaist': 'UD_Korean-Kaist', 'kmr_mg': 'UD_Kurmanji-MG', 'kmr_mg_xv': 'UD_Kurmanji-MG_XV', 'la_ittb': 'UD_Latin-ITTB', 'la_perseus': 'UD_Latin-Perseus', 'la_proiel': 'UD_Latin-PROIEL', 'lv_lvtb': 'UD_Latvian-LVTB', 'pcm_nsc': 'UD_Naija-NSC', 'sme_giella': 'UD_North_Sami-Giella', 'no_bokmaal': 'UD_Norwegian-Bokmaal', 'no_nynorsk': 'UD_Norwegian-Nynorsk', 'no_nynorsklia': 'UD_Norwegian-NynorskLIA', 'cu_proiel': 'UD_Old_Church_Slavonic-PROIEL', 'fro_srcmf': 'UD_Old_French-SRCMF', 'fa_seraji': 'UD_Persian-Seraji', 'pl_lfg': 'UD_Polish-LFG', 'pl_sz': 'UD_Polish-SZ', 'pt_bosque': 'UD_Portuguese-Bosque', 'ro_rrt': 'UD_Romanian-RRT', 'ru_syntagrus': 'UD_Russian-SynTagRus', 'ru_taiga': 'UD_Russian-Taiga', 'sr_set': 'UD_Serbian-SET', 'sk_snk': 'UD_Slovak-SNK', 'sl_ssj': 'UD_Slovenian-SSJ', 'sl_sst': 'UD_Slovenian-SST', 'es_ancora': 'UD_Spanish-AnCora', 'sv_lines': 'UD_Swedish-LinES', 'sv_pud': 'UD_Swedish-PUD', 'sv_talbanken': 'UD_Swedish-Talbanken', 'th_pud': 'UD_Thai-PUD', 'tr_imst': 'UD_Turkish-IMST', 'uk_iu': 'UD_Ukrainian-IU', 'hsb_ufal': 'UD_Upper_Sorbian-UFAL', 'hsb_ufal_xv': 'UD_Upper_Sorbian-UFAL_XV', 'ur_udtb': 'UD_Urdu-UDTB', 'ug_udt': 'UD_Uyghur-UDT', 'vi_vtb': 'UD_Vietnamese-VTB'}
# get list of report files
report_files_dir = sys.argv[1]
report_files = os.listdir(report_files_dir)
report_files.sort()
f1_header_regex = re.compile('F1 Score')
for report_file in report_files:
contents = open(report_files_dir+'/'+report_file).read().split('\n')[:-1]
row = name_map[report_file.split('.')[0]]
for line in contents:
if len(line.split('|')) > 3 and not f1_header_regex.search(line):
row += (','+line.split('|')[3].rstrip().lstrip())
print(row)
| stanfordnlp-master | stanfordnlp/utils/generate_ete_report.py |
"""
utilities for getting resources
"""
import os
import requests
import sys
import urllib.request
import zipfile
from tqdm import tqdm
from pathlib import Path
# set home dir for default
HOME_DIR = str(Path.home())
DEFAULT_MODEL_DIR = os.path.join(HOME_DIR,'stanfordnlp_resources')
# list of language shorthands
conll_shorthands = ['af_afribooms', 'ar_padt', 'bg_btb', 'bxr_bdt', 'ca_ancora', 'cs_cac', 'cs_fictree', 'cs_pdt', 'cu_proiel', 'da_ddt', 'de_gsd', 'el_gdt', 'en_ewt', 'en_gum', 'en_lines', 'es_ancora', 'et_edt', 'eu_bdt', 'fa_seraji', 'fi_ftb', 'fi_tdt', 'fr_gsd', 'fro_srcmf', 'fr_sequoia', 'fr_spoken', 'ga_idt', 'gl_ctg', 'gl_treegal', 'got_proiel', 'grc_perseus', 'grc_proiel', 'he_htb', 'hi_hdtb', 'hr_set', 'hsb_ufal', 'hu_szeged', 'hy_armtdp', 'id_gsd', 'it_isdt', 'it_postwita', 'ja_gsd', 'kk_ktb', 'kmr_mg', 'ko_gsd', 'ko_kaist', 'la_ittb', 'la_perseus', 'la_proiel', 'lv_lvtb', 'nl_alpino', 'nl_lassysmall', 'no_bokmaal', 'no_nynorsklia', 'no_nynorsk', 'pl_lfg', 'pl_sz', 'pt_bosque', 'ro_rrt', 'ru_syntagrus', 'ru_taiga', 'sk_snk', 'sl_ssj', 'sl_sst', 'sme_giella', 'sr_set', 'sv_lines', 'sv_talbanken', 'tr_imst', 'ug_udt', 'uk_iu', 'ur_udtb', 'vi_vtb', 'zh_gsd']
# all languages with mwt
mwt_languages = ['ar_padt', 'ca_ancora', 'cs_cac', 'cs_fictree', 'cs_pdt', 'de_gsd', 'el_gdt', 'es_ancora', 'fa_seraji', 'fi_ftb', 'fr_gsd', 'fr_sequoia', 'gl_ctg', 'gl_treegal', 'he_htb', 'hy_armtdp', 'it_isdt', 'it_postwita', 'kk_ktb', 'pl_sz', 'pt_bosque', 'tr_imst']
# default treebank for languages
default_treebanks = {'af': 'af_afribooms', 'grc': 'grc_proiel', 'ar': 'ar_padt', 'hy': 'hy_armtdp', 'eu': 'eu_bdt', 'bg': 'bg_btb', 'bxr': 'bxr_bdt', 'ca': 'ca_ancora', 'zh': 'zh_gsd', 'hr': 'hr_set', 'cs': 'cs_pdt', 'da': 'da_ddt', 'nl': 'nl_alpino', 'en': 'en_ewt', 'et': 'et_edt', 'fi': 'fi_tdt', 'fr': 'fr_gsd', 'gl': 'gl_ctg', 'de': 'de_gsd', 'got': 'got_proiel', 'el': 'el_gdt', 'he': 'he_htb', 'hi': 'hi_hdtb', 'hu': 'hu_szeged', 'id': 'id_gsd', 'ga': 'ga_idt', 'it': 'it_isdt', 'ja': 'ja_gsd', 'kk': 'kk_ktb', 'ko': 'ko_kaist', 'kmr': 'kmr_mg', 'la': 'la_ittb', 'lv': 'lv_lvtb', 'sme': 'sme_giella', 'no_bokmaal': 'no_bokmaal', 'no_nynorsk': 'no_nynorsk', 'cu': 'cu_proiel', 'fro': 'fro_srcmf', 'fa': 'fa_seraji', 'pl': 'pl_lfg', 'pt': 'pt_bosque', 'ro': 'ro_rrt', 'ru': 'ru_syntagrus', 'sr': 'sr_set', 'sk': 'sk_snk', 'sl': 'sl_ssj', 'es': 'es_ancora', 'sv': 'sv_talbanken', 'tr': 'tr_imst', 'uk': 'uk_iu', 'hsb': 'hsb_ufal', 'ur': 'ur_udtb', 'ug': 'ug_udt', 'vi': 'vi_vtb'}
# map processor name to file ending
processor_to_ending = {'tokenize': 'tokenizer', 'mwt': 'mwt_expander', 'pos': 'tagger', 'lemma': 'lemmatizer', 'depparse': 'parser'}
# functions for handling configs
# given a treebank shorthand and a models path, build a default configuration
def build_default_config(treebank, models_path):
default_config = {}
if treebank in mwt_languages:
default_config['processors'] = 'tokenize,mwt,pos,lemma,depparse'
else:
default_config['processors'] = 'tokenize,pos,lemma,depparse'
if treebank == 'vi_vtb':
default_config['lemma_use_identity'] = True
default_config['lemma_batch_size'] = 5000
treebank_dir = os.path.join(models_path, f"{treebank}_models")
for processor in default_config['processors'].split(','):
model_file_ending = f"{processor_to_ending[processor]}.pt"
default_config[f"{processor}_model_path"] = os.path.join(treebank_dir, f"{treebank}_{model_file_ending}")
if processor in ['pos', 'depparse']:
default_config[f"{processor}_pretrain_path"] = os.path.join(treebank_dir, f"{treebank}.pretrain.pt")
return default_config
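# Illustrative result (paths hypothetical): build_default_config('en_ewt', '/home/user/stanfordnlp_resources')
# returns entries such as
#   'processors': 'tokenize,pos,lemma,depparse'
#   'tokenize_model_path': '/home/user/stanfordnlp_resources/en_ewt_models/en_ewt_tokenizer.pt'
#   'pos_pretrain_path': '/home/user/stanfordnlp_resources/en_ewt_models/en_ewt.pretrain.pt'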
# load a config from file
def load_config(config_file_path):
loaded_config = {}
with open(config_file_path) as config_file:
for config_line in config_file:
config_key, config_value = config_line.split(':')
loaded_config[config_key] = config_value.rstrip().lstrip()
return loaded_config
# download a ud models zip file
def download_ud_model(lang_name, resource_dir=None, should_unzip=True, confirm_if_exists=False, force=False):
# ask if user wants to download
if resource_dir is not None and os.path.exists(os.path.join(resource_dir, f"{lang_name}_models")):
if confirm_if_exists:
print("")
print(f"The model directory already exists at \"{resource_dir}/{lang_name}_models\". Do you want to download the models again? [y/N]")
should_download = 'y' if force else input()
should_download = should_download.strip().lower() in ['yes', 'y']
else:
should_download = False
else:
print('Would you like to download the models for: '+lang_name+' now? (Y/n)')
should_download = 'y' if force else input()
should_download = should_download.strip().lower() in ['yes', 'y', '']
if should_download:
# set up data directory
if resource_dir is None:
print('')
print('Default download directory: ' + DEFAULT_MODEL_DIR)
print('Hit enter to continue or type an alternate directory.')
where_to_download = '' if force else input()
if where_to_download != '':
download_dir = where_to_download
else:
download_dir = DEFAULT_MODEL_DIR
else:
download_dir = resource_dir
if not os.path.exists(download_dir):
os.makedirs(download_dir)
print('')
print('Downloading models for: '+lang_name)
model_zip_file_name = lang_name+'_models.zip'
download_url = 'http://nlp.stanford.edu/software/conll_2018/'+model_zip_file_name
download_file_path = os.path.join(download_dir, model_zip_file_name)
print('Download location: '+download_file_path)
# initiate download
r = requests.get(download_url, stream=True)
with open(download_file_path, 'wb') as f:
file_size = int(r.headers.get('content-length'))
default_chunk_size = 67108864
with tqdm(total=file_size, unit='B', unit_scale=True) as pbar:
for chunk in r.iter_content(chunk_size=default_chunk_size):
if chunk:
f.write(chunk)
f.flush()
pbar.update(len(chunk))
# unzip models file
print('')
print('Download complete. Models saved to: '+download_file_path)
if should_unzip:
unzip_ud_model(lang_name, download_file_path, download_dir)
        # remove the zip file
print("Cleaning up...", end="")
os.remove(download_file_path)
print('Done.')
# unzip a ud models zip file
def unzip_ud_model(lang_name, zip_file_src, zip_file_target):
print('Extracting models file for: '+lang_name)
with zipfile.ZipFile(zip_file_src, "r") as zip_ref:
zip_ref.extractall(zip_file_target)
# main download function
def download(download_label, resource_dir=None, confirm_if_exists=False, force=False):
if download_label in conll_shorthands:
download_ud_model(download_label, resource_dir=resource_dir, confirm_if_exists=confirm_if_exists, force=force)
elif download_label in default_treebanks:
print(f'Using the default treebank "{default_treebanks[download_label]}" for language "{download_label}".')
download_ud_model(default_treebanks[download_label], resource_dir=resource_dir, confirm_if_exists=confirm_if_exists, force=force)
else:
raise ValueError(f'The language or treebank "{download_label}" is not currently supported by this function. Please try again with other languages or treebanks.')
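# Example (an illustrative sketch): download('en') resolves to the default treebank
# 'en_ewt' and fetches its models, while download('fr_sequoia') fetches that specific
# treebank; pass force=True to skip the interactive prompts (models then go to
# DEFAULT_MODEL_DIR unless resource_dir is given).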
| stanfordnlp-master | stanfordnlp/utils/resources.py |
#!/usr/bin/env python3
# Compatible with Python 2.7 and 3.2+, can be used either as a module
# or a standalone executable.
#
# Copyright 2017, 2018 Institute of Formal and Applied Linguistics (UFAL),
# Faculty of Mathematics and Physics, Charles University, Czech Republic.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Authors: Milan Straka, Martin Popel <[email protected]>
#
# Changelog:
# - [12 Apr 2018] Version 0.9: Initial release.
# - [19 Apr 2018] Version 1.0: Fix bug in MLAS (duplicate entries in functional_children).
# Add --counts option.
# - [02 May 2018] Version 1.1: When removing spaces to match gold and system characters,
# consider all Unicode characters of category Zs instead of
# just ASCII space.
# - [25 Jun 2018] Version 1.2: Use python3 in the she-bang (instead of python).
# In Python2, make the whole computation use `unicode` strings.
# Command line usage
# ------------------
# conll18_ud_eval.py [-v] gold_conllu_file system_conllu_file
#
# - if no -v is given, only the official CoNLL18 UD Shared Task evaluation metrics
# are printed
# - if -v is given, more metrics are printed (as precision, recall, F1 score,
# and in case the metric is computed on aligned words also accuracy on these):
# - Tokens: how well do the gold tokens match system tokens
# - Sentences: how well do the gold sentences match system sentences
# - Words: how well can the gold words be aligned to system words
# - UPOS: using aligned words, how well does UPOS match
# - XPOS: using aligned words, how well does XPOS match
# - UFeats: using aligned words, how well does universal FEATS match
# - AllTags: using aligned words, how well does UPOS+XPOS+FEATS match
# - Lemmas: using aligned words, how well does LEMMA match
# - UAS: using aligned words, how well does HEAD match
# - LAS: using aligned words, how well does HEAD+DEPREL(ignoring subtypes) match
# - CLAS: using aligned words with content DEPREL, how well does
# HEAD+DEPREL(ignoring subtypes) match
# - MLAS: using aligned words with content DEPREL, how well does
# HEAD+DEPREL(ignoring subtypes)+UPOS+UFEATS+FunctionalChildren(DEPREL+UPOS+UFEATS) match
# - BLEX: using aligned words with content DEPREL, how well does
# HEAD+DEPREL(ignoring subtypes)+LEMMAS match
# - if -c is given, raw counts of correct/gold_total/system_total/aligned words are printed
# instead of precision/recall/F1/AlignedAccuracy for all metrics.
# API usage
# ---------
# - load_conllu(file)
# - loads CoNLL-U file from given file object to an internal representation
# - the file object should return str in both Python 2 and Python 3
# - raises UDError exception if the given file cannot be loaded
# - evaluate(gold_ud, system_ud)
# - evaluate the given gold and system CoNLL-U files (loaded with load_conllu)
# - raises UDError if the concatenated tokens of gold and system file do not match
# - returns a dictionary with the metrics described above, each metric having
# three fields: precision, recall and f1
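# Example of API usage (a minimal sketch; the file names are placeholders):
#
#   import io
#   gold_ud = load_conllu(io.open("gold.conllu", encoding="utf-8"))
#   system_ud = load_conllu(io.open("system.conllu", encoding="utf-8"))
#   scores = evaluate(gold_ud, system_ud)
#   print("LAS F1 = {:.2f}".format(100 * scores["LAS"].f1))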
# Description of token matching
# -----------------------------
# In order to match tokens of gold file and system file, we consider the text
# resulting from concatenation of gold tokens and text resulting from
# concatenation of system tokens. These texts should match -- if they do not,
# the evaluation fails.
#
# If the texts do match, every token is represented as a range in this original
# text, and tokens are equal only if their range is the same.
# Description of word matching
# ----------------------------
# When matching words of gold file and system file, we first match the tokens.
# The words which are also tokens are matched as tokens, but words in multi-word
# tokens have to be handled differently.
#
# To handle multi-word tokens, we start by finding "multi-word spans".
# Multi-word span is a span in the original text such that
# - it contains at least one multi-word token
# - all multi-word tokens in the span (considering both gold and system ones)
# are completely inside the span (i.e., they do not "stick out")
# - the multi-word span is as small as possible
#
# For every multi-word span, we align the gold and system words completely
# inside this span using LCS on their FORMs. The words not intersecting
# (even partially) any multi-word span are then aligned as tokens.
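#
# Worked example (illustrative): if the gold file contains a multi-word token "abc"
# expanded into the words "a", "b", "c" while the system file contains three plain
# words "a", "b", "c", the multi-word span covers the characters of "abc"; the gold
# and system words inside it are aligned by LCS on their lowercased FORMs, so all
# three words align (this is the case exercised by
# TestAlignment.test_equal_with_multiword below).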
from __future__ import division
from __future__ import print_function
import argparse
import io
import sys
import unicodedata
import unittest
# CoNLL-U column names
ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC = range(10)
# Content and functional relations
CONTENT_DEPRELS = {
"nsubj", "obj", "iobj", "csubj", "ccomp", "xcomp", "obl", "vocative",
"expl", "dislocated", "advcl", "advmod", "discourse", "nmod", "appos",
"nummod", "acl", "amod", "conj", "fixed", "flat", "compound", "list",
"parataxis", "orphan", "goeswith", "reparandum", "root", "dep"
}
FUNCTIONAL_DEPRELS = {
"aux", "cop", "mark", "det", "clf", "case", "cc"
}
UNIVERSAL_FEATURES = {
"PronType", "NumType", "Poss", "Reflex", "Foreign", "Abbr", "Gender",
"Animacy", "Number", "Case", "Definite", "Degree", "VerbForm", "Mood",
"Tense", "Aspect", "Voice", "Evident", "Polarity", "Person", "Polite"
}
# UD Error is used when raising exceptions in this module
class UDError(Exception):
pass
# Conversion methods handling `str` <-> `unicode` conversions in Python2
def _decode(text):
return text if sys.version_info[0] >= 3 or not isinstance(text, str) else text.decode("utf-8")
def _encode(text):
return text if sys.version_info[0] >= 3 or not isinstance(text, unicode) else text.encode("utf-8")
# Load given CoNLL-U file into internal representation
def load_conllu(file):
# Internal representation classes
class UDRepresentation:
def __init__(self):
# Characters of all the tokens in the whole file.
# Whitespace between tokens is not included.
self.characters = []
# List of UDSpan instances with start&end indices into `characters`.
self.tokens = []
# List of UDWord instances.
self.words = []
# List of UDSpan instances with start&end indices into `characters`.
self.sentences = []
class UDSpan:
def __init__(self, start, end):
self.start = start
# Note that self.end marks the first position **after the end** of span,
# so we can use characters[start:end] or range(start, end).
self.end = end
class UDWord:
def __init__(self, span, columns, is_multiword):
# Span of this word (or MWT, see below) within ud_representation.characters.
self.span = span
# 10 columns of the CoNLL-U file: ID, FORM, LEMMA,...
self.columns = columns
# is_multiword==True means that this word is part of a multi-word token.
# In that case, self.span marks the span of the whole multi-word token.
self.is_multiword = is_multiword
# Reference to the UDWord instance representing the HEAD (or None if root).
self.parent = None
# List of references to UDWord instances representing functional-deprel children.
self.functional_children = []
# Only consider universal FEATS.
self.columns[FEATS] = "|".join(sorted(feat for feat in columns[FEATS].split("|")
if feat.split("=", 1)[0] in UNIVERSAL_FEATURES))
# Let's ignore language-specific deprel subtypes.
self.columns[DEPREL] = columns[DEPREL].split(":")[0]
# Precompute which deprels are CONTENT_DEPRELS and which FUNCTIONAL_DEPRELS
self.is_content_deprel = self.columns[DEPREL] in CONTENT_DEPRELS
self.is_functional_deprel = self.columns[DEPREL] in FUNCTIONAL_DEPRELS
ud = UDRepresentation()
# Load the CoNLL-U file
index, sentence_start = 0, None
while True:
line = file.readline()
if not line:
break
line = _decode(line.rstrip("\r\n"))
# Handle sentence start boundaries
if sentence_start is None:
# Skip comments
if line.startswith("#"):
continue
# Start a new sentence
ud.sentences.append(UDSpan(index, 0))
sentence_start = len(ud.words)
if not line:
# Add parent and children UDWord links and check there are no cycles
def process_word(word):
if word.parent == "remapping":
raise UDError("There is a cycle in a sentence")
if word.parent is None:
head = int(word.columns[HEAD])
if head < 0 or head > len(ud.words) - sentence_start:
raise UDError("HEAD '{}' points outside of the sentence".format(_encode(word.columns[HEAD])))
if head:
parent = ud.words[sentence_start + head - 1]
word.parent = "remapping"
process_word(parent)
word.parent = parent
for word in ud.words[sentence_start:]:
process_word(word)
# func_children cannot be assigned within process_word
# because it is called recursively and may result in adding one child twice.
for word in ud.words[sentence_start:]:
if word.parent and word.is_functional_deprel:
word.parent.functional_children.append(word)
# Check there is a single root node
if len([word for word in ud.words[sentence_start:] if word.parent is None]) != 1:
raise UDError("There are multiple roots in a sentence")
# End the sentence
ud.sentences[-1].end = index
sentence_start = None
continue
# Read next token/word
columns = line.split("\t")
if len(columns) != 10:
raise UDError("The CoNLL-U line does not contain 10 tab-separated columns: '{}'".format(_encode(line)))
# Skip empty nodes
if "." in columns[ID]:
continue
# Delete spaces from FORM, so gold.characters == system.characters
# even if one of them tokenizes the space. Use any Unicode character
# with category Zs.
columns[FORM] = "".join(filter(lambda c: unicodedata.category(c) != "Zs", columns[FORM]))
if not columns[FORM]:
raise UDError("There is an empty FORM in the CoNLL-U file")
# Save token
ud.characters.extend(columns[FORM])
ud.tokens.append(UDSpan(index, index + len(columns[FORM])))
index += len(columns[FORM])
# Handle multi-word tokens to save word(s)
if "-" in columns[ID]:
try:
start, end = map(int, columns[ID].split("-"))
except:
raise UDError("Cannot parse multi-word token ID '{}'".format(_encode(columns[ID])))
for _ in range(start, end + 1):
word_line = _decode(file.readline().rstrip("\r\n"))
word_columns = word_line.split("\t")
if len(word_columns) != 10:
raise UDError("The CoNLL-U line does not contain 10 tab-separated columns: '{}'".format(_encode(word_line)))
ud.words.append(UDWord(ud.tokens[-1], word_columns, is_multiword=True))
# Basic tokens/words
else:
try:
word_id = int(columns[ID])
except:
raise UDError("Cannot parse word ID '{}'".format(_encode(columns[ID])))
if word_id != len(ud.words) - sentence_start + 1:
raise UDError("Incorrect word ID '{}' for word '{}', expected '{}'".format(
_encode(columns[ID]), _encode(columns[FORM]), len(ud.words) - sentence_start + 1))
try:
head_id = int(columns[HEAD])
except:
raise UDError("Cannot parse HEAD '{}'".format(_encode(columns[HEAD])))
if head_id < 0:
raise UDError("HEAD cannot be negative")
ud.words.append(UDWord(ud.tokens[-1], columns, is_multiword=False))
if sentence_start is not None:
raise UDError("The CoNLL-U file does not end with empty line")
return ud
# Evaluate the gold and system treebanks (loaded using load_conllu).
def evaluate(gold_ud, system_ud):
class Score:
def __init__(self, gold_total, system_total, correct, aligned_total=None):
self.correct = correct
self.gold_total = gold_total
self.system_total = system_total
self.aligned_total = aligned_total
self.precision = correct / system_total if system_total else 0.0
self.recall = correct / gold_total if gold_total else 0.0
self.f1 = 2 * correct / (system_total + gold_total) if system_total + gold_total else 0.0
self.aligned_accuracy = correct / aligned_total if aligned_total else aligned_total
class AlignmentWord:
def __init__(self, gold_word, system_word):
self.gold_word = gold_word
self.system_word = system_word
class Alignment:
def __init__(self, gold_words, system_words):
self.gold_words = gold_words
self.system_words = system_words
self.matched_words = []
self.matched_words_map = {}
def append_aligned_words(self, gold_word, system_word):
self.matched_words.append(AlignmentWord(gold_word, system_word))
self.matched_words_map[system_word] = gold_word
def spans_score(gold_spans, system_spans):
correct, gi, si = 0, 0, 0
while gi < len(gold_spans) and si < len(system_spans):
if system_spans[si].start < gold_spans[gi].start:
si += 1
elif gold_spans[gi].start < system_spans[si].start:
gi += 1
else:
correct += gold_spans[gi].end == system_spans[si].end
si += 1
gi += 1
return Score(len(gold_spans), len(system_spans), correct)
def alignment_score(alignment, key_fn=None, filter_fn=None):
if filter_fn is not None:
gold = sum(1 for gold in alignment.gold_words if filter_fn(gold))
system = sum(1 for system in alignment.system_words if filter_fn(system))
aligned = sum(1 for word in alignment.matched_words if filter_fn(word.gold_word))
else:
gold = len(alignment.gold_words)
system = len(alignment.system_words)
aligned = len(alignment.matched_words)
if key_fn is None:
# Return score for whole aligned words
return Score(gold, system, aligned)
def gold_aligned_gold(word):
return word
def gold_aligned_system(word):
return alignment.matched_words_map.get(word, "NotAligned") if word is not None else None
correct = 0
for words in alignment.matched_words:
if filter_fn is None or filter_fn(words.gold_word):
if key_fn(words.gold_word, gold_aligned_gold) == key_fn(words.system_word, gold_aligned_system):
correct += 1
return Score(gold, system, correct, aligned)
def beyond_end(words, i, multiword_span_end):
if i >= len(words):
return True
if words[i].is_multiword:
return words[i].span.start >= multiword_span_end
return words[i].span.end > multiword_span_end
def extend_end(word, multiword_span_end):
if word.is_multiword and word.span.end > multiword_span_end:
return word.span.end
return multiword_span_end
def find_multiword_span(gold_words, system_words, gi, si):
# We know gold_words[gi].is_multiword or system_words[si].is_multiword.
# Find the start of the multiword span (gs, ss), so the multiword span is minimal.
# Initialize multiword_span_end characters index.
if gold_words[gi].is_multiword:
multiword_span_end = gold_words[gi].span.end
if not system_words[si].is_multiword and system_words[si].span.start < gold_words[gi].span.start:
si += 1
else: # if system_words[si].is_multiword
multiword_span_end = system_words[si].span.end
if not gold_words[gi].is_multiword and gold_words[gi].span.start < system_words[si].span.start:
gi += 1
gs, ss = gi, si
# Find the end of the multiword span
# (so both gi and si are pointing to the word following the multiword span end).
while not beyond_end(gold_words, gi, multiword_span_end) or \
not beyond_end(system_words, si, multiword_span_end):
if gi < len(gold_words) and (si >= len(system_words) or
gold_words[gi].span.start <= system_words[si].span.start):
multiword_span_end = extend_end(gold_words[gi], multiword_span_end)
gi += 1
else:
multiword_span_end = extend_end(system_words[si], multiword_span_end)
si += 1
return gs, ss, gi, si
def compute_lcs(gold_words, system_words, gi, si, gs, ss):
lcs = [[0] * (si - ss) for i in range(gi - gs)]
for g in reversed(range(gi - gs)):
for s in reversed(range(si - ss)):
if gold_words[gs + g].columns[FORM].lower() == system_words[ss + s].columns[FORM].lower():
lcs[g][s] = 1 + (lcs[g+1][s+1] if g+1 < gi-gs and s+1 < si-ss else 0)
lcs[g][s] = max(lcs[g][s], lcs[g+1][s] if g+1 < gi-gs else 0)
lcs[g][s] = max(lcs[g][s], lcs[g][s+1] if s+1 < si-ss else 0)
return lcs
def align_words(gold_words, system_words):
alignment = Alignment(gold_words, system_words)
gi, si = 0, 0
while gi < len(gold_words) and si < len(system_words):
if gold_words[gi].is_multiword or system_words[si].is_multiword:
# A: Multi-word tokens => align via LCS within the whole "multiword span".
gs, ss, gi, si = find_multiword_span(gold_words, system_words, gi, si)
if si > ss and gi > gs:
lcs = compute_lcs(gold_words, system_words, gi, si, gs, ss)
# Store aligned words
s, g = 0, 0
while g < gi - gs and s < si - ss:
if gold_words[gs + g].columns[FORM].lower() == system_words[ss + s].columns[FORM].lower():
alignment.append_aligned_words(gold_words[gs+g], system_words[ss+s])
g += 1
s += 1
elif lcs[g][s] == (lcs[g+1][s] if g+1 < gi-gs else 0):
g += 1
else:
s += 1
else:
# B: No multi-word token => align according to spans.
if (gold_words[gi].span.start, gold_words[gi].span.end) == (system_words[si].span.start, system_words[si].span.end):
alignment.append_aligned_words(gold_words[gi], system_words[si])
gi += 1
si += 1
elif gold_words[gi].span.start <= system_words[si].span.start:
gi += 1
else:
si += 1
return alignment
# Check that the underlying character sequences do match.
if gold_ud.characters != system_ud.characters:
index = 0
while index < len(gold_ud.characters) and index < len(system_ud.characters) and \
gold_ud.characters[index] == system_ud.characters[index]:
index += 1
raise UDError(
"The concatenation of tokens in gold file and in system file differ!\n" +
"First 20 differing characters in gold file: '{}' and system file: '{}'".format(
"".join(map(_encode, gold_ud.characters[index:index + 20])),
"".join(map(_encode, system_ud.characters[index:index + 20]))
)
)
# Align words
alignment = align_words(gold_ud.words, system_ud.words)
# Compute the F1-scores
return {
"Tokens": spans_score(gold_ud.tokens, system_ud.tokens),
"Sentences": spans_score(gold_ud.sentences, system_ud.sentences),
"Words": alignment_score(alignment),
"UPOS": alignment_score(alignment, lambda w, _: w.columns[UPOS]),
"XPOS": alignment_score(alignment, lambda w, _: w.columns[XPOS]),
"UFeats": alignment_score(alignment, lambda w, _: w.columns[FEATS]),
"AllTags": alignment_score(alignment, lambda w, _: (w.columns[UPOS], w.columns[XPOS], w.columns[FEATS])),
"Lemmas": alignment_score(alignment, lambda w, ga: w.columns[LEMMA] if ga(w).columns[LEMMA] != "_" else "_"),
"UAS": alignment_score(alignment, lambda w, ga: ga(w.parent)),
"LAS": alignment_score(alignment, lambda w, ga: (ga(w.parent), w.columns[DEPREL])),
"CLAS": alignment_score(alignment, lambda w, ga: (ga(w.parent), w.columns[DEPREL]),
filter_fn=lambda w: w.is_content_deprel),
"MLAS": alignment_score(alignment, lambda w, ga: (ga(w.parent), w.columns[DEPREL], w.columns[UPOS], w.columns[FEATS],
[(ga(c), c.columns[DEPREL], c.columns[UPOS], c.columns[FEATS])
for c in w.functional_children]),
filter_fn=lambda w: w.is_content_deprel),
"BLEX": alignment_score(alignment, lambda w, ga: (ga(w.parent), w.columns[DEPREL],
w.columns[LEMMA] if ga(w).columns[LEMMA] != "_" else "_"),
filter_fn=lambda w: w.is_content_deprel),
}
def load_conllu_file(path):
_file = open(path, mode="r", **({"encoding": "utf-8"} if sys.version_info >= (3, 0) else {}))
return load_conllu(_file)
def evaluate_wrapper(args):
# Load CoNLL-U files
gold_ud = load_conllu_file(args.gold_file)
system_ud = load_conllu_file(args.system_file)
return evaluate(gold_ud, system_ud)
def main():
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("gold_file", type=str,
help="Name of the CoNLL-U file with the gold data.")
parser.add_argument("system_file", type=str,
help="Name of the CoNLL-U file with the predicted data.")
parser.add_argument("--verbose", "-v", default=False, action="store_true",
help="Print all metrics.")
parser.add_argument("--counts", "-c", default=False, action="store_true",
help="Print raw counts of correct/gold/system/aligned words instead of prec/rec/F1 for all metrics.")
args = parser.parse_args()
# Evaluate
evaluation = evaluate_wrapper(args)
# Print the evaluation
if not args.verbose and not args.counts:
print("LAS F1 Score: {:.2f}".format(100 * evaluation["LAS"].f1))
print("MLAS Score: {:.2f}".format(100 * evaluation["MLAS"].f1))
print("BLEX Score: {:.2f}".format(100 * evaluation["BLEX"].f1))
else:
if args.counts:
print("Metric | Correct | Gold | Predicted | Aligned")
else:
print("Metric | Precision | Recall | F1 Score | AligndAcc")
print("-----------+-----------+-----------+-----------+-----------")
for metric in["Tokens", "Sentences", "Words", "UPOS", "XPOS", "UFeats", "AllTags", "Lemmas", "UAS", "LAS", "CLAS", "MLAS", "BLEX"]:
if args.counts:
print("{:11}|{:10} |{:10} |{:10} |{:10}".format(
metric,
evaluation[metric].correct,
evaluation[metric].gold_total,
evaluation[metric].system_total,
evaluation[metric].aligned_total or (evaluation[metric].correct if metric == "Words" else "")
))
else:
print("{:11}|{:10.2f} |{:10.2f} |{:10.2f} |{}".format(
metric,
100 * evaluation[metric].precision,
100 * evaluation[metric].recall,
100 * evaluation[metric].f1,
"{:10.2f}".format(100 * evaluation[metric].aligned_accuracy) if evaluation[metric].aligned_accuracy is not None else ""
))
if __name__ == "__main__":
main()
# Tests, which can be executed with `python -m unittest conll18_ud_eval`.
class TestAlignment(unittest.TestCase):
@staticmethod
def _load_words(words):
"""Prepare fake CoNLL-U files with fake HEAD to prevent multiple roots errors."""
lines, num_words = [], 0
for w in words:
parts = w.split(" ")
if len(parts) == 1:
num_words += 1
lines.append("{}\t{}\t_\t_\t_\t_\t{}\t_\t_\t_".format(num_words, parts[0], int(num_words>1)))
else:
lines.append("{}-{}\t{}\t_\t_\t_\t_\t_\t_\t_\t_".format(num_words + 1, num_words + len(parts) - 1, parts[0]))
for part in parts[1:]:
num_words += 1
lines.append("{}\t{}\t_\t_\t_\t_\t{}\t_\t_\t_".format(num_words, part, int(num_words>1)))
return load_conllu((io.StringIO if sys.version_info >= (3, 0) else io.BytesIO)("\n".join(lines+["\n"])))
def _test_exception(self, gold, system):
self.assertRaises(UDError, evaluate, self._load_words(gold), self._load_words(system))
def _test_ok(self, gold, system, correct):
metrics = evaluate(self._load_words(gold), self._load_words(system))
gold_words = sum((max(1, len(word.split(" ")) - 1) for word in gold))
system_words = sum((max(1, len(word.split(" ")) - 1) for word in system))
self.assertEqual((metrics["Words"].precision, metrics["Words"].recall, metrics["Words"].f1),
(correct / system_words, correct / gold_words, 2 * correct / (gold_words + system_words)))
def test_exception(self):
self._test_exception(["a"], ["b"])
def test_equal(self):
self._test_ok(["a"], ["a"], 1)
self._test_ok(["a", "b", "c"], ["a", "b", "c"], 3)
def test_equal_with_multiword(self):
self._test_ok(["abc a b c"], ["a", "b", "c"], 3)
self._test_ok(["a", "bc b c", "d"], ["a", "b", "c", "d"], 4)
self._test_ok(["abcd a b c d"], ["ab a b", "cd c d"], 4)
self._test_ok(["abc a b c", "de d e"], ["a", "bcd b c d", "e"], 5)
def test_alignment(self):
self._test_ok(["abcd"], ["a", "b", "c", "d"], 0)
self._test_ok(["abc", "d"], ["a", "b", "c", "d"], 1)
self._test_ok(["a", "bc", "d"], ["a", "b", "c", "d"], 2)
self._test_ok(["a", "bc b c", "d"], ["a", "b", "cd"], 2)
self._test_ok(["abc a BX c", "def d EX f"], ["ab a b", "cd c d", "ef e f"], 4)
self._test_ok(["ab a b", "cd bc d"], ["a", "bc", "d"], 2)
self._test_ok(["a", "bc b c", "d"], ["ab AX BX", "cd CX a"], 1)
| stanfordnlp-master | stanfordnlp/utils/conll18_ud_eval.py |
"""
Entry point for training and evaluating a lemmatizer.
This lemmatizer combines a neural sequence-to-sequence architecture with an `edit` classifier
and two dictionaries to produce robust lemmas from word forms.
For details please refer to paper: https://nlp.stanford.edu/pubs/qi2018universal.pdf.
"""
import sys
import os
import shutil
import time
from datetime import datetime
import argparse
import numpy as np
import random
import torch
from torch import nn, optim
from stanfordnlp.models.lemma.data import DataLoader
from stanfordnlp.models.lemma.vocab import Vocab
from stanfordnlp.models.lemma.trainer import Trainer
from stanfordnlp.models.lemma import scorer, edit
from stanfordnlp.models.common import utils
import stanfordnlp.models.common.seq2seq_constant as constant
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='data/lemma', help='Directory for all lemma data.')
parser.add_argument('--train_file', type=str, default=None, help='Input file for data loader.')
parser.add_argument('--eval_file', type=str, default=None, help='Input file for data loader.')
parser.add_argument('--output_file', type=str, default=None, help='Output CoNLL-U file.')
    parser.add_argument('--gold_file', type=str, default=None, help='Gold CoNLL-U file for evaluation.')
parser.add_argument('--mode', default='train', choices=['train', 'predict'])
parser.add_argument('--lang', type=str, help='Language')
parser.add_argument('--no_dict', dest='ensemble_dict', action='store_false', help='Do not ensemble dictionary with seq2seq. By default use ensemble.')
parser.add_argument('--dict_only', action='store_true', help='Only train a dictionary-based lemmatizer.')
parser.add_argument('--hidden_dim', type=int, default=200)
parser.add_argument('--emb_dim', type=int, default=50)
parser.add_argument('--num_layers', type=int, default=1)
parser.add_argument('--emb_dropout', type=float, default=0.5)
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument('--max_dec_len', type=int, default=50)
parser.add_argument('--beam_size', type=int, default=1)
parser.add_argument('--attn_type', default='soft', choices=['soft', 'mlp', 'linear', 'deep'], help='Attention type')
parser.add_argument('--pos', action='store_true', help='Use UPOS in lemmatization.')
parser.add_argument('--pos_dim', type=int, default=50)
parser.add_argument('--pos_dropout', type=float, default=0.5)
parser.add_argument('--no_edit', dest='edit', action='store_false', help='Do not use edit classifier in lemmatization. By default use an edit classifier.')
parser.add_argument('--num_edit', type=int, default=len(edit.EDIT_TO_ID))
parser.add_argument('--alpha', type=float, default=1.0)
parser.add_argument('--sample_train', type=float, default=1.0, help='Subsample training data.')
parser.add_argument('--optim', type=str, default='adam', help='sgd, adagrad, adam or adamax.')
parser.add_argument('--lr', type=float, default=1e-3, help='Learning rate')
parser.add_argument('--lr_decay', type=float, default=0.9)
parser.add_argument('--decay_epoch', type=int, default=30, help="Decay the lr starting from this epoch.")
parser.add_argument('--num_epoch', type=int, default=60)
parser.add_argument('--batch_size', type=int, default=50)
parser.add_argument('--max_grad_norm', type=float, default=5.0, help='Gradient clipping.')
parser.add_argument('--log_step', type=int, default=20, help='Print log every k steps.')
parser.add_argument('--model_dir', type=str, default='saved_models/lemma', help='Root dir for saving models.')
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
parser.add_argument('--cpu', action='store_true', help='Ignore CUDA.')
args = parser.parse_args()
return args
def main():
args = parse_args()
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
if args.cpu:
args.cuda = False
elif args.cuda:
torch.cuda.manual_seed(args.seed)
args = vars(args)
print("Running lemmatizer in {} mode".format(args['mode']))
    # manually reduce the number of training epochs for very large treebanks
if args['lang'] in ['cs_pdt', 'ru_syntagrus']:
args['num_epoch'] = 30
if args['mode'] == 'train':
train(args)
else:
evaluate(args)
def train(args):
# load data
print("[Loading data with batch size {}...]".format(args['batch_size']))
train_batch = DataLoader(args['train_file'], args['batch_size'], args, evaluation=False)
vocab = train_batch.vocab
args['vocab_size'] = vocab['char'].size
args['pos_vocab_size'] = vocab['pos'].size
dev_batch = DataLoader(args['eval_file'], args['batch_size'], args, vocab=vocab, evaluation=True)
utils.ensure_dir(args['model_dir'])
model_file = '{}/{}_lemmatizer.pt'.format(args['model_dir'], args['lang'])
# pred and gold path
system_pred_file = args['output_file']
gold_file = args['gold_file']
utils.print_config(args)
# skip training if the language does not have training or dev data
if len(train_batch) == 0 or len(dev_batch) == 0:
print("[Skip training because no data available...]")
sys.exit(0)
# start training
# train a dictionary-based lemmatizer
trainer = Trainer(args=args, vocab=vocab, use_cuda=args['cuda'])
print("[Training dictionary-based lemmatizer...]")
trainer.train_dict(train_batch.conll.get(['word', 'upos', 'lemma']))
print("Evaluating on dev set...")
dev_preds = trainer.predict_dict(dev_batch.conll.get(['word', 'upos']))
dev_batch.conll.write_conll_with_lemmas(dev_preds, system_pred_file)
_, _, dev_f = scorer.score(system_pred_file, gold_file)
print("Dev F1 = {:.2f}".format(dev_f * 100))
if args.get('dict_only', False):
# save dictionaries
trainer.save(model_file)
else:
# train a seq2seq model
print("[Training seq2seq-based lemmatizer...]")
global_step = 0
max_steps = len(train_batch) * args['num_epoch']
dev_score_history = []
best_dev_preds = []
current_lr = args['lr']
global_start_time = time.time()
format_str = '{}: step {}/{} (epoch {}/{}), loss = {:.6f} ({:.3f} sec/batch), lr: {:.6f}'
# start training
for epoch in range(1, args['num_epoch']+1):
train_loss = 0
for i, batch in enumerate(train_batch):
start_time = time.time()
global_step += 1
loss = trainer.update(batch, eval=False) # update step
train_loss += loss
if global_step % args['log_step'] == 0:
duration = time.time() - start_time
print(format_str.format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), global_step,\
max_steps, epoch, args['num_epoch'], loss, duration, current_lr))
# eval on dev
print("Evaluating on dev set...")
dev_preds = []
dev_edits = []
for i, batch in enumerate(dev_batch):
preds, edits = trainer.predict(batch, args['beam_size'])
dev_preds += preds
if edits is not None:
dev_edits += edits
dev_preds = trainer.postprocess(dev_batch.conll.get(['word']), dev_preds, edits=dev_edits)
# try ensembling with dict if necessary
if args.get('ensemble_dict', False):
print("[Ensembling dict with seq2seq model...]")
dev_preds = trainer.ensemble(dev_batch.conll.get(['word', 'upos']), dev_preds)
dev_batch.conll.write_conll_with_lemmas(dev_preds, system_pred_file)
_, _, dev_score = scorer.score(system_pred_file, gold_file)
train_loss = train_loss / train_batch.num_examples * args['batch_size'] # avg loss per batch
print("epoch {}: train_loss = {:.6f}, dev_score = {:.4f}".format(epoch, train_loss, dev_score))
# save best model
if epoch == 1 or dev_score > max(dev_score_history):
trainer.save(model_file)
print("new best model saved.")
best_dev_preds = dev_preds
# lr schedule
if epoch > args['decay_epoch'] and dev_score <= dev_score_history[-1] and \
args['optim'] in ['sgd', 'adagrad']:
current_lr *= args['lr_decay']
trainer.update_lr(current_lr)
dev_score_history += [dev_score]
print("")
print("Training ended with {} epochs.".format(epoch))
best_f, best_epoch = max(dev_score_history)*100, np.argmax(dev_score_history)+1
print("Best dev F1 = {:.2f}, at epoch = {}".format(best_f, best_epoch))
def evaluate(args):
# file paths
system_pred_file = args['output_file']
gold_file = args['gold_file']
model_file = '{}/{}_lemmatizer.pt'.format(args['model_dir'], args['lang'])
# load model
use_cuda = args['cuda'] and not args['cpu']
trainer = Trainer(model_file=model_file, use_cuda=use_cuda)
loaded_args, vocab = trainer.args, trainer.vocab
for k in args:
if k.endswith('_dir') or k.endswith('_file') or k in ['shorthand']:
loaded_args[k] = args[k]
    # load data
print("Loading data with batch size {}...".format(args['batch_size']))
batch = DataLoader(args['eval_file'], args['batch_size'], loaded_args, vocab=vocab, evaluation=True)
# skip eval if dev data does not exist
if len(batch) == 0:
print("Skip evaluation because no dev data is available...")
print("Lemma score:")
print("{} ".format(args['lang']))
sys.exit(0)
dict_preds = trainer.predict_dict(batch.conll.get(['word', 'upos']))
if loaded_args.get('dict_only', False):
preds = dict_preds
else:
print("Running the seq2seq model...")
preds = []
edits = []
for i, b in enumerate(batch):
ps, es = trainer.predict(b, args['beam_size'])
preds += ps
if es is not None:
edits += es
preds = trainer.postprocess(batch.conll.get(['word']), preds, edits=edits)
if loaded_args.get('ensemble_dict', False):
print("[Ensembling dict with seq2seq lemmatizer...]")
preds = trainer.ensemble(batch.conll.get(['word', 'upos']), preds)
# write to file and score
batch.conll.write_conll_with_lemmas(preds, system_pred_file)
if gold_file is not None:
_, _, score = scorer.score(system_pred_file, gold_file)
print("Lemma score:")
print("{} {:.2f}".format(args['lang'], score*100))
if __name__ == '__main__':
main()
| stanfordnlp-master | stanfordnlp/models/lemmatizer.py |
"""
Wrapper functions to run UDPipe modules just as other neural modules. Only one module will be run at each call.
For more information on the UDPipe system, please visit: http://ufal.mff.cuni.cz/udpipe.
"""
import os
import io
import argparse
import subprocess
import time
from stanfordnlp.models.common import conll
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--input_file', default=None, help='Path to input file.')
parser.add_argument('--output_file', default=None, help='Path to output file.')
parser.add_argument('--treebank', default=None, help='Full treebank short name.')
parser.add_argument('--module', choices=['tokenize', 'lemma', 'pos', 'ufeats', 'parse'], help='The module to run at a single step.')
parser.add_argument('--udpipe_dir', default=None, help='Root directory of UDPipe.')
parser.add_argument('--short2tb', default='short_to_tb', help='Mapper file from treebank short code to fullname.')
args = parser.parse_args()
return args
def main():
args = parse_args()
args = vars(args)
print("Running UDPipe with module {}...".format(args['module']))
# convert names
short2tb = load_short2tb(args['short2tb'])
tb_short = args['treebank']
tb_full = short2tb[tb_short]
lang_full = tb_full[3:].split('-')[0].lower()
lang_short, tb_code = tb_short.split('_')
# look for commands and models
udpipe_script = '{}/bin-linux64/udpipe'.format(args['udpipe_dir'])
model_name = '{}-{}-ud-2.2-conll18-180430.udpipe'.format(lang_full, tb_code)
model_file = '{}/models/{}'.format(args['udpipe_dir'], model_name)
if not os.path.exists(model_file):
model_name = "mixed-ud-ud-2.2-conll18-180430.udpipe"
model_file = '{}/models/{}'.format(args['udpipe_dir'], model_name)
# check files
if not args['output_file'].endswith('.conllu'):
raise Exception("UDPipe module must write to conllu file.")
if args['module'] == 'tokenize':
# run tokenizer, ssplit and mwt expander at the same time
if not args['input_file'].endswith('.txt'):
raise Exception("UDPipe must take txt file as input when module == tokenize.")
# run tokenizer from txt file
udpipe_cmd = "{} --tokenize {} {} --outfile={} --output=conllu".format(udpipe_script, model_file, args['input_file'], args['output_file'])
run_udpipe(udpipe_cmd)
print("Waiting for filesystem...")
time.sleep(5)
else:
if not args['input_file'].endswith('.conllu'):
raise Exception("UDPipe must take conllu file as input when module != tokenize.")
# first load the original input file
input_conll = conll.CoNLLFile(args['input_file'])
input_conll.load_all()
# do udpipe
if args['module'] == 'parse':
udpipe_cmd = "{} --parse {} {} --output=conllu --input=conllu".format(udpipe_script, model_file, args['input_file'])
else:
udpipe_cmd = "{} --tag {} {} --output=conllu --input=conllu".format(udpipe_script, model_file, args['input_file'])
udpipe_outputs = run_udpipe(udpipe_cmd, return_stdout=True)
print("Waiting for filesystem...")
time.sleep(5)
# load conll back and merge with original conll
udpipe_conll = conll.CoNLLFile(input_str=udpipe_outputs.decode())
udpipe_conll.load_all()
if args['module'] == 'lemma':
fields = ['lemma']
elif args['module'] == 'pos':
fields = ['upos', 'xpos']
elif args['module'] == 'ufeats':
fields = ['feats']
elif args['module'] == 'parse':
fields = ['head', 'deprel', 'deps']
else:
raise Exception("Module {} not recognized.".format(args['module']))
input_conll.set(fields, udpipe_conll.get(fields)) # set fields back
# finally write to file
input_conll.write_conll(args['output_file'])
print("Waiting for filesystem...")
time.sleep(5)
print("All done running module {} with UDPipe.".format(args['module']))
def load_short2tb(filename):
short2tb = dict()
with open(filename) as infile:
for line in infile:
line = line.strip()
if len(line) == 0:
continue
array = line.split()
assert len(array) == 2
short2tb[array[0]] = array[1]
return short2tb
def run_udpipe(cmd, return_stdout=False):
print("Running process: {}".format(cmd))
if return_stdout:
rtn = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
else:
rtn = subprocess.run(cmd, shell=True)
if rtn.returncode != 0:
raise Exception("Calling UDPipe failed with return code {}.".format(rtn.returncode))
return rtn.stdout
if __name__ == '__main__':
main()
| stanfordnlp-master | stanfordnlp/models/udpipe_wrapper.py |
"""
Entry point for training and evaluating a multi-word token (MWT) expander.
This MWT expander combines a neural sequence-to-sequence architecture with a dictionary
to decode the token into multiple words.
For details please refer to paper: https://nlp.stanford.edu/pubs/qi2018universal.pdf.
"""
import sys
import os
import shutil
import time
from datetime import datetime
import argparse
import numpy as np
import random
import torch
from torch import nn, optim
from stanfordnlp.models.mwt.data import DataLoader
from stanfordnlp.models.mwt.vocab import Vocab
from stanfordnlp.models.mwt.trainer import Trainer
from stanfordnlp.models.mwt import scorer
from stanfordnlp.models.common import utils
import stanfordnlp.models.common.seq2seq_constant as constant
def parse_args():
parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', type=str, default='data/mwt', help='Directory for all MWT data.')
parser.add_argument('--train_file', type=str, default=None, help='Input file for data loader.')
parser.add_argument('--eval_file', type=str, default=None, help='Input file for data loader.')
parser.add_argument('--output_file', type=str, default=None, help='Output CoNLL-U file.')
    parser.add_argument('--gold_file', type=str, default=None, help='Gold CoNLL-U file for evaluation.')
parser.add_argument('--mode', default='train', choices=['train', 'predict'])
parser.add_argument('--lang', type=str, help='Language')
parser.add_argument('--shorthand', type=str, help="Treebank shorthand")
parser.add_argument('--no_dict', dest='ensemble_dict', action='store_false', help='Do not ensemble dictionary with seq2seq. By default ensemble a dict.')
parser.add_argument('--ensemble_early_stop', action='store_true', help='Early stopping based on ensemble performance.')
parser.add_argument('--dict_only', action='store_true', help='Only train a dictionary-based MWT expander.')
parser.add_argument('--hidden_dim', type=int, default=100)
parser.add_argument('--emb_dim', type=int, default=50)
parser.add_argument('--num_layers', type=int, default=1)
parser.add_argument('--emb_dropout', type=float, default=0.5)
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument('--max_dec_len', type=int, default=50)
parser.add_argument('--beam_size', type=int, default=1)
parser.add_argument('--attn_type', default='soft', choices=['soft', 'mlp', 'linear', 'deep'], help='Attention type')
parser.add_argument('--sample_train', type=float, default=1.0, help='Subsample training data.')
parser.add_argument('--optim', type=str, default='adam', help='sgd, adagrad, adam or adamax.')
parser.add_argument('--lr', type=float, default=1e-3, help='Learning rate')
parser.add_argument('--lr_decay', type=float, default=0.9)
parser.add_argument('--decay_epoch', type=int, default=30, help="Decay the lr starting from this epoch.")
parser.add_argument('--num_epoch', type=int, default=30)
parser.add_argument('--batch_size', type=int, default=50)
parser.add_argument('--max_grad_norm', type=float, default=5.0, help='Gradient clipping.')
parser.add_argument('--log_step', type=int, default=20, help='Print log every k steps.')
parser.add_argument('--save_dir', type=str, default='saved_models/mwt', help='Root dir for saving models.')
parser.add_argument('--save_name', type=str, default=None, help="File name to save the model")
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
parser.add_argument('--cpu', action='store_true', help='Ignore CUDA.')
args = parser.parse_args()
return args
def main():
args = parse_args()
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
if args.cpu:
args.cuda = False
elif args.cuda:
torch.cuda.manual_seed(args.seed)
args = vars(args)
print("Running MWT expander in {} mode".format(args['mode']))
if args['mode'] == 'train':
train(args)
else:
evaluate(args)
def train(args):
# load data
print('max_dec_len:', args['max_dec_len'])
print("Loading data with batch size {}...".format(args['batch_size']))
train_batch = DataLoader(args['train_file'], args['batch_size'], args, evaluation=False)
vocab = train_batch.vocab
args['vocab_size'] = vocab.size
dev_batch = DataLoader(args['eval_file'], args['batch_size'], args, vocab=vocab, evaluation=True)
utils.ensure_dir(args['save_dir'])
model_file = args['save_dir'] + '/' + args['save_name'] if args['save_name'] is not None \
else '{}/{}_mwt_expander.pt'.format(args['save_dir'], args['shorthand'])
# pred and gold path
system_pred_file = args['output_file']
gold_file = args['gold_file']
# skip training if the language does not have training or dev data
if len(train_batch) == 0 or len(dev_batch) == 0:
print("Skip training because no data available...")
sys.exit(0)
# train a dictionary-based MWT expander
trainer = Trainer(args=args, vocab=vocab, use_cuda=args['cuda'])
print("Training dictionary-based MWT expander...")
trainer.train_dict(train_batch.conll.get_mwt_expansions())
print("Evaluating on dev set...")
dev_preds = trainer.predict_dict(dev_batch.conll.get_mwt_expansion_cands())
dev_batch.conll.write_conll_with_mwt_expansions(dev_preds, open(system_pred_file, 'w'))
_, _, dev_f = scorer.score(system_pred_file, gold_file)
print("Dev F1 = {:.2f}".format(dev_f * 100))
if args.get('dict_only', False):
# save dictionaries
trainer.save(model_file)
else:
# train a seq2seq model
print("Training seq2seq-based MWT expander...")
global_step = 0
max_steps = len(train_batch) * args['num_epoch']
dev_score_history = []
best_dev_preds = []
current_lr = args['lr']
global_start_time = time.time()
format_str = '{}: step {}/{} (epoch {}/{}), loss = {:.6f} ({:.3f} sec/batch), lr: {:.6f}'
# start training
for epoch in range(1, args['num_epoch']+1):
train_loss = 0
for i, batch in enumerate(train_batch):
start_time = time.time()
global_step += 1
loss = trainer.update(batch, eval=False) # update step
train_loss += loss
if global_step % args['log_step'] == 0:
duration = time.time() - start_time
print(format_str.format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), global_step,\
max_steps, epoch, args['num_epoch'], loss, duration, current_lr))
# eval on dev
print("Evaluating on dev set...")
dev_preds = []
for i, batch in enumerate(dev_batch):
preds = trainer.predict(batch)
dev_preds += preds
if args.get('ensemble_dict', False) and args.get('ensemble_early_stop', False):
print("[Ensembling dict with seq2seq model...]")
dev_preds = trainer.ensemble(dev_batch.conll.get_mwt_expansion_cands(), dev_preds)
dev_batch.conll.write_conll_with_mwt_expansions(dev_preds, open(system_pred_file, 'w'))
_, _, dev_score = scorer.score(system_pred_file, gold_file)
train_loss = train_loss / train_batch.num_examples * args['batch_size'] # avg loss per batch
print("epoch {}: train_loss = {:.6f}, dev_score = {:.4f}".format(epoch, train_loss, dev_score))
# save best model
if epoch == 1 or dev_score > max(dev_score_history):
trainer.save(model_file)
print("new best model saved.")
best_dev_preds = dev_preds
# lr schedule
if epoch > args['decay_epoch'] and dev_score <= dev_score_history[-1]:
current_lr *= args['lr_decay']
trainer.change_lr(current_lr)
dev_score_history += [dev_score]
print("")
print("Training ended with {} epochs.".format(epoch))
best_f, best_epoch = max(dev_score_history)*100, np.argmax(dev_score_history)+1
print("Best dev F1 = {:.2f}, at epoch = {}".format(best_f, best_epoch))
# try ensembling with dict if necessary
if args.get('ensemble_dict', False):
print("[Ensembling dict with seq2seq model...]")
dev_preds = trainer.ensemble(dev_batch.conll.get_mwt_expansion_cands(), best_dev_preds)
dev_batch.conll.write_conll_with_mwt_expansions(dev_preds, open(system_pred_file, 'w'))
_, _, dev_score = scorer.score(system_pred_file, gold_file)
print("Ensemble dev F1 = {:.2f}".format(dev_score*100))
        best_f = max(best_f, dev_score*100)  # keep best_f on the same (percentage) scale
def evaluate(args):
# file paths
system_pred_file = args['output_file']
gold_file = args['gold_file']
model_file = args['save_dir'] + '/' + args['save_name'] if args['save_name'] is not None \
else '{}/{}_mwt_expander.pt'.format(args['save_dir'], args['shorthand'])
# load model
use_cuda = args['cuda'] and not args['cpu']
trainer = Trainer(model_file=model_file, use_cuda=use_cuda)
loaded_args, vocab = trainer.args, trainer.vocab
for k in args:
if k.endswith('_dir') or k.endswith('_file') or k in ['shorthand']:
loaded_args[k] = args[k]
print('max_dec_len:', loaded_args['max_dec_len'])
# load data
print("Loading data with batch size {}...".format(args['batch_size']))
batch = DataLoader(args['eval_file'], args['batch_size'], loaded_args, vocab=vocab, evaluation=True)
if len(batch) > 0:
dict_preds = trainer.predict_dict(batch.conll.get_mwt_expansion_cands())
# decide trainer type and run eval
if loaded_args['dict_only']:
preds = dict_preds
else:
print("Running the seq2seq model...")
preds = []
for i, b in enumerate(batch):
preds += trainer.predict(b)
if loaded_args.get('ensemble_dict', False):
preds = trainer.ensemble(batch.conll.get_mwt_expansion_cands(), preds)
else:
# skip eval if dev data does not exist
preds = []
# write to file and score
batch.conll.write_conll_with_mwt_expansions(preds, open(system_pred_file, 'w'))
if gold_file is not None:
_, _, score = scorer.score(system_pred_file, gold_file)
print("MWT expansion score:")
print("{} {:.2f}".format(args['shorthand'], score*100))
if __name__ == '__main__':
main()
| stanfordnlp-master | stanfordnlp/models/mwt_expander.py |
stanfordnlp-master | stanfordnlp/models/__init__.py |
|
"""
Entry point for training and evaluating a dependency parser.
This implementation combines a deep biaffine graph-based parser with linearization and distance features.
For details please refer to paper: https://nlp.stanford.edu/pubs/qi2018universal.pdf.
"""
"""
Training and evaluation for the parser.
"""
import sys
import os
import shutil
import time
from datetime import datetime
import argparse
import logging
import numpy as np
import random
import torch
from torch import nn, optim
from stanfordnlp.models.depparse.data import DataLoader
from stanfordnlp.models.depparse.trainer import Trainer
from stanfordnlp.models.depparse import scorer
from stanfordnlp.models.common import utils
from stanfordnlp.models.common.pretrain import Pretrain
def parse_args():
parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', type=str, default='data/depparse', help='Directory for all dependency parsing data.')
parser.add_argument('--wordvec_dir', type=str, default='extern_data/word2vec', help='Directory of word vectors')
parser.add_argument('--train_file', type=str, default=None, help='Input file for data loader.')
parser.add_argument('--eval_file', type=str, default=None, help='Input file for data loader.')
parser.add_argument('--output_file', type=str, default=None, help='Output CoNLL-U file.')
    parser.add_argument('--gold_file', type=str, default=None, help='Gold CoNLL-U file for evaluation.')
parser.add_argument('--mode', default='train', choices=['train', 'predict'])
parser.add_argument('--lang', type=str, help='Language')
parser.add_argument('--shorthand', type=str, help="Treebank shorthand")
parser.add_argument('--hidden_dim', type=int, default=400)
parser.add_argument('--char_hidden_dim', type=int, default=400)
# parser.add_argument('--deep_biaff_hidden_dim', type=int, default=200)
# parser.add_argument('--composite_deep_biaff_hidden_dim', type=int, default=200)
parser.add_argument('--word_emb_dim', type=int, default=75)
parser.add_argument('--char_emb_dim', type=int, default=100)
parser.add_argument('--output_size', type=int, default=400)
parser.add_argument('--tag_emb_dim', type=int, default=50)
parser.add_argument('--transformed_dim', type=int, default=125)
parser.add_argument('--num_layers', type=int, default=3)
parser.add_argument('--char_num_layers', type=int, default=1)
parser.add_argument('--word_dropout', type=float, default=0.0)
parser.add_argument('--dropout', type=float, default=0.0)
parser.add_argument('--subsample_ratio', type=float, default=1.0)
parser.add_argument('--rec_dropout', type=float, default=0, help="Recurrent dropout")
parser.add_argument('--char_rec_dropout', type=float, default=0, help="Recurrent dropout")
parser.add_argument('--no_char', dest='char', action='store_false', help="Turn off character model.")
parser.add_argument('--no_pretrain', dest='pretrain', action='store_false', help="Turn off pretrained embeddings.")
    parser.add_argument('--no_linearization', dest='linearization', action='store_false', help="Turn off linearization term.")
    parser.add_argument('--no_distance', dest='distance', action='store_false', help="Turn off distance term.")
parser.add_argument('--sample_train', type=float, default=1.0, help='Subsample training data.')
parser.add_argument('--optim', type=str, default='sgd', help='sgd, rsgd, adagrad, adam or adamax.')
parser.add_argument('--lr', type=float, default=1e-2, help='Learning rate')
parser.add_argument('--beta2', type=float, default=0.95)
parser.add_argument('--max_steps', type=int, default=1000000)
parser.add_argument('--eval_interval', type=int, default=10)
parser.add_argument('--max_steps_before_stop', type=int, default=1000000)
parser.add_argument('--batch_size', type=int, default=500)
parser.add_argument('--max_grad_norm', type=float, default=1.0, help='Gradient clipping.')
parser.add_argument('--log_step', type=int, default=2, help='Print log every k steps.')
parser.add_argument('--save_dir', type=str, default='saved_models/depparse', help='Root dir for saving models.')
parser.add_argument('--save_name', type=str, default='bestmodel', help="File name to save the model")
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
parser.add_argument('--cpu', action='store_true', help='Ignore CUDA.')
args = parser.parse_args()
return args
def main():
args = parse_args()
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
if args.cpu:
args.cuda = False
elif args.cuda:
torch.cuda.manual_seed(args.seed)
args = vars(args)
print("Running parser in {} mode".format(args['mode']))
if args['mode'] == 'train':
formatter = logging.Formatter('%(asctime)s %(message)s')
logging.basicConfig(level=logging.DEBUG,
format='%(message)s',
datefmt='%FT%T',)
logging.info(f"Logging")
log = logging.getLogger()
log_name = "logs/"+str(args['save_name'])
if not os.path.exists(log_name): os.system("touch "+log_name)
fh = logging.FileHandler(log_name)
fh.setFormatter(formatter)
log.addHandler(fh)
train(args)
else:
evaluate(args)
def train(args):
utils.ensure_dir(args['save_dir'])
model_file = args['save_dir'] + '/' + args['save_name'] if args['save_name'] is not None \
else '{}/{}_parser.pt'.format(args['save_dir'], args['shorthand'])
print("model file", model_file)
# load pretrained vectors
vec_file = utils.get_wordvec_file(args['wordvec_dir'], args['shorthand'])
pretrain_file = '{}/{}.pretrain.pt'.format(args['save_dir'], args['shorthand'])
pretrain = Pretrain(pretrain_file, vec_file)
print("pretrain file", pretrain_file)
# load data
print("Loading data with batch size {}...".format(args['batch_size']))
train_batch = DataLoader(args['train_file'], args['batch_size'], args, pretrain, evaluation=False)
vocab = train_batch.vocab
dev_batch = DataLoader(args['eval_file'], args['batch_size'], args, pretrain, vocab=vocab, evaluation=True)
# pred and gold path
# system_pred_file = args['output_file']
# gold_file = args['gold_file']
# skip training if the language does not have training or dev data
# if len(train_batch) == 0 or len(dev_batch) == 0:
if len(train_batch) == 0:
print("Skip training because no data available...")
sys.exit(0)
current_lr = args['lr']
mapping_lr = 0.1
# scale_lr = current_lr
print("Training parser...")
trainer = Trainer(args=args, vocab=vocab, pretrain=pretrain, use_cuda=args['cuda'])
print("optimizer:", trainer.optimizer)
print("mapping optimizer:", trainer.mapping_optimizer)
print("scale optimizer:", trainer.scale_optimizer)
global_step = 0
max_steps = args['max_steps']
dev_score_history = []
# global_start_time = time.time()
format_str = '{}: step {}/{}, loss = {:.6f} ({:.3f} sec/batch), root acc: {:.6f}'
last_best_step = 0
last_annealed_step = 0
# start training
train_loss = 0
train_edge_acc = 0
dev_root_pred_acc = 0
print("Train batch", len(train_batch))
while True:
do_break = False
for i, batch in enumerate(train_batch):
start_time = time.time()
global_step += 1
loss, edge_acc = trainer.update(batch, eval=False, subsample=True) # update step
train_loss += loss
train_edge_acc += edge_acc
duration = time.time() - start_time
logging.info(format_str.format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), global_step,\
max_steps, loss, duration, edge_acc))
if global_step % len(train_batch) == 0:
print("Evaluating on dev set...")
total_node_system = 0
total_node_gold = 0
total_correct_heads = 0
for db in dev_batch:
root_acc, _, correct_heads, node_system, node_gold = trainer.predict(db)
total_node_system += node_system
total_node_gold += node_gold
total_correct_heads += correct_heads
dev_root_pred_acc += root_acc
# precision = total_correct_heads/total_node_system
# recall = total_correct_heads/total_node_gold
# f_1_overall = 2*precision*recall/(precision+recall)
# dev_batch.conll.set(['head', 'deprel'], [y for x in dev_preds for y in x])
# dev_batch.conll.write_conll(system_pred_file)
# _, _, dev_score = scorer.score(system_pred_file, gold_file)
train_edge_acc /= len(train_batch)
train_loss /= len(train_batch)
dev_root_pred_acc /= len(dev_batch)
logging.info("step {}: Train loss = {:.6f}, Root pred acc. = {:.4f}".format(global_step, train_loss, train_edge_acc))
logging.info("step {}: Dev root pred acc. = {:.6f}".format(global_step, dev_root_pred_acc))
                train_loss = 0
                train_edge_acc = 0
                dev_root_pred_acc = 0  # reset so the next dev evaluation starts from zero
# if len(dev_score_history) == 0 or f_1_overall > max(dev_score_history):
# print("f1 overall", f_1_overall)
# last_best_step = global_step
# trainer.save(model_file)
# logging.info("new best model saved.")
# dev_score_history.append(f_1_overall)
# train_loss = 0
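                # learning-rate schedule: every 50,000 steps, halve the main learning rate, scale the
                # mapping (RSGD) learning rate by 0.75, and rebuild both optimizers with the new rates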
if (global_step % 50000 == 0):
current_lr *= 0.5
mapping_lr *= 0.75
trainer.optimizer = utils.get_optimizer(trainer.args['optim'], trainer.parameters, current_lr, betas=(0.9, trainer.args['beta2']), eps=1e-6)
trainer.mapping_optimizer = utils.get_optimizer('rsgd', trainer.model.hypmapping.parameters(), mapping_lr)
# current_lr *= 0.75
# scale_lr *= 0.5
# trainer.optimizer = utils.RiemannianSGD(trainer.model.parameters(), lr=current_lr, rgrad=utils.poincare_grad, retraction=utils.retraction)
# trainer.scale_optimizer = torch.optim.SGD([trainer.scale], lr=scale_lr)
# # save best model
# dev_score_history += [dev_score]
# print("")
# if (global_step - last_best_step >= 5*len(train_batch)) and ((global_step - last_annealed_step) >= 3*len(train_batch)):
# last_annealed_step = global_step
# logging.info("Annealing learning rate")
# #annealing
# if not using_amsgrad:
# print("Switching to AMSGrad")
# last_best_step = global_step
# using_amsgrad = True
# trainer.optimizer = optim.Adam(trainer.model.parameters(), amsgrad=True, lr=args['lr'], betas=(.9, args['beta2']), eps=1e-6)
# else:
if global_step >= args['max_steps']:
do_break = True
break
if do_break: break
# print("Reshuffling now")
# train_batch.reshuffle()
print("Training ended with {} steps.".format(global_step))
# best_f, best_eval = max(dev_score_history)*100, np.argmax(dev_score_history)+1
# print("Best dev F1 = {:.2f}, at iteration = {}".format(best_f, best_eval * args['eval_interval']))
def evaluate(args):
# file paths
# system_pred_file = args['output_file']
# gold_file = args['gold_file']
model_file = args['save_dir'] + '/' + args['save_name'] if args['save_name'] is not None \
else '{}/{}_parser.pt'.format(args['save_dir'], args['shorthand'])
print("model file", model_file)
pretrain_file = '{}/{}.pretrain.pt'.format(args['save_dir'], args['shorthand'])
print("pretrain file", pretrain_file)
# load pretrain
pretrain = Pretrain(pretrain_file)
# load model
use_cuda = args['cuda'] and not args['cpu']
trainer = Trainer(pretrain=pretrain, model_file=model_file, use_cuda=use_cuda)
loaded_args, vocab = trainer.args, trainer.vocab
# load config
# for k in args:
# if k.endswith('_dir') or k.endswith('_file') or k in ['shorthand'] or k == 'mode':
# loaded_args[k] = args[k]
batch_size = 10
# load data
print("Loading data with batch size {}...".format(batch_size))
dev_batch = DataLoader(args['eval_file'], batch_size, loaded_args, pretrain, vocab=vocab, evaluation=True)
if len(dev_batch) > 0:
print("Start evaluation...")
# preds = []
total_node_system = 0
total_node_gold = 0
total_correct_heads = 0
for db in dev_batch:
_, _, correct_heads, node_system, node_gold = trainer.predict(db)
total_node_system += node_system
total_node_gold += node_gold
total_correct_heads += correct_heads
precision = total_correct_heads/total_node_system
recall = total_correct_heads/total_node_gold
f_1_overall = 2*precision*recall/(precision+recall)
print("F1:", f_1_overall)
# else:
# # skip eval if dev data does not exist
# preds = []
# write to file and score
# batch.conll.set(['head', 'deprel'], [y for x in preds for y in x])
# batch.conll.write_conll(system_pred_file)
# if gold_file is not None:
# _, _, score = scorer.score(system_pred_file, gold_file)
# print("Parser score:")
# print("{} {:.2f}".format(args['shorthand'], score*100))
if __name__ == '__main__':
main()
| stanfordnlp-master | stanfordnlp/models/parser.py |
"""
Entry point for training and evaluating a neural tokenizer.
This tokenizer treats tokenization and sentence segmentation as a tagging problem, and uses a combination of
recurrent and convolutional architectures.
For details please refer to the paper: https://nlp.stanford.edu/pubs/qi2018universal.pdf.
"""
import random
import argparse
from copy import copy
import numpy as np
import torch
from stanfordnlp.models.common import utils
from stanfordnlp.models.tokenize.trainer import Trainer
from stanfordnlp.models.tokenize.data import DataLoader
from stanfordnlp.models.tokenize.utils import load_mwt_dict, eval_model, output_predictions
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--txt_file', type=str, help="Input plaintext file")
parser.add_argument('--label_file', type=str, default=None, help="Character-level label file")
parser.add_argument('--json_file', type=str, default=None, help="JSON file with pre-chunked units")
parser.add_argument('--mwt_json_file', type=str, default=None, help="JSON file for MWT expansions")
parser.add_argument('--conll_file', type=str, default=None, help="CoNLL file for output")
parser.add_argument('--dev_txt_file', type=str, help="(Train only) Input plaintext file for the dev set")
parser.add_argument('--dev_label_file', type=str, default=None, help="(Train only) Character-level label file for the dev set")
parser.add_argument('--dev_json_file', type=str, default=None, help="(Train only) JSON file with pre-chunked units for the dev set")
parser.add_argument('--dev_conll_gold', type=str, default=None, help="(Train only) CoNLL-U file for the dev set for early stopping")
parser.add_argument('--lang', type=str, help="Language")
parser.add_argument('--shorthand', type=str, help="UD treebank shorthand")
parser.add_argument('--mode', default='train', choices=['train', 'predict'])
parser.add_argument('--emb_dim', type=int, default=32, help="Dimension of unit embeddings")
parser.add_argument('--hidden_dim', type=int, default=64, help="Dimension of hidden units")
parser.add_argument('--conv_filters', type=str, default="1,9", help="Configuration of conv filters. ,, separates layers and , separates filter sizes in the same layer.")
parser.add_argument('--no-residual', dest='residual', action='store_false', help="Add linear residual connections")
parser.add_argument('--no-hierarchical', dest='hierarchical', action='store_false', help="\"Hierarchical\" RNN tokenizer")
parser.add_argument('--hier_invtemp', type=float, default=0.5, help="Inverse temperature used in propagating tokenization predictions between RNN layers")
parser.add_argument('--input_dropout', action='store_true', help="Dropout input embeddings as well")
parser.add_argument('--conv_res', type=str, default=None, help="Convolutional residual layers for the RNN")
parser.add_argument('--rnn_layers', type=int, default=1, help="Layers of RNN in the tokenizer")
parser.add_argument('--max_grad_norm', type=float, default=1.0, help="Maximum gradient norm to clip to")
    parser.add_argument('--anneal', type=float, default=.999, help="Anneal the learning rate by this amount when dev performance deteriorates")
parser.add_argument('--anneal_after', type=int, default=2000, help="Anneal the learning rate no earlier than this step")
parser.add_argument('--lr0', type=float, default=2e-3, help="Initial learning rate")
parser.add_argument('--dropout', type=float, default=0.33, help="Dropout probability")
parser.add_argument('--unit_dropout', type=float, default=0.33, help="Unit dropout probability")
    parser.add_argument('--tok_noise', type=float, default=0.02, help="Probability of introducing noise into the input of the higher RNN")
parser.add_argument('--weight_decay', type=float, default=0.0, help="Weight decay")
parser.add_argument('--max_seqlen', type=int, default=100, help="Maximum sequence length to consider at a time")
parser.add_argument('--batch_size', type=int, default=32, help="Batch size to use")
parser.add_argument('--epochs', type=int, default=10, help="Total epochs to train the model for")
parser.add_argument('--steps', type=int, default=20000, help="Steps to train the model for, if unspecified use epochs")
parser.add_argument('--report_steps', type=int, default=20, help="Update step interval to report loss")
    parser.add_argument('--shuffle_steps', type=int, default=100, help="Step interval to shuffle each paragraph in the generator")
parser.add_argument('--eval_steps', type=int, default=200, help="Step interval to evaluate the model on the dev set for early stopping")
parser.add_argument('--save_name', type=str, default=None, help="File name to save the model")
parser.add_argument('--load_name', type=str, default=None, help="File name to load a saved model")
parser.add_argument('--save_dir', type=str, default='saved_models/tokenize', help="Directory to save models in")
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
parser.add_argument('--cpu', action='store_true', help='Ignore CUDA and run on CPU.')
parser.add_argument('--seed', type=int, default=1234)
args = parser.parse_args()
return args
def main():
args = parse_args()
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
if args.cpu:
args.cuda = False
elif args.cuda:
torch.cuda.manual_seed(args.seed)
args = vars(args)
print("Running tokenizer in {} mode".format(args['mode']))
args['feat_funcs'] = ['space_before', 'capitalized', 'all_caps', 'numeric']
args['feat_dim'] = len(args['feat_funcs'])
args['save_name'] = "{}/{}".format(args['save_dir'], args['save_name']) if args['save_name'] is not None \
else '{}/{}_tokenizer.pt'.format(args['save_dir'], args['shorthand'])
utils.ensure_dir(args['save_dir'])
if args['mode'] == 'train':
train(args)
else:
evaluate(args)
def train(args):
mwt_dict = load_mwt_dict(args['mwt_json_file'])
train_input_files = {
'json': args['json_file'],
'txt': args['txt_file'],
'label': args['label_file']
}
train_batches = DataLoader(args, input_files=train_input_files)
vocab = train_batches.vocab
args['vocab_size'] = len(vocab)
dev_input_files = {
'json': args['dev_json_file'],
'txt': args['dev_txt_file'],
'label': args['dev_label_file']
}
dev_batches = DataLoader(args, input_files=dev_input_files, vocab=vocab, evaluation=True)
trainer = Trainer(args=args, vocab=vocab, use_cuda=args['cuda'])
if args['load_name'] is not None:
load_name = "{}/{}".format(args['save_dir'], args['load_name'])
trainer.load(load_name)
trainer.change_lr(args['lr0'])
N = len(train_batches)
steps = args['steps'] if args['steps'] is not None else int(N * args['epochs'] / args['batch_size'] + .5)
lr = args['lr0']
prev_dev_score = -1
best_dev_score = -1
best_dev_step = -1
for step in range(1, steps+1):
batch = train_batches.next(unit_dropout=args['unit_dropout'])
loss = trainer.update(batch)
if step % args['report_steps'] == 0:
print("Step {:6d}/{:6d} Loss: {:.3f}".format(step, steps, loss))
if args['shuffle_steps'] > 0 and step % args['shuffle_steps'] == 0:
train_batches.shuffle()
if step % args['eval_steps'] == 0:
dev_score = eval_model(args, trainer, dev_batches, vocab, mwt_dict)
reports = ['Dev score: {:6.3f}'.format(dev_score * 100)]
if step >= args['anneal_after'] and dev_score < prev_dev_score:
reports += ['lr: {:.6f} -> {:.6f}'.format(lr, lr * args['anneal'])]
lr *= args['anneal']
trainer.change_lr(lr)
prev_dev_score = dev_score
if dev_score > best_dev_score:
reports += ['New best dev score!']
best_dev_score = dev_score
best_dev_step = step
trainer.save(args['save_name'])
print('\t'.join(reports))
print('Best dev score={} at step {}'.format(best_dev_score, best_dev_step))
def evaluate(args):
mwt_dict = load_mwt_dict(args['mwt_json_file'])
use_cuda = args['cuda'] and not args['cpu']
trainer = Trainer(model_file=args['save_name'], use_cuda=use_cuda)
loaded_args, vocab = trainer.args, trainer.vocab
for k in loaded_args:
if not k.endswith('_file') and k not in ['cuda', 'mode', 'save_dir', 'save_name']:
args[k] = loaded_args[k]
eval_input_files = {
'json': args['json_file'],
'txt': args['txt_file'],
'label': args['label_file']
}
batches = DataLoader(args, input_files=eval_input_files, vocab=vocab, evaluation=True)
with open(args['conll_file'], 'w') as conll_output_file:
oov_count, N, _ = output_predictions(conll_output_file, trainer, batches, vocab, mwt_dict, args['max_seqlen'])
print("OOV rate: {:6.3f}% ({:6d}/{:6d})".format(oov_count / N * 100, oov_count, N))
if __name__ == '__main__':
main()
| stanfordnlp-master | stanfordnlp/models/tokenizer.py |
"""
Entry point for training and evaluating a POS/morphological features tagger.
This tagger uses highway BiLSTM layers with character and word-level representations, and biaffine classifiers
to produce consistent POS and UFeats predictions.
For details please refer to the paper: https://nlp.stanford.edu/pubs/qi2018universal.pdf.
"""
import sys
import os
import shutil
import time
from datetime import datetime
import argparse
import numpy as np
import random
import torch
from torch import nn, optim
from stanfordnlp.models.pos.data import DataLoader
from stanfordnlp.models.pos.trainer import Trainer
from stanfordnlp.models.pos import scorer
from stanfordnlp.models.common import utils
from stanfordnlp.models.common.pretrain import Pretrain
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='data/pos', help='Root dir for saving models.')
parser.add_argument('--wordvec_dir', type=str, default='extern_data/word2vec', help='Directory of word vectors')
parser.add_argument('--train_file', type=str, default=None, help='Input file for data loader.')
parser.add_argument('--eval_file', type=str, default=None, help='Input file for data loader.')
parser.add_argument('--output_file', type=str, default=None, help='Output CoNLL-U file.')
parser.add_argument('--gold_file', type=str, default=None, help='Output CoNLL-U file.')
parser.add_argument('--mode', default='train', choices=['train', 'predict'])
parser.add_argument('--lang', type=str, help='Language')
parser.add_argument('--shorthand', type=str, help="Treebank shorthand")
parser.add_argument('--hidden_dim', type=int, default=200)
parser.add_argument('--char_hidden_dim', type=int, default=400)
parser.add_argument('--deep_biaff_hidden_dim', type=int, default=400)
parser.add_argument('--composite_deep_biaff_hidden_dim', type=int, default=100)
parser.add_argument('--word_emb_dim', type=int, default=75)
parser.add_argument('--char_emb_dim', type=int, default=100)
parser.add_argument('--tag_emb_dim', type=int, default=50)
parser.add_argument('--transformed_dim', type=int, default=125)
parser.add_argument('--num_layers', type=int, default=2)
parser.add_argument('--char_num_layers', type=int, default=1)
parser.add_argument('--word_dropout', type=float, default=0.33)
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument('--rec_dropout', type=float, default=0, help="Recurrent dropout")
parser.add_argument('--char_rec_dropout', type=float, default=0, help="Recurrent dropout")
parser.add_argument('--no_char', dest='char', action='store_false', help="Turn off character model.")
parser.add_argument('--no_pretrain', dest='pretrain', action='store_false', help="Turn off pretrained embeddings.")
parser.add_argument('--share_hid', action='store_true', help="Share hidden representations for UPOS, XPOS and UFeats.")
parser.set_defaults(share_hid=False)
parser.add_argument('--sample_train', type=float, default=1.0, help='Subsample training data.')
parser.add_argument('--optim', type=str, default='adam', help='sgd, adagrad, adam or adamax.')
parser.add_argument('--lr', type=float, default=3e-3, help='Learning rate')
parser.add_argument('--beta2', type=float, default=0.95)
parser.add_argument('--max_steps', type=int, default=50000)
parser.add_argument('--eval_interval', type=int, default=100)
parser.add_argument('--fix_eval_interval', dest='adapt_eval_interval', action='store_false', \
help="Use fixed evaluation interval for all treebanks, otherwise by default the interval will be increased for larger treebanks.")
parser.add_argument('--max_steps_before_stop', type=int, default=3000)
parser.add_argument('--batch_size', type=int, default=5000)
parser.add_argument('--max_grad_norm', type=float, default=1.0, help='Gradient clipping.')
parser.add_argument('--log_step', type=int, default=20, help='Print log every k steps.')
parser.add_argument('--save_dir', type=str, default='saved_models/pos', help='Root dir for saving models.')
parser.add_argument('--save_name', type=str, default=None, help="File name to save the model")
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
parser.add_argument('--cpu', action='store_true', help='Ignore CUDA.')
args = parser.parse_args()
return args
def main():
args = parse_args()
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
if args.cpu:
args.cuda = False
elif args.cuda:
torch.cuda.manual_seed(args.seed)
args = vars(args)
print("Running tagger in {} mode".format(args['mode']))
if args['mode'] == 'train':
train(args)
else:
evaluate(args)
def train(args):
utils.ensure_dir(args['save_dir'])
model_file = args['save_dir'] + '/' + args['save_name'] if args['save_name'] is not None \
else '{}/{}_tagger.pt'.format(args['save_dir'], args['shorthand'])
# load pretrained vectors
vec_file = utils.get_wordvec_file(args['wordvec_dir'], args['shorthand'])
pretrain_file = '{}/{}.pretrain.pt'.format(args['save_dir'], args['shorthand'])
pretrain = Pretrain(pretrain_file, vec_file)
# load data
print("Loading data with batch size {}...".format(args['batch_size']))
train_batch = DataLoader(args['train_file'], args['batch_size'], args, pretrain, evaluation=False)
vocab = train_batch.vocab
dev_batch = DataLoader(args['eval_file'], args['batch_size'], args, pretrain, vocab=vocab, evaluation=True)
# pred and gold path
system_pred_file = args['output_file']
gold_file = args['gold_file']
# skip training if the language does not have training or dev data
if len(train_batch) == 0 or len(dev_batch) == 0:
print("Skip training because no data available...")
sys.exit(0)
print("Training tagger...")
trainer = Trainer(args=args, vocab=vocab, pretrain=pretrain, use_cuda=args['cuda'])
global_step = 0
max_steps = args['max_steps']
dev_score_history = []
best_dev_preds = []
current_lr = args['lr']
global_start_time = time.time()
format_str = '{}: step {}/{}, loss = {:.6f} ({:.3f} sec/batch), lr: {:.6f}'
if args['adapt_eval_interval']:
args['eval_interval'] = utils.get_adaptive_eval_interval(dev_batch.num_examples, 2000, args['eval_interval'])
print("Evaluating the model every {} steps...".format(args['eval_interval']))
using_amsgrad = False
last_best_step = 0
# start training
train_loss = 0
while True:
do_break = False
for i, batch in enumerate(train_batch):
start_time = time.time()
global_step += 1
loss = trainer.update(batch, eval=False) # update step
train_loss += loss
if global_step % args['log_step'] == 0:
duration = time.time() - start_time
print(format_str.format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), global_step,\
max_steps, loss, duration, current_lr))
if global_step % args['eval_interval'] == 0:
# eval on dev
print("Evaluating on dev set...")
dev_preds = []
for batch in dev_batch:
preds = trainer.predict(batch)
dev_preds += preds
dev_batch.conll.set(['upos', 'xpos', 'feats'], [y for x in dev_preds for y in x])
dev_batch.conll.write_conll(system_pred_file)
_, _, dev_score = scorer.score(system_pred_file, gold_file)
train_loss = train_loss / args['eval_interval'] # avg loss per batch
print("step {}: train_loss = {:.6f}, dev_score = {:.4f}".format(global_step, train_loss, dev_score))
train_loss = 0
# save best model
if len(dev_score_history) == 0 or dev_score > max(dev_score_history):
last_best_step = global_step
trainer.save(model_file)
print("new best model saved.")
best_dev_preds = dev_preds
dev_score_history += [dev_score]
print("")
if global_step - last_best_step >= args['max_steps_before_stop']:
if not using_amsgrad:
print("Switching to AMSGrad")
last_best_step = global_step
using_amsgrad = True
trainer.optimizer = optim.Adam(trainer.model.parameters(), amsgrad=True, lr=args['lr'], betas=(.9, args['beta2']), eps=1e-6)
else:
do_break = True
break
if global_step >= args['max_steps']:
do_break = True
break
if do_break: break
train_batch.reshuffle()
print("Training ended with {} steps.".format(global_step))
best_f, best_eval = max(dev_score_history)*100, np.argmax(dev_score_history)+1
print("Best dev F1 = {:.2f}, at iteration = {}".format(best_f, best_eval * args['eval_interval']))
def evaluate(args):
# file paths
system_pred_file = args['output_file']
gold_file = args['gold_file']
model_file = args['save_dir'] + '/' + args['save_name'] if args['save_name'] is not None \
else '{}/{}_tagger.pt'.format(args['save_dir'], args['shorthand'])
pretrain_file = '{}/{}.pretrain.pt'.format(args['save_dir'], args['shorthand'])
# load pretrain
pretrain = Pretrain(pretrain_file)
# load model
use_cuda = args['cuda'] and not args['cpu']
trainer = Trainer(pretrain=pretrain, model_file=model_file, use_cuda=use_cuda)
loaded_args, vocab = trainer.args, trainer.vocab
# load config
for k in args:
if k.endswith('_dir') or k.endswith('_file') or k in ['shorthand'] or k == 'mode':
loaded_args[k] = args[k]
# load data
print("Loading data with batch size {}...".format(args['batch_size']))
batch = DataLoader(args['eval_file'], args['batch_size'], loaded_args, pretrain, vocab=vocab, evaluation=True)
if len(batch) > 0:
print("Start evaluation...")
preds = []
for i, b in enumerate(batch):
preds += trainer.predict(b)
else:
# skip eval if dev data does not exist
preds = []
# write to file and score
batch.conll.set(['upos', 'xpos', 'feats'], [y for x in preds for y in x])
batch.conll.write_conll(system_pred_file)
if gold_file is not None:
_, _, score = scorer.score(system_pred_file, gold_file)
print("Tagger score:")
print("{} {:.2f}".format(args['shorthand'], score*100))
if __name__ == '__main__':
main()
| stanfordnlp-master | stanfordnlp/models/tagger.py |
"""
An identity lemmatizer that mimics the behavior of a normal lemmatizer but directly uses the word as the lemma.
"""
import os
import argparse
import random
from stanfordnlp.models.lemma.data import DataLoader
from stanfordnlp.models.lemma import scorer
from stanfordnlp.models.common import utils
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='data/lemma', help='Directory for all lemma data.')
parser.add_argument('--train_file', type=str, default=None, help='Input file for data loader.')
parser.add_argument('--eval_file', type=str, default=None, help='Input file for data loader.')
parser.add_argument('--output_file', type=str, default=None, help='Output CoNLL-U file.')
parser.add_argument('--gold_file', type=str, default=None, help='Output CoNLL-U file.')
parser.add_argument('--mode', default='train', choices=['train', 'predict'])
parser.add_argument('--lang', type=str, help='Language')
parser.add_argument('--batch_size', type=int, default=50)
parser.add_argument('--seed', type=int, default=1234)
args = parser.parse_args()
return args
def main():
args = parse_args()
random.seed(args.seed)
args = vars(args)
print("[Launching identity lemmatizer...]")
if args['mode'] == 'train':
print("[No training is required; will only generate evaluation output...]")
batch = DataLoader(args['eval_file'], args['batch_size'], args, evaluation=True, conll_only=True)
system_pred_file = args['output_file']
gold_file = args['gold_file']
# use identity mapping for prediction
preds = batch.conll.get(['word'])
# write to file and score
batch.conll.write_conll_with_lemmas(preds, system_pred_file)
if gold_file is not None:
_, _, score = scorer.score(system_pred_file, gold_file)
print("Lemma score:")
print("{} {:.2f}".format(args['lang'], score*100))
if __name__ == '__main__':
main()
| stanfordnlp-master | stanfordnlp/models/identity_lemmatizer.py |
from collections import Counter
from stanfordnlp.models.common.vocab import BaseVocab
import stanfordnlp.models.common.seq2seq_constant as constant
class Vocab(BaseVocab):
def build_vocab(self):
pairs = self.data
allchars = "".join([src + tgt for src, tgt in pairs])
counter = Counter(allchars)
self._id2unit = constant.VOCAB_PREFIX + list(sorted(list(counter.keys()), key=lambda k: counter[k], reverse=True))
self._unit2id = {w:i for i, w in enumerate(self._id2unit)}
| stanfordnlp-master | stanfordnlp/models/mwt/vocab.py |
stanfordnlp-master | stanfordnlp/models/mwt/__init__.py |
|
"""
A trainer class to handle training and testing of models.
"""
import sys
import numpy as np
from collections import Counter
import torch
from torch import nn
import torch.nn.init as init
import stanfordnlp.models.common.seq2seq_constant as constant
from stanfordnlp.models.common.trainer import Trainer as BaseTrainer
from stanfordnlp.models.common.seq2seq_model import Seq2SeqModel
from stanfordnlp.models.common import utils, loss
from stanfordnlp.models.mwt.vocab import Vocab
def unpack_batch(batch, use_cuda):
""" Unpack a batch from the data loader. """
if use_cuda:
inputs = [b.cuda() if b is not None else None for b in batch[:4]]
else:
inputs = [b if b is not None else None for b in batch[:4]]
orig_idx = batch[4]
return inputs, orig_idx
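# Note: as produced by stanfordnlp.models.mwt.data.DataLoader.__getitem__, a batch is the 5-tuple
# (src, src_mask, tgt_in, tgt_out, orig_idx): padded character-id tensors for the source token and
# the shifted decoder input/output, a padding mask over src, and the permutation needed to undo the
# length-based sorting.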
class Trainer(object):
""" A trainer for training models. """
def __init__(self, args=None, vocab=None, emb_matrix=None, model_file=None, use_cuda=False):
self.use_cuda = use_cuda
if model_file is not None:
# load from file
self.load(model_file, use_cuda)
else:
self.args = args
self.model = None if args['dict_only'] else Seq2SeqModel(args, emb_matrix=emb_matrix)
self.vocab = vocab
self.expansion_dict = dict()
if not self.args['dict_only']:
self.crit = loss.SequenceLoss(self.vocab.size)
self.parameters = [p for p in self.model.parameters() if p.requires_grad]
if use_cuda:
self.model.cuda()
self.crit.cuda()
else:
self.model.cpu()
self.crit.cpu()
self.optimizer = utils.get_optimizer(self.args['optim'], self.parameters, self.args['lr'])
def update(self, batch, eval=False):
inputs, orig_idx = unpack_batch(batch, self.use_cuda)
src, src_mask, tgt_in, tgt_out = inputs
if eval:
self.model.eval()
else:
self.model.train()
self.optimizer.zero_grad()
log_probs, _ = self.model(src, src_mask, tgt_in)
loss = self.crit(log_probs.view(-1, self.vocab.size), tgt_out.view(-1))
loss_val = loss.data.item()
if eval:
return loss_val
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args['max_grad_norm'])
self.optimizer.step()
return loss_val
def predict(self, batch, unsort=True):
inputs, orig_idx = unpack_batch(batch, self.use_cuda)
src, src_mask, tgt, tgt_mask = inputs
self.model.eval()
batch_size = src.size(0)
preds, _ = self.model.predict(src, src_mask, self.args['beam_size'])
pred_seqs = [self.vocab.unmap(ids) for ids in preds] # unmap to tokens
pred_seqs = utils.prune_decoded_seqs(pred_seqs)
pred_tokens = ["".join(seq) for seq in pred_seqs] # join chars to be tokens
if unsort:
pred_tokens = utils.unsort(pred_tokens, orig_idx)
return pred_tokens
def train_dict(self, pairs):
""" Train a MWT expander given training word-expansion pairs. """
# accumulate counter
ctr = Counter()
ctr.update([(p[0], p[1]) for p in pairs])
seen = set()
# find the most frequent mappings
for p, _ in ctr.most_common():
w, l = p
if w not in seen and w != l:
self.expansion_dict[w] = l
seen.add(w)
return
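    # Usage sketch (hypothetical expansion pair; real pairs come from the treebank's MWT annotations):
    # after trainer.train_dict([("don't", "do not")]) the most frequent expansion of each surface form
    # is memorized, so trainer.predict_dict(["don't", "cat"]) returns ["do not", "cat"] -- unseen words
    # fall back to themselves.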
def predict_dict(self, words):
""" Predict a list of expansions given words. """
expansions = []
for w in words:
if w in self.expansion_dict:
expansions += [self.expansion_dict[w]]
elif w.lower() in self.expansion_dict:
expansions += [self.expansion_dict[w.lower()]]
else:
expansions += [w]
return expansions
def ensemble(self, cands, other_preds):
""" Ensemble the dict with statistical model predictions. """
expansions = []
assert len(cands) == len(other_preds)
for c, pred in zip(cands, other_preds):
if c in self.expansion_dict:
expansions += [self.expansion_dict[c]]
elif c.lower() in self.expansion_dict:
expansions += [self.expansion_dict[c.lower()]]
else:
expansions += [pred]
return expansions
def save(self, filename):
params = {
'model': self.model.state_dict() if self.model is not None else None,
'dict': self.expansion_dict,
'vocab': self.vocab.state_dict(),
'config': self.args
}
try:
torch.save(params, filename)
print("model saved to {}".format(filename))
except BaseException:
print("[Warning: Saving failed... continuing anyway.]")
def load(self, filename, use_cuda=False):
try:
checkpoint = torch.load(filename, lambda storage, loc: storage)
except BaseException:
print("Cannot load model from {}".format(filename))
sys.exit(1)
self.args = checkpoint['config']
self.expansion_dict = checkpoint['dict']
if not self.args['dict_only']:
self.model = Seq2SeqModel(self.args, use_cuda=use_cuda)
self.model.load_state_dict(checkpoint['model'])
else:
self.model = None
self.vocab = Vocab.load_state_dict(checkpoint['vocab'])
| stanfordnlp-master | stanfordnlp/models/mwt/trainer.py |
import random
import numpy as np
import os
from collections import Counter
import torch
import stanfordnlp.models.common.seq2seq_constant as constant
from stanfordnlp.models.common.data import map_to_ids, get_long_tensor, get_float_tensor, sort_all
from stanfordnlp.models.common import conll
from stanfordnlp.models.mwt.vocab import Vocab
from stanfordnlp.pipeline.doc import Document
class DataLoader:
def __init__(self, input_src, batch_size, args, vocab=None, evaluation=False):
self.batch_size = batch_size
self.args = args
self.eval = evaluation
self.shuffled = not self.eval
# check if input source is a file or a Document object
if isinstance(input_src, str):
filename = input_src
assert filename.endswith('conllu'), "Loaded file must be conllu file."
self.conll, data = self.load_file(filename, evaluation=self.eval)
elif isinstance(input_src, Document):
filename = None
doc = input_src
self.conll, data = self.load_doc(doc)
# handle vocab
if vocab is None:
self.vocab = self.init_vocab(data)
else:
self.vocab = vocab
# filter and sample data
if args.get('sample_train', 1.0) < 1.0 and not self.eval:
keep = int(args['sample_train'] * len(data))
data = random.sample(data, keep)
print("Subsample training set with rate {:g}".format(args['sample_train']))
data = self.preprocess(data, self.vocab, args)
# shuffle for training
if self.shuffled:
indices = list(range(len(data)))
random.shuffle(indices)
data = [data[i] for i in indices]
self.num_examples = len(data)
# chunk into batches
data = [data[i:i+batch_size] for i in range(0, len(data), batch_size)]
self.data = data
if filename is not None:
print("{} batches created for {}.".format(len(data), filename))
def init_vocab(self, data):
        assert not self.eval  # for evaluation, a vocab must already be provided
vocab = Vocab(data, self.args['shorthand'])
return vocab
def preprocess(self, data, vocab, args):
processed = []
for d in data:
src = list(d[0])
src = [constant.SOS] + src + [constant.EOS]
src = vocab.map(src)
if self.eval:
tgt = src # as a placeholder
else:
tgt = list(d[1])
tgt_in = vocab.map([constant.SOS] + tgt)
tgt_out = vocab.map(tgt + [constant.EOS])
processed += [[src, tgt_in, tgt_out]]
return processed
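    # Example (hypothetical training pair such as "du" -> "de le"): src becomes the character sequence
    # [SOS, 'd', 'u', EOS] mapped to ids, tgt_in is [SOS] + the target characters and tgt_out is the
    # target characters + [EOS], i.e. decoder input and output are shifted by one for teacher forcing.
    # At evaluation time the (already mapped) src is reused as a placeholder target.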
def __len__(self):
return len(self.data)
def __getitem__(self, key):
""" Get a batch with index. """
if not isinstance(key, int):
raise TypeError
if key < 0 or key >= len(self.data):
raise IndexError
batch = self.data[key]
batch_size = len(batch)
batch = list(zip(*batch))
assert len(batch) == 3
# sort all fields by lens for easy RNN operations
lens = [len(x) for x in batch[0]]
batch, orig_idx = sort_all(batch, lens)
# convert to tensors
src = batch[0]
src = get_long_tensor(src, batch_size)
src_mask = torch.eq(src, constant.PAD_ID)
tgt_in = get_long_tensor(batch[1], batch_size)
tgt_out = get_long_tensor(batch[2], batch_size)
assert tgt_in.size(1) == tgt_out.size(1), \
"Target input and output sequence sizes do not match."
return (src, src_mask, tgt_in, tgt_out, orig_idx)
def __iter__(self):
for i in range(self.__len__()):
yield self.__getitem__(i)
def load_file(self, filename, evaluation=False):
conll_file = conll.CoNLLFile(filename)
if evaluation:
data = [[c] for c in conll_file.get_mwt_expansion_cands()]
else:
data = conll_file.get_mwt_expansions()
return conll_file, data
def load_doc(self, doc):
data = [[c] for c in doc.conll_file.get_mwt_expansion_cands()]
return doc.conll_file, data
| stanfordnlp-master | stanfordnlp/models/mwt/data.py |
"""
Utils and wrappers for scoring lemmatizers.
"""
from stanfordnlp.models.common.utils import ud_scores
def score(system_conllu_file, gold_conllu_file):
""" Wrapper for word segmenter scorer. """
evaluation = ud_scores(gold_conllu_file, system_conllu_file)
el = evaluation["Words"]
p, r, f = el.precision, el.recall, el.f1
return p, r, f
| stanfordnlp-master | stanfordnlp/models/mwt/scorer.py |
stanfordnlp-master | stanfordnlp/models/depparse/__init__.py |
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import stanfordnlp.models.depparse.mapping_utils as util
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence, pack_sequence, PackedSequence
from stanfordnlp.models.common.biaffine import DeepBiaffineScorer
from stanfordnlp.models.common.hlstm import HighwayLSTM
from stanfordnlp.models.common.dropout import WordDropout
from stanfordnlp.models.common.vocab import CompositeVocab
from stanfordnlp.models.common.char_model import CharacterModel
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Parser(nn.Module):
def __init__(self, args, vocab, emb_matrix=None, share_hid=False):
super().__init__()
self.vocab = vocab
self.args = args
self.share_hid = share_hid
self.unsaved_modules = []
def add_unsaved_module(name, module):
self.unsaved_modules += [name]
setattr(self, name, module)
# input layers
input_size = 0
if self.args['word_emb_dim'] > 0:
# frequent word embeddings
self.word_emb = nn.Embedding(len(vocab['word']), self.args['word_emb_dim'], padding_idx=0)
self.lemma_emb = nn.Embedding(len(vocab['lemma']), self.args['word_emb_dim'], padding_idx=0)
input_size += self.args['word_emb_dim'] * 2
if self.args['tag_emb_dim'] > 0:
self.upos_emb = nn.Embedding(len(vocab['upos']), self.args['tag_emb_dim'], padding_idx=0)
if not isinstance(vocab['xpos'], CompositeVocab):
self.xpos_emb = nn.Embedding(len(vocab['xpos']), self.args['tag_emb_dim'], padding_idx=0)
else:
self.xpos_emb = nn.ModuleList()
for l in vocab['xpos'].lens():
self.xpos_emb.append(nn.Embedding(l, self.args['tag_emb_dim'], padding_idx=0))
self.ufeats_emb = nn.ModuleList()
for l in vocab['feats'].lens():
self.ufeats_emb.append(nn.Embedding(l, self.args['tag_emb_dim'], padding_idx=0))
input_size += self.args['tag_emb_dim'] * 2
if self.args['char'] and self.args['char_emb_dim'] > 0:
self.charmodel = CharacterModel(args, vocab)
self.trans_char = nn.Linear(self.args['char_hidden_dim'], self.args['transformed_dim'], bias=False)
input_size += self.args['transformed_dim']
if self.args['pretrain']:
# pretrained embeddings, by default this won't be saved into model file
add_unsaved_module('pretrained_emb', nn.Embedding.from_pretrained(torch.from_numpy(emb_matrix), freeze=True))
self.trans_pretrained = nn.Linear(emb_matrix.shape[1], self.args['transformed_dim'], bias=False)
input_size += self.args['transformed_dim']
# recurrent layers
self.parserlstm = HighwayLSTM(input_size, self.args['hidden_dim'], self.args['num_layers'], batch_first=True, bidirectional=True, dropout=self.args['dropout'], rec_dropout=self.args['rec_dropout'], highway_func=torch.tanh)
self.drop_replacement = nn.Parameter(torch.randn(input_size) / np.sqrt(input_size))
self.parserlstm_h_init = nn.Parameter(torch.zeros(2 * self.args['num_layers'], 1, self.args['hidden_dim']))
self.parserlstm_c_init = nn.Parameter(torch.zeros(2 * self.args['num_layers'], 1, self.args['hidden_dim']))
self.output_size = 400
# classifiers
self.hypmapping = nn.Sequential(
nn.Linear(2*self.args['hidden_dim'], 1000).to(device),
nn.ReLU().to(device),
nn.Linear(1000, 100).to(device),
nn.ReLU().to(device),
nn.Linear(100, self.output_size).to(device),
nn.ReLU().to(device))
self.rootpred = nn.Sequential(
nn.Linear(self.output_size, 100).to(device),
nn.ReLU().to(device),
nn.Linear(100, 100).to(device),
nn.ReLU().to(device),
nn.Linear(100, 1).to(device))
self.CE = nn.CrossEntropyLoss()
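        # Note on this variant: the biaffine scorers below are left commented out. In forward(), the
        # BiLSTM states are max-norm normalized per sentence, mapped by `hypmapping` into a
        # 400-dimensional space (the commented-out code feeds these vectors to mapping_utils'
        # hyperbolic distance routines), and `rootpred` gives each token a scalar root score that is
        # trained with cross-entropy against the gold root index.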
# self.scale = nn.Parameter(torch.cuda.FloatTensor([1.0]), requires_grad=True)
# self.unlabeled = DeepBiaffineScorer(2 * self.args['hidden_dim'], 2 * self.args['hidden_dim'], self.args['deep_biaff_hidden_dim'], 1, pairwise=True, dropout=args['dropout'])
# self.deprel = DeepBiaffineScorer(2 * self.args['hidden_dim'], 2 * self.args['hidden_dim'], self.args['deep_biaff_hidden_dim'], len(vocab['deprel']), pairwise=True, dropout=args['dropout'])
# if args['linearization']:
# self.linearization = DeepBiaffineScorer(2 * self.args['hidden_dim'], 2 * self.args['hidden_dim'], self.args['deep_biaff_hidden_dim'], 1, pairwise=True, dropout=args['dropout'])
# if args['distance']:
# self.distance = DeepBiaffineScorer(2 * self.args['hidden_dim'], 2 * self.args['hidden_dim'], self.args['deep_biaff_hidden_dim'], 1, pairwise=True, dropout=args['dropout'])
# criterion
# self.crit = nn.CrossEntropyLoss(ignore_index=-1, reduction='sum') # ignore padding
self.drop = nn.Dropout(args['dropout'])
self.worddrop = WordDropout(args['word_dropout'])
def forward(self, word, word_mask, wordchars, wordchars_mask, upos, xpos, ufeats, pretrained, lemma, head, deprel, word_orig_idx, sentlens, wordlens, scale, root, subsample=True):
def pack(x):
return pack_padded_sequence(x, sentlens, batch_first=True)
inputs = []
if self.args['pretrain']:
pretrained_emb = self.pretrained_emb(pretrained)
pretrained_emb = self.trans_pretrained(pretrained_emb)
pretrained_emb = pack(pretrained_emb)
inputs += [pretrained_emb]
if self.args['word_emb_dim'] > 0:
word_emb = self.word_emb(word)
word_emb = pack(word_emb)
lemma_emb = self.lemma_emb(lemma)
lemma_emb = pack(lemma_emb)
inputs += [word_emb, lemma_emb]
if self.args['tag_emb_dim'] > 0:
pos_emb = self.upos_emb(upos)
if isinstance(self.vocab['xpos'], CompositeVocab):
for i in range(len(self.vocab['xpos'])):
pos_emb += self.xpos_emb[i](xpos[:, :, i])
else:
pos_emb += self.xpos_emb(xpos)
pos_emb = pack(pos_emb)
feats_emb = 0
for i in range(len(self.vocab['feats'])):
feats_emb += self.ufeats_emb[i](ufeats[:, :, i])
feats_emb = pack(feats_emb)
inputs += [pos_emb, feats_emb]
if self.args['char'] and self.args['char_emb_dim'] > 0:
char_reps = self.charmodel(wordchars, wordchars_mask, word_orig_idx, sentlens, wordlens)
char_reps = PackedSequence(self.trans_char(self.drop(char_reps.data)), char_reps.batch_sizes)
inputs += [char_reps]
lstm_inputs = torch.cat([x.data for x in inputs], 1)
lstm_inputs = self.worddrop(lstm_inputs, self.drop_replacement)
lstm_inputs = self.drop(lstm_inputs)
lstm_inputs = PackedSequence(lstm_inputs, inputs[0].batch_sizes)
# print("word size", word.size(0))
lstm_outputs, _ = self.parserlstm(lstm_inputs, sentlens, hx=(self.parserlstm_h_init.expand(2 * self.args['num_layers'], word.size(0), self.args['hidden_dim']).contiguous(), self.parserlstm_c_init.expand(2 * self.args['num_layers'], word.size(0), self.args['hidden_dim']).contiguous()))
lstm_outputs, _ = pad_packed_sequence(lstm_outputs, batch_first=True)
lstm_outputs_normalized = torch.zeros(lstm_outputs.shape, device=device)
# print("lstm shape", lstm_outputs.shape)
#This can be done without a for loop.
for idx in range(lstm_outputs.shape[0]):
embedding = lstm_outputs[idx]
norm = embedding.norm(p=2, dim=1, keepdim=True)
max_norm = torch.max(norm)+1e-3
normalized_emb = embedding.div(max_norm.expand_as(embedding))
# print("normalized norm", normalized_emb.norm(p=2, dim=1, keepdim=True))
lstm_outputs_normalized[idx] = normalized_emb
# print("After normalization:", lstm_outputs.shape)
lstm_postdrop = self.drop(lstm_outputs_normalized)
mapped_vectors = self.hypmapping(lstm_postdrop)
predicted_scores = self.rootpred(mapped_vectors)
predicted_scores = predicted_scores.squeeze(-1)
batch_size = predicted_scores.shape[0]
# print("predicted scores after squeeze", predicted_scores)
# deprel_scores = self.deprel(self.drop(lstm_outputs), self.drop(lstm_outputs))
#goldmask = head.new_zeros(*head.size(), head.size(-1)+1, dtype=torch.uint8)
#goldmask.scatter_(2, head.unsqueeze(2), 1)
# if self.args['linearization'] or self.args['distance']:
# head_offset = torch.arange(word.size(1), device=head.device).view(1, 1, -1).expand(word.size(0), -1, -1) - torch.arange(word.size(1), device=head.device).view(1, -1, 1).expand(word.size(0), -1, -1)
# if self.args['linearization']:
# lin_scores = self.linearization(self.drop(lstm_outputs), self.drop(lstm_outputs)).squeeze(3)
# unlabeled_scores += F.logsigmoid(lin_scores * torch.sign(head_offset).float()).detach()
# if self.args['distance']:
# dist_scores = self.distance(self.drop(lstm_outputs), self.drop(lstm_outputs)).squeeze(3)
# dist_pred = 1 + F.softplus(dist_scores)
# dist_target = torch.abs(head_offset)
# dist_kld = -torch.log((dist_target.float() - dist_pred)**2/2 + 1)
# unlabeled_scores += dist_kld.detach()
# diag = torch.eye(head.size(-1)+1, dtype=torch.uint8, device=head.device).unsqueeze(0)
# unlabeled_scores.masked_fill_(diag, -float('inf'))
# print("target tensor", head)
# print("target tensor shape", head.shape)
# print("mapped vectors", mapped_vectors.shape)
        root = torch.tensor(root, dtype=torch.long, device=device)  # avoid hard-coding CUDA so CPU runs also work
root.requires_grad = False
# print("root", root)
subsample_ratio = 1.0
preds = []
# print("subsample ratio", subsample_ratio)
edge_acc = 0.0
correct = 0.0
f1_total, correct_heads, node_system, node_gold = 0, 0, 0, 0
if self.training:
unlabeled_target = head
# print("target shape", unlabeled_target.shape)
n = unlabeled_target.shape[1]
# if subsample:
# sample_row_num = int(round(n*subsample_ratio))
# sampled_rows = np.random.permutation(n)[:sample_row_num]
# else:
# sampled_rows = list(range(n))
# dist_recovered = util.distance_matrix_hyperbolic_batch(mapped_vectors, sampled_rows, scale)
# # print("dist recovered shape", dist_recovered.shape)
# dummy = dist_recovered.clone()
# target_dummy = unlabeled_target.clone()
# # print("root", root.shape)
# # print("predicted roots", predicted_roots.shape)
# edge_acc = util.compare_mst_batch(target_dummy.cpu().numpy(), dummy.detach().cpu().numpy())
# # print("sampled rows", sampled_rows)
# # print("mapped vectors", mapped_vectors.shape)
# # print("dist recovered shape", dist_recovered.shape)
# loss_distortion = util.distortion_batch(unlabeled_target, dist_recovered, n, sampled_rows)
#Look at percentage correct
            predictions = F.softmax(predicted_scores, dim=1)  # make the softmax dimension explicit
max_index = predictions.max(dim = 1)[1]
total = (max_index == root).sum()
correct = total.item()/(batch_size)
# print("total", total)
# print("Correct:", total.item()/(batch_size))
# print("root before", root)
loss_rootpred = self.CE(predicted_scores, root)
loss = loss_rootpred
# unlabeled_scores = unlabeled_scores[:, 1:, :] # exclude attachment for the root symbol
# unlabeled_scores = unlabeled_scores.masked_fill(word_mask.unsqueeze(1), -float('inf'))
# unlabeled_target = head.masked_fill(word_mask[:, 1:], -1)
# loss = self.crit(unlabeled_scores.contiguous().view(-1, unlabeled_scores.size(2)), unlabeled_target.view(-1))
# deprel_scores = deprel_scores[:, 1:] # exclude attachment for the root symbol
# #deprel_scores = deprel_scores.masked_select(goldmask.unsqueeze(3)).view(-1, len(self.vocab['deprel']))
# deprel_scores = torch.gather(deprel_scores, 2, head.unsqueeze(2).unsqueeze(3).expand(-1, -1, -1, len(self.vocab['deprel']))).view(-1, len(self.vocab['deprel']))
# deprel_target = deprel.masked_fill(word_mask[:, 1:], -1)
# loss += self.crit(deprel_scores.contiguous(), deprel_target.view(-1))
# if self.args['linearization']:
# #lin_scores = lin_scores[:, 1:].masked_select(goldmask)
# lin_scores = torch.gather(lin_scores[:, 1:], 2, head.unsqueeze(2)).view(-1)
# lin_scores = torch.cat([-lin_scores.unsqueeze(1)/2, lin_scores.unsqueeze(1)/2], 1)
# #lin_target = (head_offset[:, 1:] > 0).long().masked_select(goldmask)
# lin_target = torch.gather((head_offset[:, 1:] > 0).long(), 2, head.unsqueeze(2))
# loss += self.crit(lin_scores.contiguous(), lin_target.view(-1))
# if self.args['distance']:
# #dist_kld = dist_kld[:, 1:].masked_select(goldmask)
# dist_kld = torch.gather(dist_kld[:, 1:], 2, head.unsqueeze(2))
# loss -= dist_kld.sum()
# loss /= wordchars.size(0) # number of words
else:
loss = 0
unlabeled_target = head
# print("target shape", unlabeled_target.shape)
n = unlabeled_target.shape[1]
sampled_rows = list(range(n))
# print("sampled rows", sampled_rows)
# print("mapped vectors", mapped_vectors.shape)
# dist_recovered = util.distance_matrix_hyperbolic_batch(mapped_vectors, sampled_rows, scale)
# # print("dist recovered shape", dist_recovered.shape)
# dummy = dist_recovered.clone()
# target_dummy = unlabeled_target.clone()
# edge_acc, f1_total, correct_heads, node_system, node_gold = util.predict_batch(target_dummy.cpu().numpy(),dummy.detach().cpu().numpy(), sentlens)
            predictions = F.softmax(predicted_scores, dim=1)  # make the softmax dimension explicit
max_index = predictions.max(dim = 1)[1]
total = (max_index == root).sum()
correct = total.item()/(batch_size)
# preds.append(F.log_softmax(unlabeled_scores, 2).detach().cpu().numpy())
# preds.append(deprel_scores.max(3)[1].detach().cpu().numpy())
return loss, correct, f1_total, correct_heads, node_system, node_gold
| stanfordnlp-master | stanfordnlp/models/depparse/model.py |
from __future__ import unicode_literals, print_function, division
import os
import numpy as np
import scipy
import scipy.sparse.csgraph as csg
from joblib import Parallel, delayed
import multiprocessing
import networkx as nx
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import torch.optim as optim
import time
import math
from io import open
import unicodedata
import string
import re
import random
import json
from collections import defaultdict
# import utils.load_dist as ld
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Distortion calculations
def hyp_dist_origin(x):
#return np.log((1+np.linalg.norm(x))/(1-np.linalg.norm(x)))
return torch.log(torch.div(1+torch.norm(x),1-torch.norm(x)))
def acosh(x):
return torch.log(x + torch.sqrt(x**2-1))
def _correct(x, eps=1e-1):
current_norms = torch.norm(x,2,x.dim() - 1)
mask_idx = current_norms < 1./(1+eps)
modified = 1./((1+eps)*current_norms)
modified[mask_idx] = 1.0
return modified.unsqueeze(-1)
def dist_h(u,v):
u = u * _correct(u)
v = v * _correct(v)
z = 2*torch.norm(u-v,2)**2
uu = 1. + torch.div(z,((1-torch.norm(u,2)**2)*(1-torch.norm(v,2)**2)))
return acosh(uu)
def dist_e(u, v):
return torch.norm(u-v, 2)
def dist_eb(u, v):
return torch.norm(u-v, 2)
def dist_p(u,v):
z = 2*torch.norm(u-v,2)**2
uu = 1. + torch.div(z,((1-torch.norm(u,2)**2)*(1-torch.norm(v,2)**2)))
machine_eps = np.finfo(uu.data.detach().cpu().numpy().dtype).eps # problem with cuda tensor
return acosh(torch.clamp(uu, min=1+machine_eps))
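# dist_h and dist_p both implement the Poincare-ball distance
#   d(u, v) = arcosh(1 + 2 * ||u - v||^2 / ((1 - ||u||^2) * (1 - ||v||^2)));
# dist_h first projects points whose norm is too close to 1 back inside the ball via _correct, while
# dist_p instead clamps the arcosh argument to at least 1 + machine epsilon to avoid NaNs.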
def dist_pb(u,v):
#print("u = ", u, " v = ", v)
z = 2*torch.norm(u-v,2, dim=1)**2
uu = 1. + torch.div(z,((1-torch.norm(u,2, dim=1)**2)*(1-torch.norm(v,2, dim=1)**2)))
machine_eps = np.finfo(uu.data.detach().cpu().numpy().dtype).eps # problem with cuda tensor
#print("distance was ", acosh(torch.clamp(uu, min=1+machine_eps)))
print("THIS me = ", machine_eps)
return acosh(torch.clamp(uu, min=1+machine_eps))
def distance_matrix_euclidean(input):
row_n = input.shape[0]
mp1 = torch.stack([input]*row_n)
mp2 = torch.stack([input]*row_n).transpose(0,1)
dist_mat = torch.sum((mp1-mp2)**2,2).squeeze()
return dist_mat
def pairwise_distances(x, y=None):
'''
Input: x is a Nxd matrix
           y is an optional Mxd matrix
Output: dist is a NxM matrix where dist[i,j] is the square norm between x[i,:] and y[j,:]
if y is not given then use 'y=x'.
i.e. dist[i,j] = ||x[i,:]-y[j,:]||^2
'''
x_norm = (x**2).sum(1).view(-1, 1)
if y is not None:
y_norm = (y**2).sum(1).view(1, -1)
else:
y = x
y_norm = x_norm.view(1, -1)
dist = x_norm + y_norm - 2.0 * torch.mm(x, torch.transpose(y, 0, 1))
return dist
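# Shape sketch: for x of shape (N, d) and y of shape (M, d), pairwise_distances(x, y) is an (N, M)
# matrix of squared Euclidean distances; with y omitted it is the (N, N) matrix of x against itself.
# For example, pairwise_distances(torch.randn(3, 5), torch.randn(4, 5)).shape == torch.Size([3, 4]).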
def distance_matrix_hyperbolic(input, sampled_rows, scale):
row_n = input.shape[0]
dist_mat = torch.zeros(len(sampled_rows), row_n, device=device)
idx = 0
for row in sampled_rows:
for i in range(row_n):
#if i != row:
dist_mat[idx, i] = dist_p(input[row,:], input[i,:])*scale
idx += 1
#print("Distance matrix", dist_mat)
#print()
return dist_mat
def distance_matrix_hyperbolic_batch_old(input, sampled_rows, scale):
#print("were computing the matrix with sampled_rows = ")
#print(sampled_rows)
batch_size = input.shape[0]
row_n = input.shape[1]
dist_mat = torch.zeros(batch_size, len(sampled_rows), row_n, device=device)
# num_cores = multiprocessing.cpu_count()
# dist_mat = Parallel(n_jobs=num_cores)(delayed(compute_row)(i,adj_mat) for i in range(n))
idx = 0
for row in sampled_rows:
for i in range(row_n):
#if i != row:
dist_mat[:,idx, i] = dist_pb(input[:,row,:], input[:,i,:])*scale
idx += 1
return dist_mat
def distance_matrix_hyperbolic_batch(input, sampled_rows, scale):
batch_size = input.shape[0]
row_n = input.shape[1]
u = torch.stack([input]*row_n).transpose(0,1)
v = torch.stack([input]*row_n).transpose(0,1).transpose(1,2)
nrms = torch.norm(input, 2, 2)
pr = torch.ones(batch_size, row_n).cuda() - nrms ** 2
den = pr[:, :, None] @ pr[:, None, :]
num = 2 * torch.sum((u-v)**2,3).squeeze() if row_n > 1 else 2 * torch.sum((u-v)**2,3)
dist_mat = torch.ones(batch_size, row_n, row_n).cuda() + torch.div(num, den) * scale
machine_eps = np.finfo(dist_mat.data.detach().cpu().numpy().dtype).eps # problem with cuda tensor
dist_mat = acosh(torch.clamp(dist_mat, min=1+machine_eps))
return dist_mat
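# Vectorized variant of the row-wise function above: it broadcasts the squared differences and norms
# over the whole (batch, n, n) tensor, forms 1 + scale * 2*||u - v||^2 / ((1 - ||u||^2)(1 - ||v||^2)),
# clamps against machine epsilon and applies acosh. Note that `sampled_rows` is unused here (all rows
# are computed) and that `scale` multiplies the ratio term rather than the final distance, unlike
# distance_matrix_hyperbolic above.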
def distance_matrix_euclidean_batch(input, sampled_rows, scale):
#print("were computing the matrix with sampled_rows = ")
#print(sampled_rows)
batch_size = input.shape[0]
row_n = input.shape[1]
dist_mat = torch.zeros(batch_size, len(sampled_rows), row_n, device=device)
# num_cores = multiprocessing.cpu_count()
# dist_mat = Parallel(n_jobs=num_cores)(delayed(compute_row)(i,adj_mat) for i in range(n))
idx = 0
for b in range(batch_size):
dist_mat[b,:,:] = distance_matrix_euclidean(input[b,:,:])
#print("Distance matrix", dist_mat)
return dist_mat
def distance_matrix_euclidean_parsing(input_length, input, sampled_rows):
dist_mat = torch.zeros(len(sampled_rows), input_length, device=device)
idx = 0
for row in sampled_rows:
for i in range(input_length):
if i != row:
dist_mat[idx, i] = dist_e(input[row,:], input[i,:])
idx += 1
# print("Distance matrix", dist_mat)
return dist_mat
def entry_is_good(h, h_rec): return (not torch.isnan(h_rec)) and (not torch.isinf(h_rec)) and h_rec != 0 and h != 0
def distortion_entry(h,h_rec):
avg = abs(h_rec - h)/h
avg += abs(h - h_rec)/h_rec
avg /= 2
return avg
def distortion_row(H1, H2, n, row):
avg, good = 0, 0
for i in range(n):
if i != row and entry_is_good(H1[i], H2[i]):
#if H1[i] <= 4:
if True:
_avg = 1.0 / H1[i] * distortion_entry(H1[i], H2[i])
#_avg = distortion_entry(H1[i], H2[i])
good += 1
avg += _avg
if good > 0:
avg /= good
else:
avg, good = torch.tensor(0., device=device, requires_grad=True), torch.tensor(0., device=device, requires_grad=True)
# print("Number of good entries", good)
return (avg, good)
def distortion(H1, H2, n, sampled_rows, jobs=16):
i = 0
# print("h1", H1.shape)
# print("h2", H2.shape)
dists = torch.zeros(len(sampled_rows))
for row in sampled_rows:
dists[i] = distortion_row(H1[row,:], H2[i,:], n, row)[0]
i += 1
avg = dists.sum() / len(sampled_rows)
return avg
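# distortion() averages, over the sampled rows, the symmetric relative error between the true graph
# distances H1 and the recovered distances H2; distortion_row additionally weights each entry by
# 1/H1[i], so errors on nearby (small-distance) pairs are penalized more heavily.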
def distortion_batch(H1, H2, n, sampled_rows):
t = time.time()
batch_size = H1.shape[0]
diag_mask = torch.eye(n)
diag_mask = diag_mask.unsqueeze(0)
diag_mask = diag_mask.expand(batch_size, n, n).cuda()
off_diag = torch.ones(batch_size, n, n).cuda() - diag_mask
os = torch.zeros(batch_size, n, n).cuda()
ns = torch.ones(batch_size, n, n).cuda()
H1m = torch.where(H1 > 0, ns, os).cuda()
H2m = torch.where(H2 > 0, ns, os).cuda()
good1 = torch.clamp(H1m.sum(), min=1)
good2 = torch.clamp(H2m.sum(), min=1)
# these have 1's on the diagonals. Also avoid having to divide by 0:
H1_masked = H1 * off_diag + diag_mask + torch.ones(batch_size, n, n).cuda()*0.00001
H2_masked = H2 * off_diag + diag_mask + torch.ones(batch_size, n, n).cuda()*0.00001
dist1 = torch.clamp(torch.div(torch.abs(H1_masked - H2_masked), H2_masked), max=5.0)
dist2 = torch.div(torch.abs(H2_masked - H1_masked), H1_masked)
H1_focus = ns / (torch.clamp(H1_masked, min=1))
l = ((dist1*H2m*H1_focus)).sum()/good1 + ((dist2*H1m*H1_focus)).sum()/good2
#print("time to compute the loss = ", time.time()-t)
return l
def distortion_batch_old(H1, H2, n, sampled_rows, graph, mapped_vectors, jobs=16):
#print("First one\n")
#print(H1)
#print("Second one\n")
#print(H2)
# dists = Parallel(n_jobs=jobs)(delayed(distortion_row)(H1[i,:],H2[i,:],n,i) for i in range(n))
# print(H1.shape) #target
# print(H2.shape) #recovered
batch_size = H1.shape[0]
dists = torch.zeros(batch_size, len(sampled_rows))
dists_orig = torch.zeros(batch_size)
for b in range(batch_size):
# let's add a term that captures how far we are in terms of getting the right guy in
g_nodes = list(graph[b].nodes())
root = g_nodes[0]
'''
print("root = ", root)
print("location = ", mapped_vectors[b,root,:])
print("Root norm = ", np.linalg.norm(mapped_vectors[b,root,:].detach().cpu().numpy()))
print("Other norms = ")
for i in range(n):
print(np.linalg.norm(mapped_vectors[b,i,:].detach().cpu().numpy()))
print()
'''
dists_orig[b] = hyp_dist_origin(mapped_vectors[b,root,:])
i=0
for row in sampled_rows:
'''
print("on row ", row)
print()
print("true")
print(H1[b,row,:])
print("ours")
print(H2[b,i,:])
print()
'''
dists[b,i] = distortion_row(H1[b,row,:], H2[b,i,:], n, row)[0]
i += 1
#to_stack = [tup[0] for tup in dists]
#avg = torch.stack(to_stack).sum() / len(sampled_rows)
avg = dists.sum(dim=1)/len(sampled_rows)
#print(" we added ", dists_orig)
#print(" the normal is ", avg.sum())
tot = (dists_orig.sum() * 1.0 + avg.sum())/batch_size
return tot
def frac_distortion_row(H):
return torch.fmod(H, 1).sum()
def frac_distortion(H, sampled_rows):
frac_dists = torch.zeros(len(sampled_rows))
for i in range(len(sampled_rows)):
frac_dists[i] = frac_distortion_row(H[i,:])
return frac_dists.sum() / len(sampled_rows)
'''
def distortion(H1, H2, n, jobs):
H1 = np.array(H1.cpu()),
H2 = np.array(H2.detach().cpu())
dists = Parallel(n_jobs=jobs)(delayed(distortion_row)(H1[i,:],H2[i,:],n,i) for i in range(n))
dists = np.vstack(dists)
mc = max(dists[:,0])
me = max(dists[:,1])
# wc = max(dists[:,0])*max(dists[:,1])
avg = sum(dists[:,2])/n
bad = sum(dists[:,3])
#return (mc, me, avg, bad)
to_stack = [tup[0] for tup in dists]
avg = torch.stack(to_stack).sum()/n
return avg
'''
#Loading the graph and getting the distance matrix.
def load_graph(file_name, directed=False):
G = nx.DiGraph() if directed else nx.Graph()
with open(file_name, "r") as f:
for line in f:
tokens = line.split()
u = int(tokens[0])
v = int(tokens[1])
if len(tokens) > 2:
w = float(tokens[2])
G.add_edge(u, v, weight=w)
else:
G.add_edge(u,v)
return G
def compute_row(i, adj_mat):
return csg.dijkstra(adj_mat, indices=[i], unweighted=True, directed=False)
def get_dist_mat(G):
n = G.order()
adj_mat = nx.to_scipy_sparse_matrix(G, nodelist=list(range(G.order())))
t = time.time()
num_cores = multiprocessing.cpu_count()
dist_mat = Parallel(n_jobs=num_cores)(delayed(compute_row)(i,adj_mat) for i in range(n))
dist_mat = np.vstack(dist_mat)
return dist_mat
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (asMinutes(s), asMinutes(rs))
def showPlot(points):
plt.figure()
fig, ax = plt.subplots()
loc = ticker.MultipleLocator(base=0.2)
ax.yaxis.set_major_locator(loc)
plt.plot(points)
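# Note: showPlot assumes matplotlib (plt = matplotlib.pyplot, ticker = matplotlib.ticker), which is
# not imported in this module; it appears unused by the training and evaluation paths above.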
def pairfromidx(idx, edge_folder):
G = load_graph(edge_folder+str(idx)+".edges")
target_matrix = get_dist_mat(G)
'''print("target matrix = ", target_matrix)
print()'''
target_tensor = torch.from_numpy(target_matrix).float().to(device)
target_tensor.requires_grad = False
n = G.order()
return ([], target_tensor, n, G)
def gettestpairs(edge_folder, test_folder):
test_pairs = defaultdict()
edge_files = os.listdir(edge_folder)
for file in edge_files:
name = file.split("/")[-1]
ground_truth = load_graph(edge_folder+file)
n = ground_truth.order()
md = torch.load(test_folder+"emb/"+str(name)+".E10-1.lr10.0.emb.final", map_location=device)
embedding = md.E[0].w
norm = embedding.norm(p=2, dim=1, keepdim=True)
max_norm = torch.max(norm)+1e-10
normalized_emb = embedding.div(max_norm.expand_as(embedding))
target_matrix = get_dist_mat(ground_truth)
target_tensor = torch.from_numpy(target_matrix).float().to(device)
target_tensor.requires_grad = False
test_pairs[name] = [normalized_emb, ground_truth, target_tensor, n]
return test_pairs
def compare_mst(G, hrec):
mst = csg.minimum_spanning_tree(hrec)
G_rec = nx.from_scipy_sparse_matrix(mst)
found = 0
for edge in G_rec.edges():
if edge in G.edges(): found+= 1
acc = found / len(list(G.edges()))
return acc
def compare_mst_batch(target_batch, hrec_batch):
batch_size = hrec_batch.shape[0]
batch_acc = 0
for i in range(batch_size):
hrec = hrec_batch[i,:,:]
target = target_batch[i,:,:]
mst = csg.minimum_spanning_tree(hrec)
G_rec = nx.from_scipy_sparse_matrix(mst)
mst_target = csg.minimum_spanning_tree(target)
G = nx.from_scipy_sparse_matrix(mst_target)
found = 1
for edge in G_rec.edges():
if edge in G.edges(): found+= 1
acc = found / (len(list(G.edges()))+1)
batch_acc += acc
return batch_acc/batch_size
def get_heads(G, head_dict, node_list):
if len(list(G.edges())) !=0:
for i,j in list(G.edges()):
if G.degree(i) == 1:
head_dict[i] = j
G.remove_edge(i,j)
node_list.remove(i)
elif G.degree(j) == 1:
head_dict[j] = i
G.remove_edge(i,j)
node_list.remove(j)
get_heads(G, head_dict, node_list)
else:
root = node_list[0]
head_dict[root] = 'root'
return G, head_dict, node_list
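# get_heads recovers a head for every node by repeatedly stripping degree-1 leaves from
# the undirected MST; the last remaining node is labelled 'root'. Illustrative example
# (values assumed): for the path graph 0-1-2 with node_list=[0, 1, 2], this yields
# head_dict == {0: 1, 1: 2, 2: 'root'}.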
def get_heads_batch(hrec_batch, sentlens):
batch_size = hrec_batch.shape[0]
preds = []
rel = 'obj'
for b in range(batch_size):
hrec = hrec_batch[b,:,:]
ind = sentlens[b]
hrec = hrec[:ind,:ind]
mst = csg.minimum_spanning_tree(hrec)
G = nx.from_scipy_sparse_matrix(mst)
seq = []
head_dict = {}
node_list = [n for n in list(G.nodes()) if G.degree(n) > 0]
if len(node_list) !=0:
_, head_dict, _ = get_heads(G, head_dict, node_list)
else:
head_dict[0] = 'root'
keylist = head_dict.keys()
keylist = sorted(keylist)
for key in keylist:
# print(key, seq)
if head_dict[key] == 'root':
seq.append(['0', 'root'])
else:
seq.append([str(head_dict[key]+1), rel])
root_num = 0
for head, deprel in seq:
if deprel == 'root':
root_num += 1
if root_num != 1:
print("Num of root", root_num)
print(seq)
preds += [seq]
return preds
def predict_batch(target_batch, hrec_batch, sentlens):
batch_size = hrec_batch.shape[0]
node_system = 0
node_gold = 0
correct_heads = 0
batch_acc = 0
f1_total = 0
for i in range(batch_size):
ind = sentlens[i]
hrec = hrec_batch[i,:ind,:ind]
target = target_batch[i,:ind,:ind]
mst = csg.minimum_spanning_tree(hrec)
G_rec = nx.from_scipy_sparse_matrix(mst)
mst_target = csg.minimum_spanning_tree(target)
G = nx.from_scipy_sparse_matrix(mst_target)
node_system += len(list(G_rec.nodes()))
node_gold += len(list(G.nodes()))
found = 1 #counting mst root to placeholder root node.
for edge in G_rec.edges():
if edge in G.edges(): found+= 1
correct_heads += found
acc = found / (len(list(G.edges()))+1)
recall = acc
precision = found / (len(list(G_rec.edges()))+1)
f1 = 2*precision*recall/(precision+recall)
batch_acc += acc
f1_total += f1
batch_acc /= batch_size
return batch_acc, f1_total, correct_heads, node_system, node_gold
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
# Lowercase, trim, and remove non-letter characters
def normalizeString(s):
s = unicodeToAscii(s.lower().strip())
s = re.sub(r"([.!?])", r" \1", s)
s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
return s
def unroll(node, G):
if len(node.children) != 0:
for child in node.children:
G.add_edge(node.token['id'], child.token['id'])
unroll(child, G)
return G
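# unroll flattens a conllu TokenTree into a networkx graph by adding an edge between
# each token's id and the id of every child, recursively; the data loader later relabels
# the 1-based ids to 0-based before computing the distance matrix of the gold tree.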
def indexesFromSentence(vocab, sentence):
return [vocab.word2index[token['form']] for token in sentence]
def tensorFromSentence(vocab, sentence):
indexes = indexesFromSentence(vocab, sentence)
return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)
def pairfromidx_parsing(idx, input_vocab, filtered_sentences, edge_folder):
input_tensor = tensorFromSentence(input_vocab, filtered_sentences[idx])
G = load_graph(edge_folder+str(idx)+".edges")
target_matrix = get_dist_mat(G)
target_tensor = torch.from_numpy(target_matrix).float().to(device)
target_tensor.requires_grad = False
n = G.order()
if input_tensor.shape[0] == target_matrix.shape[0]:
return (input_tensor, target_tensor, n, G)
else:
return []
def compute_row(i, adj_mat):
return csg.dijkstra(adj_mat, indices=[i], unweighted=True, directed=False)
def save_dist_mat(G, file):
n = G.order()
print("Number of nodes is ", n)
adj_mat = nx.to_scipy_sparse_matrix(G, nodelist=list(range(G.order())))
t = time.time()
num_cores = multiprocessing.cpu_count()
dist_mat = Parallel(n_jobs=20)(delayed(compute_row)(i,adj_mat) for i in range(n))
dist_mat = np.vstack(dist_mat)
print("Time elapsed = ", time.time()-t)
pickle.dump(dist_mat, open(file,"wb"))
def load_dist_mat(file):
return pickle.load(open(file,"rb"))
def unwrap(x):
""" Extract the numbers from (sequences of) pytorch tensors """
if isinstance(x, list) : return [unwrap(u) for u in x]
if isinstance(x, tuple): return tuple([unwrap(u) for u in list(x)])
return x.detach().cpu().numpy()
# def load_emb_dm(file):
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# m = torch.load(file).to(device)
# H = unwrap(m.dist_matrix())
# return H
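# NOTE: the get_dist_mat definition below overrides the one near the top of this file at
# import time; it adds an optional `parallelize` flag and runs single-core by default
# (compute_row is likewise re-declared, identically, above).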
def get_dist_mat(G, parallelize=False):
n = G.order()
adj_mat = nx.to_scipy_sparse_matrix(G, nodelist=list(range(G.order())))
t = time.time()
num_cores = multiprocessing.cpu_count() if parallelize else 1
dist_mat = Parallel(n_jobs=num_cores)(delayed(compute_row)(i,adj_mat) for i in range(n))
dist_mat = np.vstack(dist_mat)
return dist_mat
| stanfordnlp-master | stanfordnlp/models/depparse/mapping_utils.py |
"""
A trainer class to handle training and testing of models.
"""
import sys
import torch
from torch import nn
from stanfordnlp.models.common.trainer import Trainer as BaseTrainer
from stanfordnlp.models.common import utils, loss
from stanfordnlp.models.common.chuliu_edmonds import chuliu_edmonds_one_root
from stanfordnlp.models.depparse.model import Parser
from stanfordnlp.models.pos.vocab import MultiVocab
def unpack_batch(batch, use_cuda):
""" Unpack a batch from the data loader. """
if use_cuda:
inputs = [b.cuda() if b is not None else None for b in batch[:11]]
else:
inputs = batch[:11]
root = batch[11]
orig_idx = batch[12]
word_orig_idx = batch[13]
sentlens = batch[14]
wordlens = batch[15]
return inputs, root, orig_idx, word_orig_idx, sentlens, wordlens
class Trainer(BaseTrainer):
""" A trainer for training models. """
def __init__(self, args=None, vocab=None, pretrain=None, model_file=None, use_cuda=False):
self.use_cuda = use_cuda
        scale_init = torch.FloatTensor([1.0]).cuda() if self.use_cuda else torch.FloatTensor([1.0])
        self.scale = nn.Parameter(scale_init, requires_grad=True)
if model_file is not None:
# load everything from file
self.load(pretrain, model_file)
else:
assert all(var is not None for var in [args, vocab, pretrain])
# build model from scratch
self.args = args
self.vocab = vocab
self.model = Parser(args, vocab, emb_matrix=pretrain.emb)
# self.parameters = [p for p in self.model.parameters() if p.requires_grad]
self.parameters = []
for module in self.model.modules():
if module != self.model.hypmapping:
for param in module.parameters():
if param.requires_grad:
self.parameters.append(param)
if self.use_cuda:
self.model.cuda()
else:
self.model.cpu()
self.optimizer = utils.get_optimizer(self.args['optim'], self.parameters, self.args['lr'], betas=(0.9, self.args['beta2']), eps=1e-6)
self.mapping_optimizer = utils.get_optimizer('rsgd', self.model.hypmapping.parameters(), 0.1)
self.scale_optimizer = torch.optim.SGD([self.scale], lr=0.01)
def update(self, batch, eval=False, subsample=True):
inputs, root, orig_idx, word_orig_idx, sentlens, wordlens = unpack_batch(batch, self.use_cuda)
word, word_mask, wordchars, wordchars_mask, upos, xpos, ufeats, pretrained, lemma, head, deprel = inputs
if eval:
self.model.eval()
else:
self.model.train()
self.optimizer.zero_grad()
self.mapping_optimizer.zero_grad()
self.scale_optimizer.zero_grad()
if subsample:
loss, edge_acc, f1_total, correct_heads, node_system, node_gold= self.model(word, word_mask, wordchars, wordchars_mask, upos, xpos, ufeats, pretrained, lemma, head, deprel, word_orig_idx, sentlens, wordlens, self.scale, root, True)
else:
loss, edge_acc, f1_total, correct_heads, node_system, node_gold= self.model(word, word_mask, wordchars, wordchars_mask, upos, xpos, ufeats, pretrained, lemma, head, deprel, word_orig_idx, sentlens, wordlens, self.scale, root, False)
loss_val = loss.data.item()
if eval:
return loss_val, edge_acc
loss.backward(retain_graph=True)
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args['max_grad_norm'])
self.optimizer.step()
self.mapping_optimizer.step()
self.scale_optimizer.step()
return loss_val, edge_acc
def predict(self, batch, unsort=True):
inputs, root, orig_idx, word_orig_idx, sentlens, wordlens = unpack_batch(batch, self.use_cuda)
word, word_mask, wordchars, wordchars_mask, upos, xpos, ufeats, pretrained, lemma, head, deprel = inputs
self.model.eval()
loss, edge_acc, f1_total, correct_heads, node_system, node_gold = self.model(word, word_mask, wordchars, wordchars_mask, upos, xpos, ufeats, pretrained, lemma, head, deprel, word_orig_idx, sentlens, wordlens, self.scale, root)
# head_seqs = [chuliu_edmonds_one_root(adj[:l, :l])[1:] for adj, l in zip(preds[0], sentlens)] # remove attachment for the root
# deprel_seqs = [self.vocab['deprel'].unmap([preds[1][i][j+1][h] for j, h in enumerate(hs)]) for i, hs in enumerate(head_seqs)]
# pred_tokens = [[[str(head_seqs[i][j]), deprel_seqs[i][j]] for j in range(sentlens[i]-1)] for i in range(batch_size)]
# if unsort:
# preds = utils.unsort(preds, orig_idx)
return edge_acc, f1_total, correct_heads, node_system, node_gold
def save(self, filename, skip_modules=True):
model_state = self.model.state_dict()
# skip saving modules like pretrained embeddings, because they are large and will be saved in a separate file
if skip_modules:
skipped = [k for k in model_state.keys() if k.split('.')[0] in self.model.unsaved_modules]
for k in skipped:
del model_state[k]
params = {
'model': model_state,
'vocab': self.vocab.state_dict(),
'config': self.args
}
try:
torch.save(params, filename)
print("model saved to {}".format(filename))
except BaseException:
print("[Warning: Saving failed... continuing anyway.]")
def load(self, pretrain, filename):
try:
checkpoint = torch.load(filename, lambda storage, loc: storage)
except BaseException:
print("Cannot load model from {}".format(filename))
sys.exit(1)
self.args = checkpoint['config']
self.vocab = MultiVocab.load_state_dict(checkpoint['vocab'])
self.model = Parser(self.args, self.vocab, emb_matrix=pretrain.emb)
self.model.load_state_dict(checkpoint['model'], strict=False)
| stanfordnlp-master | stanfordnlp/models/depparse/trainer.py |
import random
import torch
from conllu import parse_tree, parse_tree_incr, parse, parse_incr
import networkx as nx
import scipy
import scipy.sparse.csgraph as csg
import numpy as np
import stanfordnlp.models.depparse.mapping_utils as util
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
from stanfordnlp.models.common.data import map_to_ids, get_long_tensor, get_float_tensor, sort_all
from stanfordnlp.models.common import conll
from stanfordnlp.models.common.vocab import PAD_ID, VOCAB_PREFIX, ROOT_ID, CompositeVocab
from stanfordnlp.models.pos.vocab import CharVocab, WordVocab, XPOSVocab, FeatureVocab, MultiVocab
from stanfordnlp.models.pos.xpos_vocab_factory import xpos_vocab_factory
from stanfordnlp.pipeline.doc import Document
class DataLoader:
def __init__(self, input_src, batch_size, args, pretrain, vocab=None, evaluation=False):
self.batch_size = batch_size
print("We are entering data loader")
print("batch size should be", batch_size)
self.args = args
self.eval = evaluation
sample_dev_ratio = args['sample_train']
# self.shuffled = not self.eval
# check if input source is a file or a Document object
if isinstance(input_src, str):
filename = input_src
assert filename.endswith('conllu'), "Loaded file must be conllu file."
self.conll, data = self.load_file(filename, evaluation=self.eval)
elif isinstance(input_src, Document):
print("it's a document")
filename = None
doc = input_src
self.conll, data = self.load_doc(doc)
# handle vocab
if vocab is None:
self.vocab = self.init_vocab(data)
else:
self.vocab = vocab
self.pretrain_vocab = pretrain.vocab
# filter, sort the data, take based on the percentage.
if args.get('sample_train', 1.0) < 1.0 and not self.eval:
# data = sorted(data, key = lambda x: len(x[0]))
print("Subsample training set with rate {:g}".format(args['sample_train']))
if sample_dev_ratio < 1.0 and self.eval:
# keep = int(sample_dev_ratio * len(data))
# data = random.sample(data, keep)
print("Subsample dev set with rate {:g}".format(sample_dev_ratio))
data = self.preprocess(input_src, data, self.vocab, self.pretrain_vocab, args)
# shuffle for training
#if self.shuffled:
#random.shuffle(data)
# self.num_examples = len(data)
# print("length of the data", self.num_examples)
# chunk into batches
print("Entering to chunk into batches")
self.data = self.chunk_batches(data, args['sample_train'])
if filename is not None:
print("{} batches created for {}.".format(len(self.data), filename))
def init_vocab(self, data):
assert self.eval == False # for eval vocab must exist
charvocab = CharVocab(data, self.args['shorthand'])
wordvocab = WordVocab(data, self.args['shorthand'], cutoff=7, lower=True)
uposvocab = WordVocab(data, self.args['shorthand'], idx=1)
xposvocab = xpos_vocab_factory(data, self.args['shorthand'])
featsvocab = FeatureVocab(data, self.args['shorthand'], idx=3)
lemmavocab = WordVocab(data, self.args['shorthand'], cutoff=7, idx=4, lower=True)
deprelvocab = WordVocab(data, self.args['shorthand'], idx=6)
vocab = MultiVocab({'char': charvocab,
'word': wordvocab,
'upos': uposvocab,
'xpos': xposvocab,
'feats': featsvocab,
'lemma': lemmavocab,
'deprel': deprelvocab})
return vocab
#I got rid of ROOT_IDs.
def preprocess(self, doc, data, vocab, pretrain_vocab, args):
processed = []
data_file = open(doc, "r", encoding="utf-8")
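        # NOTE: parse_incr below re-reads the raw conllu file, so this method assumes `doc`
        # is a filename; a Document input would need its conll contents written out (or
        # wrapped in io.StringIO) first.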
xpos_replacement = [[ROOT_ID] * len(vocab['xpos'])] if isinstance(vocab['xpos'], CompositeVocab) else [ROOT_ID]
feats_replacement = [[ROOT_ID] * len(vocab['feats'])]
i = 0
for sent in data:
processed_sent = [vocab['word'].map([w[0] for w in sent])]
processed_sent += [[vocab['char'].map([x for x in w[0]]) for w in sent]]
processed_sent += [vocab['upos'].map([w[1] for w in sent])]
processed_sent += [vocab['xpos'].map([w[2] for w in sent])]
processed_sent += [vocab['feats'].map([w[3] for w in sent])]
processed_sent += [pretrain_vocab.map([w[0] for w in sent])]
processed_sent += [vocab['lemma'].map([w[4] for w in sent])]
head = [[int(w[5]) for w in sent]]
processed_sent += head
processed_sent += [vocab['deprel'].map([w[6] for w in sent])]
processed.append(processed_sent)
i+=1
# print("length of data", len(data))
idx = 0
for sentence in parse_incr(data_file):
if idx < len(data):
curr_tree = sentence.to_tree()
G_curr = nx.Graph()
G_curr = util.unroll(curr_tree, G_curr)
if len(G_curr) != 0:
G = nx.relabel_nodes(G_curr, lambda x: x-1)
target_matrix = util.get_dist_mat(G)
target_tensor = torch.from_numpy(target_matrix).float()
target_tensor.requires_grad = False
processed[idx][7] = target_tensor
processed[idx].append(int(curr_tree.token['id'])-1)
elif len(G_curr) == 0:
G = nx.Graph()
G.add_node(0)
target_matrix = util.get_dist_mat(G)
target_tensor = torch.from_numpy(target_matrix).float()
target_tensor.requires_grad = False
processed[idx][7] = target_tensor
processed[idx].append(0)
idx += 1
else:
break
return processed
def __len__(self):
return len(self.data)
def __getitem__(self, key):
""" Get a batch with index. """
if not isinstance(key, int):
raise TypeError
if key < 0 or key >= len(self.data):
raise IndexError
batch = self.data[key]
batch_size = len(batch)
# print("batch size", batch_size)
batch = list(zip(*batch))
#assert len(batch) == 9
# sort sentences by lens for easy RNN operations
lens = [len(x) for x in batch[0]]
batch, orig_idx = sort_all(batch, lens)
# sort words by lens for easy char-RNN operations
batch_words = [w for sent in batch[1] for w in sent]
word_lens = [len(x) for x in batch_words]
batch_words, word_orig_idx = sort_all([batch_words], word_lens)
batch_words = batch_words[0]
word_lens = [len(x) for x in batch_words]
# convert to tensors
words = batch[0]
words = get_long_tensor(words, batch_size)
words_mask = torch.eq(words, PAD_ID)
wordchars = get_long_tensor(batch_words, len(word_lens))
wordchars_mask = torch.eq(wordchars, PAD_ID)
upos = get_long_tensor(batch[2], batch_size)
xpos = get_long_tensor(batch[3], batch_size)
ufeats = get_long_tensor(batch[4], batch_size)
pretrained = get_long_tensor(batch[5], batch_size)
sentlens = [len(x) for x in batch[0]]
lemma = get_long_tensor(batch[6], batch_size)
max_tensor = batch[7][0]
to_stack = [max_tensor]
# print("batch[7] shape", len(batch[7]))
for b in range(1, batch_size):
new = torch.zeros(max_tensor.shape)
curr = batch[7][b]
new[:(curr.shape[0]), :curr.shape[1]] = curr
to_stack.append(new)
head = torch.stack(to_stack)
# print("batch size", batch_size)
# print("head shape", head.shape)
# print("lemma shape", lemma.shape)
deprel = get_long_tensor(batch[8], batch_size)
root = batch[9]
# root = torch.stack(root_list)
# print("batch[7][0]", batch[7][0])
# print("head", head)
# print("root in data prep", root)
return words, words_mask, wordchars, wordchars_mask, upos, xpos, ufeats, pretrained, lemma, head, deprel, root, orig_idx, word_orig_idx, sentlens, word_lens
def load_file(self, filename, evaluation=False):
conll_file = conll.CoNLLFile(filename)
data = conll_file.get(['word', 'upos', 'xpos', 'feats', 'lemma', 'head', 'deprel'], as_sentences=True)
return conll_file, data
def load_doc(self, doc):
data = doc.conll_file.get(['word', 'upos', 'xpos', 'feats', 'lemma', 'head', 'deprel'], as_sentences=True)
return doc.conll_file, data
def __iter__(self):
for i in range(self.__len__()):
yield self.__getitem__(i)
def reshuffle(self):
        data = [y for x in self.data for y in x]
        random.shuffle(data)
        # chunk_batches now requires a sample ratio; use 1.0 so no sentences are dropped
        self.data = self.chunk_batches(data, 1.0)
def chunk_batches(self, data, sample_ratio):
keep = int(sample_ratio * len(data))
data = random.sample(data, keep)
data = sorted(data, key = lambda x: len(x[0]))
print("length of data", len(data))
        current = []
        currentlen = 0
        # idx = 0
        batches = []
        for x in data:
            if currentlen < self.batch_size:
                current.append(x)
                currentlen += 1
            else:
                batches.append(current)
                current = [x]
                currentlen = 1
        # append the final (possibly partial) batch so trailing sentences are not dropped
        if len(current) > 0:
            batches.append(current)
# print("data point", x)
# print("data point length", x[0])
# if len(x[0]) + currentlen > self.batch_size:
# res.append(current)
# current = []
# currentlen = 0
# current.append(x)
# currentlen += len(x[0])
# print("current len", currentlen)
# print("length of current", len(current))
# if currentlen > 0:
# res.append(current)
print("length of batches", len(batches))
return batches
| stanfordnlp-master | stanfordnlp/models/depparse/data.py |
"""
Utils and wrappers for scoring parsers.
"""
from stanfordnlp.models.common.utils import ud_scores
def score(system_conllu_file, gold_conllu_file, verbose=True):
""" Wrapper for UD parser scorer. """
evaluation = ud_scores(gold_conllu_file, system_conllu_file)
el = evaluation['UAS']
p = el.precision
r = el.recall
f = el.f1
if verbose:
scores = [evaluation[k].f1 * 100 for k in ['UAS']]
print("UAS")
print("{:.2f}".format(*scores))
return p, r, f
| stanfordnlp-master | stanfordnlp/models/depparse/scorer.py |
"""
Support for pretrained data.
"""
import os
import lzma
import numpy as np
import torch
from .vocab import BaseVocab, VOCAB_PREFIX
class PretrainedWordVocab(BaseVocab):
def build_vocab(self):
self._id2unit = VOCAB_PREFIX + self.data
self._unit2id = {w:i for i, w in enumerate(self._id2unit)}
class Pretrain:
""" A loader and saver for pretrained embeddings. """
def __init__(self, filename, vec_filename=None):
self.filename = filename
self.vec_filename = vec_filename
@property
def vocab(self):
if not hasattr(self, '_vocab'):
self._vocab, self._emb = self.load()
return self._vocab
@property
def emb(self):
if not hasattr(self, '_emb'):
self._vocab, self._emb = self.load()
return self._emb
def load(self):
if os.path.exists(self.filename):
try:
data = torch.load(self.filename, lambda storage, loc: storage)
except BaseException as e:
print("Pretrained file exists but cannot be loaded from {}, due to the following exception:".format(self.filename))
print("\t{}".format(e))
return self.read_and_save()
return data['vocab'], data['emb']
else:
return self.read_and_save()
def read_and_save(self):
# load from pretrained filename
if self.vec_filename is None:
raise Exception("Vector file is not provided.")
print("Reading pretrained vectors from {}...".format(self.vec_filename))
first = True
words = []
failed = 0
with lzma.open(self.vec_filename, 'rb') as f:
for i, line in enumerate(f):
try:
line = line.decode()
except UnicodeDecodeError:
failed += 1
continue
if first:
# the first line contains the number of word vectors and the dimensionality
first = False
line = line.strip().split(' ')
rows, cols = [int(x) for x in line]
emb = np.zeros((rows + len(VOCAB_PREFIX), cols), dtype=np.float32)
continue
line = line.rstrip().split(' ')
emb[i+len(VOCAB_PREFIX)-1-failed, :] = [float(x) for x in line[-cols:]]
words.append(' '.join(line[:-cols]))
vocab = PretrainedWordVocab(words, lower=True)
if failed > 0:
emb = emb[:-failed]
# save to file
data = {'vocab': vocab, 'emb': emb}
try:
torch.save(data, self.filename)
print("Saved pretrained vocab and vectors to {}".format(self.filename))
except BaseException as e:
print("Saving pretrained data failed due to the following exception... continuing anyway")
print("\t{}".format(e))
return vocab, emb
| stanfordnlp-master | stanfordnlp/models/common/pretrain.py |
# Adapted from Tim's code here: https://github.com/tdozat/Parser-v3/blob/master/scripts/chuliu_edmonds.py
import numpy as np
def tarjan(tree):
""""""
indices = -np.ones_like(tree)
lowlinks = -np.ones_like(tree)
onstack = np.zeros_like(tree, dtype=bool)
stack = list()
_index = [0]
cycles = []
#-------------------------------------------------------------
def strong_connect(i):
_index[0] += 1
index = _index[-1]
indices[i] = lowlinks[i] = index - 1
stack.append(i)
onstack[i] = True
dependents = np.where(np.equal(tree, i))[0]
for j in dependents:
if indices[j] == -1:
strong_connect(j)
lowlinks[i] = min(lowlinks[i], lowlinks[j])
elif onstack[j]:
lowlinks[i] = min(lowlinks[i], indices[j])
# There's a cycle!
if lowlinks[i] == indices[i]:
cycle = np.zeros_like(indices, dtype=bool)
while stack[-1] != i:
j = stack.pop()
onstack[j] = False
cycle[j] = True
stack.pop()
onstack[i] = False
cycle[i] = True
if cycle.sum() > 1:
cycles.append(cycle)
return
#-------------------------------------------------------------
for i in range(len(tree)):
if indices[i] == -1:
strong_connect(i)
return cycles
def chuliu_edmonds(scores):
""""""
np.fill_diagonal(scores, -float('inf')) # prevent self-loops
scores[0] = -float('inf')
scores[0,0] = 0
tree = np.argmax(scores, axis=1)
cycles = tarjan(tree)
#print(scores)
#print(cycles)
if not cycles:
return tree
else:
# t = len(tree); c = len(cycle); n = len(noncycle)
# locations of cycle; (t) in [0,1]
cycle = cycles.pop()
# indices of cycle in original tree; (c) in t
cycle_locs = np.where(cycle)[0]
# heads of cycle in original tree; (c) in t
cycle_subtree = tree[cycle]
# scores of cycle in original tree; (c) in R
cycle_scores = scores[cycle, cycle_subtree]
# total score of cycle; () in R
cycle_score = cycle_scores.sum()
# locations of noncycle; (t) in [0,1]
noncycle = np.logical_not(cycle)
# indices of noncycle in original tree; (n) in t
noncycle_locs = np.where(noncycle)[0]
#print(cycle_locs, noncycle_locs)
# scores of cycle's potential heads; (c x n) - (c) + () -> (n x c) in R
metanode_head_scores = scores[cycle][:,noncycle] - cycle_scores[:,None] + cycle_score
# scores of cycle's potential dependents; (n x c) in R
metanode_dep_scores = scores[noncycle][:,cycle]
# best noncycle head for each cycle dependent; (n) in c
metanode_heads = np.argmax(metanode_head_scores, axis=0)
# best cycle head for each noncycle dependent; (n) in c
metanode_deps = np.argmax(metanode_dep_scores, axis=1)
# scores of noncycle graph; (n x n) in R
subscores = scores[noncycle][:,noncycle]
# pad to contracted graph; (n+1 x n+1) in R
subscores = np.pad(subscores, ( (0,1) , (0,1) ), 'constant')
# set the contracted graph scores of cycle's potential heads; (c x n)[:, (n) in n] in R -> (n) in R
subscores[-1, :-1] = metanode_head_scores[metanode_heads, np.arange(len(noncycle_locs))]
# set the contracted graph scores of cycle's potential dependents; (n x c)[(n) in n] in R-> (n) in R
subscores[:-1,-1] = metanode_dep_scores[np.arange(len(noncycle_locs)), metanode_deps]
# MST with contraction; (n+1) in n+1
contracted_tree = chuliu_edmonds(subscores)
# head of the cycle; () in n
#print(contracted_tree)
cycle_head = contracted_tree[-1]
# fixed tree: (n) in n+1
contracted_tree = contracted_tree[:-1]
# initialize new tree; (t) in 0
new_tree = -np.ones_like(tree)
#print(0, new_tree)
# fixed tree with no heads coming from the cycle: (n) in [0,1]
contracted_subtree = contracted_tree < len(contracted_tree)
# add the nodes to the new tree (t)[(n)[(n) in [0,1]] in t] in t = (n)[(n)[(n) in [0,1]] in n] in t
new_tree[noncycle_locs[contracted_subtree]] = noncycle_locs[contracted_tree[contracted_subtree]]
#print(1, new_tree)
# fixed tree with heads coming from the cycle: (n) in [0,1]
contracted_subtree = np.logical_not(contracted_subtree)
# add the nodes to the tree (t)[(n)[(n) in [0,1]] in t] in t = (c)[(n)[(n) in [0,1]] in c] in t
new_tree[noncycle_locs[contracted_subtree]] = cycle_locs[metanode_deps[contracted_subtree]]
#print(2, new_tree)
# add the old cycle to the tree; (t)[(c) in t] in t = (t)[(c) in t] in t
new_tree[cycle_locs] = tree[cycle_locs]
#print(3, new_tree)
# root of the cycle; (n)[() in n] in c = () in c
cycle_root = metanode_heads[cycle_head]
# add the root of the cycle to the new tree; (t)[(c)[() in c] in t] = (c)[() in c]
new_tree[cycle_locs[cycle_root]] = noncycle_locs[cycle_head]
#print(4, new_tree)
return new_tree
#===============================================================
def chuliu_edmonds_one_root(scores):
""""""
scores = scores.astype(np.float64)
tree = chuliu_edmonds(scores)
roots_to_try = np.where(np.equal(tree[1:], 0))[0]+1
if len(roots_to_try) == 1:
return tree
#-------------------------------------------------------------
def set_root(scores, root):
root_score = scores[root,0]
scores = np.array(scores)
scores[1:,0] = -float('inf')
scores[root] = -float('inf')
scores[root,0] = 0
return scores, root_score
#-------------------------------------------------------------
best_score, best_tree = -np.inf, None # This is what's causing it to crash
for root in roots_to_try:
_scores, root_score = set_root(scores, root)
_tree = chuliu_edmonds(_scores)
tree_probs = _scores[np.arange(len(_scores)), _tree]
tree_score = (tree_probs).sum()+(root_score) if (tree_probs > -np.inf).all() else -np.inf
if tree_score > best_score:
best_score = tree_score
best_tree = _tree
try:
assert best_tree is not None
except:
with open('debug.log', 'w') as f:
f.write('{}: {}, {}\n'.format(tree, scores, roots_to_try))
f.write('{}: {}, {}, {}\n'.format(_tree, _scores, tree_probs, tree_score))
raise
return best_tree
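# Conventions, as inferred from the code above: `scores` is an (n x n) matrix where
# scores[dep, head] is the score of `head` governing `dep`; index 0 is the ROOT
# placeholder, and the returned array maps every position to its predicted head
# (position 0 points to itself).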
| stanfordnlp-master | stanfordnlp/models/common/chuliu_edmonds.py |
from copy import copy
from collections import Counter, OrderedDict
import os
import pickle
PAD = '<PAD>'
PAD_ID = 0
UNK = '<UNK>'
UNK_ID = 1
EMPTY = '<EMPTY>'
EMPTY_ID = 2
ROOT = '<ROOT>'
ROOT_ID = 3
VOCAB_PREFIX = [PAD, UNK, EMPTY, ROOT]
class BaseVocab:
""" A base class for common vocabulary operations. Each subclass should at least
implement its own build_vocab() function."""
def __init__(self, data=None, lang="", idx=0, cutoff=0, lower=False):
self.data = data
self.lang = lang
self.idx = idx
self.cutoff = cutoff
self.lower = lower
if data is not None:
self.build_vocab()
self.state_attrs = ['lang', 'idx', 'cutoff', 'lower', '_unit2id', '_id2unit']
def build_vocab(self):
raise NotImplementedError()
def state_dict(self):
""" Returns a dictionary containing all states that are necessary to recover
this vocab. Useful for serialization."""
state = OrderedDict()
for attr in self.state_attrs:
if hasattr(self, attr):
state[attr] = getattr(self, attr)
return state
@classmethod
def load_state_dict(cls, state_dict):
""" Returns a new Vocab instance constructed from a state dict. """
new = cls()
for attr, value in state_dict.items():
setattr(new, attr, value)
return new
def normalize_unit(self, unit):
if self.lower:
return unit.lower()
return unit
def unit2id(self, unit):
unit = self.normalize_unit(unit)
if unit in self._unit2id:
return self._unit2id[unit]
else:
return self._unit2id[UNK]
def id2unit(self, id):
return self._id2unit[id]
def map(self, units):
return [self.unit2id(x) for x in units]
def unmap(self, ids):
return [self.id2unit(x) for x in ids]
def __len__(self):
return len(self._id2unit)
def __getitem__(self, key):
if isinstance(key, str):
return self.unit2id(key)
elif isinstance(key, int) or isinstance(key, list):
return self.id2unit(key)
else:
raise TypeError("Vocab key must be one of str, list, or int")
def __contains__(self, key):
return key in self._unit2id
@property
def size(self):
return len(self)
class CompositeVocab(BaseVocab):
''' Vocabulary class that handles parsing and printing composite values such as
compositional XPOS and universal morphological features (UFeats).
Two key options are `keyed` and `sep`. `sep` specifies the separator used between
different parts of the composite values, which is `|` for UFeats, for example.
If `keyed` is `True`, then the incoming value is treated similarly to UFeats, where
    each part is a key/value pair separated by an equal sign (`=`). There is no inherent
order to the keys, and we sort them alphabetically for serialization and deserialization.
Whenever a part is absent, its internal value is a special `<EMPTY>` symbol that will
be treated accordingly when generating the output. If `keyed` is `False`, then the parts
are treated as positioned values, and `<EMPTY>` is used to pad parts at the end when the
incoming value is not long enough.'''
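    # Illustrative example (feature values assumed): with sep='|' and keyed=True, the
    # UFeats string 'Case=Nom|Number=Sing' is parsed by unit2parts() into
    # {'Case': 'Nom', 'Number': 'Sing'}; unit2id() then returns one id per known key,
    # filling EMPTY_ID for keys absent from this unit, and id2unit() joins the
    # surviving 'key=value' pairs back with '|' (or returns '_' when all are empty).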
def __init__(self, data=None, lang="", idx=0, sep="", keyed=False):
self.sep = sep
self.keyed = keyed
super().__init__(data, lang, idx=idx)
self.state_attrs += ['sep', 'keyed']
def unit2parts(self, unit):
# unpack parts of a unit
if self.sep == "":
parts = [x for x in unit]
else:
parts = unit.split(self.sep)
if self.keyed:
if len(parts) == 1 and parts[0] == '_':
return dict()
parts = [x.split('=') for x in parts]
# Just treat multi-valued properties values as one possible value
parts = dict(parts)
elif unit == '_':
parts = []
return parts
def unit2id(self, unit):
parts = self.unit2parts(unit)
if self.keyed:
# treat multi-valued properties as singletons
return [self._unit2id[k].get(parts[k], UNK_ID) if k in parts else EMPTY_ID for k in self._unit2id]
else:
return [self._unit2id[i].get(parts[i], UNK_ID) if i < len(parts) else EMPTY_ID for i in range(len(self._unit2id))]
def id2unit(self, id):
items = []
for v, k in zip(id, self._id2unit.keys()):
if v == EMPTY_ID: continue
if self.keyed:
items.append("{}={}".format(k, self._id2unit[k][v]))
else:
items.append(self._id2unit[k][v])
res = self.sep.join(items)
if res == "":
res = "_"
return res
def build_vocab(self):
allunits = [w[self.idx] for sent in self.data for w in sent]
if self.keyed:
self._id2unit = dict()
for u in allunits:
parts = self.unit2parts(u)
for key in parts:
if key not in self._id2unit:
self._id2unit[key] = copy(VOCAB_PREFIX)
# treat multi-valued properties as singletons
if parts[key] not in self._id2unit[key]:
self._id2unit[key].append(parts[key])
# special handle for the case where upos/xpos/ufeats are always empty
if len(self._id2unit) == 0:
self._id2unit['_'] = copy(VOCAB_PREFIX) # use an arbitrary key
else:
self._id2unit = dict()
allparts = [self.unit2parts(u) for u in allunits]
maxlen = max([len(p) for p in allparts])
for parts in allparts:
for i, p in enumerate(parts):
if i not in self._id2unit:
self._id2unit[i] = copy(VOCAB_PREFIX)
if i < len(parts) and p not in self._id2unit[i]:
self._id2unit[i].append(p)
# special handle for the case where upos/xpos/ufeats are always empty
if len(self._id2unit) == 0:
self._id2unit[0] = copy(VOCAB_PREFIX) # use an arbitrary key
self._id2unit = OrderedDict([(k, self._id2unit[k]) for k in sorted(self._id2unit.keys())])
self._unit2id = {k: {w:i for i, w in enumerate(self._id2unit[k])} for k in self._id2unit}
def lens(self):
return [len(self._unit2id[k]) for k in self._unit2id]
class BaseMultiVocab:
""" A convenient vocab container that can store multiple BaseVocab instances, and support
safe serialization of all instances via state dicts. Each subclass of this base class
should implement the load_state_dict() function to specify how a saved state dict
should be loaded back."""
def __init__(self, vocab_dict=None):
self._vocabs = OrderedDict()
if vocab_dict is None:
return
# check all values provided must be a subclass of the Vocab base class
assert all([isinstance(v, BaseVocab) for v in vocab_dict.values()])
for k, v in vocab_dict.items():
self._vocabs[k] = v
def __setitem__(self, key, item):
self._vocabs[key] = item
def __getitem__(self, key):
return self._vocabs[key]
def state_dict(self):
""" Build a state dict by iteratively calling state_dict() of all vocabs. """
state = OrderedDict()
for k, v in self._vocabs.items():
state[k] = v.state_dict()
return state
@classmethod
def load_state_dict(cls, state_dict):
""" Construct a MultiVocab by reading from a state dict."""
raise NotImplementedError
| stanfordnlp-master | stanfordnlp/models/common/vocab.py |
"""
The full encoder-decoder model, built on top of the base seq2seq modules.
"""
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import stanfordnlp.models.common.seq2seq_constant as constant
from stanfordnlp.models.common import utils
from stanfordnlp.models.common.seq2seq_modules import LSTMAttention
from stanfordnlp.models.common.beam import Beam
class Seq2SeqModel(nn.Module):
"""
A complete encoder-decoder model, with optional attention.
"""
def __init__(self, args, emb_matrix=None, use_cuda=False):
super().__init__()
self.vocab_size = args['vocab_size']
self.emb_dim = args['emb_dim']
self.hidden_dim = args['hidden_dim']
self.nlayers = args['num_layers'] # encoder layers, decoder layers = 1
self.emb_dropout = args.get('emb_dropout', 0.0)
self.dropout = args['dropout']
self.pad_token = constant.PAD_ID
self.max_dec_len = args['max_dec_len']
self.use_cuda = use_cuda
self.top = args.get('top', 1e10)
self.args = args
self.emb_matrix = emb_matrix
print("Building an attentional Seq2Seq model...")
print("Using a Bi-LSTM encoder")
self.num_directions = 2
self.enc_hidden_dim = self.hidden_dim // 2
self.dec_hidden_dim = self.hidden_dim
self.use_pos = args.get('pos', False)
self.pos_dim = args.get('pos_dim', 0)
self.pos_vocab_size = args.get('pos_vocab_size', 0)
self.pos_dropout = args.get('pos_dropout', 0)
self.edit = args.get('edit', False)
self.num_edit = args.get('num_edit', 0)
self.emb_drop = nn.Dropout(self.emb_dropout)
self.drop = nn.Dropout(self.dropout)
self.embedding = nn.Embedding(self.vocab_size, self.emb_dim, self.pad_token)
self.encoder = nn.LSTM(self.emb_dim, self.enc_hidden_dim, self.nlayers, \
bidirectional=True, batch_first=True, dropout=self.dropout if self.nlayers > 1 else 0)
self.decoder = LSTMAttention(self.emb_dim, self.dec_hidden_dim, \
batch_first=True, attn_type=self.args['attn_type'])
self.dec2vocab = nn.Linear(self.dec_hidden_dim, self.vocab_size)
if self.use_pos and self.pos_dim > 0:
print("Using POS in encoder")
self.pos_embedding = nn.Embedding(self.pos_vocab_size, self.pos_dim, self.pad_token)
self.pos_drop = nn.Dropout(self.pos_dropout)
if self.edit:
edit_hidden = self.hidden_dim//2
self.edit_clf = nn.Sequential(
nn.Linear(self.hidden_dim, edit_hidden),
nn.ReLU(),
nn.Linear(edit_hidden, self.num_edit))
self.SOS_tensor = torch.LongTensor([constant.SOS_ID])
self.SOS_tensor = self.SOS_tensor.cuda() if self.use_cuda else self.SOS_tensor
self.init_weights()
def init_weights(self):
# initialize embeddings
init_range = constant.EMB_INIT_RANGE
if self.emb_matrix is not None:
if isinstance(self.emb_matrix, np.ndarray):
self.emb_matrix = torch.from_numpy(self.emb_matrix)
assert self.emb_matrix.size() == (self.vocab_size, self.emb_dim), \
"Input embedding matrix must match size: {} x {}".format(self.vocab_size, self.emb_dim)
self.embedding.weight.data.copy_(self.emb_matrix)
else:
self.embedding.weight.data.uniform_(-init_range, init_range)
# decide finetuning
if self.top <= 0:
print("Do not finetune embedding layer.")
self.embedding.weight.requires_grad = False
elif self.top < self.vocab_size:
print("Finetune top {} embeddings.".format(self.top))
self.embedding.weight.register_hook(lambda x: utils.keep_partial_grad(x, self.top))
else:
print("Finetune all embeddings.")
# initialize pos embeddings
if self.use_pos:
self.pos_embedding.weight.data.uniform_(-init_range, init_range)
def cuda(self):
super().cuda()
self.use_cuda = True
def cpu(self):
super().cpu()
self.use_cuda = False
def zero_state(self, inputs):
batch_size = inputs.size(0)
h0 = torch.zeros(self.encoder.num_layers*2, batch_size, self.enc_hidden_dim, requires_grad=False)
c0 = torch.zeros(self.encoder.num_layers*2, batch_size, self.enc_hidden_dim, requires_grad=False)
if self.use_cuda:
return h0.cuda(), c0.cuda()
return h0, c0
def encode(self, enc_inputs, lens):
""" Encode source sequence. """
self.h0, self.c0 = self.zero_state(enc_inputs)
packed_inputs = nn.utils.rnn.pack_padded_sequence(enc_inputs, lens, batch_first=True)
packed_h_in, (hn, cn) = self.encoder(packed_inputs, (self.h0, self.c0))
h_in, _ = nn.utils.rnn.pad_packed_sequence(packed_h_in, batch_first=True)
hn = torch.cat((hn[-1], hn[-2]), 1)
cn = torch.cat((cn[-1], cn[-2]), 1)
return h_in, (hn, cn)
def decode(self, dec_inputs, hn, cn, ctx, ctx_mask=None):
""" Decode a step, based on context encoding and source context states."""
dec_hidden = (hn, cn)
h_out, dec_hidden = self.decoder(dec_inputs, dec_hidden, ctx, ctx_mask)
h_out_reshape = h_out.contiguous().view(h_out.size(0) * h_out.size(1), -1)
decoder_logits = self.dec2vocab(h_out_reshape)
decoder_logits = decoder_logits.view(h_out.size(0), h_out.size(1), -1)
log_probs = self.get_log_prob(decoder_logits)
return log_probs, dec_hidden
def forward(self, src, src_mask, tgt_in, pos=None):
# prepare for encoder/decoder
enc_inputs = self.emb_drop(self.embedding(src))
dec_inputs = self.emb_drop(self.embedding(tgt_in))
src_lens = list(src_mask.data.eq(constant.PAD_ID).long().sum(1))
if self.use_pos:
# append pos to the end of src sequence
assert pos is not None
pos_inputs = self.pos_drop(self.pos_embedding(pos))
enc_inputs = torch.cat([enc_inputs, pos_inputs.unsqueeze(1)], dim=1)
h_in, (hn, cn) = self.encode(enc_inputs, src_lens)
if self.edit:
edit_logits = self.edit_clf(hn)
else:
edit_logits = None
log_probs, _ = self.decode(dec_inputs, hn, cn, h_in, src_mask)
return log_probs, edit_logits
def get_log_prob(self, logits):
logits_reshape = logits.view(-1, self.vocab_size)
log_probs = F.log_softmax(logits_reshape, dim=1)
if logits.dim() == 2:
return log_probs
return log_probs.view(logits.size(0), logits.size(1), logits.size(2))
def predict(self, src, src_mask, pos=None, beam_size=5):
""" Predict with beam search. """
enc_inputs = self.embedding(src)
batch_size = enc_inputs.size(0)
src_lens = list(src_mask.data.eq(constant.PAD_ID).long().sum(1))
if self.use_pos:
assert pos is not None
pos_inputs = self.pos_drop(self.pos_embedding(pos))
enc_inputs = torch.cat([enc_inputs, pos_inputs.unsqueeze(1)], dim=1)
# (1) encode source
h_in, (hn, cn) = self.encode(enc_inputs, src_lens)
if self.edit:
edit_logits = self.edit_clf(hn)
else:
edit_logits = None
# (2) set up beam
with torch.no_grad():
h_in = h_in.data.repeat(beam_size, 1, 1) # repeat data for beam search
src_mask = src_mask.repeat(beam_size, 1)
# repeat decoder hidden states
hn = hn.data.repeat(beam_size, 1)
cn = cn.data.repeat(beam_size, 1)
beam = [Beam(beam_size, self.use_cuda) for _ in range(batch_size)]
def update_state(states, idx, positions, beam_size):
""" Select the states according to back pointers. """
for e in states:
br, d = e.size()
s = e.contiguous().view(beam_size, br // beam_size, d)[:,idx]
s.data.copy_(s.data.index_select(0, positions))
# (3) main loop
for i in range(self.max_dec_len):
dec_inputs = torch.stack([b.get_current_state() for b in beam]).t().contiguous().view(-1, 1)
dec_inputs = self.embedding(dec_inputs)
log_probs, (hn, cn) = self.decode(dec_inputs, hn, cn, h_in, src_mask)
log_probs = log_probs.view(beam_size, batch_size, -1).transpose(0,1)\
.contiguous() # [batch, beam, V]
# advance each beam
done = []
for b in range(batch_size):
is_done = beam[b].advance(log_probs.data[b])
if is_done:
done += [b]
# update beam state
update_state((hn, cn), b, beam[b].get_current_origin(), beam_size)
if len(done) == batch_size:
break
# back trace and find hypothesis
all_hyp, all_scores = [], []
for b in range(batch_size):
scores, ks = beam[b].sort_best()
all_scores += [scores[0]]
k = ks[0]
hyp = beam[b].get_hyp(k)
hyp = utils.prune_hyp(hyp)
all_hyp += [hyp]
return all_hyp, edit_logits
| stanfordnlp-master | stanfordnlp/models/common/seq2seq_model.py |
"""
A wrapper/loader for the official conll-u format files.
"""
import os
import io
FIELD_NUM = 10
FIELD_TO_IDX = {'id': 0, 'word': 1, 'lemma': 2, 'upos': 3, 'xpos': 4, 'feats': 5, 'head': 6, 'deprel': 7, 'deps': 8, 'misc': 9}
class CoNLLFile():
def __init__(self, filename=None, input_str=None, ignore_gapping=True):
# If ignore_gapping is True, all words that are gap fillers (identified with a period in
# the sentence index) will be ignored.
self.ignore_gapping = ignore_gapping
if filename is not None and not os.path.exists(filename):
raise Exception("File not found at: " + filename)
if filename is None:
assert input_str is not None and len(input_str) > 0
self._file = input_str
self._from_str = True
else:
self._file = filename
self._from_str = False
def load_all(self):
""" Trigger all lazy initializations so that the file is loaded."""
_ = self.sents
_ = self.num_words
def load_conll(self):
"""
Load data into a list of sentences, where each sentence is a list of lines,
and each line is a list of conllu fields.
"""
sents, cache = [], []
if self._from_str:
infile = io.StringIO(self.file)
else:
infile = open(self.file)
with infile:
for line in infile:
line = line.strip()
if len(line) == 0:
if len(cache) > 0:
sents.append(cache)
cache = []
else:
if line.startswith('#'): # skip comment line
continue
array = line.split('\t')
if self.ignore_gapping and '.' in array[0]:
continue
assert len(array) == FIELD_NUM
cache += [array]
if len(cache) > 0:
sents.append(cache)
return sents
@property
def file(self):
return self._file
@property
def sents(self):
if not hasattr(self, '_sents'):
self._sents = self.load_conll()
return self._sents
def __len__(self):
return len(self.sents)
@property
def num_words(self):
""" Num of total words, after multi-word expansion."""
if not hasattr(self, '_num_words'):
n = 0
for sent in self.sents:
for ln in sent:
if '-' not in ln[0]:
n += 1
self._num_words = n
return self._num_words
def get(self, fields, as_sentences=False):
""" Get fields from a list of field names. If only one field name is provided, return a list
        of that field; if more than one, return a list of lists. Note that all returned fields are after
multi-word expansion.
"""
assert isinstance(fields, list), "Must provide field names as a list."
assert len(fields) >= 1, "Must have at least one field."
field_idxs = [FIELD_TO_IDX[f.lower()] for f in fields]
results = []
for sent in self.sents:
cursent = []
for ln in sent:
if '-' in ln[0]: # skip
continue
if len(field_idxs) == 1:
cursent += [ln[field_idxs[0]]]
else:
cursent += [[ln[fid] for fid in field_idxs]]
if as_sentences:
results.append(cursent)
else:
results += cursent
return results
def set(self, fields, contents):
""" Set fields based on contents. If only one field (singleton list) is provided, then a list of content will be expected; otherwise a list of list of contents will be expected.
"""
assert isinstance(fields, list), "Must provide field names as a list."
assert isinstance(contents, list), "Must provide contents as a list (one item per line)."
assert len(fields) >= 1, "Must have at least one field."
print("len(contents", len(contents))
print("num words", self.num_words)
assert self.num_words == len(contents), "Contents must have the same number as the original file."
field_idxs = [FIELD_TO_IDX[f.lower()] for f in fields]
cidx = 0
for sent in self.sents:
for ln in sent:
if '-' in ln[0]:
continue
if len(field_idxs) == 1:
ln[field_idxs[0]] = contents[cidx]
else:
for fid, ct in zip(field_idxs, contents[cidx]):
ln[fid] = ct
cidx += 1
return
def write_conll(self, filename):
""" Write current conll contents to file.
"""
conll_string = self.conll_as_string()
with open(filename, 'w') as outfile:
outfile.write(conll_string)
return
def conll_as_string(self):
""" Return current conll contents as string
"""
return_string = ""
for sent in self.sents:
for ln in sent:
return_string += ("\t".join(ln)+"\n")
return_string += "\n"
return return_string
def write_conll_with_lemmas(self, lemmas, filename):
""" Write a new conll file, but use the new lemmas to replace the old ones."""
assert self.num_words == len(lemmas), "Num of lemmas does not match the number in original data file."
lemma_idx = FIELD_TO_IDX['lemma']
idx = 0
with open(filename, 'w') as outfile:
for sent in self.sents:
for ln in sent:
if '-' not in ln[0]: # do not process if it is a mwt line
lm = lemmas[idx]
if len(lm) == 0:
lm = '_'
ln[lemma_idx] = lm
idx += 1
print("\t".join(ln), file=outfile)
print("", file=outfile)
return
def get_mwt_expansions(self):
word_idx = FIELD_TO_IDX['word']
expansions = []
src = ''
dst = []
for sent in self.sents:
mwt_begin = 0
mwt_end = -1
for ln in sent:
if '.' in ln[0]:
# skip ellipsis
continue
if '-' in ln[0]:
mwt_begin, mwt_end = [int(x) for x in ln[0].split('-')]
src = ln[word_idx]
continue
if mwt_begin <= int(ln[0]) < mwt_end:
dst += [ln[word_idx]]
elif int(ln[0]) == mwt_end:
dst += [ln[word_idx]]
expansions += [[src, ' '.join(dst)]]
src = ''
dst = []
return expansions
def get_mwt_expansion_cands(self):
word_idx = FIELD_TO_IDX['word']
cands = []
for sent in self.sents:
for ln in sent:
if "MWT=Yes" in ln[-1]:
cands += [ln[word_idx]]
return cands
def write_conll_with_mwt_expansions(self, expansions, output_file):
""" Expands MWTs predicted by the tokenizer and write to file. This method replaces the head column with a right branching tree. """
idx = 0
count = 0
for sent in self.sents:
for ln in sent:
idx += 1
if "MWT=Yes" not in ln[-1]:
print("{}\t{}".format(idx, "\t".join(ln[1:6] + [str(idx-1)] + ln[7:])), file=output_file)
else:
# print MWT expansion
expanded = [x for x in expansions[count].split(' ') if len(x) > 0]
count += 1
endidx = idx + len(expanded) - 1
print("{}-{}\t{}".format(idx, endidx, "\t".join(['_' if i == 5 or i == 8 else x for i, x in enumerate(ln[1:])])), file=output_file)
for e_i, e_word in enumerate(expanded):
print("{}\t{}\t{}".format(idx + e_i, e_word, "\t".join(['_'] * 4 + [str(idx + e_i - 1)] + ['_'] * 3)), file=output_file)
idx = endidx
print("", file=output_file)
idx = 0
assert count == len(expansions), "{} {} {}".format(count, len(expansions), expansions)
return
| stanfordnlp-master | stanfordnlp/models/common/conll.py |
"""
Utils for seq2seq models.
"""
from collections import Counter
import random
import json
import unicodedata
import torch
import stanfordnlp.models.common.seq2seq_constant as constant
# torch utils
def get_optimizer(name, parameters, lr):
if name == 'sgd':
return torch.optim.SGD(parameters, lr=lr)
elif name == 'adagrad':
return torch.optim.Adagrad(parameters, lr=lr)
elif name == 'adam':
return torch.optim.Adam(parameters) # use default lr
elif name == 'adamax':
return torch.optim.Adamax(parameters) # use default lr
else:
raise Exception("Unsupported optimizer: {}".format(name))
def change_lr(optimizer, new_lr):
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
def flatten_indices(seq_lens, width):
flat = []
for i, l in enumerate(seq_lens):
for j in range(l):
flat.append(i * width + j)
return flat
def set_cuda(var, cuda):
if cuda:
return var.cuda()
return var
def keep_partial_grad(grad, topk):
"""
Keep only the topk rows of grads.
"""
assert topk < grad.size(0)
grad.data[topk:].zero_()
return grad
# other utils
def save_config(config, path, verbose=True):
with open(path, 'w') as outfile:
json.dump(config, outfile, indent=2)
if verbose:
print("Config saved to file {}".format(path))
return config
def load_config(path, verbose=True):
with open(path) as f:
config = json.load(f)
if verbose:
print("Config loaded from file {}".format(path))
return config
def normalize_text(text):
return unicodedata.normalize('NFD', text)
def unmap_with_copy(indices, src_tokens, vocab):
"""
Unmap a list of list of indices, by optionally copying from src_tokens.
"""
result = []
for ind, tokens in zip(indices, src_tokens):
words = []
for idx in ind:
if idx >= 0:
words.append(vocab.id2word[idx])
else:
idx = -idx - 1 # flip and minus 1
words.append(tokens[idx])
result += [words]
return result
def prune_decoded_seqs(seqs):
"""
Prune decoded sequences after EOS token.
"""
out = []
for s in seqs:
if constant.EOS in s:
            idx = s.index(constant.EOS)
out += [s[:idx]]
else:
out += [s]
return out
def prune_hyp(hyp):
"""
Prune a decoded hypothesis
"""
if constant.EOS_ID in hyp:
idx = hyp.index(constant.EOS_ID)
return hyp[:idx]
else:
return hyp
def prune(data_list, lens):
assert len(data_list) == len(lens)
nl = []
for d, l in zip(data_list, lens):
nl.append(d[:l])
return nl
def sort(packed, ref, reverse=True):
"""
Sort a series of packed list, according to a ref list.
Also return the original index before the sort.
"""
assert (isinstance(packed, tuple) or isinstance(packed, list)) and isinstance(ref, list)
packed = [ref] + [range(len(ref))] + list(packed)
sorted_packed = [list(t) for t in zip(*sorted(zip(*packed), reverse=reverse))]
return tuple(sorted_packed[1:])
def unsort(sorted_list, oidx):
"""
Unsort a sorted list, based on the original idx.
"""
assert len(sorted_list) == len(oidx), "Number of list elements must match with original indices."
_, unsorted = [list(t) for t in zip(*sorted(zip(oidx, sorted_list)))]
return unsorted
| stanfordnlp-master | stanfordnlp/models/common/seq2seq_utils.py |
stanfordnlp-master | stanfordnlp/models/common/__init__.py |
|
"""
Constants for seq2seq models.
"""
PAD = '<PAD>'
PAD_ID = 0
UNK = '<UNK>'
UNK_ID = 1
SOS = '<SOS>'
SOS_ID = 2
EOS = '<EOS>'
EOS_ID = 3
VOCAB_PREFIX = [PAD, UNK, SOS, EOS]
EMB_INIT_RANGE = 1.0
INFINITY_NUMBER = 1e12
| stanfordnlp-master | stanfordnlp/models/common/seq2seq_constant.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
class PairwiseBilinear(nn.Module):
''' A bilinear module that deals with broadcasting for efficient memory usage.
Input: tensors of sizes (N x L1 x D1) and (N x L2 x D2)
Output: tensor of size (N x L1 x L2 x O)'''
def __init__(self, input1_size, input2_size, output_size, bias=True):
super().__init__()
self.input1_size = input1_size
self.input2_size = input2_size
self.output_size = output_size
self.weight = nn.Parameter(torch.Tensor(input1_size, input2_size, output_size))
self.bias = nn.Parameter(torch.Tensor(output_size)) if bias else 0
def forward(self, input1, input2):
input1_size = list(input1.size())
input2_size = list(input2.size())
output_size = [input1_size[0], input1_size[1], input2_size[1], self.output_size]
# ((N x L1) x D1) * (D1 x (D2 x O)) -> (N x L1) x (D2 x O)
intermediate = torch.mm(input1.view(-1, input1_size[-1]), self.weight.view(-1, self.input2_size * self.output_size))
# (N x L2 x D2) -> (N x D2 x L2)
input2 = input2.transpose(1, 2)
# (N x (L1 x O) x D2) * (N x D2 x L2) -> (N x (L1 x O) x L2)
output = intermediate.view(input1_size[0], input1_size[1] * self.output_size, input2_size[2]).bmm(input2)
# (N x (L1 x O) x L2) -> (N x L1 x L2 x O)
output = output.view(input1_size[0], input1_size[1], self.output_size, input2_size[1]).transpose(2, 3)
return output
class BiaffineScorer(nn.Module):
def __init__(self, input1_size, input2_size, output_size):
super().__init__()
self.W_bilin = nn.Bilinear(input1_size + 1, input2_size + 1, output_size)
self.W_bilin.weight.data.zero_()
self.W_bilin.bias.data.zero_()
def forward(self, input1, input2):
input1 = torch.cat([input1, input1.new_ones(*input1.size()[:-1], 1)], len(input1.size())-1)
input2 = torch.cat([input2, input2.new_ones(*input2.size()[:-1], 1)], len(input2.size())-1)
return self.W_bilin(input1, input2)
class PairwiseBiaffineScorer(nn.Module):
def __init__(self, input1_size, input2_size, output_size):
super().__init__()
self.W_bilin = PairwiseBilinear(input1_size + 1, input2_size + 1, output_size)
self.W_bilin.weight.data.zero_()
self.W_bilin.bias.data.zero_()
def forward(self, input1, input2):
input1 = torch.cat([input1, input1.new_ones(*input1.size()[:-1], 1)], len(input1.size())-1)
input2 = torch.cat([input2, input2.new_ones(*input2.size()[:-1], 1)], len(input2.size())-1)
return self.W_bilin(input1, input2)
class DeepBiaffineScorer(nn.Module):
def __init__(self, input1_size, input2_size, hidden_size, output_size, hidden_func=F.relu, dropout=0, pairwise=True):
super().__init__()
self.W1 = nn.Linear(input1_size, hidden_size)
self.W2 = nn.Linear(input2_size, hidden_size)
self.hidden_func = hidden_func
if pairwise:
self.scorer = PairwiseBiaffineScorer(hidden_size, hidden_size, output_size)
else:
self.scorer = BiaffineScorer(hidden_size, hidden_size, output_size)
self.dropout = nn.Dropout(dropout)
def forward(self, input1, input2):
return self.scorer(self.dropout(self.hidden_func(self.W1(input1))), self.dropout(self.hidden_func(self.W2(input2))))
if __name__ == "__main__":
x1 = torch.randn(3,4)
x2 = torch.randn(3,5)
scorer = DeepBiaffineScorer(4, 5, 6, 7)
print(scorer(x1, x2))
| stanfordnlp-master | stanfordnlp/models/common/biaffine.py |
"""
Different loss functions.
"""
import torch
import torch.nn as nn
import stanfordnlp.models.common.seq2seq_constant as constant
def SequenceLoss(vocab_size):
weight = torch.ones(vocab_size)
weight[constant.PAD_ID] = 0
crit = nn.NLLLoss(weight)
return crit
class MixLoss(nn.Module):
"""
A mixture of SequenceLoss and CrossEntropyLoss.
Loss = SequenceLoss + alpha * CELoss
"""
def __init__(self, vocab_size, alpha):
super().__init__()
self.seq_loss = SequenceLoss(vocab_size)
self.ce_loss = nn.CrossEntropyLoss()
assert alpha >= 0
self.alpha = alpha
def forward(self, seq_inputs, seq_targets, class_inputs, class_targets):
sl = self.seq_loss(seq_inputs, seq_targets)
cel = self.ce_loss(class_inputs, class_targets)
loss = sl + self.alpha * cel
return loss
class MaxEntropySequenceLoss(nn.Module):
"""
    A max entropy loss that encourages the model to have large entropy,
    thereby giving more diverse outputs.
Loss = NLLLoss + alpha * EntropyLoss
"""
def __init__(self, vocab_size, alpha):
super().__init__()
weight = torch.ones(vocab_size)
weight[constant.PAD_ID] = 0
self.nll = nn.NLLLoss(weight)
self.alpha = alpha
def forward(self, inputs, targets):
"""
inputs: [N, C]
targets: [N]
"""
assert inputs.size(0) == targets.size(0)
nll_loss = self.nll(inputs, targets)
# entropy loss
mask = targets.eq(constant.PAD_ID).unsqueeze(1).expand_as(inputs)
masked_inputs = inputs.clone().masked_fill_(mask, 0.0)
p = torch.exp(masked_inputs)
ent_loss = p.mul(masked_inputs).sum() / inputs.size(0) # average over minibatch
loss = nll_loss + self.alpha * ent_loss
return loss
| stanfordnlp-master | stanfordnlp/models/common/loss.py |
"""
Utility functions.
"""
import os
from collections import Counter
import random
import json
import unicodedata
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import Optimizer, required
from stanfordnlp.models.common.constant import lcode2lang
import stanfordnlp.models.common.seq2seq_constant as constant
import stanfordnlp.utils.conll18_ud_eval as ud_eval
def poincare_grad(p, d_p):
"""
Calculates Riemannian grad from Euclidean grad.
Args:
p (Tensor): Current point in the ball
d_p (Tensor): Euclidean gradient at p
"""
p_sqnorm = torch.sum(p.data ** 2, dim=-1, keepdim=True)
d_p = d_p * ((1 - p_sqnorm) ** 2 / 4).expand_as(d_p)
return d_p
def _correct(x, eps=1e-10):
current_norms = torch.norm(x,2,x.dim() - 1)
mask_idx = current_norms < 1./(1+eps)
modified = 1./((1+eps)*current_norms)
modified[mask_idx] = 1.0
return modified.unsqueeze(-1)
def euclidean_grad(p, d_p):
return d_p
def retraction(p, d_p, lr):
# Gradient clipping.
d_p.clamp_(min=-10000, max=10000)
p.data.add_(-lr, d_p)
#project back to the manifold.
p.data = p.data * _correct(p.data)
class RiemannianSGD(Optimizer):
r"""Riemannian stochastic gradient descent.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
rgrad (Function): Function to compute the Riemannian gradient from
an Euclidean gradient
retraction (Function): Function to update the parameters via a
retraction of the Riemannian gradient
lr (float): learning rate
"""
def __init__(self, params, lr=required, rgrad=required, retraction=required):
defaults = dict(lr=lr, rgrad=rgrad, retraction=retraction)
super(RiemannianSGD, self).__init__(params, defaults)
def step(self, lr=None):
"""Performs a single optimization step.
Arguments:
lr (float, optional): learning rate for the current update.
"""
loss = None
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
d_p = p.grad.data
if lr is None:
lr = group['lr']
d_p = group['rgrad'](p, d_p)
group['retraction'](p, d_p, lr)
return loss
# filenames
def get_wordvec_file(wordvec_dir, shorthand):
""" Lookup the name of the word vectors file, given a directory and the language shorthand.
"""
lcode, tcode = shorthand.split('_')
lang = lcode2lang[lcode] if lcode != 'no' else lcode2lang[shorthand]
if lcode == 'zh':
lang = 'ChineseT'
return os.path.join(wordvec_dir, lang, '{}.vectors.xz'.format(\
lcode if lcode != 'no' else (shorthand if shorthand != 'no_nynorsklia' else 'no_nynorsk')))
# training schedule
def get_adaptive_eval_interval(cur_dev_size, thres_dev_size, base_interval):
""" Adjust the evaluation interval adaptively.
If cur_dev_size <= thres_dev_size, return base_interval;
else, linearly increase the interval (round to integer times of base interval).
"""
if cur_dev_size <= thres_dev_size:
return base_interval
else:
alpha = round(cur_dev_size / thres_dev_size)
return base_interval * alpha
# ud utils
def ud_scores(gold_conllu_file, system_conllu_file):
gold_ud = ud_eval.load_conllu_file(gold_conllu_file)
system_ud = ud_eval.load_conllu_file(system_conllu_file)
evaluation = ud_eval.evaluate(gold_ud, system_ud)
return evaluation
def harmonic_mean(a, weights=None):
if any([x == 0 for x in a]):
return 0
else:
        assert weights is None or len(weights) == len(a), 'Weights have length {}, which is different from the length of the array ({}).'.format(len(weights), len(a))
if weights is None:
return len(a) / sum([1/x for x in a])
else:
return sum(weights) / sum(w/x for x, w in zip(a, weights))
# torch utils
def get_optimizer(name, parameters, lr, betas=(0.9, 0.999), eps=1e-8):
if name == 'sgd':
return torch.optim.SGD(parameters, lr=lr)
elif name == 'rsgd':
return RiemannianSGD(parameters, lr=lr, rgrad=poincare_grad, retraction=retraction)
elif name == 'adagrad':
return torch.optim.Adagrad(parameters, lr=lr)
elif name == 'adam':
return torch.optim.Adam(parameters, lr=lr, betas=betas, eps=eps)
elif name == 'adamax':
return torch.optim.Adamax(parameters) # use default lr
else:
raise Exception("Unsupported optimizer: {}".format(name))
def change_lr(optimizer, new_lr):
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
def flatten_indices(seq_lens, width):
flat = []
for i, l in enumerate(seq_lens):
for j in range(l):
flat.append(i * width + j)
return flat
def set_cuda(var, cuda):
if cuda:
return var.cuda()
return var
def keep_partial_grad(grad, topk):
"""
Keep only the topk rows of grads.
"""
assert topk < grad.size(0)
grad.data[topk:].zero_()
return grad
# other utils
def ensure_dir(d, verbose=True):
if not os.path.exists(d):
if verbose:
print("Directory {} do not exist; creating...".format(d))
os.makedirs(d)
def save_config(config, path, verbose=True):
with open(path, 'w') as outfile:
json.dump(config, outfile, indent=2)
if verbose:
print("Config saved to file {}".format(path))
return config
def load_config(path, verbose=True):
with open(path) as f:
config = json.load(f)
if verbose:
print("Config loaded from file {}".format(path))
return config
def print_config(config):
info = "Running with the following configs:\n"
for k,v in config.items():
info += "\t{} : {}\n".format(k, str(v))
print("\n" + info + "\n")
return
def normalize_text(text):
return unicodedata.normalize('NFD', text)
def unmap_with_copy(indices, src_tokens, vocab):
"""
Unmap a list of list of indices, by optionally copying from src_tokens.
"""
result = []
for ind, tokens in zip(indices, src_tokens):
words = []
for idx in ind:
if idx >= 0:
words.append(vocab.id2word[idx])
else:
idx = -idx - 1 # flip and minus 1
words.append(tokens[idx])
result += [words]
return result
def prune_decoded_seqs(seqs):
"""
Prune decoded sequences after EOS token.
"""
out = []
for s in seqs:
if constant.EOS in s:
            idx = s.index(constant.EOS)
out += [s[:idx]]
else:
out += [s]
return out
def prune_hyp(hyp):
"""
Prune a decoded hypothesis
"""
if constant.EOS_ID in hyp:
idx = hyp.index(constant.EOS_ID)
return hyp[:idx]
else:
return hyp
def prune(data_list, lens):
assert len(data_list) == len(lens)
nl = []
for d, l in zip(data_list, lens):
nl.append(d[:l])
return nl
def sort(packed, ref, reverse=True):
"""
    Sort a series of packed lists according to a ref list.
Also return the original index before the sort.
"""
assert (isinstance(packed, tuple) or isinstance(packed, list)) and isinstance(ref, list)
packed = [ref] + [range(len(ref))] + list(packed)
sorted_packed = [list(t) for t in zip(*sorted(zip(*packed), reverse=reverse))]
return tuple(sorted_packed[1:])
def unsort(sorted_list, oidx):
"""
Unsort a sorted list, based on the original idx.
"""
assert len(sorted_list) == len(oidx), "Number of list elements must match with original indices."
_, unsorted = [list(t) for t in zip(*sorted(zip(oidx, sorted_list)))]
return unsorted
def tensor_unsort(sorted_tensor, oidx):
"""
Unsort a sorted tensor on its 0-th dimension, based on the original idx.
"""
assert sorted_tensor.size(0) == len(oidx), "Number of list elements must match with original indices."
backidx = [x[0] for x in sorted(enumerate(oidx), key=lambda x: x[1])]
return sorted_tensor[backidx]
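if __name__ == "__main__":
    # Minimal sketch (not part of the original module) of the sort/unsort round
    # trip used when batching sequences by length.
    lens = [2, 5, 3]
    words = ['a', 'b', 'c']
    orig_idx, sorted_words = sort((words,), lens)
    print(sorted_words)                    # ['b', 'c', 'a'], longest first
    print(unsort(sorted_words, orig_idx))  # ['a', 'b', 'c'], original order restored
    print(harmonic_mean([1, 2, 4]))        # 12/7 ~= 1.714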
| stanfordnlp-master | stanfordnlp/models/common/utils.py |
"""
PyTorch implementation of basic sequence-to-sequence modules.
"""
import torch
import torch.nn as nn
import math
import numpy as np
import stanfordnlp.models.common.seq2seq_constant as constant
class BasicAttention(nn.Module):
"""
A basic MLP attention layer.
"""
def __init__(self, dim):
super(BasicAttention, self).__init__()
self.linear_in = nn.Linear(dim, dim, bias=False)
self.linear_c = nn.Linear(dim, dim)
self.linear_v = nn.Linear(dim, 1, bias=False)
self.linear_out = nn.Linear(dim * 2, dim, bias=False)
self.tanh = nn.Tanh()
self.sm = nn.Softmax(dim=1)
def forward(self, input, context, mask=None, attn_only=False):
"""
input: batch x dim
context: batch x sourceL x dim
"""
batch_size = context.size(0)
source_len = context.size(1)
dim = context.size(2)
target = self.linear_in(input) # batch x dim
source = self.linear_c(context.contiguous().view(-1, dim)).view(batch_size, source_len, dim)
attn = target.unsqueeze(1).expand_as(context) + source
attn = self.tanh(attn) # batch x sourceL x dim
attn = self.linear_v(attn.view(-1, dim)).view(batch_size, source_len)
if mask is not None:
attn.masked_fill_(mask, -constant.INFINITY_NUMBER)
attn = self.sm(attn)
if attn_only:
return attn
weighted_context = torch.bmm(attn.unsqueeze(1), context).squeeze(1)
h_tilde = torch.cat((weighted_context, input), 1)
h_tilde = self.tanh(self.linear_out(h_tilde))
return h_tilde, attn
class SoftDotAttention(nn.Module):
"""Soft Dot Attention.
Ref: http://www.aclweb.org/anthology/D15-1166
Adapted from PyTorch OPEN NMT.
"""
def __init__(self, dim):
"""Initialize layer."""
super(SoftDotAttention, self).__init__()
self.linear_in = nn.Linear(dim, dim, bias=False)
self.sm = nn.Softmax(dim=1)
self.linear_out = nn.Linear(dim * 2, dim, bias=False)
self.tanh = nn.Tanh()
self.mask = None
def forward(self, input, context, mask=None, attn_only=False):
"""Propogate input through the network.
input: batch x dim
context: batch x sourceL x dim
"""
target = self.linear_in(input).unsqueeze(2) # batch x dim x 1
# Get attention
attn = torch.bmm(context, target).squeeze(2) # batch x sourceL
if mask is not None:
            # set the padding attention logits to -inf
assert mask.size() == attn.size(), "Mask size must match the attention size!"
attn.masked_fill_(mask, -constant.INFINITY_NUMBER)
attn = self.sm(attn)
if attn_only:
return attn
attn3 = attn.view(attn.size(0), 1, attn.size(1)) # batch x 1 x sourceL
weighted_context = torch.bmm(attn3, context).squeeze(1) # batch x dim
h_tilde = torch.cat((weighted_context, input), 1)
h_tilde = self.tanh(self.linear_out(h_tilde))
return h_tilde, attn
class LinearAttention(nn.Module):
""" A linear attention form, inspired by BiDAF:
a = W (u; v; u o v)
"""
def __init__(self, dim):
super(LinearAttention, self).__init__()
self.linear = nn.Linear(dim*3, 1, bias=False)
self.linear_out = nn.Linear(dim * 2, dim, bias=False)
self.sm = nn.Softmax(dim=1)
self.tanh = nn.Tanh()
self.mask = None
def forward(self, input, context, mask=None, attn_only=False):
"""
input: batch x dim
context: batch x sourceL x dim
"""
batch_size = context.size(0)
source_len = context.size(1)
dim = context.size(2)
u = input.unsqueeze(1).expand_as(context).contiguous().view(-1, dim) # batch*sourceL x dim
v = context.contiguous().view(-1, dim)
attn_in = torch.cat((u, v, u.mul(v)), 1)
attn = self.linear(attn_in).view(batch_size, source_len)
if mask is not None:
            # set the padding attention logits to -inf
assert mask.size() == attn.size(), "Mask size must match the attention size!"
attn.masked_fill_(mask, -constant.INFINITY_NUMBER)
attn = self.sm(attn)
if attn_only:
return attn
attn3 = attn.view(batch_size, 1, source_len) # batch x 1 x sourceL
weighted_context = torch.bmm(attn3, context).squeeze(1) # batch x dim
h_tilde = torch.cat((weighted_context, input), 1)
h_tilde = self.tanh(self.linear_out(h_tilde))
return h_tilde, attn
class DeepAttention(nn.Module):
""" A deep attention form, invented by Robert:
u = ReLU(Wx)
v = ReLU(Wy)
a = V.(u o v)
"""
def __init__(self, dim):
super(DeepAttention, self).__init__()
self.linear_in = nn.Linear(dim, dim, bias=False)
self.linear_v = nn.Linear(dim, 1, bias=False)
self.linear_out = nn.Linear(dim * 2, dim, bias=False)
self.relu = nn.ReLU()
self.sm = nn.Softmax(dim=1)
self.tanh = nn.Tanh()
self.mask = None
def forward(self, input, context, mask=None, attn_only=False):
"""
input: batch x dim
context: batch x sourceL x dim
"""
batch_size = context.size(0)
source_len = context.size(1)
dim = context.size(2)
u = input.unsqueeze(1).expand_as(context).contiguous().view(-1, dim) # batch*sourceL x dim
u = self.relu(self.linear_in(u))
v = self.relu(self.linear_in(context.contiguous().view(-1, dim)))
attn = self.linear_v(u.mul(v)).view(batch_size, source_len)
if mask is not None:
            # set the padding attention logits to -inf
assert mask.size() == attn.size(), "Mask size must match the attention size!"
attn.masked_fill_(mask, -constant.INFINITY_NUMBER)
attn = self.sm(attn)
if attn_only:
return attn
attn3 = attn.view(batch_size, 1, source_len) # batch x 1 x sourceL
weighted_context = torch.bmm(attn3, context).squeeze(1) # batch x dim
h_tilde = torch.cat((weighted_context, input), 1)
h_tilde = self.tanh(self.linear_out(h_tilde))
return h_tilde, attn
class LSTMAttention(nn.Module):
r"""A long short-term memory (LSTM) cell with attention."""
def __init__(self, input_size, hidden_size, batch_first=True, attn_type='soft'):
"""Initialize params."""
super(LSTMAttention, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.batch_first = batch_first
self.lstm_cell = nn.LSTMCell(input_size, hidden_size)
if attn_type == 'soft':
self.attention_layer = SoftDotAttention(hidden_size)
elif attn_type == 'mlp':
self.attention_layer = BasicAttention(hidden_size)
elif attn_type == 'linear':
self.attention_layer = LinearAttention(hidden_size)
elif attn_type == 'deep':
self.attention_layer = DeepAttention(hidden_size)
else:
raise Exception("Unsupported LSTM attention type: {}".format(attn_type))
print("Using {} attention for LSTM.".format(attn_type))
def forward(self, input, hidden, ctx, ctx_mask=None):
"""Propogate input through the network."""
if self.batch_first:
input = input.transpose(0,1)
output = []
steps = range(input.size(0))
for i in steps:
hidden = self.lstm_cell(input[i], hidden)
hy, cy = hidden
h_tilde, alpha = self.attention_layer(hy, ctx, mask=ctx_mask)
output.append(h_tilde)
output = torch.cat(output, 0).view(input.size(0), *output[0].size())
if self.batch_first:
output = output.transpose(0,1)
return output, hidden
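if __name__ == "__main__":
    # Minimal shape-check sketch (not part of the original module): attend over a
    # source context of length 7 with a batch of 3 decoder states.
    dim = 16
    attn = SoftDotAttention(dim)
    dec_state = torch.randn(3, dim)
    context = torch.randn(3, 7, dim)
    h_tilde, weights = attn(dec_state, context)
    print(h_tilde.size(), weights.size())  # (3, 16) and (3, 7)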
| stanfordnlp-master | stanfordnlp/models/common/seq2seq_modules.py |
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_sequence, pad_packed_sequence, pack_padded_sequence, PackedSequence
from stanfordnlp.models.common.packed_lstm import PackedLSTM
from stanfordnlp.models.common.utils import tensor_unsort
class CharacterModel(nn.Module):
def __init__(self, args, vocab, pad=False):
super().__init__()
self.args = args
self.pad = pad
# char embeddings
self.char_emb = nn.Embedding(len(vocab['char']), self.args['char_emb_dim'], padding_idx=0)
self.char_attn = nn.Linear(self.args['char_hidden_dim'], 1, bias=False)
self.char_attn.weight.data.zero_()
# modules
self.charlstm = PackedLSTM(self.args['char_emb_dim'], self.args['char_hidden_dim'], self.args['char_num_layers'], batch_first=True, dropout=0 if self.args['char_num_layers'] == 1 else args['dropout'], rec_dropout = self.args['char_rec_dropout'])
self.charlstm_h_init = nn.Parameter(torch.zeros(self.args['char_num_layers'], 1, self.args['char_hidden_dim']))
self.charlstm_c_init = nn.Parameter(torch.zeros(self.args['char_num_layers'], 1, self.args['char_hidden_dim']))
self.dropout = nn.Dropout(args['dropout'])
def forward(self, chars, chars_mask, word_orig_idx, sentlens, wordlens):
embs = self.dropout(self.char_emb(chars))
batch_size = embs.size(0)
embs = pack_padded_sequence(embs, wordlens, batch_first=True)
char_reps = self.charlstm(embs, wordlens, hx=(self.charlstm_h_init.expand(self.args['char_num_layers'], batch_size, self.args['char_hidden_dim']).contiguous(), self.charlstm_c_init.expand(self.args['char_num_layers'], batch_size, self.args['char_hidden_dim']).contiguous()))[0]
# attention
weights = torch.sigmoid(self.char_attn(self.dropout(char_reps.data)))
char_reps = PackedSequence(char_reps.data * weights, char_reps.batch_sizes)
char_reps, _ = pad_packed_sequence(char_reps, batch_first=True)
res = char_reps.sum(1)
res = tensor_unsort(res, word_orig_idx)
res = pack_sequence(res.split(sentlens))
if self.pad:
res = pad_packed_sequence(res, batch_first=True)[0]
return res
| stanfordnlp-master | stanfordnlp/models/common/char_model.py |
import torch
class Trainer:
def change_lr(self, new_lr):
for param_group in self.optimizer.param_groups:
param_group['lr'] = new_lr
def save(self, filename):
savedict = {
'model': self.model.state_dict(),
'optimizer': self.optimizer.state_dict()
}
torch.save(savedict, filename)
def load(self, filename):
savedict = torch.load(filename, lambda storage, loc: storage)
self.model.load_state_dict(savedict['model'])
if self.args['mode'] == 'train':
self.optimizer.load_state_dict(savedict['optimizer'])
| stanfordnlp-master | stanfordnlp/models/common/trainer.py |
import torch
import torch.nn as nn
class WordDropout(nn.Module):
def __init__(self, dropprob):
super().__init__()
self.dropprob = dropprob
def forward(self, x, replacement=None):
if not self.training or self.dropprob == 0:
return x
masksize = [y for y in x.size()]
masksize[-1] = 1
dropmask = torch.rand(*masksize, device=x.device) < self.dropprob
res = x.masked_fill(dropmask, 0)
if replacement is not None:
res = res + dropmask.float() * replacement
return res
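if __name__ == "__main__":
    # Minimal sketch (not part of the original module): WordDropout zeroes whole
    # feature vectors (the last dimension) with probability dropprob and is a
    # no-op in eval mode.
    drop = WordDropout(0.5)
    x = torch.ones(2, 4, 3)  # batch x seq_len x feat
    drop.train()
    print(drop(x))
    drop.eval()
    print(drop(x))  # identical to x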
| stanfordnlp-master | stanfordnlp/models/common/dropout.py |
"""
Global constants.
"""
lcode2lang = {
"af": "Afrikaans",
"grc": "Ancient_Greek",
"ar": "Arabic",
"hy": "Armenian",
"eu": "Basque",
"br": "Breton",
"bg": "Bulgarian",
"bxr": "Buryat",
"ca": "Catalan",
"zh": "Chinese",
"hr": "Croatian",
"cs": "Czech",
"da": "Danish",
"nl": "Dutch",
"en": "English",
"et": "Estonian",
"fo": "Faroese",
"fi": "Finnish",
"fr": "French",
"gl": "Galician",
"de": "German",
"got": "Gothic",
"el": "Greek",
"he": "Hebrew",
"hi": "Hindi",
"hu": "Hungarian",
"id": "Indonesian",
"ga": "Irish",
"it": "Italian",
"ja": "Japanese",
"kk": "Kazakh",
"ko": "Korean",
"kmr": "Kurmanji",
"la": "Latin",
"lv": "Latvian",
"pcm": "Naija",
"sme": "North_Sami",
"no_bokmaal": "Norwegian-Bokmaal",
"no_nynorsk": "Norwegian-Nynorsk",
"no_nynorsklia": "Norwegian-Nynorsk",
"cu": "Old_Church_Slavonic",
"fro": "Old_French",
"fa": "Persian",
"pl": "Polish",
"pt": "Portuguese",
"ro": "Romanian",
"ru": "Russian",
"sr": "Serbian",
"sk": "Slovak",
"sl": "Slovenian",
"es": "Spanish",
"sv": "Swedish",
"th": "Thai",
"tr": "Turkish",
"uk": "Ukrainian",
"hsb": "Upper_Sorbian",
"ur": "Urdu",
"ug": "Uyghur",
"vi": "Vietnamese",
}
lang2lcode = {lcode2lang[k]: k for k in lcode2lang}
| stanfordnlp-master | stanfordnlp/models/common/constant.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence, pack_sequence, PackedSequence
class PackedLSTM(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, bidirectional=False, pad=False, rec_dropout=0):
super().__init__()
self.batch_first = batch_first
self.pad = pad
if rec_dropout == 0:
# use the fast, native LSTM implementation
self.lstm = nn.LSTM(input_size, hidden_size, num_layers, bias=bias, batch_first=batch_first, dropout=dropout, bidirectional=bidirectional)
else:
self.lstm = LSTMwRecDropout(input_size, hidden_size, num_layers, bias=bias, batch_first=batch_first, dropout=dropout, bidirectional=bidirectional, rec_dropout=rec_dropout)
def forward(self, input, lengths, hx=None):
if not isinstance(input, PackedSequence):
input = pack_padded_sequence(input, lengths, batch_first=self.batch_first)
res = self.lstm(input, hx)
if self.pad:
res = (pad_packed_sequence(res[0], batch_first=self.batch_first)[0], res[1])
return res
class LSTMwRecDropout(nn.Module):
""" An LSTM implementation that supports recurrent dropout """
def __init__(self, input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, bidirectional=False, pad=False, rec_dropout=0):
super().__init__()
self.batch_first = batch_first
self.pad = pad
self.num_layers = num_layers
self.hidden_size = hidden_size
self.dropout = dropout
self.drop = nn.Dropout(dropout, inplace=True)
self.rec_drop = nn.Dropout(rec_dropout, inplace=True)
self.num_directions = 2 if bidirectional else 1
self.cells = nn.ModuleList()
for l in range(num_layers):
in_size = input_size if l == 0 else self.num_directions * hidden_size
for d in range(self.num_directions):
self.cells.append(nn.LSTMCell(in_size, hidden_size, bias=bias))
def forward(self, input, hx=None):
def rnn_loop(x, batch_sizes, cell, inits, reverse=False):
# RNN loop for one layer in one direction with recurrent dropout
# Assumes input is PackedSequence, returns PackedSequence as well
batch_size = batch_sizes[0].item()
states = [list(init.split([1] * batch_size)) for init in inits]
h_drop_mask = x.new_ones(batch_size, self.hidden_size)
h_drop_mask = self.rec_drop(h_drop_mask)
resh = []
if not reverse:
st = 0
for bs in batch_sizes:
s1 = cell(x[st:st+bs], (torch.cat(states[0][:bs], 0) * h_drop_mask[:bs], torch.cat(states[1][:bs], 0)))
resh.append(s1[0])
for j in range(bs):
states[0][j] = s1[0][j].unsqueeze(0)
states[1][j] = s1[1][j].unsqueeze(0)
st += bs
else:
en = x.size(0)
for i in range(batch_sizes.size(0)-1, -1, -1):
bs = batch_sizes[i]
s1 = cell(x[en-bs:en], (torch.cat(states[0][:bs], 0) * h_drop_mask[:bs], torch.cat(states[1][:bs], 0)))
resh.append(s1[0])
for j in range(bs):
states[0][j] = s1[0][j].unsqueeze(0)
states[1][j] = s1[1][j].unsqueeze(0)
en -= bs
resh = list(reversed(resh))
return torch.cat(resh, 0), tuple(torch.cat(s, 0) for s in states)
all_states = [[], []]
inputdata, batch_sizes = input.data, input.batch_sizes
for l in range(self.num_layers):
new_input = []
if self.dropout > 0 and l > 0:
inputdata = self.drop(inputdata)
for d in range(self.num_directions):
idx = l * self.num_directions + d
cell = self.cells[idx]
out, states = rnn_loop(inputdata, batch_sizes, cell, (hx[i][idx] for i in range(2)) if hx is not None else (input.data.new_zeros(input.batch_sizes[0].item(), self.hidden_size, requires_grad=False) for _ in range(2)), reverse=(d == 1))
new_input.append(out)
all_states[0].append(states[0].unsqueeze(0))
all_states[1].append(states[1].unsqueeze(0))
if self.num_directions > 1:
# concatenate both directions
inputdata = torch.cat(new_input, 1)
else:
inputdata = new_input[0]
input = PackedSequence(inputdata, batch_sizes)
return input, tuple(torch.cat(x, 0) for x in all_states)
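if __name__ == "__main__":
    # Minimal sketch (not part of the original module): PackedLSTM packs a padded
    # batch itself (lengths must be sorted in decreasing order) and, with pad=True,
    # returns a padded tensor again; rec_dropout > 0 selects LSTMwRecDropout.
    lstm = PackedLSTM(10, 20, num_layers=2, batch_first=True, bidirectional=True, pad=True, rec_dropout=0.1)
    x = torch.randn(3, 5, 10)
    out, (hn, cn) = lstm(x, [5, 4, 2])
    print(out.size(), hn.size(), cn.size())  # (3, 5, 40), (4, 3, 20), (4, 3, 20)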
| stanfordnlp-master | stanfordnlp/models/common/packed_lstm.py |
from __future__ import division
import torch
import stanfordnlp.models.common.seq2seq_constant as constant
"""
Adapted and modified from the OpenNMT project.
Class for managing the internals of the beam search process.
hyp1-hyp1---hyp1 -hyp1
\ /
hyp2 \-hyp2 /-hyp2hyp2
/ \
hyp3-hyp3---hyp3 -hyp3
========================
Takes care of beams, back pointers, and scores.
"""
class Beam(object):
def __init__(self, size, cuda=False):
self.size = size
self.done = False
self.tt = torch.cuda if cuda else torch
# The score for each translation on the beam.
self.scores = self.tt.FloatTensor(size).zero_()
self.allScores = []
# The backpointers at each time-step.
self.prevKs = []
# The outputs at each time-step.
self.nextYs = [self.tt.LongTensor(size).fill_(constant.PAD_ID)]
self.nextYs[0][0] = constant.SOS_ID
# The copy indices for each time
self.copy = []
def get_current_state(self):
"Get the outputs for the current timestep."
return self.nextYs[-1]
def get_current_origin(self):
"Get the backpointers for the current timestep."
return self.prevKs[-1]
def advance(self, wordLk, copy_indices=None):
"""
        Given the word probabilities `wordLk` for every hypothesis on the last beam,
        compute and update the beam search.
Parameters:
* `wordLk`- probs of advancing from the last step (K x words)
* `copy_indices` - copy indices (K x ctx_len)
Returns: True if beam search is complete.
"""
if self.done:
return True
numWords = wordLk.size(1)
# Sum the previous scores.
if len(self.prevKs) > 0:
beamLk = wordLk + self.scores.unsqueeze(1).expand_as(wordLk)
else:
# first step, expand from the first position
beamLk = wordLk[0]
flatBeamLk = beamLk.view(-1)
bestScores, bestScoresId = flatBeamLk.topk(self.size, 0, True, True)
self.allScores.append(self.scores)
self.scores = bestScores
# bestScoresId is flattened beam x word array, so calculate which
# word and beam each score came from
prevK = bestScoresId / numWords
self.prevKs.append(prevK)
self.nextYs.append(bestScoresId - prevK * numWords)
if copy_indices is not None:
self.copy.append(copy_indices.index_select(0, prevK))
# End condition is when top-of-beam is EOS.
if self.nextYs[-1][0] == constant.EOS_ID:
self.done = True
self.allScores.append(self.scores)
return self.done
def sort_best(self):
return torch.sort(self.scores, 0, True)
def get_best(self):
"Get the score of the best in the beam."
        scores, ids = self.sort_best()
return scores[1], ids[1]
def get_hyp(self, k):
"""
Walk back to construct the full hypothesis.
Parameters:
* `k` - the position in the beam to construct.
Returns: The hypothesis
"""
hyp = []
cpy = []
for j in range(len(self.prevKs) - 1, -1, -1):
hyp.append(self.nextYs[j+1][k])
if len(self.copy) > 0:
cpy.append(self.copy[j][k])
k = self.prevKs[j][k]
hyp = hyp[::-1]
cpy = cpy[::-1]
# postprocess: if cpy index is not -1, use cpy index instead of hyp word
for i,cidx in enumerate(cpy):
if cidx >= 0:
hyp[i] = -(cidx+1) # make index 1-based and flip it for token generation
return hyp
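if __name__ == "__main__":
    # Minimal sketch (not part of the original module), assuming the PyTorch
    # version this repo targets (where `/` on LongTensors is integer division):
    # advance a beam of size 2 over a toy vocabulary, then read back a hypothesis.
    beam = Beam(2, cuda=False)
    for _ in range(3):
        word_lk = torch.log_softmax(torch.randn(2, 6), dim=1)  # beam x vocab
        if beam.advance(word_lk):
            break
    print(beam.get_hyp(0))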
| stanfordnlp-master | stanfordnlp/models/common/beam.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence, pack_sequence, PackedSequence
from stanfordnlp.models.common.packed_lstm import PackedLSTM
# Highway LSTM Cell (Zhang et al. (2016) Highway Long Short-Term Memory RNNs for Distant Speech Recognition)
class HLSTMCell(nn.modules.rnn.RNNCellBase):
def __init__(self, input_size, hidden_size, bias=True):
super(HLSTMCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
# LSTM parameters
self.Wi = nn.Linear(input_size + hidden_size, hidden_size, bias=bias)
self.Wf = nn.Linear(input_size + hidden_size, hidden_size, bias=bias)
self.Wo = nn.Linear(input_size + hidden_size, hidden_size, bias=bias)
self.Wg = nn.Linear(input_size + hidden_size, hidden_size, bias=bias)
# highway gate parameters
self.gate = nn.Linear(input_size + 2 * hidden_size, hidden_size, bias=bias)
def forward(self, input, c_l_minus_one=None, hx=None):
self.check_forward_input(input)
if hx is None:
hx = input.new_zeros(input.size(0), self.hidden_size, requires_grad=False)
hx = (hx, hx)
if c_l_minus_one is None:
c_l_minus_one = input.new_zeros(input.size(0), self.hidden_size, requires_grad=False)
self.check_forward_hidden(input, hx[0], '[0]')
self.check_forward_hidden(input, hx[1], '[1]')
self.check_forward_hidden(input, c_l_minus_one, 'c_l_minus_one')
# vanilla LSTM computation
rec_input = torch.cat([input, hx[0]], 1)
        i = F.sigmoid(self.Wi(rec_input))
        f = F.sigmoid(self.Wf(rec_input))
        o = F.sigmoid(self.Wo(rec_input))
        g = F.tanh(self.Wg(rec_input))
# highway gates
gate = F.sigmoid(self.gate(torch.cat([c_l_minus_one, hx[1], input], 1)))
c = gate * c_l_minus_one + f * hx[1] + i * g
h = o * F.tanh(c)
return h, c
# Highway LSTM network, does NOT use the HLSTMCell above
class HighwayLSTM(nn.Module):
def __init__(self, input_size, hidden_size,
num_layers=1, bias=True, batch_first=False,
dropout=0, bidirectional=False, rec_dropout=0, highway_func=None, pad=False):
super(HighwayLSTM, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.dropout_state = {}
self.bidirectional = bidirectional
self.num_directions = 2 if bidirectional else 1
self.highway_func = highway_func
self.pad = pad
self.lstm = nn.ModuleList()
self.highway = nn.ModuleList()
self.gate = nn.ModuleList()
self.drop = nn.Dropout(dropout, inplace=True)
in_size = input_size
for l in range(num_layers):
self.lstm.append(PackedLSTM(in_size, hidden_size, num_layers=1, bias=bias,
batch_first=batch_first, dropout=0, bidirectional=bidirectional, rec_dropout=rec_dropout))
self.highway.append(nn.Linear(in_size, hidden_size * self.num_directions))
self.gate.append(nn.Linear(in_size, hidden_size * self.num_directions))
self.highway[-1].bias.data.zero_()
self.gate[-1].bias.data.zero_()
in_size = hidden_size * self.num_directions
def forward(self, input, seqlens, hx=None):
highway_func = (lambda x: x) if self.highway_func is None else self.highway_func
hs = []
cs = []
if not isinstance(input, PackedSequence):
input = pack_padded_sequence(input, seqlens, batch_first=self.batch_first)
for l in range(self.num_layers):
if l > 0:
input = PackedSequence(self.drop(input.data), input.batch_sizes)
layer_hx = (hx[0][l * self.num_directions:(l+1)*self.num_directions], hx[1][l * self.num_directions:(l+1)*self.num_directions]) if hx is not None else None
h, (ht, ct) = self.lstm[l](input, seqlens, layer_hx)
hs.append(ht)
cs.append(ct)
input = PackedSequence(h.data + torch.sigmoid(self.gate[l](input.data)) * highway_func(self.highway[l](input.data)), input.batch_sizes)
if self.pad:
input = pad_packed_sequence(input, batch_first=self.batch_first)[0]
return input, (torch.cat(hs, 0), torch.cat(cs, 0))
if __name__ == "__main__":
T = 10
bidir = True
num_dir = 2 if bidir else 1
rnn = HighwayLSTM(10, 20, num_layers=2, bidirectional=True)
input = torch.randn(T, 3, 10)
hx = torch.randn(2 * num_dir, 3, 20)
cx = torch.randn(2 * num_dir, 3, 20)
    output = rnn(input, [T] * 3, (hx, cx))  # forward expects (input, seqlens, hx)
print(output)
| stanfordnlp-master | stanfordnlp/models/common/hlstm.py |
"""
Utility functions for data transformations.
"""
import torch
import stanfordnlp.models.common.seq2seq_constant as constant
def map_to_ids(tokens, vocab):
ids = [vocab[t] if t in vocab else constant.UNK_ID for t in tokens]
return ids
def get_long_tensor(tokens_list, batch_size, pad_id=constant.PAD_ID):
""" Convert (list of )+ tokens to a padded LongTensor. """
sizes = []
x = tokens_list
while isinstance(x[0], list):
sizes.append(max(len(y) for y in x))
x = [z for y in x for z in y]
tokens = torch.LongTensor(batch_size, *sizes).fill_(pad_id)
for i, s in enumerate(tokens_list):
tokens[i, :len(s)] = torch.LongTensor(s)
return tokens
def get_float_tensor(features_list, batch_size):
if features_list is None or features_list[0] is None:
return None
seq_len = max(len(x) for x in features_list)
feature_len = len(features_list[0][0])
features = torch.FloatTensor(batch_size, seq_len, feature_len).zero_()
for i,f in enumerate(features_list):
features[i,:len(f),:] = torch.FloatTensor(f)
return features
def sort_all(batch, lens):
""" Sort all fields by descending order of lens, and return the original indices. """
unsorted_all = [lens] + [range(len(lens))] + list(batch)
sorted_all = [list(t) for t in zip(*sorted(zip(*unsorted_all), reverse=True))]
return sorted_all[2:], sorted_all[1]
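if __name__ == "__main__":
    # Minimal sketch (not part of the original module): pad two id sequences into
    # a LongTensor and sort a batch field by descending length.
    seqs = [[4, 5, 6], [7, 8]]
    print(get_long_tensor(seqs, len(seqs)))  # 2 x 3, short row padded with constant.PAD_ID
    sorted_fields, orig_idx = sort_all([seqs], [len(s) for s in seqs])
    print(sorted_fields[0], orig_idx)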
| stanfordnlp-master | stanfordnlp/models/common/data.py |
from collections import Counter, OrderedDict
from stanfordnlp.models.common.vocab import BaseVocab, BaseMultiVocab
from stanfordnlp.models.common.vocab import CompositeVocab, VOCAB_PREFIX, EMPTY, EMPTY_ID
class CharVocab(BaseVocab):
def build_vocab(self):
counter = Counter([c for sent in self.data for w in sent for c in w[self.idx]])
self._id2unit = VOCAB_PREFIX + list(sorted(list(counter.keys()), key=lambda k: counter[k], reverse=True))
self._unit2id = {w:i for i, w in enumerate(self._id2unit)}
class WordVocab(BaseVocab):
def __init__(self, data=None, lang="", idx=0, cutoff=0, lower=False, ignore=[]):
self.ignore = ignore
super().__init__(data, lang=lang, idx=idx, cutoff=cutoff, lower=lower)
self.state_attrs += ['ignore']
def id2unit(self, id):
if len(self.ignore) > 0 and id == EMPTY_ID:
return '_'
else:
return super().id2unit(id)
def unit2id(self, unit):
if len(self.ignore) > 0 and unit in self.ignore:
return self._unit2id[EMPTY]
else:
return super().unit2id(unit)
def build_vocab(self):
if self.lower:
counter = Counter([w[self.idx].lower() for sent in self.data for w in sent])
else:
counter = Counter([w[self.idx] for sent in self.data for w in sent])
for k in list(counter.keys()):
if counter[k] < self.cutoff or k in self.ignore:
del counter[k]
self._id2unit = VOCAB_PREFIX + list(sorted(list(counter.keys()), key=lambda k: counter[k], reverse=True))
self._unit2id = {w:i for i, w in enumerate(self._id2unit)}
class XPOSVocab(CompositeVocab):
def __init__(self, data=None, lang="", idx=0, sep="", keyed=False):
super().__init__(data, lang, idx=idx, sep=sep, keyed=keyed)
class FeatureVocab(CompositeVocab):
def __init__(self, data=None, lang="", idx=0, sep="|", keyed=True):
super().__init__(data, lang, idx=idx, sep=sep, keyed=keyed)
class MultiVocab(BaseMultiVocab):
def state_dict(self):
""" Also save a vocab name to class name mapping in state dict. """
state = OrderedDict()
key2class = OrderedDict()
for k, v in self._vocabs.items():
state[k] = v.state_dict()
key2class[k] = type(v).__name__
state['_key2class'] = key2class
return state
@classmethod
def load_state_dict(cls, state_dict):
class_dict = {'CharVocab': CharVocab,
'WordVocab': WordVocab,
'XPOSVocab': XPOSVocab,
'FeatureVocab': FeatureVocab}
new = cls()
assert '_key2class' in state_dict, "Cannot find class name mapping in state dict!"
key2class = state_dict.pop('_key2class')
for k,v in state_dict.items():
classname = key2class[k]
new[k] = class_dict[classname].load_state_dict(v)
return new
| stanfordnlp-master | stanfordnlp/models/pos/vocab.py |
# This is the XPOS factory method generated automatically from models.pos.build_xpos_factory.
# Please don't edit it!
from stanfordnlp.models.pos.vocab import WordVocab, XPOSVocab
def xpos_vocab_factory(data, shorthand):
if shorthand in ["af_afribooms", "grc_perseus", "ar_padt", "bg_btb", "cs_cac", "cs_fictree", "cs_pdt", "gl_ctg", "gl_treegal", "it_isdt", "it_postwita", "la_perseus", "lv_lvtb", "ro_rrt", "sk_snk", "sl_ssj", "sl_sst", "uk_iu"]:
return XPOSVocab(data, shorthand, idx=2, sep="")
elif shorthand in ["grc_proiel", "hy_armtdp", "eu_bdt", "br_keb", "bxr_bdt", "ca_ancora", "zh_gsd", "hr_set", "cs_pud", "da_ddt", "en_ewt", "en_gum", "en_pud", "et_edt", "fo_oft", "fi_pud", "fi_tdt", "fr_gsd", "fr_sequoia", "fr_spoken", "de_gsd", "got_proiel", "el_gdt", "he_htb", "hi_hdtb", "hu_szeged", "ga_idt", "ja_gsd", "ja_modern", "kk_ktb", "kmr_mg", "la_proiel", "pcm_nsc", "sme_giella", "no_bokmaal", "no_nynorsk", "no_nynorsklia", "cu_proiel", "fro_srcmf", "fa_seraji", "pt_bosque", "ru_syntagrus", "ru_taiga", "sr_set", "es_ancora", "sv_pud", "th_pud", "tr_imst", "hsb_ufal", "ug_udt", "vi_vtb"]:
return WordVocab(data, shorthand, idx=2, ignore=["_"])
elif shorthand in ["nl_alpino", "nl_lassysmall", "la_ittb", "sv_talbanken"]:
return XPOSVocab(data, shorthand, idx=2, sep="|")
elif shorthand in ["en_lines", "sv_lines", "ur_udtb"]:
return XPOSVocab(data, shorthand, idx=2, sep="-")
elif shorthand in ["fi_ftb"]:
return XPOSVocab(data, shorthand, idx=2, sep=",")
elif shorthand in ["id_gsd", "ko_gsd", "ko_kaist"]:
return XPOSVocab(data, shorthand, idx=2, sep="+")
elif shorthand in ["pl_lfg", "pl_sz"]:
return XPOSVocab(data, shorthand, idx=2, sep=":")
else:
raise NotImplementedError('Language shorthand "{}" not found!'.format(shorthand))
| stanfordnlp-master | stanfordnlp/models/pos/xpos_vocab_factory.py |
from collections import defaultdict
import os
import sys
import warnings
from stanfordnlp.models.common.vocab import VOCAB_PREFIX
from stanfordnlp.models.pos.vocab import XPOSVocab, WordVocab
from stanfordnlp.models.common.conll import CoNLLFile
if len(sys.argv) != 3:
print('Usage: {} short_to_tb_file output_factory_file'.format(sys.argv[0]))
sys.exit(0)
# Read list of all treebanks of concern
short_to_tb_file, output_file = sys.argv[1:]
shorthands = []
fullnames = []
with open(short_to_tb_file) as f:
for line in f:
line = line.strip().split()
shorthands.append(line[0])
fullnames.append(line[1])
# For each treebank, we would like to find the XPOS Vocab configuration that minimizes
# the number of total classes needed to predict by all tagger classifiers. This is
# achieved by enumerating different options of separators that different treebanks might
# use, and comparing that to treating the XPOS tags as separate categories (using a
# WordVocab).
mapping = defaultdict(list)
for sh, fn in zip(shorthands, fullnames):
print('Resolving vocab option for {}...'.format(sh))
if not os.path.exists('data/pos/{}.train.in.conllu'.format(sh)):
        warnings.warn('Training data for {} not found in the data directory, falling back to using WordVocab. To generate the '
                      'XPOS vocabulary for this treebank properly, please run the following command first:\n'
                      '\tbash scripts/prep_pos_data.sh {}'.format(fn, fn))
# without the training file, there's not much we can do
key = 'WordVocab(data, shorthand, idx=2)'
mapping[key].append(sh)
continue
conll_file = CoNLLFile('data/pos/{}.train.in.conllu'.format(sh))
data = conll_file.get(['word', 'upos', 'xpos', 'feats'], as_sentences=True)
vocab = WordVocab(data, sh, idx=2, ignore=["_"])
key = 'WordVocab(data, shorthand, idx=2, ignore=["_"])'
best_size = len(vocab) - len(VOCAB_PREFIX)
if best_size > 20:
for sep in ['', '-', '+', '|', ',', ':']: # separators
vocab = XPOSVocab(data, sh, idx=2, sep=sep)
length = sum(len(x) - len(VOCAB_PREFIX) for x in vocab._id2unit.values())
if length < best_size:
key = 'XPOSVocab(data, shorthand, idx=2, sep="{}")'.format(sep)
best_size = length
mapping[key].append(sh)
# Generate code. This takes the XPOS vocabulary classes selected above, and generates the
# actual factory class as seen in models.pos.xpos_vocab_factory.
first = True
with open(output_file, 'w') as f:
print('''# This is the XPOS factory method generated automatically from models.pos.build_xpos_factory.
# Please don't edit it!
from stanfordnlp.models.pos.vocab import WordVocab, XPOSVocab
def xpos_vocab_factory(data, shorthand):''', file=f)
for key in mapping:
print(" {} shorthand in [{}]:".format('if' if first else 'elif', ', '.join(['"{}"'.format(x) for x in mapping[key]])), file=f)
print(" return {}".format(key), file=f)
first = False
print(''' else:
raise NotImplementedError('Language shorthand "{}" not found!'.format(shorthand))''', file=f)
print('Done!')
| stanfordnlp-master | stanfordnlp/models/pos/build_xpos_vocab_factory.py |
stanfordnlp-master | stanfordnlp/models/pos/__init__.py |
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence, pack_sequence, PackedSequence
from stanfordnlp.models.common.biaffine import BiaffineScorer
from stanfordnlp.models.common.hlstm import HighwayLSTM
from stanfordnlp.models.common.dropout import WordDropout
from stanfordnlp.models.common.vocab import CompositeVocab
from stanfordnlp.models.common.char_model import CharacterModel
class Tagger(nn.Module):
def __init__(self, args, vocab, emb_matrix=None, share_hid=False):
super().__init__()
self.vocab = vocab
self.args = args
self.share_hid = share_hid
self.unsaved_modules = []
def add_unsaved_module(name, module):
self.unsaved_modules += [name]
setattr(self, name, module)
# input layers
input_size = 0
if self.args['word_emb_dim'] > 0:
# frequent word embeddings
self.word_emb = nn.Embedding(len(vocab['word']), self.args['word_emb_dim'], padding_idx=0)
input_size += self.args['word_emb_dim']
if not share_hid:
# upos embeddings
self.upos_emb = nn.Embedding(len(vocab['upos']), self.args['tag_emb_dim'], padding_idx=0)
if self.args['char'] and self.args['char_emb_dim'] > 0:
self.charmodel = CharacterModel(args, vocab)
self.trans_char = nn.Linear(self.args['char_hidden_dim'], self.args['transformed_dim'], bias=False)
input_size += self.args['transformed_dim']
if self.args['pretrain']:
# pretrained embeddings, by default this won't be saved into model file
add_unsaved_module('pretrained_emb', nn.Embedding.from_pretrained(torch.from_numpy(emb_matrix), freeze=True))
self.trans_pretrained = nn.Linear(emb_matrix.shape[1], self.args['transformed_dim'], bias=False)
input_size += self.args['transformed_dim']
# recurrent layers
self.taggerlstm = HighwayLSTM(input_size, self.args['hidden_dim'], self.args['num_layers'], batch_first=True, bidirectional=True, dropout=self.args['dropout'], rec_dropout=self.args['rec_dropout'], highway_func=torch.tanh)
self.drop_replacement = nn.Parameter(torch.randn(input_size) / np.sqrt(input_size))
self.taggerlstm_h_init = nn.Parameter(torch.zeros(2 * self.args['num_layers'], 1, self.args['hidden_dim']))
self.taggerlstm_c_init = nn.Parameter(torch.zeros(2 * self.args['num_layers'], 1, self.args['hidden_dim']))
# classifiers
self.upos_hid = nn.Linear(self.args['hidden_dim'] * 2, self.args['deep_biaff_hidden_dim'])
self.upos_clf = nn.Linear(self.args['deep_biaff_hidden_dim'], len(vocab['upos']))
self.upos_clf.weight.data.zero_()
self.upos_clf.bias.data.zero_()
if share_hid:
clf_constructor = lambda insize, outsize: nn.Linear(insize, outsize)
else:
self.xpos_hid = nn.Linear(self.args['hidden_dim'] * 2, self.args['deep_biaff_hidden_dim'] if not isinstance(vocab['xpos'], CompositeVocab) else self.args['composite_deep_biaff_hidden_dim'])
self.ufeats_hid = nn.Linear(self.args['hidden_dim'] * 2, self.args['composite_deep_biaff_hidden_dim'])
clf_constructor = lambda insize, outsize: BiaffineScorer(insize, self.args['tag_emb_dim'], outsize)
if isinstance(vocab['xpos'], CompositeVocab):
self.xpos_clf = nn.ModuleList()
for l in vocab['xpos'].lens():
self.xpos_clf.append(clf_constructor(self.args['composite_deep_biaff_hidden_dim'], l))
else:
self.xpos_clf = clf_constructor(self.args['deep_biaff_hidden_dim'], len(vocab['xpos']))
if share_hid:
self.xpos_clf.weight.data.zero_()
self.xpos_clf.bias.data.zero_()
self.ufeats_clf = nn.ModuleList()
for l in vocab['feats'].lens():
if share_hid:
self.ufeats_clf.append(clf_constructor(self.args['deep_biaff_hidden_dim'], l))
self.ufeats_clf[-1].weight.data.zero_()
self.ufeats_clf[-1].bias.data.zero_()
else:
self.ufeats_clf.append(clf_constructor(self.args['composite_deep_biaff_hidden_dim'], l))
# criterion
self.crit = nn.CrossEntropyLoss(ignore_index=0) # ignore padding
self.drop = nn.Dropout(args['dropout'])
self.worddrop = WordDropout(args['word_dropout'])
def forward(self, word, word_mask, wordchars, wordchars_mask, upos, xpos, ufeats, pretrained, word_orig_idx, sentlens, wordlens):
def pack(x):
return pack_padded_sequence(x, sentlens, batch_first=True)
inputs = []
if self.args['word_emb_dim'] > 0:
word_emb = self.word_emb(word)
word_emb = pack(word_emb)
inputs += [word_emb]
if self.args['pretrain']:
pretrained_emb = self.pretrained_emb(pretrained)
pretrained_emb = self.trans_pretrained(pretrained_emb)
pretrained_emb = pack(pretrained_emb)
inputs += [pretrained_emb]
def pad(x):
return pad_packed_sequence(PackedSequence(x, word_emb.batch_sizes), batch_first=True)[0]
if self.args['char'] and self.args['char_emb_dim'] > 0:
char_reps = self.charmodel(wordchars, wordchars_mask, word_orig_idx, sentlens, wordlens)
char_reps = PackedSequence(self.trans_char(self.drop(char_reps.data)), char_reps.batch_sizes)
inputs += [char_reps]
lstm_inputs = torch.cat([x.data for x in inputs], 1)
lstm_inputs = self.worddrop(lstm_inputs, self.drop_replacement)
lstm_inputs = self.drop(lstm_inputs)
lstm_inputs = PackedSequence(lstm_inputs, inputs[0].batch_sizes)
lstm_outputs, _ = self.taggerlstm(lstm_inputs, sentlens, hx=(self.taggerlstm_h_init.expand(2 * self.args['num_layers'], word.size(0), self.args['hidden_dim']).contiguous(), self.taggerlstm_c_init.expand(2 * self.args['num_layers'], word.size(0), self.args['hidden_dim']).contiguous()))
lstm_outputs = lstm_outputs.data
upos_hid = F.relu(self.upos_hid(self.drop(lstm_outputs)))
upos_pred = self.upos_clf(self.drop(upos_hid))
preds = [pad(upos_pred).max(2)[1]]
upos = pack(upos).data
loss = self.crit(upos_pred.view(-1, upos_pred.size(-1)), upos.view(-1))
if self.share_hid:
xpos_hid = upos_hid
ufeats_hid = upos_hid
clffunc = lambda clf, hid: clf(self.drop(hid))
else:
xpos_hid = F.relu(self.xpos_hid(self.drop(lstm_outputs)))
ufeats_hid = F.relu(self.ufeats_hid(self.drop(lstm_outputs)))
if self.training:
upos_emb = self.upos_emb(upos)
else:
upos_emb = self.upos_emb(upos_pred.max(1)[1])
clffunc = lambda clf, hid: clf(self.drop(hid), self.drop(upos_emb))
xpos = pack(xpos).data
if isinstance(self.vocab['xpos'], CompositeVocab):
xpos_preds = []
for i in range(len(self.vocab['xpos'])):
xpos_pred = clffunc(self.xpos_clf[i], xpos_hid)
loss += self.crit(xpos_pred.view(-1, xpos_pred.size(-1)), xpos[:, i].view(-1))
xpos_preds.append(pad(xpos_pred).max(2, keepdim=True)[1])
preds.append(torch.cat(xpos_preds, 2))
else:
xpos_pred = clffunc(self.xpos_clf, xpos_hid)
loss += self.crit(xpos_pred.view(-1, xpos_pred.size(-1)), xpos.view(-1))
preds.append(pad(xpos_pred).max(2)[1])
ufeats_preds = []
ufeats = pack(ufeats).data
for i in range(len(self.vocab['feats'])):
ufeats_pred = clffunc(self.ufeats_clf[i], ufeats_hid)
loss += self.crit(ufeats_pred.view(-1, ufeats_pred.size(-1)), ufeats[:, i].view(-1))
ufeats_preds.append(pad(ufeats_pred).max(2, keepdim=True)[1])
preds.append(torch.cat(ufeats_preds, 2))
return loss, preds
| stanfordnlp-master | stanfordnlp/models/pos/model.py |
"""
A trainer class to handle training and testing of models.
"""
import sys
import torch
from torch import nn
from stanfordnlp.models.common.trainer import Trainer as BaseTrainer
from stanfordnlp.models.common import utils, loss
from stanfordnlp.models.pos.model import Tagger
from stanfordnlp.models.pos.vocab import MultiVocab
def unpack_batch(batch, use_cuda):
""" Unpack a batch from the data loader. """
if use_cuda:
inputs = [b.cuda() if b is not None else None for b in batch[:8]]
else:
inputs = batch[:8]
orig_idx = batch[8]
word_orig_idx = batch[9]
sentlens = batch[10]
wordlens = batch[11]
return inputs, orig_idx, word_orig_idx, sentlens, wordlens
class Trainer(BaseTrainer):
""" A trainer for training models. """
def __init__(self, args=None, vocab=None, pretrain=None, model_file=None, use_cuda=False):
self.use_cuda = use_cuda
if model_file is not None:
# load everything from file
self.load(pretrain, model_file)
else:
assert all(var is not None for var in [args, vocab, pretrain])
# build model from scratch
self.args = args
self.vocab = vocab
self.model = Tagger(args, vocab, emb_matrix=pretrain.emb, share_hid=args['share_hid'])
self.parameters = [p for p in self.model.parameters() if p.requires_grad]
if self.use_cuda:
self.model.cuda()
else:
self.model.cpu()
self.optimizer = utils.get_optimizer(self.args['optim'], self.parameters, self.args['lr'], betas=(0.9, self.args['beta2']), eps=1e-6)
def update(self, batch, eval=False):
inputs, orig_idx, word_orig_idx, sentlens, wordlens = unpack_batch(batch, self.use_cuda)
word, word_mask, wordchars, wordchars_mask, upos, xpos, ufeats, pretrained = inputs
if eval:
self.model.eval()
else:
self.model.train()
self.optimizer.zero_grad()
loss, _ = self.model(word, word_mask, wordchars, wordchars_mask, upos, xpos, ufeats, pretrained, word_orig_idx, sentlens, wordlens)
loss_val = loss.data.item()
if eval:
return loss_val
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args['max_grad_norm'])
self.optimizer.step()
return loss_val
def predict(self, batch, unsort=True):
inputs, orig_idx, word_orig_idx, sentlens, wordlens = unpack_batch(batch, self.use_cuda)
word, word_mask, wordchars, wordchars_mask, upos, xpos, ufeats, pretrained = inputs
self.model.eval()
batch_size = word.size(0)
_, preds = self.model(word, word_mask, wordchars, wordchars_mask, upos, xpos, ufeats, pretrained, word_orig_idx, sentlens, wordlens)
upos_seqs = [self.vocab['upos'].unmap(sent) for sent in preds[0].tolist()]
xpos_seqs = [self.vocab['xpos'].unmap(sent) for sent in preds[1].tolist()]
feats_seqs = [self.vocab['feats'].unmap(sent) for sent in preds[2].tolist()]
pred_tokens = [[[upos_seqs[i][j], xpos_seqs[i][j], feats_seqs[i][j]] for j in range(sentlens[i])] for i in range(batch_size)]
if unsort:
pred_tokens = utils.unsort(pred_tokens, orig_idx)
return pred_tokens
def save(self, filename, skip_modules=True):
model_state = self.model.state_dict()
# skip saving modules like pretrained embeddings, because they are large and will be saved in a separate file
if skip_modules:
skipped = [k for k in model_state.keys() if k.split('.')[0] in self.model.unsaved_modules]
for k in skipped:
del model_state[k]
params = {
'model': model_state,
'vocab': self.vocab.state_dict(),
'config': self.args
}
try:
torch.save(params, filename)
print("model saved to {}".format(filename))
except BaseException:
print("[Warning: Saving failed... continuing anyway.]")
def load(self, pretrain, filename):
try:
checkpoint = torch.load(filename, lambda storage, loc: storage)
except BaseException:
print("Cannot load model from {}".format(filename))
sys.exit(1)
self.args = checkpoint['config']
self.vocab = MultiVocab.load_state_dict(checkpoint['vocab'])
self.model = Tagger(self.args, self.vocab, emb_matrix=pretrain.emb, share_hid=self.args['share_hid'])
self.model.load_state_dict(checkpoint['model'], strict=False)
| stanfordnlp-master | stanfordnlp/models/pos/trainer.py |
import random
import torch
from stanfordnlp.models.common.data import map_to_ids, get_long_tensor, get_float_tensor, sort_all
from stanfordnlp.models.common import conll
from stanfordnlp.models.common.vocab import PAD_ID, VOCAB_PREFIX
from stanfordnlp.models.pos.vocab import CharVocab, WordVocab, XPOSVocab, FeatureVocab, MultiVocab
from stanfordnlp.models.pos.xpos_vocab_factory import xpos_vocab_factory
from stanfordnlp.pipeline.doc import Document
class DataLoader:
def __init__(self, input_src, batch_size, args, pretrain, vocab=None, evaluation=False):
self.batch_size = batch_size
self.args = args
self.eval = evaluation
self.shuffled = not self.eval
# check if input source is a file or a Document object
if isinstance(input_src, str):
filename = input_src
assert filename.endswith('conllu'), "Loaded file must be conllu file."
self.conll, data = self.load_file(filename, evaluation=self.eval)
elif isinstance(input_src, Document):
filename = None
doc = input_src
self.conll, data = self.load_doc(doc)
# handle vocab
if vocab is None:
self.vocab = self.init_vocab(data)
else:
self.vocab = vocab
self.pretrain_vocab = pretrain.vocab
# filter and sample data
if args.get('sample_train', 1.0) < 1.0 and not self.eval:
keep = int(args['sample_train'] * len(data))
data = random.sample(data, keep)
print("Subsample training set with rate {:g}".format(args['sample_train']))
data = self.preprocess(data, self.vocab, self.pretrain_vocab, args)
# shuffle for training
if self.shuffled:
random.shuffle(data)
self.num_examples = len(data)
# chunk into batches
self.data = self.chunk_batches(data)
if filename is not None:
print("{} batches created for {}.".format(len(self.data), filename))
def init_vocab(self, data):
assert self.eval == False # for eval vocab must exist
charvocab = CharVocab(data, self.args['shorthand'])
wordvocab = WordVocab(data, self.args['shorthand'], cutoff=7, lower=True)
uposvocab = WordVocab(data, self.args['shorthand'], idx=1)
xposvocab = xpos_vocab_factory(data, self.args['shorthand'])
featsvocab = FeatureVocab(data, self.args['shorthand'], idx=3)
vocab = MultiVocab({'char': charvocab,
'word': wordvocab,
'upos': uposvocab,
'xpos': xposvocab,
'feats': featsvocab})
return vocab
def preprocess(self, data, vocab, pretrain_vocab, args):
processed = []
for sent in data:
processed_sent = [vocab['word'].map([w[0] for w in sent])]
processed_sent += [[vocab['char'].map([x for x in w[0]]) for w in sent]]
processed_sent += [vocab['upos'].map([w[1] for w in sent])]
processed_sent += [vocab['xpos'].map([w[2] for w in sent])]
processed_sent += [vocab['feats'].map([w[3] for w in sent])]
processed_sent += [pretrain_vocab.map([w[0] for w in sent])]
processed.append(processed_sent)
return processed
def __len__(self):
return len(self.data)
def __getitem__(self, key):
""" Get a batch with index. """
if not isinstance(key, int):
raise TypeError
if key < 0 or key >= len(self.data):
raise IndexError
batch = self.data[key]
batch_size = len(batch)
batch = list(zip(*batch))
assert len(batch) == 6
# sort sentences by lens for easy RNN operations
lens = [len(x) for x in batch[0]]
batch, orig_idx = sort_all(batch, lens)
# sort words by lens for easy char-RNN operations
batch_words = [w for sent in batch[1] for w in sent]
word_lens = [len(x) for x in batch_words]
batch_words, word_orig_idx = sort_all([batch_words], word_lens)
batch_words = batch_words[0]
word_lens = [len(x) for x in batch_words]
# convert to tensors
words = batch[0]
words = get_long_tensor(words, batch_size)
words_mask = torch.eq(words, PAD_ID)
wordchars = get_long_tensor(batch_words, len(word_lens))
wordchars_mask = torch.eq(wordchars, PAD_ID)
upos = get_long_tensor(batch[2], batch_size)
xpos = get_long_tensor(batch[3], batch_size)
ufeats = get_long_tensor(batch[4], batch_size)
pretrained = get_long_tensor(batch[5], batch_size)
sentlens = [len(x) for x in batch[0]]
return words, words_mask, wordchars, wordchars_mask, upos, xpos, ufeats, pretrained, orig_idx, word_orig_idx, sentlens, word_lens
def __iter__(self):
for i in range(self.__len__()):
yield self.__getitem__(i)
def load_file(self, filename, evaluation=False):
conll_file = conll.CoNLLFile(filename)
data = conll_file.get(['word', 'upos', 'xpos', 'feats'], as_sentences=True)
return conll_file, data
def load_doc(self, doc):
data = doc.conll_file.get(['word', 'upos', 'xpos', 'feats'], as_sentences=True)
return doc.conll_file, data
def reshuffle(self):
data = [y for x in self.data for y in x]
self.data = self.chunk_batches(data)
random.shuffle(self.data)
def chunk_batches(self, data):
res = []
if not self.eval:
# sort sentences (roughly) by length for better memory utilization
data = sorted(data, key = lambda x: len(x[0]), reverse=random.random() > .5)
current = []
currentlen = 0
for x in data:
if len(x[0]) + currentlen > self.batch_size:
res.append(current)
current = []
currentlen = 0
current.append(x)
currentlen += len(x[0])
if currentlen > 0:
res.append(current)
return res
| stanfordnlp-master | stanfordnlp/models/pos/data.py |
"""
Utils and wrappers for scoring taggers.
"""
from stanfordnlp.models.common.utils import ud_scores
def score(system_conllu_file, gold_conllu_file, verbose=True):
""" Wrapper for tagger scorer. """
evaluation = ud_scores(gold_conllu_file, system_conllu_file)
el = evaluation['AllTags']
p = el.precision
r = el.recall
f = el.f1
if verbose:
scores = [evaluation[k].f1 * 100 for k in ['UPOS', 'XPOS', 'UFeats', 'AllTags']]
print("UPOS\tXPOS\tUFeats\tAllTags")
print("{:.2f}\t{:.2f}\t{:.2f}\t{:.2f}".format(*scores))
return p, r, f
| stanfordnlp-master | stanfordnlp/models/pos/scorer.py |
from collections import Counter
from stanfordnlp.models.common.vocab import BaseVocab, BaseMultiVocab
from stanfordnlp.models.common.seq2seq_constant import VOCAB_PREFIX
class Vocab(BaseVocab):
def build_vocab(self):
counter = Counter(self.data)
self._id2unit = VOCAB_PREFIX + list(sorted(list(counter.keys()), key=lambda k: counter[k], reverse=True))
self._unit2id = {w:i for i, w in enumerate(self._id2unit)}
class MultiVocab(BaseMultiVocab):
@classmethod
def load_state_dict(cls, state_dict):
new = cls()
for k,v in state_dict.items():
new[k] = Vocab.load_state_dict(v)
return new
| stanfordnlp-master | stanfordnlp/models/lemma/vocab.py |
stanfordnlp-master | stanfordnlp/models/lemma/__init__.py |
|
"""
Utilities for calculating edits between word and lemma forms.
"""
EDIT_TO_ID = {'none': 0, 'identity': 1, 'lower': 2}
def get_edit_type(word, lemma):
""" Calculate edit types. """
if lemma == word:
return 'identity'
elif lemma == word.lower():
return 'lower'
return 'none'
def edit_word(word, pred, edit_id):
"""
Edit a word, given edit and seq2seq predictions.
"""
if edit_id == 1:
return word
elif edit_id == 2:
return word.lower()
elif edit_id == 0:
return pred
else:
raise Exception("Unrecognized edit ID: {}".format(edit_id))
| stanfordnlp-master | stanfordnlp/models/lemma/edit.py |
"""
A trainer class to handle training and testing of models.
"""
import sys
import numpy as np
from collections import Counter
import torch
from torch import nn
import torch.nn.init as init
import stanfordnlp.models.common.seq2seq_constant as constant
from stanfordnlp.models.common.seq2seq_model import Seq2SeqModel
from stanfordnlp.models.common import utils, loss
from stanfordnlp.models.lemma import edit
from stanfordnlp.models.lemma.vocab import MultiVocab
def unpack_batch(batch, use_cuda):
""" Unpack a batch from the data loader. """
if use_cuda:
inputs = [b.cuda() if b is not None else None for b in batch[:6]]
else:
inputs = [b if b is not None else None for b in batch[:6]]
orig_idx = batch[6]
return inputs, orig_idx
class Trainer(object):
""" A trainer for training models. """
def __init__(self, args=None, vocab=None, emb_matrix=None, model_file=None, use_cuda=False):
self.use_cuda = use_cuda
if model_file is not None:
# load everything from file
self.load(model_file, use_cuda)
else:
# build model from scratch
self.args = args
self.model = None if args['dict_only'] else Seq2SeqModel(args, emb_matrix=emb_matrix, use_cuda=use_cuda)
self.vocab = vocab
# dict-based components
self.word_dict = dict()
self.composite_dict = dict()
if not self.args['dict_only']:
if self.args.get('edit', False):
self.crit = loss.MixLoss(self.vocab['char'].size, self.args['alpha'])
print("[Running seq2seq lemmatizer with edit classifier]")
else:
self.crit = loss.SequenceLoss(self.vocab['char'].size)
self.parameters = [p for p in self.model.parameters() if p.requires_grad]
if use_cuda:
self.model.cuda()
self.crit.cuda()
else:
self.model.cpu()
self.crit.cpu()
self.optimizer = utils.get_optimizer(self.args['optim'], self.parameters, self.args['lr'])
def update(self, batch, eval=False):
inputs, orig_idx = unpack_batch(batch, self.use_cuda)
src, src_mask, tgt_in, tgt_out, pos, edits = inputs
if eval:
self.model.eval()
else:
self.model.train()
self.optimizer.zero_grad()
log_probs, edit_logits = self.model(src, src_mask, tgt_in, pos)
if self.args.get('edit', False):
assert edit_logits is not None
loss = self.crit(log_probs.view(-1, self.vocab['char'].size), tgt_out.view(-1), \
edit_logits, edits)
else:
loss = self.crit(log_probs.view(-1, self.vocab['char'].size), tgt_out.view(-1))
loss_val = loss.data.item()
if eval:
return loss_val
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args['max_grad_norm'])
self.optimizer.step()
return loss_val
def predict(self, batch, beam_size=1):
inputs, orig_idx = unpack_batch(batch, self.use_cuda)
src, src_mask, tgt, tgt_mask, pos, edits = inputs
self.model.eval()
batch_size = src.size(0)
preds, edit_logits = self.model.predict(src, src_mask, pos=pos, beam_size=beam_size)
pred_seqs = [self.vocab['char'].unmap(ids) for ids in preds] # unmap to tokens
pred_seqs = utils.prune_decoded_seqs(pred_seqs)
pred_tokens = ["".join(seq) for seq in pred_seqs] # join chars to be tokens
pred_tokens = utils.unsort(pred_tokens, orig_idx)
if self.args.get('edit', False):
assert edit_logits is not None
edits = np.argmax(edit_logits.data.cpu().numpy(), axis=1).reshape([batch_size]).tolist()
edits = utils.unsort(edits, orig_idx)
else:
edits = None
return pred_tokens, edits
def postprocess(self, words, preds, edits=None):
""" Postprocess, mainly for handing edits. """
assert len(words) == len(preds), "Lemma predictions must have same length as words."
edited = []
if self.args.get('edit', False):
assert edits is not None and len(words) == len(edits)
for w, p, e in zip(words, preds, edits):
lem = edit.edit_word(w, p, e)
edited += [lem]
else:
edited = preds # do not edit
# final sanity check
assert len(edited) == len(words)
final = []
for lem, w in zip(edited, words):
if len(lem) == 0 or constant.UNK in lem:
final += [w] # invalid prediction, fall back to word
else:
final += [lem]
return final
def update_lr(self, new_lr):
utils.change_lr(self.optimizer, new_lr)
def train_dict(self, triples):
""" Train a dict lemmatizer given training (word, pos, lemma) triples. """
# accumulate counter
ctr = Counter()
ctr.update([(p[0], p[1], p[2]) for p in triples])
# find the most frequent mappings
for p, _ in ctr.most_common():
w, pos, l = p
if (w,pos) not in self.composite_dict:
self.composite_dict[(w,pos)] = l
if w not in self.word_dict:
self.word_dict[w] = l
return
def predict_dict(self, pairs):
""" Predict a list of lemmas using the dict model given (word, pos) pairs. """
lemmas = []
for p in pairs:
w, pos = p
if (w,pos) in self.composite_dict:
lemmas += [self.composite_dict[(w,pos)]]
elif w in self.word_dict:
lemmas += [self.word_dict[w]]
else:
lemmas += [w]
return lemmas
def ensemble(self, pairs, other_preds):
""" Ensemble the dict with statitical model predictions. """
lemmas = []
assert len(pairs) == len(other_preds)
for p, pred in zip(pairs, other_preds):
w, pos = p
if (w,pos) in self.composite_dict:
lemmas += [self.composite_dict[(w,pos)]]
elif w in self.word_dict:
lemmas += [self.word_dict[w]]
else:
lemmas += [pred]
return lemmas
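    # Lookup order used by predict_dict/ensemble above (informal summary, not part
    # of the original file):
    #   1. the composite dict keyed on (word, upos), e.g. ("left", "VERB") -> "leave"
    #   2. the word-only dict, e.g. "left" -> "leave"
    #   3. fall back to the word itself (predict_dict) or to the seq2seq
    #      prediction (ensemble)
    # train_dict keeps the most frequent lemma per key, since entries are added in
    # Counter.most_common() order and never overwritten.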
def save(self, filename):
params = {
'model': self.model.state_dict() if self.model is not None else None,
'dicts': (self.word_dict, self.composite_dict),
'vocab': self.vocab.state_dict(),
'config': self.args
}
try:
torch.save(params, filename)
print("model saved to {}".format(filename))
except BaseException:
print("[Warning: Saving failed... continuing anyway.]")
def load(self, filename, use_cuda=False):
try:
checkpoint = torch.load(filename, lambda storage, loc: storage)
except BaseException:
print("Cannot load model from {}".format(filename))
sys.exit(1)
self.args = checkpoint['config']
self.word_dict, self.composite_dict = checkpoint['dicts']
if not self.args['dict_only']:
self.model = Seq2SeqModel(self.args, use_cuda=use_cuda)
self.model.load_state_dict(checkpoint['model'])
else:
self.model = None
self.vocab = MultiVocab.load_state_dict(checkpoint['vocab'])
| stanfordnlp-master | stanfordnlp/models/lemma/trainer.py |
import random
import numpy as np
import os
from collections import Counter
import torch
import stanfordnlp.models.common.seq2seq_constant as constant
from stanfordnlp.models.common.data import map_to_ids, get_long_tensor, get_float_tensor, sort_all
from stanfordnlp.models.common import conll
from stanfordnlp.models.lemma.vocab import Vocab, MultiVocab
from stanfordnlp.models.lemma import edit
from stanfordnlp.pipeline.doc import Document
class DataLoader:
def __init__(self, input_src, batch_size, args, vocab=None, evaluation=False, conll_only=False):
self.batch_size = batch_size
self.args = args
self.eval = evaluation
self.shuffled = not self.eval
# check if input source is a file or a Document object
if isinstance(input_src, str):
filename = input_src
assert filename.endswith('conllu'), "Loaded file must be conllu file."
self.conll, data = self.load_file(filename)
elif isinstance(input_src, Document):
filename = None
doc = input_src
self.conll, data = self.load_doc(doc)
if conll_only: # only load conll file
return
# handle vocab
if vocab is not None:
self.vocab = vocab
else:
self.vocab = dict()
char_vocab, pos_vocab = self.init_vocab(data)
self.vocab = MultiVocab({'char': char_vocab, 'pos': pos_vocab})
# filter and sample data
if args.get('sample_train', 1.0) < 1.0 and not self.eval:
keep = int(args['sample_train'] * len(data))
data = random.sample(data, keep)
print("Subsample training set with rate {:g}".format(args['sample_train']))
data = self.preprocess(data, self.vocab['char'], self.vocab['pos'], args)
# shuffle for training
if self.shuffled:
indices = list(range(len(data)))
random.shuffle(indices)
data = [data[i] for i in indices]
self.num_examples = len(data)
# chunk into batches
data = [data[i:i+batch_size] for i in range(0, len(data), batch_size)]
self.data = data
def init_vocab(self, data):
assert self.eval is False, "Vocab file must exist for evaluation"
char_data = "".join(d[0] + d[2] for d in data)
char_vocab = Vocab(char_data, self.args['lang'])
pos_data = [d[1] for d in data]
pos_vocab = Vocab(pos_data, self.args['lang'])
return char_vocab, pos_vocab
def preprocess(self, data, char_vocab, pos_vocab, args):
processed = []
for d in data:
edit_type = edit.EDIT_TO_ID[edit.get_edit_type(d[0], d[2])]
src = list(d[0])
src = [constant.SOS] + src + [constant.EOS]
src = char_vocab.map(src)
pos = d[1]
pos = pos_vocab.unit2id(pos)
tgt = list(d[2])
tgt_in = char_vocab.map([constant.SOS] + tgt)
tgt_out = char_vocab.map(tgt + [constant.EOS])
processed += [[src, tgt_in, tgt_out, pos, edit_type]]
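        # each processed example is:
        #   [source char ids (SOS .. EOS), decoder input ids (SOS ..),
        #    decoder target ids (.. EOS), UPOS id, edit class id]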
return processed
def __len__(self):
return len(self.data)
def __getitem__(self, key):
""" Get a batch with index. """
if not isinstance(key, int):
raise TypeError
if key < 0 or key >= len(self.data):
raise IndexError
batch = self.data[key]
batch_size = len(batch)
batch = list(zip(*batch))
assert len(batch) == 5
# sort all fields by lens for easy RNN operations
lens = [len(x) for x in batch[0]]
batch, orig_idx = sort_all(batch, lens)
# convert to tensors
src = batch[0]
src = get_long_tensor(src, batch_size)
src_mask = torch.eq(src, constant.PAD_ID)
tgt_in = get_long_tensor(batch[1], batch_size)
tgt_out = get_long_tensor(batch[2], batch_size)
pos = torch.LongTensor(batch[3])
edits = torch.LongTensor(batch[4])
assert tgt_in.size(1) == tgt_out.size(1), "Target input and output sequence sizes do not match."
return src, src_mask, tgt_in, tgt_out, pos, edits, orig_idx
def __iter__(self):
for i in range(self.__len__()):
yield self.__getitem__(i)
def load_file(self, filename):
conll_file = conll.CoNLLFile(filename)
data = conll_file.get(['word', 'upos', 'lemma'])
return conll_file, data
def load_doc(self, doc):
data = doc.conll_file.get(['word', 'upos', 'lemma'])
return doc.conll_file, data
| stanfordnlp-master | stanfordnlp/models/lemma/data.py |
"""
Utils and wrappers for scoring lemmatizers.
"""
from stanfordnlp.utils import conll18_ud_eval as ud_eval
def score(system_conllu_file, gold_conllu_file):
""" Wrapper for lemma scorer. """
gold_ud = ud_eval.load_conllu_file(gold_conllu_file)
system_ud = ud_eval.load_conllu_file(system_conllu_file)
evaluation = ud_eval.evaluate(gold_ud, system_ud)
el = evaluation["Lemmas"]
p, r, f = el.precision, el.recall, el.f1
return p, r, f
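# Illustrative usage (file names are hypothetical):
#   precision, recall, f1 = score('predictions.conllu', 'gold.conllu')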
| stanfordnlp-master | stanfordnlp/models/lemma/scorer.py |
from collections import Counter
import re
from stanfordnlp.models.common.vocab import BaseVocab
from stanfordnlp.models.common.vocab import UNK, PAD
class Vocab(BaseVocab):
def build_vocab(self):
paras = self.data
counter = Counter()
for para in paras:
for unit in para:
normalized = self.normalize_unit(unit[0])
counter[normalized] += 1
self._id2unit = [PAD, UNK] + list(sorted(list(counter.keys()), key=lambda k: counter[k], reverse=True))
self._unit2id = {w:i for i, w in enumerate(self._id2unit)}
def normalize_unit(self, unit):
# Normalize minimal units used by the tokenizer
# For Vietnamese this means a syllable, for other languages this means a character
normalized = unit
if self.lang.startswith('vi'):
normalized = normalized.lstrip()
return normalized
def normalize_token(self, token):
token = re.sub('\s', ' ', token.lstrip())
if any([self.lang.startswith(x) for x in ['zh', 'ja', 'ko']]):
token = token.replace(' ', '')
return token
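    # Behaviour sketch (not part of the original file):
    #   normalize_unit strips leading whitespace only for Vietnamese ('vi*'),
    #   where the basic unit is a syllable rather than a single character.
    #   normalize_token('  don\'t\tgo') -> "don't go": leading whitespace is
    #   stripped and each remaining whitespace character becomes a space, while
    #   for zh/ja/ko all spaces are removed, e.g. normalize_token('你 好') -> '你好'.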
| stanfordnlp-master | stanfordnlp/models/tokenize/vocab.py |
stanfordnlp-master | stanfordnlp/models/tokenize/__init__.py |
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class Tokenizer(nn.Module):
def __init__(self, args, nchars, emb_dim, hidden_dim, N_CLASSES=5, dropout=0):
super().__init__()
self.args = args
feat_dim = args['feat_dim']
self.embeddings = nn.Embedding(nchars, emb_dim, padding_idx=0)
self.rnn = nn.LSTM(emb_dim + feat_dim, hidden_dim, num_layers=self.args['rnn_layers'], bidirectional=True, batch_first=True, dropout=dropout if self.args['rnn_layers'] > 1 else 0)
if self.args['conv_res'] is not None:
self.conv_res = nn.ModuleList()
self.conv_sizes = [int(x) for x in self.args['conv_res'].split(',')]
for si, size in enumerate(self.conv_sizes):
l = nn.Conv1d(emb_dim + feat_dim, hidden_dim * 2, size, padding=size//2, bias=self.args.get('hier_conv_res', False) or (si == 0))
self.conv_res.append(l)
if self.args.get('hier_conv_res', False):
self.conv_res2 = nn.Conv1d(hidden_dim * 2 * len(self.conv_sizes), hidden_dim * 2, 1)
self.tok_clf = nn.Linear(hidden_dim * 2, 1)
self.sent_clf = nn.Linear(hidden_dim * 2, 1)
self.mwt_clf = nn.Linear(hidden_dim * 2, 1)
if args['hierarchical']:
in_dim = hidden_dim * 2
self.rnn2 = nn.LSTM(in_dim, hidden_dim, num_layers=1, bidirectional=True, batch_first=True)
self.tok_clf2 = nn.Linear(hidden_dim * 2, 1, bias=False)
self.sent_clf2 = nn.Linear(hidden_dim * 2, 1, bias=False)
self.mwt_clf2 = nn.Linear(hidden_dim * 2, 1, bias=False)
self.dropout = nn.Dropout(dropout)
self.toknoise = nn.Dropout(self.args['tok_noise'])
def forward(self, x, feats):
emb = self.embeddings(x)
emb = self.dropout(emb)
emb = torch.cat([emb, feats], 2)
inp, _ = self.rnn(emb)
if self.args['conv_res'] is not None:
conv_input = emb.transpose(1, 2).contiguous()
if not self.args.get('hier_conv_res', False):
for l in self.conv_res:
inp = inp + l(conv_input).transpose(1, 2).contiguous()
else:
hid = []
for l in self.conv_res:
hid += [l(conv_input)]
hid = torch.cat(hid, 1)
hid = F.relu(hid)
hid = self.dropout(hid)
inp = inp + self.conv_res2(hid).transpose(1, 2).contiguous()
inp = self.dropout(inp)
tok0 = self.tok_clf(inp)
sent0 = self.sent_clf(inp)
mwt0 = self.mwt_clf(inp)
if self.args['hierarchical']:
if self.args['hier_invtemp'] > 0:
inp2, _ = self.rnn2(inp * (1 - self.toknoise(torch.sigmoid(-tok0 * self.args['hier_invtemp']))))
else:
inp2, _ = self.rnn2(inp)
inp2 = self.dropout(inp2)
tok0 = tok0 + self.tok_clf2(inp2)
sent0 = sent0 + self.sent_clf2(inp2)
mwt0 = mwt0 + self.mwt_clf2(inp2)
nontok = F.logsigmoid(-tok0)
tok = F.logsigmoid(tok0)
nonsent = F.logsigmoid(-sent0)
sent = F.logsigmoid(sent0)
nonmwt = F.logsigmoid(-mwt0)
mwt = F.logsigmoid(mwt0)
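        # The five concatenated scores are log-probabilities of, in order:
        #   0: not the end of a token
        #   1: end of a token (neither sentence-final nor part of a multi-word token)
        #   2: end of a token that also ends a sentence
        #   3: end of a multi-word token (MWT)
        #   4: end of an MWT that also ends a sentence
        # (labelling inferred from how the argmax is consumed downstream in
        #  tokenize/utils.py, e.g. p in {2, 4} triggers a sentence break)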
pred = torch.cat([nontok, tok+nonsent+nonmwt, tok+sent+nonmwt, tok+nonsent+mwt, tok+sent+mwt], 2)
return pred
| stanfordnlp-master | stanfordnlp/models/tokenize/model.py |
from collections import Counter
from copy import copy
import json
import numpy as np
from stanfordnlp.models.common.utils import ud_scores, harmonic_mean
def load_mwt_dict(filename):
if filename is not None:
with open(filename, 'r') as f:
mwt_dict0 = json.load(f)
mwt_dict = dict()
for item in mwt_dict0:
(key, expansion), count = item
if key not in mwt_dict or mwt_dict[key][1] < count:
mwt_dict[key] = (expansion, count)
return mwt_dict
else:
return
def print_sentence(sentence, f, mwt_dict=None):
i = 0
for tok, p in sentence:
expansion = None
if (p == 3 or p == 4) and mwt_dict is not None:
# MWT found, (attempt to) expand it!
if tok in mwt_dict:
expansion = mwt_dict[tok][0]
elif tok.lower() in mwt_dict:
expansion = mwt_dict[tok.lower()][0]
if expansion is not None:
f.write("{}-{}\t{}{}\n".format(i+1, i+len(expansion), tok, "\t_" * 8))
for etok in expansion:
f.write("{}\t{}{}\t{}{}\n".format(i+1, etok, "\t_" * 4, i, "\t_" * 3))
i += 1
else:
if len(tok) <= 0:
continue
f.write("{}\t{}{}\t{}{}\t{}\n".format(i+1, tok, "\t_" * 4, i, "\t_" * 2, "MWT=Yes" if p == 3 or p == 4 else "_"))
i += 1
f.write('\n')
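# Illustrative output (not part of the original file): with an mwt_dict entry
# mapping "don't" to (["do", "n't"], count) and a prediction p in {3, 4}, the
# first token of a sentence is written as three ten-column CoNLL-U rows
# (tabs shown as '\t'):
#   1-2\tdon't\t_\t_\t_\t_\t_\t_\t_\t_
#   1\tdo\t_\t_\t_\t_\t0\t_\t_\t_
#   2\tn't\t_\t_\t_\t_\t1\t_\t_\t_
# The HEAD column is only a placeholder (the previous index) at this stage.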
def output_predictions(output_file, trainer, data_generator, vocab, mwt_dict, max_seqlen=1000):
paragraphs = []
for i, p in enumerate(data_generator.sentences):
start = 0 if i == 0 else paragraphs[-1][2]
length = sum([len(x) for x in p])
paragraphs += [(i, start, start+length, length+1)] # para idx, start idx, end idx, length
paragraphs = list(sorted(paragraphs, key=lambda x: x[3], reverse=True))
all_preds = [None] * len(paragraphs)
all_raw = [None] * len(paragraphs)
eval_limit = max(3000, max_seqlen)
batch_size = trainer.args['batch_size']
batches = int((len(paragraphs) + batch_size - 1) / batch_size)
t = 0
for i in range(batches):
batchparas = paragraphs[i * batch_size : (i + 1) * batch_size]
offsets = [x[1] for x in batchparas]
t += sum([x[3] for x in batchparas])
batch = data_generator.next(eval_offsets=offsets)
raw = batch[3]
N = len(batch[3][0])
if N <= eval_limit:
pred = np.argmax(trainer.predict(batch), axis=2)
else:
idx = [0] * len(batchparas)
Ns = [p[3] for p in batchparas]
pred = [[] for _ in batchparas]
while True:
ens = [min(N - idx1, eval_limit) for idx1, N in zip(idx, Ns)]
en = max(ens)
batch1 = batch[0][:, :en], batch[1][:, :en], batch[2][:, :en], [x[:en] for x in batch[3]]
pred1 = np.argmax(trainer.predict(batch1), axis=2)
for j in range(len(batchparas)):
sentbreaks = np.where((pred1[j] == 2) + (pred1[j] == 4))[0]
if len(sentbreaks) <= 0 or idx[j] >= Ns[j] - eval_limit:
advance = ens[j]
else:
advance = np.max(sentbreaks) + 1
pred[j] += [pred1[j, :advance]]
idx[j] += advance
if all([idx1 >= N for idx1, N in zip(idx, Ns)]):
break
batch = data_generator.next(eval_offsets=[x+y for x, y in zip(idx, offsets)])
pred = [np.concatenate(p, 0) for p in pred]
for j, p in enumerate(batchparas):
len1 = len([1 for x in raw[j] if x != '<PAD>'])
if pred[j][len1-1] < 2:
pred[j][len1-1] = 2
elif pred[j][len1-1] > 2:
pred[j][len1-1] = 4
all_preds[p[0]] = pred[j][:len1]
all_raw[p[0]] = raw[j]
offset = 0
oov_count = 0
for j in range(len(paragraphs)):
raw = all_raw[j]
pred = all_preds[j]
current_tok = ''
current_sent = []
for t, p in zip(raw, pred):
if t == '<PAD>':
break
            # hack for la_ittb: force ':' and ';' to be treated as sentence ends
if trainer.args['shorthand'] == 'la_ittb' and t in [":", ";"]:
p = 2
offset += 1
if vocab.unit2id(t) == vocab.unit2id('<UNK>'):
oov_count += 1
current_tok += t
if p >= 1:
tok = vocab.normalize_token(current_tok)
assert '\t' not in tok, tok
if len(tok) <= 0:
current_tok = ''
continue
current_sent += [(tok, p)]
current_tok = ''
if p == 2 or p == 4:
print_sentence(current_sent, output_file, mwt_dict)
current_sent = []
if len(current_tok):
tok = vocab.normalize_token(current_tok)
assert '\t' not in tok, tok
if len(tok) > 0:
current_sent += [(tok, 2)]
if len(current_sent):
print_sentence(current_sent, output_file, mwt_dict)
return oov_count, offset, all_preds
def eval_model(args, trainer, batches, vocab, mwt_dict):
with open(args['conll_file'], 'w') as conll_output:
oov_count, N, all_preds = output_predictions(conll_output, trainer, batches, vocab, mwt_dict, args['max_seqlen'])
all_preds = np.concatenate(all_preds, 0)
labels = [y[1] for x in batches.data for y in x]
counter = Counter(zip(all_preds, labels))
def f1(pred, gold, mapping):
pred = [mapping[p] for p in pred]
gold = [mapping[g] for g in gold]
lastp = -1; lastg = -1
tp = 0; fp = 0; fn = 0
for i, (p, g) in enumerate(zip(pred, gold)):
if p == g > 0 and lastp == lastg:
lastp = i
lastg = i
tp += 1
elif p > 0 and g > 0:
lastp = i
lastg = i
fp += 1
fn += 1
elif p > 0:
# and g == 0
lastp = i
fp += 1
elif g > 0:
lastg = i
fn += 1
if tp == 0:
return 0
else:
return 2 * tp / (2 * tp + fp + fn)
f1tok = f1(all_preds, labels, {0:0, 1:1, 2:1, 3:1, 4:1})
f1sent = f1(all_preds, labels, {0:0, 1:0, 2:1, 3:0, 4:1})
f1mwt = f1(all_preds, labels, {0:0, 1:1, 2:1, 3:2, 4:2})
print(args['shorthand'], f1tok, f1sent, f1mwt)
return harmonic_mean([f1tok, f1sent, f1mwt], [1, 1, .01])
| stanfordnlp-master | stanfordnlp/models/tokenize/utils.py |
import sys
import torch
import torch.nn as nn
import torch.optim as optim
from stanfordnlp.models.common.trainer import Trainer
from .model import Tokenizer
from .vocab import Vocab
class Trainer(Trainer):
def __init__(self, args=None, vocab=None, model_file=None, use_cuda=False):
self.use_cuda = use_cuda
if model_file is not None:
# load everything from file
self.load(model_file)
else:
# build model from scratch
self.args = args
self.vocab = vocab
self.model = Tokenizer(self.args, self.args['vocab_size'], self.args['emb_dim'], self.args['hidden_dim'], dropout=self.args['dropout'])
self.criterion = nn.CrossEntropyLoss(ignore_index=-1)
if use_cuda:
self.model.cuda()
self.criterion.cuda()
else:
self.model.cpu()
self.criterion.cpu()
self.parameters = [p for p in self.model.parameters() if p.requires_grad]
self.optimizer = optim.Adam(self.parameters, lr=self.args['lr0'], betas=(.9, .9), weight_decay=self.args['weight_decay'])
self.feat_funcs = self.args.get('feat_funcs', None)
        self.lang = self.args['lang'] # language determines how token normalization is done
def update(self, inputs):
self.model.train()
units, labels, features, _ = inputs
if self.use_cuda:
units = units.cuda()
labels = labels.cuda()
features = features.cuda()
pred = self.model(units, features)
self.optimizer.zero_grad()
classes = pred.size(2)
loss = self.criterion(pred.view(-1, classes), labels.view(-1))
loss.backward()
nn.utils.clip_grad_norm_(self.model.parameters(), self.args['max_grad_norm'])
self.optimizer.step()
return loss.item()
def predict(self, inputs):
self.model.eval()
units, labels, features, _ = inputs
if self.use_cuda:
units = units.cuda()
labels = labels.cuda()
features = features.cuda()
pred = self.model(units, features)
return pred.data.cpu().numpy()
def save(self, filename):
params = {
'model': self.model.state_dict() if self.model is not None else None,
'vocab': self.vocab.state_dict(),
'config': self.args
}
try:
torch.save(params, filename)
print("model saved to {}".format(filename))
except BaseException:
print("[Warning: Saving failed... continuing anyway.]")
def load(self, filename):
try:
checkpoint = torch.load(filename, lambda storage, loc: storage)
except BaseException:
print("Cannot load model from {}".format(filename))
sys.exit(1)
self.args = checkpoint['config']
self.model = Tokenizer(self.args, self.args['vocab_size'], self.args['emb_dim'], self.args['hidden_dim'], dropout=self.args['dropout'])
self.model.load_state_dict(checkpoint['model'])
self.vocab = Vocab.load_state_dict(checkpoint['vocab'])
| stanfordnlp-master | stanfordnlp/models/tokenize/trainer.py |
from bisect import bisect_right
from copy import copy
import json
import numpy as np
import random
import re
import torch
from .vocab import Vocab
class DataLoader:
def __init__(self, args, input_files={'json': None, 'txt': None, 'label': None}, input_text=None, input_data=None, vocab=None, evaluation=False):
self.args = args
self.eval = evaluation
# get input files
json_file = input_files['json']
txt_file = input_files['txt']
label_file = input_files['label']
# Load data and process it
if input_data is not None:
self.data = input_data
elif json_file is not None:
with open(json_file) as f:
self.data = json.load(f)
else:
# set up text from file or input string
assert txt_file is not None or input_text is not None
if input_text is None:
with open(txt_file) as f:
text = ''.join(f.readlines()).rstrip()
else:
text = input_text
if label_file is not None:
with open(label_file) as f:
labels = ''.join(f.readlines()).rstrip()
else:
labels = '\n\n'.join(['0' * len(pt.rstrip()) for pt in re.split('\n\s*\n', text)])
self.data = [list(zip(re.sub('\s', ' ', pt.rstrip()), [int(x) for x in pc])) for pt, pc in zip(re.split('\n\s*\n', text), labels.split('\n\n')) if len(pt.rstrip()) > 0]
self.vocab = vocab if vocab is not None else self.init_vocab()
# data comes in a list of paragraphs, where each paragraph is a list of units with unit-level labels
self.sentences = [self.para_to_sentences(para) for para in self.data]
self.init_sent_ids()
def init_vocab(self):
vocab = Vocab(self.data, self.args['lang'])
return vocab
def init_sent_ids(self):
self.sentence_ids = []
self.cumlen = [0]
for i, para in enumerate(self.sentences):
for j in range(len(para)):
self.sentence_ids += [(i, j)]
self.cumlen += [self.cumlen[-1] + len(self.sentences[i][j])]
def para_to_sentences(self, para):
res = []
funcs = []
for feat_func in self.args['feat_funcs']:
if feat_func == 'space_before':
func = lambda x: 1 if x.startswith(' ') else 0
elif feat_func == 'capitalized':
func = lambda x: 1 if x[0].isupper() else 0
elif feat_func == 'all_caps':
func = lambda x: 1 if x.isupper() else 0
elif feat_func == 'numeric':
func = lambda x: 1 if (re.match('^([\d]+[,\.]*)+$', x) is not None) else 0
else:
assert False, 'Feature function "{}" is undefined.'.format(feat_func)
funcs += [func]
composite_func = lambda x: list(map(lambda f: f(x), funcs))
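        # For example, with feat_funcs = ['space_before', 'capitalized'] (the
        # actual list comes from the config), composite_func('The') == [0, 1]
        # and composite_func(' the') == [1, 0].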
def process_and_featurize(sent):
return [(self.vocab.unit2id(y[0]), y[1], composite_func(y[0]), y[0]) for y in sent]
current = []
for unit, label in para:
label1 = label if not self.eval else 0
current += [[unit, label]]
if label1 == 2 or label1 == 4: # end of sentence
if len(current) <= self.args['max_seqlen']:
# get rid of sentences that are too long during training of the tokenizer
res += [process_and_featurize(current)]
current = []
if len(current) > 0:
if self.eval or len(current) <= self.args['max_seqlen']:
res += [process_and_featurize(current)]
return res
def __len__(self):
return len(self.sentence_ids)
def shuffle(self):
for para in self.sentences:
random.shuffle(para)
self.init_sent_ids()
def next(self, eval_offsets=None, unit_dropout=0.0):
null_feats = [0] * len(self.sentences[0][0][0][2])
def strings_starting(id_pair, offset=0, pad_len=self.args['max_seqlen']):
pid, sid = id_pair
res = copy(self.sentences[pid][sid][offset:])
assert self.eval or len(res) <= self.args['max_seqlen'], 'The maximum sequence length {} is less than that of the longest sentence length ({}) in the data, consider increasing it! {}'.format(self.args['max_seqlen'], len(res), ' '.join(["{}/{}".format(*x) for x in self.sentences[pid][sid]]))
for sid1 in range(sid+1, len(self.sentences[pid])):
res += self.sentences[pid][sid1]
if not self.eval and len(res) >= self.args['max_seqlen']:
res = res[:self.args['max_seqlen']]
break
if unit_dropout > 0 and not self.eval:
unkid = self.vocab.unit2id('<UNK>')
res = [(unkid, x[1], x[2], '<UNK>') if random.random() < unit_dropout else x for x in res]
# pad with padding units and labels if necessary
if pad_len > 0 and len(res) < pad_len:
padid = self.vocab.unit2id('<PAD>')
res += [(padid, -1, null_feats, '<PAD>')] * (pad_len - len(res))
return res
if eval_offsets is not None:
# find max padding length
pad_len = 0
for eval_offset in eval_offsets:
if eval_offset < self.cumlen[-1]:
pair_id = bisect_right(self.cumlen, eval_offset) - 1
pair = self.sentence_ids[pair_id]
pad_len = max(pad_len, len(strings_starting(pair, offset=eval_offset-self.cumlen[pair_id], pad_len=0)))
res = []
pad_len += 1
for eval_offset in eval_offsets:
# find unit
if eval_offset >= self.cumlen[-1]:
padid = self.vocab.unit2id('<PAD>')
res += [[(padid, -1, null_feats, '<PAD>')] * pad_len]
continue
pair_id = bisect_right(self.cumlen, eval_offset) - 1
pair = self.sentence_ids[pair_id]
res += [strings_starting(pair, offset=eval_offset-self.cumlen[pair_id], pad_len=pad_len)]
else:
id_pairs = random.sample(self.sentence_ids, min(len(self.sentence_ids), self.args['batch_size']))
res = [strings_starting(pair) for pair in id_pairs]
units = [[y[0] for y in x] for x in res]
labels = [[y[1] for y in x] for x in res]
features = [[y[2] for y in x] for x in res]
raw_units = [[y[3] for y in x] for x in res]
convert = lambda t: (torch.from_numpy(np.array(t[0], dtype=t[1])))
units, labels, features = list(map(convert, [(units, np.int64), (labels, np.int64), (features, np.float32)]))
return units, labels, features, raw_units
| stanfordnlp-master | stanfordnlp/models/tokenize/data.py |
from __future__ import absolute_import
from io import BytesIO
from google.protobuf.internal.encoder import _EncodeVarint
from google.protobuf.internal.decoder import _DecodeVarint
from .CoreNLP_pb2 import *
def parseFromDelimitedString(obj, buf, offset=0):
"""
Stanford CoreNLP uses the Java "writeDelimitedTo" function, which
writes the size (and offset) of the buffer before writing the object.
    This function parses one such message from @buf, starting at @offset.
@returns how many bytes of @buf were consumed.
"""
size, pos = _DecodeVarint(buf, offset)
obj.ParseFromString(buf[offset+pos:offset+pos+size])
return pos+size
def writeToDelimitedString(obj, stream=None):
"""
Stanford CoreNLP uses the Java "writeDelimitedTo" function, which
writes the size (and offset) of the buffer before writing the object.
    This function writes @obj to @stream (a fresh BytesIO if none is given) in
    the same size-prefixed format.
    @returns the stream that was written to.
"""
if stream is None:
stream = BytesIO()
_EncodeVarint(stream.write, obj.ByteSize(), True)
stream.write(obj.SerializeToString())
return stream
def to_text(sentence):
"""
Helper routine that converts a Sentence protobuf to a string from
its tokens.
"""
text = ""
for i, tok in enumerate(sentence.token):
if i != 0:
text += tok.before
text += tok.word
return text
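# Illustrative round-trip (not part of the original module); Document comes from
# the generated CoreNLP_pb2 module imported above.
def _delimited_roundtrip_demo():
    doc = Document()
    doc.text = "Stanford NLP"
    buf = writeToDelimitedString(doc).getvalue()
    parsed = Document()
    consumed = parseFromDelimitedString(parsed, buf)
    # the varint length prefix plus the message body account for the whole buffer
    assert consumed == len(buf)
    assert parsed.text == doc.text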
| stanfordnlp-master | stanfordnlp/protobuf/__init__.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: CoreNLP.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='CoreNLP.proto',
package='edu.stanford.nlp.pipeline',
syntax='proto2',
serialized_pb=_b('\n\rCoreNLP.proto\x12\x19\x65\x64u.stanford.nlp.pipeline\"\xe1\x05\n\x08\x44ocument\x12\x0c\n\x04text\x18\x01 \x02(\t\x12\x35\n\x08sentence\x18\x02 \x03(\x0b\x32#.edu.stanford.nlp.pipeline.Sentence\x12\x39\n\ncorefChain\x18\x03 \x03(\x0b\x32%.edu.stanford.nlp.pipeline.CorefChain\x12\r\n\x05\x64ocID\x18\x04 \x01(\t\x12\x0f\n\x07\x64ocDate\x18\x07 \x01(\t\x12\x10\n\x08\x63\x61lendar\x18\x08 \x01(\x04\x12;\n\x11sentencelessToken\x18\x05 \x03(\x0b\x32 .edu.stanford.nlp.pipeline.Token\x12\x33\n\tcharacter\x18\n \x03(\x0b\x32 .edu.stanford.nlp.pipeline.Token\x12/\n\x05quote\x18\x06 \x03(\x0b\x32 .edu.stanford.nlp.pipeline.Quote\x12\x37\n\x08mentions\x18\t \x03(\x0b\x32%.edu.stanford.nlp.pipeline.NERMention\x12#\n\x1bhasEntityMentionsAnnotation\x18\r \x01(\x08\x12\x0e\n\x06xmlDoc\x18\x0b \x01(\x08\x12\x34\n\x08sections\x18\x0c \x03(\x0b\x32\".edu.stanford.nlp.pipeline.Section\x12<\n\x10mentionsForCoref\x18\x0e \x03(\x0b\x32\".edu.stanford.nlp.pipeline.Mention\x12!\n\x19hasCorefMentionAnnotation\x18\x0f \x01(\x08\x12\x1a\n\x12hasCorefAnnotation\x18\x10 \x01(\x08\x12+\n#corefMentionToEntityMentionMappings\x18\x11 \x03(\r\x12+\n#entityMentionToCorefMentionMappings\x18\x12 \x03(\r*\x05\x08\x64\x10\x80\x02\"\x8e\x0f\n\x08Sentence\x12/\n\x05token\x18\x01 \x03(\x0b\x32 .edu.stanford.nlp.pipeline.Token\x12\x18\n\x10tokenOffsetBegin\x18\x02 \x02(\r\x12\x16\n\x0etokenOffsetEnd\x18\x03 \x02(\r\x12\x15\n\rsentenceIndex\x18\x04 \x01(\r\x12\x1c\n\x14\x63haracterOffsetBegin\x18\x05 \x01(\r\x12\x1a\n\x12\x63haracterOffsetEnd\x18\x06 \x01(\r\x12\x37\n\tparseTree\x18\x07 \x01(\x0b\x32$.edu.stanford.nlp.pipeline.ParseTree\x12@\n\x12\x62inarizedParseTree\x18\x1f \x01(\x0b\x32$.edu.stanford.nlp.pipeline.ParseTree\x12@\n\x12\x61nnotatedParseTree\x18 \x01(\x0b\x32$.edu.stanford.nlp.pipeline.ParseTree\x12\x11\n\tsentiment\x18! 
\x01(\t\x12=\n\x0fkBestParseTrees\x18\" \x03(\x0b\x32$.edu.stanford.nlp.pipeline.ParseTree\x12\x45\n\x11\x62\x61sicDependencies\x18\x08 \x01(\x0b\x32*.edu.stanford.nlp.pipeline.DependencyGraph\x12I\n\x15\x63ollapsedDependencies\x18\t \x01(\x0b\x32*.edu.stanford.nlp.pipeline.DependencyGraph\x12T\n collapsedCCProcessedDependencies\x18\n \x01(\x0b\x32*.edu.stanford.nlp.pipeline.DependencyGraph\x12K\n\x17\x61lternativeDependencies\x18\r \x01(\x0b\x32*.edu.stanford.nlp.pipeline.DependencyGraph\x12?\n\x0copenieTriple\x18\x0e \x03(\x0b\x32).edu.stanford.nlp.pipeline.RelationTriple\x12<\n\tkbpTriple\x18\x10 \x03(\x0b\x32).edu.stanford.nlp.pipeline.RelationTriple\x12\x45\n\x10\x65ntailedSentence\x18\x0f \x03(\x0b\x32+.edu.stanford.nlp.pipeline.SentenceFragment\x12\x43\n\x0e\x65ntailedClause\x18# \x03(\x0b\x32+.edu.stanford.nlp.pipeline.SentenceFragment\x12H\n\x14\x65nhancedDependencies\x18\x11 \x01(\x0b\x32*.edu.stanford.nlp.pipeline.DependencyGraph\x12P\n\x1c\x65nhancedPlusPlusDependencies\x18\x12 \x01(\x0b\x32*.edu.stanford.nlp.pipeline.DependencyGraph\x12\x33\n\tcharacter\x18\x13 \x03(\x0b\x32 .edu.stanford.nlp.pipeline.Token\x12\x11\n\tparagraph\x18\x0b \x01(\r\x12\x0c\n\x04text\x18\x0c \x01(\t\x12\x12\n\nlineNumber\x18\x14 \x01(\r\x12\x1e\n\x16hasRelationAnnotations\x18\x33 \x01(\x08\x12\x31\n\x06\x65ntity\x18\x34 \x03(\x0b\x32!.edu.stanford.nlp.pipeline.Entity\x12\x35\n\x08relation\x18\x35 \x03(\x0b\x32#.edu.stanford.nlp.pipeline.Relation\x12$\n\x1chasNumerizedTokensAnnotation\x18\x36 \x01(\x08\x12\x37\n\x08mentions\x18\x37 \x03(\x0b\x32%.edu.stanford.nlp.pipeline.NERMention\x12<\n\x10mentionsForCoref\x18\x38 \x03(\x0b\x32\".edu.stanford.nlp.pipeline.Mention\x12\"\n\x1ahasCorefMentionsAnnotation\x18\x39 \x01(\x08\x12\x12\n\nsentenceID\x18: \x01(\t\x12\x13\n\x0bsectionDate\x18; \x01(\t\x12\x14\n\x0csectionIndex\x18< \x01(\r\x12\x13\n\x0bsectionName\x18= \x01(\t\x12\x15\n\rsectionAuthor\x18> \x01(\t\x12\r\n\x05\x64ocID\x18? \x01(\t\x12\x15\n\rsectionQuoted\x18@ \x01(\x08\x12#\n\x1bhasEntityMentionsAnnotation\x18\x41 \x01(\x08\x12\x1f\n\x17hasKBPTriplesAnnotation\x18\x44 \x01(\x08\x12\"\n\x1ahasOpenieTriplesAnnotation\x18\x45 \x01(\x08\x12\x14\n\x0c\x63hapterIndex\x18\x42 \x01(\r\x12\x16\n\x0eparagraphIndex\x18\x43 \x01(\r*\x05\x08\x64\x10\x80\x02\"\xad\n\n\x05Token\x12\x0c\n\x04word\x18\x01 \x01(\t\x12\x0b\n\x03pos\x18\x02 \x01(\t\x12\r\n\x05value\x18\x03 \x01(\t\x12\x10\n\x08\x63\x61tegory\x18\x04 \x01(\t\x12\x0e\n\x06\x62\x65\x66ore\x18\x05 \x01(\t\x12\r\n\x05\x61\x66ter\x18\x06 \x01(\t\x12\x14\n\x0coriginalText\x18\x07 \x01(\t\x12\x0b\n\x03ner\x18\x08 \x01(\t\x12\x11\n\tcoarseNER\x18> \x01(\t\x12\x16\n\x0e\x66ineGrainedNER\x18? 
\x01(\t\x12\x15\n\rnormalizedNER\x18\t \x01(\t\x12\r\n\x05lemma\x18\n \x01(\t\x12\x11\n\tbeginChar\x18\x0b \x01(\r\x12\x0f\n\x07\x65ndChar\x18\x0c \x01(\r\x12\x11\n\tutterance\x18\r \x01(\r\x12\x0f\n\x07speaker\x18\x0e \x01(\t\x12\x12\n\nbeginIndex\x18\x0f \x01(\r\x12\x10\n\x08\x65ndIndex\x18\x10 \x01(\r\x12\x17\n\x0ftokenBeginIndex\x18\x11 \x01(\r\x12\x15\n\rtokenEndIndex\x18\x12 \x01(\r\x12\x34\n\ntimexValue\x18\x13 \x01(\x0b\x32 .edu.stanford.nlp.pipeline.Timex\x12\x15\n\rhasXmlContext\x18\x15 \x01(\x08\x12\x12\n\nxmlContext\x18\x16 \x03(\t\x12\x16\n\x0e\x63orefClusterID\x18\x17 \x01(\r\x12\x0e\n\x06\x61nswer\x18\x18 \x01(\t\x12\x15\n\rheadWordIndex\x18\x1a \x01(\r\x12\x35\n\x08operator\x18\x1b \x01(\x0b\x32#.edu.stanford.nlp.pipeline.Operator\x12\x35\n\x08polarity\x18\x1c \x01(\x0b\x32#.edu.stanford.nlp.pipeline.Polarity\x12\x14\n\x0cpolarity_dir\x18\' \x01(\t\x12-\n\x04span\x18\x1d \x01(\x0b\x32\x1f.edu.stanford.nlp.pipeline.Span\x12\x11\n\tsentiment\x18\x1e \x01(\t\x12\x16\n\x0equotationIndex\x18\x1f \x01(\x05\x12\x42\n\x0e\x63onllUFeatures\x18 \x01(\x0b\x32*.edu.stanford.nlp.pipeline.MapStringString\x12\x11\n\tcoarseTag\x18! \x01(\t\x12\x38\n\x0f\x63onllUTokenSpan\x18\" \x01(\x0b\x32\x1f.edu.stanford.nlp.pipeline.Span\x12\x12\n\nconllUMisc\x18# \x01(\t\x12G\n\x13\x63onllUSecondaryDeps\x18$ \x01(\x0b\x32*.edu.stanford.nlp.pipeline.MapStringString\x12\x17\n\x0fwikipediaEntity\x18% \x01(\t\x12\x11\n\tisNewline\x18& \x01(\x08\x12\x0e\n\x06gender\x18\x33 \x01(\t\x12\x10\n\x08trueCase\x18\x34 \x01(\t\x12\x14\n\x0ctrueCaseText\x18\x35 \x01(\t\x12\x13\n\x0b\x63hineseChar\x18\x36 \x01(\t\x12\x12\n\nchineseSeg\x18\x37 \x01(\t\x12\x16\n\x0e\x63hineseXMLChar\x18< \x01(\t\x12\x13\n\x0bsectionName\x18\x38 \x01(\t\x12\x15\n\rsectionAuthor\x18\x39 \x01(\t\x12\x13\n\x0bsectionDate\x18: \x01(\t\x12\x17\n\x0fsectionEndLabel\x18; \x01(\t\x12\x0e\n\x06parent\x18= \x01(\t\x12\x19\n\x11\x63orefMentionIndex\x18@ \x03(\r\x12\x1a\n\x12\x65ntityMentionIndex\x18\x41 \x01(\r*\x05\x08\x64\x10\x80\x02\"\xe4\x03\n\x05Quote\x12\x0c\n\x04text\x18\x01 \x01(\t\x12\r\n\x05\x62\x65gin\x18\x02 \x01(\r\x12\x0b\n\x03\x65nd\x18\x03 \x01(\r\x12\x15\n\rsentenceBegin\x18\x05 \x01(\r\x12\x13\n\x0bsentenceEnd\x18\x06 \x01(\r\x12\x12\n\ntokenBegin\x18\x07 \x01(\r\x12\x10\n\x08tokenEnd\x18\x08 \x01(\r\x12\r\n\x05\x64ocid\x18\t \x01(\t\x12\r\n\x05index\x18\n \x01(\r\x12\x0e\n\x06\x61uthor\x18\x0b \x01(\t\x12\x0f\n\x07mention\x18\x0c \x01(\t\x12\x14\n\x0cmentionBegin\x18\r \x01(\r\x12\x12\n\nmentionEnd\x18\x0e \x01(\r\x12\x13\n\x0bmentionType\x18\x0f \x01(\t\x12\x14\n\x0cmentionSieve\x18\x10 \x01(\t\x12\x0f\n\x07speaker\x18\x11 \x01(\t\x12\x14\n\x0cspeakerSieve\x18\x12 \x01(\t\x12\x18\n\x10\x63\x61nonicalMention\x18\x13 \x01(\t\x12\x1d\n\x15\x63\x61nonicalMentionBegin\x18\x14 \x01(\r\x12\x1b\n\x13\x63\x61nonicalMentionEnd\x18\x15 \x01(\r\x12N\n\x1a\x61ttributionDependencyGraph\x18\x16 \x01(\x0b\x32*.edu.stanford.nlp.pipeline.DependencyGraph\"\xc7\x01\n\tParseTree\x12\x33\n\x05\x63hild\x18\x01 \x03(\x0b\x32$.edu.stanford.nlp.pipeline.ParseTree\x12\r\n\x05value\x18\x02 \x01(\t\x12\x17\n\x0fyieldBeginIndex\x18\x03 \x01(\r\x12\x15\n\ryieldEndIndex\x18\x04 \x01(\r\x12\r\n\x05score\x18\x05 \x01(\x01\x12\x37\n\tsentiment\x18\x06 \x01(\x0e\x32$.edu.stanford.nlp.pipeline.Sentiment\"\x96\x03\n\x0f\x44\x65pendencyGraph\x12=\n\x04node\x18\x01 \x03(\x0b\x32/.edu.stanford.nlp.pipeline.DependencyGraph.Node\x12=\n\x04\x65\x64ge\x18\x02 \x03(\x0b\x32/.edu.stanford.nlp.pipeline.DependencyGraph.Edge\x12\x10\n\x04root\x18\x03 
\x03(\rB\x02\x10\x01\x1a\x44\n\x04Node\x12\x15\n\rsentenceIndex\x18\x01 \x02(\r\x12\r\n\x05index\x18\x02 \x02(\r\x12\x16\n\x0e\x63opyAnnotation\x18\x03 \x01(\r\x1a\xac\x01\n\x04\x45\x64ge\x12\x0e\n\x06source\x18\x01 \x02(\r\x12\x0e\n\x06target\x18\x02 \x02(\r\x12\x0b\n\x03\x64\x65p\x18\x03 \x01(\t\x12\x0f\n\x07isExtra\x18\x04 \x01(\x08\x12\x12\n\nsourceCopy\x18\x05 \x01(\r\x12\x12\n\ntargetCopy\x18\x06 \x01(\r\x12>\n\x08language\x18\x07 \x01(\x0e\x32#.edu.stanford.nlp.pipeline.Language:\x07Unknown\"\xc6\x02\n\nCorefChain\x12\x0f\n\x07\x63hainID\x18\x01 \x02(\x05\x12\x43\n\x07mention\x18\x02 \x03(\x0b\x32\x32.edu.stanford.nlp.pipeline.CorefChain.CorefMention\x12\x16\n\x0erepresentative\x18\x03 \x02(\r\x1a\xc9\x01\n\x0c\x43orefMention\x12\x11\n\tmentionID\x18\x01 \x01(\x05\x12\x13\n\x0bmentionType\x18\x02 \x01(\t\x12\x0e\n\x06number\x18\x03 \x01(\t\x12\x0e\n\x06gender\x18\x04 \x01(\t\x12\x0f\n\x07\x61nimacy\x18\x05 \x01(\t\x12\x12\n\nbeginIndex\x18\x06 \x01(\r\x12\x10\n\x08\x65ndIndex\x18\x07 \x01(\r\x12\x11\n\theadIndex\x18\t \x01(\r\x12\x15\n\rsentenceIndex\x18\n \x01(\r\x12\x10\n\x08position\x18\x0b \x01(\r\"\xef\x08\n\x07Mention\x12\x11\n\tmentionID\x18\x01 \x01(\x05\x12\x13\n\x0bmentionType\x18\x02 \x01(\t\x12\x0e\n\x06number\x18\x03 \x01(\t\x12\x0e\n\x06gender\x18\x04 \x01(\t\x12\x0f\n\x07\x61nimacy\x18\x05 \x01(\t\x12\x0e\n\x06person\x18\x06 \x01(\t\x12\x12\n\nstartIndex\x18\x07 \x01(\r\x12\x10\n\x08\x65ndIndex\x18\t \x01(\r\x12\x11\n\theadIndex\x18\n \x01(\r\x12\x12\n\nheadString\x18\x0b \x01(\t\x12\x11\n\tnerString\x18\x0c \x01(\t\x12\x13\n\x0boriginalRef\x18\r \x01(\r\x12\x1a\n\x12goldCorefClusterID\x18\x0e \x01(\x05\x12\x16\n\x0e\x63orefClusterID\x18\x0f \x01(\x05\x12\x12\n\nmentionNum\x18\x10 \x01(\r\x12\x0f\n\x07sentNum\x18\x11 \x01(\r\x12\r\n\x05utter\x18\x12 \x01(\r\x12\x11\n\tparagraph\x18\x13 \x01(\r\x12\x11\n\tisSubject\x18\x14 \x01(\x08\x12\x16\n\x0eisDirectObject\x18\x15 \x01(\x08\x12\x18\n\x10isIndirectObject\x18\x16 \x01(\x08\x12\x1b\n\x13isPrepositionObject\x18\x17 \x01(\x08\x12\x0f\n\x07hasTwin\x18\x18 \x01(\x08\x12\x0f\n\x07generic\x18\x19 \x01(\x08\x12\x13\n\x0bisSingleton\x18\x1a \x01(\x08\x12\x1a\n\x12hasBasicDependency\x18\x1b \x01(\x08\x12\x1d\n\x15hasEnhancedDepenedncy\x18\x1c \x01(\x08\x12\x1b\n\x13hasContextParseTree\x18\x1d \x01(\x08\x12?\n\x0fheadIndexedWord\x18\x1e \x01(\x0b\x32&.edu.stanford.nlp.pipeline.IndexedWord\x12=\n\rdependingVerb\x18\x1f \x01(\x0b\x32&.edu.stanford.nlp.pipeline.IndexedWord\x12\x38\n\x08headWord\x18 \x01(\x0b\x32&.edu.stanford.nlp.pipeline.IndexedWord\x12;\n\x0bspeakerInfo\x18! 
\x01(\x0b\x32&.edu.stanford.nlp.pipeline.SpeakerInfo\x12=\n\rsentenceWords\x18\x32 \x03(\x0b\x32&.edu.stanford.nlp.pipeline.IndexedWord\x12<\n\x0coriginalSpan\x18\x33 \x03(\x0b\x32&.edu.stanford.nlp.pipeline.IndexedWord\x12\x12\n\ndependents\x18\x34 \x03(\t\x12\x19\n\x11preprocessedTerms\x18\x35 \x03(\t\x12\x13\n\x0b\x61ppositions\x18\x36 \x03(\x05\x12\x1c\n\x14predicateNominatives\x18\x37 \x03(\x05\x12\x18\n\x10relativePronouns\x18\x38 \x03(\x05\x12\x13\n\x0blistMembers\x18\x39 \x03(\x05\x12\x15\n\rbelongToLists\x18: \x03(\x05\"X\n\x0bIndexedWord\x12\x13\n\x0bsentenceNum\x18\x01 \x01(\r\x12\x12\n\ntokenIndex\x18\x02 \x01(\r\x12\r\n\x05\x64ocID\x18\x03 \x01(\r\x12\x11\n\tcopyCount\x18\x04 \x01(\r\"4\n\x0bSpeakerInfo\x12\x13\n\x0bspeakerName\x18\x01 \x01(\t\x12\x10\n\x08mentions\x18\x02 \x03(\x05\"\"\n\x04Span\x12\r\n\x05\x62\x65gin\x18\x01 \x02(\r\x12\x0b\n\x03\x65nd\x18\x02 \x02(\r\"w\n\x05Timex\x12\r\n\x05value\x18\x01 \x01(\t\x12\x10\n\x08\x61ltValue\x18\x02 \x01(\t\x12\x0c\n\x04text\x18\x03 \x01(\t\x12\x0c\n\x04type\x18\x04 \x01(\t\x12\x0b\n\x03tid\x18\x05 \x01(\t\x12\x12\n\nbeginPoint\x18\x06 \x01(\r\x12\x10\n\x08\x65ndPoint\x18\x07 \x01(\r\"\xdb\x01\n\x06\x45ntity\x12\x11\n\theadStart\x18\x06 \x01(\r\x12\x0f\n\x07headEnd\x18\x07 \x01(\r\x12\x13\n\x0bmentionType\x18\x08 \x01(\t\x12\x16\n\x0enormalizedName\x18\t \x01(\t\x12\x16\n\x0eheadTokenIndex\x18\n \x01(\r\x12\x0f\n\x07\x63orefID\x18\x0b \x01(\t\x12\x10\n\x08objectID\x18\x01 \x01(\t\x12\x13\n\x0b\x65xtentStart\x18\x02 \x01(\r\x12\x11\n\textentEnd\x18\x03 \x01(\r\x12\x0c\n\x04type\x18\x04 \x01(\t\x12\x0f\n\x07subtype\x18\x05 \x01(\t\"\xb7\x01\n\x08Relation\x12\x0f\n\x07\x61rgName\x18\x06 \x03(\t\x12.\n\x03\x61rg\x18\x07 \x03(\x0b\x32!.edu.stanford.nlp.pipeline.Entity\x12\x11\n\tsignature\x18\x08 \x01(\t\x12\x10\n\x08objectID\x18\x01 \x01(\t\x12\x13\n\x0b\x65xtentStart\x18\x02 \x01(\r\x12\x11\n\textentEnd\x18\x03 \x01(\r\x12\x0c\n\x04type\x18\x04 \x01(\t\x12\x0f\n\x07subtype\x18\x05 \x01(\t\"\xb2\x01\n\x08Operator\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x1b\n\x13quantifierSpanBegin\x18\x02 \x02(\x05\x12\x19\n\x11quantifierSpanEnd\x18\x03 \x02(\x05\x12\x18\n\x10subjectSpanBegin\x18\x04 \x02(\x05\x12\x16\n\x0esubjectSpanEnd\x18\x05 \x02(\x05\x12\x17\n\x0fobjectSpanBegin\x18\x06 \x02(\x05\x12\x15\n\robjectSpanEnd\x18\x07 \x02(\x05\"\xa9\x04\n\x08Polarity\x12K\n\x12projectEquivalence\x18\x01 \x02(\x0e\x32/.edu.stanford.nlp.pipeline.NaturalLogicRelation\x12Q\n\x18projectForwardEntailment\x18\x02 \x02(\x0e\x32/.edu.stanford.nlp.pipeline.NaturalLogicRelation\x12Q\n\x18projectReverseEntailment\x18\x03 \x02(\x0e\x32/.edu.stanford.nlp.pipeline.NaturalLogicRelation\x12H\n\x0fprojectNegation\x18\x04 \x02(\x0e\x32/.edu.stanford.nlp.pipeline.NaturalLogicRelation\x12K\n\x12projectAlternation\x18\x05 \x02(\x0e\x32/.edu.stanford.nlp.pipeline.NaturalLogicRelation\x12\x45\n\x0cprojectCover\x18\x06 \x02(\x0e\x32/.edu.stanford.nlp.pipeline.NaturalLogicRelation\x12L\n\x13projectIndependence\x18\x07 \x02(\x0e\x32/.edu.stanford.nlp.pipeline.NaturalLogicRelation\"\xdd\x02\n\nNERMention\x12\x15\n\rsentenceIndex\x18\x01 \x01(\r\x12%\n\x1dtokenStartInSentenceInclusive\x18\x02 \x02(\r\x12#\n\x1btokenEndInSentenceExclusive\x18\x03 \x02(\r\x12\x0b\n\x03ner\x18\x04 \x02(\t\x12\x15\n\rnormalizedNER\x18\x05 \x01(\t\x12\x12\n\nentityType\x18\x06 \x01(\t\x12/\n\x05timex\x18\x07 \x01(\x0b\x32 .edu.stanford.nlp.pipeline.Timex\x12\x17\n\x0fwikipediaEntity\x18\x08 \x01(\t\x12\x0e\n\x06gender\x18\t \x01(\t\x12\x1a\n\x12\x65ntityMentionIndex\x18\n 
\x01(\r\x12#\n\x1b\x63\x61nonicalEntityMentionIndex\x18\x0b \x01(\r\x12\x19\n\x11\x65ntityMentionText\x18\x0c \x01(\t\"Y\n\x10SentenceFragment\x12\x12\n\ntokenIndex\x18\x01 \x03(\r\x12\x0c\n\x04root\x18\x02 \x01(\r\x12\x14\n\x0c\x61ssumedTruth\x18\x03 \x01(\x08\x12\r\n\x05score\x18\x04 \x01(\x01\":\n\rTokenLocation\x12\x15\n\rsentenceIndex\x18\x01 \x01(\r\x12\x12\n\ntokenIndex\x18\x02 \x01(\r\"\x9a\x03\n\x0eRelationTriple\x12\x0f\n\x07subject\x18\x01 \x01(\t\x12\x10\n\x08relation\x18\x02 \x01(\t\x12\x0e\n\x06object\x18\x03 \x01(\t\x12\x12\n\nconfidence\x18\x04 \x01(\x01\x12?\n\rsubjectTokens\x18\r \x03(\x0b\x32(.edu.stanford.nlp.pipeline.TokenLocation\x12@\n\x0erelationTokens\x18\x0e \x03(\x0b\x32(.edu.stanford.nlp.pipeline.TokenLocation\x12>\n\x0cobjectTokens\x18\x0f \x03(\x0b\x32(.edu.stanford.nlp.pipeline.TokenLocation\x12\x38\n\x04tree\x18\x08 \x01(\x0b\x32*.edu.stanford.nlp.pipeline.DependencyGraph\x12\x0e\n\x06istmod\x18\t \x01(\x08\x12\x10\n\x08prefixBe\x18\n \x01(\x08\x12\x10\n\x08suffixBe\x18\x0b \x01(\x08\x12\x10\n\x08suffixOf\x18\x0c \x01(\x08\"-\n\x0fMapStringString\x12\x0b\n\x03key\x18\x01 \x03(\t\x12\r\n\x05value\x18\x02 \x03(\t\"*\n\x0cMapIntString\x12\x0b\n\x03key\x18\x01 \x03(\r\x12\r\n\x05value\x18\x02 \x03(\t\"\xfc\x01\n\x07Section\x12\x11\n\tcharBegin\x18\x01 \x02(\r\x12\x0f\n\x07\x63harEnd\x18\x02 \x02(\r\x12\x0e\n\x06\x61uthor\x18\x03 \x01(\t\x12\x17\n\x0fsentenceIndexes\x18\x04 \x03(\r\x12\x10\n\x08\x64\x61tetime\x18\x05 \x01(\t\x12\x30\n\x06quotes\x18\x06 \x03(\x0b\x32 .edu.stanford.nlp.pipeline.Quote\x12\x17\n\x0f\x61uthorCharBegin\x18\x07 \x01(\r\x12\x15\n\rauthorCharEnd\x18\x08 \x01(\r\x12\x30\n\x06xmlTag\x18\t \x02(\x0b\x32 .edu.stanford.nlp.pipeline.Token*\xa3\x01\n\x08Language\x12\x0b\n\x07Unknown\x10\x00\x12\x07\n\x03\x41ny\x10\x01\x12\n\n\x06\x41rabic\x10\x02\x12\x0b\n\x07\x43hinese\x10\x03\x12\x0b\n\x07\x45nglish\x10\x04\x12\n\n\x06German\x10\x05\x12\n\n\x06\x46rench\x10\x06\x12\n\n\x06Hebrew\x10\x07\x12\x0b\n\x07Spanish\x10\x08\x12\x14\n\x10UniversalEnglish\x10\t\x12\x14\n\x10UniversalChinese\x10\n*h\n\tSentiment\x12\x13\n\x0fSTRONG_NEGATIVE\x10\x00\x12\x11\n\rWEAK_NEGATIVE\x10\x01\x12\x0b\n\x07NEUTRAL\x10\x02\x12\x11\n\rWEAK_POSITIVE\x10\x03\x12\x13\n\x0fSTRONG_POSITIVE\x10\x04*\x93\x01\n\x14NaturalLogicRelation\x12\x0f\n\x0b\x45QUIVALENCE\x10\x00\x12\x16\n\x12\x46ORWARD_ENTAILMENT\x10\x01\x12\x16\n\x12REVERSE_ENTAILMENT\x10\x02\x12\x0c\n\x08NEGATION\x10\x03\x12\x0f\n\x0b\x41LTERNATION\x10\x04\x12\t\n\x05\x43OVER\x10\x05\x12\x10\n\x0cINDEPENDENCE\x10\x06\x42*\n\x19\x65\x64u.stanford.nlp.pipelineB\rCoreNLPProtos')
)
_LANGUAGE = _descriptor.EnumDescriptor(
name='Language',
full_name='edu.stanford.nlp.pipeline.Language',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='Unknown', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Any', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Arabic', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Chinese', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='English', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='German', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='French', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Hebrew', index=7, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Spanish', index=8, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UniversalEnglish', index=9, number=9,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UniversalChinese', index=10, number=10,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=9323,
serialized_end=9486,
)
_sym_db.RegisterEnumDescriptor(_LANGUAGE)
Language = enum_type_wrapper.EnumTypeWrapper(_LANGUAGE)
_SENTIMENT = _descriptor.EnumDescriptor(
name='Sentiment',
full_name='edu.stanford.nlp.pipeline.Sentiment',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STRONG_NEGATIVE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WEAK_NEGATIVE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NEUTRAL', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WEAK_POSITIVE', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STRONG_POSITIVE', index=4, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=9488,
serialized_end=9592,
)
_sym_db.RegisterEnumDescriptor(_SENTIMENT)
Sentiment = enum_type_wrapper.EnumTypeWrapper(_SENTIMENT)
_NATURALLOGICRELATION = _descriptor.EnumDescriptor(
name='NaturalLogicRelation',
full_name='edu.stanford.nlp.pipeline.NaturalLogicRelation',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='EQUIVALENCE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FORWARD_ENTAILMENT', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REVERSE_ENTAILMENT', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NEGATION', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ALTERNATION', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='COVER', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INDEPENDENCE', index=6, number=6,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=9595,
serialized_end=9742,
)
_sym_db.RegisterEnumDescriptor(_NATURALLOGICRELATION)
NaturalLogicRelation = enum_type_wrapper.EnumTypeWrapper(_NATURALLOGICRELATION)
Unknown = 0
Any = 1
Arabic = 2
Chinese = 3
English = 4
German = 5
French = 6
Hebrew = 7
Spanish = 8
UniversalEnglish = 9
UniversalChinese = 10
STRONG_NEGATIVE = 0
WEAK_NEGATIVE = 1
NEUTRAL = 2
WEAK_POSITIVE = 3
STRONG_POSITIVE = 4
EQUIVALENCE = 0
FORWARD_ENTAILMENT = 1
REVERSE_ENTAILMENT = 2
NEGATION = 3
ALTERNATION = 4
COVER = 5
INDEPENDENCE = 6
_DOCUMENT = _descriptor.Descriptor(
name='Document',
full_name='edu.stanford.nlp.pipeline.Document',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='text', full_name='edu.stanford.nlp.pipeline.Document.text', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sentence', full_name='edu.stanford.nlp.pipeline.Document.sentence', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='corefChain', full_name='edu.stanford.nlp.pipeline.Document.corefChain', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='docID', full_name='edu.stanford.nlp.pipeline.Document.docID', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='docDate', full_name='edu.stanford.nlp.pipeline.Document.docDate', index=4,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='calendar', full_name='edu.stanford.nlp.pipeline.Document.calendar', index=5,
number=8, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sentencelessToken', full_name='edu.stanford.nlp.pipeline.Document.sentencelessToken', index=6,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='character', full_name='edu.stanford.nlp.pipeline.Document.character', index=7,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='quote', full_name='edu.stanford.nlp.pipeline.Document.quote', index=8,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mentions', full_name='edu.stanford.nlp.pipeline.Document.mentions', index=9,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hasEntityMentionsAnnotation', full_name='edu.stanford.nlp.pipeline.Document.hasEntityMentionsAnnotation', index=10,
number=13, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='xmlDoc', full_name='edu.stanford.nlp.pipeline.Document.xmlDoc', index=11,
number=11, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sections', full_name='edu.stanford.nlp.pipeline.Document.sections', index=12,
number=12, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mentionsForCoref', full_name='edu.stanford.nlp.pipeline.Document.mentionsForCoref', index=13,
number=14, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hasCorefMentionAnnotation', full_name='edu.stanford.nlp.pipeline.Document.hasCorefMentionAnnotation', index=14,
number=15, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hasCorefAnnotation', full_name='edu.stanford.nlp.pipeline.Document.hasCorefAnnotation', index=15,
number=16, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='corefMentionToEntityMentionMappings', full_name='edu.stanford.nlp.pipeline.Document.corefMentionToEntityMentionMappings', index=16,
number=17, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='entityMentionToCorefMentionMappings', full_name='edu.stanford.nlp.pipeline.Document.entityMentionToCorefMentionMappings', index=17,
number=18, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(100, 256), ],
oneofs=[
],
serialized_start=45,
serialized_end=782,
)
_SENTENCE = _descriptor.Descriptor(
name='Sentence',
full_name='edu.stanford.nlp.pipeline.Sentence',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='token', full_name='edu.stanford.nlp.pipeline.Sentence.token', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tokenOffsetBegin', full_name='edu.stanford.nlp.pipeline.Sentence.tokenOffsetBegin', index=1,
number=2, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tokenOffsetEnd', full_name='edu.stanford.nlp.pipeline.Sentence.tokenOffsetEnd', index=2,
number=3, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sentenceIndex', full_name='edu.stanford.nlp.pipeline.Sentence.sentenceIndex', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='characterOffsetBegin', full_name='edu.stanford.nlp.pipeline.Sentence.characterOffsetBegin', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='characterOffsetEnd', full_name='edu.stanford.nlp.pipeline.Sentence.characterOffsetEnd', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parseTree', full_name='edu.stanford.nlp.pipeline.Sentence.parseTree', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='binarizedParseTree', full_name='edu.stanford.nlp.pipeline.Sentence.binarizedParseTree', index=7,
number=31, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='annotatedParseTree', full_name='edu.stanford.nlp.pipeline.Sentence.annotatedParseTree', index=8,
number=32, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sentiment', full_name='edu.stanford.nlp.pipeline.Sentence.sentiment', index=9,
number=33, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='kBestParseTrees', full_name='edu.stanford.nlp.pipeline.Sentence.kBestParseTrees', index=10,
number=34, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='basicDependencies', full_name='edu.stanford.nlp.pipeline.Sentence.basicDependencies', index=11,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='collapsedDependencies', full_name='edu.stanford.nlp.pipeline.Sentence.collapsedDependencies', index=12,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='collapsedCCProcessedDependencies', full_name='edu.stanford.nlp.pipeline.Sentence.collapsedCCProcessedDependencies', index=13,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='alternativeDependencies', full_name='edu.stanford.nlp.pipeline.Sentence.alternativeDependencies', index=14,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='openieTriple', full_name='edu.stanford.nlp.pipeline.Sentence.openieTriple', index=15,
number=14, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='kbpTriple', full_name='edu.stanford.nlp.pipeline.Sentence.kbpTriple', index=16,
number=16, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='entailedSentence', full_name='edu.stanford.nlp.pipeline.Sentence.entailedSentence', index=17,
number=15, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='entailedClause', full_name='edu.stanford.nlp.pipeline.Sentence.entailedClause', index=18,
number=35, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enhancedDependencies', full_name='edu.stanford.nlp.pipeline.Sentence.enhancedDependencies', index=19,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enhancedPlusPlusDependencies', full_name='edu.stanford.nlp.pipeline.Sentence.enhancedPlusPlusDependencies', index=20,
number=18, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='character', full_name='edu.stanford.nlp.pipeline.Sentence.character', index=21,
number=19, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='paragraph', full_name='edu.stanford.nlp.pipeline.Sentence.paragraph', index=22,
number=11, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='text', full_name='edu.stanford.nlp.pipeline.Sentence.text', index=23,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lineNumber', full_name='edu.stanford.nlp.pipeline.Sentence.lineNumber', index=24,
number=20, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hasRelationAnnotations', full_name='edu.stanford.nlp.pipeline.Sentence.hasRelationAnnotations', index=25,
number=51, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='entity', full_name='edu.stanford.nlp.pipeline.Sentence.entity', index=26,
number=52, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='relation', full_name='edu.stanford.nlp.pipeline.Sentence.relation', index=27,
number=53, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hasNumerizedTokensAnnotation', full_name='edu.stanford.nlp.pipeline.Sentence.hasNumerizedTokensAnnotation', index=28,
number=54, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mentions', full_name='edu.stanford.nlp.pipeline.Sentence.mentions', index=29,
number=55, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mentionsForCoref', full_name='edu.stanford.nlp.pipeline.Sentence.mentionsForCoref', index=30,
number=56, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hasCorefMentionsAnnotation', full_name='edu.stanford.nlp.pipeline.Sentence.hasCorefMentionsAnnotation', index=31,
number=57, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sentenceID', full_name='edu.stanford.nlp.pipeline.Sentence.sentenceID', index=32,
number=58, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sectionDate', full_name='edu.stanford.nlp.pipeline.Sentence.sectionDate', index=33,
number=59, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sectionIndex', full_name='edu.stanford.nlp.pipeline.Sentence.sectionIndex', index=34,
number=60, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sectionName', full_name='edu.stanford.nlp.pipeline.Sentence.sectionName', index=35,
number=61, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sectionAuthor', full_name='edu.stanford.nlp.pipeline.Sentence.sectionAuthor', index=36,
number=62, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='docID', full_name='edu.stanford.nlp.pipeline.Sentence.docID', index=37,
number=63, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sectionQuoted', full_name='edu.stanford.nlp.pipeline.Sentence.sectionQuoted', index=38,
number=64, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hasEntityMentionsAnnotation', full_name='edu.stanford.nlp.pipeline.Sentence.hasEntityMentionsAnnotation', index=39,
number=65, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hasKBPTriplesAnnotation', full_name='edu.stanford.nlp.pipeline.Sentence.hasKBPTriplesAnnotation', index=40,
number=68, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hasOpenieTriplesAnnotation', full_name='edu.stanford.nlp.pipeline.Sentence.hasOpenieTriplesAnnotation', index=41,
number=69, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='chapterIndex', full_name='edu.stanford.nlp.pipeline.Sentence.chapterIndex', index=42,
number=66, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='paragraphIndex', full_name='edu.stanford.nlp.pipeline.Sentence.paragraphIndex', index=43,
number=67, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(100, 256), ],
oneofs=[
],
serialized_start=785,
serialized_end=2719,
)
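# _TOKEN: descriptor for edu.stanford.nlp.pipeline.Token, the per-token record:
# surface form (`word`, `value`, `originalText`, `before`/`after` whitespace),
# tags and annotations (pos, ner / coarseNER / fineGrainedNER / normalizedNER,
# lemma, sentiment, trueCase, gender), character and token offsets, CoNLL-U
# fields (conllUFeatures, conllUMisc, conllUSecondaryDeps, conllUTokenSpan),
# Chinese segmentation fields, section metadata, and coref / entity mention indices.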
_TOKEN = _descriptor.Descriptor(
name='Token',
full_name='edu.stanford.nlp.pipeline.Token',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='word', full_name='edu.stanford.nlp.pipeline.Token.word', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pos', full_name='edu.stanford.nlp.pipeline.Token.pos', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='edu.stanford.nlp.pipeline.Token.value', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='category', full_name='edu.stanford.nlp.pipeline.Token.category', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='before', full_name='edu.stanford.nlp.pipeline.Token.before', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='after', full_name='edu.stanford.nlp.pipeline.Token.after', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='originalText', full_name='edu.stanford.nlp.pipeline.Token.originalText', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ner', full_name='edu.stanford.nlp.pipeline.Token.ner', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='coarseNER', full_name='edu.stanford.nlp.pipeline.Token.coarseNER', index=8,
number=62, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fineGrainedNER', full_name='edu.stanford.nlp.pipeline.Token.fineGrainedNER', index=9,
number=63, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='normalizedNER', full_name='edu.stanford.nlp.pipeline.Token.normalizedNER', index=10,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lemma', full_name='edu.stanford.nlp.pipeline.Token.lemma', index=11,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='beginChar', full_name='edu.stanford.nlp.pipeline.Token.beginChar', index=12,
number=11, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='endChar', full_name='edu.stanford.nlp.pipeline.Token.endChar', index=13,
number=12, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='utterance', full_name='edu.stanford.nlp.pipeline.Token.utterance', index=14,
number=13, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='speaker', full_name='edu.stanford.nlp.pipeline.Token.speaker', index=15,
number=14, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='beginIndex', full_name='edu.stanford.nlp.pipeline.Token.beginIndex', index=16,
number=15, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='endIndex', full_name='edu.stanford.nlp.pipeline.Token.endIndex', index=17,
number=16, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tokenBeginIndex', full_name='edu.stanford.nlp.pipeline.Token.tokenBeginIndex', index=18,
number=17, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tokenEndIndex', full_name='edu.stanford.nlp.pipeline.Token.tokenEndIndex', index=19,
number=18, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='timexValue', full_name='edu.stanford.nlp.pipeline.Token.timexValue', index=20,
number=19, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hasXmlContext', full_name='edu.stanford.nlp.pipeline.Token.hasXmlContext', index=21,
number=21, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='xmlContext', full_name='edu.stanford.nlp.pipeline.Token.xmlContext', index=22,
number=22, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='corefClusterID', full_name='edu.stanford.nlp.pipeline.Token.corefClusterID', index=23,
number=23, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='answer', full_name='edu.stanford.nlp.pipeline.Token.answer', index=24,
number=24, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='headWordIndex', full_name='edu.stanford.nlp.pipeline.Token.headWordIndex', index=25,
number=26, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='operator', full_name='edu.stanford.nlp.pipeline.Token.operator', index=26,
number=27, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='polarity', full_name='edu.stanford.nlp.pipeline.Token.polarity', index=27,
number=28, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='polarity_dir', full_name='edu.stanford.nlp.pipeline.Token.polarity_dir', index=28,
number=39, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='span', full_name='edu.stanford.nlp.pipeline.Token.span', index=29,
number=29, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sentiment', full_name='edu.stanford.nlp.pipeline.Token.sentiment', index=30,
number=30, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='quotationIndex', full_name='edu.stanford.nlp.pipeline.Token.quotationIndex', index=31,
number=31, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='conllUFeatures', full_name='edu.stanford.nlp.pipeline.Token.conllUFeatures', index=32,
number=32, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='coarseTag', full_name='edu.stanford.nlp.pipeline.Token.coarseTag', index=33,
number=33, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='conllUTokenSpan', full_name='edu.stanford.nlp.pipeline.Token.conllUTokenSpan', index=34,
number=34, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='conllUMisc', full_name='edu.stanford.nlp.pipeline.Token.conllUMisc', index=35,
number=35, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='conllUSecondaryDeps', full_name='edu.stanford.nlp.pipeline.Token.conllUSecondaryDeps', index=36,
number=36, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='wikipediaEntity', full_name='edu.stanford.nlp.pipeline.Token.wikipediaEntity', index=37,
number=37, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='isNewline', full_name='edu.stanford.nlp.pipeline.Token.isNewline', index=38,
number=38, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gender', full_name='edu.stanford.nlp.pipeline.Token.gender', index=39,
number=51, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='trueCase', full_name='edu.stanford.nlp.pipeline.Token.trueCase', index=40,
number=52, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='trueCaseText', full_name='edu.stanford.nlp.pipeline.Token.trueCaseText', index=41,
number=53, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='chineseChar', full_name='edu.stanford.nlp.pipeline.Token.chineseChar', index=42,
number=54, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='chineseSeg', full_name='edu.stanford.nlp.pipeline.Token.chineseSeg', index=43,
number=55, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='chineseXMLChar', full_name='edu.stanford.nlp.pipeline.Token.chineseXMLChar', index=44,
number=60, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sectionName', full_name='edu.stanford.nlp.pipeline.Token.sectionName', index=45,
number=56, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sectionAuthor', full_name='edu.stanford.nlp.pipeline.Token.sectionAuthor', index=46,
number=57, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sectionDate', full_name='edu.stanford.nlp.pipeline.Token.sectionDate', index=47,
number=58, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sectionEndLabel', full_name='edu.stanford.nlp.pipeline.Token.sectionEndLabel', index=48,
number=59, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parent', full_name='edu.stanford.nlp.pipeline.Token.parent', index=49,
number=61, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='corefMentionIndex', full_name='edu.stanford.nlp.pipeline.Token.corefMentionIndex', index=50,
number=64, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='entityMentionIndex', full_name='edu.stanford.nlp.pipeline.Token.entityMentionIndex', index=51,
number=65, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(100, 256), ],
oneofs=[
],
serialized_start=2722,
serialized_end=4047,
)
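# _QUOTE: descriptor for edu.stanford.nlp.pipeline.Quote, one extracted quotation:
# its text, character / token / sentence spans, and the attribution fields
# (mention, canonicalMention, speaker, the sieves that produced them, and an
# optional attributionDependencyGraph).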
_QUOTE = _descriptor.Descriptor(
name='Quote',
full_name='edu.stanford.nlp.pipeline.Quote',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='text', full_name='edu.stanford.nlp.pipeline.Quote.text', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='begin', full_name='edu.stanford.nlp.pipeline.Quote.begin', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end', full_name='edu.stanford.nlp.pipeline.Quote.end', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sentenceBegin', full_name='edu.stanford.nlp.pipeline.Quote.sentenceBegin', index=3,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sentenceEnd', full_name='edu.stanford.nlp.pipeline.Quote.sentenceEnd', index=4,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tokenBegin', full_name='edu.stanford.nlp.pipeline.Quote.tokenBegin', index=5,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tokenEnd', full_name='edu.stanford.nlp.pipeline.Quote.tokenEnd', index=6,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='docid', full_name='edu.stanford.nlp.pipeline.Quote.docid', index=7,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='index', full_name='edu.stanford.nlp.pipeline.Quote.index', index=8,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='author', full_name='edu.stanford.nlp.pipeline.Quote.author', index=9,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mention', full_name='edu.stanford.nlp.pipeline.Quote.mention', index=10,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mentionBegin', full_name='edu.stanford.nlp.pipeline.Quote.mentionBegin', index=11,
number=13, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mentionEnd', full_name='edu.stanford.nlp.pipeline.Quote.mentionEnd', index=12,
number=14, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mentionType', full_name='edu.stanford.nlp.pipeline.Quote.mentionType', index=13,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mentionSieve', full_name='edu.stanford.nlp.pipeline.Quote.mentionSieve', index=14,
number=16, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='speaker', full_name='edu.stanford.nlp.pipeline.Quote.speaker', index=15,
number=17, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='speakerSieve', full_name='edu.stanford.nlp.pipeline.Quote.speakerSieve', index=16,
number=18, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='canonicalMention', full_name='edu.stanford.nlp.pipeline.Quote.canonicalMention', index=17,
number=19, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='canonicalMentionBegin', full_name='edu.stanford.nlp.pipeline.Quote.canonicalMentionBegin', index=18,
number=20, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='canonicalMentionEnd', full_name='edu.stanford.nlp.pipeline.Quote.canonicalMentionEnd', index=19,
number=21, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='attributionDependencyGraph', full_name='edu.stanford.nlp.pipeline.Quote.attributionDependencyGraph', index=20,
number=22, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=4050,
serialized_end=4534,
)
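# _PARSETREE: descriptor for edu.stanford.nlp.pipeline.ParseTree, a recursive
# constituency node: repeated `child` subtrees, the node label in `value`,
# yieldBeginIndex / yieldEndIndex, an optional score, and a sentiment enum value.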
_PARSETREE = _descriptor.Descriptor(
name='ParseTree',
full_name='edu.stanford.nlp.pipeline.ParseTree',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='child', full_name='edu.stanford.nlp.pipeline.ParseTree.child', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='edu.stanford.nlp.pipeline.ParseTree.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='yieldBeginIndex', full_name='edu.stanford.nlp.pipeline.ParseTree.yieldBeginIndex', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='yieldEndIndex', full_name='edu.stanford.nlp.pipeline.ParseTree.yieldEndIndex', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='score', full_name='edu.stanford.nlp.pipeline.ParseTree.score', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sentiment', full_name='edu.stanford.nlp.pipeline.ParseTree.sentiment', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=4537,
serialized_end=4736,
)
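# _DEPENDENCYGRAPH_NODE: descriptor for the nested DependencyGraph.Node message,
# which identifies a token by (sentenceIndex, index) plus an optional
# `copyAnnotation` for copied nodes.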
_DEPENDENCYGRAPH_NODE = _descriptor.Descriptor(
name='Node',
full_name='edu.stanford.nlp.pipeline.DependencyGraph.Node',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sentenceIndex', full_name='edu.stanford.nlp.pipeline.DependencyGraph.Node.sentenceIndex', index=0,
number=1, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='index', full_name='edu.stanford.nlp.pipeline.DependencyGraph.Node.index', index=1,
number=2, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='copyAnnotation', full_name='edu.stanford.nlp.pipeline.DependencyGraph.Node.copyAnnotation', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=4902,
serialized_end=4970,
)
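# _DEPENDENCYGRAPH_EDGE: descriptor for the nested DependencyGraph.Edge message:
# `source` and `target` node indices, the `dep` relation label, an `isExtra` flag,
# source / target copy numbers, and a language enum.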
_DEPENDENCYGRAPH_EDGE = _descriptor.Descriptor(
name='Edge',
full_name='edu.stanford.nlp.pipeline.DependencyGraph.Edge',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='source', full_name='edu.stanford.nlp.pipeline.DependencyGraph.Edge.source', index=0,
number=1, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='target', full_name='edu.stanford.nlp.pipeline.DependencyGraph.Edge.target', index=1,
number=2, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dep', full_name='edu.stanford.nlp.pipeline.DependencyGraph.Edge.dep', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='isExtra', full_name='edu.stanford.nlp.pipeline.DependencyGraph.Edge.isExtra', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sourceCopy', full_name='edu.stanford.nlp.pipeline.DependencyGraph.Edge.sourceCopy', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='targetCopy', full_name='edu.stanford.nlp.pipeline.DependencyGraph.Edge.targetCopy', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='language', full_name='edu.stanford.nlp.pipeline.DependencyGraph.Edge.language', index=6,
number=7, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=4973,
serialized_end=5145,
)
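# _DEPENDENCYGRAPH: descriptor for edu.stanford.nlp.pipeline.DependencyGraph, which
# ties the nested Node and Edge messages together: repeated `node` and `edge`
# entries plus repeated (packed) `root` indices. Minimal consumer sketch, assuming
# the protoc-generated message classes defined later in this module and `graph`
# holding a deserialized DependencyGraph:
#
#     for edge in graph.edge:
#         print(edge.source, edge.dep, edge.target)   # governor index, relation, dependent index
#     print(list(graph.root))                         # indices of the root token(s)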
_DEPENDENCYGRAPH = _descriptor.Descriptor(
name='DependencyGraph',
full_name='edu.stanford.nlp.pipeline.DependencyGraph',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='node', full_name='edu.stanford.nlp.pipeline.DependencyGraph.node', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='edge', full_name='edu.stanford.nlp.pipeline.DependencyGraph.edge', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='root', full_name='edu.stanford.nlp.pipeline.DependencyGraph.root', index=2,
number=3, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001')), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_DEPENDENCYGRAPH_NODE, _DEPENDENCYGRAPH_EDGE, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=4739,
serialized_end=5145,
)
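# _COREFCHAIN_COREFMENTION: descriptor for the nested CorefChain.CorefMention
# message, one mention in a coreference chain: mentionID, mentionType, number,
# gender, animacy, the begin / end / head token indices, the sentence index,
# and a position.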
_COREFCHAIN_COREFMENTION = _descriptor.Descriptor(
name='CorefMention',
full_name='edu.stanford.nlp.pipeline.CorefChain.CorefMention',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='mentionID', full_name='edu.stanford.nlp.pipeline.CorefChain.CorefMention.mentionID', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mentionType', full_name='edu.stanford.nlp.pipeline.CorefChain.CorefMention.mentionType', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='number', full_name='edu.stanford.nlp.pipeline.CorefChain.CorefMention.number', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gender', full_name='edu.stanford.nlp.pipeline.CorefChain.CorefMention.gender', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='animacy', full_name='edu.stanford.nlp.pipeline.CorefChain.CorefMention.animacy', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='beginIndex', full_name='edu.stanford.nlp.pipeline.CorefChain.CorefMention.beginIndex', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='endIndex', full_name='edu.stanford.nlp.pipeline.CorefChain.CorefMention.endIndex', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='headIndex', full_name='edu.stanford.nlp.pipeline.CorefChain.CorefMention.headIndex', index=7,
number=9, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sentenceIndex', full_name='edu.stanford.nlp.pipeline.CorefChain.CorefMention.sentenceIndex', index=8,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='position', full_name='edu.stanford.nlp.pipeline.CorefChain.CorefMention.position', index=9,
number=11, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=5273,
serialized_end=5474,
)
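# _COREFCHAIN: descriptor for edu.stanford.nlp.pipeline.CorefChain: a required
# chainID, the repeated `mention` list, and a required `representative` field
# (presumably an index into that mention list, as in CoreNLP's CorefChain API).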
_COREFCHAIN = _descriptor.Descriptor(
name='CorefChain',
full_name='edu.stanford.nlp.pipeline.CorefChain',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='chainID', full_name='edu.stanford.nlp.pipeline.CorefChain.chainID', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mention', full_name='edu.stanford.nlp.pipeline.CorefChain.mention', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='representative', full_name='edu.stanford.nlp.pipeline.CorefChain.representative', index=2,
number=3, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_COREFCHAIN_COREFMENTION, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=5148,
serialized_end=5474,
)
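# _MENTION: descriptor for edu.stanford.nlp.pipeline.Mention, the richer mention
# record used on the coref side: identity and type fields (mentionID, mentionType,
# number, gender, animacy, person), span and head indices, head / NER strings,
# gold and predicted cluster IDs, positional fields (mentionNum, sentNum, utter,
# paragraph), and boolean properties (isSubject, isDirectObject, isIndirectObject,
# isPrepositionObject, hasTwin, generic, isSingleton, plus the hasBasicDependency,
# hasEnhancedDepenedncy, and hasContextParseTree flags).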
_MENTION = _descriptor.Descriptor(
name='Mention',
full_name='edu.stanford.nlp.pipeline.Mention',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='mentionID', full_name='edu.stanford.nlp.pipeline.Mention.mentionID', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mentionType', full_name='edu.stanford.nlp.pipeline.Mention.mentionType', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='number', full_name='edu.stanford.nlp.pipeline.Mention.number', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gender', full_name='edu.stanford.nlp.pipeline.Mention.gender', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='animacy', full_name='edu.stanford.nlp.pipeline.Mention.animacy', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='person', full_name='edu.stanford.nlp.pipeline.Mention.person', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='startIndex', full_name='edu.stanford.nlp.pipeline.Mention.startIndex', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='endIndex', full_name='edu.stanford.nlp.pipeline.Mention.endIndex', index=7,
number=9, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='headIndex', full_name='edu.stanford.nlp.pipeline.Mention.headIndex', index=8,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='headString', full_name='edu.stanford.nlp.pipeline.Mention.headString', index=9,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nerString', full_name='edu.stanford.nlp.pipeline.Mention.nerString', index=10,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='originalRef', full_name='edu.stanford.nlp.pipeline.Mention.originalRef', index=11,
number=13, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='goldCorefClusterID', full_name='edu.stanford.nlp.pipeline.Mention.goldCorefClusterID', index=12,
number=14, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='corefClusterID', full_name='edu.stanford.nlp.pipeline.Mention.corefClusterID', index=13,
number=15, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mentionNum', full_name='edu.stanford.nlp.pipeline.Mention.mentionNum', index=14,
number=16, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sentNum', full_name='edu.stanford.nlp.pipeline.Mention.sentNum', index=15,
number=17, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='utter', full_name='edu.stanford.nlp.pipeline.Mention.utter', index=16,
number=18, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='paragraph', full_name='edu.stanford.nlp.pipeline.Mention.paragraph', index=17,
number=19, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='isSubject', full_name='edu.stanford.nlp.pipeline.Mention.isSubject', index=18,
number=20, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='isDirectObject', full_name='edu.stanford.nlp.pipeline.Mention.isDirectObject', index=19,
number=21, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='isIndirectObject', full_name='edu.stanford.nlp.pipeline.Mention.isIndirectObject', index=20,
number=22, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='isPrepositionObject', full_name='edu.stanford.nlp.pipeline.Mention.isPrepositionObject', index=21,
number=23, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hasTwin', full_name='edu.stanford.nlp.pipeline.Mention.hasTwin', index=22,
number=24, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='generic', full_name='edu.stanford.nlp.pipeline.Mention.generic', index=23,
number=25, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='isSingleton', full_name='edu.stanford.nlp.pipeline.Mention.isSingleton', index=24,
number=26, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hasBasicDependency', full_name='edu.stanford.nlp.pipeline.Mention.hasBasicDependency', index=25,
number=27, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hasEnhancedDepenedncy', full_name='edu.stanford.nlp.pipeline.Mention.hasEnhancedDepenedncy', index=26,
number=28, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hasContextParseTree', full_name='edu.stanford.nlp.pipeline.Mention.hasContextParseTree', index=27,
number=29, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='headIndexedWord', full_name='edu.stanford.nlp.pipeline.Mention.headIndexedWord', index=28,
number=30, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dependingVerb', full_name='edu.stanford.nlp.pipeline.Mention.dependingVerb', index=29,
number=31, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='headWord', full_name='edu.stanford.nlp.pipeline.Mention.headWord', index=30,
number=32, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='speakerInfo', full_name='edu.stanford.nlp.pipeline.Mention.speakerInfo', index=31,
number=33, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sentenceWords', full_name='edu.stanford.nlp.pipeline.Mention.sentenceWords', index=32,
number=50, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='originalSpan', full_name='edu.stanford.nlp.pipeline.Mention.originalSpan', index=33,
number=51, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dependents', full_name='edu.stanford.nlp.pipeline.Mention.dependents', index=34,
number=52, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='preprocessedTerms', full_name='edu.stanford.nlp.pipeline.Mention.preprocessedTerms', index=35,
number=53, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='appositions', full_name='edu.stanford.nlp.pipeline.Mention.appositions', index=36,
number=54, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='predicateNominatives', full_name='edu.stanford.nlp.pipeline.Mention.predicateNominatives', index=37,
number=55, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='relativePronouns', full_name='edu.stanford.nlp.pipeline.Mention.relativePronouns', index=38,
number=56, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='listMembers', full_name='edu.stanford.nlp.pipeline.Mention.listMembers', index=39,
number=57, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='belongToLists', full_name='edu.stanford.nlp.pipeline.Mention.belongToLists', index=40,
number=58, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=5477,
serialized_end=6612,
)
_INDEXEDWORD = _descriptor.Descriptor(
name='IndexedWord',
full_name='edu.stanford.nlp.pipeline.IndexedWord',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sentenceNum', full_name='edu.stanford.nlp.pipeline.IndexedWord.sentenceNum', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tokenIndex', full_name='edu.stanford.nlp.pipeline.IndexedWord.tokenIndex', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='docID', full_name='edu.stanford.nlp.pipeline.IndexedWord.docID', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='copyCount', full_name='edu.stanford.nlp.pipeline.IndexedWord.copyCount', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=6614,
serialized_end=6702,
)
_SPEAKERINFO = _descriptor.Descriptor(
name='SpeakerInfo',
full_name='edu.stanford.nlp.pipeline.SpeakerInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='speakerName', full_name='edu.stanford.nlp.pipeline.SpeakerInfo.speakerName', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mentions', full_name='edu.stanford.nlp.pipeline.SpeakerInfo.mentions', index=1,
number=2, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=6704,
serialized_end=6756,
)
_SPAN = _descriptor.Descriptor(
name='Span',
full_name='edu.stanford.nlp.pipeline.Span',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='begin', full_name='edu.stanford.nlp.pipeline.Span.begin', index=0,
number=1, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end', full_name='edu.stanford.nlp.pipeline.Span.end', index=1,
number=2, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=6758,
serialized_end=6792,
)
_TIMEX = _descriptor.Descriptor(
name='Timex',
full_name='edu.stanford.nlp.pipeline.Timex',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='edu.stanford.nlp.pipeline.Timex.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='altValue', full_name='edu.stanford.nlp.pipeline.Timex.altValue', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='text', full_name='edu.stanford.nlp.pipeline.Timex.text', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='edu.stanford.nlp.pipeline.Timex.type', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tid', full_name='edu.stanford.nlp.pipeline.Timex.tid', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='beginPoint', full_name='edu.stanford.nlp.pipeline.Timex.beginPoint', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='endPoint', full_name='edu.stanford.nlp.pipeline.Timex.endPoint', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=6794,
serialized_end=6913,
)
_ENTITY = _descriptor.Descriptor(
name='Entity',
full_name='edu.stanford.nlp.pipeline.Entity',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='headStart', full_name='edu.stanford.nlp.pipeline.Entity.headStart', index=0,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='headEnd', full_name='edu.stanford.nlp.pipeline.Entity.headEnd', index=1,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mentionType', full_name='edu.stanford.nlp.pipeline.Entity.mentionType', index=2,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='normalizedName', full_name='edu.stanford.nlp.pipeline.Entity.normalizedName', index=3,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='headTokenIndex', full_name='edu.stanford.nlp.pipeline.Entity.headTokenIndex', index=4,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='corefID', full_name='edu.stanford.nlp.pipeline.Entity.corefID', index=5,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='objectID', full_name='edu.stanford.nlp.pipeline.Entity.objectID', index=6,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='extentStart', full_name='edu.stanford.nlp.pipeline.Entity.extentStart', index=7,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='extentEnd', full_name='edu.stanford.nlp.pipeline.Entity.extentEnd', index=8,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='edu.stanford.nlp.pipeline.Entity.type', index=9,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='subtype', full_name='edu.stanford.nlp.pipeline.Entity.subtype', index=10,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=6916,
serialized_end=7135,
)
_RELATION = _descriptor.Descriptor(
name='Relation',
full_name='edu.stanford.nlp.pipeline.Relation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='argName', full_name='edu.stanford.nlp.pipeline.Relation.argName', index=0,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='arg', full_name='edu.stanford.nlp.pipeline.Relation.arg', index=1,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='signature', full_name='edu.stanford.nlp.pipeline.Relation.signature', index=2,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='objectID', full_name='edu.stanford.nlp.pipeline.Relation.objectID', index=3,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='extentStart', full_name='edu.stanford.nlp.pipeline.Relation.extentStart', index=4,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='extentEnd', full_name='edu.stanford.nlp.pipeline.Relation.extentEnd', index=5,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='edu.stanford.nlp.pipeline.Relation.type', index=6,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='subtype', full_name='edu.stanford.nlp.pipeline.Relation.subtype', index=7,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=7138,
serialized_end=7321,
)
_OPERATOR = _descriptor.Descriptor(
name='Operator',
full_name='edu.stanford.nlp.pipeline.Operator',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='edu.stanford.nlp.pipeline.Operator.name', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='quantifierSpanBegin', full_name='edu.stanford.nlp.pipeline.Operator.quantifierSpanBegin', index=1,
number=2, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='quantifierSpanEnd', full_name='edu.stanford.nlp.pipeline.Operator.quantifierSpanEnd', index=2,
number=3, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='subjectSpanBegin', full_name='edu.stanford.nlp.pipeline.Operator.subjectSpanBegin', index=3,
number=4, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='subjectSpanEnd', full_name='edu.stanford.nlp.pipeline.Operator.subjectSpanEnd', index=4,
number=5, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='objectSpanBegin', full_name='edu.stanford.nlp.pipeline.Operator.objectSpanBegin', index=5,
number=6, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='objectSpanEnd', full_name='edu.stanford.nlp.pipeline.Operator.objectSpanEnd', index=6,
number=7, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=7324,
serialized_end=7502,
)
_POLARITY = _descriptor.Descriptor(
name='Polarity',
full_name='edu.stanford.nlp.pipeline.Polarity',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='projectEquivalence', full_name='edu.stanford.nlp.pipeline.Polarity.projectEquivalence', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='projectForwardEntailment', full_name='edu.stanford.nlp.pipeline.Polarity.projectForwardEntailment', index=1,
number=2, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='projectReverseEntailment', full_name='edu.stanford.nlp.pipeline.Polarity.projectReverseEntailment', index=2,
number=3, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='projectNegation', full_name='edu.stanford.nlp.pipeline.Polarity.projectNegation', index=3,
number=4, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='projectAlternation', full_name='edu.stanford.nlp.pipeline.Polarity.projectAlternation', index=4,
number=5, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='projectCover', full_name='edu.stanford.nlp.pipeline.Polarity.projectCover', index=5,
number=6, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='projectIndependence', full_name='edu.stanford.nlp.pipeline.Polarity.projectIndependence', index=6,
number=7, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=7505,
serialized_end=8058,
)
_NERMENTION = _descriptor.Descriptor(
name='NERMention',
full_name='edu.stanford.nlp.pipeline.NERMention',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sentenceIndex', full_name='edu.stanford.nlp.pipeline.NERMention.sentenceIndex', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tokenStartInSentenceInclusive', full_name='edu.stanford.nlp.pipeline.NERMention.tokenStartInSentenceInclusive', index=1,
number=2, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tokenEndInSentenceExclusive', full_name='edu.stanford.nlp.pipeline.NERMention.tokenEndInSentenceExclusive', index=2,
number=3, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ner', full_name='edu.stanford.nlp.pipeline.NERMention.ner', index=3,
number=4, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='normalizedNER', full_name='edu.stanford.nlp.pipeline.NERMention.normalizedNER', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='entityType', full_name='edu.stanford.nlp.pipeline.NERMention.entityType', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='timex', full_name='edu.stanford.nlp.pipeline.NERMention.timex', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='wikipediaEntity', full_name='edu.stanford.nlp.pipeline.NERMention.wikipediaEntity', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gender', full_name='edu.stanford.nlp.pipeline.NERMention.gender', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='entityMentionIndex', full_name='edu.stanford.nlp.pipeline.NERMention.entityMentionIndex', index=9,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='canonicalEntityMentionIndex', full_name='edu.stanford.nlp.pipeline.NERMention.canonicalEntityMentionIndex', index=10,
number=11, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='entityMentionText', full_name='edu.stanford.nlp.pipeline.NERMention.entityMentionText', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=8061,
serialized_end=8410,
)
_SENTENCEFRAGMENT = _descriptor.Descriptor(
name='SentenceFragment',
full_name='edu.stanford.nlp.pipeline.SentenceFragment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tokenIndex', full_name='edu.stanford.nlp.pipeline.SentenceFragment.tokenIndex', index=0,
number=1, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='root', full_name='edu.stanford.nlp.pipeline.SentenceFragment.root', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='assumedTruth', full_name='edu.stanford.nlp.pipeline.SentenceFragment.assumedTruth', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='score', full_name='edu.stanford.nlp.pipeline.SentenceFragment.score', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=8412,
serialized_end=8501,
)
_TOKENLOCATION = _descriptor.Descriptor(
name='TokenLocation',
full_name='edu.stanford.nlp.pipeline.TokenLocation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sentenceIndex', full_name='edu.stanford.nlp.pipeline.TokenLocation.sentenceIndex', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tokenIndex', full_name='edu.stanford.nlp.pipeline.TokenLocation.tokenIndex', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=8503,
serialized_end=8561,
)
_RELATIONTRIPLE = _descriptor.Descriptor(
name='RelationTriple',
full_name='edu.stanford.nlp.pipeline.RelationTriple',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='subject', full_name='edu.stanford.nlp.pipeline.RelationTriple.subject', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='relation', full_name='edu.stanford.nlp.pipeline.RelationTriple.relation', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='object', full_name='edu.stanford.nlp.pipeline.RelationTriple.object', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='confidence', full_name='edu.stanford.nlp.pipeline.RelationTriple.confidence', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='subjectTokens', full_name='edu.stanford.nlp.pipeline.RelationTriple.subjectTokens', index=4,
number=13, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='relationTokens', full_name='edu.stanford.nlp.pipeline.RelationTriple.relationTokens', index=5,
number=14, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='objectTokens', full_name='edu.stanford.nlp.pipeline.RelationTriple.objectTokens', index=6,
number=15, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tree', full_name='edu.stanford.nlp.pipeline.RelationTriple.tree', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='istmod', full_name='edu.stanford.nlp.pipeline.RelationTriple.istmod', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='prefixBe', full_name='edu.stanford.nlp.pipeline.RelationTriple.prefixBe', index=9,
number=10, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='suffixBe', full_name='edu.stanford.nlp.pipeline.RelationTriple.suffixBe', index=10,
number=11, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='suffixOf', full_name='edu.stanford.nlp.pipeline.RelationTriple.suffixOf', index=11,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=8564,
serialized_end=8974,
)
_MAPSTRINGSTRING = _descriptor.Descriptor(
name='MapStringString',
full_name='edu.stanford.nlp.pipeline.MapStringString',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='edu.stanford.nlp.pipeline.MapStringString.key', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='edu.stanford.nlp.pipeline.MapStringString.value', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=8976,
serialized_end=9021,
)
_MAPINTSTRING = _descriptor.Descriptor(
name='MapIntString',
full_name='edu.stanford.nlp.pipeline.MapIntString',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='edu.stanford.nlp.pipeline.MapIntString.key', index=0,
number=1, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='edu.stanford.nlp.pipeline.MapIntString.value', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=9023,
serialized_end=9065,
)
_SECTION = _descriptor.Descriptor(
name='Section',
full_name='edu.stanford.nlp.pipeline.Section',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='charBegin', full_name='edu.stanford.nlp.pipeline.Section.charBegin', index=0,
number=1, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='charEnd', full_name='edu.stanford.nlp.pipeline.Section.charEnd', index=1,
number=2, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='author', full_name='edu.stanford.nlp.pipeline.Section.author', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sentenceIndexes', full_name='edu.stanford.nlp.pipeline.Section.sentenceIndexes', index=3,
number=4, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='datetime', full_name='edu.stanford.nlp.pipeline.Section.datetime', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='quotes', full_name='edu.stanford.nlp.pipeline.Section.quotes', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='authorCharBegin', full_name='edu.stanford.nlp.pipeline.Section.authorCharBegin', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='authorCharEnd', full_name='edu.stanford.nlp.pipeline.Section.authorCharEnd', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='xmlTag', full_name='edu.stanford.nlp.pipeline.Section.xmlTag', index=8,
number=9, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=9068,
serialized_end=9320,
)
_DOCUMENT.fields_by_name['sentence'].message_type = _SENTENCE
_DOCUMENT.fields_by_name['corefChain'].message_type = _COREFCHAIN
_DOCUMENT.fields_by_name['sentencelessToken'].message_type = _TOKEN
_DOCUMENT.fields_by_name['character'].message_type = _TOKEN
_DOCUMENT.fields_by_name['quote'].message_type = _QUOTE
_DOCUMENT.fields_by_name['mentions'].message_type = _NERMENTION
_DOCUMENT.fields_by_name['sections'].message_type = _SECTION
_DOCUMENT.fields_by_name['mentionsForCoref'].message_type = _MENTION
_SENTENCE.fields_by_name['token'].message_type = _TOKEN
_SENTENCE.fields_by_name['parseTree'].message_type = _PARSETREE
_SENTENCE.fields_by_name['binarizedParseTree'].message_type = _PARSETREE
_SENTENCE.fields_by_name['annotatedParseTree'].message_type = _PARSETREE
_SENTENCE.fields_by_name['kBestParseTrees'].message_type = _PARSETREE
_SENTENCE.fields_by_name['basicDependencies'].message_type = _DEPENDENCYGRAPH
_SENTENCE.fields_by_name['collapsedDependencies'].message_type = _DEPENDENCYGRAPH
_SENTENCE.fields_by_name['collapsedCCProcessedDependencies'].message_type = _DEPENDENCYGRAPH
_SENTENCE.fields_by_name['alternativeDependencies'].message_type = _DEPENDENCYGRAPH
_SENTENCE.fields_by_name['openieTriple'].message_type = _RELATIONTRIPLE
_SENTENCE.fields_by_name['kbpTriple'].message_type = _RELATIONTRIPLE
_SENTENCE.fields_by_name['entailedSentence'].message_type = _SENTENCEFRAGMENT
_SENTENCE.fields_by_name['entailedClause'].message_type = _SENTENCEFRAGMENT
_SENTENCE.fields_by_name['enhancedDependencies'].message_type = _DEPENDENCYGRAPH
_SENTENCE.fields_by_name['enhancedPlusPlusDependencies'].message_type = _DEPENDENCYGRAPH
_SENTENCE.fields_by_name['character'].message_type = _TOKEN
_SENTENCE.fields_by_name['entity'].message_type = _ENTITY
_SENTENCE.fields_by_name['relation'].message_type = _RELATION
_SENTENCE.fields_by_name['mentions'].message_type = _NERMENTION
_SENTENCE.fields_by_name['mentionsForCoref'].message_type = _MENTION
_TOKEN.fields_by_name['timexValue'].message_type = _TIMEX
_TOKEN.fields_by_name['operator'].message_type = _OPERATOR
_TOKEN.fields_by_name['polarity'].message_type = _POLARITY
_TOKEN.fields_by_name['span'].message_type = _SPAN
_TOKEN.fields_by_name['conllUFeatures'].message_type = _MAPSTRINGSTRING
_TOKEN.fields_by_name['conllUTokenSpan'].message_type = _SPAN
_TOKEN.fields_by_name['conllUSecondaryDeps'].message_type = _MAPSTRINGSTRING
_QUOTE.fields_by_name['attributionDependencyGraph'].message_type = _DEPENDENCYGRAPH
_PARSETREE.fields_by_name['child'].message_type = _PARSETREE
_PARSETREE.fields_by_name['sentiment'].enum_type = _SENTIMENT
_DEPENDENCYGRAPH_NODE.containing_type = _DEPENDENCYGRAPH
_DEPENDENCYGRAPH_EDGE.fields_by_name['language'].enum_type = _LANGUAGE
_DEPENDENCYGRAPH_EDGE.containing_type = _DEPENDENCYGRAPH
_DEPENDENCYGRAPH.fields_by_name['node'].message_type = _DEPENDENCYGRAPH_NODE
_DEPENDENCYGRAPH.fields_by_name['edge'].message_type = _DEPENDENCYGRAPH_EDGE
_COREFCHAIN_COREFMENTION.containing_type = _COREFCHAIN
_COREFCHAIN.fields_by_name['mention'].message_type = _COREFCHAIN_COREFMENTION
_MENTION.fields_by_name['headIndexedWord'].message_type = _INDEXEDWORD
_MENTION.fields_by_name['dependingVerb'].message_type = _INDEXEDWORD
_MENTION.fields_by_name['headWord'].message_type = _INDEXEDWORD
_MENTION.fields_by_name['speakerInfo'].message_type = _SPEAKERINFO
_MENTION.fields_by_name['sentenceWords'].message_type = _INDEXEDWORD
_MENTION.fields_by_name['originalSpan'].message_type = _INDEXEDWORD
_RELATION.fields_by_name['arg'].message_type = _ENTITY
_POLARITY.fields_by_name['projectEquivalence'].enum_type = _NATURALLOGICRELATION
_POLARITY.fields_by_name['projectForwardEntailment'].enum_type = _NATURALLOGICRELATION
_POLARITY.fields_by_name['projectReverseEntailment'].enum_type = _NATURALLOGICRELATION
_POLARITY.fields_by_name['projectNegation'].enum_type = _NATURALLOGICRELATION
_POLARITY.fields_by_name['projectAlternation'].enum_type = _NATURALLOGICRELATION
_POLARITY.fields_by_name['projectCover'].enum_type = _NATURALLOGICRELATION
_POLARITY.fields_by_name['projectIndependence'].enum_type = _NATURALLOGICRELATION
_NERMENTION.fields_by_name['timex'].message_type = _TIMEX
_RELATIONTRIPLE.fields_by_name['subjectTokens'].message_type = _TOKENLOCATION
_RELATIONTRIPLE.fields_by_name['relationTokens'].message_type = _TOKENLOCATION
_RELATIONTRIPLE.fields_by_name['objectTokens'].message_type = _TOKENLOCATION
_RELATIONTRIPLE.fields_by_name['tree'].message_type = _DEPENDENCYGRAPH
_SECTION.fields_by_name['quotes'].message_type = _QUOTE
_SECTION.fields_by_name['xmlTag'].message_type = _TOKEN
DESCRIPTOR.message_types_by_name['Document'] = _DOCUMENT
DESCRIPTOR.message_types_by_name['Sentence'] = _SENTENCE
DESCRIPTOR.message_types_by_name['Token'] = _TOKEN
DESCRIPTOR.message_types_by_name['Quote'] = _QUOTE
DESCRIPTOR.message_types_by_name['ParseTree'] = _PARSETREE
DESCRIPTOR.message_types_by_name['DependencyGraph'] = _DEPENDENCYGRAPH
DESCRIPTOR.message_types_by_name['CorefChain'] = _COREFCHAIN
DESCRIPTOR.message_types_by_name['Mention'] = _MENTION
DESCRIPTOR.message_types_by_name['IndexedWord'] = _INDEXEDWORD
DESCRIPTOR.message_types_by_name['SpeakerInfo'] = _SPEAKERINFO
DESCRIPTOR.message_types_by_name['Span'] = _SPAN
DESCRIPTOR.message_types_by_name['Timex'] = _TIMEX
DESCRIPTOR.message_types_by_name['Entity'] = _ENTITY
DESCRIPTOR.message_types_by_name['Relation'] = _RELATION
DESCRIPTOR.message_types_by_name['Operator'] = _OPERATOR
DESCRIPTOR.message_types_by_name['Polarity'] = _POLARITY
DESCRIPTOR.message_types_by_name['NERMention'] = _NERMENTION
DESCRIPTOR.message_types_by_name['SentenceFragment'] = _SENTENCEFRAGMENT
DESCRIPTOR.message_types_by_name['TokenLocation'] = _TOKENLOCATION
DESCRIPTOR.message_types_by_name['RelationTriple'] = _RELATIONTRIPLE
DESCRIPTOR.message_types_by_name['MapStringString'] = _MAPSTRINGSTRING
DESCRIPTOR.message_types_by_name['MapIntString'] = _MAPINTSTRING
DESCRIPTOR.message_types_by_name['Section'] = _SECTION
DESCRIPTOR.enum_types_by_name['Language'] = _LANGUAGE
DESCRIPTOR.enum_types_by_name['Sentiment'] = _SENTIMENT
DESCRIPTOR.enum_types_by_name['NaturalLogicRelation'] = _NATURALLOGICRELATION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Document = _reflection.GeneratedProtocolMessageType('Document', (_message.Message,), dict(
DESCRIPTOR = _DOCUMENT,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.Document)
))
_sym_db.RegisterMessage(Document)
Sentence = _reflection.GeneratedProtocolMessageType('Sentence', (_message.Message,), dict(
DESCRIPTOR = _SENTENCE,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.Sentence)
))
_sym_db.RegisterMessage(Sentence)
Token = _reflection.GeneratedProtocolMessageType('Token', (_message.Message,), dict(
DESCRIPTOR = _TOKEN,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.Token)
))
_sym_db.RegisterMessage(Token)
Quote = _reflection.GeneratedProtocolMessageType('Quote', (_message.Message,), dict(
DESCRIPTOR = _QUOTE,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.Quote)
))
_sym_db.RegisterMessage(Quote)
ParseTree = _reflection.GeneratedProtocolMessageType('ParseTree', (_message.Message,), dict(
DESCRIPTOR = _PARSETREE,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.ParseTree)
))
_sym_db.RegisterMessage(ParseTree)
DependencyGraph = _reflection.GeneratedProtocolMessageType('DependencyGraph', (_message.Message,), dict(
Node = _reflection.GeneratedProtocolMessageType('Node', (_message.Message,), dict(
DESCRIPTOR = _DEPENDENCYGRAPH_NODE,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.DependencyGraph.Node)
))
,
Edge = _reflection.GeneratedProtocolMessageType('Edge', (_message.Message,), dict(
DESCRIPTOR = _DEPENDENCYGRAPH_EDGE,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.DependencyGraph.Edge)
))
,
DESCRIPTOR = _DEPENDENCYGRAPH,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.DependencyGraph)
))
_sym_db.RegisterMessage(DependencyGraph)
_sym_db.RegisterMessage(DependencyGraph.Node)
_sym_db.RegisterMessage(DependencyGraph.Edge)
CorefChain = _reflection.GeneratedProtocolMessageType('CorefChain', (_message.Message,), dict(
CorefMention = _reflection.GeneratedProtocolMessageType('CorefMention', (_message.Message,), dict(
DESCRIPTOR = _COREFCHAIN_COREFMENTION,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.CorefChain.CorefMention)
))
,
DESCRIPTOR = _COREFCHAIN,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.CorefChain)
))
_sym_db.RegisterMessage(CorefChain)
_sym_db.RegisterMessage(CorefChain.CorefMention)
Mention = _reflection.GeneratedProtocolMessageType('Mention', (_message.Message,), dict(
DESCRIPTOR = _MENTION,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.Mention)
))
_sym_db.RegisterMessage(Mention)
IndexedWord = _reflection.GeneratedProtocolMessageType('IndexedWord', (_message.Message,), dict(
DESCRIPTOR = _INDEXEDWORD,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.IndexedWord)
))
_sym_db.RegisterMessage(IndexedWord)
SpeakerInfo = _reflection.GeneratedProtocolMessageType('SpeakerInfo', (_message.Message,), dict(
DESCRIPTOR = _SPEAKERINFO,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.SpeakerInfo)
))
_sym_db.RegisterMessage(SpeakerInfo)
Span = _reflection.GeneratedProtocolMessageType('Span', (_message.Message,), dict(
DESCRIPTOR = _SPAN,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.Span)
))
_sym_db.RegisterMessage(Span)
Timex = _reflection.GeneratedProtocolMessageType('Timex', (_message.Message,), dict(
DESCRIPTOR = _TIMEX,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.Timex)
))
_sym_db.RegisterMessage(Timex)
Entity = _reflection.GeneratedProtocolMessageType('Entity', (_message.Message,), dict(
DESCRIPTOR = _ENTITY,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.Entity)
))
_sym_db.RegisterMessage(Entity)
Relation = _reflection.GeneratedProtocolMessageType('Relation', (_message.Message,), dict(
DESCRIPTOR = _RELATION,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.Relation)
))
_sym_db.RegisterMessage(Relation)
Operator = _reflection.GeneratedProtocolMessageType('Operator', (_message.Message,), dict(
DESCRIPTOR = _OPERATOR,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.Operator)
))
_sym_db.RegisterMessage(Operator)
Polarity = _reflection.GeneratedProtocolMessageType('Polarity', (_message.Message,), dict(
DESCRIPTOR = _POLARITY,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.Polarity)
))
_sym_db.RegisterMessage(Polarity)
NERMention = _reflection.GeneratedProtocolMessageType('NERMention', (_message.Message,), dict(
DESCRIPTOR = _NERMENTION,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.NERMention)
))
_sym_db.RegisterMessage(NERMention)
SentenceFragment = _reflection.GeneratedProtocolMessageType('SentenceFragment', (_message.Message,), dict(
DESCRIPTOR = _SENTENCEFRAGMENT,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.SentenceFragment)
))
_sym_db.RegisterMessage(SentenceFragment)
TokenLocation = _reflection.GeneratedProtocolMessageType('TokenLocation', (_message.Message,), dict(
DESCRIPTOR = _TOKENLOCATION,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.TokenLocation)
))
_sym_db.RegisterMessage(TokenLocation)
RelationTriple = _reflection.GeneratedProtocolMessageType('RelationTriple', (_message.Message,), dict(
DESCRIPTOR = _RELATIONTRIPLE,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.RelationTriple)
))
_sym_db.RegisterMessage(RelationTriple)
MapStringString = _reflection.GeneratedProtocolMessageType('MapStringString', (_message.Message,), dict(
DESCRIPTOR = _MAPSTRINGSTRING,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.MapStringString)
))
_sym_db.RegisterMessage(MapStringString)
MapIntString = _reflection.GeneratedProtocolMessageType('MapIntString', (_message.Message,), dict(
DESCRIPTOR = _MAPINTSTRING,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.MapIntString)
))
_sym_db.RegisterMessage(MapIntString)
Section = _reflection.GeneratedProtocolMessageType('Section', (_message.Message,), dict(
DESCRIPTOR = _SECTION,
__module__ = 'CoreNLP_pb2'
# @@protoc_insertion_point(class_scope:edu.stanford.nlp.pipeline.Section)
))
_sym_db.RegisterMessage(Section)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\031edu.stanford.nlp.pipelineB\rCoreNLPProtos'))
_DEPENDENCYGRAPH.fields_by_name['root'].has_options = True
_DEPENDENCYGRAPH.fields_by_name['root']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
# @@protoc_insertion_point(module_scope)
| stanfordnlp-master | stanfordnlp/protobuf/CoreNLP_pb2.py |
"""
Tests for the run_pipeline.py script, also serves as integration test
"""
import re
import subprocess
from datetime import datetime
from tests import *
def test_fr_pipeline():
# check input files present
assert os.path.exists(FR_TEST_IN), f'Missing test input file: {FR_TEST_IN}'
assert os.path.exists(FR_TEST_GOLD_OUT), f'Missing test gold output file: {FR_TEST_GOLD_OUT}'
# verify models not downloaded and output file doesn't exist
safe_rm(FR_TEST_OUT)
safe_rm(FR_MODELS_DIR)
# run french pipeline command and check results
fr_pipeline_cmd = \
f"python -m stanfordnlp.run_pipeline -l fr -d {TEST_WORKING_DIR} --force-download -o {FR_TEST_OUT} {FR_TEST_IN}"
subprocess.call(fr_pipeline_cmd, shell=True)
assert open(FR_TEST_GOLD_OUT).read() == open(FR_TEST_OUT).read(), f'Test failure: output does not match gold'
# cleanup
# log this test run's final output
if os.path.exists(FR_TEST_OUT):
curr_timestamp = re.sub(' ', '-', str(datetime.now()))
os.rename(FR_TEST_OUT, f'{FR_TEST_OUT}-{curr_timestamp}')
safe_rm(FR_MODELS_DIR)
| stanfordnlp-master | tests/test_run_pipeline.py |
"""
Basic testing of the English pipeline
"""
import pytest
import stanfordnlp
from tests import *
def setup_module(module):
"""Set up resources for all tests in this module"""
safe_rm(EN_MODELS_DIR)
stanfordnlp.download('en', resource_dir=TEST_WORKING_DIR, force=True)
def teardown_module(module):
"""Clean up resources after tests complete"""
safe_rm(EN_MODELS_DIR)
# data for testing
EN_DOC = "Barack Obama was born in Hawaii. He was elected president in 2008. Obama attended Harvard."
EN_DOC_TOKENS_GOLD = """
<Token index=1;words=[<Word index=1;text=Barack;lemma=Barack;upos=PROPN;xpos=NNP;feats=Number=Sing;governor=4;dependency_relation=nsubj:pass>]>
<Token index=2;words=[<Word index=2;text=Obama;lemma=Obama;upos=PROPN;xpos=NNP;feats=Number=Sing;governor=1;dependency_relation=flat>]>
<Token index=3;words=[<Word index=3;text=was;lemma=be;upos=AUX;xpos=VBD;feats=Mood=Ind|Number=Sing|Person=3|Tense=Past|VerbForm=Fin;governor=4;dependency_relation=aux:pass>]>
<Token index=4;words=[<Word index=4;text=born;lemma=bear;upos=VERB;xpos=VBN;feats=Tense=Past|VerbForm=Part|Voice=Pass;governor=0;dependency_relation=root>]>
<Token index=5;words=[<Word index=5;text=in;lemma=in;upos=ADP;xpos=IN;feats=_;governor=6;dependency_relation=case>]>
<Token index=6;words=[<Word index=6;text=Hawaii;lemma=Hawaii;upos=PROPN;xpos=NNP;feats=Number=Sing;governor=4;dependency_relation=obl>]>
<Token index=7;words=[<Word index=7;text=.;lemma=.;upos=PUNCT;xpos=.;feats=_;governor=4;dependency_relation=punct>]>
<Token index=1;words=[<Word index=1;text=He;lemma=he;upos=PRON;xpos=PRP;feats=Case=Nom|Gender=Masc|Number=Sing|Person=3|PronType=Prs;governor=3;dependency_relation=nsubj:pass>]>
<Token index=2;words=[<Word index=2;text=was;lemma=be;upos=AUX;xpos=VBD;feats=Mood=Ind|Number=Sing|Person=3|Tense=Past|VerbForm=Fin;governor=3;dependency_relation=aux:pass>]>
<Token index=3;words=[<Word index=3;text=elected;lemma=elect;upos=VERB;xpos=VBN;feats=Tense=Past|VerbForm=Part|Voice=Pass;governor=0;dependency_relation=root>]>
<Token index=4;words=[<Word index=4;text=president;lemma=president;upos=NOUN;xpos=NN;feats=Number=Sing;governor=3;dependency_relation=obj>]>
<Token index=5;words=[<Word index=5;text=in;lemma=in;upos=ADP;xpos=IN;feats=_;governor=6;dependency_relation=case>]>
<Token index=6;words=[<Word index=6;text=2008;lemma=2008;upos=NUM;xpos=CD;feats=NumType=Card;governor=3;dependency_relation=obl>]>
<Token index=7;words=[<Word index=7;text=.;lemma=.;upos=PUNCT;xpos=.;feats=_;governor=3;dependency_relation=punct>]>
<Token index=1;words=[<Word index=1;text=Obama;lemma=Obama;upos=PROPN;xpos=NNP;feats=Number=Sing;governor=2;dependency_relation=nsubj>]>
<Token index=2;words=[<Word index=2;text=attended;lemma=attend;upos=VERB;xpos=VBD;feats=Mood=Ind|Tense=Past|VerbForm=Fin;governor=0;dependency_relation=root>]>
<Token index=3;words=[<Word index=3;text=Harvard;lemma=Harvard;upos=PROPN;xpos=NNP;feats=Number=Sing;governor=2;dependency_relation=obj>]>
<Token index=4;words=[<Word index=4;text=.;lemma=.;upos=PUNCT;xpos=.;feats=_;governor=2;dependency_relation=punct>]>
""".strip()
EN_DOC_WORDS_GOLD = """
<Word index=1;text=Barack;lemma=Barack;upos=PROPN;xpos=NNP;feats=Number=Sing;governor=4;dependency_relation=nsubj:pass>
<Word index=2;text=Obama;lemma=Obama;upos=PROPN;xpos=NNP;feats=Number=Sing;governor=1;dependency_relation=flat>
<Word index=3;text=was;lemma=be;upos=AUX;xpos=VBD;feats=Mood=Ind|Number=Sing|Person=3|Tense=Past|VerbForm=Fin;governor=4;dependency_relation=aux:pass>
<Word index=4;text=born;lemma=bear;upos=VERB;xpos=VBN;feats=Tense=Past|VerbForm=Part|Voice=Pass;governor=0;dependency_relation=root>
<Word index=5;text=in;lemma=in;upos=ADP;xpos=IN;feats=_;governor=6;dependency_relation=case>
<Word index=6;text=Hawaii;lemma=Hawaii;upos=PROPN;xpos=NNP;feats=Number=Sing;governor=4;dependency_relation=obl>
<Word index=7;text=.;lemma=.;upos=PUNCT;xpos=.;feats=_;governor=4;dependency_relation=punct>
<Word index=1;text=He;lemma=he;upos=PRON;xpos=PRP;feats=Case=Nom|Gender=Masc|Number=Sing|Person=3|PronType=Prs;governor=3;dependency_relation=nsubj:pass>
<Word index=2;text=was;lemma=be;upos=AUX;xpos=VBD;feats=Mood=Ind|Number=Sing|Person=3|Tense=Past|VerbForm=Fin;governor=3;dependency_relation=aux:pass>
<Word index=3;text=elected;lemma=elect;upos=VERB;xpos=VBN;feats=Tense=Past|VerbForm=Part|Voice=Pass;governor=0;dependency_relation=root>
<Word index=4;text=president;lemma=president;upos=NOUN;xpos=NN;feats=Number=Sing;governor=3;dependency_relation=obj>
<Word index=5;text=in;lemma=in;upos=ADP;xpos=IN;feats=_;governor=6;dependency_relation=case>
<Word index=6;text=2008;lemma=2008;upos=NUM;xpos=CD;feats=NumType=Card;governor=3;dependency_relation=obl>
<Word index=7;text=.;lemma=.;upos=PUNCT;xpos=.;feats=_;governor=3;dependency_relation=punct>
<Word index=1;text=Obama;lemma=Obama;upos=PROPN;xpos=NNP;feats=Number=Sing;governor=2;dependency_relation=nsubj>
<Word index=2;text=attended;lemma=attend;upos=VERB;xpos=VBD;feats=Mood=Ind|Tense=Past|VerbForm=Fin;governor=0;dependency_relation=root>
<Word index=3;text=Harvard;lemma=Harvard;upos=PROPN;xpos=NNP;feats=Number=Sing;governor=2;dependency_relation=obj>
<Word index=4;text=.;lemma=.;upos=PUNCT;xpos=.;feats=_;governor=2;dependency_relation=punct>
""".strip()
EN_DOC_DEPENDENCY_PARSES_GOLD = """
('Barack', '4', 'nsubj:pass')
('Obama', '1', 'flat')
('was', '4', 'aux:pass')
('born', '0', 'root')
('in', '6', 'case')
('Hawaii', '4', 'obl')
('.', '4', 'punct')
('He', '3', 'nsubj:pass')
('was', '3', 'aux:pass')
('elected', '0', 'root')
('president', '3', 'obj')
('in', '6', 'case')
('2008', '3', 'obl')
('.', '3', 'punct')
('Obama', '2', 'nsubj')
('attended', '0', 'root')
('Harvard', '2', 'obj')
('.', '2', 'punct')
""".strip()
EN_DOC_CONLLU_GOLD = """
1 Barack Barack PROPN NNP Number=Sing 4 nsubj:pass _ _
2 Obama Obama PROPN NNP Number=Sing 1 flat _ _
3 was be AUX VBD Mood=Ind|Number=Sing|Person=3|Tense=Past|VerbForm=Fin 4 aux:pass _ _
4 born bear VERB VBN Tense=Past|VerbForm=Part|Voice=Pass 0 root _ _
5 in in ADP IN _ 6 case _ _
6 Hawaii Hawaii PROPN NNP Number=Sing 4 obl _ _
7 . . PUNCT . _ 4 punct _ _
1 He he PRON PRP Case=Nom|Gender=Masc|Number=Sing|Person=3|PronType=Prs 3 nsubj:pass _ _
2 was be AUX VBD Mood=Ind|Number=Sing|Person=3|Tense=Past|VerbForm=Fin 3 aux:pass _ _
3 elected elect VERB VBN Tense=Past|VerbForm=Part|Voice=Pass 0 root _ _
4 president president NOUN NN Number=Sing 3 obj _ _
5 in in ADP IN _ 6 case _ _
6 2008 2008 NUM CD NumType=Card 3 obl _ _
7 . . PUNCT . _ 3 punct _ _
1 Obama Obama PROPN NNP Number=Sing 2 nsubj _ _
2 attended attend VERB VBD Mood=Ind|Tense=Past|VerbForm=Fin 0 root _ _
3 Harvard Harvard PROPN NNP Number=Sing 2 obj _ _
4 . . PUNCT . _ 2 punct _ _
""".lstrip()
@pytest.fixture(scope="module")
def processed_doc():
""" Document created by running full English pipeline on a few sentences """
nlp = stanfordnlp.Pipeline(models_dir=TEST_WORKING_DIR)
return nlp(EN_DOC)
def test_text(processed_doc):
assert processed_doc.text == EN_DOC
def test_conllu(processed_doc):
assert processed_doc.conll_file.conll_as_string() == EN_DOC_CONLLU_GOLD
def test_tokens(processed_doc):
assert "\n\n".join([sent.tokens_string() for sent in processed_doc.sentences]) == EN_DOC_TOKENS_GOLD
def test_words(processed_doc):
assert "\n\n".join([sent.words_string() for sent in processed_doc.sentences]) == EN_DOC_WORDS_GOLD
def test_dependency_parse(processed_doc):
assert "\n\n".join([sent.dependencies_string() for sent in processed_doc.sentences]) == \
EN_DOC_DEPENDENCY_PARSES_GOLD
| stanfordnlp-master | tests/test_english_pipeline.py |
"""
Utilities for testing
"""
import os
# Environment Variables
# set this to specify working directory of tests
TEST_HOME_VAR = 'STANFORDNLP_TEST_HOME'
# Global Variables
# test working directory base name must be stanfordnlp_test
TEST_DIR_BASE_NAME = 'stanfordnlp_test'
# check the working dir is set and compliant
assert os.getenv(TEST_HOME_VAR) is not None, \
f'Please set {TEST_HOME_VAR} environment variable for test working dir, base name must be: {TEST_DIR_BASE_NAME}'
TEST_WORKING_DIR = os.getenv(TEST_HOME_VAR)
assert os.path.basename(TEST_WORKING_DIR) == TEST_DIR_BASE_NAME, \
f'Base name of test home dir must be: {TEST_DIR_BASE_NAME}'
# language resources
LANGUAGE_RESOURCES = {}
TOKENIZE_MODEL = 'tokenizer.pt'
MWT_MODEL = 'mwt_expander.pt'
POS_MODEL = 'tagger.pt'
POS_PRETRAIN = 'pretrain.pt'
LEMMA_MODEL = 'lemmatizer.pt'
DEPPARSE_MODEL = 'parser.pt'
DEPPARSE_PRETRAIN = 'pretrain.pt'
MODEL_FILES = [TOKENIZE_MODEL, MWT_MODEL, POS_MODEL, POS_PRETRAIN, LEMMA_MODEL, DEPPARSE_MODEL, DEPPARSE_PRETRAIN]
# English resources
EN_KEY = 'en'
EN_SHORTHAND = 'en_ewt'
# models
EN_MODELS_DIR = f'{TEST_WORKING_DIR}/{EN_SHORTHAND}_models'
EN_MODEL_FILES = [f'{EN_MODELS_DIR}/{EN_SHORTHAND}_{model_fname}' for model_fname in MODEL_FILES]
# French resources
FR_KEY = 'fr'
FR_SHORTHAND = 'fr_gsd'
# regression file paths
FR_TEST_IN = f'{TEST_WORKING_DIR}/in/fr_gsd.test.txt'
FR_TEST_OUT = f'{TEST_WORKING_DIR}/out/fr_gsd.test.txt.out'
FR_TEST_GOLD_OUT = f'{TEST_WORKING_DIR}/out/fr_gsd.test.txt.out.gold'
# models
FR_MODELS_DIR = f'{TEST_WORKING_DIR}/{FR_SHORTHAND}_models'
FR_MODEL_FILES = [f'{FR_MODELS_DIR}/{FR_SHORTHAND}_{model_fname}' for model_fname in MODEL_FILES]
# utils for clean up
# only allow removal of dirs/files in this approved list
REMOVABLE_PATHS = ['en_ewt_models', 'en_ewt_tokenizer.pt', 'en_ewt_mwt_expander.pt', 'en_ewt_tagger.pt',
'en_ewt.pretrain.pt', 'en_ewt_lemmatizer.pt', 'en_ewt_parser.pt', 'fr_gsd_models',
'fr_gsd_tokenizer.pt', 'fr_gsd_mwt_expander.pt', 'fr_gsd_tagger.pt', 'fr_gsd.pretrain.pt',
'fr_gsd_lemmatizer.pt', 'fr_gsd_parser.pt']
def safe_rm(path_to_rm):
"""
Safely remove a directory of files or a file
1.) check path exists, files are files, dirs are dirs
2.) only remove things on approved list REMOVABLE_PATHS
3.) assert no longer exists
"""
# handle directory
if os.path.isdir(path_to_rm):
files_to_rm = [f'{path_to_rm}/{fname}' for fname in os.listdir(path_to_rm)]
dir_to_rm = path_to_rm
else:
files_to_rm = [path_to_rm]
dir_to_rm = None
# clear out files
for file_to_rm in files_to_rm:
if os.path.isfile(file_to_rm) and os.path.basename(file_to_rm) in REMOVABLE_PATHS:
os.remove(file_to_rm)
assert not os.path.exists(file_to_rm), f'Error removing: {file_to_rm}'
# clear out directory
if dir_to_rm is not None and os.path.isdir(dir_to_rm):
os.rmdir(dir_to_rm)
assert not os.path.exists(dir_to_rm), f'Error removing: {dir_to_rm}'
| stanfordnlp-master | tests/__init__.py |
"""
Tests to read a stored protobuf.
Also serves as an example of how to parse sentences, tokens, pos, lemma,
ner, dependencies and mentions.
The test corresponds to annotations for the following sentence:
Chris wrote a simple sentence that he parsed with Stanford CoreNLP.
"""
import os
import pytest
from pytest import fixture
from stanfordnlp.protobuf import Document, Sentence, Token, DependencyGraph,\
CorefChain
from stanfordnlp.protobuf import parseFromDelimitedString, writeToDelimitedString, to_text
# set the marker for this module
pytestmark = pytest.mark.travis
# Text that was annotated
TEXT = "Chris wrote a simple sentence that he parsed with Stanford CoreNLP.\n"
@fixture
def doc_pb():
test_dir = os.path.dirname(os.path.abspath(__file__))
test_data = os.path.join(test_dir, 'data', 'test.dat')
with open(test_data, 'rb') as f:
buf = f.read()
doc = Document()
parseFromDelimitedString(doc, buf)
return doc
def test_parse_protobuf(doc_pb):
assert doc_pb.ByteSize() == 4239
def test_write_protobuf(doc_pb):
stream = writeToDelimitedString(doc_pb)
buf = stream.getvalue()
stream.close()
doc_pb_ = Document()
parseFromDelimitedString(doc_pb_, buf)
assert doc_pb == doc_pb_
def test_document_text(doc_pb):
assert doc_pb.text == TEXT
def test_sentences(doc_pb):
assert len(doc_pb.sentence) == 1
sentence = doc_pb.sentence[0]
assert isinstance(sentence, Sentence)
# check sentence length
assert sentence.characterOffsetEnd - sentence.characterOffsetBegin == 67
# Note that the sentence text should actually be recovered from the tokens.
assert sentence.text == ''
assert to_text(sentence) == TEXT[:-1]
def test_tokens(doc_pb):
sentence = doc_pb.sentence[0]
tokens = sentence.token
assert len(tokens) == 12
assert isinstance(tokens[0], Token)
# Word
words = "Chris wrote a simple sentence that he parsed with Stanford CoreNLP .".split()
words_ = [t.word for t in tokens]
assert words_ == words
# Lemma
lemmas = "Chris write a simple sentence that he parse with Stanford CoreNLP .".split()
lemmas_ = [t.lemma for t in tokens]
assert lemmas_ == lemmas
# POS
pos = "NNP VBD DT JJ NN IN PRP VBD IN NNP NNP .".split()
pos_ = [t.pos for t in tokens]
assert pos_ == pos
# NER
ner = "PERSON O O O O O O O O ORGANIZATION O O".split()
ner_ = [t.ner for t in tokens]
assert ner_ == ner
# character offsets
begin = [int(i) for i in "0 6 12 14 21 30 35 38 45 50 59 66".split()]
end = [int(i) for i in "5 11 13 20 29 34 37 44 49 58 66 67".split()]
begin_ = [t.beginChar for t in tokens]
end_ = [t.endChar for t in tokens]
assert begin_ == begin
assert end_ == end
def test_dependency_parse(doc_pb):
"""
Extract the dependency parse from the annotation.
"""
sentence = doc_pb.sentence[0]
# You can choose from the following types of dependencies.
# In general, you'll want enhancedPlusPlus
assert sentence.basicDependencies.ByteSize() > 0
assert sentence.enhancedDependencies.ByteSize() > 0
assert sentence.enhancedPlusPlusDependencies.ByteSize() > 0
tree = sentence.enhancedPlusPlusDependencies
    assert isinstance(tree, DependencyGraph)
    # Indices are 1-indexed, with 0 being the "pseudo root"
    assert tree.root  # non-empty; 'wrote' (index 2) is the root
# There are as many nodes as there are tokens.
assert len(tree.node) == len(sentence.token)
    # Enhanced++ dependencies often contain additional edges and are
# not trees -- here, 'parsed' would also have an edge to
# 'sentence'
assert len(tree.edge) == 12
    # This edge goes from "wrote" to "Chris"
edge = tree.edge[0]
assert edge.source == 2
assert edge.target == 1
assert edge.dep == "nsubj"
def test_coref_chain(doc_pb):
"""
    Extract the coreference chains from the annotation.
"""
# Coreference chains span sentences and are stored in the
# document.
chains = doc_pb.corefChain
# In this document there is 1 chain with Chris and he.
assert len(chains) == 1
chain = chains[0]
assert isinstance(chain, CorefChain)
assert chain.mention[0].beginIndex == 0 # 'Chris'
assert chain.mention[0].endIndex == 1
assert chain.mention[0].gender == "MALE"
assert chain.mention[1].beginIndex == 6 # 'he'
assert chain.mention[1].endIndex == 7
assert chain.mention[1].gender == "MALE"
assert chain.representative == 0 # Head of the chain is 'Chris'
| stanfordnlp-master | tests/test_protobuf.py |
"""
Tests that call a running CoreNLPClient.
"""
import pytest
import stanfordnlp.server as corenlp
# set the marker for this module
pytestmark = pytest.mark.travis
TEXT = "Chris wrote a simple sentence that he parsed with Stanford CoreNLP.\n"
def test_connect():
with corenlp.CoreNLPClient() as client:
client.ensure_alive()
assert client.is_active
assert client.is_alive()
def test_annotate():
with corenlp.CoreNLPClient(annotators="tokenize ssplit".split()) as client:
ann = client.annotate(TEXT)
assert corenlp.to_text(ann.sentence[0]) == TEXT[:-1]
def test_update():
with corenlp.CoreNLPClient(annotators="tokenize ssplit".split()) as client:
ann = client.annotate(TEXT)
ann = client.update(ann)
assert corenlp.to_text(ann.sentence[0]) == TEXT[:-1]
def test_tokensregex():
with corenlp.CoreNLPClient(annotators='tokenize ssplit ner depparse'.split(), timeout=90000) as client:
# Example pattern from: https://nlp.stanford.edu/software/tokensregex.shtml
pattern = '([ner: PERSON]+) /wrote/ /an?/ []{0,3} /sentence|article/'
matches = client.tokensregex(TEXT, pattern)
assert len(matches["sentences"]) == 1
assert matches["sentences"][0]["length"] == 1
assert matches == {
"sentences": [{
"0": {
"text": "Chris wrote a simple sentence",
"begin": 0,
"end": 5,
"1": {
"text": "Chris",
"begin": 0,
"end": 1
}},
"length": 1
},]}
def test_semgrex():
with corenlp.CoreNLPClient(annotators='tokenize ssplit pos lemma ner depparse'.split(), timeout=90000) as client:
pattern = '{word:wrote} >nsubj {}=subject >dobj {}=object'
matches = client.semgrex(TEXT, pattern, to_words=True)
assert matches == [
{
"text": "wrote",
"begin": 1,
"end": 2,
"$subject": {
"text": "Chris",
"begin": 0,
"end": 1
},
"$object": {
"text": "sentence",
"begin": 4,
"end": 5
},
"sentence": 0,}]
| stanfordnlp-master | tests/test_client.py |
"""
Basic testing of multi-word-token expansion
"""
import stanfordnlp
from tests import *
def setup_module(module):
"""Set up resources for all tests in this module"""
safe_rm(FR_MODELS_DIR)
stanfordnlp.download('fr', resource_dir=TEST_WORKING_DIR, force=True)
def teardown_module(module):
"""Clean up resources after tests complete"""
safe_rm(FR_MODELS_DIR)
# mwt data for testing
FR_MWT_SENTENCE = "Alors encore inconnu du grand public, Emmanuel Macron devient en 2014 ministre de l'Économie, de " \
"l'Industrie et du Numérique."
FR_MWT_TOKEN_TO_WORDS_GOLD = """
token: Alors words: [<Word index=1;text=Alors>]
token: encore words: [<Word index=2;text=encore>]
token: inconnu words: [<Word index=3;text=inconnu>]
token: du words: [<Word index=4;text=de>, <Word index=5;text=le>]
token: grand words: [<Word index=6;text=grand>]
token: public words: [<Word index=7;text=public>]
token: , words: [<Word index=8;text=,>]
token: Emmanuel words: [<Word index=9;text=Emmanuel>]
token: Macron words: [<Word index=10;text=Macron>]
token: devient words: [<Word index=11;text=devient>]
token: en words: [<Word index=12;text=en>]
token: 2014 words: [<Word index=13;text=2014>]
token: ministre words: [<Word index=14;text=ministre>]
token: de words: [<Word index=15;text=de>]
token: l' words: [<Word index=16;text=l'>]
token: Économie words: [<Word index=17;text=Économie>]
token: , words: [<Word index=18;text=,>]
token: de words: [<Word index=19;text=de>]
token: l' words: [<Word index=20;text=l'>]
token: Industrie words: [<Word index=21;text=Industrie>]
token: et words: [<Word index=22;text=et>]
token: du words: [<Word index=23;text=de>, <Word index=24;text=le>]
token: Numérique words: [<Word index=25;text=Numérique>]
token: . words: [<Word index=26;text=.>]
""".strip()
FR_MWT_WORD_TO_TOKEN_GOLD = """
word: Alors token parent:1-Alors
word: encore token parent:2-encore
word: inconnu token parent:3-inconnu
word: de token parent:4-5-du
word: le token parent:4-5-du
word: grand token parent:6-grand
word: public token parent:7-public
word: , token parent:8-,
word: Emmanuel token parent:9-Emmanuel
word: Macron token parent:10-Macron
word: devient token parent:11-devient
word: en token parent:12-en
word: 2014 token parent:13-2014
word: ministre token parent:14-ministre
word: de token parent:15-de
word: l' token parent:16-l'
word: Économie token parent:17-Économie
word: , token parent:18-,
word: de token parent:19-de
word: l' token parent:20-l'
word: Industrie token parent:21-Industrie
word: et token parent:22-et
word: de token parent:23-24-du
word: le token parent:23-24-du
word: Numérique token parent:25-Numérique
word: . token parent:26-.
""".strip()
def test_mwt():
pipeline = stanfordnlp.Pipeline(processors='tokenize,mwt', models_dir=TEST_WORKING_DIR, lang='fr')
doc = pipeline(FR_MWT_SENTENCE)
token_to_words = "\n".join(
[f'token: {token.text.ljust(9)}\t\twords: {token.words}' for sent in doc.sentences for token in sent.tokens]
).strip()
word_to_token = "\n".join(
[f'word: {word.text.ljust(9)}\t\ttoken parent:{word.parent_token.index+"-"+word.parent_token.text}'
for sent in doc.sentences for word in sent.words]).strip()
assert token_to_words == FR_MWT_TOKEN_TO_WORDS_GOLD
assert word_to_token == FR_MWT_WORD_TO_TOKEN_GOLD
| stanfordnlp-master | tests/test_mwt.py |
# we want to keep only the shorter sentences here: a sentence survives iff
# every token index in it is <= limit (i.e. it has at most `limit` tokens)
limit = 25
file_out = open('en_ewt.train.in.conllu', 'w')
linearr = []
write_line = True
with open('en_ewt.train.in.conllu.backup') as f:
    for line in f:
        linearr.append(line)
        words = line.split()
        if len(words) > 0:
            if int(words[0]) > limit:
                # sentence is too long, drop the whole buffered block
                write_line = False
        else:
            # blank line ends the sentence; prev sentence was OK, keep it
            if write_line:
                for buffered_line in linearr:
                    file_out.write(buffered_line)
            linearr = []
            write_line = True
# flush a final sentence that is not followed by a trailing blank line
if write_line and linearr:
    for buffered_line in linearr:
        file_out.write(buffered_line)
file_out.close()
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import find_packages, setup
setup(
name="cotracker",
version="1.0",
install_requires=[],
packages=find_packages(exclude="notebooks"),
extras_require={
"all": ["matplotlib", "opencv-python"],
"dev": ["flake8", "black"],
},
)
| co-tracker-main | setup.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
import torch
import signal
import socket
import sys
import json
import numpy as np
import argparse
import logging
from pathlib import Path
from tqdm import tqdm
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.cuda.amp import GradScaler
from torch.utils.tensorboard import SummaryWriter
from pytorch_lightning.lite import LightningLite
from cotracker.models.evaluation_predictor import EvaluationPredictor
from cotracker.models.core.cotracker.cotracker import CoTracker
from cotracker.utils.visualizer import Visualizer
from cotracker.datasets.tap_vid_datasets import TapVidDataset
from cotracker.datasets.badja_dataset import BadjaDataset
from cotracker.datasets.fast_capture_dataset import FastCaptureDataset
from cotracker.evaluation.core.evaluator import Evaluator
from cotracker.datasets import kubric_movif_dataset
from cotracker.datasets.utils import collate_fn, collate_fn_train, dataclass_to_cuda_
from cotracker.models.core.cotracker.losses import sequence_loss, balanced_ce_loss
def fetch_optimizer(args, model):
"""Create the optimizer and learning rate scheduler"""
optimizer = optim.AdamW(
model.parameters(), lr=args.lr, weight_decay=args.wdecay, eps=1e-8
)
scheduler = optim.lr_scheduler.OneCycleLR(
optimizer,
args.lr,
args.num_steps + 100,
pct_start=0.05,
cycle_momentum=False,
anneal_strategy="linear",
)
return optimizer, scheduler
def forward_batch(batch, model, args, loss_fn=None, writer=None, step=0):
rgbs = batch.video
trajs_g = batch.trajectory
vis_g = batch.visibility
valids = batch.valid
B, T, C, H, W = rgbs.shape
assert C == 3
B, T, N, D = trajs_g.shape
device = rgbs.device
__, first_positive_inds = torch.max(vis_g, dim=1)
# We want to make sure that during training the model sees visible points
# that it does not need to track just yet: they are visible but queried from a later frame
N_rand = N // 4
# inds of visible points in the 1st frame
nonzero_inds = [torch.nonzero(vis_g[0, :, i]) for i in range(N)]
rand_vis_inds = torch.cat(
[
nonzero_row[torch.randint(len(nonzero_row), size=(1,))]
for nonzero_row in nonzero_inds
],
dim=1,
)
first_positive_inds = torch.cat(
[rand_vis_inds[:, :N_rand], first_positive_inds[:, N_rand:]], dim=1
)
ind_array_ = torch.arange(T, device=device)
ind_array_ = ind_array_[None, :, None].repeat(B, 1, N)
assert torch.allclose(
vis_g[ind_array_ == first_positive_inds[:, None, :]],
torch.ones_like(vis_g),
)
assert torch.allclose(
vis_g[ind_array_ == rand_vis_inds[:, None, :]], torch.ones_like(vis_g)
)
gather = torch.gather(
trajs_g, 1, first_positive_inds[:, :, None, None].repeat(1, 1, N, 2)
)
xys = torch.diagonal(gather, dim1=1, dim2=2).permute(0, 2, 1)
queries = torch.cat([first_positive_inds[:, :, None], xys], dim=2)
predictions, __, visibility, train_data = model(
rgbs=rgbs, queries=queries, iters=args.train_iters, is_train=True
)
vis_predictions, coord_predictions, wind_inds, sort_inds = train_data
trajs_g = trajs_g[:, :, sort_inds]
vis_g = vis_g[:, :, sort_inds]
valids = valids[:, :, sort_inds]
vis_gts = []
traj_gts = []
valids_gts = []
for i, wind_idx in enumerate(wind_inds):
ind = i * (args.sliding_window_len // 2)
vis_gts.append(vis_g[:, ind : ind + args.sliding_window_len, :wind_idx])
traj_gts.append(trajs_g[:, ind : ind + args.sliding_window_len, :wind_idx])
valids_gts.append(valids[:, ind : ind + args.sliding_window_len, :wind_idx])
seq_loss = sequence_loss(coord_predictions, traj_gts, vis_gts, valids_gts, 0.8)
vis_loss = balanced_ce_loss(vis_predictions, vis_gts, valids_gts)
output = {"flow": {"predictions": predictions[0].detach()}}
output["flow"]["loss"] = seq_loss.mean()
output["visibility"] = {
"loss": vis_loss.mean() * 10.0,
"predictions": visibility[0].detach(),
}
return output
def run_test_eval(evaluator, model, dataloaders, writer, step):
model.eval()
for ds_name, dataloader in dataloaders:
predictor = EvaluationPredictor(
model.module.module,
grid_size=6,
local_grid_size=0,
single_point=False,
n_iters=6,
)
if torch.cuda.is_available():
predictor.model = predictor.model.cuda()
metrics = evaluator.evaluate_sequence(
model=predictor,
test_dataloader=dataloader,
dataset_name=ds_name,
train_mode=True,
writer=writer,
step=step,
)
if ds_name == "badja" or ds_name == "fastcapture" or ("kubric" in ds_name):
metrics = {
**{
f"{ds_name}_avg": np.mean(
[v for k, v in metrics.items() if "accuracy" not in k]
)
},
**{
f"{ds_name}_avg_accuracy": np.mean(
[v for k, v in metrics.items() if "accuracy" in k]
)
},
}
print("avg", np.mean([v for v in metrics.values()]))
if "tapvid" in ds_name:
metrics = {
f"{ds_name}_avg_OA": metrics["avg"]["occlusion_accuracy"] * 100,
f"{ds_name}_avg_delta": metrics["avg"]["average_pts_within_thresh"]
* 100,
f"{ds_name}_avg_Jaccard": metrics["avg"]["average_jaccard"] * 100,
}
writer.add_scalars(f"Eval", metrics, step)
class Logger:
SUM_FREQ = 100
def __init__(self, model, scheduler):
self.model = model
self.scheduler = scheduler
self.total_steps = 0
self.running_loss = {}
self.writer = SummaryWriter(log_dir=os.path.join(args.ckpt_path, "runs"))
def _print_training_status(self):
metrics_data = [
self.running_loss[k] / Logger.SUM_FREQ
for k in sorted(self.running_loss.keys())
]
training_str = "[{:6d}] ".format(self.total_steps + 1)
metrics_str = ("{:10.4f}, " * len(metrics_data)).format(*metrics_data)
# print the training status
logging.info(
f"Training Metrics ({self.total_steps}): {training_str + metrics_str}"
)
if self.writer is None:
self.writer = SummaryWriter(log_dir=os.path.join(args.ckpt_path, "runs"))
for k in self.running_loss:
self.writer.add_scalar(
k, self.running_loss[k] / Logger.SUM_FREQ, self.total_steps
)
self.running_loss[k] = 0.0
def push(self, metrics, task):
self.total_steps += 1
for key in metrics:
task_key = str(key) + "_" + task
if task_key not in self.running_loss:
self.running_loss[task_key] = 0.0
self.running_loss[task_key] += metrics[key]
if self.total_steps % Logger.SUM_FREQ == Logger.SUM_FREQ - 1:
self._print_training_status()
self.running_loss = {}
def write_dict(self, results):
if self.writer is None:
self.writer = SummaryWriter(log_dir=os.path.join(args.ckpt_path, "runs"))
for key in results:
self.writer.add_scalar(key, results[key], self.total_steps)
def close(self):
self.writer.close()
class Lite(LightningLite):
def run(self, args):
def seed_everything(seed: int):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
seed_everything(0)
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2 ** 32
np.random.seed(worker_seed)
random.seed(worker_seed)
g = torch.Generator()
g.manual_seed(0)
eval_dataloaders = []
if "badja" in args.eval_datasets:
eval_dataset = BadjaDataset(
data_root=os.path.join(args.dataset_root, "BADJA"),
max_seq_len=args.eval_max_seq_len,
dataset_resolution=args.crop_size,
)
eval_dataloader_badja = torch.utils.data.DataLoader(
eval_dataset,
batch_size=1,
shuffle=False,
num_workers=8,
collate_fn=collate_fn,
)
eval_dataloaders.append(("badja", eval_dataloader_badja))
if "fastcapture" in args.eval_datasets:
eval_dataset = FastCaptureDataset(
data_root=os.path.join(args.dataset_root, "fastcapture"),
max_seq_len=min(100, args.eval_max_seq_len),
max_num_points=40,
dataset_resolution=args.crop_size,
)
eval_dataloader_fastcapture = torch.utils.data.DataLoader(
eval_dataset,
batch_size=1,
shuffle=False,
num_workers=1,
collate_fn=collate_fn,
)
eval_dataloaders.append(("fastcapture", eval_dataloader_fastcapture))
if "tapvid_davis_first" in args.eval_datasets:
data_root = os.path.join(args.dataset_root, "tapvid_davis/tapvid_davis.pkl")
eval_dataset = TapVidDataset(dataset_type="davis", data_root=data_root)
eval_dataloader_tapvid_davis = torch.utils.data.DataLoader(
eval_dataset,
batch_size=1,
shuffle=False,
num_workers=1,
collate_fn=collate_fn,
)
eval_dataloaders.append(("tapvid_davis", eval_dataloader_tapvid_davis))
evaluator = Evaluator(args.ckpt_path)
visualizer = Visualizer(
save_dir=args.ckpt_path,
pad_value=80,
fps=1,
show_first_frame=0,
tracks_leave_trace=0,
)
loss_fn = None
if args.model_name == "cotracker":
model = CoTracker(
stride=args.model_stride,
S=args.sliding_window_len,
add_space_attn=not args.remove_space_attn,
num_heads=args.updateformer_num_heads,
hidden_size=args.updateformer_hidden_size,
space_depth=args.updateformer_space_depth,
time_depth=args.updateformer_time_depth,
)
else:
raise ValueError(f"Model {args.model_name} doesn't exist")
with open(args.ckpt_path + "/meta.json", "w") as file:
json.dump(vars(args), file, sort_keys=True, indent=4)
model.cuda()
train_dataset = kubric_movif_dataset.KubricMovifDataset(
data_root=os.path.join(args.dataset_root, "kubric_movi_f"),
crop_size=args.crop_size,
seq_len=args.sequence_len,
traj_per_sample=args.traj_per_sample,
sample_vis_1st_frame=args.sample_vis_1st_frame,
use_augs=not args.dont_use_augs,
)
train_loader = DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers,
worker_init_fn=seed_worker,
generator=g,
pin_memory=True,
collate_fn=collate_fn_train,
drop_last=True,
)
train_loader = self.setup_dataloaders(train_loader, move_to_device=False)
print("LEN TRAIN LOADER", len(train_loader))
optimizer, scheduler = fetch_optimizer(args, model)
total_steps = 0
logger = Logger(model, scheduler)
folder_ckpts = [
f
for f in os.listdir(args.ckpt_path)
if not os.path.isdir(f) and f.endswith(".pth") and not "final" in f
]
if len(folder_ckpts) > 0:
ckpt_path = sorted(folder_ckpts)[-1]
ckpt = self.load(os.path.join(args.ckpt_path, ckpt_path))
logging.info(f"Loading checkpoint {ckpt_path}")
if "model" in ckpt:
model.load_state_dict(ckpt["model"])
else:
model.load_state_dict(ckpt)
if "optimizer" in ckpt:
logging.info("Load optimizer")
optimizer.load_state_dict(ckpt["optimizer"])
if "scheduler" in ckpt:
logging.info("Load scheduler")
scheduler.load_state_dict(ckpt["scheduler"])
if "total_steps" in ckpt:
total_steps = ckpt["total_steps"]
logging.info(f"Load total_steps {total_steps}")
elif args.restore_ckpt is not None:
assert args.restore_ckpt.endswith(".pth") or args.restore_ckpt.endswith(
".pt"
)
logging.info("Loading checkpoint...")
strict = True
state_dict = self.load(args.restore_ckpt)
if "model" in state_dict:
state_dict = state_dict["model"]
if list(state_dict.keys())[0].startswith("module."):
state_dict = {
k.replace("module.", ""): v for k, v in state_dict.items()
}
model.load_state_dict(state_dict, strict=strict)
logging.info(f"Done loading checkpoint")
model, optimizer = self.setup(model, optimizer, move_to_device=False)
# model.cuda()
model.train()
save_freq = args.save_freq
scaler = GradScaler(enabled=args.mixed_precision)
should_keep_training = True
global_batch_num = 0
epoch = -1
while should_keep_training:
epoch += 1
for i_batch, batch in enumerate(tqdm(train_loader)):
batch, gotit = batch
if not all(gotit):
print("batch is None")
continue
dataclass_to_cuda_(batch)
optimizer.zero_grad()
assert model.training
output = forward_batch(
batch,
model,
args,
loss_fn=loss_fn,
writer=logger.writer,
step=total_steps,
)
loss = 0
for k, v in output.items():
if "loss" in v:
loss += v["loss"]
logger.writer.add_scalar(
f"live_{k}_loss", v["loss"].item(), total_steps
)
if "metrics" in v:
logger.push(v["metrics"], k)
if self.global_rank == 0:
if total_steps % save_freq == save_freq - 1:
if args.model_name == "motion_diffuser":
pred_coords = model.module.module.forward_batch_test(
batch, interp_shape=args.crop_size
)
output["flow"] = {"predictions": pred_coords[0].detach()}
visualizer.visualize(
video=batch.video.clone(),
tracks=batch.trajectory.clone(),
filename="train_gt_traj",
writer=logger.writer,
step=total_steps,
)
visualizer.visualize(
video=batch.video.clone(),
tracks=output["flow"]["predictions"][None],
filename="train_pred_traj",
writer=logger.writer,
step=total_steps,
)
if len(output) > 1:
logger.writer.add_scalar(
f"live_total_loss", loss.item(), total_steps
)
logger.writer.add_scalar(
f"learning_rate", optimizer.param_groups[0]["lr"], total_steps
)
global_batch_num += 1
self.barrier()
self.backward(scaler.scale(loss))
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), 10.0)
scaler.step(optimizer)
scheduler.step()
scaler.update()
total_steps += 1
if self.global_rank == 0:
if (i_batch >= len(train_loader) - 1) or (
total_steps == 1 and args.validate_at_start
):
if (epoch + 1) % args.save_every_n_epoch == 0:
ckpt_iter = "0" * (6 - len(str(total_steps))) + str(
total_steps
)
save_path = Path(
f"{args.ckpt_path}/model_{args.model_name}_{ckpt_iter}.pth"
)
save_dict = {
"model": model.module.module.state_dict(),
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
"total_steps": total_steps,
}
logging.info(f"Saving file {save_path}")
self.save(save_dict, save_path)
if (epoch + 1) % args.evaluate_every_n_epoch == 0 or (
args.validate_at_start and epoch == 0
):
run_test_eval(
evaluator,
model,
eval_dataloaders,
logger.writer,
total_steps,
)
model.train()
torch.cuda.empty_cache()
self.barrier()
if total_steps > args.num_steps:
should_keep_training = False
break
print("FINISHED TRAINING")
PATH = f"{args.ckpt_path}/{args.model_name}_final.pth"
torch.save(model.module.module.state_dict(), PATH)
run_test_eval(evaluator, model, eval_dataloaders, logger.writer, total_steps)
logger.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_name", default="cotracker", help="model name")
parser.add_argument("--restore_ckpt", help="path to restore a checkpoint")
parser.add_argument("--ckpt_path", help="path to save checkpoints")
parser.add_argument(
"--batch_size", type=int, default=4, help="batch size used during training."
)
parser.add_argument(
"--num_workers", type=int, default=6, help="number of dataloader workers"
)
parser.add_argument(
"--mixed_precision", action="store_true", help="use mixed precision"
)
parser.add_argument("--lr", type=float, default=0.0005, help="max learning rate.")
parser.add_argument(
"--wdecay", type=float, default=0.00001, help="Weight decay in optimizer."
)
parser.add_argument(
"--num_steps", type=int, default=200000, help="length of training schedule."
)
parser.add_argument(
"--evaluate_every_n_epoch",
type=int,
default=1,
help="evaluate during training after every n epochs, after every epoch by default",
)
parser.add_argument(
"--save_every_n_epoch",
type=int,
default=1,
help="save checkpoints during training after every n epochs, after every epoch by default",
)
parser.add_argument(
"--validate_at_start",
action="store_true",
help="whether to run evaluation before training starts",
)
parser.add_argument(
"--save_freq",
type=int,
default=100,
help="frequency of trajectory visualization during training",
)
parser.add_argument(
"--traj_per_sample",
type=int,
default=768,
help="the number of trajectories to sample for training",
)
parser.add_argument(
"--dataset_root", type=str, help="path lo all the datasets (train and eval)"
)
parser.add_argument(
"--train_iters",
type=int,
default=4,
help="number of updates to the disparity field in each forward pass.",
)
parser.add_argument(
"--sequence_len", type=int, default=8, help="train sequence length"
)
parser.add_argument(
"--eval_datasets",
nargs="+",
default=["things", "badja"],
help="what datasets to use for evaluation",
)
parser.add_argument(
"--remove_space_attn",
action="store_true",
help="remove space attention from CoTracker",
)
parser.add_argument(
"--dont_use_augs",
action="store_true",
help="don't apply augmentations during training",
)
parser.add_argument(
"--sample_vis_1st_frame",
action="store_true",
help="only sample trajectories with points visible on the first frame",
)
parser.add_argument(
"--sliding_window_len",
type=int,
default=8,
help="length of the CoTracker sliding window",
)
parser.add_argument(
"--updateformer_hidden_size",
type=int,
default=384,
help="hidden dimension of the CoTracker transformer model",
)
parser.add_argument(
"--updateformer_num_heads",
type=int,
default=8,
help="number of heads of the CoTracker transformer model",
)
parser.add_argument(
"--updateformer_space_depth",
type=int,
default=12,
help="number of group attention layers in the CoTracker transformer model",
)
parser.add_argument(
"--updateformer_time_depth",
type=int,
default=12,
help="number of time attention layers in the CoTracker transformer model",
)
parser.add_argument(
"--model_stride",
type=int,
default=8,
help="stride of the CoTracker feature network",
)
parser.add_argument(
"--crop_size",
type=int,
nargs="+",
default=[384, 512],
help="crop videos to this resolution during training",
)
parser.add_argument(
"--eval_max_seq_len",
type=int,
default=1000,
help="maximum length of evaluation videos",
)
args = parser.parse_args()
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s",
)
Path(args.ckpt_path).mkdir(exist_ok=True, parents=True)
from pytorch_lightning.strategies import DDPStrategy
Lite(
strategy=DDPStrategy(find_unused_parameters=True),
devices="auto",
accelerator="gpu",
precision=32,
# num_nodes=4,
).run(args)
| co-tracker-main | train.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
dependencies = ["torch", "einops", "timm", "tqdm"]
_COTRACKER_URL = (
"https://dl.fbaipublicfiles.com/cotracker/cotracker_stride_4_wind_8.pth"
)
def _make_cotracker_predictor(*, pretrained: bool = True, **kwargs):
from cotracker.predictor import CoTrackerPredictor
predictor = CoTrackerPredictor(checkpoint=None)
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(
_COTRACKER_URL, map_location="cpu"
)
predictor.model.load_state_dict(state_dict)
return predictor
def cotracker_w8(*, pretrained: bool = True, **kwargs):
"""
CoTracker model with stride 4 and window length 8. (The main model from the paper)
"""
return _make_cotracker_predictor(pretrained=pretrained, **kwargs)
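
# A minimal usage sketch (not part of the original file): it builds the pretrained
# predictor defined above and tracks a regular grid of points on a random clip.
# The expected output shapes in the comments are assumptions based on
# cotracker/predictor.py; equivalently one could call
# torch.hub.load("facebookresearch/co-tracker", "cotracker_w8") (repo path assumed).
if __name__ == "__main__":
    predictor = cotracker_w8(pretrained=True)  # downloads weights from _COTRACKER_URL
    video = torch.randn(1, 24, 3, 384, 512)  # (B, T, C, H, W) dummy video tensor
    tracks, visibilities = predictor(video, grid_size=10)  # 10x10 = 100 tracked points
    print(tracks.shape, visibilities.shape)  # expected: (1, 24, 100, 2) and (1, 24, 100)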
| co-tracker-main | hubconf.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import cv2
import torch
import argparse
import numpy as np
from PIL import Image
from cotracker.utils.visualizer import Visualizer, read_video_from_path
from cotracker.predictor import CoTrackerPredictor
DEFAULT_DEVICE = ('cuda' if torch.cuda.is_available() else
'mps' if torch.backends.mps.is_available() else
'cpu')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--video_path",
default="./assets/apple.mp4",
help="path to a video",
)
parser.add_argument(
"--mask_path",
default="./assets/apple_mask.png",
help="path to a segmentation mask",
)
parser.add_argument(
"--checkpoint",
default="./checkpoints/cotracker_stride_4_wind_8.pth",
help="cotracker model",
)
parser.add_argument("--grid_size", type=int, default=0, help="Regular grid size")
parser.add_argument(
"--grid_query_frame",
type=int,
default=0,
help="Compute dense and grid tracks starting from this frame ",
)
parser.add_argument(
"--backward_tracking",
action="store_true",
help="Compute tracks in both directions, not only forward",
)
args = parser.parse_args()
# load the input video frame by frame
video = read_video_from_path(args.video_path)
video = torch.from_numpy(video).permute(0, 3, 1, 2)[None].float()
segm_mask = np.array(Image.open(os.path.join(args.mask_path)))
segm_mask = torch.from_numpy(segm_mask)[None, None]
model = CoTrackerPredictor(checkpoint=args.checkpoint)
model = model.to(DEFAULT_DEVICE)
video = video.to(DEFAULT_DEVICE)
pred_tracks, pred_visibility = model(
video,
grid_size=args.grid_size,
grid_query_frame=args.grid_query_frame,
backward_tracking=args.backward_tracking,
# segm_mask=segm_mask
)
print("computed")
# save a video with predicted tracks
seq_name = args.video_path.split("/")[-1]
vis = Visualizer(save_dir="./saved_videos", pad_value=120, linewidth=3)
vis.visualize(video, pred_tracks, pred_visibility, query_frame=args.grid_query_frame)
| co-tracker-main | demo.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
from tqdm import tqdm
from cotracker.models.core.cotracker.cotracker import get_points_on_a_grid
from cotracker.models.core.model_utils import smart_cat
from cotracker.models.build_cotracker import (
build_cotracker,
)
class CoTrackerPredictor(torch.nn.Module):
def __init__(
self, checkpoint="cotracker/checkpoints/cotracker_stride_4_wind_8.pth"
):
super().__init__()
self.interp_shape = (384, 512)
self.support_grid_size = 6
model = build_cotracker(checkpoint)
self.model = model
self.model.eval()
@torch.no_grad()
def forward(
self,
video, # (1, T, 3, H, W)
# input prompt types:
        # - None. Dense tracks are computed in this case. You can adjust *grid_query_frame* to compute tracks starting from a specific frame.
        #   *backward_tracking=True* will compute tracks in both directions.
        # - queries. Queried points of shape (1, N, 3) in format (t, x, y) for frame index and pixel coordinates.
        # - grid_size. Regular grid of N*N points from the first frame. If segm_mask is provided, tracks are computed only for points inside the mask.
        #   You can adjust *grid_query_frame* and *backward_tracking* for the regular grid in the same way as for dense tracks.
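        #   Illustrative example (made-up values, not from the original file):
        #   queries = torch.tensor([[[0., 400., 350.], [10., 600., 500.]]])  # 2 points, queried at frames 0 and 10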
queries: torch.Tensor = None,
segm_mask: torch.Tensor = None, # Segmentation mask of shape (B, 1, H, W)
grid_size: int = 0,
grid_query_frame: int = 0, # only for dense and regular grid tracks
backward_tracking: bool = False,
):
if queries is None and grid_size == 0:
tracks, visibilities = self._compute_dense_tracks(
video,
grid_query_frame=grid_query_frame,
backward_tracking=backward_tracking,
)
else:
tracks, visibilities = self._compute_sparse_tracks(
video,
queries,
segm_mask,
grid_size,
add_support_grid=(grid_size == 0 or segm_mask is not None),
grid_query_frame=grid_query_frame,
backward_tracking=backward_tracking,
)
return tracks, visibilities
def _compute_dense_tracks(
self, video, grid_query_frame, grid_size=30, backward_tracking=False
):
*_, H, W = video.shape
grid_step = W // grid_size
grid_width = W // grid_step
grid_height = H // grid_step
tracks = visibilities = None
grid_pts = torch.zeros((1, grid_width * grid_height, 3)).to(video.device)
grid_pts[0, :, 0] = grid_query_frame
for offset in tqdm(range(grid_step * grid_step)):
ox = offset % grid_step
oy = offset // grid_step
grid_pts[0, :, 1] = (
torch.arange(grid_width).repeat(grid_height) * grid_step + ox
)
grid_pts[0, :, 2] = (
torch.arange(grid_height).repeat_interleave(grid_width) * grid_step + oy
)
tracks_step, visibilities_step = self._compute_sparse_tracks(
video=video,
queries=grid_pts,
backward_tracking=backward_tracking,
)
tracks = smart_cat(tracks, tracks_step, dim=2)
visibilities = smart_cat(visibilities, visibilities_step, dim=2)
return tracks, visibilities
def _compute_sparse_tracks(
self,
video,
queries,
segm_mask=None,
grid_size=0,
add_support_grid=False,
grid_query_frame=0,
backward_tracking=False,
):
B, T, C, H, W = video.shape
assert B == 1
video = video.reshape(B * T, C, H, W)
video = F.interpolate(video, tuple(self.interp_shape), mode="bilinear")
video = video.reshape(B, T, 3, self.interp_shape[0], self.interp_shape[1])
if queries is not None:
queries = queries.clone()
B, N, D = queries.shape
assert D == 3
queries[:, :, 1] *= self.interp_shape[1] / W
queries[:, :, 2] *= self.interp_shape[0] / H
elif grid_size > 0:
grid_pts = get_points_on_a_grid(grid_size, self.interp_shape, device=video.device)
if segm_mask is not None:
segm_mask = F.interpolate(
segm_mask, tuple(self.interp_shape), mode="nearest"
)
point_mask = segm_mask[0, 0][
(grid_pts[0, :, 1]).round().long().cpu(),
(grid_pts[0, :, 0]).round().long().cpu(),
].bool()
grid_pts = grid_pts[:, point_mask]
queries = torch.cat(
[torch.ones_like(grid_pts[:, :, :1]) * grid_query_frame, grid_pts],
dim=2,
)
if add_support_grid:
grid_pts = get_points_on_a_grid(self.support_grid_size, self.interp_shape, device=video.device)
grid_pts = torch.cat(
[torch.zeros_like(grid_pts[:, :, :1]), grid_pts], dim=2
)
queries = torch.cat([queries, grid_pts], dim=1)
tracks, __, visibilities, __ = self.model(rgbs=video, queries=queries, iters=6)
if backward_tracking:
tracks, visibilities = self._compute_backward_tracks(
video, queries, tracks, visibilities
)
if add_support_grid:
queries[:, -self.support_grid_size ** 2 :, 0] = T - 1
if add_support_grid:
tracks = tracks[:, :, : -self.support_grid_size ** 2]
visibilities = visibilities[:, :, : -self.support_grid_size ** 2]
thr = 0.9
visibilities = visibilities > thr
# correct query-point predictions
# see https://github.com/facebookresearch/co-tracker/issues/28
# TODO: batchify
for i in range(len(queries)):
queries_t = queries[i, :tracks.size(2), 0].to(torch.int64)
arange = torch.arange(0, len(queries_t))
# overwrite the predictions with the query points
tracks[i, queries_t, arange] = queries[i, :tracks.size(2), 1:]
# correct visibilities, the query points should be visible
visibilities[i, queries_t, arange] = True
tracks[:, :, :, 0] *= W / float(self.interp_shape[1])
tracks[:, :, :, 1] *= H / float(self.interp_shape[0])
return tracks, visibilities
def _compute_backward_tracks(self, video, queries, tracks, visibilities):
inv_video = video.flip(1).clone()
inv_queries = queries.clone()
inv_queries[:, :, 0] = inv_video.shape[1] - inv_queries[:, :, 0] - 1
inv_tracks, __, inv_visibilities, __ = self.model(
rgbs=inv_video, queries=inv_queries, iters=6
)
inv_tracks = inv_tracks.flip(1)
inv_visibilities = inv_visibilities.flip(1)
mask = tracks == 0
tracks[mask] = inv_tracks[mask]
visibilities[mask[:, :, :, 0]] = inv_visibilities[mask[:, :, :, 0]]
return tracks, visibilities
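
# A minimal, self-contained usage sketch (not part of the original file). The
# checkpoint path and the expected output shapes are assumptions; any checkpoint
# compatible with CoTrackerPredictor should work. Each query is
# (frame index t, x, y) in the resolution of the input video.
if __name__ == "__main__":
    predictor = CoTrackerPredictor(
        checkpoint="./checkpoints/cotracker_stride_4_wind_8.pth"  # assumed local path
    )
    video = torch.randn(1, 48, 3, 480, 640)  # (B, T, C, H, W) dummy clip
    queries = torch.tensor([[[0.0, 320.0, 240.0], [8.0, 100.0, 200.0]]])  # (1, N=2, 3)
    tracks, visibilities = predictor(video, queries=queries)
    print(tracks.shape, visibilities.shape)  # expected: (1, 48, 2, 2) and (1, 48, 2)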
| co-tracker-main | cotracker/predictor.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
| co-tracker-main | cotracker/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import io
import glob
import torch
import pickle
import numpy as np
import mediapy as media
from PIL import Image
from typing import Mapping, Tuple, Union
from cotracker.datasets.utils import CoTrackerData
DatasetElement = Mapping[str, Mapping[str, Union[np.ndarray, str]]]
def resize_video(video: np.ndarray, output_size: Tuple[int, int]) -> np.ndarray:
"""Resize a video to output_size."""
# If you have a GPU, consider replacing this with a GPU-enabled resize op,
# such as a jitted jax.image.resize. It will make things faster.
return media.resize_video(video, output_size)
def sample_queries_first(
target_occluded: np.ndarray,
target_points: np.ndarray,
frames: np.ndarray,
) -> Mapping[str, np.ndarray]:
"""Package a set of frames and tracks for use in TAPNet evaluations.
Given a set of frames and tracks with no query points, use the first
visible point in each track as the query.
Args:
target_occluded: Boolean occlusion flag, of shape [n_tracks, n_frames],
where True indicates occluded.
target_points: Position, of shape [n_tracks, n_frames, 2], where each point
is [x,y] scaled between 0 and 1.
frames: Video tensor, of shape [n_frames, height, width, 3]. Scaled between
-1 and 1.
Returns:
A dict with the keys:
video: Video tensor of shape [1, n_frames, height, width, 3]
query_points: Query points of shape [1, n_queries, 3] where
each point is [t, y, x] scaled to the range [-1, 1]
target_points: Target points of shape [1, n_queries, n_frames, 2] where
each point is [x, y] scaled to the range [-1, 1]
"""
valid = np.sum(~target_occluded, axis=1) > 0
target_points = target_points[valid, :]
target_occluded = target_occluded[valid, :]
query_points = []
for i in range(target_points.shape[0]):
index = np.where(target_occluded[i] == 0)[0][0]
x, y = target_points[i, index, 0], target_points[i, index, 1]
query_points.append(np.array([index, y, x])) # [t, y, x]
query_points = np.stack(query_points, axis=0)
return {
"video": frames[np.newaxis, ...],
"query_points": query_points[np.newaxis, ...],
"target_points": target_points[np.newaxis, ...],
"occluded": target_occluded[np.newaxis, ...],
}
def sample_queries_strided(
target_occluded: np.ndarray,
target_points: np.ndarray,
frames: np.ndarray,
query_stride: int = 5,
) -> Mapping[str, np.ndarray]:
"""Package a set of frames and tracks for use in TAPNet evaluations.
Given a set of frames and tracks with no query points, sample queries
strided every query_stride frames, ignoring points that are not visible
at the selected frames.
Args:
target_occluded: Boolean occlusion flag, of shape [n_tracks, n_frames],
where True indicates occluded.
target_points: Position, of shape [n_tracks, n_frames, 2], where each point
is [x,y] scaled between 0 and 1.
frames: Video tensor, of shape [n_frames, height, width, 3]. Scaled between
-1 and 1.
query_stride: When sampling query points, search for un-occluded points
every query_stride frames and convert each one into a query.
Returns:
A dict with the keys:
video: Video tensor of shape [1, n_frames, height, width, 3]. The video
has floats scaled to the range [-1, 1].
query_points: Query points of shape [1, n_queries, 3] where
each point is [t, y, x] scaled to the range [-1, 1].
target_points: Target points of shape [1, n_queries, n_frames, 2] where
each point is [x, y] scaled to the range [-1, 1].
trackgroup: Index of the original track that each query point was
sampled from. This is useful for visualization.
"""
tracks = []
occs = []
queries = []
trackgroups = []
total = 0
trackgroup = np.arange(target_occluded.shape[0])
for i in range(0, target_occluded.shape[1], query_stride):
mask = target_occluded[:, i] == 0
query = np.stack(
[
i * np.ones(target_occluded.shape[0:1]),
target_points[:, i, 1],
target_points[:, i, 0],
],
axis=-1,
)
queries.append(query[mask])
tracks.append(target_points[mask])
occs.append(target_occluded[mask])
trackgroups.append(trackgroup[mask])
total += np.array(np.sum(target_occluded[:, i] == 0))
return {
"video": frames[np.newaxis, ...],
"query_points": np.concatenate(queries, axis=0)[np.newaxis, ...],
"target_points": np.concatenate(tracks, axis=0)[np.newaxis, ...],
"occluded": np.concatenate(occs, axis=0)[np.newaxis, ...],
"trackgroup": np.concatenate(trackgroups, axis=0)[np.newaxis, ...],
}
class TapVidDataset(torch.utils.data.Dataset):
def __init__(
self,
data_root,
dataset_type="davis",
resize_to_256=True,
queried_first=True,
):
self.dataset_type = dataset_type
self.resize_to_256 = resize_to_256
self.queried_first = queried_first
if self.dataset_type == "kinetics":
all_paths = glob.glob(os.path.join(data_root, "*_of_0010.pkl"))
points_dataset = []
for pickle_path in all_paths:
with open(pickle_path, "rb") as f:
data = pickle.load(f)
points_dataset = points_dataset + data
self.points_dataset = points_dataset
else:
with open(data_root, "rb") as f:
self.points_dataset = pickle.load(f)
if self.dataset_type == "davis":
self.video_names = list(self.points_dataset.keys())
print("found %d unique videos in %s" % (len(self.points_dataset), data_root))
def __getitem__(self, index):
if self.dataset_type == "davis":
video_name = self.video_names[index]
else:
video_name = index
video = self.points_dataset[video_name]
frames = video["video"]
if isinstance(frames[0], bytes):
            # TAP-Vid frames are stored as JPEG bytes rather than `np.ndarray`s.
def decode(frame):
byteio = io.BytesIO(frame)
img = Image.open(byteio)
return np.array(img)
frames = np.array([decode(frame) for frame in frames])
target_points = self.points_dataset[video_name]["points"]
if self.resize_to_256:
frames = resize_video(frames, [256, 256])
target_points *= np.array([256, 256])
else:
target_points *= np.array([frames.shape[2], frames.shape[1]])
T, H, W, C = frames.shape
N, T, D = target_points.shape
target_occ = self.points_dataset[video_name]["occluded"]
if self.queried_first:
converted = sample_queries_first(target_occ, target_points, frames)
else:
converted = sample_queries_strided(target_occ, target_points, frames)
assert converted["target_points"].shape[1] == converted["query_points"].shape[1]
trajs = (
torch.from_numpy(converted["target_points"])[0].permute(1, 0, 2).float()
) # T, N, D
rgbs = torch.from_numpy(frames).permute(0, 3, 1, 2).float()
segs = torch.ones(T, 1, H, W).float()
visibles = torch.logical_not(torch.from_numpy(converted["occluded"]))[
0
].permute(
1, 0
) # T, N
query_points = torch.from_numpy(converted["query_points"])[0] # T, N
return CoTrackerData(
rgbs,
segs,
trajs,
visibles,
seq_name=str(video_name),
query_points=query_points,
)
def __len__(self):
return len(self.points_dataset)
| co-tracker-main | cotracker/datasets/tap_vid_datasets.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
import imageio
import numpy as np
from cotracker.datasets.utils import CoTrackerData
from torchvision.transforms import ColorJitter, GaussianBlur
from PIL import Image
import cv2
class CoTrackerDataset(torch.utils.data.Dataset):
def __init__(
self,
data_root,
crop_size=(384, 512),
seq_len=24,
traj_per_sample=768,
sample_vis_1st_frame=False,
use_augs=False,
):
super(CoTrackerDataset, self).__init__()
np.random.seed(0)
torch.manual_seed(0)
self.data_root = data_root
self.seq_len = seq_len
self.traj_per_sample = traj_per_sample
self.sample_vis_1st_frame = sample_vis_1st_frame
self.use_augs = use_augs
self.crop_size = crop_size
# photometric augmentation
self.photo_aug = ColorJitter(
brightness=0.2, contrast=0.2, saturation=0.2, hue=0.25 / 3.14
)
self.blur_aug = GaussianBlur(11, sigma=(0.1, 2.0))
self.blur_aug_prob = 0.25
self.color_aug_prob = 0.25
# occlusion augmentation
self.eraser_aug_prob = 0.5
self.eraser_bounds = [2, 100]
self.eraser_max = 10
# occlusion augmentation
self.replace_aug_prob = 0.5
self.replace_bounds = [2, 100]
self.replace_max = 10
# spatial augmentations
self.pad_bounds = [0, 100]
self.crop_size = crop_size
self.resize_lim = [0.25, 2.0] # sample resizes from here
self.resize_delta = 0.2
self.max_crop_offset = 50
self.do_flip = True
self.h_flip_prob = 0.5
self.v_flip_prob = 0.5
def getitem_helper(self, index):
        raise NotImplementedError
def __getitem__(self, index):
gotit = False
sample, gotit = self.getitem_helper(index)
if not gotit:
print("warning: sampling failed")
# fake sample, so we can still collate
sample = CoTrackerData(
video=torch.zeros(
(self.seq_len, 3, self.crop_size[0], self.crop_size[1])
),
segmentation=torch.zeros(
(self.seq_len, 1, self.crop_size[0], self.crop_size[1])
),
trajectory=torch.zeros((self.seq_len, self.traj_per_sample, 2)),
visibility=torch.zeros((self.seq_len, self.traj_per_sample)),
valid=torch.zeros((self.seq_len, self.traj_per_sample)),
)
return sample, gotit
def add_photometric_augs(self, rgbs, trajs, visibles, eraser=True, replace=True):
T, N, _ = trajs.shape
S = len(rgbs)
H, W = rgbs[0].shape[:2]
assert S == T
if eraser:
############ eraser transform (per image after the first) ############
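            # Blank out up to `eraser_max` random rectangles with that region's mean color
            # and mark any trajectory point falling inside them as occluded.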
rgbs = [rgb.astype(np.float32) for rgb in rgbs]
for i in range(1, S):
if np.random.rand() < self.eraser_aug_prob:
for _ in range(
np.random.randint(1, self.eraser_max + 1)
): # number of times to occlude
xc = np.random.randint(0, W)
yc = np.random.randint(0, H)
dx = np.random.randint(
self.eraser_bounds[0], self.eraser_bounds[1]
)
dy = np.random.randint(
self.eraser_bounds[0], self.eraser_bounds[1]
)
x0 = np.clip(xc - dx / 2, 0, W - 1).round().astype(np.int32)
x1 = np.clip(xc + dx / 2, 0, W - 1).round().astype(np.int32)
y0 = np.clip(yc - dy / 2, 0, H - 1).round().astype(np.int32)
y1 = np.clip(yc + dy / 2, 0, H - 1).round().astype(np.int32)
mean_color = np.mean(
rgbs[i][y0:y1, x0:x1, :].reshape(-1, 3), axis=0
)
rgbs[i][y0:y1, x0:x1, :] = mean_color
occ_inds = np.logical_and(
np.logical_and(trajs[i, :, 0] >= x0, trajs[i, :, 0] < x1),
np.logical_and(trajs[i, :, 1] >= y0, trajs[i, :, 1] < y1),
)
visibles[i, occ_inds] = 0
rgbs = [rgb.astype(np.uint8) for rgb in rgbs]
if replace:
rgbs_alt = [
np.array(self.photo_aug(Image.fromarray(rgb)), dtype=np.uint8)
for rgb in rgbs
]
rgbs_alt = [
np.array(self.photo_aug(Image.fromarray(rgb)), dtype=np.uint8)
for rgb in rgbs_alt
]
############ replace transform (per image after the first) ############
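            # Overwrite random rectangles with same-sized patches copied from color-jittered
            # versions of random frames, and mark covered trajectory points as occluded.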
rgbs = [rgb.astype(np.float32) for rgb in rgbs]
rgbs_alt = [rgb.astype(np.float32) for rgb in rgbs_alt]
for i in range(1, S):
if np.random.rand() < self.replace_aug_prob:
for _ in range(
np.random.randint(1, self.replace_max + 1)
): # number of times to occlude
xc = np.random.randint(0, W)
yc = np.random.randint(0, H)
dx = np.random.randint(
self.replace_bounds[0], self.replace_bounds[1]
)
dy = np.random.randint(
self.replace_bounds[0], self.replace_bounds[1]
)
x0 = np.clip(xc - dx / 2, 0, W - 1).round().astype(np.int32)
x1 = np.clip(xc + dx / 2, 0, W - 1).round().astype(np.int32)
y0 = np.clip(yc - dy / 2, 0, H - 1).round().astype(np.int32)
y1 = np.clip(yc + dy / 2, 0, H - 1).round().astype(np.int32)
wid = x1 - x0
hei = y1 - y0
y00 = np.random.randint(0, H - hei)
x00 = np.random.randint(0, W - wid)
fr = np.random.randint(0, S)
rep = rgbs_alt[fr][y00 : y00 + hei, x00 : x00 + wid, :]
rgbs[i][y0:y1, x0:x1, :] = rep
occ_inds = np.logical_and(
np.logical_and(trajs[i, :, 0] >= x0, trajs[i, :, 0] < x1),
np.logical_and(trajs[i, :, 1] >= y0, trajs[i, :, 1] < y1),
)
visibles[i, occ_inds] = 0
rgbs = [rgb.astype(np.uint8) for rgb in rgbs]
############ photometric augmentation ############
if np.random.rand() < self.color_aug_prob:
# random per-frame amount of aug
rgbs = [
np.array(self.photo_aug(Image.fromarray(rgb)), dtype=np.uint8)
for rgb in rgbs
]
if np.random.rand() < self.blur_aug_prob:
# random per-frame amount of blur
rgbs = [
np.array(self.blur_aug(Image.fromarray(rgb)), dtype=np.uint8)
for rgb in rgbs
]
return rgbs, trajs, visibles
def add_spatial_augs(self, rgbs, trajs, visibles):
T, N, __ = trajs.shape
S = len(rgbs)
H, W = rgbs[0].shape[:2]
assert S == T
rgbs = [rgb.astype(np.float32) for rgb in rgbs]
############ spatial transform ############
# padding
pad_x0 = np.random.randint(self.pad_bounds[0], self.pad_bounds[1])
pad_x1 = np.random.randint(self.pad_bounds[0], self.pad_bounds[1])
pad_y0 = np.random.randint(self.pad_bounds[0], self.pad_bounds[1])
pad_y1 = np.random.randint(self.pad_bounds[0], self.pad_bounds[1])
rgbs = [
np.pad(rgb, ((pad_y0, pad_y1), (pad_x0, pad_x1), (0, 0))) for rgb in rgbs
]
trajs[:, :, 0] += pad_x0
trajs[:, :, 1] += pad_y0
H, W = rgbs[0].shape[:2]
# scaling + stretching
scale = np.random.uniform(self.resize_lim[0], self.resize_lim[1])
scale_x = scale
scale_y = scale
H_new = H
W_new = W
scale_delta_x = 0.0
scale_delta_y = 0.0
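        # The per-frame scale jitter is smoothed over time (0.8 carry-over of the previous
        # delta) so the simulated zoom drifts gradually instead of flickering frame to frame.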
rgbs_scaled = []
for s in range(S):
if s == 1:
scale_delta_x = np.random.uniform(-self.resize_delta, self.resize_delta)
scale_delta_y = np.random.uniform(-self.resize_delta, self.resize_delta)
elif s > 1:
scale_delta_x = (
scale_delta_x * 0.8
+ np.random.uniform(-self.resize_delta, self.resize_delta) * 0.2
)
scale_delta_y = (
scale_delta_y * 0.8
+ np.random.uniform(-self.resize_delta, self.resize_delta) * 0.2
)
scale_x = scale_x + scale_delta_x
scale_y = scale_y + scale_delta_y
# bring h/w closer
scale_xy = (scale_x + scale_y) * 0.5
scale_x = scale_x * 0.5 + scale_xy * 0.5
scale_y = scale_y * 0.5 + scale_xy * 0.5
# don't get too crazy
scale_x = np.clip(scale_x, 0.2, 2.0)
scale_y = np.clip(scale_y, 0.2, 2.0)
H_new = int(H * scale_y)
W_new = int(W * scale_x)
# make it at least slightly bigger than the crop area,
# so that the random cropping can add diversity
H_new = np.clip(H_new, self.crop_size[0] + 10, None)
W_new = np.clip(W_new, self.crop_size[1] + 10, None)
# recompute scale in case we clipped
scale_x = W_new / float(W)
scale_y = H_new / float(H)
rgbs_scaled.append(
cv2.resize(rgbs[s], (W_new, H_new), interpolation=cv2.INTER_LINEAR)
)
trajs[s, :, 0] *= scale_x
trajs[s, :, 1] *= scale_y
rgbs = rgbs_scaled
ok_inds = visibles[0, :] > 0
vis_trajs = trajs[:, ok_inds] # S,?,2
if vis_trajs.shape[1] > 0:
mid_x = np.mean(vis_trajs[0, :, 0])
mid_y = np.mean(vis_trajs[0, :, 1])
else:
mid_y = self.crop_size[0]
mid_x = self.crop_size[1]
x0 = int(mid_x - self.crop_size[1] // 2)
y0 = int(mid_y - self.crop_size[0] // 2)
offset_x = 0
offset_y = 0
for s in range(S):
# on each frame, shift a bit more
if s == 1:
offset_x = np.random.randint(
-self.max_crop_offset, self.max_crop_offset
)
offset_y = np.random.randint(
-self.max_crop_offset, self.max_crop_offset
)
elif s > 1:
offset_x = int(
offset_x * 0.8
+ np.random.randint(-self.max_crop_offset, self.max_crop_offset + 1)
* 0.2
)
offset_y = int(
offset_y * 0.8
+ np.random.randint(-self.max_crop_offset, self.max_crop_offset + 1)
* 0.2
)
x0 = x0 + offset_x
y0 = y0 + offset_y
H_new, W_new = rgbs[s].shape[:2]
if H_new == self.crop_size[0]:
y0 = 0
else:
y0 = min(max(0, y0), H_new - self.crop_size[0] - 1)
if W_new == self.crop_size[1]:
x0 = 0
else:
x0 = min(max(0, x0), W_new - self.crop_size[1] - 1)
rgbs[s] = rgbs[s][y0 : y0 + self.crop_size[0], x0 : x0 + self.crop_size[1]]
trajs[s, :, 0] -= x0
trajs[s, :, 1] -= y0
H_new = self.crop_size[0]
W_new = self.crop_size[1]
# flip
h_flipped = False
v_flipped = False
if self.do_flip:
# h flip
if np.random.rand() < self.h_flip_prob:
h_flipped = True
rgbs = [rgb[:, ::-1] for rgb in rgbs]
# v flip
if np.random.rand() < self.v_flip_prob:
v_flipped = True
rgbs = [rgb[::-1] for rgb in rgbs]
if h_flipped:
trajs[:, :, 0] = W_new - trajs[:, :, 0]
if v_flipped:
trajs[:, :, 1] = H_new - trajs[:, :, 1]
return rgbs, trajs
def crop(self, rgbs, trajs):
T, N, _ = trajs.shape
S = len(rgbs)
H, W = rgbs[0].shape[:2]
assert S == T
############ spatial transform ############
H_new = H
W_new = W
# simple random crop
y0 = (
0
if self.crop_size[0] >= H_new
else np.random.randint(0, H_new - self.crop_size[0])
)
x0 = (
0
if self.crop_size[1] >= W_new
else np.random.randint(0, W_new - self.crop_size[1])
)
rgbs = [
rgb[y0 : y0 + self.crop_size[0], x0 : x0 + self.crop_size[1]]
for rgb in rgbs
]
trajs[:, :, 0] -= x0
trajs[:, :, 1] -= y0
return rgbs, trajs
class KubricMovifDataset(CoTrackerDataset):
def __init__(
self,
data_root,
crop_size=(384, 512),
seq_len=24,
traj_per_sample=768,
sample_vis_1st_frame=False,
use_augs=False,
):
super(KubricMovifDataset, self).__init__(
data_root=data_root,
crop_size=crop_size,
seq_len=seq_len,
traj_per_sample=traj_per_sample,
sample_vis_1st_frame=sample_vis_1st_frame,
use_augs=use_augs,
)
self.pad_bounds = [0, 25]
self.resize_lim = [0.75, 1.25] # sample resizes from here
self.resize_delta = 0.05
self.max_crop_offset = 15
self.seq_names = [
fname
for fname in os.listdir(data_root)
if os.path.isdir(os.path.join(data_root, fname))
]
print("found %d unique videos in %s" % (len(self.seq_names), self.data_root))
def getitem_helper(self, index):
gotit = True
seq_name = self.seq_names[index]
npy_path = os.path.join(self.data_root, seq_name, seq_name + ".npy")
rgb_path = os.path.join(self.data_root, seq_name, "frames")
img_paths = sorted(os.listdir(rgb_path))
rgbs = []
for i, img_path in enumerate(img_paths):
rgbs.append(imageio.v2.imread(os.path.join(rgb_path, img_path)))
rgbs = np.stack(rgbs)
annot_dict = np.load(npy_path, allow_pickle=True).item()
traj_2d = annot_dict["coords"]
visibility = annot_dict["visibility"]
# random crop
assert self.seq_len <= len(rgbs)
if self.seq_len < len(rgbs):
start_ind = np.random.choice(len(rgbs) - self.seq_len, 1)[0]
rgbs = rgbs[start_ind : start_ind + self.seq_len]
traj_2d = traj_2d[:, start_ind : start_ind + self.seq_len]
visibility = visibility[:, start_ind : start_ind + self.seq_len]
traj_2d = np.transpose(traj_2d, (1, 0, 2))
visibility = np.transpose(np.logical_not(visibility), (1, 0))
if self.use_augs:
rgbs, traj_2d, visibility = self.add_photometric_augs(
rgbs, traj_2d, visibility
)
rgbs, traj_2d = self.add_spatial_augs(rgbs, traj_2d, visibility)
else:
rgbs, traj_2d = self.crop(rgbs, traj_2d)
visibility[traj_2d[:, :, 0] > self.crop_size[1] - 1] = False
visibility[traj_2d[:, :, 0] < 0] = False
visibility[traj_2d[:, :, 1] > self.crop_size[0] - 1] = False
visibility[traj_2d[:, :, 1] < 0] = False
visibility = torch.from_numpy(visibility)
traj_2d = torch.from_numpy(traj_2d)
visibile_pts_first_frame_inds = (visibility[0]).nonzero(as_tuple=False)[:, 0]
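        # Sample tracks that are visible in the first frame; unless restricted to
        # first-frame-visible points, tracks visible in the middle frame are also eligible.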
if self.sample_vis_1st_frame:
visibile_pts_inds = visibile_pts_first_frame_inds
else:
visibile_pts_mid_frame_inds = (visibility[self.seq_len // 2]).nonzero(
as_tuple=False
)[:, 0]
visibile_pts_inds = torch.cat(
(visibile_pts_first_frame_inds, visibile_pts_mid_frame_inds), dim=0
)
point_inds = torch.randperm(len(visibile_pts_inds))[: self.traj_per_sample]
if len(point_inds) < self.traj_per_sample:
gotit = False
visible_inds_sampled = visibile_pts_inds[point_inds]
trajs = traj_2d[:, visible_inds_sampled].float()
visibles = visibility[:, visible_inds_sampled]
valids = torch.ones((self.seq_len, self.traj_per_sample))
rgbs = torch.from_numpy(np.stack(rgbs)).permute(0, 3, 1, 2).float()
segs = torch.ones((self.seq_len, 1, self.crop_size[0], self.crop_size[1]))
sample = CoTrackerData(
video=rgbs,
segmentation=segs,
trajectory=trajs,
visibility=visibles,
valid=valids,
seq_name=seq_name,
)
return sample, gotit
def __len__(self):
return len(self.seq_names)
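# A minimal usage sketch (hypothetical directory layout: one sub-folder per sequence,
# each containing a `frames/` folder and a `<seq_name>.npy` annotation file):
if __name__ == "__main__":
    dataset = KubricMovifDataset(data_root="kubric_movif/train", seq_len=24, use_augs=True)
    sample, gotit = dataset[0]
    print(gotit, sample.video.shape)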
| co-tracker-main | cotracker/datasets/kubric_movif_dataset.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import numpy as np
import os
import json
import imageio
import cv2
from enum import Enum
from cotracker.datasets.utils import CoTrackerData, resize_sample
IGNORE_ANIMALS = [
# "bear.json",
# "camel.json",
"cat_jump.json"
# "cows.json",
# "dog.json",
# "dog-agility.json",
# "horsejump-high.json",
# "horsejump-low.json",
# "impala0.json",
# "rs_dog.json"
"tiger.json"
]
class SMALJointCatalog(Enum):
# body_0 = 0
# body_1 = 1
# body_2 = 2
# body_3 = 3
# body_4 = 4
# body_5 = 5
# body_6 = 6
# upper_right_0 = 7
upper_right_1 = 8
upper_right_2 = 9
upper_right_3 = 10
# upper_left_0 = 11
upper_left_1 = 12
upper_left_2 = 13
upper_left_3 = 14
neck_lower = 15
# neck_upper = 16
# lower_right_0 = 17
lower_right_1 = 18
lower_right_2 = 19
lower_right_3 = 20
# lower_left_0 = 21
lower_left_1 = 22
lower_left_2 = 23
lower_left_3 = 24
tail_0 = 25
# tail_1 = 26
# tail_2 = 27
tail_3 = 28
# tail_4 = 29
# tail_5 = 30
tail_6 = 31
jaw = 32
nose = 33 # ADDED JOINT FOR VERTEX 1863
# chin = 34 # ADDED JOINT FOR VERTEX 26
right_ear = 35 # ADDED JOINT FOR VERTEX 149
left_ear = 36 # ADDED JOINT FOR VERTEX 2124
class SMALJointInfo:
def __init__(self):
        # These are the joint classes that carry 2D keypoint annotations in BADJA.
self.annotated_classes = np.array(
[
8,
9,
10, # upper_right
12,
13,
14, # upper_left
15, # neck
18,
19,
20, # lower_right
22,
23,
24, # lower_left
25,
28,
31, # tail
32,
33, # head
35, # right_ear
36,
]
) # left_ear
self.annotated_markers = np.array(
[
cv2.MARKER_CROSS,
cv2.MARKER_STAR,
cv2.MARKER_TRIANGLE_DOWN,
cv2.MARKER_CROSS,
cv2.MARKER_STAR,
cv2.MARKER_TRIANGLE_DOWN,
cv2.MARKER_CROSS,
cv2.MARKER_CROSS,
cv2.MARKER_STAR,
cv2.MARKER_TRIANGLE_DOWN,
cv2.MARKER_CROSS,
cv2.MARKER_STAR,
cv2.MARKER_TRIANGLE_DOWN,
cv2.MARKER_CROSS,
cv2.MARKER_STAR,
cv2.MARKER_TRIANGLE_DOWN,
cv2.MARKER_CROSS,
cv2.MARKER_STAR,
cv2.MARKER_CROSS,
cv2.MARKER_CROSS,
]
)
self.joint_regions = np.array(
[
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
2,
2,
2,
2,
3,
3,
4,
4,
4,
4,
5,
5,
5,
5,
6,
6,
6,
6,
6,
6,
6,
7,
7,
7,
8,
9,
]
)
self.annotated_joint_region = self.joint_regions[self.annotated_classes]
self.region_colors = np.array(
[
[250, 190, 190], # body, light pink
[60, 180, 75], # upper_right, green
[230, 25, 75], # upper_left, red
[128, 0, 0], # neck, maroon
[0, 130, 200], # lower_right, blue
[255, 255, 25], # lower_left, yellow
                [240, 50, 230],  # tail, magenta
[245, 130, 48], # jaw / nose / chin, orange
[29, 98, 115], # right_ear, turquoise
[255, 153, 204],
]
) # left_ear, pink
self.joint_colors = np.array(self.region_colors)[self.annotated_joint_region]
class BADJAData:
def __init__(self, data_root, complete=False):
annotations_path = os.path.join(data_root, "joint_annotations")
self.animal_dict = {}
self.animal_count = 0
self.smal_joint_info = SMALJointInfo()
for __, animal_json in enumerate(sorted(os.listdir(annotations_path))):
if animal_json not in IGNORE_ANIMALS:
json_path = os.path.join(annotations_path, animal_json)
with open(json_path) as json_data:
animal_joint_data = json.load(json_data)
filenames = []
segnames = []
joints = []
visible = []
first_path = animal_joint_data[0]["segmentation_path"]
last_path = animal_joint_data[-1]["segmentation_path"]
first_frame = first_path.split("/")[-1]
last_frame = last_path.split("/")[-1]
if not "extra_videos" in first_path:
animal = first_path.split("/")[-2]
first_frame_int = int(first_frame.split(".")[0])
last_frame_int = int(last_frame.split(".")[0])
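                    # Walk every DAVIS frame between the first and last annotated frame;
                    # frames without a matching annotation keep joints/visibility as None.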
for fr in range(first_frame_int, last_frame_int + 1):
ref_file_name = os.path.join(
data_root,
"DAVIS/JPEGImages/Full-Resolution/%s/%05d.jpg"
% (animal, fr),
)
ref_seg_name = os.path.join(
data_root,
"DAVIS/Annotations/Full-Resolution/%s/%05d.png"
% (animal, fr),
)
foundit = False
for ind, image_annotation in enumerate(animal_joint_data):
file_name = os.path.join(
data_root, image_annotation["image_path"]
)
seg_name = os.path.join(
data_root, image_annotation["segmentation_path"]
)
if file_name == ref_file_name:
foundit = True
label_ind = ind
if foundit:
image_annotation = animal_joint_data[label_ind]
file_name = os.path.join(
data_root, image_annotation["image_path"]
)
seg_name = os.path.join(
data_root, image_annotation["segmentation_path"]
)
joint = np.array(image_annotation["joints"])
vis = np.array(image_annotation["visibility"])
else:
file_name = ref_file_name
seg_name = ref_seg_name
joint = None
vis = None
filenames.append(file_name)
segnames.append(seg_name)
joints.append(joint)
visible.append(vis)
if len(filenames):
self.animal_dict[self.animal_count] = (
filenames,
segnames,
joints,
visible,
)
self.animal_count += 1
print("Loaded BADJA dataset")
def get_loader(self):
for __ in range(int(1e6)):
animal_id = np.random.choice(len(self.animal_dict.keys()))
filenames, segnames, joints, visible = self.animal_dict[animal_id]
image_id = np.random.randint(0, len(filenames))
seg_file = segnames[image_id]
image_file = filenames[image_id]
joints = joints[image_id].copy()
joints = joints[self.smal_joint_info.annotated_classes]
visible = visible[image_id][self.smal_joint_info.annotated_classes]
rgb_img = imageio.imread(image_file) # , mode='RGB')
sil_img = imageio.imread(seg_file) # , mode='RGB')
rgb_h, rgb_w, _ = rgb_img.shape
            sil_img = cv2.resize(sil_img, (rgb_w, rgb_h), interpolation=cv2.INTER_NEAREST)
yield rgb_img, sil_img, joints, visible, image_file
def get_video(self, animal_id):
filenames, segnames, joint, visible = self.animal_dict[animal_id]
rgbs = []
segs = []
joints = []
visibles = []
for s in range(len(filenames)):
image_file = filenames[s]
rgb_img = imageio.imread(image_file) # , mode='RGB')
rgb_h, rgb_w, _ = rgb_img.shape
seg_file = segnames[s]
sil_img = imageio.imread(seg_file) # , mode='RGB')
            sil_img = cv2.resize(sil_img, (rgb_w, rgb_h), interpolation=cv2.INTER_NEAREST)
jo = joint[s]
if jo is not None:
joi = joint[s].copy()
joi = joi[self.smal_joint_info.annotated_classes]
vis = visible[s][self.smal_joint_info.annotated_classes]
else:
joi = None
vis = None
rgbs.append(rgb_img)
segs.append(sil_img)
joints.append(joi)
visibles.append(vis)
return rgbs, segs, joints, visibles, filenames[0]
class BadjaDataset(torch.utils.data.Dataset):
def __init__(
self, data_root, max_seq_len=1000, dataset_resolution=(384, 512)
):
self.data_root = data_root
self.badja_data = BADJAData(data_root)
self.max_seq_len = max_seq_len
self.dataset_resolution = dataset_resolution
print(
"found %d unique videos in %s"
% (self.badja_data.animal_count, self.data_root)
)
def __getitem__(self, index):
rgbs, segs, joints, visibles, filename = self.badja_data.get_video(index)
S = len(rgbs)
H, W, __ = rgbs[0].shape
H, W, __ = segs[0].shape
N, __ = joints[0].shape
# let's eliminate the Nones
# note the first one is guaranteed present
for s in range(1, S):
if joints[s] is None:
joints[s] = np.zeros_like(joints[0])
visibles[s] = np.zeros_like(visibles[0])
# eliminate the mystery dim
segs = [seg[:, :, 0] for seg in segs]
rgbs = np.stack(rgbs, 0)
segs = np.stack(segs, 0)
trajs = np.stack(joints, 0)
visibles = np.stack(visibles, 0)
rgbs = torch.from_numpy(rgbs).reshape(S, H, W, 3).permute(0, 3, 1, 2).float()
segs = torch.from_numpy(segs).reshape(S, 1, H, W).float()
trajs = torch.from_numpy(trajs).reshape(S, N, 2).float()
visibles = torch.from_numpy(visibles).reshape(S, N)
rgbs = rgbs[: self.max_seq_len]
segs = segs[: self.max_seq_len]
trajs = trajs[: self.max_seq_len]
visibles = visibles[: self.max_seq_len]
# apparently the coords are in yx order
trajs = torch.flip(trajs, [2])
if "extra_videos" in filename:
seq_name = filename.split("/")[-3]
else:
seq_name = filename.split("/")[-2]
rgbs, trajs, segs = resize_sample(rgbs, trajs, segs, self.dataset_resolution)
return CoTrackerData(rgbs, segs, trajs, visibles, seq_name=seq_name)
def __len__(self):
return self.badja_data.animal_count
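# A minimal usage sketch (a hypothetical local path; assumes the BADJA joint annotations
# and the matching full-resolution DAVIS frames live under `data_root`):
if __name__ == "__main__":
    dataset = BadjaDataset(data_root="badja_data", dataset_resolution=(384, 512))
    sample = dataset[0]  # a CoTrackerData instance
    print(sample.seq_name, len(dataset))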
| co-tracker-main | cotracker/datasets/badja_dataset.py |