text (string, lengths 5–22M) | id (string, lengths 12–177) | metadata (dict) | __index_level_0__ (int64, 0–1.37k)
---|---|---|---|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import sentencepiece as spm
if __name__ == "__main__":
spm.SentencePieceTrainer.Train(" ".join(sys.argv[1:]))
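# Usage sketch (hypothetical paths): the script simply forwards its command-line arguments
# to the SentencePiece trainer, so a typical invocation might look like:
#   python scripts/spm_train.py --input=corpus.txt --model_prefix=sentencepiece.bpe \
#       --vocab_size=32000 --model_type=bpe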
|
COCO-LM/fairseq/scripts/spm_train.py/0
|
{
"file_path": "COCO-LM/fairseq/scripts/spm_train.py",
"repo_id": "COCO-LM",
"token_count": 131
}
| 211 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from examples.speech_recognition.criterions.cross_entropy_acc import (
CrossEntropyWithAccCriterion,
)
from .asr_test_base import CrossEntropyCriterionTestBase
class CrossEntropyWithAccCriterionTest(CrossEntropyCriterionTestBase):
def setUp(self):
self.criterion_cls = CrossEntropyWithAccCriterion
super().setUp()
def test_cross_entropy_all_correct(self):
sample = self.get_test_sample(correct=True, soft_target=False, aggregate=False)
loss, sample_size, logging_output = self.criterion(
self.model, sample, "sum", log_probs=True
)
assert logging_output["correct"] == 20
assert logging_output["total"] == 20
assert logging_output["sample_size"] == 20
assert logging_output["ntokens"] == 20
def test_cross_entropy_all_wrong(self):
sample = self.get_test_sample(correct=False, soft_target=False, aggregate=False)
loss, sample_size, logging_output = self.criterion(
self.model, sample, "sum", log_probs=True
)
assert logging_output["correct"] == 0
assert logging_output["total"] == 20
assert logging_output["sample_size"] == 20
assert logging_output["ntokens"] == 20
|
COCO-LM/fairseq/tests/speech_recognition/test_cross_entropy.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/speech_recognition/test_cross_entropy.py",
"repo_id": "COCO-LM",
"token_count": 536
}
| 212 |
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import shutil
import sys
import tempfile
import unittest
from typing import Optional
from unittest.mock import MagicMock
class TestFileIO(unittest.TestCase):
_tmpdir: Optional[str] = None
_tmpfile: Optional[str] = None
_tmpfile_contents = "Hello, World"
@classmethod
def setUpClass(cls) -> None:
cls._tmpdir = tempfile.mkdtemp()
with open(os.path.join(cls._tmpdir, "test.txt"), "w") as f:
cls._tmpfile = f.name
f.write(cls._tmpfile_contents)
f.flush()
@classmethod
def tearDownClass(cls) -> None:
# Cleanup temp working dir.
if cls._tmpdir is not None:
shutil.rmtree(cls._tmpdir) # type: ignore
def test_file_io(self):
from fairseq.file_io import PathManager
with PathManager.open(os.path.join(self._tmpdir, "test.txt"), "r") as f:
s = f.read()
self.assertEqual(s, self._tmpfile_contents)
def test_file_io_oss(self):
# Mock fvcore to simulate oss environment.
sys.modules["fvcore"] = MagicMock()
from fairseq.file_io import PathManager
with PathManager.open(os.path.join(self._tmpdir, "test.txt"), "r") as f:
s = f.read()
self.assertEqual(s, self._tmpfile_contents)
def test_file_io_async(self):
# ioPath `PathManager` is initialized after the first `opena` call.
try:
from fairseq.file_io import IOPathPathManager, PathManager
self.assertIsNone(IOPathPathManager)
_asyncfile = os.path.join(self._tmpdir, "async.txt")
f = PathManager.opena(_asyncfile, "wb")
f.close()
from fairseq.file_io import IOPathPathManager
self.assertIsNotNone(IOPathPathManager)
finally:
self.assertTrue(PathManager.async_close())
|
COCO-LM/fairseq/tests/test_file_io.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/test_file_io.py",
"repo_id": "COCO-LM",
"token_count": 873
}
| 213 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections
import unittest
import numpy as np
from fairseq.data import ListDataset, ResamplingDataset
class TestResamplingDataset(unittest.TestCase):
def setUp(self):
self.strings = ["ab", "c", "def", "ghij"]
self.weights = [4.0, 2.0, 7.0, 1.5]
self.size_ratio = 2
self.dataset = ListDataset(
self.strings, np.array([len(s) for s in self.strings])
)
def _test_common(self, resampling_dataset, iters):
assert len(self.dataset) == len(self.strings) == len(self.weights)
assert len(resampling_dataset) == self.size_ratio * len(self.strings)
results = {"ordered_by_size": True, "max_distribution_diff": 0.0}
totalfreqs = 0
freqs = collections.defaultdict(int)
for epoch_num in range(iters):
resampling_dataset.set_epoch(epoch_num)
indices = resampling_dataset.ordered_indices()
assert len(indices) == len(resampling_dataset)
prev_size = -1
for i in indices:
cur_size = resampling_dataset.size(i)
# Make sure indices map to same sequences within an epoch
assert resampling_dataset[i] == resampling_dataset[i]
# Make sure length of sequence is correct
assert cur_size == len(resampling_dataset[i])
freqs[resampling_dataset[i]] += 1
totalfreqs += 1
if prev_size > cur_size:
results["ordered_by_size"] = False
prev_size = cur_size
assert set(freqs.keys()) == set(self.strings)
for s, weight in zip(self.strings, self.weights):
freq = freqs[s] / totalfreqs
expected_freq = weight / sum(self.weights)
results["max_distribution_diff"] = max(
results["max_distribution_diff"], abs(expected_freq - freq)
)
return results
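# Example of the distribution check above: with weights [4.0, 2.0, 7.0, 1.5], the expected
# sampling frequency of "def" is 7.0 / 14.5 ~= 0.483; the tests below require the observed
# frequency over 1000 epochs to stay within 2% (absolute) of the expected value.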
def test_resampling_dataset_batch_by_size_false(self):
resampling_dataset = ResamplingDataset(
self.dataset,
self.weights,
size_ratio=self.size_ratio,
batch_by_size=False,
seed=0,
)
results = self._test_common(resampling_dataset, iters=1000)
# For batch_by_size = False, the batches should be returned in
# arbitrary order of size.
assert not results["ordered_by_size"]
# Allow tolerance in distribution error of 2%.
assert results["max_distribution_diff"] < 0.02
def test_resampling_dataset_batch_by_size_true(self):
resampling_dataset = ResamplingDataset(
self.dataset,
self.weights,
size_ratio=self.size_ratio,
batch_by_size=True,
seed=0,
)
results = self._test_common(resampling_dataset, iters=1000)
# For batch_by_size = True, the batches should be returned in
# increasing order of size.
assert results["ordered_by_size"]
# Allow tolerance in distribution error of 2%.
assert results["max_distribution_diff"] < 0.02
if __name__ == "__main__":
unittest.main()
|
COCO-LM/fairseq/tests/test_resampling_dataset.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/test_resampling_dataset.py",
"repo_id": "COCO-LM",
"token_count": 1558
}
| 214 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
## Finetuning COCO-LM for sequence classification on GLUE.
## The script is largely adapted from the huggingface transformers library.
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import json
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange
from transformers import WEIGHTS_NAME
from transformers import AdamW, get_linear_schedule_with_warmup
from cocolm.modeling_cocolm import COCOLMForSequenceClassification
from cocolm.configuration_cocolm import COCOLMConfig
from cocolm.tokenization_cocolm import COCOLMTokenizer
from utils_for_glue import glue_compute_metrics as compute_metrics
from utils_for_glue import glue_output_modes as output_modes
from utils_for_glue import glue_processors as processors
from utils_for_glue import glue_convert_examples_to_features as convert_examples_to_features
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
'cocolm': (COCOLMConfig, COCOLMForSequenceClassification, COCOLMTokenizer),
}
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def get_optimizer_grouped_parameters(
model, weight_decay, learning_rate, layer_decay, n_layers, layer_wise_weight_decay=False):
assert isinstance(model, torch.nn.Module)
groups = {}
num_max_layer = 0
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
groups_keys = {}
for para_name, para_var in model.named_parameters():
if any(nd in para_name for nd in no_decay):
weight_decay_in_this_group = 0.0
else:
weight_decay_in_this_group = weight_decay
if para_name.startswith('cocolm.embedding') or para_name == 'cocolm.rel_pos_bias.weight':
depth = 0
elif para_name.startswith('cocolm.encoder.layer'):
depth = int(para_name.split('.')[3]) + 1
num_max_layer = max(num_max_layer, depth)
elif para_name.startswith('classifier') or para_name.startswith('cocolm.pooler'):
depth = n_layers + 2
else:
if layer_decay < 1.0:
logger.warning("para_name %s not find !" % para_name)
raise NotImplementedError()
depth = 0
if layer_decay < 1.0 and layer_wise_weight_decay:
weight_decay_in_this_group *= (layer_decay ** (n_layers + 2 - depth))
if layer_decay < 1.0:
group_name = "layer{}_decay{}".format(depth, weight_decay_in_this_group)
else:
group_name = "weight_decay{}".format(weight_decay_in_this_group)
if group_name not in groups:
group = {
"params": [para_var],
"weight_decay": weight_decay_in_this_group,
}
if layer_decay < 1.0:
group["lr"] = learning_rate * (layer_decay ** (n_layers + 2 - depth))
groups[group_name] = group
groups_keys[group_name] = [para_name]
else:
group = groups[group_name]
group["params"].append(para_var)
groups_keys[group_name].append(para_name)
print(f"num_max_layer: {num_max_layer}; n_layers: {n_layers}")
assert num_max_layer == n_layers
logger.info("Optimizer groups: = %s" % json.dumps(groups_keys))
return list(groups.values())
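# Illustrative example of the layer-wise decay above (hypothetical values): with
# layer_decay=0.9 and n_layers=12, the embeddings (depth 0) are scaled by 0.9**14,
# an encoder layer at depth d by 0.9**(14 - d), and the classifier/pooler group
# (depth n_layers + 2 = 14) keeps the full learning rate.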
def train(args, train_dataset, model, tokenizer):
""" Train the model """
if args.local_rank in [-1, 0] and args.log_dir:
tb_writer = SummaryWriter(log_dir=args.log_dir)
else:
tb_writer = None
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(
train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, num_workers=1)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
optimizer_grouped_parameters = get_optimizer_grouped_parameters(
model=model, weight_decay=args.weight_decay, learning_rate=args.learning_rate,
layer_decay=args.layer_decay, n_layers=model.config.num_hidden_layers,
)
warmup_steps = t_total * args.warmup_ratio
correct_bias = not args.disable_bias_correct
logger.info("*********** Optimizer setting: ***********")
logger.info("Learning rate = %.10f" % args.learning_rate)
logger.info("Adam epsilon = %.10f" % args.adam_epsilon)
logger.info("Adam_betas = (%.4f, %.4f)" % (float(args.adam_betas[0]), float(args.adam_betas[1])))
logger.info("Correct_bias = %s" % str(correct_bias))
optimizer = AdamW(
optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon,
betas=(float(args.adam_betas[0]), float(args.adam_betas[1])),
correct_bias=correct_bias,
)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
amp_state_dict = amp.state_dict()
amp_state_dict['loss_scaler0']['loss_scale'] = args.fp16_init_loss_scale
logger.info("Set fp16_init_loss_scale to %.1f" % args.fp16_init_loss_scale)
amp.load_state_dict(amp_state_dict)
amp._amp_state.loss_scalers[0]._loss_scale = 2 ** 20
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
metric_for_best = args.metric_for_choose_best_checkpoint
best_performance = None
best_epoch = None
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
for _ in train_iterator:
if args.disable_tqdm:
epoch_iterator = train_dataloader
else:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'labels': batch[3]}
inputs['token_type_ids'] = None
if args.model_type in ["cocolm"]:
longest_input_length = torch.max(inputs["attention_mask"].argmin(dim=1)).item()
inputs["input_ids"] = inputs["input_ids"][:, :longest_input_length]
inputs["attention_mask"] = inputs["attention_mask"][:, :longest_input_length]
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.max_grad_norm > 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
logs = {}
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs['learning_rate'] = learning_rate_scalar
logs['loss'] = loss_scalar
logging_loss = tr_loss
if tb_writer is not None:
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
logger.info(json.dumps({**logs, **{'step': global_step}}))
if args.max_steps > 0 and global_step > args.max_steps:
if not args.disable_tqdm:
epoch_iterator.close()
break
if args.local_rank in [-1, 0]:
logs = {}
if args.local_rank == -1 and args.evaluate_during_training:  # Only evaluate on a single GPU, otherwise metrics may not average well
results = evaluate(args, model, tokenizer, prefix='epoch-{}'.format(_ + 1))
for key, value in results.items():
eval_key = 'eval_{}'.format(key)
logs[eval_key] = value
if metric_for_best is None:
metric_for_best = list(list(results.values())[0].keys())[0]
if best_epoch is None:
best_epoch = _ + 1
best_performance = results
else:
for eval_task in results:
if best_performance[eval_task][metric_for_best] < results[eval_task][metric_for_best]:
best_performance[eval_task] = results[eval_task]
best_epoch = _ + 1
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs['learning_rate'] = learning_rate_scalar
logs['loss'] = loss_scalar
logging_loss = tr_loss
if tb_writer is not None:
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
print(json.dumps({**logs, **{'step': global_step}}))
# Save model checkpoint
output_dir = os.path.join(args.output_dir, 'epoch-{}'.format(_ + 1))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if not args.do_not_save:
model_to_save = model.module if hasattr(model, 'module') else model
# Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.fp16:
logger.info("Amp state dict = %s" % json.dumps(amp.state_dict()))
if args.local_rank in [-1, 0] and tb_writer is not None:
tb_writer.close()
if best_epoch is not None:
logger.info(" ***************** Best checkpoint: {}, chosen by {} *****************".format(
best_epoch, metric_for_best))
logger.info("Best performance = %s" % json.dumps(best_performance))
save_best_result(best_epoch, best_performance, args.output_dir)
return global_step, tr_loss / global_step
def save_best_result(best_epoch, best_performance, output_dir):
best_performance["checkpoint"] = best_epoch
with open(os.path.join(output_dir, "best_performance.json"), mode="w") as writer:
writer.write(json.dumps(best_performance, indent=2))
def evaluate(args, model, tokenizer, prefix=""):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,)
results = {}
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
cached_dev_file = args.cached_dev_file
if cached_dev_file is not None:
cached_dev_file = cached_dev_file + '_' + eval_task
eval_dataset = load_and_cache_examples(
args, eval_task, tokenizer, cached_features_file=cached_dev_file, evaluate=True)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu eval
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
if args.disable_tqdm:
epoch_iterator = eval_dataloader
else:
epoch_iterator = tqdm(eval_dataloader, desc="Evaluating")
for batch in epoch_iterator:
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'labels': batch[3]}
inputs['token_type_ids'] = None
if args.model_type in ["cocolm"]:
longest_input_length = torch.max(inputs["attention_mask"].argmin(dim=1)).item()
inputs["input_ids"] = inputs["input_ids"][:, :longest_input_length]
inputs["attention_mask"] = inputs["attention_mask"][:, :longest_input_length]
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs['labels'].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif args.output_mode == "regression":
preds = np.squeeze(preds)
result = compute_metrics(eval_task, preds, out_label_ids)
results[eval_task] = result
eval_output_dir = os.path.join(eval_output_dir, prefix)
if not os.path.exists(eval_output_dir):
os.makedirs(eval_output_dir)
output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
# for key in sorted(result.keys()):
# logger.info(" %s = %s", key, str(result[key]))
# writer.write("%s = %s\n" % (key, str(result[key])))
writer.write(json.dumps(result, indent=2))
logger.info("Result = %s" % json.dumps(result, indent=2))
return results
def load_and_cache_examples(args, task, tokenizer, cached_features_file=None, evaluate=False):
if args.local_rank not in [-1, 0] and not evaluate:
torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
processor = processors[task]()
output_mode = output_modes[task]
if cached_features_file is None:
if args.disable_auto_cache and args.local_rank != -1:
logger.warning("Please cache the features in DDP mode !")
raise RuntimeError()
if not args.disable_auto_cache:
# Load data features from cache or dataset file
cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train',
list(filter(None, args.model_name_or_path.split('/'))).pop(),
str(args.max_seq_length),
str(task)))
if cached_features_file is not None and os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
label_list = processor.get_labels()
examples = processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir)
features = convert_examples_to_features(examples,
tokenizer,
label_list=label_list,
max_length=args.max_seq_length,
output_mode=output_mode,
pad_on_left=False,
pad_token_id=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=0,
)
if args.local_rank in [-1, 0] and cached_features_file is not None:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
if args.local_rank == 0 and not evaluate:
torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
if output_mode == "classification":
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
elif output_mode == "regression":
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
return dataset
def eval_str_list(x, type=float):
if x is None:
return None
if isinstance(x, str):
x = eval(x)
try:
return list(map(type, x))
except TypeError:
return [type(x)]
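# Example: eval_str_list("0.9,0.999") -> [0.9, 0.999]; a single value such as "0.9"
# falls into the TypeError branch and is wrapped as [0.9]. Note this relies on eval()
# of the raw string, so it should only be used on trusted command-line input.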
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir", default=None, type=str, required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--model_type", default="unilm", type=str,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model or shortcut name")
parser.add_argument("--task_name", default=None, type=str, required=True,
help="The name of the task to train selected in the list: " + ", ".join(processors.keys()))
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--do_not_save", action='store_true',
help="Disable save models after each epoch. ")
parser.add_argument("--log_dir", default=None, type=str,
help="The output directory where the log will be written.")
parser.add_argument("--cached_train_file", default=None, type=str,
help="Path to cache the train set features. ")
parser.add_argument("--cached_dev_file", default=None, type=str,
help="Path to cache the dev set features. ")
parser.add_argument('--disable_auto_cache', action='store_true',
help='Disable automatic caching of the training/dev features.')
parser.add_argument('--disable_tqdm', action='store_true',
help='Disable the tqdm bar. ')
## Other parameters
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name_or_path", default="", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--cache_dir", default="", type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length", default=128, type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--evaluate_during_training", action='store_true',
help="Rul evaluation during training at each logging step.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--layer_decay", default=1.0, type=float,
help="Layer decay rate for the layer-wise learning rate. ")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument('--adam_betas', '--adam_beta', default='0.9,0.999', type=eval_str_list, metavar='B',
help='betas for Adam optimizer')
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--disable_bias_correct", action='store_true',
help="Disable the bias correction items. ")
parser.add_argument("--num_train_epochs", default=3.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_ratio", default=0.1, type=float,
help="Linear warmup over warmup_ratio.")
parser.add_argument("--dropout_prob", default=None, type=float,
help="Set dropout prob, default value is read from config. ")
parser.add_argument("--cls_dropout_prob", default=None, type=float,
help="Set cls layer dropout prob. ")
parser.add_argument('--logging_steps', type=int, default=50,
help="Log every X updates steps.")
parser.add_argument("--eval_all_checkpoints", action='store_true',
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--overwrite_output_dir', action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument('--overwrite_cache', action='store_true',
help="Overwrite the cached training and evaluation sets")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--metric_for_choose_best_checkpoint', type=str, default=None,
help="Set the metric to choose the best checkpoint")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--fp16_init_loss_scale', type=float, default=128.0,
help="For fp16: initial value for loss scale.")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
if args.local_rank in (-1, 0):
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
with open(os.path.join(args.output_dir, 'training_args.json'), mode='w', encoding="utf-8") as writer:
writer.write(json.dumps(args.__dict__, indent=2, sort_keys=True))
args.device = device
# Setup logging
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args)
# Prepare GLUE task
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None)
tokenizer_name_or_path = args.tokenizer_name_or_path if args.tokenizer_name_or_path else args.model_name_or_path
tokenizer = tokenizer_class.from_pretrained(tokenizer_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None)
if args.model_type not in ["cocolm"]:
if not hasattr(config, 'need_pooler') or config.need_pooler is not True:
setattr(config, 'need_pooler', True)
if args.dropout_prob is not None:
config.hidden_dropout_prob = args.dropout_prob
config.attention_probs_dropout_prob = args.dropout_prob
if args.cls_dropout_prob is not None:
config.cls_dropout_prob = args.cls_dropout_prob
logger.info("Final model config for finetuning: ")
logger.info("%s" % config.to_json_string())
model = model_class.from_pretrained(
args.model_name_or_path, config=config,
cache_dir=args.cache_dir if args.cache_dir else None)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = load_and_cache_examples(
args, args.task_name, tokenizer, cached_features_file=args.cached_train_file, evaluate=False)
global_step, tr_loss = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Evaluation
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(tokenizer_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None)
checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
metric_for_best = args.metric_for_choose_best_checkpoint
best_performance = None
best_epoch = None
for checkpoint in checkpoints:
prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ""
checkpoint_config = config_class.from_pretrained(checkpoint)
model = model_class.from_pretrained(checkpoint, config=checkpoint_config)
model.to(args.device)
result = evaluate(args, model, tokenizer, prefix=prefix)
if metric_for_best is None:
metric_for_best = list(list(result.values())[0].keys())[0]
if best_epoch is None:
best_epoch = checkpoint
best_performance = result
else:
for eval_task in result:
if best_performance[eval_task][metric_for_best] < result[eval_task][metric_for_best]:
best_performance[eval_task] = result[eval_task]
best_epoch = checkpoint
if best_epoch is not None:
logger.info(" ***************** Best checkpoint: {}, chosen by {} *****************".format(
best_epoch, metric_for_best))
logger.info("Best performance = %s" % json.dumps(best_performance))
save_best_result(best_epoch, best_performance, args.output_dir)
if __name__ == "__main__":
main()
|
COCO-LM/huggingface/run_glue.py/0
|
{
"file_path": "COCO-LM/huggingface/run_glue.py",
"repo_id": "COCO-LM",
"token_count": 15816
}
| 215 |
# ------------------------------------------
# CSWin Transformer
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Written by Xiaoyi Dong
# ------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from timm.models.registry import register_model
from einops.layers.torch import Rearrange
import torch.utils.checkpoint as checkpoint
import numpy as np
import time
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = {
'cswin_224': _cfg(),
'cswin_384': _cfg(
crop_pct=1.0
),
}
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class LePEAttention(nn.Module):
def __init__(self, dim, resolution, idx, split_size=7, dim_out=None, num_heads=8, attn_drop=0., proj_drop=0., qk_scale=None):
super().__init__()
self.dim = dim
self.dim_out = dim_out or dim
self.resolution = resolution
self.split_size = split_size
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim ** -0.5
if idx == -1:
H_sp, W_sp = self.resolution, self.resolution
elif idx == 0:
H_sp, W_sp = self.resolution, self.split_size
elif idx == 1:
W_sp, H_sp = self.resolution, self.split_size
else:
print ("ERROR MODE", idx)
exit(0)
self.H_sp = H_sp
self.W_sp = W_sp
stride = 1
self.get_v = nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=1,groups=dim)
self.attn_drop = nn.Dropout(attn_drop)
def im2cswin(self, x):
B, N, C = x.shape
H = W = int(np.sqrt(N))
x = x.transpose(-2,-1).contiguous().view(B, C, H, W)
x = img2windows(x, self.H_sp, self.W_sp)
x = x.reshape(-1, self.H_sp* self.W_sp, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3).contiguous()
return x
def get_lepe(self, x, func):
B, N, C = x.shape
H = W = int(np.sqrt(N))
x = x.transpose(-2,-1).contiguous().view(B, C, H, W)
H_sp, W_sp = self.H_sp, self.W_sp
x = x.view(B, C, H // H_sp, H_sp, W // W_sp, W_sp)
x = x.permute(0, 2, 4, 1, 3, 5).contiguous().reshape(-1, C, H_sp, W_sp) ### B', C, H', W'
lepe = func(x) ### B', C, H', W'
lepe = lepe.reshape(-1, self.num_heads, C // self.num_heads, H_sp * W_sp).permute(0, 1, 3, 2).contiguous()
x = x.reshape(-1, self.num_heads, C // self.num_heads, self.H_sp* self.W_sp).permute(0, 1, 3, 2).contiguous()
return x, lepe
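# Descriptive note: get_lepe reshapes v into per-window maps of shape
# (num_windows*B, C, H_sp, W_sp) and applies the depthwise 3x3 conv self.get_v to each
# window; the result is the locally-enhanced positional encoding (LePE) added to the
# attention output in forward below.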
def forward(self, qkv):
"""
x: B L C
"""
q,k,v = qkv[0], qkv[1], qkv[2]
### Img2Window
H = W = self.resolution
B, L, C = q.shape
assert L == H * W, "flatten img_tokens has wrong size"
q = self.im2cswin(q)
k = self.im2cswin(k)
v, lepe = self.get_lepe(v, self.get_v)
q = q * self.scale
attn = (q @ k.transpose(-2, -1)) # B head N C @ B head C N --> B head N N
attn = nn.functional.softmax(attn, dim=-1, dtype=attn.dtype)
attn = self.attn_drop(attn)
x = (attn @ v) + lepe
x = x.transpose(1, 2).reshape(-1, self.H_sp* self.W_sp, C) # B head N N @ B head N C
### Window2Img
x = windows2img(x, self.H_sp, self.W_sp, H, W).view(B, -1, C) # B H' W' C
return x
class CSWinBlock(nn.Module):
def __init__(self, dim, reso, num_heads,
split_size=7, mlp_ratio=4., qkv_bias=False, qk_scale=None,
drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm,
last_stage=False):
super().__init__()
self.dim = dim
self.num_heads = num_heads
self.patches_resolution = reso
self.split_size = split_size
self.mlp_ratio = mlp_ratio
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.norm1 = norm_layer(dim)
if self.patches_resolution == split_size:
last_stage = True
if last_stage:
self.branch_num = 1
else:
self.branch_num = 2
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(drop)
if last_stage:
self.attns = nn.ModuleList([
LePEAttention(
dim, resolution=self.patches_resolution, idx = -1,
split_size=split_size, num_heads=num_heads, dim_out=dim,
qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
for i in range(self.branch_num)])
else:
self.attns = nn.ModuleList([
LePEAttention(
dim//2, resolution=self.patches_resolution, idx = i,
split_size=split_size, num_heads=num_heads//2, dim_out=dim//2,
qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
for i in range(self.branch_num)])
mlp_hidden_dim = int(dim * mlp_ratio)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, out_features=dim, act_layer=act_layer, drop=drop)
self.norm2 = norm_layer(dim)
def forward(self, x):
"""
x: B, H*W, C
"""
H = W = self.patches_resolution
B, L, C = x.shape
assert L == H * W, "flatten img_tokens has wrong size"
img = self.norm1(x)
qkv = self.qkv(img).reshape(B, -1, 3, C).permute(2, 0, 1, 3)
if self.branch_num == 2:
x1 = self.attns[0](qkv[:,:,:,:C//2])
x2 = self.attns[1](qkv[:,:,:,C//2:])
attened_x = torch.cat([x1,x2], dim=2)
else:
attened_x = self.attns[0](qkv)
attened_x = self.proj(attened_x)
x = x + self.drop_path(attened_x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def img2windows(img, H_sp, W_sp):
"""
img: B C H W
"""
B, C, H, W = img.shape
img_reshape = img.view(B, C, H // H_sp, H_sp, W // W_sp, W_sp)
img_perm = img_reshape.permute(0, 2, 4, 3, 5, 1).contiguous().reshape(-1, H_sp* W_sp, C)
return img_perm
def windows2img(img_splits_hw, H_sp, W_sp, H, W):
"""
img_splits_hw: B' H W C
"""
B = int(img_splits_hw.shape[0] / (H * W / H_sp / W_sp))
img = img_splits_hw.view(B, H // H_sp, W // W_sp, H_sp, W_sp, -1)
img = img.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return img
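# Shape sketch (hypothetical sizes): for img of shape (B, C, 56, 56) and a vertical
# stripe split with H_sp=56, W_sp=1, img2windows returns (B*56, 56, C) window tokens,
# and windows2img(img2windows(img), 56, 1, 56, 56) restores the (B, 56, 56, C) layout.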
class Merge_Block(nn.Module):
def __init__(self, dim, dim_out, norm_layer=nn.LayerNorm):
super().__init__()
self.conv = nn.Conv2d(dim, dim_out, 3, 2, 1)
self.norm = norm_layer(dim_out)
def forward(self, x):
B, new_HW, C = x.shape
H = W = int(np.sqrt(new_HW))
x = x.transpose(-2, -1).contiguous().view(B, C, H, W)
x = self.conv(x)
B, C = x.shape[:2]
x = x.view(B, C, -1).transpose(-2, -1).contiguous()
x = self.norm(x)
return x
class CSWinTransformer(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=96, depth=[2,2,6,2], split_size = [3,5,7],
num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., hybrid_backbone=None, norm_layer=nn.LayerNorm, use_chk=False):
super().__init__()
self.use_chk = use_chk
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
heads=num_heads
self.stage1_conv_embed = nn.Sequential(
nn.Conv2d(in_chans, embed_dim, 7, 4, 2),
Rearrange('b c h w -> b (h w) c', h = img_size//4, w = img_size//4),
nn.LayerNorm(embed_dim)
)
curr_dim = embed_dim
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, np.sum(depth))] # stochastic depth decay rule
self.stage1 = nn.ModuleList([
CSWinBlock(
dim=curr_dim, num_heads=heads[0], reso=img_size//4, mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale, split_size=split_size[0],
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth[0])])
self.merge1 = Merge_Block(curr_dim, curr_dim*2)
curr_dim = curr_dim*2
self.stage2 = nn.ModuleList(
[CSWinBlock(
dim=curr_dim, num_heads=heads[1], reso=img_size//8, mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale, split_size=split_size[1],
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[np.sum(depth[:1])+i], norm_layer=norm_layer)
for i in range(depth[1])])
self.merge2 = Merge_Block(curr_dim, curr_dim*2)
curr_dim = curr_dim*2
temp_stage3 = []
temp_stage3.extend(
[CSWinBlock(
dim=curr_dim, num_heads=heads[2], reso=img_size//16, mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale, split_size=split_size[2],
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[np.sum(depth[:2])+i], norm_layer=norm_layer)
for i in range(depth[2])])
self.stage3 = nn.ModuleList(temp_stage3)
self.merge3 = Merge_Block(curr_dim, curr_dim*2)
curr_dim = curr_dim*2
self.stage4 = nn.ModuleList(
[CSWinBlock(
dim=curr_dim, num_heads=heads[3], reso=img_size//32, mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale, split_size=split_size[-1],
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[np.sum(depth[:-1])+i], norm_layer=norm_layer, last_stage=True)
for i in range(depth[-1])])
self.norm = norm_layer(curr_dim)
# Classifier head
self.head = nn.Linear(curr_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.head.weight, std=0.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.LayerNorm, nn.BatchNorm2d)):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
if self.num_classes != num_classes:
print ('reset head to', num_classes)
self.num_classes = num_classes
self.head = nn.Linear(self.out_dim, num_classes) if num_classes > 0 else nn.Identity()
self.head = self.head.cuda()
trunc_normal_(self.head.weight, std=.02)
if self.head.bias is not None:
nn.init.constant_(self.head.bias, 0)
def forward_features(self, x):
B = x.shape[0]
x = self.stage1_conv_embed(x)
for blk in self.stage1:
if self.use_chk:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
for pre, blocks in zip([self.merge1, self.merge2, self.merge3],
[self.stage2, self.stage3, self.stage4]):
x = pre(x)
for blk in blocks:
if self.use_chk:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
x = self.norm(x)
return torch.mean(x, dim=1)
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
def _conv_filter(state_dict, patch_size=16):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
for k, v in state_dict.items():
if 'patch_embed.proj.weight' in k:
v = v.reshape((v.shape[0], 3, patch_size, patch_size))
out_dict[k] = v
return out_dict
### 224 models
@register_model
def CSWin_64_12211_tiny_224(pretrained=False, **kwargs):
model = CSWinTransformer(patch_size=4, embed_dim=64, depth=[1,2,21,1],
split_size=[1,2,7,7], num_heads=[2,4,8,16], mlp_ratio=4., **kwargs)
model.default_cfg = default_cfgs['cswin_224']
return model
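# Minimal usage sketch (not part of the original file; assumes timm/einops are installed):
#   model = CSWin_64_12211_tiny_224()
#   logits = model(torch.randn(1, 3, 224, 224))  # expected output shape: (1, 1000)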
@register_model
def CSWin_64_24322_small_224(pretrained=False, **kwargs):
model = CSWinTransformer(patch_size=4, embed_dim=64, depth=[2,4,32,2],
split_size=[1,2,7,7], num_heads=[2,4,8,16], mlp_ratio=4., **kwargs)
model.default_cfg = default_cfgs['cswin_224']
return model
@register_model
def CSWin_96_24322_base_224(pretrained=False, **kwargs):
model = CSWinTransformer(patch_size=4, embed_dim=96, depth=[2,4,32,2],
split_size=[1,2,7,7], num_heads=[4,8,16,32], mlp_ratio=4., **kwargs)
model.default_cfg = default_cfgs['cswin_224']
return model
@register_model
def CSWin_144_24322_large_224(pretrained=False, **kwargs):
model = CSWinTransformer(patch_size=4, embed_dim=144, depth=[2,4,32,2],
split_size=[1,2,7,7], num_heads=[6,12,24,24], mlp_ratio=4., **kwargs)
model.default_cfg = default_cfgs['cswin_224']
return model
### 384 models
@register_model
def CSWin_96_24322_base_384(pretrained=False, **kwargs):
model = CSWinTransformer(patch_size=4, embed_dim=96, depth=[2,4,32,2],
split_size=[1,2,12,12], num_heads=[4,8,16,32], mlp_ratio=4., **kwargs)
model.default_cfg = default_cfgs['cswin_384']
return model
@register_model
def CSWin_144_24322_large_384(pretrained=False, **kwargs):
model = CSWinTransformer(patch_size=4, embed_dim=144, depth=[2,4,32,2],
split_size=[1,2,12,12], num_heads=[6,12,24,24], mlp_ratio=4., **kwargs)
model.default_cfg = default_cfgs['cswin_384']
return model
|
CSWin-Transformer/models/cswin.py/0
|
{
"file_path": "CSWin-Transformer/models/cswin.py",
"repo_id": "CSWin-Transformer",
"token_count": 8006
}
| 216 |
seed_everything: 42
# ---------------------------- TRAINER -------------------------------------------
trainer:
default_root_dir: ${oc.env:OUTPUT_DIR,/home/t-tungnguyen/ClimaX/exps/regional_forecast_climax}
precision: 16
gpus: null
num_nodes: 1
accelerator: gpu
strategy: ddp
min_epochs: 1
max_epochs: 100
enable_progress_bar: true
sync_batchnorm: True
enable_checkpointing: True
resume_from_checkpoint: null
# debugging
fast_dev_run: false
logger:
class_path: pytorch_lightning.loggers.tensorboard.TensorBoardLogger
init_args:
save_dir: ${trainer.default_root_dir}/logs
name: null
version: null
log_graph: False
default_hp_metric: True
prefix: ""
callbacks:
- class_path: pytorch_lightning.callbacks.LearningRateMonitor
init_args:
logging_interval: "step"
- class_path: pytorch_lightning.callbacks.ModelCheckpoint
init_args:
dirpath: "${trainer.default_root_dir}/checkpoints"
monitor: "val/w_rmse" # name of the logged metric which determines when model is improving
mode: "min" # "max" means higher metric value is better, can be also "min"
save_top_k: 1 # save k best models (determined by above metric)
save_last: True # additionally always save the model from the last epoch
verbose: False
filename: "epoch_{epoch:03d}"
auto_insert_metric_name: False
- class_path: pytorch_lightning.callbacks.EarlyStopping
init_args:
monitor: "val/w_rmse" # name of the logged metric which determines when model is improving
mode: "min" # "max" means higher metric value is better, can be also "min"
patience: 5 # number of validation epochs with no improvement before training stops
min_delta: 0. # minimum change in the monitored metric needed to qualify as an improvement
- class_path: pytorch_lightning.callbacks.RichModelSummary
init_args:
max_depth: -1
- class_path: pytorch_lightning.callbacks.RichProgressBar
# ---------------------------- MODEL -------------------------------------------
model:
lr: 5e-4
beta_1: 0.9
beta_2: 0.99
weight_decay: 1e-5
warmup_epochs: 10000
max_epochs: 100000
warmup_start_lr: 1e-8
eta_min: 1e-8
pretrained_path: ""
net:
class_path: climax.regional_forecast.arch.RegionalClimaX
init_args:
default_vars: [
"land_sea_mask",
"orography",
"lattitude",
"2m_temperature",
"10m_u_component_of_wind",
"10m_v_component_of_wind",
"geopotential_50",
"geopotential_250",
"geopotential_500",
"geopotential_600",
"geopotential_700",
"geopotential_850",
"geopotential_925",
"u_component_of_wind_50",
"u_component_of_wind_250",
"u_component_of_wind_500",
"u_component_of_wind_600",
"u_component_of_wind_700",
"u_component_of_wind_850",
"u_component_of_wind_925",
"v_component_of_wind_50",
"v_component_of_wind_250",
"v_component_of_wind_500",
"v_component_of_wind_600",
"v_component_of_wind_700",
"v_component_of_wind_850",
"v_component_of_wind_925",
"temperature_50",
"temperature_250",
"temperature_500",
"temperature_600",
"temperature_700",
"temperature_850",
"temperature_925",
"relative_humidity_50",
"relative_humidity_250",
"relative_humidity_500",
"relative_humidity_600",
"relative_humidity_700",
"relative_humidity_850",
"relative_humidity_925",
"specific_humidity_50",
"specific_humidity_250",
"specific_humidity_500",
"specific_humidity_600",
"specific_humidity_700",
"specific_humidity_850",
"specific_humidity_925",
]
img_size: [32, 64]
patch_size: 2
embed_dim: 1024
depth: 8
decoder_depth: 2
num_heads: 16
mlp_ratio: 4
drop_path: 0.1
drop_rate: 0.1
# ---------------------------- DATA -------------------------------------------
data:
root_dir: /datadrive/datasets/5.625deg_equally_np/
variables: [
"land_sea_mask",
"orography",
"lattitude",
"2m_temperature",
"10m_u_component_of_wind",
"10m_v_component_of_wind",
"geopotential_50",
"geopotential_250",
"geopotential_500",
"geopotential_600",
"geopotential_700",
"geopotential_850",
"geopotential_925",
"u_component_of_wind_50",
"u_component_of_wind_250",
"u_component_of_wind_500",
"u_component_of_wind_600",
"u_component_of_wind_700",
"u_component_of_wind_850",
"u_component_of_wind_925",
"v_component_of_wind_50",
"v_component_of_wind_250",
"v_component_of_wind_500",
"v_component_of_wind_600",
"v_component_of_wind_700",
"v_component_of_wind_850",
"v_component_of_wind_925",
"temperature_50",
"temperature_250",
"temperature_500",
"temperature_600",
"temperature_700",
"temperature_850",
"temperature_925",
"relative_humidity_50",
"relative_humidity_250",
"relative_humidity_500",
"relative_humidity_600",
"relative_humidity_700",
"relative_humidity_850",
"relative_humidity_925",
"specific_humidity_50",
"specific_humidity_250",
"specific_humidity_500",
"specific_humidity_600",
"specific_humidity_700",
"specific_humidity_850",
"specific_humidity_925",
]
out_variables: ["geopotential_500", "temperature_850", "2m_temperature", "10m_u_component_of_wind", "10m_v_component_of_wind"]
region: "NorthAmerica"
predict_range: 72
hrs_each_step: 1
buffer_size: 10000
batch_size: 128
num_workers: 1
pin_memory: False
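# Usage sketch (assumed entry point, following the ClimaX training scripts): this config
# is consumed by the LightningCLI-based regional forecasting script, e.g.
#   python src/climax/regional_forecast/train.py \
#       --config configs/regional_forecast_climax.yaml --data.root_dir=/path/to/data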
|
ClimaX/configs/regional_forecast_climax.yaml/0
|
{
"file_path": "ClimaX/configs/regional_forecast_climax.yaml",
"repo_id": "ClimaX",
"token_count": 2753
}
| 217 |
# Pretraining
::: climax.pretrain.datamodule
::: climax.pretrain.module
|
ClimaX/docs/reference/pretrain.md/0
|
{
"file_path": "ClimaX/docs/reference/pretrain.md",
"repo_id": "ClimaX",
"token_count": 27
}
| 218 |
datadir: /data/CMIP6/CMCC
name: temperature
cmip_name: ta
era_name: t
run: r1i1p1f1
res:
- 1.40625
# - 5.625
|
ClimaX/snakemake_configs/CMCC/config_temperature.yml/0
|
{
"file_path": "ClimaX/snakemake_configs/CMCC/config_temperature.yml",
"repo_id": "ClimaX",
"token_count": 58
}
| 219 |
datadir: /data/CMIP6/MPI-ESM
server_prefix: http://esgf-data1.llnl.gov/thredds/fileServer/css03_data/CMIP6/CMIP
name: geopotential
cmip_name: zg
era_name: z
output_type: 6hrPlevPt
run: r1i1p1f1
version: v20190815
res:
- 1.40625
# - 5.625
|
ClimaX/snakemake_configs/MPI-ESM/config_geopotential.yml/0
|
{
"file_path": "ClimaX/snakemake_configs/MPI-ESM/config_geopotential.yml",
"repo_id": "ClimaX",
"token_count": 119
}
| 220 |
import os
from typing import Optional
import numpy as np
import torch
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader
from climax.climate_projection.dataset import ClimateBenchDataset, input_for_training, load_x_y, output_for_training, split_train_val
def collate_fn(batch):
inp = torch.stack([batch[i][0] for i in range(len(batch))])
out = torch.stack([batch[i][1] for i in range(len(batch))])
lead_times = torch.cat([batch[i][2] for i in range(len(batch))])
variables = batch[0][3]
out_variables = batch[0][4]
return inp, out, lead_times, variables, out_variables
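# Descriptive note: each dataset item is (input, output, lead_time, variables, out_variables);
# collate_fn stacks inputs into (B, T, C, H, W) and outputs into (B, 1, H, W) tensors
# (shapes follow the comments on the training-data construction below), and takes the
# variable name lists from the first item since they are shared across the batch.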
class ClimateBenchDataModule(LightningDataModule):
def __init__(
self,
root_dir, # contains metadata and train + val + test
history=10,
list_train_simu=[
'ssp126',
'ssp370',
'ssp585',
'historical',
'hist-GHG',
'hist-aer'
],
list_test_simu=[
'ssp245'
],
variables=[
'CO2',
'SO2',
'CH4',
'BC'
],
out_variables='tas',
train_ratio=0.9,
batch_size: int = 128,
num_workers: int = 1,
pin_memory: bool = False,
):
super().__init__()
# this line allows access to init params via the 'self.hparams' attribute
self.save_hyperparameters(logger=False)
if isinstance(out_variables, str):
out_variables = [out_variables]
self.hparams.out_variables = out_variables
# split train and val datasets
dict_x_train_val, dict_y_train_val, lat, lon = load_x_y(os.path.join(root_dir, 'train_val'), list_train_simu, out_variables)
self.lat, self.lon = lat, lon
x_train_val = np.concatenate([
input_for_training(
dict_x_train_val[simu], skip_historical=(i<2), history=history, len_historical=165
) for i, simu in enumerate(dict_x_train_val.keys())
], axis = 0) # N, T, C, H, W
y_train_val = np.concatenate([
output_for_training(
dict_y_train_val[simu], skip_historical=(i<2), history=history, len_historical=165
) for i, simu in enumerate(dict_y_train_val.keys())
], axis=0) # N, 1, H, W
x_train, y_train, x_val, y_val = split_train_val(x_train_val, y_train_val, train_ratio)
self.dataset_train = ClimateBenchDataset(
x_train, y_train, variables, out_variables, lat, 'train'
)
self.dataset_val = ClimateBenchDataset(
x_val, y_val, variables, out_variables, lat, 'val'
)
self.dataset_val.set_normalize(self.dataset_train.inp_transform, self.dataset_train.out_transform)
dict_x_test, dict_y_test, _, _ = load_x_y(os.path.join(root_dir, 'test'), list_test_simu, out_variables)
x_test = input_for_training(
dict_x_test[list_test_simu[0]], skip_historical=True, history=history, len_historical=165
)
y_test = output_for_training(
dict_y_test[list_test_simu[0]], skip_historical=True, history=history, len_historical=165
)
self.dataset_test = ClimateBenchDataset(
x_test, y_test, variables, out_variables, lat, 'test'
)
self.dataset_test.set_normalize(self.dataset_train.inp_transform, self.dataset_train.out_transform)
def get_lat_lon(self):
return self.lat, self.lon
def set_patch_size(self, p):
self.patch_size = p
def get_test_clim(self):
return self.dataset_test.y_normalization
def train_dataloader(self):
return DataLoader(
self.dataset_train,
batch_size=self.hparams.batch_size,
shuffle=True,
# drop_last=True,
num_workers=self.hparams.num_workers,
pin_memory=self.hparams.pin_memory,
collate_fn=collate_fn,
)
def val_dataloader(self):
return DataLoader(
self.dataset_val,
batch_size=self.hparams.batch_size,
shuffle=False,
# drop_last=True,
num_workers=self.hparams.num_workers,
pin_memory=self.hparams.pin_memory,
collate_fn=collate_fn,
)
def test_dataloader(self):
return DataLoader(
self.dataset_test,
batch_size=self.hparams.batch_size,
shuffle=False,
# drop_last=True,
num_workers=self.hparams.num_workers,
pin_memory=self.hparams.pin_memory,
collate_fn=collate_fn,
)
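# Hedged usage sketch (added for illustration; the root_dir layout is an assumption,
# not part of the original file). The module loads everything eagerly in __init__,
# so construction requires a directory containing 'train_val' and 'test' subfolders:
#
#   dm = ClimateBenchDataModule(root_dir="/data/climatebench", history=10,
#                               out_variables="tas", batch_size=16)
#   lat, lon = dm.get_lat_lon()
#   for inp, out, lead_times, variables, out_variables in dm.train_dataloader():
#       # inp: (B, T, C, H, W), out: (B, 1, H, W), as noted in the comments above
#       break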
|
ClimaX/src/climax/climate_projection/datamodule.py/0
|
{
"file_path": "ClimaX/src/climax/climate_projection/datamodule.py",
"repo_id": "ClimaX",
"token_count": 2313
}
| 221 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
from typing import Optional
import numpy as np
import torch
import torchdata.datapipes as dp
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader, IterableDataset
from torchvision.transforms import transforms
from climax.pretrain.dataset import (
Forecast,
IndividualForecastDataIter,
NpyReader,
ShuffleIterableDataset,
)
from climax.utils.data_utils import get_region_info
def collate_fn_regional(batch):
inp = torch.stack([batch[i][0] for i in range(len(batch))])
out = torch.stack([batch[i][1] for i in range(len(batch))])
lead_times = torch.stack([batch[i][2] for i in range(len(batch))])
variables = batch[0][3]
out_variables = batch[0][4]
region_info = batch[0][5]
return (
inp,
out,
lead_times,
[v for v in variables],
[v for v in out_variables],
region_info,
)
class RegionalForecastDataModule(LightningDataModule):
"""DataModule for regional forecast data.
Args:
root_dir (str): Root directory for sharded data.
variables (list): List of input variables.
buffer_size (int): Buffer size for shuffling.
out_variables (list, optional): List of output variables.
region (str, optional): The name of the region to finetune ClimaX on.
predict_range (int, optional): Predict range.
hrs_each_step (int, optional): Hours each step.
batch_size (int, optional): Batch size.
num_workers (int, optional): Number of workers.
pin_memory (bool, optional): Whether to pin memory.
"""
def __init__(
self,
root_dir,
variables,
buffer_size,
out_variables=None,
region: str = 'NorthAmerica',
predict_range: int = 6,
hrs_each_step: int = 1,
batch_size: int = 64,
num_workers: int = 0,
pin_memory: bool = False,
):
super().__init__()
        # this line allows accessing init params with the 'self.hparams' attribute
self.save_hyperparameters(logger=False)
if isinstance(out_variables, str):
out_variables = [out_variables]
self.hparams.out_variables = out_variables
self.lister_train = list(dp.iter.FileLister(os.path.join(root_dir, "train")))
self.lister_val = list(dp.iter.FileLister(os.path.join(root_dir, "val")))
self.lister_test = list(dp.iter.FileLister(os.path.join(root_dir, "test")))
self.transforms = self.get_normalize()
self.output_transforms = self.get_normalize(out_variables)
self.val_clim = self.get_climatology("val", out_variables)
self.test_clim = self.get_climatology("test", out_variables)
self.data_train: Optional[IterableDataset] = None
self.data_val: Optional[IterableDataset] = None
self.data_test: Optional[IterableDataset] = None
def get_normalize(self, variables=None):
if variables is None:
variables = self.hparams.variables
normalize_mean = dict(np.load(os.path.join(self.hparams.root_dir, "normalize_mean.npz")))
mean = []
for var in variables:
if var != "total_precipitation":
mean.append(normalize_mean[var])
else:
mean.append(np.array([0.0]))
normalize_mean = np.concatenate(mean)
normalize_std = dict(np.load(os.path.join(self.hparams.root_dir, "normalize_std.npz")))
normalize_std = np.concatenate([normalize_std[var] for var in variables])
return transforms.Normalize(normalize_mean, normalize_std)
def get_lat_lon(self):
lat = np.load(os.path.join(self.hparams.root_dir, "lat.npy"))
lon = np.load(os.path.join(self.hparams.root_dir, "lon.npy"))
return lat, lon
def get_climatology(self, partition="val", variables=None):
path = os.path.join(self.hparams.root_dir, partition, "climatology.npz")
clim_dict = np.load(path)
if variables is None:
variables = self.hparams.variables
clim = np.concatenate([clim_dict[var] for var in variables])
clim = torch.from_numpy(clim)
return clim
def set_patch_size(self, p):
self.patch_size = p
def setup(self, stage: Optional[str] = None):
lat, lon = self.get_lat_lon()
region_info = get_region_info(self.hparams.region, lat, lon, self.patch_size)
# load datasets only if they're not loaded already
if not self.data_train and not self.data_val and not self.data_test:
self.data_train = ShuffleIterableDataset(
IndividualForecastDataIter(
Forecast(
NpyReader(
file_list=self.lister_train,
start_idx=0,
end_idx=1,
variables=self.hparams.variables,
out_variables=self.hparams.out_variables,
shuffle=True,
multi_dataset_training=False,
),
max_predict_range=self.hparams.predict_range,
random_lead_time=False,
hrs_each_step=self.hparams.hrs_each_step,
),
transforms=self.transforms,
output_transforms=self.output_transforms,
region_info=region_info
),
buffer_size=self.hparams.buffer_size,
)
self.data_val = IndividualForecastDataIter(
Forecast(
NpyReader(
file_list=self.lister_val,
start_idx=0,
end_idx=1,
variables=self.hparams.variables,
out_variables=self.hparams.out_variables,
shuffle=False,
multi_dataset_training=False,
),
max_predict_range=self.hparams.predict_range,
random_lead_time=False,
hrs_each_step=self.hparams.hrs_each_step,
),
transforms=self.transforms,
output_transforms=self.output_transforms,
region_info=region_info
)
self.data_test = IndividualForecastDataIter(
Forecast(
NpyReader(
file_list=self.lister_test,
start_idx=0,
end_idx=1,
variables=self.hparams.variables,
out_variables=self.hparams.out_variables,
shuffle=False,
multi_dataset_training=False,
),
max_predict_range=self.hparams.predict_range,
random_lead_time=False,
hrs_each_step=self.hparams.hrs_each_step,
),
transforms=self.transforms,
output_transforms=self.output_transforms,
region_info=region_info
)
def train_dataloader(self):
return DataLoader(
self.data_train,
batch_size=self.hparams.batch_size,
drop_last=False,
num_workers=self.hparams.num_workers,
pin_memory=self.hparams.pin_memory,
collate_fn=collate_fn_regional,
)
def val_dataloader(self):
return DataLoader(
self.data_val,
batch_size=self.hparams.batch_size,
shuffle=False,
drop_last=False,
num_workers=self.hparams.num_workers,
pin_memory=self.hparams.pin_memory,
collate_fn=collate_fn_regional,
)
def test_dataloader(self):
return DataLoader(
self.data_test,
batch_size=self.hparams.batch_size,
shuffle=False,
drop_last=False,
num_workers=self.hparams.num_workers,
pin_memory=self.hparams.pin_memory,
collate_fn=collate_fn_regional,
)
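# Hedged usage sketch (added for illustration; paths and variable names are
# assumptions, not part of the original file). Note that setup() reads
# self.patch_size, so the caller must invoke set_patch_size() before setup():
#
#   dm = RegionalForecastDataModule(root_dir="/data/5.625deg_npz",
#                                   variables=["2m_temperature"],
#                                   out_variables=["2m_temperature"],
#                                   buffer_size=10000, region="NorthAmerica")
#   dm.set_patch_size(2)
#   dm.setup()
#   inp, out, lead_times, vars_, out_vars, region_info = next(iter(dm.train_dataloader()))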
|
ClimaX/src/climax/regional_forecast/datamodule.py/0
|
{
"file_path": "ClimaX/src/climax/regional_forecast/datamodule.py",
"repo_id": "ClimaX",
"token_count": 4235
}
| 222 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import random
from PIL import Image
from data.base_dataset import BaseDataset, get_params, get_transform
class Pix2pixDataset(BaseDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
parser.add_argument('--no_pairing_check', action='store_true', help='If specified, skip sanity check of correct label-image file pairing')
return parser
def initialize(self, opt):
self.opt = opt
label_paths, image_paths = self.get_paths(opt)
label_paths = label_paths[:opt.max_dataset_size]
image_paths = image_paths[:opt.max_dataset_size]
if not opt.no_pairing_check:
for path1, path2 in zip(label_paths, image_paths):
assert self.paths_match(path1, path2), \
"The label-image pair (%s, %s) do not look like the right pair because the filenames are quite different. Are you sure about the pairing? Please see data/pix2pix_dataset.py to see what is going on, and use --no_pairing_check to bypass this." % (path1, path2)
self.label_paths = label_paths
self.image_paths = image_paths
size = len(self.label_paths)
self.dataset_size = size
self.real_reference_probability = 1 if opt.phase == 'test' else opt.real_reference_probability
self.hard_reference_probability = 0 if opt.phase == 'test' else opt.hard_reference_probability
self.ref_dict, self.train_test_folder = self.get_ref(opt)
def get_paths(self, opt):
label_paths = []
image_paths = []
assert False, "A subclass of Pix2pixDataset must override self.get_paths(self, opt)"
return label_paths, image_paths
def paths_match(self, path1, path2):
filename1_without_ext = os.path.splitext(os.path.basename(path1))[0]
filename2_without_ext = os.path.splitext(os.path.basename(path2))[0]
return filename1_without_ext == filename2_without_ext
def get_label_tensor(self, path):
label = Image.open(path)
params1 = get_params(self.opt, label.size)
transform_label = get_transform(self.opt, params1, method=Image.NEAREST, normalize=False)
label_tensor = transform_label(label) * 255.0
label_tensor[label_tensor == 255] = self.opt.label_nc
# 'unknown' is opt.label_nc
return label_tensor, params1
def __getitem__(self, index):
# label Image
label_path = self.label_paths[index]
label_path = os.path.join(self.opt.dataroot, label_path)
label_tensor, params1 = self.get_label_tensor(label_path)
# input image (real images)
image_path = self.image_paths[index]
image_path = os.path.join(self.opt.dataroot, image_path)
image = Image.open(image_path).convert('RGB')
transform_image = get_transform(self.opt, params1)
image_tensor = transform_image(image)
ref_tensor = 0
label_ref_tensor = 0
random_p = random.random()
if random_p < self.real_reference_probability or self.opt.phase == 'test':
key = image_path.split('deepfashionHD/')[-1]
val = self.ref_dict[key]
if random_p < self.hard_reference_probability:
#hard reference
path_ref = val[1]
else:
#easy reference
path_ref = val[0]
if self.opt.dataset_mode == 'deepfashionHD':
path_ref = os.path.join(self.opt.dataroot, path_ref)
else:
path_ref = os.path.dirname(image_path).replace(self.train_test_folder[1], self.train_test_folder[0]) + '/' + path_ref
image_ref = Image.open(path_ref).convert('RGB')
if self.opt.dataset_mode != 'deepfashionHD':
path_ref_label = path_ref.replace('.jpg', '.png')
path_ref_label = self.imgpath_to_labelpath(path_ref_label)
else:
path_ref_label = self.imgpath_to_labelpath(path_ref)
label_ref_tensor, params = self.get_label_tensor(path_ref_label)
transform_image = get_transform(self.opt, params)
ref_tensor = transform_image(image_ref)
self_ref_flag = 0.0
else:
pair = False
if self.opt.dataset_mode == 'deepfashionHD' and self.opt.video_like:
key = image_path.replace('\\', '/').split('deepfashionHD/')[-1]
val = self.ref_dict[key]
ref_name = val[0]
key_name = key
path_ref = os.path.join(self.opt.dataroot, ref_name)
image_ref = Image.open(path_ref).convert('RGB')
label_ref_path = self.imgpath_to_labelpath(path_ref)
label_ref_tensor, params = self.get_label_tensor(label_ref_path)
transform_image = get_transform(self.opt, params)
ref_tensor = transform_image(image_ref)
pair = True
if not pair:
label_ref_tensor, params = self.get_label_tensor(label_path)
transform_image = get_transform(self.opt, params)
ref_tensor = transform_image(image)
self_ref_flag = 1.0
input_dict = {'label': label_tensor,
'image': image_tensor,
'path': image_path,
'self_ref': self_ref_flag,
'ref': ref_tensor,
'label_ref': label_ref_tensor
}
return input_dict
def __len__(self):
return self.dataset_size
def get_ref(self, opt):
pass
def imgpath_to_labelpath(self, path):
return path
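# Hedged sketch of the subclass contract (added for illustration; the glob patterns
# and ref.json layout are assumptions, not part of the original file). Concrete
# datasets override get_paths() and get_ref(); paths_match() and
# imgpath_to_labelpath() only need overriding when labels are named differently
# from images:
#
#   class MyPairedDataset(Pix2pixDataset):
#       def get_paths(self, opt):
#           label_paths = sorted(glob.glob(os.path.join(opt.dataroot, "labels/*.png")))
#           image_paths = sorted(glob.glob(os.path.join(opt.dataroot, "images/*.jpg")))
#           return label_paths, image_paths
#
#       def get_ref(self, opt):
#           # maps each image key to [easy_reference, hard_reference]
#           ref_dict = json.load(open(os.path.join(opt.dataroot, "ref.json")))
#           return ref_dict, ("train", "test")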
|
CoCosNet-v2/data/pix2pix_dataset.py/0
|
{
"file_path": "CoCosNet-v2/data/pix2pix_dataset.py",
"repo_id": "CoCosNet-v2",
"token_count": 2742
}
| 223 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import torch.nn as nn
import torch.nn.functional as F
def convert_1d_to_2d(index, base=64):
x = index // base
y = index % base
return x,y
def convert_2d_to_1d(x, y, base=64):
return x*base+y
def batch_meshgrid(shape, device):
batch_size, _, height, width = shape
x_range = torch.arange(0.0, width, device=device)
y_range = torch.arange(0.0, height, device=device)
x_coordinate, y_coordinate = torch.meshgrid(x_range, y_range)
x_coordinate = x_coordinate.expand(batch_size, -1, -1).unsqueeze(1)
y_coordinate = y_coordinate.expand(batch_size, -1, -1).unsqueeze(1)
return x_coordinate, y_coordinate
def inds_to_offset(inds):
"""
inds: b x number x h x w
"""
shape = inds.size()
device = inds.device
x_coordinate, y_coordinate = batch_meshgrid(shape, device)
batch_size, _, height, width = shape
x = inds // width
y = inds % width
return x - x_coordinate, y - y_coordinate
def offset_to_inds(offset_x, offset_y):
shape = offset_x.size()
device = offset_x.device
x_coordinate, y_coordinate = batch_meshgrid(shape, device)
h, w = offset_x.size()[2:]
x = torch.clamp(x_coordinate + offset_x, 0, h-1)
y = torch.clamp(y_coordinate + offset_y, 0, w-1)
return x * offset_x.size()[3] + y
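# Minimal, hedged usage sketch (added for illustration; not part of the original
# file). It round-trips correspondence indices through the offset representation,
# which is the invariant the correspondence modules rely on. batch_meshgrid builds
# grids of shape (w, h), so these helpers assume square feature maps (h == w).
if __name__ == "__main__":
    b, n, h, w = 2, 1, 4, 4
    inds = torch.randint(0, h * w, (b, n, h, w)).float()
    offset_x, offset_y = inds_to_offset(inds)
    recovered = offset_to_inds(offset_x, offset_y)
    assert torch.equal(inds, recovered)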
|
CoCosNet-v2/models/networks/ops.py/0
|
{
"file_path": "CoCosNet-v2/models/networks/ops.py",
"repo_id": "CoCosNet-v2",
"token_count": 585
}
| 224 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torch.nn.utils.spectral_norm as spectral_norm
from models.networks.normalization import SPADE, equal_lr, SPADE_TwoPath
# ResNet block that uses SPADE.
# It differs from the ResNet block of pix2pixHD in that
# it takes in the segmentation map as input, learns the skip connection if necessary,
# and applies normalization first and then convolution.
# This seemed like a standard architecture for unconditional or
# class-conditional GANs built from residual blocks.
# The code was inspired by https://github.com/LMescheder/GAN_stability.
class SPADEResnetBlock(nn.Module):
def __init__(self, fin, fout, opt, use_se=False, dilation=1):
super().__init__()
# Attributes
self.learned_shortcut = (fin != fout)
fmiddle = min(fin, fout)
self.opt = opt
self.pad_type = 'nozero'
self.use_se = use_se
# create conv layers
if self.pad_type != 'zero':
self.pad = nn.ReflectionPad2d(dilation)
self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=0, dilation=dilation)
self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=0, dilation=dilation)
else:
self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=dilation, dilation=dilation)
self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=dilation, dilation=dilation)
if self.learned_shortcut:
self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)
# apply spectral norm if specified
if 'spectral' in opt.norm_G:
if opt.eqlr_sn:
self.conv_0 = equal_lr(self.conv_0)
self.conv_1 = equal_lr(self.conv_1)
if self.learned_shortcut:
self.conv_s = equal_lr(self.conv_s)
else:
self.conv_0 = spectral_norm(self.conv_0)
self.conv_1 = spectral_norm(self.conv_1)
if self.learned_shortcut:
self.conv_s = spectral_norm(self.conv_s)
# define normalization layers
spade_config_str = opt.norm_G.replace('spectral', '')
if 'spade_ic' in opt:
ic = opt.spade_ic
else:
ic = 0 + (3 if 'warp' in opt.CBN_intype else 0) + (opt.semantic_nc if 'mask' in opt.CBN_intype else 0)
self.norm_0 = SPADE(spade_config_str, fin, ic, PONO=opt.PONO, use_apex=opt.apex)
self.norm_1 = SPADE(spade_config_str, fmiddle, ic, PONO=opt.PONO, use_apex=opt.apex)
if self.learned_shortcut:
self.norm_s = SPADE(spade_config_str, fin, ic, PONO=opt.PONO, use_apex=opt.apex)
if use_se:
            self.se_layer = SELayer(fout)
# note the resnet block with SPADE also takes in |seg|,
# the semantic segmentation map as input
def forward(self, x, seg1):
x_s = self.shortcut(x, seg1)
if self.pad_type != 'zero':
dx = self.conv_0(self.pad(self.actvn(self.norm_0(x, seg1))))
dx = self.conv_1(self.pad(self.actvn(self.norm_1(dx, seg1))))
if self.use_se:
                dx = self.se_layer(dx)
else:
dx = self.conv_0(self.actvn(self.norm_0(x, seg1)))
dx = self.conv_1(self.actvn(self.norm_1(dx, seg1)))
if self.use_se:
                dx = self.se_layer(dx)
out = x_s + dx
return out
def shortcut(self, x, seg1):
if self.learned_shortcut:
x_s = self.conv_s(self.norm_s(x, seg1))
else:
x_s = x
return x_s
def actvn(self, x):
return F.leaky_relu(x, 2e-1)
class Attention(nn.Module):
def __init__(self, ch, use_sn):
super(Attention, self).__init__()
# Channel multiplier
self.ch = ch
self.theta = nn.Conv2d(self.ch, self.ch // 8, kernel_size=1, padding=0, bias=False)
self.phi = nn.Conv2d(self.ch, self.ch // 8, kernel_size=1, padding=0, bias=False)
self.g = nn.Conv2d(self.ch, self.ch // 2, kernel_size=1, padding=0, bias=False)
self.o = nn.Conv2d(self.ch // 2, self.ch, kernel_size=1, padding=0, bias=False)
if use_sn:
self.theta = spectral_norm(self.theta)
self.phi = spectral_norm(self.phi)
self.g = spectral_norm(self.g)
self.o = spectral_norm(self.o)
# Learnable gain parameter
self.gamma = nn.Parameter(torch.tensor(0.), requires_grad=True)
def forward(self, x, y=None):
# Apply convs
theta = self.theta(x)
phi = F.max_pool2d(self.phi(x), [2,2])
g = F.max_pool2d(self.g(x), [2,2])
# Perform reshapes
        theta = theta.view(-1, self.ch // 8, x.shape[2] * x.shape[3])
        phi = phi.view(-1, self.ch // 8, x.shape[2] * x.shape[3] // 4)
        g = g.view(-1, self.ch // 2, x.shape[2] * x.shape[3] // 4)
# Matmul and softmax to get attention maps
beta = F.softmax(torch.bmm(theta.transpose(1, 2), phi), -1)
# Attention map times g path
o = self.o(torch.bmm(g, beta.transpose(1,2)).view(-1, self.ch // 2, x.shape[2], x.shape[3]))
return self.gamma * o + x
# ResNet block used in pix2pixHD
# We keep the same architecture as pix2pixHD.
class ResnetBlock(nn.Module):
def __init__(self, dim, norm_layer, activation=nn.ReLU(False), kernel_size=3):
super().__init__()
pw = (kernel_size - 1) // 2
self.conv_block = nn.Sequential(
nn.ReflectionPad2d(pw),
norm_layer(nn.Conv2d(dim, dim, kernel_size=kernel_size)),
activation,
nn.ReflectionPad2d(pw),
norm_layer(nn.Conv2d(dim, dim, kernel_size=kernel_size))
)
def forward(self, x):
y = self.conv_block(x)
out = x + y
return out
# VGG architecter, used for the perceptual loss using a pretrained VGG network
class VGG19(torch.nn.Module):
def __init__(self, requires_grad=False):
super().__init__()
vgg_pretrained_features = torchvision.models.vgg19(pretrained=True).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
for x in range(2):
self.slice1.add_module(str(x), vgg_pretrained_features[x]) #r11
for x in range(2, 7):
self.slice2.add_module(str(x), vgg_pretrained_features[x]) #r21
for x in range(7, 12):
self.slice3.add_module(str(x), vgg_pretrained_features[x]) #r31
for x in range(12, 21):
self.slice4.add_module(str(x), vgg_pretrained_features[x]) #r41
for x in range(21, 30):
self.slice5.add_module(str(x), vgg_pretrained_features[x]) #r51
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h_relu1 = self.slice1(X)
h_relu2 = self.slice2(h_relu1)
h_relu3 = self.slice3(h_relu2)
h_relu4 = self.slice4(h_relu3)
h_relu5 = self.slice5(h_relu4)
out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
return out
class SELayer(nn.Module):
def __init__(self, channel, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel, bias=False),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y.expand_as(x)
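# Hedged smoke test (added for illustration; not part of the original file). Both
# blocks are shape-preserving, which is what allows them to be inserted into the
# generator without changing downstream channel widths. Attention max-pools its
# phi/g branches by 2x, so spatial dims should be even; SELayer only needs the
# channel count to be divisible by `reduction`.
if __name__ == "__main__":
    x = torch.randn(2, 64, 32, 32)
    assert Attention(64, use_sn=False)(x).shape == x.shape
    assert SELayer(64, reduction=16)(x).shape == x.shape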
|
CoCosNet/models/networks/architecture.py/0
|
{
"file_path": "CoCosNet/models/networks/architecture.py",
"repo_id": "CoCosNet",
"token_count": 3967
}
| 225 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BERT classification fine-tuning: utilities to work with GLUE tasks """
from __future__ import absolute_import, division, print_function
import csv
import logging
import os
import sys
from io import open
from sklearn.metrics import f1_score
csv.field_size_limit(sys.maxsize)
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding='utf-8') as f:
lines = []
for line in f.readlines():
line = line.strip().split('<CODESPLIT>')
if len(line) != 5:
continue
lines.append(line)
return lines
class CodesearchProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir, train_file):
"""See base class."""
logger.info("LOOKING AT {}".format(os.path.join(data_dir, train_file)))
return self._create_examples(
self._read_tsv(os.path.join(data_dir, train_file)), "train")
def get_dev_examples(self, data_dir, dev_file):
"""See base class."""
logger.info("LOOKING AT {}".format(os.path.join(data_dir, dev_file)))
return self._create_examples(
self._read_tsv(os.path.join(data_dir, dev_file)), "dev")
def get_test_examples(self, data_dir, test_file):
"""See base class."""
logger.info("LOOKING AT {}".format(os.path.join(data_dir, test_file)))
return self._create_examples(
self._read_tsv(os.path.join(data_dir, test_file)), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[3]
text_b = line[4]
if (set_type == 'test'):
label = self.get_labels()[0]
else:
label = line[0]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
if (set_type == 'test'):
return examples, lines
else:
return examples
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer, output_mode,
cls_token_at_end=False, pad_on_left=False,
cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
sequence_a_segment_id=0, sequence_b_segment_id=1,
cls_token_segment_id=1, pad_token_segment_id=0,
mask_padding_with_zero=True):
""" Loads a data file into a list of `InputBatch`s
        `cls_token_at_end` defines the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
        `cls_token_segment_id` defines the segment id associated with the CLS token (0 for BERT, 2 for XLNet)
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
tokens_a = tokenizer.tokenize(example.text_a)[:50]
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = tokens_a + [sep_token]
segment_ids = [sequence_a_segment_id] * len(tokens)
if tokens_b:
tokens += tokens_b + [sep_token]
segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)
if cls_token_at_end:
tokens = tokens + [cls_token]
segment_ids = segment_ids + [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if output_mode == "classification":
label_id = label_map[example.label]
elif output_mode == "regression":
label_id = float(example.label)
else:
raise KeyError(output_mode)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id))
return features
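# Hedged usage sketch (added for illustration; the model name and data paths are
# assumptions, not part of the original file):
#
#   from transformers import RobertaTokenizer
#   tokenizer = RobertaTokenizer.from_pretrained("microsoft/codebert-base")
#   processor = CodesearchProcessor()
#   examples = processor.get_train_examples("data/train_valid/python", "train.txt")
#   features = convert_examples_to_features(
#       examples, processor.get_labels(), 200, tokenizer, "classification",
#       cls_token=tokenizer.cls_token, sep_token=tokenizer.sep_token,
#       pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0])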
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def acc_and_f1(preds, labels):
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds)
return {
"acc": acc,
"f1": f1,
"acc_and_f1": (acc + f1) / 2,
}
def compute_metrics(task_name, preds, labels):
assert len(preds) == len(labels)
if task_name == "codesearch":
return acc_and_f1(preds, labels)
else:
raise KeyError(task_name)
processors = {
"codesearch": CodesearchProcessor,
}
output_modes = {
"codesearch": "classification",
}
GLUE_TASKS_NUM_LABELS = {
"codesearch": 2,
}
|
CodeBERT/CodeBERT/codesearch/utils.py/0
|
{
"file_path": "CodeBERT/CodeBERT/codesearch/utils.py",
"repo_id": "CodeBERT",
"token_count": 5073
}
| 226 |
import random
import torch
import logging
import multiprocessing
import numpy as np
logger = logging.getLogger(__name__)
def add_args(parser):
parser.add_argument(
"--task",
type=str,
required=False,
choices=[
"review",
],
)
parser.add_argument(
"--model_type",
default="codet5",
type=str,
choices=["roberta", "t5", "bart", "codet5", "scratch"],
)
parser.add_argument("--add_lang_ids", action="store_true")
parser.add_argument("--from_scratch", action="store_true")
parser.add_argument("--debug", action="store_true")
parser.add_argument("--start_epoch", default=0, type=int)
parser.add_argument("--train_epochs", default=10, type=int)
parser.add_argument("--tokenizer_path", type=str, required=False)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=False,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--load_model_path",
default=None,
type=str,
required=False
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
help="Path to trained model: Should contain the .bin files",
)
## Other parameters
parser.add_argument(
"--train_path",
default=None,
type=str,
help="The pretrain files path. Should contain the .jsonl files for this task.",
)
parser.add_argument(
"--eval_chunkname",
default=None,
type=str,
help="The eval file name.",
)
parser.add_argument(
"--train_filename",
default=None,
type=str,
help="The train filename. Should contain the .jsonl files for this task.",
)
parser.add_argument(
"--dev_filename",
default=None,
type=str,
help="The dev filename. Should contain the .jsonl files for this task.",
)
parser.add_argument(
"--test_filename",
default=None,
type=str,
help="The test filename. Should contain the .jsonl files for this task.",
)
parser.add_argument(
"--gold_filename",
default=None,
type=str,
help="The gold filename. Should contain the .jsonl files for this task.",
)
parser.add_argument(
"--config_name",
default="Salesforce/codet5-base",
type=str,
help="Pretrained config name or path if not the same as model_name",
)
parser.add_argument(
"--max_source_length",
default=64,
type=int,
help="The maximum total source sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--max_target_length",
default=32,
type=int,
help="The maximum total target sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--do_train", action="store_true", help="Whether to run eval on the train set."
)
parser.add_argument(
"--do_eval", action="store_true", help="Whether to run eval on the dev set."
)
parser.add_argument(
"--do_test", action="store_true", help="Whether to run eval on the dev set."
)
parser.add_argument(
"--raw_input", action="store_true", help="Whether to use simple input format (set for baselines)."
)
parser.add_argument(
"--do_lower_case",
action="store_true",
help="Set this flag if you are using an uncased model.",
)
parser.add_argument(
"--no_cuda", action="store_true", help="Avoid using CUDA when available"
)
parser.add_argument(
"--train_batch_size",
default=8,
type=int,
help="Batch size per GPU/CPU for training.",
)
parser.add_argument(
"--eval_batch_size",
default=8,
type=int,
help="Batch size per GPU/CPU for evaluation.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.",
)
parser.add_argument(
"--mask_rate", default=0.15, type=float, help="The masked rate of input lines.",
)
parser.add_argument(
"--beam_size", default=6, type=int, help="beam size for beam search"
)
parser.add_argument(
"--weight_decay", default=0.0, type=float, help="Weight deay if we apply some."
)
parser.add_argument(
"--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer."
)
parser.add_argument(
"--max_grad_norm", default=1.0, type=float, help="Max gradient norm."
)
parser.add_argument(
"--save_steps", default=-1, type=int,
)
parser.add_argument(
"--log_steps", default=-1, type=int,
)
parser.add_argument("--eval_steps", default=-1, type=int, help="")
parser.add_argument("--eval_file", default="", type=str)
parser.add_argument("--out_file", default="", type=str)
parser.add_argument("--break_cnt", default=-1, type=int)
parser.add_argument("--train_steps", default=-1, type=int, help="")
parser.add_argument(
"--warmup_steps", default=100, type=int, help="Linear warmup over warmup_steps."
)
parser.add_argument(
"--gpu_per_node",
type=int,
default=4,
help="gpus per node",
)
parser.add_argument(
"--node_index",
type=int,
default=0,
help="For distributed training: node_index",
)
parser.add_argument(
"--local_rank",
type=int,
default=-1,
help="For distributed training: local_rank",
)
parser.add_argument(
"--seed", type=int, default=2233, help="random seed for initialization"
) # previous one 42
args = parser.parse_args()
return args
def set_dist(args):
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu"
)
args.n_gpu = torch.cuda.device_count()
else:
# Setup for distributed data parallel
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
cpu_count = multiprocessing.cpu_count()
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, cpu count: %d",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
cpu_count,
)
args.device = device
args.cpu_count = cpu_count
def set_seed(args):
"""set random seed."""
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
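# Hedged usage sketch (added for illustration; not part of the original file):
#
#   import argparse
#   parser = argparse.ArgumentParser()
#   args = add_args(parser)   # parses sys.argv into the flags defined above
#   set_dist(args)            # fills args.device, args.n_gpu and args.cpu_count
#   set_seed(args)            # seeds the python, numpy and torch RNGs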
|
CodeBERT/CodeReviewer/code/configs.py/0
|
{
"file_path": "CodeBERT/CodeReviewer/code/configs.py",
"repo_id": "CodeBERT",
"token_count": 3204
}
| 227 |
#!/usr/bin/python
'''
This script was adapted from the original version by hieuhoang1972 which is part of MOSES.
'''
# $Id: bleu.py 1307 2007-03-14 22:22:36Z hieuhoang1972 $
'''Provides:
cook_refs(refs, n=4): Transform a list of reference sentences as strings into a form usable by cook_test().
cook_test(test, refs, n=4): Transform a test sentence as a string (together with the cooked reference sentences) into a form usable by score_cooked().
score_cooked(alltest, n=4): Score a list of cooked test sentences.
score_set(s, testid, refids, n=4): Interface with dataset.py; calculate BLEU score of testid against refids.
The reason for breaking the BLEU computation into three phases cook_refs(), cook_test(), and score_cooked() is to allow the caller to calculate BLEU scores for multiple test sets as efficiently as possible.
'''
import sys, math, re, xml.sax.saxutils
import subprocess
import os
import nltk
# Added to bypass NIST-style pre-processing of hyp and ref files -- wade
nonorm = 0
preserve_case = False
eff_ref_len = "shortest"
normalize1 = [
('<skipped>', ''), # strip "skipped" tags
(r'-\n', ''), # strip end-of-line hyphenation and join lines
(r'\n', ' '), # join lines
# (r'(\d)\s+(?=\d)', r'\1'), # join digits
]
normalize1 = [(re.compile(pattern), replace) for (pattern, replace) in normalize1]
normalize2 = [
(r'([\{-\~\[-\` -\&\(-\+\:-\@\/])', r' \1 '), # tokenize punctuation. apostrophe is missing
(r'([^0-9])([\.,])', r'\1 \2 '), # tokenize period and comma unless preceded by a digit
(r'([\.,])([^0-9])', r' \1 \2'), # tokenize period and comma unless followed by a digit
(r'([0-9])(-)', r'\1 \2 ') # tokenize dash when preceded by a digit
]
normalize2 = [(re.compile(pattern), replace) for (pattern, replace) in normalize2]
def normalize(s):
'''Normalize and tokenize text. This is lifted from NIST mteval-v11a.pl.'''
# Added to bypass NIST-style pre-processing of hyp and ref files -- wade
if (nonorm):
return s.split()
if type(s) is not str:
s = " ".join(s)
# language-independent part:
for (pattern, replace) in normalize1:
s = re.sub(pattern, replace, s)
s = xml.sax.saxutils.unescape(s, {'"': '"'})
# language-dependent part (assuming Western languages):
s = " %s " % s
if not preserve_case:
s = s.lower() # this might not be identical to the original
for (pattern, replace) in normalize2:
s = re.sub(pattern, replace, s)
return s.split()
def count_ngrams(words, n=4):
counts = {}
for k in range(1, n + 1):
for i in range(len(words) - k + 1):
ngram = tuple(words[i:i + k])
counts[ngram] = counts.get(ngram, 0) + 1
return counts
def cook_refs(refs, n=4):
'''Takes a list of reference sentences for a single segment
and returns an object that encapsulates everything that BLEU
needs to know about them.'''
refs = [normalize(ref) for ref in refs]
maxcounts = {}
for ref in refs:
counts = count_ngrams(ref, n)
for (ngram, count) in counts.items():
maxcounts[ngram] = max(maxcounts.get(ngram, 0), count)
return ([len(ref) for ref in refs], maxcounts)
def cook_test(test, item, n=4):
'''Takes a test sentence and returns an object that
encapsulates everything that BLEU needs to know about it.'''
(reflens, refmaxcounts) = item
test = normalize(test)
result = {}
result["testlen"] = len(test)
# Calculate effective reference sentence length.
if eff_ref_len == "shortest":
result["reflen"] = min(reflens)
elif eff_ref_len == "average":
result["reflen"] = float(sum(reflens)) / len(reflens)
elif eff_ref_len == "closest":
min_diff = None
for reflen in reflens:
if min_diff is None or abs(reflen - len(test)) < min_diff:
min_diff = abs(reflen - len(test))
result['reflen'] = reflen
result["guess"] = [max(len(test) - k + 1, 0) for k in range(1, n + 1)]
result['correct'] = [0] * n
counts = count_ngrams(test, n)
for (ngram, count) in counts.items():
result["correct"][len(ngram) - 1] += min(refmaxcounts.get(ngram, 0), count)
return result
def score_cooked(allcomps, n=4, ground=0, smooth=1):
totalcomps = {'testlen': 0, 'reflen': 0, 'guess': [0] * n, 'correct': [0] * n}
for comps in allcomps:
for key in ['testlen', 'reflen']:
totalcomps[key] += comps[key]
for key in ['guess', 'correct']:
for k in range(n):
totalcomps[key][k] += comps[key][k]
logbleu = 0.0
all_bleus = []
for k in range(n):
correct = totalcomps['correct'][k]
guess = totalcomps['guess'][k]
addsmooth = 0
if smooth == 1 and k > 0:
addsmooth = 1
logbleu += math.log(correct + addsmooth + sys.float_info.min) - math.log(guess + addsmooth + sys.float_info.min)
if guess == 0:
all_bleus.append(-10000000)
else:
all_bleus.append(math.log(correct + sys.float_info.min) - math.log(guess))
logbleu /= float(n)
all_bleus.insert(0, logbleu)
brevPenalty = min(0, 1 - float(totalcomps['reflen'] + 1) / (totalcomps['testlen'] + 1))
for i in range(len(all_bleus)):
if i == 0:
all_bleus[i] += brevPenalty
all_bleus[i] = math.exp(all_bleus[i])
return all_bleus
def bleu(refs, candidate, ground=0, smooth=1):
refs = cook_refs(refs)
test = cook_test(candidate, refs)
return score_cooked([test], ground=ground, smooth=smooth)
def splitPuncts(line):
return ' '.join(re.findall(r"[\w]+|[^\s\w]", line))
def bleu_fromstr(predictions, golds, rmstop=True):
predictions = [" ".join(nltk.wordpunct_tokenize(predictions[i])) for i in range(len(predictions))]
golds = [" ".join(nltk.wordpunct_tokenize(g)) for g in golds]
if rmstop:
pypath = os.path.dirname(os.path.realpath(__file__))
stopwords = open(os.path.join(pypath, "stopwords.txt")).readlines()
stopwords = [stopword.strip() for stopword in stopwords]
golds = [" ".join([word for word in ref.split() if word not in stopwords]) for ref in golds]
predictions = [" ".join([word for word in hyp.split() if word not in stopwords]) for hyp in predictions]
predictions = [str(i) + "\t" + pred.replace("\t", " ") for (i, pred) in enumerate(predictions)]
golds = [str(i) + "\t" + gold.replace("\t", " ") for (i, gold) in enumerate(golds)]
goldMap, predictionMap = computeMaps(predictions, golds)
bleu = round(bleuFromMaps(goldMap, predictionMap)[0], 2)
return bleu
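# Hedged usage sketch (added for illustration; not part of the original file).
# rmstop=False avoids the stopwords.txt dependency; the result is a smoothed
# corpus BLEU on a 0-100 scale:
#
#   preds = ["returns the sum of two numbers"]
#   golds = ["return the sum of the two numbers"]
#   print(bleu_fromstr(preds, golds, rmstop=False))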
def computeMaps(predictions, goldfile):
predictionMap = {}
goldMap = {}
for row in predictions:
cols = row.strip().split('\t')
if len(cols) == 1:
(rid, pred) = (cols[0], '')
else:
(rid, pred) = (cols[0], cols[1])
predictionMap[rid] = [splitPuncts(pred.strip().lower())]
for row in goldfile:
(rid, pred) = row.split('\t')
if rid in predictionMap: # Only insert if the id exists for the method
if rid not in goldMap:
goldMap[rid] = []
goldMap[rid].append(splitPuncts(pred.strip().lower()))
sys.stderr.write('Total: ' + str(len(goldMap)) + '\n')
return (goldMap, predictionMap)
# m1 is the reference map
# m2 is the prediction map
def bleuFromMaps(m1, m2):
score = [0] * 5
num = 0.0
for key in m1:
if key in m2:
bl = bleu(m1[key], m2[key][0])
score = [score[i] + bl[i] for i in range(0, len(bl))]
num += 1
return [s * 100.0 / num for s in score]
if __name__ == '__main__':
reference_file = sys.argv[1]
predictions = []
for row in sys.stdin:
predictions.append(row)
(goldMap, predictionMap) = computeMaps(predictions, reference_file)
print(bleuFromMaps(goldMap, predictionMap)[0])
|
CodeBERT/CodeReviewer/code/evaluator/smooth_bleu.py/0
|
{
"file_path": "CodeBERT/CodeReviewer/code/evaluator/smooth_bleu.py",
"repo_id": "CodeBERT",
"token_count": 3403
}
| 228 |
# batch size 6 for 16 GB GPU
mnt_dir="/home/codereview"
MASTER_HOST=localhost && echo MASTER_HOST: ${MASTER_HOST}
MASTER_PORT=23333 && echo MASTER_PORT: ${MASTER_PORT}
RANK=0 && echo RANK: ${RANK}
PER_NODE_GPU=1 && echo PER_NODE_GPU: ${PER_NODE_GPU}
WORLD_SIZE=1 && echo WORLD_SIZE: ${WORLD_SIZE}
NODES=1 && echo NODES: ${NODES}
NCCL_DEBUG=INFO
bash test_nltk.sh
python -m torch.distributed.launch --nproc_per_node ${PER_NODE_GPU} --node_rank=${RANK} --nnodes=${NODES} --master_addr=${MASTER_HOST} --master_port=${MASTER_PORT} ../run_test_ref.py \
--model_name_or_path microsoft/codereviewer \
--output_dir ../../save/gen \
--load_model_path ../../save/gen/checkpoint \
--output_dir empty \
--eval_file ref-test.jsonl \
--max_source_length 200 \
--max_target_length 200 \
--eval_batch_size 12 \
--mask_rate 0.15 \
--save_steps 1800 \
--beam_size 10 \
--log_steps 100 \
--train_steps 120000 \
--gpu_per_node=${PER_NODE_GPU} \
--node_index=${RANK} \
--seed 2233 \
|
CodeBERT/CodeReviewer/code/sh/test-ref.sh/0
|
{
"file_path": "CodeBERT/CodeReviewer/code/sh/test-ref.sh",
"repo_id": "CodeBERT",
"token_count": 419
}
| 229 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
"""
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import pickle
import random
import re
import shutil
import json
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
from torch.utils.data.distributed import DistributedSampler
from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer)
from tqdm import tqdm, trange
import multiprocessing
from model import Model
cpu_cont = 16
logger = logging.getLogger(__name__)
from parser import DFG_python,DFG_java,DFG_ruby,DFG_go,DFG_php,DFG_javascript
from parser import (remove_comments_and_docstrings,
tree_to_token_index,
index_to_code_token,
tree_to_variable_index)
from tree_sitter import Language, Parser
dfg_function={
'python':DFG_python,
'java':DFG_java,
'ruby':DFG_ruby,
'go':DFG_go,
'php':DFG_php,
'javascript':DFG_javascript
}
#load parsers
parsers={}
for lang in dfg_function:
LANGUAGE = Language('parser/my-languages.so', lang)
parser = Parser()
parser.set_language(LANGUAGE)
parser = [parser,dfg_function[lang]]
parsers[lang]= parser
#remove comments, tokenize code and extract dataflow
def extract_dataflow(code, parser,lang):
#remove comments
try:
code=remove_comments_and_docstrings(code,lang)
except:
pass
#obtain dataflow
if lang=="php":
code="<?php"+code+"?>"
try:
tree = parser[0].parse(bytes(code,'utf8'))
root_node = tree.root_node
tokens_index=tree_to_token_index(root_node)
code=code.split('\n')
code_tokens=[index_to_code_token(x,code) for x in tokens_index]
index_to_code={}
for idx,(index,code) in enumerate(zip(tokens_index,code_tokens)):
index_to_code[index]=(idx,code)
try:
DFG,_=parser[1](root_node,index_to_code,{})
except:
DFG=[]
DFG=sorted(DFG,key=lambda x:x[1])
indexs=set()
for d in DFG:
if len(d[-1])!=0:
indexs.add(d[1])
for x in d[-1]:
indexs.add(x)
new_DFG=[]
for d in DFG:
if d[1] in indexs:
new_DFG.append(d)
dfg=new_DFG
except:
dfg=[]
return code_tokens,dfg
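# Hedged usage sketch (added for illustration; not part of the original file):
#
#   code = "int add(int a, int b) { return a + b; }"
#   code_tokens, dfg = extract_dataflow(code, parsers['java'], 'java')
#   # code_tokens are the surface tokens; each dfg entry ends with the list of
#   # source-token indices it depends on (see the filtering loop above).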
class InputFeatures(object):
"""A single training/test features for a example."""
def __init__(self,
input_tokens_1,
input_ids_1,
position_idx_1,
dfg_to_code_1,
dfg_to_dfg_1,
input_tokens_2,
input_ids_2,
position_idx_2,
dfg_to_code_2,
dfg_to_dfg_2,
label,
url1,
url2
):
#The first code function
self.input_tokens_1 = input_tokens_1
self.input_ids_1 = input_ids_1
self.position_idx_1=position_idx_1
self.dfg_to_code_1=dfg_to_code_1
self.dfg_to_dfg_1=dfg_to_dfg_1
#The second code function
self.input_tokens_2 = input_tokens_2
self.input_ids_2 = input_ids_2
self.position_idx_2=position_idx_2
self.dfg_to_code_2=dfg_to_code_2
self.dfg_to_dfg_2=dfg_to_dfg_2
#label
self.label=label
self.url1=url1
self.url2=url2
def convert_examples_to_features(item):
#source
url1,url2,label,tokenizer, args,cache,url_to_code=item
parser=parsers['java']
for url in [url1,url2]:
if url not in cache:
func=url_to_code[url]
#extract data flow
code_tokens,dfg=extract_dataflow(func,parser,'java')
code_tokens=[tokenizer.tokenize('@ '+x)[1:] if idx!=0 else tokenizer.tokenize(x) for idx,x in enumerate(code_tokens)]
ori2cur_pos={}
ori2cur_pos[-1]=(0,0)
for i in range(len(code_tokens)):
ori2cur_pos[i]=(ori2cur_pos[i-1][1],ori2cur_pos[i-1][1]+len(code_tokens[i]))
code_tokens=[y for x in code_tokens for y in x]
#truncating
code_tokens=code_tokens[:args.code_length+args.data_flow_length-3-min(len(dfg),args.data_flow_length)][:512-3]
source_tokens =[tokenizer.cls_token]+code_tokens+[tokenizer.sep_token]
source_ids = tokenizer.convert_tokens_to_ids(source_tokens)
position_idx = [i+tokenizer.pad_token_id + 1 for i in range(len(source_tokens))]
dfg=dfg[:args.code_length+args.data_flow_length-len(source_tokens)]
source_tokens+=[x[0] for x in dfg]
position_idx+=[0 for x in dfg]
source_ids+=[tokenizer.unk_token_id for x in dfg]
padding_length=args.code_length+args.data_flow_length-len(source_ids)
position_idx+=[tokenizer.pad_token_id]*padding_length
source_ids+=[tokenizer.pad_token_id]*padding_length
#reindex
reverse_index={}
for idx,x in enumerate(dfg):
reverse_index[x[1]]=idx
for idx,x in enumerate(dfg):
dfg[idx]=x[:-1]+([reverse_index[i] for i in x[-1] if i in reverse_index],)
dfg_to_dfg=[x[-1] for x in dfg]
dfg_to_code=[ori2cur_pos[x[1]] for x in dfg]
length=len([tokenizer.cls_token])
dfg_to_code=[(x[0]+length,x[1]+length) for x in dfg_to_code]
cache[url]=source_tokens,source_ids,position_idx,dfg_to_code,dfg_to_dfg
source_tokens_1,source_ids_1,position_idx_1,dfg_to_code_1,dfg_to_dfg_1=cache[url1]
source_tokens_2,source_ids_2,position_idx_2,dfg_to_code_2,dfg_to_dfg_2=cache[url2]
return InputFeatures(source_tokens_1,source_ids_1,position_idx_1,dfg_to_code_1,dfg_to_dfg_1,
source_tokens_2,source_ids_2,position_idx_2,dfg_to_code_2,dfg_to_dfg_2,
label,url1,url2)
class TextDataset(Dataset):
def __init__(self, tokenizer, args, file_path='train'):
self.examples = []
self.args=args
index_filename=file_path
#load index
logger.info("Creating features from index file at %s ", index_filename)
url_to_code={}
with open('/'.join(index_filename.split('/')[:-1])+'/data.jsonl') as f:
for line in f:
line=line.strip()
js=json.loads(line)
url_to_code[js['idx']]=js['func']
#load code function according to index
data=[]
cache={}
        with open(index_filename) as f:
for line in f:
line=line.strip()
url1,url2,label=line.split('\t')
if url1 not in url_to_code or url2 not in url_to_code:
continue
if label=='0':
label=0
else:
label=1
data.append((url1,url2,label,tokenizer, args,cache,url_to_code))
#only use 10% valid data to keep best model
if 'valid' in file_path:
data=random.sample(data,int(len(data)*0.1))
#convert example to input features
self.examples=[convert_examples_to_features(x) for x in tqdm(data,total=len(data))]
if 'train' in file_path:
for idx, example in enumerate(self.examples[:3]):
logger.info("*** Example ***")
logger.info("idx: {}".format(idx))
logger.info("label: {}".format(example.label))
logger.info("input_tokens_1: {}".format([x.replace('\u0120','_') for x in example.input_tokens_1]))
logger.info("input_ids_1: {}".format(' '.join(map(str, example.input_ids_1))))
logger.info("position_idx_1: {}".format(example.position_idx_1))
logger.info("dfg_to_code_1: {}".format(' '.join(map(str, example.dfg_to_code_1))))
logger.info("dfg_to_dfg_1: {}".format(' '.join(map(str, example.dfg_to_dfg_1))))
logger.info("input_tokens_2: {}".format([x.replace('\u0120','_') for x in example.input_tokens_2]))
logger.info("input_ids_2: {}".format(' '.join(map(str, example.input_ids_2))))
logger.info("position_idx_2: {}".format(example.position_idx_2))
logger.info("dfg_to_code_2: {}".format(' '.join(map(str, example.dfg_to_code_2))))
logger.info("dfg_to_dfg_2: {}".format(' '.join(map(str, example.dfg_to_dfg_2))))
def __len__(self):
return len(self.examples)
def __getitem__(self, item):
#calculate graph-guided masked function
attn_mask_1= np.zeros((self.args.code_length+self.args.data_flow_length,
self.args.code_length+self.args.data_flow_length),dtype=bool)
#calculate begin index of node and max length of input
node_index=sum([i>1 for i in self.examples[item].position_idx_1])
max_length=sum([i!=1 for i in self.examples[item].position_idx_1])
#sequence can attend to sequence
attn_mask_1[:node_index,:node_index]=True
#special tokens attend to all tokens
for idx,i in enumerate(self.examples[item].input_ids_1):
if i in [0,2]:
attn_mask_1[idx,:max_length]=True
        #nodes attend to the code tokens they are identified from
for idx,(a,b) in enumerate(self.examples[item].dfg_to_code_1):
if a<node_index and b<node_index:
attn_mask_1[idx+node_index,a:b]=True
attn_mask_1[a:b,idx+node_index]=True
#nodes attend to adjacent nodes
for idx,nodes in enumerate(self.examples[item].dfg_to_dfg_1):
for a in nodes:
if a+node_index<len(self.examples[item].position_idx_1):
attn_mask_1[idx+node_index,a+node_index]=True
#calculate graph-guided masked function
attn_mask_2= np.zeros((self.args.code_length+self.args.data_flow_length,
self.args.code_length+self.args.data_flow_length),dtype=bool)
#calculate begin index of node and max length of input
node_index=sum([i>1 for i in self.examples[item].position_idx_2])
max_length=sum([i!=1 for i in self.examples[item].position_idx_2])
#sequence can attend to sequence
attn_mask_2[:node_index,:node_index]=True
#special tokens attend to all tokens
for idx,i in enumerate(self.examples[item].input_ids_2):
if i in [0,2]:
attn_mask_2[idx,:max_length]=True
        #nodes attend to the code tokens they are identified from
for idx,(a,b) in enumerate(self.examples[item].dfg_to_code_2):
if a<node_index and b<node_index:
attn_mask_2[idx+node_index,a:b]=True
attn_mask_2[a:b,idx+node_index]=True
#nodes attend to adjacent nodes
for idx,nodes in enumerate(self.examples[item].dfg_to_dfg_2):
for a in nodes:
if a+node_index<len(self.examples[item].position_idx_2):
attn_mask_2[idx+node_index,a+node_index]=True
return (torch.tensor(self.examples[item].input_ids_1),
torch.tensor(self.examples[item].position_idx_1),
torch.tensor(attn_mask_1),
torch.tensor(self.examples[item].input_ids_2),
torch.tensor(self.examples[item].position_idx_2),
torch.tensor(attn_mask_2),
torch.tensor(self.examples[item].label))
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer):
""" Train the model """
#build dataloader
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size,num_workers=4)
    args.max_steps=args.epochs*len(train_dataloader)
    args.save_steps=len(train_dataloader)//10
args.warmup_steps=args.max_steps//5
model.to(args.device)
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,
num_training_steps=args.max_steps)
# multi-gpu training
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.train_batch_size//max(args.n_gpu, 1))
logger.info(" Total train batch size = %d",args.train_batch_size*args.gradient_accumulation_steps)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", args.max_steps)
global_step=0
tr_loss, logging_loss,avg_loss,tr_nb,tr_num,train_loss = 0.0, 0.0,0.0,0,0,0
best_f1=0
model.zero_grad()
for idx in range(args.epochs):
bar = tqdm(train_dataloader,total=len(train_dataloader))
tr_num=0
train_loss=0
for step, batch in enumerate(bar):
(inputs_ids_1,position_idx_1,attn_mask_1,
inputs_ids_2,position_idx_2,attn_mask_2,
labels)=[x.to(args.device) for x in batch]
model.train()
loss,logits = model(inputs_ids_1,position_idx_1,attn_mask_1,inputs_ids_2,position_idx_2,attn_mask_2,labels)
if args.n_gpu > 1:
loss = loss.mean()
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
tr_loss += loss.item()
tr_num+=1
train_loss+=loss.item()
if avg_loss==0:
avg_loss=tr_loss
avg_loss=round(train_loss/tr_num,5)
bar.set_description("epoch {} loss {}".format(idx,avg_loss))
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
scheduler.step()
global_step += 1
output_flag=True
avg_loss=round(np.exp((tr_loss - logging_loss) /(global_step- tr_nb)),4)
if global_step % args.save_steps == 0:
results = evaluate(args, model, tokenizer, eval_when_training=True)
# Save model checkpoint
if results['eval_f1']>best_f1:
best_f1=results['eval_f1']
logger.info(" "+"*"*20)
logger.info(" Best f1:%s",round(best_f1,4))
logger.info(" "+"*"*20)
checkpoint_prefix = 'checkpoint-best-f1'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model,'module') else model
output_dir = os.path.join(output_dir, '{}'.format('model.bin'))
torch.save(model_to_save.state_dict(), output_dir)
logger.info("Saving model checkpoint to %s", output_dir)
def evaluate(args, model, tokenizer, eval_when_training=False):
#build dataloader
eval_dataset = TextDataset(tokenizer, args, file_path=args.eval_data_file)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler,batch_size=args.eval_batch_size,num_workers=4)
# multi-gpu evaluate
if args.n_gpu > 1 and eval_when_training is False:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
logits=[]
y_trues=[]
for batch in eval_dataloader:
(inputs_ids_1,position_idx_1,attn_mask_1,
inputs_ids_2,position_idx_2,attn_mask_2,
labels)=[x.to(args.device) for x in batch]
with torch.no_grad():
lm_loss,logit = model(inputs_ids_1,position_idx_1,attn_mask_1,inputs_ids_2,position_idx_2,attn_mask_2,labels)
eval_loss += lm_loss.mean().item()
logits.append(logit.cpu().numpy())
y_trues.append(labels.cpu().numpy())
nb_eval_steps += 1
#calculate scores
logits=np.concatenate(logits,0)
y_trues=np.concatenate(y_trues,0)
best_threshold=0.5
best_f1=0
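    # binarize with a fixed 0.5 threshold on the model's score for the positive (clone) class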
y_preds=logits[:,1]>best_threshold
from sklearn.metrics import recall_score
recall=recall_score(y_trues, y_preds)
from sklearn.metrics import precision_score
precision=precision_score(y_trues, y_preds)
from sklearn.metrics import f1_score
f1=f1_score(y_trues, y_preds)
result = {
"eval_recall": float(recall),
"eval_precision": float(precision),
"eval_f1": float(f1),
"eval_threshold":best_threshold,
}
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(round(result[key],4)))
return result
def test(args, model, tokenizer, best_threshold=0):
#build dataloader
eval_dataset = TextDataset(tokenizer, args, file_path=args.test_data_file)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size,num_workers=4)
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running Test *****")
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
logits=[]
y_trues=[]
for batch in eval_dataloader:
(inputs_ids_1,position_idx_1,attn_mask_1,
inputs_ids_2,position_idx_2,attn_mask_2,
labels)=[x.to(args.device) for x in batch]
with torch.no_grad():
lm_loss,logit = model(inputs_ids_1,position_idx_1,attn_mask_1,inputs_ids_2,position_idx_2,attn_mask_2,labels)
eval_loss += lm_loss.mean().item()
logits.append(logit.cpu().numpy())
y_trues.append(labels.cpu().numpy())
nb_eval_steps += 1
#output result
logits=np.concatenate(logits,0)
y_preds=logits[:,1]>best_threshold
with open(os.path.join(args.output_dir,"predictions.txt"),'w') as f:
for example,pred in zip(eval_dataset.examples,y_preds):
if pred:
f.write(example.url1+'\t'+example.url2+'\t'+'1'+'\n')
else:
f.write(example.url1+'\t'+example.url2+'\t'+'0'+'\n')
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--train_data_file", default=None, type=str, required=True,
help="The input training data file (a text file).")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--eval_data_file", default=None, type=str,
help="An optional input evaluation data file to evaluate the perplexity on (a text file).")
parser.add_argument("--test_data_file", default=None, type=str,
help="An optional input evaluation data file to evaluate the perplexity on (a text file).")
parser.add_argument("--model_name_or_path", default=None, type=str,
help="The model checkpoint for weights initialization.")
parser.add_argument("--config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
parser.add_argument("--code_length", default=256, type=int,
help="Optional Code input sequence length after tokenization.")
parser.add_argument("--data_flow_length", default=64, type=int,
help="Optional Data Flow input sequence length after tokenization.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_test", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--evaluate_during_training", action='store_true',
help="Run evaluation during training at each logging step.")
parser.add_argument("--train_batch_size", default=4, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--eval_batch_size", default=4, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--epochs', type=int, default=1,
help="training epochs")
args = parser.parse_args()
# Setup CUDA, GPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.n_gpu = torch.cuda.device_count()
args.device = device
# Setup logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',datefmt='%m/%d/%Y %H:%M:%S',level=logging.INFO)
logger.warning("device: %s, n_gpu: %s",device, args.n_gpu,)
# Set seed
set_seed(args)
config = RobertaConfig.from_pretrained(args.config_name if args.config_name else args.model_name_or_path)
config.num_labels=1
tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
model = RobertaForSequenceClassification.from_pretrained(args.model_name_or_path,config=config)
model=Model(model,config,tokenizer,args)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = TextDataset(tokenizer, args, file_path=args.train_data_file)
train(args, train_dataset, model, tokenizer)
# Evaluation
results = {}
if args.do_eval:
checkpoint_prefix = 'checkpoint-best-f1/model.bin'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
model.load_state_dict(torch.load(output_dir))
model.to(args.device)
result=evaluate(args, model, tokenizer)
if args.do_test:
checkpoint_prefix = 'checkpoint-best-f1/model.bin'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
model.load_state_dict(torch.load(output_dir))
model.to(args.device)
test(args, model, tokenizer,best_threshold=0.5)
return results
if __name__ == "__main__":
main()
|
CodeBERT/GraphCodeBERT/clonedetection/run.py/0
|
{
"file_path": "CodeBERT/GraphCodeBERT/clonedetection/run.py",
"repo_id": "CodeBERT",
"token_count": 13244
}
| 230 |
from .utils import (remove_comments_and_docstrings,
tree_to_token_index,
index_to_code_token,
tree_to_variable_index)
from .DFG import DFG_python,DFG_java,DFG_ruby,DFG_go,DFG_php,DFG_javascript,DFG_csharp
|
CodeBERT/GraphCodeBERT/refinement/parser/__init__.py/0
|
{
"file_path": "CodeBERT/GraphCodeBERT/refinement/parser/__init__.py",
"repo_id": "CodeBERT",
"token_count": 136
}
| 231 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning GraphCodeBERT for code-to-code translation between Java and C#.
The script reads parallel source/target files, extracts data flow from the source code,
and trains a sequence-to-sequence model (pre-trained encoder plus a Transformer decoder).
"""
from __future__ import absolute_import
import os
import sys
import pickle
import torch
import json
import random
import logging
import argparse
import numpy as np
from io import open
from itertools import cycle
import torch.nn as nn
from model import Seq2Seq
from tqdm import tqdm, trange
from bleu import _bleu
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
from torch.utils.data.distributed import DistributedSampler
from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
RobertaConfig, RobertaModel, RobertaTokenizer)
MODEL_CLASSES = {'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer)}
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
from parser import DFG_python,DFG_java,DFG_ruby,DFG_go,DFG_php,DFG_javascript,DFG_csharp
from parser import (remove_comments_and_docstrings,
tree_to_token_index,
index_to_code_token,
tree_to_variable_index)
from tree_sitter import Language, Parser
dfg_function={
'python':DFG_python,
'java':DFG_java,
'ruby':DFG_ruby,
'go':DFG_go,
'php':DFG_php,
'javascript':DFG_javascript,
'c_sharp':DFG_csharp,
}
logger = logging.getLogger(__name__)
#load parsers
parsers={}
for lang in dfg_function:
LANGUAGE = Language('parser/my-languages.so', lang)
parser = Parser()
parser.set_language(LANGUAGE)
parser = [parser,dfg_function[lang]]
parsers[lang]= parser
#remove comments, tokenize code and extract dataflow
def extract_dataflow(code, parser,lang):
#remove comments
try:
code=remove_comments_and_docstrings(code,lang)
except:
pass
#obtain dataflow
if lang=="php":
code="<?php"+code+"?>"
try:
tree = parser[0].parse(bytes(code,'utf8'))
root_node = tree.root_node
tokens_index=tree_to_token_index(root_node)
code=code.split('\n')
code_tokens=[index_to_code_token(x,code) for x in tokens_index]
index_to_code={}
for idx,(index,code) in enumerate(zip(tokens_index,code_tokens)):
index_to_code[index]=(idx,code)
try:
DFG,_=parser[1](root_node,index_to_code,{})
except:
DFG=[]
DFG=sorted(DFG,key=lambda x:x[1])
indexs=set()
for d in DFG:
if len(d[-1])!=0:
indexs.add(d[1])
for x in d[-1]:
indexs.add(x)
new_DFG=[]
for d in DFG:
if d[1] in indexs:
new_DFG.append(d)
dfg=new_DFG
except:
dfg=[]
return code_tokens,dfg
class Example(object):
"""A single training/test example."""
def __init__(self,
source,
target,
lang
):
self.source = source
self.target = target
self.lang=lang
def read_examples(filename):
"""Read examples from filename."""
examples=[]
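    # filename packs 'source_file,target_file'; a source path ending in 's' (the .cs side) is treated as C#, otherwise Java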
source,target=filename.split(',')
lang='java'
if source[-1]=='s':
lang='c_sharp'
with open(source,encoding="utf-8") as f1,open(target,encoding="utf-8") as f2:
for line1,line2 in zip(f1,f2):
line1=line1.strip()
line2=line2.strip()
examples.append(
Example(
source=line1,
target=line2,
lang=lang
)
)
return examples
class InputFeatures(object):
"""A single training/test features for a example."""
def __init__(self,
example_id,
source_ids,
position_idx,
dfg_to_code,
dfg_to_dfg,
target_ids,
source_mask,
target_mask,
):
self.example_id = example_id
self.source_ids = source_ids
self.position_idx = position_idx
self.dfg_to_code = dfg_to_code
self.dfg_to_dfg = dfg_to_dfg
self.target_ids = target_ids
self.source_mask = source_mask
self.target_mask = target_mask
parsers={}
for lang in dfg_function:
LANGUAGE = Language('parser/my-languages.so', lang)
parser = Parser()
parser.set_language(LANGUAGE)
parser = [parser,dfg_function[lang]]
parsers[lang]= parser
def convert_examples_to_features(examples, tokenizer, args,stage=None):
features = []
for example_index, example in enumerate(tqdm(examples,total=len(examples))):
##extract data flow
code_tokens,dfg=extract_dataflow(example.source,
parsers["c_sharp" if args.source_lang == "cs" else "java"],
"c_sharp" if args.source_lang == "cs" else "java")
code_tokens=[tokenizer.tokenize('@ '+x)[1:] if idx!=0 else tokenizer.tokenize(x) for idx,x in enumerate(code_tokens)]
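        # ori2cur_pos maps each original code-token index to its (start, end) subtoken span after BPE tokenization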
ori2cur_pos={}
ori2cur_pos[-1]=(0,0)
for i in range(len(code_tokens)):
ori2cur_pos[i]=(ori2cur_pos[i-1][1],ori2cur_pos[i-1][1]+len(code_tokens[i]))
code_tokens=[y for x in code_tokens for y in x]
#truncating
code_tokens=code_tokens[:args.max_source_length-3][:512-3]
source_tokens =[tokenizer.cls_token]+code_tokens+[tokenizer.sep_token]
source_ids = tokenizer.convert_tokens_to_ids(source_tokens)
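        # code tokens get positions starting from pad_token_id+1; data-flow nodes later use position 0 and padding uses pad_token_id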
position_idx = [i+tokenizer.pad_token_id + 1 for i in range(len(source_tokens))]
dfg=dfg[:args.max_source_length-len(source_tokens)]
source_tokens+=[x[0] for x in dfg]
position_idx+=[0 for x in dfg]
source_ids+=[tokenizer.unk_token_id for x in dfg]
padding_length=args.max_source_length-len(source_ids)
position_idx+=[tokenizer.pad_token_id]*padding_length
source_ids+=[tokenizer.pad_token_id]*padding_length
source_mask = [1] * (len(source_tokens))
source_mask+=[0]*padding_length
#reindex
reverse_index={}
for idx,x in enumerate(dfg):
reverse_index[x[1]]=idx
for idx,x in enumerate(dfg):
dfg[idx]=x[:-1]+([reverse_index[i] for i in x[-1] if i in reverse_index],)
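        # dfg_to_dfg: for each node, the indices of nodes it depends on; dfg_to_code: the code-token span each node comes from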
dfg_to_dfg=[x[-1] for x in dfg]
dfg_to_code=[ori2cur_pos[x[1]] for x in dfg]
length=len([tokenizer.cls_token])
dfg_to_code=[(x[0]+length,x[1]+length) for x in dfg_to_code]
#target
if stage=="test":
target_tokens = tokenizer.tokenize("None")
else:
target_tokens = tokenizer.tokenize(example.target)[:args.max_target_length-2]
target_tokens = [tokenizer.cls_token]+target_tokens+[tokenizer.sep_token]
target_ids = tokenizer.convert_tokens_to_ids(target_tokens)
target_mask = [1] *len(target_ids)
padding_length = args.max_target_length - len(target_ids)
target_ids+=[tokenizer.pad_token_id]*padding_length
target_mask+=[0]*padding_length
if example_index < 5:
if stage=='train':
logger.info("*** Example ***")
logger.info("source_tokens: {}".format([x.replace('\u0120','_') for x in source_tokens]))
logger.info("source_ids: {}".format(' '.join(map(str, source_ids))))
logger.info("source_mask: {}".format(' '.join(map(str, source_mask))))
logger.info("position_idx: {}".format(position_idx))
logger.info("dfg_to_code: {}".format(' '.join(map(str, dfg_to_code))))
logger.info("dfg_to_dfg: {}".format(' '.join(map(str, dfg_to_dfg))))
logger.info("target_tokens: {}".format([x.replace('\u0120','_') for x in target_tokens]))
logger.info("target_ids: {}".format(' '.join(map(str, target_ids))))
logger.info("target_mask: {}".format(' '.join(map(str, target_mask))))
features.append(
InputFeatures(
example_index,
source_ids,
position_idx,
dfg_to_code,
dfg_to_dfg,
target_ids,
source_mask,
target_mask,
)
)
return features
class TextDataset(Dataset):
def __init__(self, examples, args):
self.examples = examples
self.args=args
def __len__(self):
return len(self.examples)
def __getitem__(self, item):
#calculate graph-guided masked function
        attn_mask=np.zeros((self.args.max_source_length,self.args.max_source_length),dtype=bool)
#calculate begin index of node and max length of input
node_index=sum([i>1 for i in self.examples[item].position_idx])
max_length=sum([i!=1 for i in self.examples[item].position_idx])
#sequence can attend to sequence
attn_mask[:node_index,:node_index]=True
#special tokens attend to all tokens
for idx,i in enumerate(self.examples[item].source_ids):
if i in [0,2]:
attn_mask[idx,:max_length]=True
        #nodes attend to code tokens that they are identified from
for idx,(a,b) in enumerate(self.examples[item].dfg_to_code):
if a<node_index and b<node_index:
attn_mask[idx+node_index,a:b]=True
attn_mask[a:b,idx+node_index]=True
#nodes attend to adjacent nodes
for idx,nodes in enumerate(self.examples[item].dfg_to_dfg):
for a in nodes:
if a+node_index<len(self.examples[item].position_idx):
attn_mask[idx+node_index,a+node_index]=True
return (torch.tensor(self.examples[item].source_ids),
torch.tensor(self.examples[item].source_mask),
torch.tensor(self.examples[item].position_idx),
torch.tensor(attn_mask),
torch.tensor(self.examples[item].target_ids),
torch.tensor(self.examples[item].target_mask),)
def set_seed(seed=42):
random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--model_type", default=None, type=str, required=True,
help="Model type: e.g. roberta")
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model: e.g. roberta-base" )
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--load_model_path", default=None, type=str,
help="Path to trained model: Should contain the .bin files" )
## Other parameters
parser.add_argument("--train_filename", default=None, type=str,
help="The train filename. Should contain the .jsonl files for this task.")
parser.add_argument("--dev_filename", default=None, type=str,
help="The dev filename. Should contain the .jsonl files for this task.")
parser.add_argument("--test_filename", default=None, type=str,
help="The test filename. Should contain the .jsonl files for this task.")
parser.add_argument("--source_lang", default=None, type=str,
help="The language of input")
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--max_source_length", default=64, type=int,
help="The maximum total source sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--max_target_length", default=32, type=int,
help="The maximum total target sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_test", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument("--train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--beam_size", default=10, type=int,
help="beam size for beam search")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=3, type=int,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--eval_steps", default=-1, type=int,
help="")
parser.add_argument("--train_steps", default=-1, type=int,
help="")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
# print arguments
args = parser.parse_args()
logger.info(args)
# Setup CUDA, GPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.n_gpu = torch.cuda.device_count()
args.device = device
# Set seed
set_seed(args.seed)
# make dir if output_dir not exist
if os.path.exists(args.output_dir) is False:
os.makedirs(args.output_dir)
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name)
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name )
    #build model
encoder = model_class.from_pretrained(args.model_name_or_path,config=config)
decoder_layer = nn.TransformerDecoderLayer(d_model=config.hidden_size, nhead=config.num_attention_heads)
decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)
model=Seq2Seq(encoder=encoder,decoder=decoder,config=config,
beam_size=args.beam_size,max_length=args.max_target_length,
sos_id=tokenizer.cls_token_id,eos_id=tokenizer.sep_token_id)
if args.load_model_path is not None:
logger.info("reload model from {}".format(args.load_model_path))
model.load_state_dict(torch.load(args.load_model_path))
model.to(device)
if args.n_gpu > 1:
# multi-gpu training
model = torch.nn.DataParallel(model)
if args.do_train:
# Prepare training data loader
train_examples = read_examples(args.train_filename)
train_features = convert_examples_to_features(train_examples, tokenizer,args,stage='train')
train_data = TextDataset(train_features,args)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size//args.gradient_accumulation_steps,num_workers=4)
num_train_optimization_steps = args.train_steps
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=len(train_dataloader)*args.num_train_epochs*0.1,num_training_steps=len(train_dataloader)*args.num_train_epochs)
#Start training
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num epoch = %d", args.num_train_epochs)
model.train()
dev_dataset={}
nb_tr_examples, nb_tr_steps,tr_loss,global_step,best_bleu,best_loss = 0, 0,0,0,0,1e6
for epoch in range(args.num_train_epochs):
bar = tqdm(train_dataloader,total=len(train_dataloader))
for batch in bar:
batch = tuple(t.to(device) for t in batch)
source_ids,source_mask,position_idx,att_mask,target_ids,target_mask = batch
loss,_,_ = model(source_ids,source_mask,position_idx,att_mask,target_ids,target_mask)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
tr_loss += loss.item()
train_loss=round(tr_loss*args.gradient_accumulation_steps/(nb_tr_steps+1),4)
bar.set_description("epoch {} loss {}".format(epoch,train_loss))
nb_tr_examples += source_ids.size(0)
nb_tr_steps += 1
loss.backward()
if (nb_tr_steps + 1) % args.gradient_accumulation_steps == 0:
#Update parameters
optimizer.step()
optimizer.zero_grad()
scheduler.step()
global_step += 1
if args.do_eval and epoch in [ int(args.num_train_epochs*(i+1)//20) for i in range(20)]:
#Eval model with dev dataset
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
eval_flag=False
if 'dev_loss' in dev_dataset:
eval_examples,eval_data=dev_dataset['dev_loss']
else:
eval_examples = read_examples(args.dev_filename)
eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='dev')
eval_data = TextDataset(eval_features,args)
dev_dataset['dev_loss']=eval_examples,eval_data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size,num_workers=4)
logger.info("\n***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
#Start Evaling model
model.eval()
eval_loss,tokens_num = 0,0
for batch in eval_dataloader:
batch = tuple(t.to(device) for t in batch)
source_ids,source_mask,position_idx,att_mask,target_ids,target_mask = batch
with torch.no_grad():
_,loss,num = model(source_ids,source_mask,position_idx,att_mask,target_ids,target_mask)
eval_loss += loss.sum().item()
tokens_num += num.sum().item()
                #Print loss of dev dataset
model.train()
eval_loss = eval_loss / tokens_num
result = {'eval_ppl': round(np.exp(eval_loss),5),
'global_step': global_step+1,
'train_loss': round(train_loss,5)}
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
logger.info(" "+"*"*20)
#save last checkpoint
last_output_dir = os.path.join(args.output_dir, 'checkpoint-last')
if not os.path.exists(last_output_dir):
os.makedirs(last_output_dir)
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
output_model_file = os.path.join(last_output_dir, "pytorch_model.bin")
torch.save(model_to_save.state_dict(), output_model_file)
if eval_loss<best_loss:
logger.info(" Best ppl:%s",round(np.exp(eval_loss),5))
logger.info(" "+"*"*20)
best_loss=eval_loss
# Save best checkpoint for best ppl
output_dir = os.path.join(args.output_dir, 'checkpoint-best-ppl')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
output_model_file = os.path.join(output_dir, "pytorch_model.bin")
torch.save(model_to_save.state_dict(), output_model_file)
#Calculate bleu
if 'dev_bleu' in dev_dataset:
eval_examples,eval_data=dev_dataset['dev_bleu']
else:
eval_examples = read_examples(args.dev_filename)
eval_examples = random.sample(eval_examples,min(1000,len(eval_examples)))
eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='test')
eval_data = TextDataset(eval_features,args)
dev_dataset['dev_bleu']=eval_examples,eval_data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size,num_workers=4)
model.eval()
p=[]
for batch in eval_dataloader:
batch = tuple(t.to(device) for t in batch)
source_ids,source_mask,position_idx,att_mask,target_ids,target_mask = batch
with torch.no_grad():
preds = model(source_ids,source_mask,position_idx,att_mask)
for pred in preds:
t=pred[0].cpu().numpy()
t=list(t)
if 0 in t:
t=t[:t.index(0)]
text = tokenizer.decode(t,clean_up_tokenization_spaces=False)
p.append(text)
model.train()
predictions=[]
accs=[]
with open(os.path.join(args.output_dir,"dev.output"),'w') as f, open(os.path.join(args.output_dir,"dev.gold"),'w') as f1:
for ref,gold in zip(p,eval_examples):
predictions.append(ref)
f.write(ref+'\n')
f1.write(gold.target+'\n')
accs.append(ref==gold.target)
dev_bleu=round(_bleu(os.path.join(args.output_dir, "dev.gold"), os.path.join(args.output_dir, "dev.output")),2)
xmatch=round(np.mean(accs)*100,4)
logger.info(" %s = %s "%("bleu-4",str(dev_bleu)))
logger.info(" %s = %s "%("xMatch",str(round(np.mean(accs)*100,4))))
logger.info(" "+"*"*20)
if dev_bleu+xmatch>best_bleu:
logger.info(" Best BLEU+xMatch:%s",dev_bleu+xmatch)
logger.info(" "+"*"*20)
best_bleu=dev_bleu+xmatch
# Save best checkpoint for best bleu
output_dir = os.path.join(args.output_dir, 'checkpoint-best-bleu')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
output_model_file = os.path.join(output_dir, "pytorch_model.bin")
torch.save(model_to_save.state_dict(), output_model_file)
if args.do_test:
files=[]
if args.dev_filename is not None:
files.append(args.dev_filename)
if args.test_filename is not None:
files.append(args.test_filename)
for idx,file in enumerate(files):
logger.info("Test file: {}".format(file))
eval_examples = read_examples(file)
eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='test')
eval_data = TextDataset(eval_features,args)
# Calculate bleu
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size,num_workers=4)
model.eval()
p=[]
for batch in tqdm(eval_dataloader,total=len(eval_dataloader)):
batch = tuple(t.to(device) for t in batch)
source_ids,source_mask,position_idx,att_mask,target_ids,target_mask = batch
with torch.no_grad():
preds = model(source_ids,source_mask,position_idx,att_mask)
for pred in preds:
t=pred[0].cpu().numpy()
t=list(t)
if 0 in t:
t=t[:t.index(0)]
text = tokenizer.decode(t,clean_up_tokenization_spaces=False)
p.append(text)
model.train()
predictions=[]
accs=[]
with open(os.path.join(args.output_dir,"test_{}.output".format(str(idx))),'w') as f, open(os.path.join(args.output_dir,"test_{}.gold".format(str(idx))),'w') as f1:
for ref,gold in zip(p,eval_examples):
predictions.append(ref)
f.write(ref+'\n')
f1.write(gold.target+'\n')
accs.append(ref==gold.target)
dev_bleu=round(_bleu(os.path.join(args.output_dir, "test_{}.gold".format(str(idx))).format(file),
os.path.join(args.output_dir, "test_{}.output".format(str(idx))).format(file)),2)
logger.info(" %s = %s "%("bleu-4",str(dev_bleu)))
logger.info(" %s = %s "%("xMatch",str(round(np.mean(accs)*100,4))))
logger.info(" "+"*"*20)
if __name__ == "__main__":
main()
|
CodeBERT/GraphCodeBERT/translation/run.py/0
|
{
"file_path": "CodeBERT/GraphCodeBERT/translation/run.py",
"repo_id": "CodeBERT",
"token_count": 14964
}
| 232 |
# UniXcoder
This repo provides the code for reproducing the experiments in [UniXcoder: Unified Cross-Modal Pre-training for Code Representation](https://arxiv.org/pdf/2203.03850.pdf). UniXcoder is a unified cross-modal pre-trained model for programming languages that supports both code-related understanding and generation tasks.
Here, we provide three types of UniXcoder:
- [unixcoder-base-unimodal](https://huggingface.co/microsoft/unixcoder-base-unimodal): Pre-trained on the C4 and CodeSearchNet datasets (without NL).
- [unixcoder-base](https://huggingface.co/microsoft/unixcoder-base): Continues pre-training ```unixcoder-base-unimodal``` on NL-PL pairs of the CodeSearchNet dataset. The model supports six languages: **java, ruby, python, php, javascript, and go**. This is the model reported in the paper.
- [unixcoder-base-nine](https://huggingface.co/microsoft/unixcoder-base-nine): Continues pre-training ```unixcoder-base-unimodal``` on NL-PL pairs of the CodeSearchNet dataset plus an additional 1.5M NL-PL pairs in C, C++ and C#. The model supports nine languages: **java, ruby, python, php, javascript, go, c, c++ and c#**.
## 1. Dependency
- pip install torch
- pip install transformers
## 2. Quick Tour
We implement a class to use UniXcoder, and you can follow the code below to try it out.
You can download the class by
```shell
wget https://raw.githubusercontent.com/microsoft/CodeBERT/master/UniXcoder/unixcoder.py
```
```python
import torch
from unixcoder import UniXcoder
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = UniXcoder("microsoft/unixcoder-base")
model.to(device)
```
In the following, we give zero-shot examples for several tasks under different modes, including **code search (encoder-only)**, **code completion (decoder-only)**, **function name prediction (encoder-decoder)**, **API recommendation (encoder-decoder)**, and **code summarization (encoder-decoder)**.
## 3. Encoder-only Mode
For encoder-only mode, we give an example of **code search**.
### 1) Code and NL Embeddings
Here, we give an example of obtaining a code fragment embedding from UniXcoder.
```python
# Encode maximum function
func = "def f(a,b): if a>b: return a else return b"
tokens_ids = model.tokenize([func],max_length=512,mode="<encoder-only>")
source_ids = torch.tensor(tokens_ids).to(device)
tokens_embeddings,max_func_embedding = model(source_ids)
# Encode minimum function
func = "def f(a,b): if a<b: return a else return b"
tokens_ids = model.tokenize([func],max_length=512,mode="<encoder-only>")
source_ids = torch.tensor(tokens_ids).to(device)
tokens_embeddings,min_func_embedding = model(source_ids)
# Encode NL
nl = "return maximum value"
tokens_ids = model.tokenize([nl],max_length=512,mode="<encoder-only>")
source_ids = torch.tensor(tokens_ids).to(device)
tokens_embeddings,nl_embedding = model(source_ids)
print(max_func_embedding.shape)
print(max_func_embedding)
```
```python
torch.Size([1, 768])
tensor([[ 8.6533e-01, -1.9796e+00, -8.6849e-01, 4.2652e-01, -5.3696e-01,
-1.5521e-01, 5.3770e-01, 3.4199e-01, 3.6305e-01, -3.9391e-01,
-1.1816e+00, 2.6010e+00, -7.7133e-01, 1.8441e+00, 2.3645e+00,
...,
-2.9188e+00, 1.2555e+00, -1.9953e+00, -1.9795e+00, 1.7279e+00,
6.4590e-01, -5.2769e-02, 2.4965e-01, 2.3962e-02, 5.9996e-02,
2.5659e+00, 3.6533e+00, 2.0301e+00]], device='cuda:0',
grad_fn=<DivBackward0>)
```
### 2) Similarity between code and NL
Now, we calculate the cosine similarity between the NL query and the two functions. Although the only difference between the two functions is an operator (```<``` and ```>```), UniXcoder can distinguish them.
```python
# Normalize embedding
norm_max_func_embedding = torch.nn.functional.normalize(max_func_embedding, p=2, dim=1)
norm_min_func_embedding = torch.nn.functional.normalize(min_func_embedding, p=2, dim=1)
norm_nl_embedding = torch.nn.functional.normalize(nl_embedding, p=2, dim=1)
max_func_nl_similarity = torch.einsum("ac,bc->ab",norm_max_func_embedding,norm_nl_embedding)
min_func_nl_similarity = torch.einsum("ac,bc->ab",norm_min_func_embedding,norm_nl_embedding)
print(max_func_nl_similarity)
print(min_func_nl_similarity)
```
```python
tensor([[0.3002]], device='cuda:0', grad_fn=<ViewBackward>)
tensor([[0.1881]], device='cuda:0', grad_fn=<ViewBackward>)
```
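Building on this, the same embeddings can be used for zero-shot code search by ranking candidates against a query. The sketch below is only illustrative: it re-uses the `model`, `device`, and embeddings computed above, and the variable names are not part of the UniXcoder API.

```python
# Hypothetical ranking sketch: score each candidate function against the NL query.
candidate_embeddings = torch.cat([max_func_embedding, min_func_embedding], 0)   # shape (2, 768)
candidate_embeddings = torch.nn.functional.normalize(candidate_embeddings, p=2, dim=1)
query_embedding = torch.nn.functional.normalize(nl_embedding, p=2, dim=1)       # shape (1, 768)
scores = torch.einsum("ac,bc->ab", query_embedding, candidate_embeddings)       # cosine similarities
best_index = scores.argmax(-1).item()                                           # 0 -> the maximum function
print(scores, best_index)
```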
## 4. Decoder-only Mode
For decoder-only mode, we give an example of **code completion**.
```python
context = """
def f(data,file_path):
# write json data into file_path in python language
"""
tokens_ids = model.tokenize([context],max_length=512,mode="<decoder-only>")
source_ids = torch.tensor(tokens_ids).to(device)
prediction_ids = model.generate(source_ids, decoder_only=True, beam_size=3, max_length=128)
predictions = model.decode(prediction_ids)
print(context+predictions[0][0])
```
```python
def f(data,file_path):
# write json data into file_path in python language
data = json.dumps(data)
with open(file_path, 'w') as f:
f.write(data)
```
## 5. Encoder-Decoder Mode
For encoder-decoder mode, we give three examples: **function name prediction**, **API recommendation**, and **code summarization**.
### 1) **Function Name Prediction**
```python
context = """
def <mask0>(data,file_path):
data = json.dumps(data)
with open(file_path, 'w') as f:
f.write(data)
"""
tokens_ids = model.tokenize([context],max_length=512,mode="<encoder-decoder>")
source_ids = torch.tensor(tokens_ids).to(device)
prediction_ids = model.generate(source_ids, decoder_only=False, beam_size=3, max_length=128)
predictions = model.decode(prediction_ids)
print([x.replace("<mask0>","").strip() for x in predictions[0]])
```
```python
['write_json', 'write_file', 'to_json']
```
### 2) API Recommendation
```python
context = """
def write_json(data,file_path):
data = <mask0>(data)
with open(file_path, 'w') as f:
f.write(data)
"""
tokens_ids = model.tokenize([context],max_length=512,mode="<encoder-decoder>")
source_ids = torch.tensor(tokens_ids).to(device)
prediction_ids = model.generate(source_ids, decoder_only=False, beam_size=3, max_length=128)
predictions = model.decode(prediction_ids)
print([x.replace("<mask0>","").strip() for x in predictions[0]])
```
```python
['json.dumps', 'json.loads', 'str']
```
### 3) Code Summarization
```python
context = """
# <mask0>
def write_json(data,file_path):
data = json.dumps(data)
with open(file_path, 'w') as f:
f.write(data)
"""
tokens_ids = model.tokenize([context],max_length=512,mode="<encoder-decoder>")
source_ids = torch.tensor(tokens_ids).to(device)
prediction_ids = model.generate(source_ids, decoder_only=False, beam_size=3, max_length=128)
predictions = model.decode(prediction_ids)
print([x.replace("<mask0>","").strip() for x in predictions[0]])
```
```python
['Write JSON to file', 'Write json to file', 'Write a json file']
```
## 6. Fine-tuning
For downstream tasks reported in the paper, please refer to the [downstream-tasks](https://github.com/microsoft/CodeBERT/tree/master/UniXcoder/downstream-tasks) folders.
# Reference
If you use this code or UniXcoder, please consider citing us.
<pre><code>@article{guo2022unixcoder,
title={UniXcoder: Unified Cross-Modal Pre-training for Code Representation},
author={Guo, Daya and Lu, Shuai and Duan, Nan and Wang, Yanlin and Zhou, Ming and Yin, Jian},
journal={arXiv preprint arXiv:2203.03850},
year={2022}
}</code></pre>
|
CodeBERT/UniXcoder/README.md/0
|
{
"file_path": "CodeBERT/UniXcoder/README.md",
"repo_id": "CodeBERT",
"token_count": 2829
}
| 233 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning UniXcoder for code generation.
The script reads .jsonl files containing a natural-language description ('nl') and the target code ('code'),
and trains a sequence-to-sequence model that reuses the pre-trained encoder as its decoder.
"""
from __future__ import absolute_import
import os
import sys
from bleu import _bleu
import pickle
import torch
import json
import random
import logging
import argparse
import numpy as np
from io import open
from itertools import cycle
import torch.nn as nn
from model import Seq2Seq
from tqdm import tqdm, trange
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
from torch.utils.data.distributed import DistributedSampler
from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
RobertaConfig, RobertaModel, RobertaTokenizer)
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
class Example(object):
"""A single training/test example."""
def __init__(self,
idx,
source,
target,
):
self.idx = idx
self.source = source
self.target = target
def read_examples(filename):
"""Read examples from filename."""
examples=[]
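    # each line is a JSON object with an 'nl' query and its target 'code'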
with open(filename,encoding="utf-8") as f:
for idx, line in enumerate(f):
line=line.strip()
js=json.loads(line)
examples.append(
Example(
idx = idx,
source=" ".join(js['nl'].split()),
target = " ".join(js["code"].split()),
)
)
return examples
class InputFeatures(object):
"""A single training/test features for a example."""
def __init__(self,
example_id,
source_ids,
target_ids,
):
self.example_id = example_id
self.source_ids = source_ids
self.target_ids = target_ids
def convert_examples_to_features(examples, tokenizer, args,stage=None):
features = []
for example_index, example in enumerate(examples):
#source
source_tokens = tokenizer.tokenize(example.source)[:args.max_source_length-5]
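        # encoder-decoder input format: [CLS] <encoder-decoder> [SEP] NL tokens <mask0> [SEP]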
source_tokens =[tokenizer.cls_token,"<encoder-decoder>",tokenizer.sep_token]+source_tokens+["<mask0>",tokenizer.sep_token]
source_ids = tokenizer.convert_tokens_to_ids(source_tokens)
padding_length = args.max_source_length - len(source_ids)
source_ids+=[tokenizer.pad_token_id]*padding_length
#target
if stage=="test":
target_tokens = tokenizer.tokenize("None")
else:
target_tokens = tokenizer.tokenize(example.target)[:args.max_target_length-2]
target_tokens = ["<mask0>"]+target_tokens+[tokenizer.sep_token]
target_ids = tokenizer.convert_tokens_to_ids(target_tokens)
padding_length = args.max_target_length - len(target_ids)
target_ids+=[tokenizer.pad_token_id]*padding_length
if example_index < 5:
if stage=='train':
logger.info("*** Example ***")
logger.info("idx: {}".format(example.idx))
logger.info("source_tokens: {}".format([x.replace('\u0120','_') for x in source_tokens]))
logger.info("source_ids: {}".format(' '.join(map(str, source_ids))))
logger.info("target_tokens: {}".format([x.replace('\u0120','_') for x in target_tokens]))
logger.info("target_ids: {}".format(' '.join(map(str, target_ids))))
features.append(
InputFeatures(
example_index,
source_ids,
target_ids,
)
)
return features
def set_seed(seed=42):
random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model: e.g. roberta-base" )
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--train_filename", default=None, type=str,
help="The train filename. Should contain the .jsonl files for this task.")
parser.add_argument("--dev_filename", default=None, type=str,
help="The dev filename. Should contain the .jsonl files for this task.")
parser.add_argument("--test_filename", default=None, type=str,
help="The test filename. Should contain the .jsonl files for this task.")
parser.add_argument("--max_source_length", default=64, type=int,
help="The maximum total source sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--max_target_length", default=32, type=int,
help="The maximum total target sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_test", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument("--train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--beam_size", default=10, type=int,
help="beam size for beam search")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=3, type=int,
help="Total number of training epochs to perform.")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
# print arguments
args = parser.parse_args()
# set log
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',level=logging.INFO )
# set device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.n_gpu = torch.cuda.device_count()
args.device = device
logger.info("device: %s, n_gpu: %s",device, args.n_gpu)
# Set seed
set_seed(args.seed)
# make dir if output_dir not exist
if os.path.exists(args.output_dir) is False:
os.makedirs(args.output_dir)
# build model
tokenizer = RobertaTokenizer.from_pretrained(args.model_name_or_path)
config = RobertaConfig.from_pretrained(args.model_name_or_path)
    # important: you must set is_decoder to True for generation
config.is_decoder = True
encoder = RobertaModel.from_pretrained(args.model_name_or_path,config=config)
model = Seq2Seq(encoder=encoder,decoder=encoder,config=config,
beam_size=args.beam_size,max_length=args.max_target_length,
sos_id=tokenizer.convert_tokens_to_ids(["<mask0>"])[0],eos_id=tokenizer.sep_token_id)
logger.info("Training/evaluation parameters %s", args)
model.to(args.device)
if args.n_gpu > 1:
# multi-gpu training
model = torch.nn.DataParallel(model)
if args.do_train:
# Prepare training data loader
train_examples = read_examples(args.train_filename)
train_features = convert_examples_to_features(train_examples, tokenizer,args,stage='train')
all_source_ids = torch.tensor([f.source_ids for f in train_features], dtype=torch.long)
all_target_ids = torch.tensor([f.target_ids for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_source_ids,all_target_ids)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size // args.gradient_accumulation_steps)
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps=int(len(train_dataloader)*args.num_train_epochs*0.1),
num_training_steps=len(train_dataloader)*args.num_train_epochs)
#Start training
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size * args.gradient_accumulation_steps)
logger.info(" Num epoch = %d", args.num_train_epochs)
model.train()
patience, best_score, losses, dev_dataset = 0, 0, [], {}
for epoch in range(args.num_train_epochs):
for idx,batch in enumerate(train_dataloader):
batch = tuple(t.to(device) for t in batch)
source_ids,target_ids = batch
loss,_,_ = model(source_ids=source_ids,target_ids=target_ids)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
losses.append(loss.item())
loss.backward()
if len(losses) % args.gradient_accumulation_steps == 0:
#Update parameters
optimizer.step()
optimizer.zero_grad()
scheduler.step()
if len(losses) // args.gradient_accumulation_steps % 100 == 0:
logger.info("epoch {} step {} loss {}".format(epoch,
len(losses)//args.gradient_accumulation_steps,
round(np.mean(losses[-100*args.gradient_accumulation_steps:]),4)))
if args.do_eval:
#Eval model with dev dataset
if 'dev_loss' in dev_dataset:
eval_examples,eval_data = dev_dataset['dev_loss']
else:
eval_examples = read_examples(args.dev_filename)
eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='dev')
all_source_ids = torch.tensor([f.source_ids for f in eval_features], dtype=torch.long)
all_target_ids = torch.tensor([f.target_ids for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_source_ids,all_target_ids)
dev_dataset['dev_loss' ]= eval_examples,eval_data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
logger.info("\n***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
#Start Evaling model
model.eval()
eval_loss,tokens_num = 0,0
for batch in eval_dataloader:
batch = tuple(t.to(device) for t in batch)
source_ids,target_ids = batch
with torch.no_grad():
_,loss,num = model(source_ids=source_ids,target_ids=target_ids)
eval_loss += loss.sum().item()
tokens_num += num.sum().item()
                #Print loss of dev dataset
model.train()
eval_loss = eval_loss / tokens_num
result = {'eval_ppl': round(np.exp(eval_loss),5)}
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
logger.info(" "+"*"*20)
#Calculate bleu
if 'dev_bleu' in dev_dataset:
eval_examples,eval_data=dev_dataset['dev_bleu']
else:
eval_examples = read_examples(args.dev_filename)
eval_examples = random.sample(eval_examples,min(1000,len(eval_examples)))
eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='test')
all_source_ids = torch.tensor([f.source_ids for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_source_ids)
dev_dataset['dev_bleu'] = eval_examples,eval_data
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
model.eval()
p=[]
for batch in eval_dataloader:
batch = tuple(t.to(device) for t in batch)
source_ids = batch[0]
with torch.no_grad():
preds = model(source_ids)
# convert ids to text
for pred in preds:
t = pred[0].cpu().numpy()
t = list(t)
if 0 in t:
t = t[:t.index(0)]
text = tokenizer.decode(t,clean_up_tokenization_spaces=False)
p.append(text)
model.train()
predictions = []
EM = []
with open(args.output_dir+"/dev.output",'w') as f, open(args.output_dir+"/dev.gold",'w') as f1:
for ref,gold in zip(p,eval_examples):
predictions.append(ref)
f.write(ref+'\n')
f1.write(gold.target+'\n')
EM.append(ref.split()==gold.target.split())
dev_bleu = _bleu(args.output_dir+"/dev.gold", args.output_dir+"/dev.output")
logger.info(" %s = %s "%("bleu-4",str(dev_bleu)))
logger.info(" %s = %s "%("EM",str(round(np.mean(EM)*100,2))))
logger.info(" "+"*"*20)
dev_score = dev_bleu+round(np.mean(EM)*100,2)
if dev_score>best_score:
logger.info(" Best score:%s",dev_score)
logger.info(" "+"*"*20)
best_score=dev_score
# Save best checkpoint for best bleu
output_dir = os.path.join(args.output_dir, 'checkpoint-best-score')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
output_model_file = os.path.join(output_dir, "pytorch_model.bin")
torch.save(model_to_save.state_dict(), output_model_file)
patience =0
else:
patience +=1
if patience == -1:
break
if args.do_test:
checkpoint_prefix = 'checkpoint-best-score/pytorch_model.bin'
output_dir = os.path.join(args.output_dir, checkpoint_prefix)
model_to_load = model.module if hasattr(model, 'module') else model
model_to_load.load_state_dict(torch.load(output_dir))
eval_examples = read_examples(args.test_filename)
eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='test')
all_source_ids = torch.tensor([f.source_ids for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(all_source_ids)
# Calculate bleu
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
model.eval()
p=[]
for batch in tqdm(eval_dataloader,total=len(eval_dataloader)):
batch = tuple(t.to(device) for t in batch)
source_ids = batch[0]
with torch.no_grad():
preds = model(source_ids)
# convert ids to text
for pred in preds:
t = pred[0].cpu().numpy()
t = list(t)
if 0 in t:
t = t[:t.index(0)]
text = tokenizer.decode(t,clean_up_tokenization_spaces=False)
p.append(text)
predictions=[]
with open(args.output_dir+"/predictions.txt",'w') as f:
for ref,gold in zip(p,eval_examples):
predictions.append(str(gold.idx)+'\t'+ref)
f.write(ref+'\n')
if __name__ == "__main__":
main()
|
CodeBERT/UniXcoder/downstream-tasks/code-generation/run.py/0
|
{
"file_path": "CodeBERT/UniXcoder/downstream-tasks/code-generation/run.py",
"repo_id": "CodeBERT",
"token_count": 9842
}
| 234 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
import torch.nn as nn
from transformers import RobertaTokenizer, RobertaModel, RobertaConfig
class UniXcoder(nn.Module):
def __init__(self, model_name):
"""
Build UniXcoder.
Parameters:
* `model_name`- huggingface model card name. e.g. microsoft/unixcoder-base
"""
super(UniXcoder, self).__init__()
self.tokenizer = RobertaTokenizer.from_pretrained(model_name)
self.config = RobertaConfig.from_pretrained(model_name)
self.config.is_decoder = True
self.model = RobertaModel.from_pretrained(model_name, config=self.config)
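        # lower-triangular (causal) mask, used as the attention bias for decoder-only generation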
self.register_buffer("bias", torch.tril(torch.ones((1024, 1024), dtype=torch.uint8)).view(1,1024, 1024))
self.lm_head = nn.Linear(self.config.hidden_size, self.config.vocab_size, bias=False)
self.lm_head.weight = self.model.embeddings.word_embeddings.weight
self.lsm = nn.LogSoftmax(dim=-1)
self.tokenizer.add_tokens(["<mask0>"],special_tokens=True)
def tokenize(self, inputs, mode="<encoder-only>", max_length=512, padding=False):
"""
Convert string to token ids
Parameters:
* `inputs`- list of input strings.
* `max_length`- The maximum total source sequence length after tokenization.
* `padding`- whether to pad source sequence length to max_length.
* `mode`- which mode the sequence will use. i.e. <encoder-only>, <decoder-only>, <encoder-decoder>
"""
assert mode in ["<encoder-only>", "<decoder-only>", "<encoder-decoder>"]
assert max_length < 1024
tokenizer = self.tokenizer
tokens_ids = []
for x in inputs:
tokens = tokenizer.tokenize(x)
if mode == "<encoder-only>":
tokens = tokens[:max_length-4]
tokens = [tokenizer.cls_token,mode,tokenizer.sep_token] + tokens + [tokenizer.sep_token]
elif mode == "<decoder-only>":
tokens = tokens[-(max_length-3):]
tokens = [tokenizer.cls_token,mode,tokenizer.sep_token] + tokens
else:
tokens = tokens[:max_length-5]
tokens = [tokenizer.cls_token,mode,tokenizer.sep_token] + tokens + [tokenizer.sep_token]
tokens_id = tokenizer.convert_tokens_to_ids(tokens)
if padding:
tokens_id = tokens_id + [self.config.pad_token_id] * (max_length-len(tokens_id))
tokens_ids.append(tokens_id)
return tokens_ids
def decode(self, source_ids):
""" Convert token ids to string """
predictions = []
for x in source_ids:
prediction = []
for y in x:
t = y.cpu().numpy()
t = list(t)
if 0 in t:
t = t[:t.index(0)]
text = self.tokenizer.decode(t,clean_up_tokenization_spaces=False)
prediction.append(text)
predictions.append(prediction)
return predictions
def forward(self, source_ids):
""" Obtain token embeddings and sentence embeddings """
mask = source_ids.ne(self.config.pad_token_id)
token_embeddings = self.model(source_ids,attention_mask = mask.unsqueeze(1) * mask.unsqueeze(2))[0]
sentence_embeddings = (token_embeddings * mask.unsqueeze(-1)).sum(1) / mask.sum(-1).unsqueeze(-1)
return token_embeddings, sentence_embeddings
def generate(self, source_ids, decoder_only = True, eos_id = None, beam_size = 5, max_length = 64):
""" Generate sequence given context (source_ids) """
# Set encoder mask attention matrix: bidirectional for <encoder-decoder>, unidirectional for <decoder-only>
if decoder_only:
mask = self.bias[:,:source_ids.size(-1),:source_ids.size(-1)]
else:
mask = source_ids.ne(self.config.pad_token_id)
mask = mask.unsqueeze(1) * mask.unsqueeze(2)
if eos_id is None:
eos_id = self.config.eos_token_id
device = source_ids.device
# Decoding using beam search
preds = []
zero = torch.LongTensor(1).fill_(0).to(device)
source_len = list(source_ids.ne(1).sum(-1).cpu().numpy())
length = source_ids.size(-1)
encoder_output = self.model(source_ids,attention_mask=mask)
for i in range(source_ids.shape[0]):
context = [[x[i:i+1,:,:source_len[i]].repeat(beam_size,1,1,1) for x in y]
for y in encoder_output.past_key_values]
beam = Beam(beam_size,eos_id,device)
input_ids = beam.getCurrentState().clone()
context_ids = source_ids[i:i+1,:source_len[i]].repeat(beam_size,1)
out = encoder_output.last_hidden_state[i:i+1,:source_len[i]].repeat(beam_size,1,1)
for _ in range(max_length):
if beam.done():
break
if _ == 0:
hidden_states = out[:,-1,:]
out = self.lsm(self.lm_head(hidden_states)).data
beam.advance(out)
input_ids.data.copy_(input_ids.data.index_select(0, beam.getCurrentOrigin()))
input_ids = beam.getCurrentState().clone()
else:
length = context_ids.size(-1)+input_ids.size(-1)
out = self.model(input_ids,attention_mask=self.bias[:,context_ids.size(-1):length,:length],
past_key_values=context).last_hidden_state
hidden_states = out[:,-1,:]
out = self.lsm(self.lm_head(hidden_states)).data
beam.advance(out)
input_ids.data.copy_(input_ids.data.index_select(0, beam.getCurrentOrigin()))
input_ids = torch.cat((input_ids,beam.getCurrentState().clone()),-1)
hyp = beam.getHyp(beam.getFinal())
pred = beam.buildTargetTokens(hyp)[:beam_size]
pred = [torch.cat([x.view(-1) for x in p]+[zero]*(max_length-len(p))).view(1,-1) for p in pred]
preds.append(torch.cat(pred,0).unsqueeze(0))
preds = torch.cat(preds,0)
return preds
class Beam(object):
def __init__(self, size, eos, device):
self.size = size
self.device = device
# The score for each translation on the beam.
self.scores = torch.FloatTensor(size).zero_().to(device)
# The backpointers at each time-step.
self.prevKs = []
# The outputs at each time-step.
self.nextYs = [torch.LongTensor(size).fill_(0).to(device)]
# Has EOS topped the beam yet.
self._eos = eos
self.eosTop = False
# Time and k pair for finished.
self.finished = []
def getCurrentState(self):
"Get the outputs for the current timestep."
batch = self.nextYs[-1].view(-1, 1)
return batch
def getCurrentOrigin(self):
"Get the backpointers for the current timestep."
return self.prevKs[-1]
def advance(self, wordLk):
"""
Given the log-probabilities over words for every last beam `wordLk`,
compute and update the beam search state.
Parameters:
* `wordLk`- log-probs of advancing from the last step (K x words)
"""
numWords = wordLk.size(1)
# Sum the previous scores.
if len(self.prevKs) > 0:
beamLk = wordLk + self.scores.unsqueeze(1).expand_as(wordLk)
# Don't let EOS have children.
for i in range(self.nextYs[-1].size(0)):
if self.nextYs[-1][i] == self._eos:
beamLk[i] = -1e20
else:
beamLk = wordLk[0]
flatBeamLk = beamLk.view(-1)
bestScores, bestScoresId = flatBeamLk.topk(self.size, 0, True, True)
self.scores = bestScores
# bestScoresId is flattened beam x word array, so calculate which
# word and beam each score came from
prevK = torch.div(bestScoresId, numWords, rounding_mode="floor")
self.prevKs.append(prevK)
self.nextYs.append((bestScoresId - prevK * numWords))
for i in range(self.nextYs[-1].size(0)):
if self.nextYs[-1][i] == self._eos:
s = self.scores[i]
self.finished.append((s, len(self.nextYs) - 1, i))
# End condition is when top-of-beam is EOS and no global score.
if self.nextYs[-1][0] == self._eos:
self.eosTop = True
def done(self):
return self.eosTop and len(self.finished) >= self.size
def getFinal(self):
if len(self.finished) == 0:
self.finished.append((self.scores[0], len(self.nextYs) - 1, 0))
self.finished.sort(key=lambda a: -a[0])
if len(self.finished) != self.size:
unfinished=[]
for i in range(self.nextYs[-1].size(0)):
if self.nextYs[-1][i] != self._eos:
s = self.scores[i]
unfinished.append((s, len(self.nextYs) - 1, i))
unfinished.sort(key=lambda a: -a[0])
self.finished+=unfinished[:self.size-len(self.finished)]
return self.finished[:self.size]
def getHyp(self, beam_res):
"""
Walk back to construct the full hypothesis.
"""
hyps=[]
for _,timestep, k in beam_res:
hyp = []
for j in range(len(self.prevKs[:timestep]) - 1, -1, -1):
hyp.append(self.nextYs[j+1][k])
k = self.prevKs[j][k]
hyps.append(hyp[::-1])
return hyps
def buildTargetTokens(self, preds):
sentence=[]
for pred in preds:
tokens = []
for tok in pred:
if tok==self._eos:
break
tokens.append(tok)
sentence.append(tokens)
return sentence
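# Minimal usage sketch for two of the modes above. It assumes network access to
# the public "microsoft/unixcoder-base" checkpoint named in the class docstring.
if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = UniXcoder("microsoft/unixcoder-base").to(device)
    model.eval()
    with torch.no_grad():
        # <encoder-only>: obtain a sentence embedding for a code snippet.
        ids = model.tokenize(["def add(a, b): return a + b"], mode="<encoder-only>", max_length=64)
        _, sentence_emb = model(torch.tensor(ids).to(device))
        print(sentence_emb.shape)
        # <decoder-only>: continue a code prefix with beam search.
        ids = model.tokenize(["def add(a, b):"], mode="<decoder-only>", max_length=64)
        preds = model.generate(torch.tensor(ids).to(device), decoder_only=True, beam_size=3, max_length=32)
        print(model.decode(preds)[0][0])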
|
CodeBERT/UniXcoder/unixcoder.py/0
|
{
"file_path": "CodeBERT/UniXcoder/unixcoder.py",
"repo_id": "CodeBERT",
"token_count": 5208
}
| 235 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from collections import defaultdict
from src.io_utils import Tools
STOP_TOKEN = ['\nclass', '\ndef', '\n#', '\nif', '\nprint']
class PostProcessor:
@staticmethod
def map_task_id_for_solution(predict_path, source_path):
database = dict()
raw_problems = Tools.load_tasks(source_path)
for task_id in raw_problems.keys():
database[raw_problems[task_id]['prompt']] = raw_problems[task_id]
result = []
predictions = Tools.load_jsonl(predict_path)
for pre in predictions:
task = database[pre['prompt']]
if not pre['samples']:
result.append({
'task_id': task['task_id'],
'prompt': pre['prompt'],
'test': task['test'],
'entry_point': task['entry_point'],
'completion': 'empty solution here, execution will fail'
})
for sample in pre['samples']:
processed_code = PostProcessor.solution_extract(sample)
result.append({
'task_id': task['task_id'],
'prompt': pre['prompt'],
'test': task['test'],
'entry_point': task['entry_point'],
'completion': processed_code
})
return result, len(raw_problems)
@staticmethod
def map_task_id_for_test_case(predict_path, source_path):
database = dict()
raw_problems = Tools.load_tasks(source_path)
for task_id in raw_problems.keys():
database[raw_problems[task_id]['prompt']] = raw_problems[task_id]
test_cases_by_task = defaultdict(list)
predictions = Tools.load_jsonl(predict_path)
for pre in predictions:
task = database[pre['prompt']]
for sample in pre['samples']:
test_cases = PostProcessor.test_case_extract(sample, task['entry_point'])
test_cases_by_task[task['task_id']].append(test_cases)
return test_cases_by_task
@staticmethod
def solution_extract(content):
for identifier in STOP_TOKEN:
if identifier in content:
content = content.split(identifier)[0]
return content
@staticmethod
def test_case_extract(content, entry_point):
def _truncate(content):
for identifier in STOP_TOKEN:
if identifier in content:
content = content.split(identifier)[0]
return content.strip()
split_by_assert = [f'assert {part}'.strip() for part in f'assert {content}'.split('assert ') if (entry_point.strip() in part) and len(part.strip()) > 0]
truncated_test_cases = [_truncate(i) for i in split_by_assert]
checked_assertions = [i for i in truncated_test_cases if PostProcessor._check_test_case_validation(i)]
return checked_assertions
@staticmethod
def _check_test_case_validation(test_case):
if len(test_case.strip()) < 1:
return False
if 'assert' not in test_case:
return False
try:
multi_line_test_case = test_case.replace("\n", "\n ")
assert_in_a_block = f'try:\n {multi_line_test_case}\nexcept:\n pass\n'
compile(assert_in_a_block, '', 'exec')
return True
except Exception:
return False
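# Minimal self-contained illustration of the two extractors above, using a
# hypothetical model completion (run as `python -m src.postprocess` from the
# repository root so the `src.io_utils` import resolves).
if __name__ == '__main__':
    solution = '    return a + b\nprint("debug output")'
    print(PostProcessor.solution_extract(solution))
    # -> '    return a + b'
    tests = 'add(1, 2) == 3\nassert add(0, 0) == 0\nprint("done")'
    print(PostProcessor.test_case_extract(tests, 'add'))
    # -> ['assert add(1, 2) == 3', 'assert add(0, 0) == 0']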
|
CodeT/CodeT/src/postprocess.py/0
|
{
"file_path": "CodeT/CodeT/src/postprocess.py",
"repo_id": "CodeT",
"token_count": 1685
}
| 236 |
$schema: http://azureml/sdk-2-0/CommandComponent.json
name: microsoft.msra.dki.verifier_trainer
display_name: Verifier Train
version: 0.1.2-dev1
is_deterministic: True
type: CommandComponent
description: Verifier Train
tags: {category: Verifier Training, contact: [email protected]}
inputs:
wandb_run_name:
type: string
description: A friendly displayed name on wandb.ai for this run
default: GSM8K-5demos-1000examples-alpha0
dataset_name:
type: enum
description: Name of the dataset to be run. GSM8K/CLUTRR/strategyQA
default: GSM8K
enum:
- GSM8K
- CLUTRR
- strategyQA
train_data:
type: path
description: train.txt
test_data:
type: path
description: test.txt
previous_run_dir:
type: path
description: previous_run_dir
optional: true
previous_run_epoch:
type: integer
description: previous_run_epoch
optional: true
default: 1
model_name_or_path:
type: string
description: model_name_or_path
default: microsoft/deberta-v3-large
save_strategy:
type: string
description: save_strategy
default: epoch
evaluation_strategy:
type: string
description: evaluation_strategy
default: epoch
learning_rate:
type: number
description: Learning rate, default is 1e-5
default: 1e-5
per_device_batch_size:
type: integer
description: per_device_batch_size
default: 8
seed:
type: integer
description: Random seed, default is 1
default: 1
do_train:
type: boolean
description: True or False
default: True
do_eval:
type: boolean
description: True or False
default: True
alpha:
type: number
description: The loss weight of stepwise labels, default is 0.0
default: 0.0
num_train_epochs:
type: integer
description: default is 5
default: 5
outputs:
output_dir:
type: path
optional: false
description: The path of the output
environment:
docker:
image: mcr.microsoft.com/azureml/openmpi4.1.0-cuda11.1-cudnn8-ubuntu18.04:20220516.v1
os: Linux
conda:
conda_dependencies:
name: project_environment
channels:
- defaults
- pytorch
dependencies:
- python=3.8
- pip=20.0
- pip:
- torch==1.7.0+cu110
- -f https://download.pytorch.org/whl/torch_stable.html
- wandb==0.12.7
- future
- numpy==1.20.3
- transformers==4.6.0
- datasets==1.11.0
- huggingface-hub==0.0.8
- nltk
- rouge-score
- sacrebleu==1.5.1
- sentencepiece
- sklearn
- deepspeed
- seqeval
- conllu
- multiset
- azureml-sdk
- azureml-dataset-runtime
successful_return_code: Zero
meta:
requireGpu: True
command: >-
export WANDB_API_KEY={your_wandb_api_key} &&
export WANDB_PROJECT=deberta-verifier &&
export WANDB_RUN_ID={inputs.wandb_run_name} &&
export WANDB_TAGS=deberta_verifier &&
export NCCL_DEBUG=INFO &&
cd src &&
deepspeed --num_gpus=8
run_ner.py
--task_type NER
--dataset_name {inputs.dataset_name}
--train_data {inputs.train_data}
--test_data {inputs.test_data}
[--previous_run_dir {inputs.previous_run_dir}]
[--previous_run_epoch {inputs.previous_run_epoch}]
--model_name_or_path {inputs.model_name_or_path}
--output_dir {outputs.output_dir}
--max_seq_length 512
--per_device_train_batch_size {inputs.per_device_batch_size}
--per_device_eval_batch_size 64
--save_strategy {inputs.save_strategy}
--evaluation_strategy {inputs.evaluation_strategy}
--learning_rate {inputs.learning_rate}
--lr_scheduler_type constant
--seed {inputs.seed}
--do_train {inputs.do_train}
--do_eval {inputs.do_eval}
--num_train_epochs {inputs.num_train_epochs}
--logging_steps 10
--overwrite_output_dir
--alpha {inputs.alpha}
--deepspeed ds_config.json
|
CodeT/DIVERSE/code/verifier_train.yaml/0
|
{
"file_path": "CodeT/DIVERSE/code/verifier_train.yaml",
"repo_id": "CodeT",
"token_count": 1664
}
| 237 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import openai
import sys
import os
import configparser
import re
import psutil
from pathlib import Path
from prompt_file import PromptFile
from commands import get_command_result
MULTI_TURN = "off"
SHELL = ""
ENGINE = ''
TEMPERATURE = 0
MAX_TOKENS = 300
DEBUG_MODE = False
# api keys located in the same directory as this file
API_KEYS_LOCATION = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'openaiapirc')
PROMPT_CONTEXT = Path(__file__).with_name('current_context.txt')
# Read the secret_key from the ini file ~/.config/openaiapirc
# The format is:
# [openai]
# organization_id=<organization-id>
# secret_key=<your secret key>
# engine=<engine-name>
def create_template_ini_file():
"""
If the ini file does not exist create it and add secret_key
"""
if not os.path.isfile(API_KEYS_LOCATION):
print('# Please create a file at {} and add your secret key'.format(API_KEYS_LOCATION))
print('# The format is:\n')
print('# [openai]')
print('# organization_id=<organization-id>')
print('# secret_key=<your secret key>\n')
print('# engine=<engine-id>')
sys.exit(1)
def initialize():
"""
Initialize openAI and shell mode
"""
global ENGINE
# Check if file at API_KEYS_LOCATION exists
create_template_ini_file()
config = configparser.ConfigParser()
config.read(API_KEYS_LOCATION)
openai.api_key = config['openai']['secret_key'].strip('"').strip("'")
openai.organization = config['openai']['organization_id'].strip('"').strip("'")
ENGINE = config['openai']['engine'].strip('"').strip("'")
prompt_config = {
'engine': ENGINE,
'temperature': TEMPERATURE,
'max_tokens': MAX_TOKENS,
'shell': SHELL,
'multi_turn': MULTI_TURN,
'token_count': 0
}
return PromptFile(PROMPT_CONTEXT.name, prompt_config)
def is_sensitive_content(content):
"""
Check if the content contains sensitive content
Refer to https://beta.openai.com/docs/engines/content-filter for explanation
"""
if len(content) == 0:
return False
response = openai.Completion.create(
engine="content-filter-alpha",
prompt = "<|endoftext|>"+content+"\n--\nLabel:",
temperature=0,
max_tokens=1,
top_p=0,
logprobs=10
)
output_label = response["choices"][0]["text"]
# This is the probability at which we evaluate that a "2" is likely real
# vs. should be discarded as a false positive
toxic_threshold = -0.355
if output_label == "2":
# If the model returns "2", return its confidence in 2 or other output-labels
logprobs = response["choices"][0]["logprobs"]["top_logprobs"][0]
# If the model is not sufficiently confident in "2",
# choose the most probable of "0" or "1"
# Guaranteed to have a confidence for 2 since this was the selected token.
if logprobs["2"] < toxic_threshold:
logprob_0 = logprobs.get("0", None)
logprob_1 = logprobs.get("1", None)
# If both "0" and "1" have probabilities, set the output label
# to whichever is most probable
if logprob_0 is not None and logprob_1 is not None:
if logprob_0 >= logprob_1:
output_label = "0"
else:
output_label = "1"
# If only one of them is found, set output label to that one
elif logprob_0 is not None:
output_label = "0"
elif logprob_1 is not None:
output_label = "1"
# If neither "0" or "1" are available, stick with "2"
# by leaving output_label unchanged.
# if the most probable token is none of "0", "1", or "2"
# this should be set as unsafe
if output_label not in ["0", "1", "2"]:
output_label = "2"
return (output_label != "0")
def get_query(prompt_file):
"""
uses the stdin to get user input
input is either treated as a command or as a Codex query
Returns: command result or context + input from stdin
"""
# get input from terminal or stdin
if DEBUG_MODE:
entry = input("prompt: ") + '\n'
else:
entry = sys.stdin.read()
# first we check if the input is a command
command_result, prompt_file = get_command_result(entry, prompt_file)
# if input is not a command, then query Codex, otherwise exit command has been run successfully
if command_result == "":
return entry, prompt_file
else:
sys.exit(0)
def detect_shell():
global SHELL
global PROMPT_CONTEXT
parent_process_name = psutil.Process(os.getppid()).name()
POWERSHELL_MODE = bool(re.fullmatch('pwsh|pwsh.exe|powershell.exe', parent_process_name))
BASH_MODE = bool(re.fullmatch('bash|bash.exe', parent_process_name))
ZSH_MODE = bool(re.fullmatch('zsh|zsh.exe', parent_process_name))
SHELL = "powershell" if POWERSHELL_MODE else "bash" if BASH_MODE else "zsh" if ZSH_MODE else "unknown"
shell_prompt_file = Path(os.path.join(os.path.dirname(__file__), "..", "contexts", "{}-context.txt".format(SHELL)))
if shell_prompt_file.is_file():
PROMPT_CONTEXT = shell_prompt_file
if __name__ == '__main__':
detect_shell()
prompt_file = initialize()
try:
user_query, prompt_file = get_query(prompt_file)
config = prompt_file.config if prompt_file else {
'engine': ENGINE,
'temperature': TEMPERATURE,
'max_tokens': MAX_TOKENS,
'shell': SHELL,
'multi_turn': MULTI_TURN,
'token_count': 0
}
# use query prefix to prime Codex for correct scripting language
prefix = ""
# prime codex for the corresponding shell type
if config['shell'] == "zsh":
prefix = '#!/bin/zsh\n\n'
elif config['shell'] == "bash":
prefix = '#!/bin/bash\n\n'
elif config['shell'] == "powershell":
prefix = '<# powershell #>\n\n'
elif config['shell'] == "unknown":
print("\n#\tUnsupported shell type, please use # set shell <shell>")
else:
prefix = '#' + config['shell'] + '\n\n'
codex_query = prefix + prompt_file.read_prompt_file(user_query) + user_query
# get the response from codex
response = openai.Completion.create(engine=config['engine'], prompt=codex_query, temperature=config['temperature'], max_tokens=config['max_tokens'], stop="#")
completion_all = response['choices'][0]['text']
if is_sensitive_content(user_query + '\n' + completion_all):
print("\n# Sensitive content detected, response has been redacted")
else:
print(completion_all)
# append output to prompt context file
if config['multi_turn'] == "on":
if completion_all != "":
prompt_file.add_input_output_pair(user_query, completion_all)
except FileNotFoundError:
print('\n\n# Codex CLI error: Prompt file not found, try again')
except openai.error.RateLimitError:
print('\n\n# Codex CLI error: Rate limit exceeded, try later')
except openai.error.APIConnectionError:
print('\n\n# Codex CLI error: API connection error, are you connected to the internet?')
except openai.error.InvalidRequestError as e:
print('\n\n# Codex CLI error: Invalid request - ' + str(e))
except Exception as e:
print('\n\n# Codex CLI error: Unexpected exception - ' + str(e))
|
Codex-CLI/src/codex_query.py/0
|
{
"file_path": "Codex-CLI/src/codex_query.py",
"repo_id": "Codex-CLI",
"token_count": 3263
}
| 238 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: person.py
Description: Person section of the Cognitive Face API.
"""
from . import util
def add_face(image,
person_group_id,
person_id,
user_data=None,
target_face=None):
"""Add a representative face to a person for identification. The input face
is specified as an image with a `target_face` rectangle. It returns a
`persisted_face_id` representing the added face and this
`persisted_face_id` will not expire. Note `persisted_face_id` is different
from `face_id` which represents the detected face by `face.detect`.
Args:
image: A URL or a file path or a file-like object represents an image.
person_group_id: Specifying the person group containing the target
person.
person_id: Target person that the face is added to.
user_data: Optional parameter. User-specified data about the face list
for any purpose. The maximum length is 1KB.
target_face: Optional parameter. A face rectangle to specify the target
face to be added into the face list, in the format of
"left,top,width,height". E.g. "10,10,100,100". If there are more
than one faces in the image, `target_face` is required to specify
which face to add. No `target_face` means there is only one face
detected in the entire image.
Returns:
A new `persisted_face_id`.
"""
url = 'persongroups/{}/persons/{}/persistedFaces'.format(
person_group_id, person_id)
headers, data, json = util.parse_image(image)
params = {
'userData': user_data,
'targetFace': target_face,
}
return util.request(
'POST', url, headers=headers, params=params, json=json, data=data)
def create(person_group_id, name, user_data=None):
"""Create a new person in a specified person group. A newly created person
has no registered faces; you can call `person.add_face` to add faces to the
person.
Args:
person_group_id: Specifying the person group containing the target
person.
name: Display name of the target person. The maximum length is 128.
user_data: Optional parameter. User-specified data about the face list
for any purpose. The maximum length is 1KB.
Returns:
A new `person_id` created.
"""
url = 'persongroups/{}/persons'.format(person_group_id)
json = {
'name': name,
'userData': user_data,
}
return util.request('POST', url, json=json)
def delete(person_group_id, person_id):
"""Delete an existing person from a person group. Persisted face images of
the person will also be deleted.
Args:
person_group_id: Specifying the person group containing the person.
person_id: The target `person_id` to delete.
Returns:
An empty response body.
"""
url = 'persongroups/{}/persons/{}'.format(person_group_id, person_id)
return util.request('DELETE', url)
def delete_face(person_group_id, person_id, persisted_face_id):
"""Delete a face from a person. Relative image for the persisted face will
also be deleted.
Args:
person_group_id: Specifying the person group containing the target
person.
person_id: Specifying the person that the target persisted face belongs
to.
persisted_face_id: The persisted face to remove. This
`persisted_face_id` is returned from `person.add`.
Returns:
An empty response body.
"""
url = 'persongroups/{}/persons/{}/persistedFaces/{}'.format(
person_group_id, person_id, persisted_face_id)
return util.request('DELETE', url)
def get(person_group_id, person_id):
"""Retrieve a person's information, including registered persisted faces,
`name` and `user_data`.
Args:
person_group_id: Specifying the person group containing the target
person.
person_id: Specifying the target person.
Returns:
The person's information.
"""
url = 'persongroups/{}/persons/{}'.format(person_group_id, person_id)
return util.request('GET', url)
def get_face(person_group_id, person_id, persisted_face_id):
"""Retrieve information about a persisted face (specified by
`persisted_face_ids`, `person_id` and its belonging `person_group_id`).
Args:
person_group_id: Specifying the person group containing the target
person.
person_id: Specifying the target person that the face belongs to.
persisted_face_id: The `persisted_face_id` of the target persisted face
of the person.
Returns:
The target persisted face's information (`persisted_face_id` and
`user_data`).
"""
url = 'persongroups/{}/persons/{}/persistedFaces/{}'.format(
person_group_id, person_id, persisted_face_id)
return util.request('GET', url)
def lists(person_group_id, start=None, top=None):
"""List `top` persons in a person group with `person_id` greater than
`start`, and retrieve person information (including `person_id`, `name`,
`user_data` and `persisted_face_ids` of registered faces of the person).
Args:
person_group_id: `person_group_id` of the target person group.
start: List persons from the least `person_id` greater than this.
top: The number of persons to list, ranging in [1, 1000]. Default is
1000.
Returns:
An array of person information that belong to the person group.
"""
url = 'persongroups/{}/persons'.format(person_group_id)
params = {
'start': start,
'top': top,
}
return util.request('GET', url, params=params)
def update(person_group_id, person_id, name=None, user_data=None):
"""Update `name` or `user_data` of a person.
Args:
person_group_id: Specifying the person group containing the target
person.
person_id: `person_id` of the target person.
name: Target person's display name. Maximum length is 128.
user_data: User-provided data attached to the person. Maximum length is
16KB.
Returns:
An empty response body.
"""
url = 'persongroups/{}/persons/{}'.format(person_group_id, person_id)
json = {
'name': name,
'userData': user_data,
}
return util.request('PATCH', url, json=json)
def update_face(person_group_id, person_id, persisted_face_id, user_data=None):
"""Update a person persisted face's `user_data` field.
Args:
person_group_id: Specifying the person group containing the target
person.
person_id: `person_id` of the target person.
persisted_face_id: `persisted_face_id` of the target face, which is
persisted and will not expire.
user_data: Optional parameter. Attach `user_data` to person's
persisted face. The size limit is 1KB.
Returns:
An empty response body.
"""
url = 'persongroups/{}/persons/{}/persistedFaces/{}'.format(
person_group_id, person_id, persisted_face_id)
json = {
'userData': user_data,
}
return util.request('PATCH', url, json=json)
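# Typical flow (illustrative sketch only): it assumes a subscription key and
# endpoint have already been configured through this SDK's `util.Key` /
# `util.BaseUrl` helpers, and that the person group and image below exist.
#
#     import cognitive_face as CF
#     person = CF.person.create('demo-group', 'Alice')            # -> {'personId': ...}
#     CF.person.add_face('alice.jpg', 'demo-group', person['personId'])
#     print(CF.person.get('demo-group', person['personId']))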
|
Cognitive-Face-Python/cognitive_face/person.py/0
|
{
"file_path": "Cognitive-Face-Python/cognitive_face/person.py",
"repo_id": "Cognitive-Face-Python",
"token_count": 2816
}
| 239 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: main.py
Description: main script for Python SDK sample.
"""
from view import MyApp
if __name__ == "__main__":
app = MyApp(False)
app.MainLoop()
|
Cognitive-Face-Python/sample/__main__.py/0
|
{
"file_path": "Cognitive-Face-Python/sample/__main__.py",
"repo_id": "Cognitive-Face-Python",
"token_count": 82
}
| 240 |
export CUDA_VISIBLE_DEVICES=3
python t5_run_eval.py \
--model_name_or_path ./checkpoint/Com/ContrastExp_finetune_set1_seed1/checkpoint-50000 \
--subtask Com \
--validation_file test \
--ebatch_size 16 \
--set set1
|
ContextualSP/abstraction_probing/code/t5_code/Com_ContrastExp_test.sh/0
|
{
"file_path": "ContextualSP/abstraction_probing/code/t5_code/Com_ContrastExp_test.sh",
"repo_id": "ContextualSP",
"token_count": 85
}
| 241 |
#!/usr/bin/env python
# coding=utf-8
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import nltk
import numpy as np
import transformers
from datasets import load_dataset, load_metric
from filelock import FileLock
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
set_seed
)
from transformers.file_utils import is_offline_mode
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
try:
nltk.data.find("tokenizers/punkt")
except (LookupError, OSError):
if is_offline_mode():
raise LookupError(
"Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files"
)
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
freeze_model_parameter: Optional[bool] = field(
default=False, metadata={"help": "You could tune all model parameters if you want."}
)
dev_split: Optional[int] = field(
default=-1, metadata={"help": "Index of the evaluation shard to decode; -1 writes all predictions to a single output file."}
)
log_metrics: Optional[bool] = field(
default=False, metadata={"help": "Whether to append the computed metrics to the prediction output file."}
)
log_label: Optional[bool] = field(
default=False, metadata={"help": "Whether to write gold labels alongside predictions in the output file."}
)
eval_type: Optional[str] = field(
default='dev', metadata={"help": "Evaluation split tag used in the output file name, e.g. dev or test."}
)
log_metrics_only: Optional[bool] = field(
default=False, metadata={"help": "Whether to write only per-example metric scores instead of decoded predictions."}
)
contain_step: Optional[bool] = field(
default=False, metadata={"help": "Whether target sequences contain intermediate steps; if so, the step-aware accuracy metric is used."}
)
with_constraint: Optional[bool] = field(
default=True, metadata={"help": "Whether to use the constrained-decoding T5 model (t5_model_constraint) instead of the plain one."}
)
metric_bleu: Optional[bool] = field(
default=False, metadata={"help": "Whether to evaluate with sacrebleu instead of sequence accuracy."}
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
input_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."},
)
output_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the summaries (for summarization)."},
)
train_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a jsonlines or csv file)."}
)
validation_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input evaluation data file to evaluate the metrics (rouge) on "
"(a jsonlines or csv file)."
},
)
test_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input test data file to evaluate the metrics (rouge) on " "(a jsonlines or csv file)."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_source_length: Optional[int] = field(
default=1024,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
max_target_length: Optional[int] = field(
default=1024,
metadata={
"help": "The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
val_max_target_length: Optional[int] = field(
default=None,
metadata={
"help": "The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`."
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": "Whether to pad all samples to model maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
},
)
num_beams: Optional[int] = field(
default=None,
metadata={
"help": "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, "
"which is used during ``evaluate`` and ``predict``."
},
)
ignore_pad_token_for_loss: bool = field(
default=True,
metadata={
"help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not."
},
)
source_prefix: Optional[str] = field(
default=None, metadata={"help": "A prefix to add before every source text (useful for T5 models)."}
)
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and self.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
if self.val_max_target_length is None:
self.val_max_target_length = self.max_target_length
def build_special_tokens(num_special_tokens: int):
additional_special_tokens = []
for i in range(num_special_tokens):
additional_special_tokens.append("[special" + str(i) + "]")
return additional_special_tokens
def freezing_model_parameters(model):
"""
Setup the optimizer.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through :obj:`optimizers`, or subclass and override this method in a subclass.
"""
for name, parameter in model.named_parameters():
parameter.requires_grad = False
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# training_args.report_to = ["wandb"]
if model_args.with_constraint is True:
from t5_model_constraint import T5Generation
else:
assert model_args.with_constraint is False
from t5_model import T5Generation
if data_args.source_prefix is None and model_args.model_name_or_path in [
"t5-small",
"t5-base",
"t5-large",
"t5-3b",
"t5-11b",
]:
logger.warning(
"You're running a t5 model but didn't provide a source prefix, which is the expected, e.g. with "
"`--source_prefix 'summarize: ' `"
)
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files this script will use the first column for the full texts and the second column for the
# summaries (unless you specify column names for this with the `text_column` and `summary_column` arguments).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.validation_file.split(".")[-1]
if data_args.test_file is not None:
data_files["test"] = data_args.test_file
extension = data_args.test_file.split(".")[-1]
datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None
)
config.no_repeat_ngram_size = 0
config.max_length = 1024
config.early_stopping = True
global log_metrics
log_metrics = model_args.log_metrics
global log_label
log_label = model_args.log_label
global eval_type
eval_type = model_args.eval_type
global log_metrics_only
log_metrics_only = model_args.log_metrics_only
global metric_bleu
metric_bleu = model_args.metric_bleu
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None
)
model = T5Generation.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
# Preprocessing the datasets.
# We need to tokenize inputs and targets.
if training_args.do_train:
column_names = datasets["train"].column_names
elif training_args.do_eval:
column_names = datasets["validation"].column_names
elif training_args.do_predict:
column_names = datasets["test"].column_names
else:
logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.")
return
# Get the column names for input/target.
if data_args.input_column is None:
input_column = column_names[0]
else:
input_column = data_args.input_column
if input_column not in column_names:
raise ValueError(
f"--input_column' value '{data_args.text_column}' needs to be one of: {', '.join(column_names)}"
)
if data_args.output_column is None:
output_column = column_names[1]
else:
output_column = data_args.output_column
if output_column not in column_names:
raise ValueError(
f"--output_column' value '{data_args.summary_column}' needs to be one of: {', '.join(column_names)}"
)
# Temporarily set max_target_length for training.
if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"):
logger.warning(
"label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for"
f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory"
)
def preprocess_function(examples):
inputs = examples[input_column]
targets = examples[output_column]
inputs = ['<s>' + input for input in inputs]
padding = "max_length" if data_args.pad_to_max_length else False
model_inputs = tokenizer(inputs, max_length=data_args.max_source_length,
padding=padding, truncation=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=data_args.max_target_length,
padding=padding, truncation=True)
# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
# padding in the loss.
if padding == "max_length" and data_args.ignore_pad_token_for_loss:
labels["input_ids"] = [
[(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
]
model_inputs["labels"] = labels["input_ids"]
return model_inputs
if training_args.do_eval:
max_target_length = data_args.val_max_target_length
if "validation" not in datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = datasets["validation"]
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
eval_dataset = eval_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
)
# Data collator
label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=8 if training_args.fp16 else None,
)
# Metric
if model_args.contain_step is False:
metric = load_metric("./seq_acc")
else:
metric = load_metric("./seq_acc_with_step")
if metric_bleu:
metric = load_metric('sacrebleu')
def postprocess_text(preds, labels):
preds = [pred.strip() for pred in preds]
labels = [label.strip() for label in labels]
return preds, labels
def compute_metrics(eval_preds):
preds, labels = eval_preds
if isinstance(preds, tuple):
preds = preds[0]
decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
if data_args.ignore_pad_token_for_loss:
# Replace -100 in the labels as we can't decode them.
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
# Some simple post-processing
decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
if log_metrics_only:
if metric.name == "sacrebleu":
decoded_labels = [[label] for label in decoded_labels]
# pdb.set_trace()
with open(dev_file, 'w', encoding="utf8") as f:
for pred, label in zip(decoded_preds, decoded_labels):
result = metric.compute(predictions=[pred], references=[label])
f.write(str(result['score']) + '\n')
else:
with open(dev_file, 'w', encoding="utf8") as f:
if log_label:
for label, pred in zip(decoded_labels, decoded_preds):
f.write('label:' + str(label) + '\n')
f.write('pred: ' + str(pred) + '\n\n')
else:
for label, pred in zip(decoded_labels, decoded_preds):
f.write(pred + '\n')
if metric.name == "sacrebleu":
decoded_labels = [[label] for label in decoded_labels]
result = metric.compute(predictions=decoded_preds, references=decoded_labels)
elif metric.name == "rouge":
result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
result = {key: value.mid.fmeasure * 100 for key, value in result.items()}
prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]
result["gen_len"] = np.mean(prediction_lens)
result = {k: round(v, 4) for k, v in result.items()}
else:
result = metric.compute(predictions=decoded_preds, references=decoded_labels)
if log_metrics:
with open(dev_file, 'a', encoding="utf8") as f:
f.write(str(result))
return result
if model_args.freeze_model_parameter:
freezing_model_parameters(model)
# Initialize our Trainer
trainer = Seq2SeqTrainer(
model=model,
args=training_args,
train_dataset=None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics if training_args.predict_with_generate else None
)
# Evaluation
results = {}
global dev_file
if model_args.dev_split == -1:
dev_file = model_args.model_name_or_path + '_' + eval_type + '_beam' + str(data_args.num_beams) + '.txt'
else:
if not os.path.exists(model_args.model_name_or_path + '_' + eval_type + '_beam' + str(data_args.num_beams) + '/'):
try:
os.mkdir(model_args.model_name_or_path + '_' + eval_type + '_beam' + str(data_args.num_beams) + '/')
except FileExistsError:
print('path already exists')
dev_file = model_args.model_name_or_path + '_' + eval_type + '_beam' + str(data_args.num_beams) + '/' + str(model_args.dev_split) + '.txt'
if log_metrics_only:
dev_file = dev_file.replace('txt', 'score')
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate(
max_length=config.max_length, num_beams=data_args.num_beams, metric_key_prefix="eval"
)
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
return results
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
|
ContextualSP/abstraction_probing/code/t5_code/t5_eval_model.py/0
|
{
"file_path": "ContextualSP/abstraction_probing/code/t5_code/t5_eval_model.py",
"repo_id": "ContextualSP",
"token_count": 10141
}
| 242 |
<h1 align="center"> AdapterShare: Task Correlation Modeling with Adapter Differentiation </h1>
<div align=center><img width="350" height="350" src="assets/adaptershare.png"/></div>
## Introduction
Thanks to the development of pre-trained language models, multitask learning (MTL) methods have achieved great success in natural language understanding. However, current MTL methods pay more attention to task selection or model design to fuse as much knowledge as possible, while the intrinsic task correlation is often neglected. It is important to learn sharing strategies among multiple tasks rather than sharing everything. In this paper, we propose AdapterShare, an adapter differentiation method to explicitly model task correlation among multiple tasks. AdapterShare is automatically learned based on the gradients on tiny held-out validation data. Compared to single-task learning and fully shared MTL methods, our proposed method obtains obvious performance improvements. Compared to the existing MTL method AdapterFusion, AdapterShare achieves an absolute average improvement of 1.90 points on five dialogue understanding tasks and 2.33 points on NLU tasks.
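The sharing decision is driven by how well different tasks' gradients agree on a small held-out split. The snippet below is only a minimal sketch of that signal, assuming a shared encoder `model`, a per-task loss function, and a dict mapping task names to one held-out batch; every name in it is hypothetical and it is not the implementation used in this repository (see the training scripts below for the actual entry points).
```python
import torch
import torch.nn.functional as F

def task_gradient(model, loss_fn, batch):
    """Flattened gradient of one task's held-out loss w.r.t. the shared parameters."""
    model.zero_grad()
    loss = loss_fn(model(batch["inputs"]), batch["labels"])
    params = [p for p in model.parameters() if p.requires_grad]
    grads = torch.autograd.grad(loss, params, allow_unused=True)
    return torch.cat([(g if g is not None else torch.zeros_like(p)).reshape(-1)
                      for g, p in zip(grads, params)])

def task_correlation(model, loss_fn, heldout_batches):
    """Pairwise cosine similarity between per-task gradients; tasks whose
    gradients point in similar directions are candidates for sharing an adapter."""
    grads = {t: task_gradient(model, loss_fn, b) for t, b in heldout_batches.items()}
    tasks = list(grads)
    return {(a, b): F.cosine_similarity(grads[a], grads[b], dim=0).item()
            for i, a in enumerate(tasks) for b in tasks[i + 1:]}
```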
## Quickstart
### Setup Environment
#### Install via pip:
1. install requirements </br>
```> pip install -r requirements.txt```
2. install our modified `adapters` package </br>
```> pip install git+https://github.com/WowCZ/adapter-transformers.git```
#### prepare the nlu dataset:
1. Download data </br>
```> sh download.sh``` </br>
Please refer to download GLUE dataset: https://gluebenchmark.com/
2. Preprocess data </br>
```> sh experiments/glue/prepro.sh```
### Training scripts:
1. Individual training script (baseline multitask learning): </br>
```> sh scripts/adapter_train.sh -tr wnli -te wnli -ls 50 -ss 100 > LOGS/mtnlu_wnli.log``` </br>
2. AdapterShare training script: </br>
```> sh scripts/adapter_train.sh -tr mnli,cola,qnli,qqp,rte,sst,stsb,wnli -te mnli_matched,mnli_mismatched,cola,qnli,qqp,rte,sst,stsb,wnli -ls 1000 -ss 2000 > LOGS/mtnlu_unified.log``` </br>
## Claim
In this repository, the code of NLU dataset collection and baseline multitask learning is based on https://github.com/namisan/mt-dnn. The AdapterShare training process is modified from https://github.com/adapter-hub/adapter-transformers.
|
ContextualSP/adaptershare/README.md/0
|
{
"file_path": "ContextualSP/adaptershare/README.md",
"repo_id": "ContextualSP",
"token_count": 648
}
| 243 |
# Copyright (c) Microsoft. All rights reserved.
import random
import torch
import numpy
import subprocess
class AverageMeter(object):
"""Computes and stores the average and current value."""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
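# Example (illustrative): track a running average of per-batch loss values.
#   meter = AverageMeter()
#   for loss in (0.9, 0.7, 0.5):
#       meter.update(loss, n=32)  # n = number of samples in the batch
#   print(meter.avg)              # -> 0.7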
def set_environment(seed, set_cuda=False):
random.seed(seed)
numpy.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available() and set_cuda:
torch.cuda.manual_seed_all(seed)
def patch_var(v, cuda=True):
if cuda:
v = v.cuda(non_blocking=True)
return v
def get_gpu_memory_map():
result = subprocess.check_output(
["nvidia-smi", "--query-gpu=memory.used", "--format=csv,nounits,noheader"],
encoding="utf-8",
)
gpu_memory = [int(x) for x in result.strip().split("\n")]
gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))
return gpu_memory_map
def get_pip_env():
result = subprocess.call(["pip", "freeze"])
return result
|
ContextualSP/adaptershare/data_utils/utils.py/0
|
{
"file_path": "ContextualSP/adaptershare/data_utils/utils.py",
"repo_id": "ContextualSP",
"token_count": 538
}
| 244 |
cola:
data_format: PremiseOnly
dropout_p: 0.05
enable_san: false
metric_meta:
- ACC
- MCC
loss: CeCriterion
kd_loss: MseCriterion
adv_loss: SymKlCriterion
n_class: 2
task_type: Classification
mnli:
data_format: PremiseAndOneHypothesis
dropout_p: 0.1
enable_san: false
labels:
- contradiction
- neutral
- entailment
metric_meta:
- ACC
loss: CeCriterion
kd_loss: MseCriterion
adv_loss: SymKlCriterion
n_class: 3
split_names:
- train
- matched_dev
- mismatched_dev
- matched_test
- mismatched_test
task_type: Classification
mrpc:
data_format: PremiseAndOneHypothesis
enable_san: false
metric_meta:
- ACC
- F1
loss: CeCriterion
kd_loss: MseCriterion
adv_loss: SymKlCriterion
n_class: 2
task_type: Classification
qnli:
data_format: PremiseAndOneHypothesis
enable_san: false
labels:
- not_entailment
- entailment
metric_meta:
- ACC
loss: CeCriterion
kd_loss: MseCriterion
adv_loss: SymKlCriterion
n_class: 2
task_type: Classification
qqp:
data_format: PremiseAndOneHypothesis
enable_san: false
metric_meta:
- ACC
- F1
loss: CeCriterion
kd_loss: MseCriterion
adv_loss: SymKlCriterion
n_class: 2
task_type: Classification
rte:
data_format: PremiseAndOneHypothesis
enable_san: false
labels:
- not_entailment
- entailment
metric_meta:
- ACC
loss: CeCriterion
kd_loss: MseCriterion
adv_loss: SymKlCriterion
n_class: 2
task_type: Classification
sst:
data_format: PremiseOnly
enable_san: false
metric_meta:
- ACC
loss: CeCriterion
kd_loss: MseCriterion
adv_loss: SymKlCriterion
n_class: 2
task_type: Classification
stsb:
data_format: PremiseAndOneHypothesis
enable_san: false
metric_meta:
- Pearson
- Spearman
n_class: 1
loss: MseCriterion
kd_loss: MseCriterion
adv_loss: MseCriterion
task_type: Regression
wnli:
data_format: PremiseAndOneHypothesis
enable_san: false
metric_meta:
- ACC
loss: CeCriterion
kd_loss: MseCriterion
adv_loss: SymKlCriterion
n_class: 2
task_type: Classification
|
ContextualSP/adaptershare/experiments/glue/glue_task_def.yml/0
|
{
"file_path": "ContextualSP/adaptershare/experiments/glue/glue_task_def.yml",
"repo_id": "ContextualSP",
"token_count": 819
}
| 245 |
# Copyright (c) Microsoft. All rights reserved.
# some codes are from: https://github.com/namisan/mt-dnn
# please cite the (arXiv preprint arXiv:2002.07972) if you use the script
# by Xiaodong Liu
# [email protected]
# 10/08/2021
import os
import argparse
from random import shuffle
import json
import pandas as pd
import random
def load_boolq(file):
rows = []
with open(file, encoding="utf8") as f:
for line in f:
data = json.loads(line)
label = data['label'] if 'label' in data else False
label = 1 if label else 0
uid = data['idx']
sample = {'uid': uid, 'premise': data['passage'], 'hypothesis': data['question'], 'label': label}
rows.append(sample)
return rows
def load_cb(file):
rows = []
with open(file, encoding="utf8") as f:
for line in f:
data = json.loads(line)
label = data['label'] if 'label' in data else 0
uid = data['idx']
sample = {'uid': uid, 'premise': data['premise'], 'hypothesis': data['hypothesis'], 'label': label}
rows.append(sample)
return rows
def load_multirc(file):
rows = []
with open(file, encoding="utf8") as f:
for line in f:
data = json.loads(line)
pidx = data['idx']
passage = data['passage']['text']
questionts = data['passage']['questions']
assert type(questionts) is list
for question in questionts:
q = question['question']
qidx = question['idx']
answers = question['answers']
for answer in answers:
a = answer['text']
aidx = answer['idx']
label = answer['label'] if 'label' in answer else 0
uid = "{}_{}_{}".format(pidx, qidx, aidx)
sample = {'uid': uid, 'premise': passage, 'hypothesis': q, 'label': label, 'answer': a}
rows.append(sample)
return rows
def load_multirc_mtdnn(file):
rows = []
with open(file, encoding="utf8") as f:
for line in f:
data = json.loads(line)
pidx = data['idx']
passage = data['passage']['text']
questionts = data['passage']['questions']
assert type(questionts) is list
for question in questionts:
q = question['question']
qidx = question['idx']
answers = question['answers']
for answer in answers:
a = answer['text']
aidx = answer['idx']
label = answer['label'] if 'label' in answer else 0
uid = "{}_{}_{}".format(pidx, qidx, aidx)
sample = {'uid': uid, 'premise': passage, 'hypothesis': "{} {}".format(q, a), 'label': label}
rows.append(sample)
return rows
def load_wic(file):
rows = []
with open(file, encoding="utf8") as f:
for line in f:
data = json.loads(line)
label = data['label'] if 'label' in data else False
label = 1 if label else 0
uid = data['idx']
word = data['word']
premise = data['sentence1']
hyp = data['sentence2']
sample = {'uid': uid, 'premise': word, 'hypothesis': premise, 'hypothesis_extra': hyp, 'label': label}
rows.append(sample)
return rows
def load_wic_mtdnn(file):
rows = []
with open(file, encoding="utf8") as f:
for line in f:
data = json.loads(line)
label = data['label'] if 'label' in data else False
label = 1 if label else 0
uid = data['idx']
word = data['word']
premise = data['sentence1']
hyp = data['sentence2']
# the given word is placed at the beginning of the sequence
sample = {'uid': uid, 'premise': "{} {}".format(word, premise), 'hypothesis': hyp, 'label': label}
rows.append(sample)
return rows
def load_record(file):
rows = []
is_training = "train" in file or "val" in file
with open(file, encoding="utf8") as f:
cnt = 0
for line in f:
data = json.loads(line)
passage = data['passage']['text']
passage = passage.replace('\n', ' ')
passage_idx = data['idx']
entities = data['passage']['entities']
entities_set = set([passage[entity["start"] : entity["end"] + 1] for entity in entities])
qas = data['qas']
for qa in qas:
query = qa['query']
answers_dict = {}
answers_set = set()
if "answers" in qa:
answers_dict = {(answer["start"], answer["end"]): answer["text"] for answer in qa["answers"]}
answers_set= set(answer["text"] for answer in qa["answers"])
query_idx = qa['idx']
if is_training:
negative_set = entities_set - answers_set
# enumerate the whole negative set
positives = list(answers_set)
for negative in negative_set:
orders = [0, 1]
# shuffle the order of pos/negative samples
if "train" in file: shuffle(orders)
query_n = query.replace("@placeholder", negative)
positive = random.sample(positives, 1).pop()
query_p = query.replace("@placeholder", positive)
queries = [query_n, query_p]
queries = [queries[idx] for idx in orders]
new_answers = [negative, positive]
new_answers = [new_answers[idx] for idx in orders]
label = 1 if orders[0] == 0 else 0
sample = {'uid': str(query_idx), 'premise': passage, 'hypothesis': queries[0], 'hypothesis_extra': queries[1], 'label': label, "answer": str(new_answers)}
rows.append(sample)
else:
for entity in entities_set:
label = False
if len(answers_dict) > 0:
if entity in answers_set:
label = True
updated_query = query.replace("@placeholder", entity)
uid = str(query_idx)
label = 1 if label else 0
sample = {'uid': uid, 'premise': passage, 'hypothesis': updated_query, 'hypothesis_extra': updated_query, 'label': label, "answer": entity}
rows.append(sample)
return rows
def load_record_mtdnn(file):
rows = []
    is_training = "train" in file
with open(file, encoding="utf8") as f:
cnt = 0
for line in f:
data = json.loads(line)
passage = data['passage']['text']
passage = passage.replace('\n', ' ')
passage_idx = data['idx']
entities = data['passage']['entities']
entities_set = set([passage[entity["start"] : entity["end"] + 1] for entity in entities])
qas = data['qas']
for qa in qas:
query = qa['query']
answers_dict = {}
answers_set = set()
if "answers" in qa:
answers_dict = {(answer["start"], answer["end"]): answer["text"] for answer in qa["answers"]}
answers_set= set(answer["text"] for answer in qa["answers"])
query_idx = qa['idx']
if is_training:
negative_set = entities_set - answers_set
                    # enumerate over the negative set
positives = list(answers_set)
for negative in negative_set:
orders = [0, 1]
# shuffle the order of pos/negative samples
if "train" in file: shuffle(orders)
query_n = query.replace("@placeholder", negative)
positive = random.sample(positives, 1).pop()
query_p = query.replace("@placeholder", positive)
queries = [query_n, query_p]
queries = [queries[idx] for idx in orders]
new_answers = [negative, positive]
new_answers = [new_answers[idx] for idx in orders]
label = 1 if orders[0] == 0 else 0
labels = [0, 0]
labels[label] = 1
sample = {'uid': str(query_idx), 'premise': passage, 'hypothesis': [queries[0], queries[1]], 'label': labels, 'choice': str(new_answers), "answer": str(list(answers_set))}
rows.append(sample)
else:
entities_set = list(entities_set)
hypothesis = [query.replace("@placeholder", entity) for entity in entities_set]
label = [1 if entity in answers_set else 0 for entity in entities_set]
uid = str(query_idx)
sample = {'uid': uid, 'premise': passage, 'hypothesis': hypothesis, 'label': label, "choice": str(entities_set), "answer": str(list(answers_set))}
rows.append(sample)
return rows
def load_record_eval(file):
rows = []
with open(file, encoding="utf8") as f:
cnt = 0
for line in f:
data = json.loads(line)
passage = data['passage']['text']
passage = passage.replace('\n', ' ')
passage_idx = data['idx']
entities = data['passage']['entities']
entities_set = set([passage[entity["start"] : entity["end"] + 1] for entity in entities])
qas = data['qas']
for qa in qas:
query = qa['query']
answers_dict = {}
answers_set = set()
if "answers" in qa:
answers_dict = {(answer["start"], answer["end"]): answer["text"] for answer in qa["answers"]}
answers_set= set(answer["text"] for answer in qa["answers"])
query_idx = qa['idx']
for entity in entities_set:
label = False
if len(answers_dict) > 0:
if entity in answers_set:
label = True
updated_query = query.replace("@placeholder", entity)
uid = str(query_idx)
label = 1 if label else 0
sample = {'uid': uid, 'premise': passage, 'hypothesis': updated_query, 'label': label, "answer": entity}
rows.append(sample)
return rows
def load_copa(file):
rows = []
with open(file, encoding="utf8") as f:
for line in f:
data = json.loads(line)
label = data['label'] if 'label' in data else 0
uid = data['idx']
# the token replacement idea is from RoBERTa
# please cite RoBERTa paper if you use this
# explanation by [email protected]
token = "because" if data["question"] == "cause" else "so"
hyp1 = '{} {}'.format(token, data['choice1'])
hyp2 = '{} {}'.format(token, data['choice2'])
sample = {'uid': uid, 'premise': data['premise'], 'hypothesis': hyp1, 'hypothesis_extra': hyp2, 'label': label}
rows.append(sample)
return rows
def load_copa_mtdnn(file):
rows = []
with open(file, encoding="utf8") as f:
for line in f:
data = json.loads(line)
label = data['label'] if 'label' in data else 0
uid = data['idx']
token = "because" if data["question"] == "cause" else "so"
hyp1 = '{} {}'.format(token, data['choice1'])
hyp2 = '{} {}'.format(token, data['choice2'])
hyp = [hyp1, hyp2]
labels = [0, 0]
labels[label] = 1
sample = {'uid': uid, 'ruid': "{},{}".format(uid, uid), 'premise': data['premise'], 'hypothesis': hyp, 'label': ",".join([str(lab) for lab in labels])}
rows.append(sample)
return rows
def load_wsc(file, is_train=True):
rows = []
with open(file, encoding="utf8") as f:
for line in f:
data = json.loads(line.strip())
premise = data['text']
tokens = data['text'].split()
target = data['target']
tokens[target['span2_index']] = target['span1_text']
hypothesis = ' '.join(tokens)
label = str(data.get('label', "false")).lower()
label = 1 if label == "true" else 0
sample = {'uid': data['idx'], 'premise': premise, 'hypothesis': hypothesis, 'label': label}
rows.append(sample)
return rows
TASKS = {
'boolq': ["train.jsonl", "val.jsonl", "test.jsonl"],
'cb': ["train.jsonl", "val.jsonl", "test.jsonl"],
'multirc': ["train.jsonl", "val.jsonl", "test.jsonl"],
'record': ["train.jsonl", "val.jsonl", "test.jsonl"],
'copa': ["train.jsonl", "val.jsonl", "test.jsonl"],
'wic': ["train.jsonl", "val.jsonl", "test.jsonl"],
'recordeval': ["train.jsonl", "val.jsonl", "test.jsonl"],
'wsc': ["train.jsonl", "val.jsonl", "test.jsonl"],
}
LOAD_FUNCS = {
'boolq': load_boolq,
'cb': load_cb,
'multirc': load_multirc,
'record': load_record,
'copa': load_copa,
'wic': load_wic,
'recordeval': load_record_eval,
'wsc': load_wsc,
}
def save(data, fout):
with open(fout, 'w', encoding='utf-8') as writer:
writer.write("\n".join(data))
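# A minimal usage sketch: pick a loader by task name via LOAD_FUNCS and dump
# one JSON record per line with `save`. The input/output paths below are
# assumptions, not paths from the original project.
def _superglue_load_usage_sketch():
    rows = LOAD_FUNCS['boolq']('canonical_data/boolq/val.jsonl')  # hypothetical path
    save([json.dumps(row) for row in rows], 'boolq_dev.json')     # hypothetical path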
|
ContextualSP/adaptershare/experiments/superglue/superglue_utils.py/0
|
{
"file_path": "ContextualSP/adaptershare/experiments/superglue/superglue_utils.py",
"repo_id": "ContextualSP",
"token_count": 7168
}
| 246 |
# coding=utf-8
# Copyright (c) Microsoft. All rights reserved.
from data_utils.metrics import calc_metrics
from mt_dnn.batcher import Collater
from data_utils.task_def import TaskType
from data_utils.utils_qa import postprocess_qa_predictions
from copy import deepcopy
import numpy as np
import torch
from tqdm import tqdm
def extract_encoding(model, data, use_cuda=True):
if use_cuda:
model.cuda()
sequence_outputs = []
max_seq_len = 0
for idx, (batch_info, batch_data) in enumerate(data):
batch_info, batch_data = Collater.patch_data(use_cuda, batch_info, batch_data)
sequence_output = model.encode(batch_info, batch_data)
sequence_outputs.append(sequence_output)
max_seq_len = max(max_seq_len, sequence_output.shape[1])
new_sequence_outputs = []
for sequence_output in sequence_outputs:
new_sequence_output = torch.zeros(
sequence_output.shape[0], max_seq_len, sequence_output.shape[2]
)
new_sequence_output[:, : sequence_output.shape[1], :] = sequence_output
new_sequence_outputs.append(new_sequence_output)
return torch.cat(new_sequence_outputs)
def reduce_multirc(uids, predictions, golds):
assert len(uids) == len(predictions)
assert len(uids) == len(golds)
from collections import defaultdict
predict_map = defaultdict(list)
gold_map = defaultdict(list)
for idx, uid in enumerate(uids):
blocks = uid.split("_")
assert len(blocks) == 3
nuid = "_".join(blocks[:-1])
        predict_map[nuid].append(predictions[idx])
        gold_map[nuid].append(golds[idx])
return predict_map, gold_map
def merge(src, tgt):
def _mg(src, tgt):
if isinstance(src, dict):
for k, v in src.items():
if k in tgt:
tgt[k] = _mg(v, tgt[k])
else:
tgt[k] = v
elif isinstance(src, list):
tgt.extend(src)
elif isinstance(src, tuple):
if isinstance(src[0], list):
for i, k in enumerate(src):
tgt[i].extend(src[i])
else:
tgt.extend(src)
else:
tgt = src
return tgt
if tgt is None or len(tgt) == 0:
tgt = deepcopy(src)
return tgt
else:
return _mg(src, tgt)
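# A minimal sketch (toy values only) of how `merge` accumulates per-batch
# outputs: dicts are merged key-wise and lists are extended, which is how
# eval_model below gathers scores, golds and predictions across batches.
def _merge_usage_sketch():
    acc = None
    acc = merge({"a": [1, 2]}, acc)          # -> {"a": [1, 2]}
    acc = merge({"a": [3], "b": [4]}, acc)   # -> {"a": [1, 2, 3], "b": [4]}
    return acc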
def eval_model(
model,
data,
metric_meta,
device,
with_label=True,
label_mapper=None,
task_type=TaskType.Classification,
):
predictions = []
golds = []
scores = []
ids = []
metrics = {}
for (batch_info, batch_data) in tqdm(data, total=len(data)):
batch_info, batch_data = Collater.patch_data(device, batch_info, batch_data)
score, pred, gold = model.predict(batch_info, batch_data)
scores = merge(score, scores)
golds = merge(gold, golds)
predictions = merge(pred, predictions)
ids = merge(batch_info["uids"], ids)
if task_type == TaskType.Span:
predictions, golds = postprocess_qa_predictions(
golds, scores, version_2_with_negative=False
)
elif task_type == TaskType.SpanYN:
predictions, golds = postprocess_qa_predictions(
golds, scores, version_2_with_negative=True
)
if with_label:
metrics = calc_metrics(metric_meta, golds, predictions, scores, label_mapper)
return metrics, predictions, scores, golds, ids
|
ContextualSP/adaptershare/mt_dnn/inference.py/0
|
{
"file_path": "ContextualSP/adaptershare/mt_dnn/inference.py",
"repo_id": "ContextualSP",
"token_count": 1596
}
| 247 |
mnli:
data_format: PremiseAndOneHypothesis
dropout_p: 0
enable_san: false
labels:
- contradiction
- neutral
- entailment
metric_meta:
- ACC
loss: CeCriterion
kd_loss: MseCriterion
adv_loss: SymKlCriterion
n_class: 3
split_names:
- train
- matched_dev
- mismatched_dev
- matched_test
- mismatched_test
task_type: Classification
|
ContextualSP/adaptershare/tests/mnli_task_def.yml/0
|
{
"file_path": "ContextualSP/adaptershare/tests/mnli_task_def.yml",
"repo_id": "ContextualSP",
"token_count": 138
}
| 248 |
{"uid": "0", "label": 0, "token_id": [101, 3021, 26265, 2627, 1996, 2160, 1012, 102], "type_id": [0, 0, 0, 0, 0, 0, 0, 0], "attention_mask": [1, 1, 1, 1, 1, 1, 1, 1]}
|
ContextualSP/adaptershare/tests/sample_data/output/cola_test.json/0
|
{
"file_path": "ContextualSP/adaptershare/tests/sample_data/output/cola_test.json",
"repo_id": "ContextualSP",
"token_count": 84
}
| 249 |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Tuple
from utils import *
def get_bert_hidden_size(bert_version: str) -> int:
if bert_version in ['bert-base-uncased', 'bert-base-chinese', 'bert-base-multilingual-cased',
'hfl/chinese-bert-wwm', 'hfl/chinese-bert-wwm-ext', 'hfl/chinese-roberta-wwm-ext']:
return 768
if bert_version in ['bert-large-cased', 'bert-large-uncased', 'bert-large-uncased-whole-word-masking',
'hfl/chinese-roberta-wwm-ext-large']:
return 1024
raise NotImplementedError(f"not supported bert version: {bert_version}")
class AttentivePointer(nn.Module):
def __init__(self, hidden_size: int):
super(AttentivePointer, self).__init__()
self.hidden_size = hidden_size
self.linear_query = nn.Linear(hidden_size, hidden_size)
self.linear_key = nn.Linear(hidden_size, hidden_size)
self.linear_value = nn.Linear(hidden_size, hidden_size)
self.linear_out = nn.Sequential(nn.Linear(2 * hidden_size, hidden_size), nn.LayerNorm(hidden_size))
def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: torch.BoolTensor = None) -> \
Tuple[torch.Tensor, torch.Tensor]:
query = self.linear_query(query)
key = self.linear_key(key)
value = self.linear_value(value)
attn_logits = torch.matmul(query, key.transpose(-2, -1))
attn_logits /= math.sqrt(self.hidden_size)
if mask is not None:
attn_logits.masked_fill_(mask == 0, float('-inf'))
# [batch_size, query_length, key_length]
attn_weights = F.softmax(attn_logits, dim=-1)
attn_outputs = torch.matmul(attn_weights, value)
attn_outputs = self.linear_out(torch.cat((attn_outputs, query), dim=-1))
return attn_outputs, attn_weights
class LabelSmoothingLoss(nn.Module):
def __init__(self, label_smoothing):
assert 0.0 < label_smoothing <= 1.0
super(LabelSmoothingLoss, self).__init__()
self.smoothing = label_smoothing
self.confidence = 1.0 - label_smoothing
def forward(self, output, target):
"""
output (FloatTensor): batch_size x n_classes
target (LongTensor): batch_size
"""
n_classes = output.size(-1)
log_logits = F.log_softmax(output, dim=-1)
with torch.no_grad():
true_dist = torch.zeros_like(log_logits)
true_dist.fill_(self.smoothing / (n_classes - 1))
true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)
return torch.mean(torch.sum(-true_dist * log_logits, dim=-1))
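# A minimal usage sketch for LabelSmoothingLoss with toy logits/targets; the
# shapes follow the docstring above (batch_size x n_classes and batch_size).
def _label_smoothing_usage_sketch():
    criterion = LabelSmoothingLoss(label_smoothing=0.1)
    logits = torch.randn(4, 10)          # batch_size=4, n_classes=10
    target = torch.randint(0, 10, (4,))  # gold class index per example
    return criterion(logits, target)     # scalar loss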
|
ContextualSP/awakening_latent_grounding/models/nn_utils.py/0
|
{
"file_path": "ContextualSP/awakening_latent_grounding/models/nn_utils.py",
"repo_id": "ContextualSP",
"token_count": 1210
}
| 250 |
import math
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data.dataset import Dataset
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.sampler import Sampler, SequentialSampler, BatchSampler, SubsetRandomSampler
from logging import warning, info
from utils.data_types import *
from utils.nlp_utils import *
from tqdm import tqdm
class SortedSampler(Sampler):
""" Samples elements sequentially, always in the same order.
Args:
data (iterable): Iterable data.
Example:
>>> list(SortedSampler(range(10)))
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
def __init__(self, data):
super().__init__(data)
self.data = data
self.sort_key = lambda x: x
zip_ = [(i, self.sort_key(row)) for i, row in enumerate(self.data)]
zip_ = sorted(zip_, key=lambda r: r[1])
self.sorted_indexes = [item[0] for item in zip_]
def __iter__(self):
return iter(self.sorted_indexes)
def __len__(self):
return len(self.data)
class BucketBatchSampler(BatchSampler):
""" `BucketBatchSampler` toggles between `sampler` batches and sorted batches.
Typically, the `sampler` will be a `RandomSampler` allowing the user to toggle between
random batches and sorted batches. A larger `bucket_size_multiplier` is more sorted and vice
versa.
"""
def __init__(self, sampler, batch_size, drop_last, bucket_size_multiplier=100) -> None:
super().__init__(sampler, batch_size, drop_last)
self.bucket_sampler = BatchSampler(sampler,
min(batch_size * bucket_size_multiplier, len(sampler)),
False)
def __iter__(self):
for bucket in self.bucket_sampler:
sorted_sampler = SortedSampler(bucket)
for batch in SubsetRandomSampler(list(BatchSampler(sorted_sampler, self.batch_size, self.drop_last))):
yield [bucket[i] for i in batch]
def __len__(self):
if self.drop_last:
return len(self.sampler) // self.batch_size
else:
return math.ceil(len(self.sampler) / self.batch_size)
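# A minimal usage sketch for BucketBatchSampler with a toy data source; the
# RandomSampler wrapping is an assumption about typical callers (see
# load_wtq_data_iterator below for the SequentialSampler-based use).
def _bucket_batch_sampler_usage_sketch():
    from torch.utils.data.sampler import RandomSampler
    data = list(range(10))
    batch_sampler = BucketBatchSampler(RandomSampler(data), batch_size=3, drop_last=False)
    return [batch for batch in batch_sampler]  # each batch is a list of indices into `data`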
@dataclass
class MetaIndex:
question_spans: List[Tuple[Token, int, int]] # (token, start, end)
entity_spans: List[Tuple[str, int, int, int]] # (type, entity_id, start, end)
column2table_indices: List[int] # table index for each column
table_spans: List[Tuple[int, int, int]] = field(init=False)
column_spans: List[Tuple[int, int, int]] = field(init=False)
value_spans: List[Tuple[int, int, int]] = field(init=False)
def __post_init__(self):
self.table_spans = [(e_id, start, end) for e_type, e_id, start, end in self.entity_spans if e_type == 'tbl']
self.column_spans = [(e_id, start, end) for e_type, e_id, start, end in self.entity_spans if e_type == 'col']
self.value_spans = [(e_id, start, end) for e_type, e_id, start, end in self.entity_spans if e_type == 'val']
@property
def num_question_tokens(self):
return len(self.question_spans)
@property
def num_columns(self):
return len(self.col_encode_indices)
@property
def num_values(self):
return len(self.val_encode_indices)
@property
def num_tables(self):
return len(self.tbl_encode_indices)
@property
def question_sep_index(self):
return self.question_spans[-1][1] + 1
@property
def question_encode_indices(self) -> List[int]:
return [start for _, start, _ in self.question_spans]
@property
def tbl_encode_indices(self) -> List[int]:
return [start for _, start, _ in self.table_spans]
@property
def col_encode_indices(self) -> List[int]:
return [start for _, start, _ in self.column_spans]
@property
def val_encode_indices(self) -> List[int]:
return [start for _, start, _ in self.value_spans]
@property
def col_tbl_encode_indices(self) -> List[int]:
col_tbl_enc_indices = []
for col_idx, tbl_idx in enumerate(self.column2table_indices):
if tbl_idx == -1:
col_tbl_enc_indices.append(self.column_spans[col_idx][1])
else:
col_tbl_enc_indices.append(self.table_spans[tbl_idx][1])
assert len(col_tbl_enc_indices) == len(self.column_spans)
return col_tbl_enc_indices
def split(self, outputs, dim=0):
return torch.split(outputs, [self.num_tables, self.num_columns, self.num_values], dim=dim)
def lookup_entity_id(self, e_type: str, type_encode_idx: int) -> int:
count = 0
for t, e_idx, _, _ in self.entity_spans:
if t != e_type:
continue
if count == type_encode_idx:
return e_idx
count += 1
raise ValueError("Index {} of range for type {}".format(type_encode_idx, e_type))
class WTQDataset(Dataset):
def __init__(self, examples: List[Dict], tokenizer: BertTokenizer, device: torch.device, max_enc_length: int,
sort_by_length: bool) -> None:
super().__init__()
self.tokenizer = tokenizer
self.device = device
self.max_enc_length = max_enc_length
self.examples = self._encode_examples(examples, sort_by_length)
def __len__(self) -> int:
return len(self.examples)
def __getitem__(self, index: int) -> Dict:
return self.examples[index]
def _encode_examples(self, examples: List[Dict], sort_by_length: bool) -> List[Dict]:
new_examples = []
t_examples = tqdm(examples) if len(examples) > 100 else examples
for example in t_examples:
new_example = self._encode_example(example)
if new_example is None:
continue
new_examples += [new_example]
if len(new_examples) < len(examples):
warning('Ignore {} examples >= max encode length {}'.format(len(examples) - len(new_examples),
self.max_enc_length))
if not sort_by_length:
return new_examples
sorted_examples = sorted(new_examples, key=lambda x: x['input_token_ids'].size(0))
return list(sorted_examples)
def _encode_example(self, example: Dict) -> Dict:
question: Utterance = Utterance.from_json(example['question'])
input_tokens = [self.tokenizer.cls_token]
question_spans = []
for token in question.tokens:
start = len(input_tokens)
input_tokens += token.pieces
question_spans += [(token, start, len(input_tokens) - 1)]
input_tokens += [self.tokenizer.sep_token]
input_token_types = [0] * len(input_tokens)
schema: WTQSchema = WTQSchema.from_json(example['schema'])
entity_spans = []
for c_idx, column in enumerate(example['columns']):
column_utterance: Utterance = Utterance.from_json(column['utterance'])
start = len(input_tokens)
input_tokens += [Bert_Special_Tokens[column['data_type']]] + column_utterance.pieces
assert column['index'] == c_idx
entity_spans += [('col', column['index'], start, len(input_tokens) - 1)]
input_tokens += [self.tokenizer.sep_token]
if len(input_tokens) > self.max_enc_length:
return None
input_token_types += [1] * (len(input_tokens) - len(input_token_types))
input_token_ids = self.tokenizer.convert_tokens_to_ids(input_tokens)
assert len(input_tokens) == len(input_token_types)
meta_index = MetaIndex(question_spans=question_spans, entity_spans=entity_spans, column2table_indices=None)
column_labels = [0 for _ in entity_spans]
assert len(entity_spans) == len(schema.column_headers)
if 'identify_labels' in example:
for col_name in example['identify_labels'][str(SQLTokenType.column)]:
col_id = schema.column_header_to_id[col_name]
column_labels[col_id] = 1
return {
'input_token_ids': torch.tensor(input_token_ids, dtype=torch.long, device=self.device),
'input_token_types': torch.tensor(input_token_types, dtype=torch.long, device=self.device),
'column_labels': torch.tensor(column_labels, dtype=torch.long, device=self.device),
'meta_index': meta_index,
'input_tokens': input_tokens,
'example': example
}
class SpiderDataset(WTQDataset):
def __init__(self, examples: List[Dict], tokenizer: BertTokenizer, device: torch.device, max_enc_length: int,
sort_by_length: bool) -> None:
super().__init__(examples, tokenizer, device, max_enc_length, sort_by_length)
def fix_span(self, span: Tuple[str, int, int, int]):
start, end = span[2], span[3]
if start >= self.max_enc_length:
start = 0
if end >= self.max_enc_length:
end = 0
return (span[0], span[1], start, end)
def get_matched_values(self, example: Dict, schema: SpiderSchema, threshold: float = 0.82) -> List[ValueMatch]:
values = [ValueMatch.from_json(v) for v in example['values']]
filter_values = []
for value in values:
if value.score < threshold and value.score > 0.51: # 0.5 is matched by sub string
if value.label:
warning('Ignore gold value with confidence < {}, {} ({})'.format(threshold, str(value),
example['question']['text']))
continue
filter_values.append(value)
sorted_values = list(sorted(filter_values, key=lambda v: schema.id_map[v.column]))
example['values'] = [v.to_json() for v in sorted_values]
return sorted_values
def build_relations(self, schema: SpiderSchema, values: List[ValueMatch]):
col_idx_offset, val_idx_offset = schema.num_tables, schema.num_tables + schema.num_columns
relations = {}
def update_relation(i, j, r):
if (i, j) not in relations:
relations[(i, j)] = int(r)
# primary key
for col_idx in schema.primary_keys:
tbl_idx = schema.column_to_table[col_idx]
update_relation(tbl_idx, col_idx_offset + col_idx, SchemaRelation.table_column_pk)
update_relation(col_idx_offset + col_idx, tbl_idx, SchemaRelation.column_table_pk)
# foreign key
for col_idx1, col_idx2 in schema.foreign_keys:
update_relation(col_idx1 + col_idx_offset, col_idx2 + col_idx_offset, SchemaRelation.column_column_fk_fw)
update_relation(col_idx2 + col_idx_offset, col_idx1 + col_idx_offset, SchemaRelation.column_column_fk_bw)
for col_idx in range(schema.num_columns):
tbl_idx = schema.column_to_table[col_idx]
update_relation(tbl_idx, col_idx_offset + col_idx, SchemaRelation.table_column)
update_relation(col_idx_offset + col_idx, tbl_idx, SchemaRelation.column_table)
for tbl_idx in range(schema.num_tables):
for tbl_idx2 in range(schema.num_tables):
update_relation(tbl_idx, tbl_idx2, SchemaRelation.table_table)
col_indices = schema.table_to_columns
for col_idx1 in col_indices:
for col_idx2 in col_indices:
update_relation(col_idx1 + col_idx_offset, col_idx2 + col_idx_offset, SchemaRelation.column_column)
for val_idx in range(len(values)):
col_idx = schema.id_map[values[val_idx].column]
update_relation(col_idx + col_idx_offset, val_idx + val_idx_offset, SchemaRelation.column_value)
update_relation(val_idx + val_idx_offset, col_idx + col_idx_offset, SchemaRelation.value_column)
relations_tensor = torch.zeros((schema.num_tables + schema.num_columns + len(values),
schema.num_tables + schema.num_columns + len(values)), dtype=torch.long,
device=self.device)
for (i, j), r in relations.items():
relations_tensor[i, j] = r
return relations_tensor
def _encode_example(self, example: Dict) -> Dict:
question: Utterance = Utterance.from_json(example['question'])
input_tokens = [self.tokenizer.cls_token]
question_spans = []
for token in question.tokens:
start = len(input_tokens)
input_tokens += token.pieces
question_spans += [(token, start, len(input_tokens) - 1)]
input_tokens += [self.tokenizer.sep_token]
input_token_types = [0] * len(input_tokens)
assert len(input_tokens) < self.max_enc_length
schema: SpiderSchema = SpiderSchema.from_json(example['schema'])
values = self.get_matched_values(example, schema)
grouped_values = defaultdict(list)
for i, value in enumerate(values):
column_idx = schema.id_map[value.column]
grouped_values[column_idx].append(i)
entity_spans, idx2spans = [], {}
for table in example['tables']:
table_utterance: Utterance = Utterance.from_json(table['utterance'])
if table_utterance.text == '*':
start = len(input_tokens)
input_tokens += [Bert_Special_Tokens['*']]
idx2spans[('col', 0)] = len(entity_spans)
entity_spans += [('col', 0, start, len(input_tokens) - 1)]
for value_idx in grouped_values[0]:
start = len(input_tokens)
input_tokens += [Col_Val_Sep]
input_tokens += self.tokenizer.tokenize(str(values[value_idx].value))
idx2spans[('val', value_idx)] = len(entity_spans)
entity_spans += [('val', value_idx, start, len(input_tokens) - 1)]
input_tokens += [self.tokenizer.sep_token]
continue
start = len(input_tokens)
input_tokens += [TBL_Token] + table_utterance.pieces
idx2spans[('tbl', table['index'])] = len(entity_spans)
entity_spans += [('tbl', table['index'], start, len(input_tokens) - 1)]
for column in table['columns']:
column_utterance: Utterance = Utterance.from_json(column['utterance'])
start = len(input_tokens)
col_db_key = schema.get_column_key_code(column['index'])
input_tokens += [Tbl_Col_Sep, DB_Col_Keys[col_db_key], column['data_type']]
# If column name is not unique, append table name
# assert column_utterance.text.lower() in column2ids
# if len(column2ids[column_utterance.text.lower()]) > 1 and col_db_key == 0:
# input_tokens += table_utterance.pieces
col_pieces = column_utterance.pieces
                if len(col_pieces) == 0:  # column shares the same name as its table
col_pieces = table_utterance.pieces
input_tokens += col_pieces
idx2spans[('col', column['index'])] = len(entity_spans)
entity_spans += [('col', column['index'], start, len(input_tokens) - 1)]
for value_idx in grouped_values[column['index']]:
start = len(input_tokens)
input_tokens += [Col_Val_Sep]
input_tokens += self.tokenizer.tokenize(str(values[value_idx].value))
idx2spans[('val', value_idx)] = len(entity_spans)
entity_spans += [('val', value_idx, start, len(input_tokens) - 1)]
input_tokens += [self.tokenizer.sep_token]
if len(input_tokens) > self.max_enc_length:
# warning("Length out of max: {}\t{}".format(schema.db_id, question.text))
input_tokens = input_tokens[:self.max_enc_length]
input_token_types += [1] * (len(input_tokens) - len(input_token_types))
input_token_ids = self.tokenizer.convert_tokens_to_ids(
[x if x not in Bert_Special_Tokens else Bert_Special_Tokens[x] for x in input_tokens])
ordered_spans = []
for tbl_idx in range(len(schema.table_names_original)):
ordered_spans.append(self.fix_span(entity_spans[idx2spans[('tbl', tbl_idx)]]))
for col_idx in range(len(schema.column_names_original)):
ordered_spans.append(self.fix_span(entity_spans[idx2spans[('col', col_idx)]]))
for val_idx in range(len(values)):
ordered_spans.append(self.fix_span(entity_spans[idx2spans[('val', val_idx)]]))
column2table = [schema.column_to_table[c_idx] for c_idx in range(len(schema.column_names))]
meta_index = MetaIndex(question_spans=question_spans, entity_spans=ordered_spans,
column2table_indices=column2table)
assert meta_index.num_tables == len(schema.table_names)
assert meta_index.num_columns == len(schema.column_names)
table_labels = [0] * meta_index.num_tables
column_labels = [0] * meta_index.num_columns
if 'identify_labels' in example:
for table_name in example['identify_labels'][str(SQLTokenType.table)]:
table_idx = schema.id_map[table_name.lower()]
assert table_idx < meta_index.num_tables
table_labels[table_idx] = 1
for column_name in example['identify_labels'][str(SQLTokenType.column)]:
column_idx = schema.id_map[column_name.lower()]
assert column_idx < meta_index.num_columns
column_labels[column_idx] = 1
value_labels = [0] * meta_index.num_values
for i, value in enumerate(values):
value_labels[i] = int(value.label)
# relations = self.build_relations(schema, values)
return {
'input_token_ids': torch.tensor(input_token_ids, dtype=torch.long, device=self.device),
'input_token_types': torch.tensor(input_token_types, dtype=torch.long, device=self.device),
'table_labels': torch.tensor(table_labels, dtype=torch.long, device=self.device),
'column_labels': torch.tensor(column_labels, dtype=torch.long, device=self.device),
'value_labels': torch.tensor(value_labels, dtype=torch.long, device=self.device),
# 'relations': relations,
'meta_index': meta_index,
'input_tokens': input_tokens,
'example': example
}
def tensor_collate_fn(inputs: List[Dict], is_training: bool) -> Dict:
assert len(inputs) > 0
collated = {}
for key in inputs[0]:
values = [x[key] for x in inputs]
if key == 'input_token_ids':
collated[key] = pad_sequence(values, batch_first=True, padding_value=0)
elif key == 'input_token_types':
collated[key] = pad_sequence(values, batch_first=True, padding_value=1)
else:
collated[key] = values
collated['is_training'] = is_training
return collated
def load_wtq_data_iterator(paths, tokenizer: BertTokenizer, batch_size: int, device: torch.device,
bucket: bool, shuffle: bool, max_enc_length: int, sampling_size: int = None) -> DataLoader:
all_examples = []
if isinstance(paths, list):
for path in paths:
examples = json.load(open(path, 'r', encoding='utf-8'))
if sampling_size is not None:
all_examples += examples[:sampling_size]
info("Sampling {}/{} examples from {} over.".format(sampling_size, len(examples), path))
else:
all_examples += examples
info("Load {} examples from {} over.".format(len(examples), path))
elif isinstance(paths, str):
all_examples = json.load(open(paths, 'r', encoding='utf-8'))
if sampling_size is not None:
info("Sampling {}/{} examples from {} over.".format(sampling_size, len(all_examples), paths))
            all_examples = all_examples[:sampling_size]
else:
info("Load {} examples from {} over.".format(len(all_examples), paths))
else:
raise ValueError("Invalid path input: {}".format(paths))
if bucket:
dataset = WTQDataset(all_examples, tokenizer, device, max_enc_length, True)
data_loader = DataLoader(
dataset,
batch_sampler=BucketBatchSampler(SequentialSampler(list(range(len(dataset)))), batch_size=batch_size,
drop_last=False),
collate_fn=lambda x: tensor_collate_fn(x, shuffle))
return data_loader
else:
dataset = WTQDataset(all_examples, tokenizer, device, max_enc_length, False)
data_loader = DataLoader(
dataset,
batch_size=batch_size,
shuffle=shuffle,
collate_fn=lambda x: tensor_collate_fn(x, shuffle))
return data_loader
def load_spider_data_iterator(paths: List[str], tokenizer: BertTokenizer, batch_size: int, device: torch.device,
bucket: bool, shuffle: bool, max_enc_length: int,
sampling_size: int = None) -> DataLoader:
all_examples = []
if isinstance(paths, list):
for path in paths:
examples = json.load(open(path, 'r', encoding='utf-8'))
if sampling_size is not None:
all_examples += examples[:sampling_size]
info("Sampling {}/{} examples from {} over.".format(sampling_size, len(examples), path))
else:
all_examples += examples
info("Load {} examples from {} over.".format(len(examples), path))
elif isinstance(paths, str):
all_examples = json.load(open(paths, 'r', encoding='utf-8'))
if sampling_size is not None:
info("Sampling {}/{} examples from {} over.".format(sampling_size, len(all_examples), paths))
            all_examples = all_examples[:sampling_size]
else:
info("Load {} examples from {} over.".format(len(all_examples), paths))
else:
raise ValueError("Invalid path input: {}".format(paths))
if bucket:
dataset = SpiderDataset(all_examples, tokenizer, device, max_enc_length, True)
data_loader = DataLoader(
dataset,
batch_sampler=BucketBatchSampler(SequentialSampler(list(range(len(dataset)))), batch_size=batch_size,
drop_last=False),
collate_fn=lambda x: tensor_collate_fn(x, shuffle))
return data_loader
else:
dataset = SpiderDataset(all_examples, tokenizer, device, max_enc_length, False)
data_loader = DataLoader(
dataset,
batch_size=batch_size,
shuffle=shuffle,
collate_fn=lambda x: tensor_collate_fn(x, shuffle))
return data_loader
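# A minimal usage sketch for the WTQ iterator; the data path and bert version
# are assumptions, and BertTokenizer is assumed to be in scope via the
# module's wildcard imports from utils.
def _wtq_loader_usage_sketch():
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    loader = load_wtq_data_iterator('data/wtq/train.json', tokenizer, batch_size=16,
                                    device=torch.device('cpu'), bucket=True,
                                    shuffle=True, max_enc_length=512)
    for batch in loader:
        input_ids = batch['input_token_ids']  # padded with 0 up to the batch max length
        return input_ids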
|
ContextualSP/awakening_latent_grounding/utils/data_iter.py/0
|
{
"file_path": "ContextualSP/awakening_latent_grounding/utils/data_iter.py",
"repo_id": "ContextualSP",
"token_count": 10904
}
| 251 |
import torch
from torch.autograd import Variable
from torch.nn import functional
def sequence_mask(sequence_length, max_len=None):
if max_len is None:
max_len = sequence_length.data.max()
batch_size = sequence_length.size(0)
seq_range = torch.arange(0, max_len).long()
seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)
seq_range_expand = Variable(seq_range_expand)
if sequence_length.is_cuda:
seq_range_expand = seq_range_expand.cuda()
seq_length_expand = (sequence_length.unsqueeze(1)
.expand_as(seq_range_expand))
return seq_range_expand < seq_length_expand
def masked_cross_entropy(logits, target, length):
    """
    Args:
        logits: A Variable containing a FloatTensor of size
            (batch, max_len, num_classes) which contains the
            unnormalized probability for each class.
        target: A Variable containing a LongTensor of size
            (batch, max_len) which contains the index of the true
            class for each corresponding step.
        length: A Variable containing a LongTensor of size (batch,)
            which contains the length of each data in a batch.
    Returns:
        loss: An average loss value masked by the length.
    """
    length = Variable(torch.LongTensor(length)).cuda()
# logits_flat: (batch * max_len, num_classes)
logits_flat = logits.view(-1, logits.size(-1))
# log_probs_flat: (batch * max_len, num_classes)
log_probs_flat = functional.log_softmax(logits_flat, dim=-1)
# target_flat: (batch * max_len, 1)
target_flat = target.view(-1, 1)
# losses_flat: (batch * max_len, 1)
losses_flat = -torch.gather(log_probs_flat, dim=1, index=target_flat)
# losses: (batch, max_len)
losses = losses_flat.view(*target.size())
# mask: (batch, max_len)
mask = sequence_mask(sequence_length=length, max_len=target.size(1))
losses = losses * mask.float()
loss = torch.div(losses.sum(dim=-1), length.float())
# loss = losses.sum() / length.float().sum()
return loss
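# A minimal usage sketch with toy shapes; a CUDA device is assumed because the
# function moves the length tensor to GPU and masks the (GPU) losses with it.
def _masked_cross_entropy_usage_sketch():
    batch, max_len, num_classes = 2, 5, 7
    logits = torch.randn(batch, max_len, num_classes).cuda()
    target = torch.randint(0, num_classes, (batch, max_len)).cuda()
    length = [5, 3]  # positions beyond each length are masked out
    return masked_cross_entropy(logits, target, length)  # per-example loss, shape (batch,)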
|
ContextualSP/compositional_generalization/masked_cross_entropy.py/0
|
{
"file_path": "ContextualSP/compositional_generalization/masked_cross_entropy.py",
"repo_id": "ContextualSP",
"token_count": 834
}
| 252 |
# Incomplete Utterance Rewriting <img src="https://pytorch.org/assets/images/logo-dark.svg" height = "25" align=center />
[English Version](README.md)
This repository is the official implementation of the paper [Incomplete Utterance Rewriting as Semantic Segmentation](https://arxiv.org/pdf/2009.13166.pdf). In the paper, we formulate *incomplete utterance rewriting* as an editing task over the dialogue, and accordingly propose a novel model that solves it with a semantic segmentation approach.
If this repository or the paper helps your research, please consider citing it with the following bibtex:
```bib
@inproceedings{qian2020incomplete,
title={Incomplete Utterance Rewriting as Semantic Segmentation},
author={Liu, Qian and Chen, Bei and Lou, Jian-Guang and Zhou, Bin and Zhang, Dongmei},
booktitle={Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing},
year={2020}
}
```
## Content
- [Install Dependencies](#install-dependencies)
- [Download and Preprocess Dataset](#download-and-preprocess-dataset)
- [Train Model](#train-model)
- [Evaluate Model](#evaluate-model)
- [Predict Model](#predict-model)
- [Pretrained Weights](#pretrained-weights)
## Install Dependencies
### Python Environment
First, you should set up a python environment. In principle this repository runs under any python 3.x; we used python 3.7 in our experiments. After installing python 3.7, we strongly recommend using `virtualenv` (a tool for creating isolated python environments) to manage the environment. You can create one with the following commands:
```bash
python -m pip install virtualenv
virtualenv venv
```
### Activate the Virtual Environment
After installing the virtual environment, you need to activate it before installing the libraries this repository depends on. You can do so with the commands below (note that $ENV_FOLDER should be replaced with your own virtualenv folder path, e.g. venv):
```bash
$ENV_FOLDER\Scripts\activate.bat (Windows)
source $ENV_FOLDER/bin/activate (Linux)
```
### Install Libraries
The two main libraries this repository depends on are `pytorch` and `allennlp`, with the following version requirements:
- pytorch >= 1.2.0 (not tested on other versions, but 1.0.0 may work)
- allennlp == 0.9.0
All other dependencies can be installed with the following command:
```console
pip install -r requirement.txt
```
## Download and Preprocess Dataset
### Prepare Dataset
Although we cannot provide the datasets directly in this repository (due to copyright issues), we provide `download.sh` to automatically download and preprocess the datasets used in the paper.
> Note that the preprocessing does not include exporting the distant supervision data used in the paper, i.e. the word-level edit matrices. Readers interested in that procedure can refer to `src/data_reader.py` (lines 178-200).
### Prepare Glove
If you want to train models on the English datasets (i.e. `Task` and `CANARD`), please download the [Glove 6B word vectors](http://nlp.stanford.edu/data/glove.6B.zip). Unzip the file and move `glove.6B.100d.txt` into the `glove` folder.
## Train Model
You can train models on different datasets using the `*.sh` files under the `src` folder. For example, you can run the following command under the `src` folder to train a `RUN + BERT` model on the `Multi` dataset:
```console
./train_multi_bert.sh
```
### Configs Table
| Config File | Setting in the Paper |
| :--- | :---: |
| canard.jsonnet | RUN on CANARD (Elgohary et al. 2019) |
| multi.jsonnet | RUN on Multi (Pan et al. 2019) |
| multi_bert.jsonnet | RUN + BERT on Multi (Pan et al. 2019) |
| rewrite.jsonnet | RUN on Rewrite (Su et al. 2019) |
| rewrite_bert.jsonnet | RUN + BERT on Rewrite (Su et al. 2019) |
| task.jsonnet | RUN on Task (Quan et al. 2019) |
### Training Tips
1. If you do not rely on the `BLEU` metric to pick the best checkpoint on the development set, you can disable it for faster evaluation.
2. By default we do not compute any metric on the training set to save training time, but you can enable it by setting `enable_training_log` to `True` in `*.jsonnet` (see `task.jsonnet` for reference).
3. Training and evaluation of all models were tested on a `Tesla M40 (24GB)`. If errors such as `CUDA Out Of Memory` occur on your machine, you can reduce the hyper-parameter `batch_size` in `*.jsonnet`. In our experience, this does not hurt performance much.
## Evaluate Model
When training finishes normally, `allennlp` saves a compressed model file, usually named `model.tar.gz`, under the checkpoint folder; the subsequent evaluation is based on this archive.
We provide an evaluation script `evaluate.py` under the `src` folder; you can evaluate a model file by running the following command:
```console
python evaluate.py --model_file model.tar.gz --test_file ../dataset/Multi/test.txt
```
The above script will generate a file `model.tar.gz.json`, which records the detailed metrics of the model. For example, the performance of `RUN + BERT` on `Rewrite` is:
```json
{
"ROUGE": 0.9394040084189113,
"_ROUGE1": 0.961865057419486,
"_ROUGE2": 0.9113051224617216,
"EM": 0.688,
"_P1": 0.9451903332806824,
"_R1": 0.8668694770389685,
"F1": 0.9043373129817137,
"_P2": 0.8648273949812838,
"_R2": 0.7989241803278688,
"F2": 0.8305705345849144,
"_P3": 0.8075098814229249,
"_R3": 0.7449860216360763,
"F3": 0.774988935954985,
"_BLEU1": 0.9405510823944796,
"_BLEU2": 0.9172718486250105,
"_BLEU3": 0.8932687251641028,
"BLEU4": 0.8691863201601382,
"loss": 0.2084200546145439
}
```
Below we provide all pretrained model files to reproduce the results reported in the paper. We recommend downloading them, placing them into the `pretrained_weights` folder, and then running the following command:
```console
python evaluate.py --model_file ../pretrained_weights/rewrite.tar.gz --test_file ../dataset/Multi/test.txt
```
## Predict Model
We provide a simple function call in `src/predict.py` that generates a rewritten utterance for a given dialogue context in python code. You can adapt it to the needs of your own project.
## Pretrained Weights
| Dataset | BERT | Config | EM | Rewriting F1 | BLEU4 | Pretrained_Weights |
| :---: | :---: |:--- | :---: | :---: | :---: | :---: |
| Rewrite | No | rewrite.jsonnet | 53.6 | 81.3 | 79.6 | [rewrite.tar.gz](https://github.com/microsoft/ContextualSP/releases/download/rewrite/rewrite.tar.gz)|
| Rewrite | Yes | rewrite_bert.jsonnet | 68.8 | 90.4 | 86.9 | [rewrite_bert.tar.gz](https://github.com/microsoft/ContextualSP/releases/download/rewrite.bert/rewrite_bert.tar.gz)|
| CANARD | No | canard.jsonnet | 18.3 | 44.2 | 49.8 | [canard.tar.gz](https://github.com/microsoft/ContextualSP/releases/download/canard/canard.tar.gz) |
| Multi | No | multi.jsonnet | 43.3 | 60.7 | 81.1 | [multi.tar.gz](https://github.com/microsoft/ContextualSP/releases/download/multi/multi.tar.gz) |
| Multi | Yes | multi_bert.jsonnet | 49.3 | 69.5 | 83.7 | [multi_bert.tar.gz](https://github.com/microsoft/ContextualSP/releases/download/multi.bert/multi_bert.tar.gz) |
|
ContextualSP/incomplete_utterance_rewriting/README_zh.md/0
|
{
"file_path": "ContextualSP/incomplete_utterance_rewriting/README_zh.md",
"repo_id": "ContextualSP",
"token_count": 4021
}
| 253 |
#!/usr/bin/env bash
export model_file=../checkpoints/run_multi_bert
export config_file=../configs/multi_bert.jsonnet
export train_data_path=../dataset/Multi/train.txt
export validation_data_path=../dataset/Multi/valid.txt
export seed=1
allennlp train -s ${model_file} ${config_file} \
--include-package data_reader \
--include-package model \
-o "{\"random_seed\":\"${seed}\",\"numpy_seed\":\"${seed}\",\"pytorch_seed\":\"${seed}\", \"train_data_path\":\"${train_data_path}\",\"validation_data_path\":\"${validation_data_path}\"}"
|
ContextualSP/incomplete_utterance_rewriting/src/train_multi_bert.sh/0
|
{
"file_path": "ContextualSP/incomplete_utterance_rewriting/src/train_multi_bert.sh",
"repo_id": "ContextualSP",
"token_count": 186
}
| 254 |
"""
This script is responsible for translating SQL to SemQL in a flexible and readable way.
"""
from typing import Dict, List, Tuple, Optional
import json
from src.context.grammar import *
from allennlp.common.checks import ConfigurationError
from src.context.graph import Graph
from collections import deque
from copy import deepcopy
import logging
import re
class SparcType:
# https://github.com/taoyds/spider/blob/master/preprocess/parsed_sql_examples.sql#L30
# 'not', 'between', '=', '>', '<', '>=', '<=', '!=', 'in', 'like', 'is', 'exists'
FilterNot = 0
FilterBetween = 1
FilterEqual = 2
FilterGreater = 3
FilterLess = 4
FilterGeq = 5
FilterLeq = 6
FilterNeq = 7
FilterIn = 8
FilterLike = 9
FilterIs = 10
FilterExist = 11
# https://github.com/taoyds/spider/blob/master/preprocess/parsed_sql_examples.sql#L37
# 'and', 'or'
# https://github.com/taoyds/spider/blob/0b0c9cad97e4deeef1bc37c8435950f4bdefc141/preprocess/parsed_sql_examples.sql#L32
# 'none', 'max', 'min', 'count', 'sum', 'avg'
ANone = 0
AMax = 1
AMin = 2
ACount = 3
ASum = 4
AAvg = 5
# https://github.com/taoyds/spider/blob/master/preprocess/parsed_sql_examples.sql#L31
# 'none', '-', '+', "*", '/'
class SQLConverter(object):
"""
    This class handles the conversion from a structured query dict into an intermediate action sequence and vice versa.
"""
def __init__(self, db_context: SparcDBContext):
"""
:param db_context: data context for database
"""
self.db_context = db_context
self.col_names: Dict[int, TableColumn] = db_context.id_to_col
self.table_names: Dict[int, Table] = db_context.id_to_tab
def translate_to_intermediate(self, sql_clause: Dict) -> List[Action]:
"""
        Given a SQL clause, this function translates it into an intermediate logic form such as SemQL.
        TODO: Once the intermediate grammar rules update, we should check or update the translation process too.
{
"select": [
...
],
"union": null,
"except": null,
"groupBy": [],
"limit": null,
"intersect": null,
"where": [
...
],
"having": [],
"orderBy": [],
"from": {
...
}
}
:return: intermediate grammar sequence list, typing `List[str]`.
"""
return self._process_statement(sql_clause=sql_clause)
def _process_statement(self, sql_clause: Dict) -> List[Action]:
"""
        Apart from intersect/union/except, the remaining parsing logic is implemented here.
:return:
"""
if sql_clause['intersect'] is not None:
inter_seq = [Statement(GrammarType.StateInter)]
nest_sql_clause = sql_clause['intersect']
inter_seq.extend(self._process_root(sql_clause))
inter_seq.extend(self._process_root(nest_sql_clause))
return inter_seq
if sql_clause['union'] is not None:
inter_seq = [Statement(GrammarType.StateUnion)]
nest_sql_clause = sql_clause['union']
inter_seq.extend(self._process_root(sql_clause))
inter_seq.extend(self._process_root(nest_sql_clause))
return inter_seq
if sql_clause['except'] is not None:
inter_seq = [Statement(GrammarType.StateExcept)]
nest_sql_clause = sql_clause['except']
inter_seq.extend(self._process_root(sql_clause))
inter_seq.extend(self._process_root(nest_sql_clause))
return inter_seq
# Statement None
inter_seq = [Statement(GrammarType.StateNone)]
inter_seq.extend(self._process_root(sql_clause))
return inter_seq
def _process_agg_col_table(self, sql_clause, agg_id, col_ind) -> [A, C, T]:
"""
Combine the three operators into one function to process
:param col_ind: col index in table. Equal to 0 means occurrence of `*`.
:param agg_id: aggregation operation id
:param sql_clause: sql clause for get all tables
:return: the list of A, C and T.
"""
def _process_tab() -> T:
"""
Get table grammar according to col index. Note that the most difficult thing is that
we should decide a specific table for `*` token.
:return: selected table grammar
"""
# col index not equal to 0 means specific column, return its table
if col_ind != 0:
table_name = self.col_names[col_ind].refer_table.name
_table_grammar = T(table_name)
# * case
else:
# Fetch table names, check data format
from_clause = sql_clause['from']
assert 'table_units' in from_clause and 'conds' in from_clause
#assert isinstance(from_clause['table_units'], list)
#assert isinstance(from_clause['conds'], list)
table_units = from_clause['table_units']
# only one table
if len(table_units) == 1:
ret = table_units[0][1]
if type(ret) != int:
# use default setting
ret = 0
# get table name
table_name = self.table_names[ret].name
_table_grammar = T(table_name)
# multiple tables
else:
table_set = set()
for table_unit_tuple in table_units:
# table unit tuple[1] is the table id
if type(table_unit_tuple[1]) == int:
table_set.add(self.table_names[table_unit_tuple[1]].name)
# collect other tables
other_set = set()
select_clause = sql_clause['select']
where_clause = sql_clause['where']
group_clause = sql_clause['groupBy']
for sel_part in select_clause[1]:
sel_col_ind = sel_part[1][1][1]
if sel_col_ind != 0:
# find table according to col index
other_set.add(self.col_names[sel_col_ind].refer_table.name)
# number of where clause
where_num = len(where_clause)
if where_num >= 1:
where_col_ind = where_clause[0][2][1][1]
other_set.add(self.col_names[where_col_ind].refer_table.name)
# 3, 5
if where_num >= 3:
where_col_ind = where_clause[2][2][1][1]
other_set.add(self.col_names[where_col_ind].refer_table.name)
# 5
if where_num >= 5:
where_col_ind = where_clause[4][2][1][1]
other_set.add(self.col_names[where_col_ind].refer_table.name)
# get candidates
candi_set = table_set - other_set
if len(candi_set) == 1:
table_name = candi_set.pop()
_table_grammar = T(table_name)
elif len(candi_set) == 0 and len(group_clause) != 0:
group_col_ind = group_clause[0][1]
# get table name
table_name = self.col_names[group_col_ind].refer_table.name
_table_grammar = T(table_name)
# add the first of table unit
else:
tab_ind = table_units[0][1]
_table_grammar = T(self.table_names[tab_ind].name)
return _table_grammar
def _process_agg() -> A:
"""
map sparc id into corresponding grammar for A
:return: aggregation grammar
"""
sparc_to_grammar = {
SparcType.ANone: GrammarType.ANone,
SparcType.AMax: GrammarType.AMax,
SparcType.AMin: GrammarType.AMin,
SparcType.ASum: GrammarType.ASum,
SparcType.AAvg: GrammarType.AAvg,
SparcType.ACount: GrammarType.ACount
}
if agg_id in sparc_to_grammar:
_agg_grammar = A(sparc_to_grammar[agg_id])
else:
raise ConfigurationError(f"No support for the aggregate {agg_id}")
return _agg_grammar
def _process_col() -> C:
sel_col_name = self.col_names[col_ind].name
# TODO: why use set id
# col_set_id = self.col_set.index(sel_col_name)
_col_grammar = C(sel_col_name)
return _col_grammar
agg_grammar = _process_agg()
col_grammar = _process_col()
table_grammar = _process_tab()
return [agg_grammar, col_grammar, table_grammar]
def _process_select(self, sql_clause) -> List[Action]:
"""
the select clause will be mapped into A, C and T.
:return:
"""
sql_select_clause = sql_clause['select']
# check the instance type
#assert isinstance(sql_select_clause, list) or isinstance(sql_select_clause, tuple)
# boolean / list of column items
distinct, sel_items = sql_select_clause[0], sql_select_clause[1]
# find index of @Select.grammar_dict and initialize intermediate select action sequence
inter_seq = [Select(len(sel_items) - 1)]
# traverse sel items, including aggregation and others
for sel_item in sel_items:
# aggregation grammar
agg_id = sel_item[0]
col_ind = sel_item[1][1][1]
inter_seq.extend(self._process_agg_col_table(sql_clause=sql_clause,
agg_id=agg_id,
col_ind=col_ind))
return inter_seq
def _process_condition(self, sql_clause, cond: List) -> List[Action]:
"""
        Helper function for filter processing, which aims to align @SparcType with @GrammarType.
:return:
"""
inter_seq: List[Action] = []
# if the condition is a nested query
        is_nested_query = isinstance(cond[3], dict)
# corresponding where operation index
sparc_type = cond[1]
# if there is `Not`, cond[0] becomes `True`
if cond[0] is True:
sparc_to_grammar = {
# add not
SparcType.FilterIn: GrammarType.FilterNotInNes,
SparcType.FilterLike: GrammarType.FilterNotLike
}
if sparc_type in sparc_to_grammar:
filter_grammar = Filter(sparc_to_grammar[sparc_type])
else:
raise ConfigurationError(f"No support for sparc type:{sparc_type}")
else:
if is_nested_query:
sparc_to_direct_nested = {
SparcType.FilterBetween: GrammarType.FilterBetweenNes,
SparcType.FilterEqual: GrammarType.FilterEqualNes,
SparcType.FilterNeq: GrammarType.FilterNeqNes,
SparcType.FilterGreater: GrammarType.FilterGreaterNes,
SparcType.FilterLess: GrammarType.FilterLessNes,
SparcType.FilterLeq: GrammarType.FilterLeqNes,
SparcType.FilterGeq: GrammarType.FilterGeqNes,
# TODO: like and in does not care nested
SparcType.FilterLike: GrammarType.FilterLike,
SparcType.FilterIn: GrammarType.FilterInNes,
}
if sparc_type in sparc_to_direct_nested:
filter_grammar = Filter(sparc_to_direct_nested[sparc_type])
else:
raise ConfigurationError(f"Grammar {sparc_type} does not support nested setting")
else:
sparc_to_grammar = {
SparcType.FilterBetween: GrammarType.FilterBetween,
SparcType.FilterEqual: GrammarType.FilterEqual,
SparcType.FilterNeq: GrammarType.FilterNeq,
SparcType.FilterGreater: GrammarType.FilterGreater,
SparcType.FilterLess: GrammarType.FilterLess,
SparcType.FilterLeq: GrammarType.FilterLeq,
SparcType.FilterGeq: GrammarType.FilterGeq,
SparcType.FilterLike: GrammarType.FilterLike,
SparcType.FilterIn: GrammarType.FilterInNes,
}
if sparc_type in sparc_to_grammar:
filter_grammar = Filter(sparc_to_grammar[sparc_type])
else:
raise ConfigurationError(f"Grammar {sparc_type} does not have a corresponding Filter")
inter_seq.append(filter_grammar)
# A, C, T
agg_id = cond[2][1][0]
col_ind = cond[2][1][1]
inter_seq.extend(self._process_agg_col_table(sql_clause=sql_clause,
agg_id=agg_id,
col_ind=col_ind))
# handle with nested query
if is_nested_query:
nested_sql_clause = cond[3]
root_grammar = self._process_root(nested_sql_clause)
inter_seq.extend(root_grammar)
return inter_seq
def _process_filter(self, sql_clause) -> List[Action]:
"""
Process where and having clause, merge them into filter operations
:return: filter action sequences
"""
sql_where_clause = sql_clause['where']
sql_having_clause = sql_clause['having']
#assert isinstance(sql_where_clause, list) or isinstance(sql_where_clause, tuple)
#assert isinstance(sql_having_clause, list) or isinstance(sql_having_clause, tuple)
# pre-condition: the where or having has one non-zero
assert len(sql_where_clause) != 0 or len(sql_having_clause) != 0
inter_seq = []
if len(sql_where_clause) != 0 and len(sql_having_clause) != 0:
# TODO: why do not statistic them together
filter_grammar = Filter(GrammarType.FilterAnd)
inter_seq.append(filter_grammar)
if len(sql_where_clause) != 0:
# only ordinary number : where1 and where2 or where3 ...
if len(sql_where_clause) == 1:
cond_grammar = self._process_condition(sql_clause=sql_clause,
cond=sql_where_clause[0])
inter_seq.extend(cond_grammar)
elif len(sql_where_clause) == 3:
# check what is the operation
if sql_where_clause[1] == 'or':
filter_grammar = Filter(GrammarType.FilterOr)
else:
filter_grammar = Filter(GrammarType.FilterAnd)
inter_seq.append(filter_grammar)
# TODO: parent feeding
left_cond_grammar = self._process_condition(sql_clause=sql_clause,
cond=sql_where_clause[0])
right_cond_grammar = self._process_condition(sql_clause=sql_clause,
cond=sql_where_clause[2])
inter_seq.extend(left_cond_grammar)
inter_seq.extend(right_cond_grammar)
else:
# enumerate all combinations
op_to_grammar = {
'and': [Filter(GrammarType.FilterAnd)],
'or': [Filter(GrammarType.FilterOr)]
}
# get operation str, and convert them into grammar
left_op = sql_where_clause[1]
left_filter_grammar = op_to_grammar[left_op]
right_op = sql_where_clause[3]
right_filter_grammar = op_to_grammar[right_op]
left_cond_grammar: List[Action] = self._process_condition(sql_clause=sql_clause,
cond=sql_where_clause[0])
middle_cond_grammar = self._process_condition(sql_clause=sql_clause,
cond=sql_where_clause[2])
right_cond_grammar = self._process_condition(sql_clause=sql_clause,
cond=sql_where_clause[4])
# the priority of `and` is higher than `or`, so we care for the order
extend_list = [left_cond_grammar, middle_cond_grammar, right_cond_grammar]
combine_type = f'{left_op}@{right_op}'
if combine_type == 'and@and' or combine_type == 'or@or' or combine_type == 'and@or':
# 1. where1 and(l) where2 and(r) where3 -> and(r) and(l) where1 where2 where3
# 2. where1 and(l) where2 or(r) where3 -> or(r) and(l) where1 where2 where3
extend_list.insert(0, left_filter_grammar)
extend_list.insert(0, right_filter_grammar)
elif combine_type == 'or@and':
# where1 or(l) where2 and(r) where3 -> or(l) where1 and(r) where2 where3
extend_list.insert(1, right_filter_grammar)
extend_list.insert(0, left_filter_grammar)
else:
raise ConfigurationError(f'We do not support Filter combine type:{combine_type}')
for extend_grammar in extend_list:
inter_seq.extend(extend_grammar)
# TODO: now we do not consider where which has more clauses than 3
# handle having clause
if len(sql_having_clause) != 0:
cond_grammar = self._process_condition(sql_clause=sql_clause,
cond=sql_having_clause[0])
inter_seq.extend(cond_grammar)
# no non-terminal
return inter_seq
def _process_order(self, sql_clause) -> List[Action]:
"""
the orderby clause will be mapped into Order, A, C, T
:return:
"""
sql_order_clause = sql_clause['orderBy']
sql_limit_clause = sql_clause['limit']
# pre-condition: if has order by, it will be processed by this function
assert len(sql_order_clause) != 0
inter_seq = []
if sql_limit_clause is not None:
if sql_order_clause[0] == 'asc':
order_grammar = Order(GrammarType.OrderAscLim)
else:
order_grammar = Order(GrammarType.OrderDesLim)
else:
if sql_order_clause[0] == 'asc':
order_grammar = Order(GrammarType.OrderAsc)
else:
order_grammar = Order(GrammarType.OrderDes)
# orderBy grammar
inter_seq.append(order_grammar)
# aggregate grammar
agg_id = sql_order_clause[1][0][1][0]
col_ind = sql_order_clause[1][0][1][1]
inter_seq.extend(self._process_agg_col_table(sql_clause=sql_clause,
agg_id=agg_id,
col_ind=col_ind))
# no non-terminal
return inter_seq
def _process_root(self, sql_clause: Dict) -> List[Action]:
"""
Process statement and return its corresponding transaction
:return: grammar transaction clauses
"""
def _process_step(step_state: str):
"""
Process every step using the step state
:param step_state: represent the top state which should be parsed
:return: returned inner intermediate action sequence and the next state
"""
call_back_mapping = {
'Select': self._process_select,
'Order': self._process_order,
'Filter': self._process_filter
}
return call_back_mapping[step_state](sql_clause)
if sql_clause['orderBy']:
order_used = True
else:
order_used = False
# check the where
if sql_clause['where'] == [] and sql_clause['having'] == []:
filter_used = False
else:
filter_used = True
if filter_used and order_used:
inter_seq, next_states = [Root(GrammarType.RootSFO)], ['Select', 'Filter', 'Order']
elif filter_used:
inter_seq, next_states = [Root(GrammarType.RootSF)], ['Select', 'Filter']
elif order_used:
inter_seq, next_states = [Root(GrammarType.RootSO)], ['Select', 'Order']
else:
inter_seq, next_states = [Root(GrammarType.RootS)], ['Select']
while len(next_states) > 0:
# pop from left to right, to keep the readable
cur_state = next_states.pop(0)
# parse it
step_inter_seq = _process_step(cur_state)
inter_seq.extend(step_inter_seq)
return inter_seq
class ActionTreeNode(object):
def __init__(self, action: Action):
self.action = action
self.child: List[Optional[ActionTreeNode]] = []
# drop self
if isinstance(self.action.ins_id, int):
all_child = self.action.grammar_dict[self.action.ins_id].split(' ')[1:]
else:
all_child = []
for child_name in all_child:
if child_name not in Keywords:
# placeholder
self.child.append(None)
def full_in_child(self) -> bool:
"""
        Check whether all child slots of this node are filled. Return True if no more children can be inserted; otherwise return False.
:return:
"""
# if is a non terminal
if None in self.child:
return False
# successfully add the child, return true.
return True
def add_child(self, action_node):
ind = self.child.index(None)
self.child[ind] = action_node
class ActionConverter(object):
"""
    This class is designed for post-processing a SemQL (also named action) sequence into a SQL clause. Note that we DO NOT
    handle any logical problem in this class (e.g. that the column should be in the corresponding SQL in `A -> None C T`). You
    should process it in another separate function such as `ConditionStatelet`.
"""
def __init__(self, db_context: SparcDBContext):
"""
:param db_context: data context for database
"""
self.db_context = db_context
self.col_names: Dict[int, TableColumn] = db_context.id_to_col
self.table_names: Dict[int, Table] = db_context.id_to_tab
self.graph, self.foreign_pairs = self._build_graph()
self.processor = {
Select: self._process_select,
Order: self._process_order,
Filter: self._process_filter
}
def translate_to_sql(self, action_seq: List[str]):
"""
        Given an action sequence, post-process it into a SQL clause.
:param action_seq: `List[str]` each item represents an action defined in context/grammar.py
:return:
"""
# convert action sequence into action
action_seq = [Action.from_str(action_repr) for action_repr in action_seq]
        # translate the action sequence into a tree
node_queue: List[ActionTreeNode] = []
root_node = None
seq_len = len(action_seq)
for i in range(seq_len):
# build tree node
tree_node = ActionTreeNode(action_seq[i])
if i == 0:
root_node = tree_node
# try to append current node into the first element of node queue
else:
cur_node = node_queue[-1]
                # if the current node is full, pop nodes until one can accept a child
                while cur_node.full_in_child():
                    # drop the full node from the stack
                    node_queue.pop(-1)
                    # update current node
                    cur_node = node_queue[-1]
cur_node.add_child(tree_node)
node_queue.append(tree_node)
# from root node, traverse the tree
statement_clause = self._process_statement(root_node)
# clean up spaces
statement_clause = re.sub('\\s+', ' ', statement_clause)
return statement_clause
def _process_join(self, component_mapping: Dict[str, List[ActionTreeNode]], repr: Dict[str, str],
is_subquery: bool):
"""
:param component_mapping: component mapping records `select`, `order`, `where`, `having`, `from`.
        :param is_subquery: whether this is a subquery; if True, render the ON clause.
:return: From clause result
"""
def _process_on(_route):
for i in range(1, len(_route)):
for j in range(0, i):
tab_1_name, tab_2_name = _route[j].split(' ')[0], _route[i].split(' ')[0]
candidate_routes = []
for key_tab_name, key_col_name, val_tab_name, val_col_name in self.foreign_pairs:
if tab_1_name == key_tab_name and tab_2_name == val_tab_name:
# TODO: the order of ON matters?
candidate_routes.append((
f' ON {key_tab_name}.{key_col_name} = {val_tab_name}.{val_col_name}',
key_col_name, val_col_name))
# check the number of valid routes
if len(candidate_routes) == 1:
_route[i] += candidate_routes[0][0]
elif len(candidate_routes) > 1:
# course_id = pred_id, course_id = course_id (between two tables)
best_route = candidate_routes[0][0]
                        # there is a cycle; prefer the pair whose key column equals the value column
for _route_repr, key_col_name, val_col_name in candidate_routes:
if key_col_name == val_col_name:
best_route = _route_repr
break
_route[i] += best_route
return _route
# for group by between two tables
used_tab_names = [node.action.ins_id for node in component_mapping['from']]
join_tables = []
if len(used_tab_names) != 2:
# TODO: too complex to handle
join_tables = used_tab_names
elif len(used_tab_names) == 2:
for tab_name in used_tab_names:
# any table not appeared
if tab_name not in self.graph.vertices:
join_tables = used_tab_names
break
                # the break above skips the for-else branch below
else:
tab_start, tab_end = used_tab_names[0], used_tab_names[1]
route = list(self.graph.dijkstra(tab_start, tab_end))
if is_subquery:
route = _process_on(route)
join_tables = route if len(route) != 0 else used_tab_names
repr['from'] = 'FROM ' + ' JOIN '.join(join_tables)
def _process_group_by(self, component_mapping: Dict[str, List[ActionTreeNode]], repr: Dict[str, str]):
"""
Define rules to judge whether the SQL should contain GROUP BY
:param component_mapping:
:return:
"""
having_nodes = component_mapping['having']
# first determine whether to group by
if len(having_nodes) > 0:
keep_group_by = True
else:
keep_group_by = False
select_nodes = component_mapping['select']
order_nodes = component_mapping['order']
# if there are two or more columns in select and any one in [count,max,min,avg,sum], we need group
if len(select_nodes) > 1 and any([node for node in select_nodes
if node.action.ins_id != GrammarType.ANone]):
keep_group_by = True
elif any([node for node in order_nodes if node.action.ins_id != GrammarType.ANone]):
keep_group_by = True
# for group by between two tables
used_tab_names = [node.action.ins_id for node in component_mapping['from']]
if not keep_group_by:
return
else:
group_by_clause = None
from_nodes = component_mapping['from']
if len(from_nodes) != 2:
                # TODO: if more than 2 tables are involved, pick a column without aggregation as the GROUP BY key;
                # the algorithm may be unstable, but we use it for now.
for node in select_nodes:
if node.action.ins_id == GrammarType.ANone:
# TODO: no more mapping
agg_repr = self._process_agg(node, {'from': []})
group_by_clause = f'GROUP BY {agg_repr}'
break
# if all have aggregation
# TODO: where is ORDER BY ?
if group_by_clause is None and len(having_nodes) > 0:
# without any aggregator
for agg_node in select_nodes:
col_name = agg_node.child[0].action.ins_id
tab_name = agg_node.child[1].action.ins_id
if col_name == '*':
continue
agg_repr = f'{tab_name}.{col_name}'
group_by_clause = f'GROUP BY {agg_repr}'
break
# TODO: rule-based. When there are two tables, we should group by via foreign keys
else:
if len(select_nodes) == 1 and len(having_nodes) == 0:
# TODO: check the linking
pass
# find foreign key
for key_tab_name, key_col_name, val_tab_name, _ in self.foreign_pairs:
if key_tab_name in used_tab_names and val_tab_name in used_tab_names:
agg_repr = f'{key_tab_name}.{key_col_name}'
assert key_col_name != '*'
group_by_clause = f'GROUP BY {agg_repr}'
break
# if having, select the column in select as the group by one
if group_by_clause is None:
for node in select_nodes:
if node.action.ins_id == GrammarType.ANone:
agg_repr = self._process_agg(node, {'from': []})
if agg_repr == '*':
continue
group_by_clause = f'GROUP BY {agg_repr}'
break
if group_by_clause is None:
# remove having
if 'having' in repr:
repr.pop('having')
if group_by_clause is not None:
# for separate group by and others
repr['group'] = group_by_clause
else:
return
def _process_statement(self, node: Optional[ActionTreeNode]) -> str:
"""
Process statement node and return the SQL clause of statement
:return: SQL clause equal to node
"""
action = node.action
action_type = action.ins_id
assert isinstance(action, Statement)
if action_type == GrammarType.StateNone:
assert len(node.child) == 1
root_repr = self._process_root(node.child[0], False)
return root_repr
else:
# two children
assert len(node.child) == 2
left_child = self._process_root(node.child[0], False)
right_child = self._process_root(node.child[1], False)
if action_type == GrammarType.StateInter:
return f'{left_child} INTERSECT {right_child}'
elif action_type == GrammarType.StateExcept:
return f'{left_child} EXCEPT {right_child}'
elif action_type == GrammarType.StateUnion:
return f'{left_child} UNION {right_child}'
else:
raise ConfigurationError(f'Not support for statement type:{action_type}')
def _process_root(self, node: Optional[ActionTreeNode], is_subquery):
"""
Process root node and return the root representation
:param node:
:return:
"""
# traverse node child
assert isinstance(node.action, Root)
component_mapping: Dict[str, List[ActionTreeNode]] = {
'select': [],
'where': [],
'having': [],
'order': [],
'from': []
}
repr_mapping: Dict[str, str] = {}
for node_son in node.child:
action_cls = node_son.action.__class__
# must in Select, Order or Filter
assert action_cls in [Select, Order, Filter]
process_func = self.processor[action_cls]
process_func(node_son, component_mapping, repr_mapping)
# process group by
# TODO: here we assume that group by could occur in sub-queries.
self._process_group_by(component_mapping, repr_mapping)
# TODO: if is subquery, we should explain ON clause explicitly
self._process_join(component_mapping, repr_mapping, is_subquery)
action_repr = ''
# handle them in order
for component_key in ['select', 'from', 'where', 'group', 'having', 'order']:
if component_key in repr_mapping:
action_repr += repr_mapping[component_key] + ' '
action_repr = action_repr.strip()
return action_repr
def _process_order(self, node: Optional[ActionTreeNode], component: Dict[str, List[ActionTreeNode]],
repr: Dict[str, str]):
"""
Process order by clause
"""
assert isinstance(node.action, Order)
assert len(node.child) == 1
assert isinstance(node.child[0].action, A)
agg_repr = self._process_agg(node.child[0], component)
basic_repr = f'ORDER BY {agg_repr} '
action_type = node.action.ins_id
if action_type == GrammarType.OrderAsc:
basic_repr += 'ASC'
elif action_type == GrammarType.OrderDes:
basic_repr += 'DESC'
elif action_type == GrammarType.OrderAscLim:
basic_repr += 'ASC LIMIT 1'
elif action_type == GrammarType.OrderDesLim:
basic_repr += 'DESC LIMIT 1'
else:
raise ConfigurationError(f'Not support for order type:{action_type}')
repr['order'] = basic_repr
component['order'] = node.child
def _process_select(self, node: Optional[ActionTreeNode], component: Dict[str, List[ActionTreeNode]],
repr: Dict[str, str]):
"""
        Process the Select node and build the SELECT clause
        :return: modifies repr in place, setting the key `select` to the SELECT clause representation
"""
assert isinstance(node.action, Select)
agg_reprs = []
for child in node.child:
agg_reprs.append(self._process_agg(child, component))
action_repr = ','.join(agg_reprs)
repr['select'] = f'SELECT {action_repr}'
component['select'] = node.child
def _process_filter(self, node: Optional[ActionTreeNode], component: Dict[str, List[ActionTreeNode]],
repr: Dict[str, str]):
"""
Process filter, return where clause and having clause
:param node: root node of Filter
        :return: modifies repr in place to store the representations of the different components
"""
def _mark_node_type(cur_node: ActionTreeNode, keep_having: bool):
"""
Split current node into two separate trees.
:param keep_having: specify whether to keep having nodes
"""
# deep copy a node
for ind, child_node in enumerate(cur_node.child):
action_type = child_node.action.ins_id
# if root node, do not mark
if isinstance(child_node.action, Root):
continue
if isinstance(child_node.action, Filter):
_mark_node_type(child_node, keep_having)
continue
# if it is A node and mark where
if action_type != GrammarType.ANone:
# action max/min... -> having, not keep having -> where
if not keep_having:
# assign having node as None
cur_node.child[ind] = None
else:
# add having node into current mapping
component['having'].append(child_node)
elif action_type == GrammarType.ANone:
# action none -> where, keep having -> having
if keep_having:
cur_node.child[ind] = None
else:
component['where'].append(child_node)
# get two separate root nodes
where_root_node = deepcopy(node)
_mark_node_type(where_root_node, False)
having_root_node = deepcopy(node)
_mark_node_type(having_root_node, True)
def _recursive_repr(_inner_node: Optional[ActionTreeNode]) -> Optional[str]:
"""
Recursively represent the _inner_node
:return: string or None(if all marked as None)
"""
assert isinstance(_inner_node.action, Filter)
action_type = _inner_node.action.ins_id
if action_type == GrammarType.FilterAnd or action_type == GrammarType.FilterOr:
# recursive find filter
assert len(_inner_node.child) == 2
left_repr = None
right_repr = None
if _inner_node.child[0] is not None:
# left repr and right repr
left_repr = _recursive_repr(_inner_node.child[0])
if _inner_node.child[1] is not None:
right_repr = _recursive_repr(_inner_node.child[1])
# return AND OR and etc.
if left_repr and right_repr:
if action_type == GrammarType.FilterAnd:
return f'{left_repr} AND {right_repr}'
else:
return f'{left_repr} OR {right_repr}'
# if right is None, means (AND WHERE HAVING)
elif left_repr:
return left_repr
elif right_repr:
return right_repr
else:
return None
# plain or subquery
else:
if action_type == GrammarType.FilterNotLike:
template = '{} NOT LIKE '
elif action_type == GrammarType.FilterLike:
template = '{} LIKE '
elif action_type == GrammarType.FilterEqual or action_type == GrammarType.FilterEqualNes:
template = '{} = '
elif action_type == GrammarType.FilterGreater or action_type == GrammarType.FilterGreaterNes:
template = '{} > '
elif action_type == GrammarType.FilterLess or action_type == GrammarType.FilterLessNes:
template = '{} < '
elif action_type == GrammarType.FilterGeq or action_type == GrammarType.FilterGeqNes:
template = '{} >= '
elif action_type == GrammarType.FilterLeq or action_type == GrammarType.FilterLeqNes:
template = '{} <= '
elif action_type == GrammarType.FilterNeq or action_type == GrammarType.FilterNeqNes:
template = '{} != '
elif action_type == GrammarType.FilterBetween or action_type == GrammarType.FilterBetweenNes:
template = '{} BETWEEN 1 AND '
elif action_type == GrammarType.FilterInNes:
template = '{} IN '
elif action_type == GrammarType.FilterNotInNes:
template = '{} NOT IN '
else:
raise ConfigurationError(f'Error on Filter processing: not filter type: {len(action_type)}')
assert len(_inner_node.child) >= 1
if _inner_node.child[0] is not None:
assert isinstance(_inner_node.child[0].action, A)
agg_repr = self._process_agg(_inner_node.child[0], component)
# judge as HAVING
if len(_inner_node.child) == 1:
return template.format(agg_repr) + '1'
# sub query
elif len(_inner_node.child) == 2:
assert isinstance(_inner_node.child[0].action, A)
assert isinstance(_inner_node.child[1].action, Root)
agg_repr = self._process_agg(_inner_node.child[0], component)
# sub-query start, allocate a new mapping dictionary
root_repr = self._process_root(_inner_node.child[1], is_subquery=True)
return template.format(agg_repr) + f'( {root_repr} )'
else:
raise ConfigurationError(
f'Error on Filter processing: not supported child number: {len(_inner_node.child)}')
else:
return None
where_clause = _recursive_repr(where_root_node)
having_clause = _recursive_repr(having_root_node)
if where_clause:
repr['where'] = f'WHERE {where_clause}'
if having_clause:
repr['having'] = f'HAVING {having_clause}'
if not where_clause and not having_clause:
            raise ConfigurationError('There is no WHERE or HAVING clause, but there is a Filter node.')
@staticmethod
def _process_agg(node: Optional[ActionTreeNode], mapping: Dict[str, List[ActionTreeNode]]) -> str:
"""
Process column, table and aggregation, return the representation
:return: representation of aggregation
"""
# process aggregation, column and table
assert isinstance(node.action, A)
# C and T
assert len(node.child) == 2
        # TODO: assume C always comes before T
assert isinstance(node.child[0].action, C)
assert isinstance(node.child[1].action, T)
col_name = node.child[0].action.ins_id
tab_name = node.child[1].action.ins_id
        # TODO: we store the Node instead of the table name to keep things consistent
used_tab_names = [node.action.ins_id for node in mapping['from']]
if tab_name not in used_tab_names:
mapping['from'].append(node.child[1])
# add tab_name into mapping
action_type = node.action.ins_id
if action_type == GrammarType.ANone:
if col_name == '*':
return '*'
else:
return f'{tab_name}.{col_name}'
else:
template = ''
if action_type == GrammarType.ACount:
template = 'count({})'
elif action_type == GrammarType.AAvg:
template = 'avg({})'
elif action_type == GrammarType.ASum:
template = 'sum({})'
elif action_type == GrammarType.AMin:
template = 'min({})'
elif action_type == GrammarType.AMax:
template = 'max({})'
# * means direct return
if col_name == '*':
return template.format(col_name)
else:
return template.format(f'{tab_name}.{col_name}')
def _build_graph(self):
"""
        Build the graph using primary/foreign keys. Used in the multi-table scenario.
:return:
"""
        # edges for building the graph used to find the shortest path
relations = []
foreign_pairs = []
def _get_tab_col_name(full_name):
name_parts = full_name.split(':')
# the first one is type
name_type = name_parts[0]
if name_type != 'column':
return None, None
col_type = name_parts[1]
# TODO: single direction, make the key only FOREIGN
if col_type not in ['foreign', 'primary']:
return None, None
# fetch the column name
tab_name = name_parts[2]
col_name = name_parts[3]
return tab_name, col_name
for key_full_name in self.db_context.knowledge_graph.neighbors.keys():
# get key name
key_tab_name, key_col_name = _get_tab_col_name(key_full_name)
if key_col_name is None:
continue
linked_value_set = self.db_context.knowledge_graph.neighbors[key_full_name]
for val_full_name in linked_value_set:
val_tab_name, val_col_name = _get_tab_col_name(val_full_name)
if val_col_name is None:
continue
# else add them into graph
relations.append((key_tab_name, val_tab_name))
foreign_pairs.append((key_tab_name, key_col_name, val_tab_name, val_col_name))
return Graph(relations), foreign_pairs
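# A minimal usage sketch (the action strings passed in are hypothetical; their exact
# format is defined in context/grammar.py, and `db_context` must be a SparcDBContext):
def _example_translate_to_sql(db_context: SparcDBContext, action_strings: List[str]) -> str:
    converter = ActionConverter(db_context)
    return converter.translate_to_sql(action_strings)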
|
ContextualSP/interactive_text_to_sql/src/context/converter.py/0
|
{
"file_path": "ContextualSP/interactive_text_to_sql/src/context/converter.py",
"repo_id": "ContextualSP",
"token_count": 23521
}
| 255 |
# coding: utf-8
import random
import copy
from typing import List
class Node:
STATEMENT_TYPE = 0
ROOT_TYPE = 1
SELECT_TYPE = 2
FILTER_TYPE = 3
ORDER_TYPE = 4
A_TYPE = 5
COLUMN_TYPE = 11
TABLE_TYPE = 12
KEYWORD_TYPE = 13
VALUE_TYPE = '3'
TYPE_DICT = {'SQL': None, 'Statement': STATEMENT_TYPE, 'Root': ROOT_TYPE, 'Select': SELECT_TYPE, 'Filter': FILTER_TYPE,
'Order': ORDER_TYPE, 'A': A_TYPE, 'C': COLUMN_TYPE, 'T': TABLE_TYPE, 'Value': VALUE_TYPE}
KEYWORD_LIST = [
'intersect', 'union', 'except', # Statement
'asc', 'des', 'limit', # Order
'and', 'or', '>', '<', '>=', '<=', '=', '!=', 'between', 'like', 'not_like', 'in', 'not_in', # Filter
'max', 'min', 'count', 'sum', 'avg', 'none' # A
]
RULE_TEMPLATES = {
# SQL
'Statement': 'Find out {0}',
# Statement
'intersect Root Root': 'the common part of the set of {0} and the set of {1}',
'union Root Root': 'everyone in the set of {0} and the set of {1}',
'except Root Root': 'everyone in the set of {0} but not in the set of {1}',
'Root': '{0}',
# Root
'Select Filter Order': '{0}, {1}, {2}',
'Select Filter': '{0}, {1}',
'Select Order': '{0}, {1}',
'Select': '{0}',
# Select
'A': '{0}',
'A A': '{0} and {1}',
'A A A': '{0} , {1} and {2}',
'A A A A': '{0} , {1} , {2} and {3}',
'A A A A A': '{0} , {1} , {2} , {3} and {4}',
'A A A A A A': '{0} , {1} , {2} , {3} , {4} and {5}',
# A
'none C T': 'the {0} of {1}',
'max C T': 'the maximum {0} of {1}',
'min C T': 'the minimum {0} of {1}',
'count C T': 'the number of {0} of {1}',
'sum C T': 'the sum of {0} of {1}',
'avg C T': 'the average {0} of {1}',
# Filter
'Filter and Filter': '{0} and {1}',
'Filter or Filter': '{0} or {1}',
'= A': 'where {0} is {1}'.format('{0}', VALUE_TYPE),#['where {0} equals to {1}'.format('{0}', VALUE_TYPE), 'where {0} is {1}'.format('{0}', VALUE_TYPE)],
'> A': 'where {0} greater than {1}'.format('{0}', VALUE_TYPE),
'< A': 'where {0} less than {1}'.format('{0}', VALUE_TYPE),
'>= A': 'where {0} greater than or equals to {1}'.format('{0}', VALUE_TYPE),
'<= A': 'where {0} less than or equals to {1}'.format('{0}', VALUE_TYPE),
'!= A': 'where {0} not equals to {1}'.format('{0}', VALUE_TYPE),
'between A': 'where {0} between {1} and {2}'.format('{0}', VALUE_TYPE, VALUE_TYPE),
'like A': 'where {0} like {1}'.format('{0}', VALUE_TYPE),
'not_like A': 'where {0} not like {1}'.format('{0}', VALUE_TYPE),
'= A Root': ['where {0} equals to {1}', 'where {0} is {1}'],
'> A Root': 'where {0} greater than {1}',
'< A Root': 'where {0} less than {1}',
'>= A Root': 'where {0} greater than or equals to {1}',
'<= A Root': 'where {0} less than or equals to {1}',
'!= A Root': 'where {0} not equals to {1}',
'between A Root': 'where {0} is between {1}', # todo: useless
'in A Root': 'where {0} is in the set of {1}',
'not_in A Root': 'where {0} is not in the set of {1}',
# Order
'asc A': 'in ascending order of {0}',
'des A': 'in descending order of {0}',
        'asc A limit': 'in ascending order of {0}' + ' with maximum {0} item(s)'.format(VALUE_TYPE),
        'des A limit': 'in descending order of {0}' + ' with maximum {0} item(s)'.format(VALUE_TYPE),
}
RULE_TEMPLATES_WITHOUT_TABLE = copy.copy(RULE_TEMPLATES)
RULE_TEMPLATES_WITHOUT_TABLE.update({
'none C T': 'the {0}',
'max C T': 'the maximum {0}',
'min C T': 'the minimum {0}',
'count C T': 'the number of {0}',
'sum C T': 'the sum of {0}',
'avg C T': 'the average {0}',
})
def __init__(self, node_id: int,
text: str,
statement: str,
children_tokens: List[str],
father=None,
depth=0):
self.node_id = node_id
self.text = text
self.statement = statement.split(' -> ')[1].strip() # align with irnet.context.grammar
if text in Node.TYPE_DICT:
self.type = Node.TYPE_DICT.get(text)
elif text in Node.KEYWORD_LIST:
self.type = Node.KEYWORD_TYPE
else:
raise Exception('Node type error')
self._children_tokens = children_tokens
self.children = []
self.father = father
self.depth = depth
# father_text = 'None' if father is None else father.text
# print(f'Created node: text={text}, children_tokens={children_tokens}, father={father_text}')
self.checked: bool = True
self.more_info = {} # data container for outside operations
def bfs(self, process_f=lambda x: x, node_only=True):
ret_list = []
queue = [self]
while queue:
node = queue[0]
del queue[0]
if isinstance(node, Node):
ret_list.append(process_f(node))
elif isinstance(node, str) and not node_only:
ret_list.append(process_f(node))
if isinstance(node, Node) and node.children:
queue += node.children
return ret_list
def restatement(self, with_table=True):
        # 1. Check Root nodes in the subtree; if any unchecked Root node is left, raise an error  # todo: may be eliminated
subtree_nodes = self.bfs()
if False in [node.checked for node in subtree_nodes if not isinstance(node, str) and node.type == Node.ROOT_TYPE]:
raise Exception('Unchecked Root node exists, check it first')
# 2. Restate each child
return self._restate_node(self, with_table=with_table)
def restatement_with_tag(self):
        # 1. Check Root nodes in the subtree; if any unchecked Root node is left, raise an error  # todo: may be eliminated
subtree_nodes = self.bfs()
if False in [node.checked for node in subtree_nodes if
not isinstance(node, str) and node.type == Node.ROOT_TYPE]:
raise Exception('Unchecked Root node exists, check it first')
# 2. Restate each child
return self._restate_node_with_tag(self)
@staticmethod
def _restate_node(node, with_table=True):
if isinstance(node, str) and node not in Node.KEYWORD_LIST:
            raise Exception('Unexpected terminal token: {}'.format(node))
node_statement = node.statement
templates = Node.RULE_TEMPLATES if with_table else Node.RULE_TEMPLATES_WITHOUT_TABLE
if node_statement in templates:
rule_template = templates.get(node_statement)
if isinstance(rule_template, List):
rule_template = random.sample(rule_template, 1)[0]
format_strings = []
for child in node.children:
if isinstance(child, str) and child in Node.KEYWORD_LIST:
continue
format_strings.append(Node._restate_node(child, with_table=with_table))
return rule_template.format(*format_strings)
else:
            return ' '.join(node_statement.split('_')) if node_statement != '*' else 'items'  # select *
@staticmethod
def _restate_node_with_tag(node, with_table=True):
if isinstance(node, str) and node not in Node.KEYWORD_LIST:
            raise Exception('Unexpected terminal token: {}'.format(node))
node_statement = node.statement
templates = Node.RULE_TEMPLATES if with_table else Node.RULE_TEMPLATES_WITHOUT_TABLE
if node_statement in templates:
rule_template = templates.get(node_statement)
if isinstance(rule_template, List):
rule_template = random.sample(rule_template, 1)[0]
sub_strings = []
sub_string_tags = []
for child in node.children:
if isinstance(child, str) and child in Node.KEYWORD_LIST:
continue
node_restatement_string, node_restatement_tag = Node._restate_node_with_tag(child)
sub_strings.append(node_restatement_string)
sub_string_tags.append(node_restatement_tag)
restatement_string = rule_template.format(*sub_strings)
restatement_tag = []
nonterminal_children = [_ for _ in node.children if isinstance(_, Node)]
for word in rule_template.split():
if word.startswith('{') and word.endswith('}'):
placeholder_idx = int(word[1:-1])
if sub_string_tags[placeholder_idx] is not None:
restatement_tag += sub_string_tags[placeholder_idx]
else:
                        restatement_tag += [nonterminal_children[placeholder_idx].text] * len(sub_strings[placeholder_idx].split())
else:
restatement_tag.append(node.text)
return restatement_string, restatement_tag
else:
            return ' '.join(node_statement.split('_')) if node_statement != '*' else 'items', None
# todo: tag of table and column with split char
@staticmethod
def print_subtree(node):
def _print_subtree(node):
print(' ' * node.depth + node.text)
for child in node.children:
if isinstance(child, Node):
_print_subtree(child)
else:
print(' ' * (node.depth + 1) + child)
_print_subtree(node)
def clear_more_info_recursively(self, keys=None):
if keys is None:
def clear_more_info(node):
node.more_info.clear()
else:
def clear_more_info(node):
for key in keys:
if key in node.more_info:
del node.more_info[key]
self.bfs(process_f=clear_more_info)
def compare_node(self, node) -> bool:
if self.type == node.type and self.children == node.children:
return True
else:
return False
def compare_tree(self, node) -> bool:
if self.type != node.type or self.statement != node.statement:
self.more_info['subtree_equal'] = node.more_info['subtree_equal'] = False
self.bfs(lambda x: x.more_info.update({'subtree_equal': False}))
node.bfs(lambda x: x.more_info.update({'subtree_equal': False}))
return False
else:
assert len(self.children) == len(node.children)
status = True
for child1, child2 in zip(self.children, node.children):
if isinstance(child1, str) or isinstance(child2, str):
if child1 != child2:
status = False
elif child1.compare_tree(child2) is False:
status = False
self.more_info['subtree_equal'] = node.more_info['subtree_equal'] = status
self.bfs(lambda x: x.more_info.update({'subtree_equal': status}))
node.bfs(lambda x: x.more_info.update({'subtree_equal': status}))
return status
@staticmethod
def from_statements(statements):
root, _ = parse_sql_tree(statements)
return root
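# A minimal usage sketch (the grammar statements below are hypothetical but follow
# the `X -> Y Z` format consumed by parse_sql_tree):
def _example_node_restatement():
    statements = ['Statement -> Root', 'Root -> Select',
                  'Select -> A', 'A -> count C T', 'C -> *', 'T -> singer']
    root = Node.from_statements(statements)
    return root.restatement()   # 'Find out the number of items of singer'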
def is_nonterminal(token):
letter = token[0]
if ord('A') <= ord(letter) <= ord('Z'):
return True
else:
return False
def parse_sql_tree(tree_statements):
# print(tree_statements)
max_depth = -1
depth = 0
stack = []
root = Node(0, 'SQL', 'SQL -> Statement', ['Statement'], depth=0)
stack.append(root)
for state_id, statement in enumerate(tree_statements):
assert statement.split(' -> ')[0] == stack[-1]._children_tokens[0] # non-terminal match
# print(f'statement = {statement}')
nonterminal, children = statement.split('->')
nonterminal = nonterminal.strip()
children = [child.strip() for child in children.strip().split(' ')]
node = Node(state_id, nonterminal, statement, children, father=stack[-1], depth=depth)
stack[-1].children.append(node)
del stack[-1]._children_tokens[0]
stack.append(node)
depth += 1
max_depth = max(max_depth, depth)
while stack:
# move terminal tokens from children_tokens into children
while stack[-1]._children_tokens and not is_nonterminal(stack[-1]._children_tokens[0]):
stack[-1].children.append(stack[-1]._children_tokens[0])
del stack[-1]._children_tokens[0]
# layer up if no child waiting for process
if len(stack[-1]._children_tokens) == 0:
stack.pop()
depth -= 1
if len(stack) == 0:
return root, max_depth
else:
break
return root, max_depth
if __name__ == '__main__':
# sql_tree = ['Statement -> Root', 'Root -> Select Filter',
# 'Select -> A', 'A -> count C T', 'C -> budget_in_billions', 'T -> department',
# 'Filter -> > A', 'A -> none C T', 'C -> age', 'T -> head']
sql_tree = ['Statement -> Root', 'Root -> Select Filter',
'Select -> A', 'A -> sum C T', 'C -> enr', 'T -> college',
'Filter -> not_in A Root', 'A -> none C T', 'C -> cname', 'T -> college',
'Root -> Select Filter',
'Select -> A', 'A -> none C T', 'C -> cname', 'T -> tryout',
'Filter -> = A', 'A -> none C T', 'C -> ppos', 'T -> tryout']
root, max_depth = parse_sql_tree(sql_tree)
Node.print_subtree(root)
restatement = root.restatement_with_tag()
print(restatement)
|
ContextualSP/interactive_text_to_sql/src/utils/semql_tree_util.py/0
|
{
"file_path": "ContextualSP/interactive_text_to_sql/src/utils/semql_tree_util.py",
"repo_id": "ContextualSP",
"token_count": 6627
}
| 256 |
"""Tools for working with CodaLab."""
import pickle as pickle
import json
import os
import platform
import shutil
import sys
import tempfile
from contextlib import contextmanager
import matplotlib.image as mpimg
from gtd.io import shell
__author__ = 'kelvinguu'
# need to be specified by user
worksheet = None
site = None
def get_uuids():
"""List all bundle UUIDs in the worksheet."""
result = shell('cl ls -w {} -u'.format(worksheet))
uuids = result.split('\n')
uuids = uuids[1:-1] # trim non uuids
return uuids
@contextmanager
def open_file(uuid, path):
"""Get the raw file content within a particular bundle at a particular path.
    The path must have no leading slash.
"""
# create temporary file just so we can get an unused file path
f = tempfile.NamedTemporaryFile()
f.close() # close and delete right away
fname = f.name
# download file to temporary path
cmd ='cl down -o {} -w {} {}/{}'.format(fname, worksheet, uuid, path)
try:
shell(cmd)
except RuntimeError:
try:
os.remove(fname) # if file exists, remove it
except OSError:
pass
raise IOError('Failed to open file {}/{}'.format(uuid, path))
f = open(fname)
yield f
f.close()
os.remove(fname) # delete temp file
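# A minimal usage sketch (the uuid is hypothetical, and the module-level
# `worksheet` must be configured before calling any helper in this module):
def _example_read_bundle_json(uuid):
    with open_file(uuid, 'meta.json') as f:
        return json.load(f)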
class Bundle(object):
def __init__(self, uuid):
self.uuid = uuid
def __getattr__(self, item):
"""
Load attributes: history, meta on demand
"""
if item == 'history':
try:
with open_file(self.uuid, 'history.cpkl') as f:
value = pickle.load(f)
except IOError:
value = {}
elif item == 'meta':
try:
with open_file(self.uuid, 'meta.json') as f:
value = json.load(f)
except IOError:
value = {}
# load codalab info
fields = ('uuid', 'name', 'bundle_type', 'state', 'time', 'remote')
cmd = 'cl info -w {} -f {} {}'.format(worksheet, ','.join(fields), self.uuid)
result = shell(cmd)
info = dict(list(zip(fields, result.split())))
value.update(info)
elif item in ('stderr', 'stdout'):
with open_file(self.uuid, item) as f:
value = f.read()
else:
raise AttributeError(item)
self.__setattr__(item, value)
return value
def __repr__(self):
return self.uuid
def load_img(self, img_path):
"""
Return an image object that can be immediately plotted with matplotlib
"""
with open_file(self.uuid, img_path) as f:
return mpimg.imread(f)
def download_logs(bundle, log_dir):
    if isinstance(bundle, str):
        bundle = Bundle(bundle)
    if bundle.meta['bundle_type'] != 'run' or bundle.meta['state'] == 'queued':
        print('Skipped {}\n'.format(bundle.uuid))
        return
    uuid = bundle.uuid
name = bundle.meta['name']
log_path = os.path.join(log_dir, '{}_{}'.format(name, uuid))
cmd ='cl down -o {} -w {} {}/logs'.format(log_path, worksheet, uuid)
print(uuid)
try:
shell(cmd, verbose=True)
except RuntimeError:
print('Failed to download', bundle.uuid)
print()
def report(render, uuids=None, reverse=True, limit=None):
if uuids is None:
uuids = get_uuids()
if reverse:
uuids = uuids[::-1]
if limit is not None:
uuids = uuids[:limit]
for uuid in uuids:
bundle = Bundle(uuid)
try:
render(bundle)
except Exception:
print('Failed to render', bundle.uuid)
def monitor_jobs(logdir, uuids=None, reverse=True, limit=None):
if os.path.exists(logdir):
delete = input('Overwrite existing logdir? ({})'.format(logdir))
if delete == 'y':
shutil.rmtree(logdir)
os.makedirs(logdir)
else:
os.makedirs(logdir)
print('Using logdir:', logdir)
report(lambda bd: download_logs(bd, logdir), uuids, reverse, limit)
def tensorboard(logdir):
print('Run this in bash:')
shell('tensorboard --logdir={}'.format(logdir), verbose=True, debug=True)
print('\nGo to TensorBoard: http://localhost:6006/')
def add_to_sys_path(path):
"""Add a path to the system PATH."""
sys.path.insert(0, path)
def configure_matplotlib():
"""Set Matplotlib backend to 'Agg', which is necessary on CodaLab docker image."""
import warnings
import matplotlib
with warnings.catch_warnings():
warnings.simplefilter('ignore')
matplotlib.use('Agg') # needed when running from server
def in_codalab():
"""Check if we are running inside CodaLab Docker container or not."""
# TODO: below is a total hack. If the OS is not a Mac, we assume we're on CodaLab.
return platform.system() != 'Darwin'
def upload(full_path, bundle_name=None, excludes='*.ipynb .git .ipynb_checkpoints .ignore'):
"""
Upload a file or directory to the codalab worksheet
Args:
full_path: Path + filename of file to upload
        bundle_name: Name to upload the file/directory as. If None, the filename is used.
"""
directory, filename = os.path.split(full_path)
if bundle_name is None:
bundle_name = filename
shell('cl up -n {} -w {} {} -x {}'.format(bundle_name, worksheet, full_path, excludes), verbose=True)
def launch_job(job_name, cmd,
dependencies=tuple(),
queue='john', image='kelvinguu/gtd:1.0',
memory=None, cpus='5',
network=False,
debug=False, tail=False):
"""Launch a job on CodaLab (optionally upload code that the job depends on).
Args:
job_name: name of the job
cmd: command to execute
dependencies: list of other bundles that we depend on
debug: if True, prints SSH commands, but does not execute them
tail: show the streaming output returned by CodaLab once it launches the job
"""
print('Remember to set up SSH tunnel and LOG IN through the command line before calling this.')
options = '-v -n {} -w {} --request-queue {} --request-docker-image {} --request-cpus {}'.format(
job_name, worksheet, queue, image, cpus)
if memory:
options += ' --request-memory {}'.format(memory)
if network:
options += ' --request-network'
dep_str = ' '.join(['{0}:{0}'.format(dep) for dep in dependencies])
full_cmd = "cl run {} {} '{}'".format(options, dep_str, cmd)
if tail:
full_cmd += ' -t'
shell(full_cmd, verbose=True, debug=debug)
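# A minimal usage sketch (job name, command and bundle uuid below are made up;
# the module-level `worksheet` must be set before calling any helper here):
def _example_launch(bundle_uuid):
    launch_job('demo-job', 'python train.py',
               dependencies=(bundle_uuid,),
               memory='8g', tail=True)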
if in_codalab():
configure_matplotlib()
|
ContextualSP/lemon/executor/gtd/codalab.py/0
|
{
"file_path": "ContextualSP/lemon/executor/gtd/codalab.py",
"repo_id": "ContextualSP",
"token_count": 2892
}
| 257 |
from collections import defaultdict
from contextlib import contextmanager
import logging
import psycopg2
from psycopg2.extras import RealDictCursor
from gtd.utils import Bunch
class Postgres(object):
"""Provides a wrapper around postgres.
Args:
db_name (str): name of database.
schema_name (str): name of schema.
user (str): name of user.
verbose (bool): if True, prints queries when they are executed.
debug (bool): if True, does not actually execute any queries.
If the specified schema does not exist, creates it.
Example:
        with Postgres(...) as db:
            db.execute(...)
"""
def __init__(self, db_name, schema_name, user, password=None, host=None, port=None, verbose=False, debug=False):
self.db_name = db_name
self.user = user
self.verbose = verbose
self.debug = debug
self.cursors_opened = 0 # counts the # cursors opened over this connection's lifetime
self._table_columns = {}
self.connection = psycopg2.connect(database=db_name, user=user, password=password, host=host, port=port)
self.cursor = self.connection.cursor() # this cursor is exclusively used for formatting queries
self._create_schema(schema_name) # create if it doesn't exist
self.execute("SET search_path TO {}, public".format(schema_name))
self.schema_name = schema_name
def format(self, query, as_is, params):
if as_is:
query = query.format(*as_is) # literal substitution
return self.cursor.mogrify(query, params)
def __enter__(self):
return self
def __exit__(self, typ, value, tb):
self.close()
def close(self):
self.cursor.close()
self.connection.close()
def commit(self):
self.connection.commit()
def rollback(self):
self.connection.rollback()
@contextmanager
def query_cursor(self, q, lazy_fetch=False, commit=True):
"""Execute a query and yield a cursor.
All execution performed by the Postgres object uses this method.
Args:
q (str): SQL query
lazy_fetch (bool): whether to use a server-side cursor (lazily fetches results).
"""
self.cursors_opened += 1
if self.verbose:
logging.debug(q)
if self.debug:
empty_cursor = Bunch()
empty_cursor.fetchmany = lambda size: []
empty_cursor.fetchall = lambda: []
yield empty_cursor
return
cursor_name = 'server_side_{}'.format(self.cursors_opened) if lazy_fetch else None
with self.connection.cursor(cursor_name, cursor_factory=RealDictCursor) as cursor:
cursor.execute(q)
yield cursor
if commit:
self.commit()
def execute(self, q, commit=True):
"""Execute query, return nothing."""
with self.query_cursor(q, commit=commit):
pass
def has_results(self, q):
"""Check if this query returns any results."""
with self.query_cursor(q) as cursor:
results = cursor.fetchall()
return len(results) > 0
def query(self, q, fetch_size=10000):
"""Return a generator of results from query.
Uses lazy fetching.
Args:
q (str): a SQL query
fetch_size (int): number of results to fetch at a time (for efficiency purposes)
Returns:
Generator[Dict[str, T]]: A generator of results as dicts
"""
if self.verbose:
logging.debug(q)
with self.query_cursor(q, lazy_fetch=True) as cursor:
while True:
results = cursor.fetchmany(fetch_size)
for result in results:
yield result
if len(results) == 0:
break
def iter_table(self, table_name):
"""Return a generator that iterates through all entries in a table.
Args:
table_name (str): name of table
Returns:
Generator[Dict[str, T]]
"""
q = self.format("SELECT * from {}", (table_name,), None)
return self.query(q)
def match_field(self, table_name, field, value):
"""Get all rows with a particular field value.
Args:
table_name (str): Table to query
field (str): Name of field
value: Desired value of field.
Returns:
Generator[Dict[str, T]]
"""
q = self.format("SELECT * from {} where {}=%s", (table_name, field), (value,))
return self.query(q)
def match_fields(self, table_name, fields):
"""Get all rows with a particular set of field values
Args:
table_name (str): name of table
fields (dict): a map from field names to values
Returns:
Generator[Dict[str, T]]
"""
keys, vals = list(zip(*list(fields.items())))
field_query = ' AND '.join(['{}=%s'.format(k) for k in keys])
field_vals = tuple(vals)
q = self.format("SELECT * from {} where {}", (table_name, field_query), field_vals)
return self.query(q)
def match_field_any(self, table_name, field, values):
"""Get all rows with a field value in a particular set.
Args:
table_name (str): Table to query
field (str): Name of field
            values: a list or set of allowed values.
Returns:
Generator[Dict[str, T]]
"""
q = self.format("SELECT * from {} where {} in %s", (table_name, field), (tuple(values),))
return self.query(q)
def _schema_exists(self, name):
"""Check if schema exists."""
q = self.format("SELECT schema_name FROM information_schema.schemata WHERE schema_name = %s", None, (name,))
return self.has_results(q)
def table_exists(self, name):
"""Check if table exists (under the default schema)."""
name = name.lower() # psql tables are always lower-case
q = self.format("SELECT table_name FROM information_schema.tables WHERE table_schema = %s AND table_name = %s",
None, (self.schema_name, name))
return self.has_results(q)
def _create_schema(self, name):
"""Create schema if it doesn't exist."""
if not self._schema_exists(name):
q = self.format("CREATE SCHEMA {}", (name,), None)
self.execute(q)
def create_table(self, name, col_to_type):
"""Create table if it doesn't exist."""
if not self.table_exists(name):
col_to_type_pairs = [' '.join(i) for i in list(col_to_type.items())]
col_type_str = ', '.join(col_to_type_pairs)
q = self.format("CREATE TABLE {} ({})", (name, col_type_str), None)
self.execute(q)
def drop_table(self, name):
if self.table_exists(name):
q = self.format("DROP TABLE {}", (name,), None)
self.execute(q)
def add_row(self, table_name, row):
"""Add row to table.
Args:
table_name (str)
row (dict[str, T]): a map from column names to values
"""
columns, vals = list(zip(*list(row.items())))
col_str = ', '.join(columns)
vals = tuple(vals)
q = self.format("INSERT INTO {} ({}) VALUES %s", (table_name, col_str), (vals,))
self.execute(q)
def add_rows(self, table_name, table):
"""Efficiently add a batch of rows to a table.
For an explanation, see:
https://trvrm.github.io/bulk-psycopg2-inserts.html
http://stackoverflow.com/questions/8134602/psycopg2-insert-multiple-rows-with-one-query
http://stackoverflow.com/questions/2271787/psycopg2-postgresql-python-fastest-way-to-bulk-insert
Args:
table_name (str): name of table
table (dict[str, list]): map from a column name to a list of column values
"""
col_names = list(table.keys())
col_str = ', '.join(col_names)
unnest = ', '.join(['unnest(%({})s)'.format(n) for n in col_names])
for column in list(table.values()):
assert isinstance(column, list) # must be a list for unnest to work
q = self.format("INSERT INTO {} ({}) SELECT {}", (table_name, col_str, unnest), table)
self.execute(q)
def add_table(self, table_name, table, col_types):
"""Create table in SQL and add data to it.
Args:
table_name (str): name of table
table (dict[str, list]): a map from column name to column values
col_types (dict[str, str]): a map from column name to psql column type
"""
assert not self.table_exists(table_name)
self.create_table(table_name, col_types)
self.add_rows(table_name, table)
def table(self, name):
results = list(self.iter_table(name))
table = defaultdict(list)
for res in results:
for key, val in res.items():
table[key].append(val)
return table
def row_count(self, table_name, approx=False):
q = self.format("select count(*) from {}", (table_name,), None)
q_approx = self.format("SELECT reltuples AS approximate_row_count FROM pg_class WHERE relname = %s",
None, (table_name,))
if approx:
row = next(self.query(q_approx))
count = row['approximate_row_count']
else:
row = next(self.query(q))
count = row['count']
return int(count)
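# A minimal usage sketch (database, schema, user and the table layout below are
# hypothetical; a running postgres server is assumed):
def _example_postgres_usage():
    with Postgres('mydb', 'myschema', 'me') as db:
        db.create_table('people', {'name': 'text', 'age': 'integer'})
        db.add_rows('people', {'name': ['ada', 'bob'], 'age': [36, 41]})
        return list(db.match_field('people', 'name', 'ada'))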
def dict_to_table(d):
"""Convert dict into a two-column table (one col for key, one col for value)."""
keys, vals = [list(l) for l in zip(*list(d.items()))]
return {'key': keys, 'val': vals}
def table_to_dict(table):
keys = table['key']
vals = table['val']
return {k: v for k, v in zip(keys, vals)}
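# A round-trip sketch of the two helpers above (the values are illustrative):
def _example_dict_table_round_trip():
    table = dict_to_table({'a': 1, 'b': 2})   # {'key': ['a', 'b'], 'val': [1, 2]}
    return table_to_dict(table)               # back to {'a': 1, 'b': 2}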
|
ContextualSP/lemon/executor/gtd/postgres.py/0
|
{
"file_path": "ContextualSP/lemon/executor/gtd/postgres.py",
"repo_id": "ContextualSP",
"token_count": 4403
}
| 258 |
'''
Created on Oct 23, 2015
@author: kelvinguu
'''
import logging
import operator
import os.path
import random
import shutil
import traceback
import types
import json
import warnings
from abc import ABCMeta, abstractmethod, abstractproperty
from collections import OrderedDict, defaultdict
from collections.abc import MutableMapping, Mapping
from contextlib import contextmanager
import numpy as np
from concurrent.futures import ThreadPoolExecutor, as_completed
from pyhocon import ConfigTree, HOCONConverter, ConfigFactory
from gtd.io import makedirs
def sorted_by_value(d, ascending=True):
return OrderedDict(sorted(list(d.items()), key=operator.itemgetter(1), reverse=not ascending))
class FunctionWrapper(object, metaclass=ABCMeta):
"""Turn a function or method into a callable object.
Can be used as a decorator above method definitions, e.g.
class Something(object):
...
@FunctionWrapper
def some_method(self, ...):
...
Or, bound methods of an instance can be directly overriden
obj = Something()
obj.some_method = FunctionWrapper(obj.some_method)
"""
def __init__(self, fxn):
self._orig_fxn = fxn
@property
def orig_fxn(self):
return self._orig_fxn
def __get__(self, instance, objtype=None):
"""Implement descriptor functionality."""
return self.as_method(instance, objtype)
def as_method(self, instance, objtype=None):
"""Make this object a method of the given object instance.
Args:
instance: any object instance
"""
        return types.MethodType(self, instance)
@abstractmethod
def __call__(self, *args, **kwargs):
raise NotImplementedError
class Memoized(FunctionWrapper, metaclass=ABCMeta):
def __init__(self, fxn):
"""Create memoized version of a function.
Args:
fxn (Callable): function to be memoized
"""
super(Memoized, self).__init__(fxn)
self._cache_hits = 0
self._calls = 0.
    def __call__(self, *args, **kwargs):
        use_cache = kwargs.pop('use_cache', True)
        if not use_cache:
            return self.orig_fxn(*args, **kwargs)
        self._calls += 1
        key = self._cache_key(args, kwargs)
# logging.debug('cache key: {}'.format(key))
if self._in_cache(key):
# logging.debug('load from cache')
self._cache_hits += 1 # successfully return from cache
return self._from_cache(key)
# logging.debug('compute and save to cache')
val = self.orig_fxn(*args, **kwargs)
self._to_cache(key, val)
return val
@property
def hit_rate(self):
if self._calls <= 0:
return 0.
return self._cache_hits / self._calls
@abstractmethod
def _cache_key(self, args, kwargs):
raise NotImplementedError
@abstractmethod
def clear_cache(self):
raise NotImplementedError
@abstractmethod
def _in_cache(self, key):
raise NotImplementedError
@abstractmethod
def _from_cache(self, key):
raise NotImplementedError
@abstractmethod
def _to_cache(self, key, val):
raise NotImplementedError
@abstractproperty
def cache_size(self):
pass
class DictMemoized(Memoized):
def __init__(self, fxn, custom_key_fxn=None):
super(DictMemoized, self).__init__(fxn)
self.cache = {}
self._custom_key_fxn = custom_key_fxn
def _cache_key(self, args, kwargs):
if self._custom_key_fxn:
return self._custom_key_fxn(*args, **kwargs)
kwargs_key = tuple(sorted(kwargs.items()))
return (args, kwargs_key)
def clear_cache(self):
self.cache = {}
def _in_cache(self, key):
return key in self.cache
def _from_cache(self, key):
return self.cache[key]
def _to_cache(self, key, val):
self.cache[key] = val
@property
def cache_size(self):
return len(self.cache)
def memoize(fxn):
return DictMemoized(fxn)
def memoize_with_key_fxn(key_fxn):
return lambda fxn: DictMemoized(fxn, custom_key_fxn=key_fxn)
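# A minimal usage sketch of the memoization decorator (the function below is
# purely illustrative):
@memoize
def _example_expensive_square(x):
    return x * x
# _example_expensive_square(3) computes 9 and caches it; a second call with the
# same argument is served from the cache (see .cache_size and .hit_rate).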
def args_as_string(args, kwargs):
args_str = '_'.join([str(a) for a in args])
kwargs_str = '_'.join(['{}={}'.format(k, v) for k, v in kwargs.items()])
items = [args_str, kwargs_str]
items = [s for s in items if s] # remove empty elements
key_str = '_'.join(items)
if not key_str:
key_str = 'NO_KEY'
return key_str
class FileMemoized(Memoized):
def __init__(self, fxn, cache_dir, serialize, deserialize):
super(FileMemoized, self).__init__(fxn)
self.cache_dir = cache_dir
self.serialize = serialize
self.deserialize = deserialize
makedirs(cache_dir)
def _cache_key(self, args, kwargs):
"""Compute the name of the file."""
key_str = args_as_string(args, kwargs)
return os.path.join(self.cache_dir, '{}.txt'.format(key_str))
def _in_cache(self, key):
return os.path.exists(key)
def clear_cache(self):
shutil.rmtree(self.cache_dir)
makedirs(self.cache_dir)
def _to_cache(self, key, val):
with open(key, 'w') as f:
self.serialize(f, val)
def _from_cache(self, key):
with open(key, 'r') as f:
return self.deserialize(f)
@property
def cache_size(self):
raise NotImplementedError
def file_memoize(cache_dir, serialize, deserialize):
return lambda fxn: FileMemoized(fxn, cache_dir, serialize, deserialize)
def sample_if_large(arr, max_size, replace=True):
if len(arr) > max_size:
idx = np.random.choice(len(arr), size=max_size, replace=replace)
return [arr[i] for i in idx]
return list(arr)
def flatten(lol):
"""
Flatten a list of lists
"""
return [item for sublist in lol for item in sublist]
def chunks(l, n):
"""
Return a generator of lists, each of size n (the last list may be less than n)
"""
for i in range(0, len(l), n):
yield l[i:i + n]
def ensure_unicode(s):
    assert isinstance(s, (str, bytes))
    if isinstance(s, bytes):
        s = s.decode('utf-8')
    return s
class UnicodeMixin(object):
__slots__ = []
@abstractmethod
def __unicode__(self):
raise NotImplementedError
    def __str__(self):
        return self.__unicode__()
    def __repr__(self):
        return self.__unicode__()
class EqualityMixinSlots(object):
"""Equality mixin for classes using __slots__"""
__slots__ = []
class Missing(object):
pass # just a special object to denote that a value is missing. Is only equal to itself.
__MISSING = Missing()
@property
def _slot_vals(self):
vals = []
for slots in [getattr(cls, '__slots__', tuple()) for cls in type(self).__mro__]:
for slot in slots:
try:
val = getattr(self, slot)
except AttributeError:
val = self.__MISSING
vals.append(val)
return tuple(vals)
def __eq__(self, other):
# must be strictly same type
if type(other) != type(self):
return False
if self._slot_vals != other._slot_vals:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self._slot_vals)
class EqualityMixin(object):
def __eq__(self, other):
if type(other) is type(self):
return self.__dict__ == other.__dict__
return False
def __ne__(self, other):
return not self.__eq__(other)
def data_split(items, dev_part=0.1, test_part=0.1):
# don't allow duplicates
assert len(set(items)) == len(items)
# remaining portion is set aside for train
assert dev_part + test_part < 1.0
items_copy = list(items)
random.shuffle(items_copy)
n = len(items_copy)
ndev = int(n * dev_part)
ntest = int(n * test_part)
dev = items_copy[:ndev]
test = items_copy[ndev:ndev + ntest]
train = items_copy[ndev + ntest:]
# verify that there is no overlap
train_set = set(train)
dev_set = set(dev)
test_set = set(test)
assert len(train_set.intersection(dev_set)) == 0
assert len(train_set.intersection(test_set)) == 0
    print('train {}, dev {}, test {}'.format(len(train), len(dev), len(test)))
return train, dev, test
def compute_if_absent(d, key, keyfunc):
val = d.get(key)
if val is None:
val = keyfunc(key)
d[key] = val
return val
class Bunch(object):
"""A simple class for holding arbitrary attributes. Recommended by the famous Martelli bot."""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __repr__(self):
return repr(self.__dict__)
def best_threshold(scores, labels, debug=False):
# find best threshold in O(nlogn)
# does not handle scores of infinity or -infinity
items = list(zip(scores, labels))
items.sort()
total = len(items)
total_pos = len([l for l in labels if l])
def accuracy(p, n):
correct_n = n
correct_p = total_pos - p
return float(correct_n + correct_p) / total
# predict True iff score > thresh
pos = 0 # no. pos <= thresh
neg = 0 # no. neg <= thresh
thresh_accs = [(float('-inf'), accuracy(pos, neg))]
for thresh, label in items:
if label:
pos += 1
else:
neg += 1
thresh_accs.append((thresh, accuracy(pos, neg)))
if debug:
import matplotlib.pyplot as plt
from gtd.plot import plot_pdf
x, y = list(zip(*thresh_accs))
plt.figure()
plt.plot(x, y)
pos_scores = [s for s, l in items if l]
neg_scores = [s for s, l in items if not l]
plot_pdf(pos_scores, 0.1, color='b')
plot_pdf(neg_scores, 0.1, color='r')
plt.show()
return max(thresh_accs, key=operator.itemgetter(1))[0]
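# A small illustrative call (the scores and labels below are made up): predicting
# True for scores strictly greater than the returned threshold separates the two
# classes perfectly here.
def _example_best_threshold():
    return best_threshold([0.1, 0.4, 0.6, 0.9], [False, False, True, True])  # returns 0.4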
def as_batches(l, batch_size):
assert batch_size >= 1
batch = []
for item in l:
if len(batch) == batch_size:
yield batch
batch = []
batch.append(item)
# final batch may be smaller
if len(batch) != 0:
yield batch
# TODO: test
def get_batch(data, batch_size, k):
"""Get the kth batch from a data sequence
If the final batch is less than batch_size, this function loops back to the beginning of data
so that the returned batch is exactly batch_size.
Args:
data: a list of examples
batch_size: the size of the returned batch
k: the batch index you want to get.
"""
return [data[i % len(data)] for i in range(k * batch_size, (k + 1) * batch_size)]
# TODO: test
def batch_compute(data, batch_fxn, batch_size):
"""Evaluate the batch function on a list of items.
Args:
data: a list of examples
batch_fxn: a function which only accepts a list of exactly length batch_size,
and returns a list of the same length
batch_size: the batch size
Returns:
a list of length = len(data)
"""
n = len(data)
    num_batches = n // batch_size + 1
final_trim_size = n % batch_size
# map
results = []
for k in range(num_batches):
batch = get_batch(data, batch_size, k) # circles around
result = batch_fxn(batch)
results.append(result)
# remove the examples that looped around to the beginning of data
results[-1] = results[-1][:final_trim_size]
return flatten(results)
def fixed_length(l, length, pad_val):
"""Given a list of arbitrary length, make it fixed length by padding or truncating.
(Makes a shallow copy of l, then modifies this copy.)
Args:
l: a list
length: desired length
pad_val: values padded to the end of l, if l is too short
Returns:
a list of with length exactly as specified.
"""
if len(l) < length:
fixed = list(l) # make shallow copy
fixed += [pad_val] * (length - len(l)) # pad
return fixed
else:
return l[:length] # truncate
class HomogeneousBatchSampler(object):
def __init__(self, data, bucket_fxn):
buckets = defaultdict(list)
for d in data:
buckets[bucket_fxn(d)].append(d)
keys = list(buckets.keys())
freqs = np.array([len(buckets[k]) for k in keys], dtype=float)
probs = freqs / np.sum(freqs)
self.keys = keys
self.probs = probs
self.buckets = buckets
def sample(self, batch_size):
# WARNING! This sampling scheme is only "correct" if each len(bucket) > batch_size
# sample a bucket according to its frequency
key = np.random.choice(self.keys, p=self.probs)
bucket = self.buckets[key]
# sample a batch from the bucket
batch = np.random.choice(bucket, size=batch_size, replace=True)
return batch
class Frozen(object):
"""Objects that inherit from Frozen cannot set or add new attributes unless inside an `unfreeze` context."""
__frozen = True
@staticmethod
@contextmanager
def unfreeze():
prev_state = Frozen.__frozen
Frozen.__frozen = False
yield
Frozen.__frozen = prev_state # set back to previous state
def __setattr__(self, key, value):
if Frozen.__frozen:
raise NotImplementedError('Object is frozen.')
else:
super(Frozen, self).__setattr__(key, value)
def __delattr__(self, item):
if Frozen.__frozen:
raise NotImplementedError('Object is frozen.')
else:
super(Frozen, self).__delattr__(item)
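# A minimal sketch of the freezing behaviour (the class below is hypothetical):
class _ExampleFrozenPoint(Frozen):
    def __init__(self, x):
        # attributes can only be assigned inside an unfreeze() block
        with Frozen.unfreeze():
            self.x = x
# After construction, `_ExampleFrozenPoint(1).x = 2` raises NotImplementedError.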
def sigmoid(x):
# scipy.special.expit will return NaN if x gets larger than about 700, which is just wrong
# compute using two different approaches
# they are each stable over a different interval of x
with warnings.catch_warnings():
warnings.simplefilter('ignore')
numer = np.exp(x)
s0 = numer / (1.0 + numer)
denom = 1.0 + np.exp(-x)
s1 = 1.0 / denom
# replace nans
if isinstance(x, float):
if np.isnan(s0):
s0 = s1
else:
nans = np.isnan(s0)
s0[nans] = s1[nans]
return s0
class NestedDict(MutableMapping):
def __init__(self, d=None):
"""Create a NestedDict.
Args:
d (dict): a nested Python dictionary. Defaults to an empty dictionary.
NOTE: if d contains empty dicts at its leaves, these will be dropped.
"""
if d is None:
d = {}
self.d = {}
for keys, val in self._flatten(d).items():
self.set_nested(keys, val)
def __iter__(self):
"""Iterate through top-level keys."""
return iter(self.d)
def __delitem__(self, key):
del self.d[key]
def __getitem__(self, key):
return self.d[key]
def __len__(self):
"""Total number of leaf nodes."""
l = 0
for v in self.values():
if isinstance(v, NestedDict):
l += len(v)
else:
l += 1
return l
def __setitem__(self, key, value):
"""Set a key-value pair.
If value is a Mapping, it will be converted into a NestedDict.
"""
if isinstance(value, Mapping):
value = NestedDict(value)
self.d[key] = value
def get_nested(self, keys):
d = self
for k in keys:
if not isinstance(d, NestedDict):
raise KeyError(keys)
d = d[k]
return d
def set_nested(self, keys, val):
first_keys, last_key = keys[:-1], keys[-1]
d = self
for k in first_keys:
if k not in d:
d[k] = NestedDict()
d = d[k]
d[last_key] = val
def __repr__(self):
return repr(self.d)
def as_dict(self):
d = {}
for key, sub in self.items():
if isinstance(sub, NestedDict):
val = sub.as_dict()
else:
val = sub
d[key] = val
return d
@staticmethod
def _flatten(d):
flattened = {}
def helper(key_tuple, d):
if not isinstance(d, Mapping): # leaf node
flattened[key_tuple] = d
return
for key, val in d.items():
helper(key_tuple + (key,), val)
helper(tuple(), d)
return flattened
def flattened(self):
return self._flatten(self)
def leaves(self):
return list(self.flattened().values())
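# A small usage sketch of NestedDict (the keys and values are illustrative):
def _example_nested_dict():
    nd = NestedDict({'a': {'b': 1, 'c': 2}})
    nd.set_nested(('a', 'd'), 3)       # creates intermediate dicts as needed
    assert nd.get_nested(('a', 'd')) == 3
    assert len(nd) == 3                # number of leaf values
    return nd.as_dict()                # {'a': {'b': 1, 'c': 2, 'd': 3}}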
def ranks(scores, ascending=True):
"""Assign a rank to each score.
Args:
scores (list[float]): a list of scores
ascending (bool): if True, then higher scores will have smaller rank
Returns:
list[int]: a list of ranks, where ranks[i] is the rank of the value scores[i]
"""
if isinstance(scores, list):
scores = np.array(scores)
else:
assert len(scores.shape) == 1
flip = 1 if ascending else -1
idx = np.argsort(flip * scores)
ranks = np.empty(scores.shape, dtype=int)
ranks[idx] = np.arange(len(scores))
# ranks should start from 1
ranks += 1
return list(ranks)
def quantiles(vals, ps):
vals = sorted(vals)
max_idx = len(vals) - 1
qs = []
for p in ps:
assert 0 <= p <= 1
i = int(round(max_idx * p))
qs.append(vals[i])
return qs
def sample_excluding(items, exclude):
candidates = list(items) # shallow copy
random.shuffle(candidates)
for cand in candidates:
if cand not in exclude:
return cand
# if everything is excluded, return None
return None
def map_array(fxn, array):
"""Apply fxn to all elements of array.
Args:
fxn: a function
array: a list of lists of lists of ... If it is a numpy array, converts it to a list.
Returns:
a new array, mapped
>>> arr = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
>>> map_array(lambda x: 10 * x, arr)
[[[10, 20], [30, 40]], [[50, 60], [70, 80]]]
"""
if isinstance(array, np.ndarray):
array = array.tolist()
new_array = []
for val in array:
new_val = map_array(fxn, val) if isinstance(val, list) else fxn(val)
new_array.append(new_val)
return new_array
def group(items, grouper):
d = defaultdict(list)
for item in items:
labels = grouper(item)
for label in labels:
d[label].append(item)
return d
# TODO(kelvin): test this
def generator_ignore_errors(iterator):
"""Loop through iterator, but ignore exceptions.
Logs a warning if there is an exception.
Args:
iterator: any object with a __next__ method
Yields:
the next element of the iterator
"""
i = 0
while True:
try:
try:
yield next(iterator)
except StopIteration:
# stop when we're out of elements
break
except Exception:
# If this generator is closed before it is exhausted (e.g. if we break out of a for-loop)
# it will get garbage collected, and throw a GeneratorExit error
# GeneratorExit does not inherit from Exception in Python >2.6, so we will not catch it here
# Critically, this line should NOT be changed to just "except:", as it would catch GeneratorExit
            logging.warning('Error parsing line {}'.format(i))
i += 1
class SimpleExecutor(object):
def __init__(self, fxn, max_workers=120):
self._fxn = fxn
self._executor = ThreadPoolExecutor(max_workers)
self._future_to_key = {} # map from future to a key for later access
def submit(self, key, x):
future = self._executor.submit(self._fxn, x)
self._future_to_key[future] = key
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown()
def results(self):
for future in as_completed(self._future_to_key):
key = self._future_to_key[future]
try:
result = future.result()
except BaseException:
f = Failure.with_message('SimpleExecutor failed to compute key: {}'.format(key))
logging.error(f.traceback)
result = f
yield key, result
def shutdown(self):
self._executor.shutdown()
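# Typical usage (illustrative sketch; `square` stands in for any function):
#   def square(x):
#       return x * x
#   with SimpleExecutor(square) as executor:
#       for key, x in [('a', 2), ('b', 3)]:
#           executor.submit(key, x)
#       for key, result in executor.results():
#           print(key, result)  # result is a Failure instance if the call raised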
class Failure(object):
"""Represents the result of a failed computation."""
@staticmethod
def with_message(msg):
f = Failure(message=msg)
logging.error(f.message)
return f
@staticmethod
def silent(msg):
return Failure(message=msg)
def __init__(self, uid=None, message='Failure'):
if uid is None:
uid = id(self)
self._uid = uid
self._msg = message
self._traceback = traceback.format_exc()
def __repr__(self):
return self._msg
@property
def uid(self):
return self._uid
@property
def traceback(self):
return self._traceback
@property
def message(self):
return self._msg
def __eq__(self, other):
if not isinstance(other, Failure):
return False
return self.uid == other.uid
def __ne__(self, other):
return not self.__eq__(other)
@contextmanager
def random_seed(seed=None):
"""Execute code inside this with-block using the specified seed.
If no seed is specified, nothing happens.
Does not affect the state of the random number generator outside this block.
Not thread-safe.
Args:
seed (int): random seed
"""
if seed is None:
yield
else:
py_state = random.getstate() # save state
np_state = np.random.get_state()
random.seed(seed) # alter state
np.random.seed(seed)
yield
random.setstate(py_state) # restore state
np.random.set_state(np_state)
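# Example (illustrative): the two blocks below draw identical random numbers,
# and the global RNG state is restored after each block.
#   with random_seed(0):
#       a = np.random.rand(3)
#   with random_seed(0):
#       b = np.random.rand(3)  # np.allclose(a, b) is True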
class cached_property(object):
"""Descriptor (non-data) for building an attribute on-demand on first use."""
def __init__(self, factory):
self._attr_name = factory.__name__
self._factory = factory
def __get__(self, instance, owner):
# Build the attribute.
attr = self._factory(instance)
# Cache the value; hide ourselves.
setattr(instance, self._attr_name, attr)
return attr
class set_once_attribute(object):
def __init__(self, attr_name):
self._attr_name = attr_name
def __get__(self, instance, owner):
return getattr(instance, self._attr_name)
def __set__(self, instance, value):
if hasattr(instance, self._attr_name):
raise RuntimeError('Cannot set {} more than once.'.format(self._attr_name))
setattr(instance, self._attr_name, value)
class Config(object):
"""A wrapper around the pyhocon ConfigTree object.
Allows you to access values in the ConfigTree as attributes.
"""
def __init__(self, config_tree=None):
"""Create a Config.
Args:
config_tree (ConfigTree)
"""
if config_tree is None:
config_tree = ConfigTree()
self._config_tree = config_tree
def __getattr__(self, item):
val = self._config_tree[item]
if isinstance(val, ConfigTree):
return Config(val)
else:
return val
def get(self, key, default=None):
val = self._config_tree.get(key, default)
if isinstance(val, ConfigTree):
return Config(val)
else:
return val
def put(self, key, value, append=False):
"""Put a value into the Config (dot separated)
Args:
key (str): key to use (dot separated). E.g. `a.b.c`
value (object): value to put
"""
self._config_tree.put(key, value, append=append)
def __repr__(self):
return self.to_str()
def to_str(self):
return HOCONConverter.convert(self._config_tree, 'hocon')
def to_json(self):
return json.loads(HOCONConverter.convert(self._config_tree, 'json'))
def to_file(self, path):
with open(path, 'w') as f:
f.write(self.to_str())
@classmethod
def from_file(cls, path):
config_tree = ConfigFactory.parse_file(path)
return cls(config_tree)
@classmethod
def from_dict(cls, d):
return Config(ConfigFactory.from_dict(d))
@classmethod
def merge(cls, config1, config2):
assert isinstance(config1, Config)
assert isinstance(config2, Config)
return cls(ConfigTree.merge_configs(config1._config_tree, config2._config_tree))
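# Example usage (illustrative):
#   config = Config.from_dict({'model': {'hidden_dim': 300}})
#   config.model.hidden_dim           # -> 300
#   config.put('model.dropout', 0.5)
#   config.get('model.dropout')       # -> 0.5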
def softmax(logits):
"""Take the softmax over a set of logit scores.
Args:
logits (np.array): a 1D numpy array
Returns:
a 1D numpy array of probabilities, of the same shape.
"""
if not isinstance(logits, np.ndarray):
logits = np.array(logits) # 1D array
logits = logits - np.max(logits) # re-center
exp_logits = np.exp(logits)
probs = exp_logits / np.sum(exp_logits)
return probs
def bleu(reference, predict):
"""Compute sentence-level bleu score.
Args:
reference (list[str])
predict (list[str])
"""
from nltk.translate import bleu_score
if len(predict) == 0:
if len(reference) == 0:
return 1.0
else:
return 0.0
# TODO(kelvin): is this quite right?
# use a maximum of 4-grams. If 4-grams aren't present, use only lower n-grams.
n = min(4, len(reference), len(predict))
weights = tuple([1. / n] * n) # uniform weight on n-gram precisions
return bleu_score.sentence_bleu([reference], predict, weights)
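# Examples (illustrative):
#   bleu(['the', 'cat', 'sat'], ['the', 'cat', 'sat'])  # -> 1.0 (exact match)
#   bleu(['the', 'cat', 'sat'], [])                     # -> 0.0 (empty prediction, non-empty reference)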
class ComparableMixin(object, metaclass=ABCMeta):
__slots__ = []
@abstractproperty
def _cmpkey(self):
pass
def _compare(self, other, method):
try:
return method(self._cmpkey, other._cmpkey)
except (AttributeError, TypeError):
# _cmpkey not implemented, or return different type,
# so I can't compare with "other".
return NotImplemented
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
|
ContextualSP/lemon/executor/gtd/utils.py/0
|
{
"file_path": "ContextualSP/lemon/executor/gtd/utils.py",
"repo_id": "ContextualSP",
"token_count": 11839
}
| 259 |
"""Predicate: output token."""
from gtd.utils import ComparableMixin
class Predicate(ComparableMixin):
"""Represents a step in the logical form (i.e., an output token)."""
__slots__ = ['_name', '_original_string', '_types']
def __init__(self, name, original_string=None, types=None):
"""Create Predicate.
Args:
name (unicode)
original_string (unicode)
types (tuple[unicode])
"""
self._name = name
self._original_string = original_string
self._types = types or tuple()
def __eq__(self, other):
return (isinstance(other, Predicate)
and self._name == other._name)
def __hash__(self):
return hash(self._name)
@property
def _cmpkey(self):
return self._name
def __str__(self):
return self._name
__repr__ = __str__
@property
def name(self):
"""Name of the predicate.
Should be unique among the predicates in the same context.
Returns:
unicode
"""
return self._name
@property
def original_string(self):
"""Original string of the predicate. Can be None.
Returns:
unicode or None
"""
return self._original_string
@property
def types(self):
"""A collection of types.
Returns:
tuple[unicode]
"""
return self._types
@property
def delexicalized_name(self):
"""A placeholder used in a delexicalized utterance.
Can be None if the predicate should not be used for delexicalization.
A subclass can customize this method to return different placeholders
for different predicate types.
Returns:
unicode or None
"""
return 'PRED'
|
ContextualSP/lemon/executor/strongsup/predicate.py/0
|
{
"file_path": "ContextualSP/lemon/executor/strongsup/predicate.py",
"repo_id": "ContextualSP",
"token_count": 782
}
| 260 |
from strongsup.predicates_computer import PredicatesComputer
from strongsup.rlong.predicate import RLongPredicate
class RLongPredicatesComputer(PredicatesComputer):
def compute_predicates(self, tokens):
"""Return list[(Predicate, alignment)]"""
return [(x, []) for x in self._ALL_PREDICATES]
class RLongAlchemyPredicatesComputer(RLongPredicatesComputer):
_ALL_PREDICATES = [
RLongPredicate(x) for x in
[
'r', 'y', 'g', 'o', 'p', 'b',
'1', '2', '3', '4', '5', '6', '7',
'-1',
'X1/1',
'PColor',
'APour', 'AMix', 'ADrain',
'all-objects', 'index',
'H0', 'H1', 'H2',
]]
class RLongScenePredicatesComputer(RLongPredicatesComputer):
_ALL_PREDICATES = [
RLongPredicate(x) for x in
[
'r', 'y', 'g', 'o', 'p', 'b', 'e',
'1', '2', '3', '4', '5', '6', '7', '8', '9', '10',
'-1',
'PShirt', 'PHat', 'PLeft', 'PRight', 'DShirtHat',
'ALeave', 'ASwapHats', 'AMove', 'ACreate',
'all-objects', 'index',
'H0', 'H1', 'H2', 'H3',
]]
class RLongTangramsPredicatesComputer(RLongPredicatesComputer):
_ALL_PREDICATES = [
RLongPredicate(x) for x in
[
'1', '2', '3', '4', '5',
'-1',
'AAdd', 'ASwap', 'ARemove',
'all-objects', 'index',
'H0', 'H1', 'H2',
]]
class RLongUndogramsPredicatesComputer(RLongPredicatesComputer):
_ALL_PREDICATES = [
RLongPredicate(x) for x in
[
'1', '2', '3', '4', '5',
'-1',
'AAdd', 'ASwap', 'ARemove',
'all-objects', 'index',
'H0', 'H1', 'H2', 'HUndo',
]]
################################
# Singletons
SINGLETONS = {
'alchemy': RLongAlchemyPredicatesComputer(),
'scene': RLongScenePredicatesComputer(),
'tangrams': RLongTangramsPredicatesComputer(),
'undograms': RLongUndogramsPredicatesComputer(),
}
def get_predicates_computer(domain_name):
return SINGLETONS[domain_name]
def get_fixed_predicates(domain_name):
return SINGLETONS[domain_name]._ALL_PREDICATES
|
ContextualSP/lemon/executor/strongsup/rlong/predicates_computer.py/0
|
{
"file_path": "ContextualSP/lemon/executor/strongsup/rlong/predicates_computer.py",
"repo_id": "ContextualSP",
"token_count": 1304
}
| 261 |
import os
import re
from abc import ABCMeta, abstractproperty
from gtd.utils import cached_property
from dependency.data_directory import DataDirectory
from strongsup.world import World
from strongsup.tables.executor import TablesPostfixExecutor
from strongsup.tables.graph import TablesKnowledgeGraph
from strongsup.tables.predicates_computer import TablesPredicatesComputer
class TableWorld(World, metaclass=ABCMeta):
"""World based on a table.
The table is actually represented as a TableKnowledgeGraph.
"""
@abstractproperty
def graph(self):
"""Return a TablesKnowledgeGraph object."""
raise NotImplementedError
@abstractproperty
def human_readable_path(self):
"""Return the relative path from the root data directory
to the human readable version of the table.
"""
raise NotImplementedError
@cached_property
def executor(self):
return TablesPostfixExecutor(self.graph)
@cached_property
def predicates_computer(self):
return TablesPredicatesComputer(self.graph)
def __repr__(self):
return '{}({})'.format(type(self).__name__, self.human_readable_path)
def dump_human_readable(self, fout):
full_path = os.path.join(DataDirectory.root, self.human_readable_path)
with open(full_path) as fin:
print(fin.read(), file=fout)
################################
# WikiTableQuestions tables
class WikiTableWorld(TableWorld):
def __init__(self, xxx, yyy):
"""Constructs a new GraphPath within the WikiTableQuestions dataset.
Args:
xxx, yyy (int): graph path ID; the graph will be loaded from
`{base_dir}/tagged/{xxx}-tagged/{yyy}.tagged`
"""
self.xxx, self.yyy = xxx, yyy
@cached_property
def graph(self):
return TablesKnowledgeGraph(
os.path.join(DataDirectory.wiki_table_questions, 'tagged',
str(self.xxx) + '-tagged', str(self.yyy) + '.tagged'))
@property
def human_readable_path(self):
abs_path = os.path.join(
DataDirectory.wiki_table_questions, 'csv',
'{}-csv'.format(self.xxx), '{}.table'.format(self.yyy))
return DataDirectory.relative_path(abs_path)
# Add a wrapper to enable caching
# Usage: one of the following
# WikiTableWorld('csv/204-csv/56.csv')
# WikiTableWorld(204, 56)
def wiki_table_world_cache_wrapper(cls):
CACHE = {}
def get(*args):
"""Get a WikiTableWorld instance from the ID.
Args:
Either a single string argument 'csv/{xxx}-csv/{yyy}.csv'
or two int arguments xxx, yyy. The graph will be loaded from
`{base_dir}/tagged/{xxx}-tagged/{yyy}.tagged`
Returns:
WikiTableWorld
"""
if len(args) == 1 and isinstance(args[0], str):
match = re.match(r'csv/(\d+)-csv/(\d+)\.csv', args[0])
if not match:
raise ValueError('wikitable id must have the form '
'csv/{xxx}-csv/{yyy}.csv; got ' + args[0])
xxx, yyy = int(match.group(1)), int(match.group(2))
elif len(args) == 2:
xxx, yyy = int(args[0]), int(args[1])
else:
raise ValueError('Unrecognized arguments: {}'.format(args))
if (xxx, yyy) not in CACHE:
CACHE[xxx, yyy] = cls(xxx, yyy)
return CACHE[xxx, yyy]
return get
WikiTableWorld = wiki_table_world_cache_wrapper(WikiTableWorld)
################################
# Artificial tables
class ArtificialTableWorld(TableWorld):
def __init__(self, path):
self._path = path
@cached_property
def graph(self):
return TablesKnowledgeGraph(
os.path.join(DataDirectory.root, self._path))
@property
def human_readable_path(self):
return self._path + '.human'
# Add a wrapper to enable caching
def artificial_table_cache_wrapper(cls):
CACHE = {}
def get(path):
if path not in CACHE:
CACHE[path] = cls(path)
return CACHE[path]
return get
ArtificialTableWorld = artificial_table_cache_wrapper(ArtificialTableWorld)
|
ContextualSP/lemon/executor/strongsup/tables/world.py/0
|
{
"file_path": "ContextualSP/lemon/executor/strongsup/tables/world.py",
"repo_id": "ContextualSP",
"token_count": 1768
}
| 262 |
import numpy as np
import pytest
import tensorflow as tf
from gtd.ml.framework import Feedable
from gtd.ml.utils import guarantee_initialized_variables
from strongsup.value_function import LogisticValueFunction, ValueFunctionExample
from strongsup.utils import OptimizerOptions
class DummyParseModel(Feedable):
def __init__(self, weights):
self._weights = tf.Variable(weights, dtype=tf.float32)
# Batch size x Embedding size
self._placeholder = tf.placeholder(tf.float32, shape=[None, 2])
self._case_encodings = tf.matmul(self._placeholder, self._weights)
@property
def case_encodings(self):
return self._case_encodings
def inputs_to_feed_dict(self, cases, ignore_previous_utterances, caching):
# Ignore cases, ignore_previous_utterances, and caching
dummy_parse_model_inputs = np.array([[1.0, 2.0], [2.0, 3.0]])
return {self._placeholder: dummy_parse_model_inputs}
@pytest.fixture
def weights():
return np.array([[0.0, 1.0], [1.0, 0.0]])
@pytest.fixture
def dummy_cases():
# Never gets used
return [1, 2]
@pytest.fixture
def rewards():
return [1.0, 0.0]
@pytest.fixture
def value_function(weights):
return LogisticValueFunction(
DummyParseModel(weights), 0.01, OptimizerOptions("adam"))
def test_value_function(value_function, weights, dummy_cases, rewards):
sess = tf.InteractiveSession()
guarantee_initialized_variables(sess)
fetch = {
"loss": value_function._loss
}
feed_dict = value_function.inputs_to_feed_dict(dummy_cases, rewards)
# Test that the loss decreases after taking a train step
loss = sess.run(fetch, feed_dict=feed_dict)["loss"]
values = value_function.values(dummy_cases)
for i in range(10):
vf_examples = [ValueFunctionExample(c, r) for c, r in zip(dummy_cases, rewards)]
value_function.train_step(vf_examples)
new_loss = sess.run(fetch, feed_dict=feed_dict)["loss"]
new_values = value_function.values(dummy_cases)
assert new_loss < loss
# Test that the weights didn't propagate to the ParseModel
fetch = {
"weights": value_function._parse_model._weights
}
model_weights = sess.run(fetch, feed_dict=feed_dict)["weights"]
assert np.array_equal(model_weights, weights)
|
ContextualSP/lemon/executor/strongsup/tests/test_value_function.py/0
|
{
"file_path": "ContextualSP/lemon/executor/strongsup/tests/test_value_function.py",
"repo_id": "ContextualSP",
"token_count": 882
}
| 263 |
#!/usr/bin/env python3
import csv
from typing import *
import logging
import sys
import json
EXIT_STATUS_ANSWERS_MALFORMED = 1
EXIT_STATUS_PREDICTIONS_MALFORMED = 2
EXIT_STATUS_PREDICTIONS_EXTRA = 3
EXIT_STATUS_PREDICTION_MISSING = 4
def calculate_accuracy(question_answers: Dict[str, str], predictions: Dict[str, List[str]]) -> float:
score = 0.0
for question_id, answer in question_answers.items():
try:
predictions_for_q = predictions[question_id]
except KeyError:
logging.error("Missing prediction for question '%s'.", question_id)
sys.exit(EXIT_STATUS_PREDICTION_MISSING)
if answer in predictions_for_q:
score += 1.0 / len(predictions_for_q)
del predictions[question_id]
if len(predictions) > 0:
logging.error("Found %d extra predictions, for example: %s", len(predictions),
", ".join(list(predictions.keys())[:3]))
sys.exit(EXIT_STATUS_PREDICTIONS_EXTRA)
return score / len(question_answers)
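# For example (illustrative): if the gold answer "A" is one of two predicted labels
# ("A;B") for a question, that question contributes 0.5 before averaging over all questions.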
def read_answers(filename: str) -> Dict[str, str]:
answers = {}
with open(filename, "rt", encoding="UTF-8", errors="replace") as f:
for line in f:
line = line.strip()
try:
record = json.loads(line)
except ValueError as e:
logging.error("Error while reading file %s: %s", filename, e)
sys.exit(EXIT_STATUS_ANSWERS_MALFORMED)
question_id = record["id"]
answer = record["answerKey"]
if question_id in answers:
logging.error("Key %s repeated in %s", question_id, filename)
sys.exit(EXIT_STATUS_ANSWERS_MALFORMED)
answers[question_id] = answer
if len(answers) == 0:
logging.error("No answers found in file %s", filename)
sys.exit(EXIT_STATUS_ANSWERS_MALFORMED)
return answers
def read_predictions(filename: str) -> Dict[str, List[str]]:
predictions = {}
with open(filename, "rt", encoding="UTF-8", errors="replace") as f:
reader = csv.reader(f)
try:
for row in reader:
try:
question_id = row[0]
prediction_raw = row[1]
except IndexError as e:
logging.error("Error reading value from CSV file %s on line %d: %s", filename, reader.line_num, e)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
if question_id in predictions:
logging.error("Key %s repeated in file %s on line %d", question_id, filename, reader.line_num)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
if question_id == "":
logging.error("Key is empty in file %s on line %d", filename, reader.line_num)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
prediction = prediction_raw.split(";")
# prediction labels cannot be empty strings
for p in prediction:
if p == "":
logging.error("Key %s has empty labels for prediction in file %s on line %d",
question_id, filename, reader.line_num)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
predictions[question_id] = prediction
except csv.Error as e:
logging.error('file %s, line %d: %s', filename, reader.line_num, e)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
return predictions
def main():
import argparse
parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for questions.')
parser.add_argument(
'--question-answers', '-qa',
help='Filename of the question answers to read. Expects a JSONL file with documents that have field "id" and "answerKey".',
required=True)
parser.add_argument(
'--predictions', '-p',
help="Filename of the leaderboard predictions, in CSV format.",
required=True)
parser.add_argument(
'--output', '-o',
help='Output results to this file.',
required=True)
args = parser.parse_args()
question_answers = read_answers(args.question_answers)
predictions = read_predictions(args.predictions)
accuracy = calculate_accuracy(question_answers, predictions)
with open(args.output, "wt", encoding="UTF-8") as output:
output.write(json.dumps({"accuracy": accuracy}))
if __name__ == '__main__':
main()
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/arc/evaluator/evaluator.py/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/arc/evaluator/evaluator.py",
"repo_id": "ContextualSP",
"token_count": 2084
}
| 264 |
import random
from collections import Counter
import numpy as np
from allennlp_reasoning_explainqa.common.constants import *
def dcg_score(y_true, y_score, k=10, gains="exponential"):
"""Discounted cumulative gain (DCG) at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
gains : str
Whether gains should be "exponential" (default) or "linear".
Returns
-------
DCG @k : float
"""
order = np.argsort(y_score)[::-1]
y_true = np.take(y_true, order[:k])
if gains == "exponential":
gains = 2 ** y_true - 1
elif gains == "linear":
gains = y_true
else:
raise ValueError("Invalid gains option.")
# highest rank is 1 so +2 instead of +1
discounts = np.log2(np.arange(len(y_true)) + 2)
# print("gains,discounts = ", gains,discounts)
return np.sum(gains / discounts)
def ndcg_score(y_true, y_score, k=10, gains="exponential"):
"""Normalized discounted cumulative gain (NDCG) at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
gains : str
Whether gains should be "exponential" (default) or "linear".
Returns
-------
NDCG @k : float
"""
best = dcg_score(y_true, y_true, k, gains)
actual = dcg_score(y_true, y_score, k, gains)
return actual / best
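# Example (illustrative): a prediction that ranks all relevant items first is perfect:
#   ndcg_score([1, 1, 0], [0.9, 0.8, 0.1], gains="linear")  # -> 1.0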
class ExplanationEval:
def __init__(self, pos_label=1, neg_label=0) -> None:
self._predictions = {}
self._id_count = 0
self._pos_label = pos_label
self._neg_label = neg_label
self._labels = [pos_label, neg_label]
def __call__(self, ques_id, choice_type, ground_truth_label, score):
"""
Parameters
----------
value : ``float``
The value to average.
"""
if choice_type in CORRECT_OPTION_TAG_LIST:
assert ground_truth_label in self._labels, "Label not known"
if ques_id not in self._predictions:
self._predictions[ques_id] = []
self._id_count += 1
self._predictions[ques_id].append(
{"score": score, "ground_truth": ground_truth_label}
)
def get_metric(self, reset: bool = False):
if reset:
print(
"explain_eval: Counter(len(vals)) : ",
Counter([len(val) for val in self._predictions.values()]),
)
ret = {
"explainP1": [],
"explainP1_normalized": [],
"explainP2": [],
"explainP5": [],
"explainNDCG": [],
}
total_label_counts = {"label_" + str(k): 0 for k in self._labels}
for id, vals in self._predictions.items():
random.shuffle(
vals
            )  # shuffle to avoid inflated scores when ties exist and the correct chains happen to come first
vals = sorted(
vals, key=lambda x: -x["score"]
) # sort by decreasing order of score
cnt_pos_flag = 0
y_true = [val["ground_truth"] for val in vals]
y_score = [val["score"] for val in vals]
total_true = sum(y_true)
if total_true > 0:
ndcg = ndcg_score(y_true, y_score, k=10, gains="linear")
else:
ndcg = 0
ret["explainNDCG"].append(ndcg)
ndcg_numerator = 0.0
ndcg_denominator = 0.0
discount = 1.0
discount_den = 1.0
for j, val in enumerate(
vals
            ):  # TODO: what if the number of items is less than 5? -- will affect R@5
if val["ground_truth"] == self._pos_label:
cnt_pos_flag = (
                        1  # we only need to know whether a positive is present
)
ndcg_numerator += discount * 1.0
                    # the denominator is the maximum achievable value: it should increase whenever we encounter a positive
                    # since labels are 0/1 this is simple here; no sorting is needed
# cnt_pos += 1
ndcg_denominator += discount_den * 1.0
discount_den *= 0.5
labelk = self._pos_label
else:
labelk = self._neg_label
total_label_counts["label_" + str(labelk)] += 1
if j == 0:
ret["explainP1"].append(cnt_pos_flag)
if j == 1:
ret["explainP2"].append(cnt_pos_flag)
if j == 4:
ret["explainP5"].append(cnt_pos_flag)
discount *= 0.5
if cnt_pos_flag > 0:
ret["explainP1_normalized"].append(ret["explainP1"][-1])
assert ndcg_numerator <= ndcg_denominator # sanity check
self.ret = {
k: {"items": len(lst), "score": np.mean(lst)} for k, lst in ret.items()
}
return_metric = {}
for k, lst in ret.items():
return_metric[k + "_items"] = len(lst)
if len(lst) > 0:
return_metric[k] = np.mean(lst)
return_metric.update(total_label_counts)
if reset:
self.reset()
return return_metric
def reset(self):
self._predictions = {}
# self._gt = {}
self._id_count = 0
self.ret = {}
def __str__(self):
return str(self.ret)
if __name__ == "__main__":
explain_eval = ExplanationEval()
dummy1 = [[1, 1, 1.5], [1, 1, 1.0], [1, 0, 0.9]] # perfect ranking
for ques_id, ground_truth_label, score in dummy1:
explain_eval(ques_id, CORRECT_OPTION_TAG, ground_truth_label, score)
print(explain_eval.get_metric(reset=True))
print("============")
# {'explainP1_items': 1, 'explainP1': 1.0, 'explainP1_normalized_items': 1, 'explainP1_normalized': 1.0,
# 'explainP2_items': 1, 'explainP2': 1.0, 'explainP5_items': 0, 'explainNDCG_items': 1, 'explainNDCG': 1.0,
# 'explainNDCG_exp_items': 1, 'explainNDCG_exp': 1.0, 'label_1': 2, 'label_0': 1}
dummy1 = [
[1, 1, 1.5],
[1, 1, 1.0],
[1, 0, 0.9], # perfect ranking
[2, 0, 1.5],
[2, 0, 1.0],
[2, 1, 0.9], # completely opposite ranking
]
for ques_id, ground_truth_label, score in dummy1:
explain_eval(ques_id, CORRECT_OPTION_TAG, ground_truth_label, score)
print(explain_eval.get_metric(reset=True))
print("============")
dummy1 = [[1, 0, 1.0], [1, 1, 1.0], [1, 1, 1.0]]
for ques_id, ground_truth_label, score in dummy1:
explain_eval(ques_id, CORRECT_OPTION_TAG, ground_truth_label, score)
print(explain_eval.get_metric(reset=True))
print("============")
dummy1 = [[1, 1, 1.0], [1, 1, 1.0], [1, 0, 1.0]]
for ques_id, ground_truth_label, score in dummy1:
explain_eval(ques_id, CORRECT_OPTION_TAG, ground_truth_label, score)
print(explain_eval.get_metric(reset=True))
print("============")
dummy1 = [[1, 0, 1.0], [1, 1, 1.01], [1, 1, 1.01]]
for ques_id, ground_truth_label, score in dummy1:
explain_eval(ques_id, CORRECT_OPTION_TAG, ground_truth_label, score)
print(explain_eval.get_metric(reset=True))
print("============")
dummy1 = [[1, 0, 1.02], [1, 1, 1.01], [1, 1, 1.01]]
for ques_id, ground_truth_label, score in dummy1:
explain_eval(ques_id, CORRECT_OPTION_TAG, ground_truth_label, score)
print(explain_eval.get_metric(reset=True))
print("============")
dummy1 = [[1, 0, 1.0], [1, 0, 1.0], [1, 1, 1.0]]
for ques_id, ground_truth_label, score in dummy1:
explain_eval(ques_id, CORRECT_OPTION_TAG, ground_truth_label, score)
print(explain_eval.get_metric(reset=True))
print("============")
dummy1 = [[1, 0, 1.0], [1, 0, 1.0], [1, 0, 1.0]]
for ques_id, ground_truth_label, score in dummy1:
explain_eval(ques_id, CORRECT_OPTION_TAG, ground_truth_label, score)
print(explain_eval.get_metric(reset=True))
print("============")
dummy1 = [
[1, 1, 1.0],
[1, 1, 1.0],
]
for ques_id, ground_truth_label, score in dummy1:
explain_eval(ques_id, CORRECT_OPTION_TAG, ground_truth_label, score)
print(explain_eval.get_metric(reset=True))
# env PYTHONPATH=. python allennlp_reasoning_explainqa/training/metrics/explanation_eval.py
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/eqasc/code/allennlp_reasoning_explainqa/training/metrics/explanation_eval.py/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/eqasc/code/allennlp_reasoning_explainqa/training/metrics/explanation_eval.py",
"repo_id": "ContextualSP",
"token_count": 4267
}
| 265 |
# ProPara Leaderboard training data
* `answers.tsv` is an actions file with correct answers
* `dummy-predictions.tsv` is an actions file with dummy predictions (action is "NONE")
* `sentences.tsv` is a list of sentences (steps) for each process.
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/data/train/README.md/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/data/train/README.md",
"repo_id": "ContextualSP",
"token_count": 75
}
| 266 |
#!/usr/bin/env python3
# % cat testfiles-5/predictions.tsv | sort | python3 explainer.py
# In paragraph 4, sentence 2, the participant `plants` is moved from `an unknown location` to `sediment`
# In paragraph 4, sentence 3, the participant `bacteria` is moved from `an unknown location` to `sediment`
# In paragraph 4, sentence 8, the participant `plants` is moved from `sediment` to `one mile underground`
# In paragraph 4, sentence 8, the participant `sediment` is moved from `an unknown location` to `underground`
# In paragraph 4, sentence 10, the participant `bacteria` is destroyed at `sediment`
# In paragraph 4, sentence 10, the participant `oil` is created at `underground`
# In paragraph 4, sentence 10, the participant `plants` is destroyed at `one mile underground`
import sys
explanations = []
for line in sys.stdin:
line = line.strip()
paragraph_id, sentence, participant, action, location_before, location_after = line.split("\t")
event = ""
if action == "DESTROY":
if location_before == "?":
location_before = f"an unknown location"
event = f"destroyed at `{location_before}`"
elif action == "CREATE":
if location_after == "?":
location_after = f"an unknown location"
event = f"created at `{location_after}`"
elif action == "MOVE":
if location_before == "?":
location_before = f"an unknown location"
if location_after == "?":
location_after = f"an unknown location"
event = f"moved from `{location_before}` to `{location_after}`"
if event:
explanation = f"In paragraph {paragraph_id}, sentence {sentence}, the participant `{participant}` is {event}"
explanations.append((int(paragraph_id), int(sentence), explanation))
for _, _, explanation in sorted(explanations):
print(explanation)
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/explainer.py/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/explainer.py",
"repo_id": "ContextualSP",
"token_count": 615
}
| 267 |
## Test case: ProStruct prediction on test set
* answers.tsv is a sorted copy of the answers to the [ProPara test set](../../data/test/).
* predictions.tsv is the prediction generated by ProStruct.
An evaluation on this prediction should result in an F1 score of 0.545.
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/testfiles-1/README.md/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/testfiles-1/README.md",
"repo_id": "ContextualSP",
"token_count": 74
}
| 268 |
# LogiGAN
This repository primarily serves as the codebase, data, and model release for training, evaluation, and inference with the logical pre-training method LogiGAN.
[LogiGAN](https://arxiv.org/abs/2205.08794) (NeurIPS 2022) is the adversarial logical pre-training method with Transformer-based encoder-decoder backbone.
The data and models are released [here](https://github.com/microsoft/ContextualSP/releases/tag/logigan).
# Preprocessing
## Logic MLM Corpus Construction
```bash
cd corpus_construction/mlm_corpus
bash construct_premise.sh
bash construct_conclusion.sh
```
## Elastic Search for External Negatives
```
cd corpus_construction/elastic_search
bash run_gen.sh
bash run_ver.sh
```
# Adversarial Pretraining
Note that the generator and verifier should be warmed up with the constructed corpus to achieve better performance.
Afterwards,
```
cd pre-training
# launch the program; the setting of each step is adjusted in:
python launcher_es.py
# (the parameters are adjusted in parameters16g_es_corpusb.py)
```
# Citation
If you find this resource useful, please cite the paper introducing LogiGAN:
```
@article{pi2022logigan,
title={LogiGAN: Learning Logical Reasoning via Adversarial Pre-training},
author={Pi, Xinyu and Zhong, Wanjun and Gao, Yan and Duan, Nan and Lou, Jian-Guang},
journal={arXiv preprint arXiv:2205.08794},
year={2022}
}
```
|
ContextualSP/logigan/README.md/0
|
{
"file_path": "ContextualSP/logigan/README.md",
"repo_id": "ContextualSP",
"token_count": 406
}
| 269 |
indicator_type=$1
tmp_dir=./filter_${indicator_type}
if [ -d ${tmp_dir} ]
then
rm -r ${tmp_dir}
fi
mkdir ${tmp_dir}
if [ ${indicator_type} == premise ]
then
python filter.py --start_index 0 --end_index 500000 --indicator_type premise &
python filter.py --start_index 500000 --end_index 1000000 --indicator_type premise &
python filter.py --start_index 1000000 --end_index 1500000 --indicator_type premise
# python filter.py --start_index 0 --end_index 50 --indicator_type premise &
# python filter.py --start_index 50 --end_index 100 --indicator_type premise &
# python filter.py --start_index 150 --end_index 200 --indicator_type premise
fi
if [ ${indicator_type} == conclusion ]
then
python filter.py --start_index 0 --end_index 500000 --indicator_type conclusion &
python filter.py --start_index 500000 --end_index 1000000 --indicator_type conclusion &
python filter.py --start_index 1000000 --end_index 1500000 --indicator_type conclusion
# python filter.py --start_index 0 --end_index 50 --indicator_type conclusion &
# python filter.py --start_index 50 --end_index 100 --indicator_type conclusion &
# python filter.py --start_index 150 --end_index 200 --indicator_type conclusion
fi
wait
cat ${tmp_dir}/*.jsonl > ./filter_${indicator_type}.jsonl
rm -r ${tmp_dir}
|
ContextualSP/logigan/corpus_construction/mlm_corpus/filter.sh/0
|
{
"file_path": "ContextualSP/logigan/corpus_construction/mlm_corpus/filter.sh",
"repo_id": "ContextualSP",
"token_count": 437
}
| 270 |
# POET
This is the official repo for the paper [Reasoning Like Program Executors](https://arxiv.org/pdf/2201.11473.pdf).
## Pre-training Corpus
You can find the pre-training SQL corpus [here](https://drive.google.com/file/d/1dg3NwPT2vWTcj2rx7S6GN8x5EywZiXQr) and the pre-training Math corpus [here](https://huggingface.co/datasets/SivilTaram/poet-math).
The pre-training SQL corpus can be synthesized following the same procedure as done in [TAPEX](https://github.com/microsoft/Table-Pretraining#-synthesize-your-own-pre-training-data) with the `expand_numbers_in_text` function below:
```python
import re

def expand_numbers_in_text(text, delim=" ", ignore_chars=[","], reverse_num=False):
number_pattern = r"[-+]?[.]?[\d]+(,\d+)*[\.]?\d*(?:[eE][-+]?\d+)?%?"
num_char_spans = [(m.start(0), m.end(0)) for m in re.finditer(number_pattern, text)]
if len(num_char_spans) == 0: return text
out_text = ""
last_e = -1
for i, (s, e) in enumerate(num_char_spans):
out_text += text[:s] if i == 0 else text[last_e:s]
num_str = delim.join([c for c in list(text[s:e]) if c not in ignore_chars])
out_text += num_str if not reverse_num else num_str[::-1]
last_e = e
out_text += text[last_e:] # append rest
return out_text
```
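For example (the sentence below is ours, purely for illustration), the helper splits every number into space-separated digits:

```python
expand_numbers_in_text("The Raiders kicked a 23-yard field goal")
# -> 'The Raiders kicked a 2 3-yard field goal'
```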
The pre-training Math corpus can be synthesized by the script [synthesize_math_corpus.py](synthesize_math_corpus.py).
The pre-training Logic corpus can be synthesized by the script [synthesize_logic_corpus.py](synthesize_logic_corpus.py).
For all BART-based experiments, we use the [fairseq](https://github.com/facebookresearch/fairseq) implementation, which means that you can prepare the dataset in the following format:
```
|- dataset
|- train.src
|- train.tgt
|- valid.src
|- valid.tgt
```
After the necessary preprocessing (you can follow the official fairseq guide for the machine translation task), you can use the following script to train the model:
```shell
fairseq-train dataset/bin/ \
--save-dir models \
--tensorboard-logdir tensorboard_logs \
--restore-file BART-large/model.pt \
--arch bart_large \
--task translation \
--maximize-best-checkpoint-metric \
--criterion label_smoothed_cross_entropy \
--source-lang src \
--target-lang tgt \
--label-smoothing 0.1 \
--max-tokens 1536 \
--validate-interval 50 \
--save-interval 50 \
--save-interval-updates 3001 \
--validate-interval-updates 3001 \
--keep-interval-updates 5 \
--update-freq 16 \
--warmup-updates 500 \
--max-update 20000 \
--total-num-update 20000 \
--required-batch-size-multiple 1 \
--dropout 0.1 \
--attention-dropout 0.1 \
--relu-dropout 0.0 \
--weight-decay 0.01 \
--optimizer adam \
--adam-betas "(0.9, 0.999)" \
--adam-eps 1e-08 \
--clip-norm 0.1 \
--lr-scheduler polynomial_decay \
--lr 3e-5 \
--ddp-backend no_c10d \
--num-workers 1 \
--reset-meters \
--reset-optimizer \
--reset-dataloader \
--share-all-embeddings \
--layernorm-embedding \
--share-decoder-input-output-embed \
--skip-invalid-size-inputs-valid-test \
--log-format json \
--log-interval 10 \
--patience 10 \
--keep-best-checkpoints 1 \
--report-accuracy \
--no-epoch-checkpoints \
--no-last-checkpoints \
--no-save-optimizer-state
```
## Pre-trained Model Weights
You can find all the available POET model weights at [Huggingface Hub](https://huggingface.co/models?search=siviltaram/poet).
You can fine-tune all of these models in the same way as the vanilla models. They are pre-trained on inputs that combine a `sentence` and a `natural context` in the following format:
```
[sentence] col : [natural context]
```
where `[sentence]` is usually the question in the task, and `[natural context]` is usually the passage in the task. Please refer to our paper for more details.
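For instance (the question and passage below are made up for illustration), a question-passage pair is flattened into a single input string:

```python
question = "how many field goals were kicked in the first quarter?"
passage = "The Raiders took an early lead with a 23-yard field goal."
model_input = f"{question} col : {passage}"
# -> 'how many field goals were kicked in the first quarter? col : The Raiders took an early lead with a 23-yard field goal.'
```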
|
ContextualSP/poet/README.md/0
|
{
"file_path": "ContextualSP/poet/README.md",
"repo_id": "ContextualSP",
"token_count": 1548
}
| 271 |
import torch
import sys
from torch import nn, optim
import os
from data import treeDataset, Dictionary, customDataset
from torch.utils.data import DataLoader
from model import Seq2Seq, Encoder, Decoder, Attention, Parser
from utils import collate_fn
import argparse
import numpy as np
import sys
import time
import random
import json
SPOUSE_PRED="people.person.spouse_s/ns:people.marriage.spouse|ns:fictional_universe.fictional_character.married_to/ns:fictional_universe.marriage_of_fictional_characters.spouses"
SIBLING_PRED="people.person.sibling_s/ns:people.sibling_relationship.sibling|ns:fictional_universe.fictional_character.siblings/ns:fictional_universe.sibling_relationship_of_fictional_characters.siblings"
N_EPOCHS = 50
TRAIN='train'
TEST='test'
DEBUG='debug'
INFERENCE='inference'
def set_seed(seed, device):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if device :
torch.cuda.manual_seed_all(seed)
def train(src_dictionary, trg_dictionary, train_data_loader, dev_data_loader, model, device, args, clip = 1):
"""
    Train a neural sketch-prediction parser from a dataset of (utterance, sketch) pairs.
    :param src_dictionary: source-side (utterance) vocabulary.
    :param trg_dictionary: target-side (sketch) vocabulary.
    :param train_data_loader: DataLoader over the training set.
    :param dev_data_loader: DataLoader over the validation set.
    :param model: the Parser model to train.
    :param device: torch device used for training.
    :param args: command-line arguments (save_path, iterations, ...).
    :param clip: gradient clipping norm.
    :return:
"""
# set_seed(args.seed, device)
# model = model.to(device)
optimizer = optim.Adam(model.parameters())
# print("class_Weight shape:", class_weight.shape)
# criterion = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([args.weight]).to(device))
criterion = nn.BCEWithLogitsLoss()
# criterion = FocalLoss(args.alpha, args.gamma)
hit, total, acc = evaluate_iter_loss2(model, dev_data_loader, src_dictionary ,trg_dictionary, device)
best_hit = hit
torch.save(model, args.save_path +'parser_model_best.pt')
print(f'Best Dev hit: {best_hit: d} | Dev total: {total: d} | Dev acc: {acc: f}', flush=True)
for epoch in range(args.iterations):
train_loss = train_iter(model, train_data_loader, src_dictionary, trg_dictionary, optimizer, criterion, clip, device)
# if loss < best_loss:
# best_loss = loss
# torch.save(model, model_output_path)
print(f'EPOCH: {epoch: d} | Train Loss: {train_loss: f}', flush=True)
if epoch %5 == 0:
torch.save(model, args.save_path +f'parser_model_{epoch}.pt')
# hit, count, _ = evaluate_iter_loss(model, dev_iterator, trg_dictionary)
hit, total, acc = evaluate_iter_loss2(model, dev_data_loader, src_dictionary, trg_dictionary, device)
if hit > best_hit:
best_hit = hit
# best_loss = loss
torch.save(model, args.save_path +'parser_model_best.pt')
print(f'Epoch: {epoch: d} | Best Dev hit: {best_hit: d}| Dev hit: {hit: d} |Dev total: {total: d} | Dev acc: {acc: f}', flush=True)
def create_index_mask(modified_labels):
pad_mask = modified_labels.sum(dim = -1).gt(0)
# pad_mask [src_len, bsz, vocab_size]
pad_mask = pad_mask.unsqueeze(2).repeat(1, 1, modified_labels.shape[-1])
# index_matrix = torch.tensor([i for i in range(modified_labels.shape[0])] * modified_labels.shape[1])
# indices = torch.arange(0,pad_mask.size(0)).expand(pad_mask.shape[1], -1).transpose(0,1)[pad_mask].long()
# print("indices:", indices)
return pad_mask
# pad_mask = sum(modified_labels[0,:,:]).eq(0)
def train_iter(model, iterator, src_dictionary, trg_dictionary, optimizer, criterion, clip, device):
model.train()
epoch_loss = 0
for i, batch in enumerate(iterator):
nl, trg, label, _= batch
src_info, trg_info, label_info, _ = treeDataset.transform(src_dictionary, trg_dictionary, nl, trg, label, device)
modified_labels, _ = label_info
optimizer.zero_grad()
output = model(src_info, trg_info, 1)
# print(output.shape, modified_labels.shape)
## output_labels, modified_labels :[bsz, trg_length, vocab_size]
output = output.transpose(0,1)
modified_labels = modified_labels.transpose(0, 1)
pad_mask = create_index_mask(modified_labels)
loss = criterion(output[pad_mask], modified_labels[pad_mask])
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
optimizer.step()
epoch_loss += loss.item()
return epoch_loss / len(iterator)
def debug_iter(model, iterator, src_dictionary,trg_dictionary, device):
model.eval()
for i, batch in enumerate(iterator):
nl, trg, candidate_tokens, label, ori_idx= batch
# print("nl:", nl)
# print("trg:", trg)
# print("label:", label)
src_info, trg_info, label_info, ori_idx = treeDataset.transform(src_dictionary, trg_dictionary, nl, trg, candidate_tokens, label, device)
modified_labels, _ = label_info
# optimizer.zero_grad()
output = model(src_info, trg_info, 1)
# print("fff",ori_idx.dtype, modified_labels.dtype)
output_labels = torch.sigmoid(output.transpose(0,1))
modified_labels = modified_labels.transpose(0,1)
# ori_idx = ori_idx.to(modified_labels).long()
# # print("fff",ori_idx.dtype, modified_labels.dtype)
# # print(ori_idx.dtype, modified_labels.dtype)
modified_labels = modified_labels.index_select(0, ori_idx)
output_labels = output_labels.index_select(0, ori_idx)
pad_mask = create_index_mask(modified_labels)
# loss = criterion(output, modified_labels)
for batch_nl, batch_trg, batch_raw_label, batch_pred, batch_label in zip(nl, trg, label, output_labels, modified_labels):
# print(f"nl:{batch_nl}\ntrg:{batch_trg}\ncandidate:{batch_candidate}")
for step in range(len(batch_raw_label)):
print(f"nl:{batch_nl}\ntrg:{batch_trg}\n")
print("pos labels:", [(idx, model.trg_dictionary.ids2sentence([idx])) for idx, i in enumerate(batch_label[step]) if i==1])
print(f"\nstep:{step}, step_label:{batch_raw_label[step]}")
print([(idx, model.trg_dictionary.ids2sentence([idx])) for idx, i in enumerate(batch_pred[step]) if i> 0.5])
for idx in range(93):
print(f"idx:{idx},token:{model.trg_dictionary.ids2sentence([idx])}, batch_label:{batch_label[step][idx]}, batch_pred_loss:{batch_pred[step][idx]}")
# loss.backward()
# torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
# optimizer.step()
# epoch_loss += loss.item()
return epoch_loss / len(iterator)
def evaluate_iter_loss(model, iterator, criterion, trg_dictionary):
model.eval()
epoch_loss = 0
with torch.no_grad():
for _, batch in enumerate(iterator):
nl, trg, label, _= batch
output, modified_labels, _ = model(nl, trg, label, 1) # turn off teacher forcing
pad_mask = create_index_mask(modified_labels)
loss = criterion(output[pad_mask], modified_labels[pad_mask])
epoch_loss += loss.item()
# print("len iterator:", len(iterator))
return epoch_loss / len(iterator)
def evaluate_iter_loss2(model, iterator, src_dictionary, trg_dictionary, device):
model.eval()
epoch_loss = 0
hit = 0
total = 0
with torch.no_grad():
for _, batch in enumerate(iterator):
nl, trg, label, dup_len= batch
src_info, trg_info, label_info, ori_idx = treeDataset.transform(src_dictionary, trg_dictionary, nl, trg, label, device)
modified_labels, _ = label_info
output = model(src_info, trg_info, 1) # turn off teacher forcing
# print("output:", output)
output_labels = torch.sigmoid(output).transpose(0, 1).index_select(0, ori_idx)
modified_labels = modified_labels.transpose(0,1).long().index_select(0, ori_idx)
alpha = torch.ones(output_labels.shape[0], output_labels.shape[2]).to(output_labels)
alpha[:, 0] = 0
alpha = alpha.unsqueeze(1).repeat(1, output_labels.shape[1], 1)
output_labels = (alpha * output_labels).gt(0.5).long()
# output_labels = (output_labels).gt(0.5).long()
pad_mask = create_index_mask(modified_labels)
start_pos = 0
for idx, length in enumerate(dup_len):
predict_labels_idx = output_labels[start_pos:start_pos+length, :, :]
modified_labels_idx = modified_labels[start_pos:start_pos+length, :, :]
pad_mask_idx = pad_mask[start_pos:start_pos+length, :, :]
if predict_labels_idx[pad_mask_idx].equal(modified_labels_idx[pad_mask_idx]):
hit += 1
total += 1
start_pos += length
return hit, total ,hit / total
def evaluate_iter_acc(model, iterator, src_dictionary, trg_dictionary, device, output_path):
model.eval()
hit = 0
total = 0
errors = {}
ans, golden = [], []
error_ids = []
def calculate_acc(predict, golden):
hit, total, errors = 0, 0, 0
for p, g in zip(predict, golden):
if p.strip() == g.strip():
hit+=1
total += 1
return hit, total, hit / total
# golden = open(GOLDEN_PATH).readlines()
with torch.no_grad():
for _, batch in enumerate(iterator):
nl, trg = batch
src_info = customDataset.transform(src_dictionary, trg_dictionary, nl, device)
output = model.inference(src_info) # turn off teacher forcing
output = trg_dictionary.ids2sentences(output)
sparql_set = set()
for s in output:
triples = s.split(" . ")
for triple in triples:
sparql_set.add(triple)
predict_sent = sorted(list(sparql_set))
num = len(predict_sent)
for i in range(num):
pred = predict_sent[i]
if SPOUSE_PRED in pred or SIBLING_PRED in pred:
try:
a1, r, a2 = pred.split()
predict_sent.append(f"FILTER ( {a1} != {a2} )")
except Exception as e:
pass
predict_sent = ' . '.join(predict_sent)
# print("trg:", trg)
# if predict_sent.strip() == trg[0].strip():
# flag = True
# else:
# flag = False
# error_ids.append(str(_))
# print(f"\n{'='*60}")
# print("idx:", _, "original_output:", output)
# print(f"!!!!!!!!!!!!!!{flag}!!!!!!!!!!!!!\nNL:{nl}\nTRG:{trg[0]}\nPREDICT:{predict_sent}", flush=True)
ans.append(predict_sent.strip())
golden.append(trg[0].strip())
open(output_path, "w").write('\n'.join(ans))
return calculate_acc(ans, golden)
def evaluate(src_dictionary,trg_dictionary, dev_data_loader, model, device):
    hit, total, acc = evaluate_iter_loss2(model, dev_data_loader, src_dictionary, trg_dictionary, device)
print(f'hit: {hit: d} | total: {total: d} | acc: {acc: f}', flush=True)
def main():
parser = argparse.ArgumentParser(description='Train a neural machine translation model')
# Training corpus
corpora_group = parser.add_argument_group('training corpora', 'Corpora related arguments; specify either monolingual or parallel training corpora (or both)')
corpora_group.add_argument('--src_path', help='the source language monolingual corpus')
corpora_group.add_argument('--trg_path', help='the target language monolingual corpus')
    corpora_group.add_argument('--max_sentence_length', type=int, default=90, help='the maximum sentence length for training (defaults to 90)')
# Embeddings/vocabulary
embedding_group = parser.add_argument_group('embeddings', 'Embedding related arguments; either give pre-trained cross-lingual embeddings, or a vocabulary and embedding dimensionality to randomly initialize them')
embedding_group.add_argument('--src_vocabulary', help='the source language vocabulary')
embedding_group.add_argument('--trg_vocabulary', help='the target language vocabulary')
embedding_group.add_argument('--embedding_size', type=int, default=0, help='the word embedding size')
# Architecture
architecture_group = parser.add_argument_group('architecture', 'Architecture related arguments')
architecture_group.add_argument('--layers', type=int, default=2, help='the number of encoder/decoder layers (defaults to 2)')
    architecture_group.add_argument('--enc_hid_dim', type=int, default=512, help='the number of dimensions for the encoder hidden layer (defaults to 512)')
    architecture_group.add_argument('--dec_hid_dim', type=int, default=512, help='the number of dimensions for the decoder hidden layer (defaults to 512)')
# Optimization
optimization_group = parser.add_argument_group('optimization', 'Optimization related arguments')
    optimization_group.add_argument('--batch_size', type=int, default=128, help='the batch size (defaults to 128)')
optimization_group.add_argument('--learning_rate', type=float, default=0.0002, help='the global learning rate (defaults to 0.0002)')
    optimization_group.add_argument('--dropout', metavar='PROB', type=float, default=0.4, help='dropout probability for the encoder/decoder (defaults to 0.4)')
optimization_group.add_argument('--param_init', metavar='RANGE', type=float, default=0.1, help='uniform initialization in the specified range (defaults to 0.1, 0 for module specific default initialization)')
    optimization_group.add_argument('--iterations', type=int, default=50, help='the number of training iterations (defaults to 50)')
# Model saving
saving_group = parser.add_argument_group('model saving', 'Arguments for saving the trained model')
saving_group.add_argument('--save_path', metavar='PREFIX', help='save models with the given prefix')
saving_group.add_argument('--save_interval', type=int, default=0, help='save intermediate models at this interval')
saving_group.add_argument('--model_init_path', help='model init path')
# Logging/validation
logging_group = parser.add_argument_group('logging', 'Logging and validation arguments')
logging_group.add_argument('--log_interval', type=int, default=1000, help='log at this interval (defaults to 1000)')
    logging_group.add_argument('--validate_batch_size', type=int, default=1, help='the batch size for validation (defaults to 1)')
    corpora_group.add_argument('--inference_output', help='path to write inference outputs')
    corpora_group.add_argument('--validation_src_path', help='the validation source corpus')
    corpora_group.add_argument('--validation_trg_path', help='the validation target corpus')
# Other
parser.add_argument('--encoding', default='utf-8', help='the character encoding for input/output (defaults to utf-8)')
parser.add_argument('--cuda', default=False, action='store_true', help='use cuda')
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument("--type", type=str, default='train', help="type: train/inference/debug")
args = parser.parse_args()
print(args)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
src_dictionary = Dictionary([word.strip() for word in open(args.src_vocabulary).readlines()])
trg_dictionary = Dictionary([word.strip() for word in open(args.trg_vocabulary).readlines()])
def init_weights(m):
for name, param in m.named_parameters():
if 'weight' in name:
nn.init.normal_(param.data, mean=0, std=0.01)
else:
nn.init.constant_(param.data, 0)
if not args.model_init_path:
attn = Attention(args.enc_hid_dim, args.dec_hid_dim)
enc = Encoder(src_dictionary.size(), args.embedding_size, args.enc_hid_dim, args.dec_hid_dim, args.dropout, src_dictionary.PAD)
dec = Decoder(trg_dictionary.size(), args.embedding_size, args.enc_hid_dim, args.dec_hid_dim, args.dropout, attn)
s2s = Seq2Seq(enc, dec, src_dictionary.PAD, device)
parallel_model = Parser(src_dictionary, trg_dictionary, s2s, device)
parallel_model.apply(init_weights)
else:
print(f"load init model from {args.model_init_path}")
parallel_model = torch.load(args.model_init_path)
parallel_model = parallel_model.to(device)
if args.type ==TEST:
test_dataset = treeDataset(args.validation_src_path, args.validation_trg_path)
test_dataloader = DataLoader(test_dataset, shuffle = False, batch_size = args.validate_batch_size,collate_fn = collate_fn)
hit, total, acc = evaluate_iter_loss2(parallel_model, test_dataloader, src_dictionary, trg_dictionary, device)
print(f'hit: {hit: d} | total: {total: d} | acc: {acc: f}', flush=True)
elif args.type==INFERENCE:
test_dataset = customDataset(args.validation_src_path, args.validation_trg_path)
test_dataloader = DataLoader(test_dataset, shuffle = False, batch_size = args.validate_batch_size)
hit, total, acc = evaluate_iter_acc(parallel_model, test_dataloader, src_dictionary, trg_dictionary, device, args.inference_output)
print(f'hit: {hit: d} | total: {total: d} | acc: {acc: f}', flush=True)
elif args.type == DEBUG:
test_dataset = treeDataset(args.validation_src_path, args.validation_trg_path)
test_dataloader = DataLoader(test_dataset, shuffle = False, batch_size = args.validate_batch_size,collate_fn = collate_fn)
hit, total, acc = debug_iter(parallel_model, test_dataloader, src_dictionary, trg_dictionary, device)
print(f'hit: {hit: d} | total: {total: d} | acc: {acc: f}', flush=True)
else:
train_dataset = treeDataset(args.src_path, args.trg_path)
train_dataloader = DataLoader(train_dataset, shuffle = True, batch_size = args.batch_size, collate_fn = collate_fn)
test_dataset = treeDataset(args.validation_src_path, args.validation_trg_path)
test_dataloader = DataLoader(test_dataset, shuffle = False, batch_size = args.validate_batch_size,collate_fn = collate_fn)
train(src_dictionary,trg_dictionary, train_dataloader, test_dataloader, parallel_model, device, args)
if __name__ == '__main__':
main()
# evaluate(device = torch.device('cuda' if torch.cuda.is_available() else 'cpu'))
|
ContextualSP/poset_decoding/sketch_prediction/main.py/0
|
{
"file_path": "ContextualSP/poset_decoding/sketch_prediction/main.py",
"repo_id": "ContextualSP",
"token_count": 8044
}
| 272 |
import copy
import typing
import logging
import torch
import hyperopt
import numpy as np
import matchzoo as mz
from matchzoo.engine.base_metric import BaseMetric
from matchzoo.utils import parse_optimizer
class Tuner(object):
"""
Model hyper-parameters tuner.
    `model.params.hyper_space` represents the model's hyper-parameters
search space, which is the cross-product of individual hyper parameter's
hyper space. When a `Tuner` builds a model, for each hyper parameter in
`model.params`, if the hyper-parameter has a hyper-space, then a sample
will be taken in the space. However, if the hyper-parameter does not
have a hyper-space, then the default value of the hyper-parameter will
be used.
See `tutorials/model_tuning.ipynb` for a detailed walkthrough on usage.
:param params: A completed parameter table to tune. Usually `model.params`
of the desired model to tune. `params.completed()` should be `True`.
:param optimizer: Str or `Optimizer` class. Optimizer for optimizing model.
:param trainloader: Training data to use. Should be a `DataLoader`.
:param validloader: Testing data to use. Should be a `DataLoader`.
:param embedding: Embedding used by model.
:param fit_kwargs: Extra keyword arguments to pass to `fit`.
        (default: `dict(epochs=5, verbose=0)`)
:param metric: Metric to tune upon. Must be one of the metrics in
`model.params['task'].metrics`. (default: the first metric in
        `params['task'].metrics`.)
:param mode: Either `maximize` the metric or `minimize` the metric.
(default: 'maximize')
:param num_runs: Number of runs. Each run takes a sample in
        `params.hyper_space` and builds a model based on the sample.
(default: 10)
:param verbose: Verbosity. (default: 1)
"""
def __init__(
self,
params: 'mz.ParamTable',
optimizer: str = 'adam',
trainloader: mz.dataloader.DataLoader = None,
validloader: mz.dataloader.DataLoader = None,
embedding: np.ndarray = None,
fit_kwargs: dict = None,
metric: typing.Union[str, BaseMetric] = None,
mode: str = 'maximize',
num_runs: int = 10,
verbose=1
):
"""Tuner."""
if fit_kwargs is None:
fit_kwargs = dict(epochs=5, verbose=0)
if 'with_embedding' in params:
params['embedding'] = embedding
params['embedding_input_dim'] = embedding.shape[0]
params['embedding_output_dim'] = embedding.shape[1]
self._validate_params(params)
metric = metric or params['task'].metrics[0]
self._validate_optimizer(optimizer)
self._validate_dataloader(trainloader)
self._validate_dataloader(validloader)
self._validate_kwargs(fit_kwargs)
self._validate_mode(mode)
self._validate_metric(params, metric)
self.__curr_run_num = 0
# these variables should not change within the same `tune` call
self._params = params
self._optimizer = parse_optimizer(optimizer)
self._trainloader = trainloader
self._validloader = validloader
self._embedding = embedding
self._fit_kwargs = fit_kwargs
self._metric = metric
self._mode = mode
self._num_runs = num_runs
self._verbose = verbose
def tune(self):
"""
Start tuning.
Notice that `tune` does not affect the tuner's inner state, so each
new call to `tune` starts fresh. In other words, hyperspaces are
suggestive only within the same `tune` call.
"""
if self.__curr_run_num != 0:
print(
"""WARNING: `tune` does not affect the tuner's inner state, so
each new call to `tune` starts fresh. In other words,
hyperspaces are suggestive only within the same `tune` call."""
)
self.__curr_run_num = 0
logging.getLogger('hyperopt').setLevel(logging.CRITICAL)
trials = hyperopt.Trials()
self._fmin(trials)
return {
'best': trials.best_trial['result']['mz_result'],
'trials': [trial['result']['mz_result'] for trial in trials.trials]
}
def _fmin(self, trials):
# new version of hyperopt has keyword argument `show_progressbar` that
# breaks doctests, so here's a workaround
fmin_kwargs = dict(
fn=self._run,
space=self._params.hyper_space,
algo=hyperopt.tpe.suggest,
max_evals=self._num_runs,
trials=trials
)
try:
hyperopt.fmin(
**fmin_kwargs,
show_progressbar=False
)
except TypeError:
hyperopt.fmin(**fmin_kwargs)
def _run(self, sample):
self.__curr_run_num += 1
# build model
params = self._create_full_params(sample)
model = params['model_class'](params=params)
model.build()
trainer = mz.trainers.Trainer(
model=model,
optimizer=self._optimizer(model.parameters()),
trainloader=self._trainloader,
validloader=self._validloader,
**self._fit_kwargs,
)
# fit & evaluate
trainer.run()
lookup = trainer.evaluate(self._validloader)
score = lookup[self._metric]
# collect result
# this result is for users, visible outside
mz_result = {
'#': self.__curr_run_num,
'params': params,
'sample': sample,
'score': score
}
if self._verbose:
self._log_result(mz_result)
return {
# these two items are for hyperopt
'loss': self._fix_loss_sign(score),
'status': hyperopt.STATUS_OK,
# this item is for storing matchzoo information
'mz_result': mz_result
}
def _create_full_params(self, sample):
params = copy.deepcopy(self._params)
params.update(sample)
return params
def _fix_loss_sign(self, loss):
if self._mode == 'maximize':
loss = -loss
return loss
@classmethod
def _log_result(cls, result):
print(f"Run #{result['#']}")
print(f"Score: {result['score']}")
print(result['params'])
print()
@property
def params(self):
"""`params` getter."""
return self._params
@params.setter
def params(self, value):
"""`params` setter."""
self._validate_params(value)
self._validate_metric(value, self._metric)
self._params = value
@property
def trainloader(self):
"""`trainloader` getter."""
return self._trainloader
@trainloader.setter
def trainloader(self, value):
"""`trainloader` setter."""
self._validate_dataloader(value)
self._trainloader = value
@property
def validloader(self):
"""`validloader` getter."""
return self._validloader
@validloader.setter
def validloader(self, value):
"""`validloader` setter."""
self._validate_dataloader(value)
self._validloader = value
@property
def fit_kwargs(self):
"""`fit_kwargs` getter."""
return self._fit_kwargs
@fit_kwargs.setter
def fit_kwargs(self, value):
"""`fit_kwargs` setter."""
self._validate_kwargs(value)
self._fit_kwargs = value
@property
def metric(self):
"""`metric` getter."""
return self._metric
@metric.setter
def metric(self, value):
"""`metric` setter."""
self._validate_metric(self._params, value)
self._metric = value
@property
def mode(self):
"""`mode` getter."""
return self._mode
@mode.setter
def mode(self, value):
"""`mode` setter."""
self._validate_mode(value)
self._mode = value
@property
def num_runs(self):
"""`num_runs` getter."""
return self._num_runs
@num_runs.setter
def num_runs(self, value):
"""`num_runs` setter."""
self._validate_num_runs(value)
self._num_runs = value
@property
def verbose(self):
"""`verbose` getter."""
return self._verbose
@verbose.setter
def verbose(self, value):
"""`verbose` setter."""
self._verbose = value
@classmethod
def _validate_params(cls, params):
if not isinstance(params, mz.ParamTable):
raise TypeError("Only accepts a `ParamTable` instance.")
if not params.hyper_space:
raise ValueError("Parameter hyper-space empty.")
if not params.completed(exclude=['out_activation_func']):
raise ValueError("Parameters not complete.")
@classmethod
def _validate_optimizer(cls, optimizer):
if not isinstance(optimizer, (str, torch.optim.Optimizer)):
raise TypeError(
"Only accepts a `Optimizer` instance.")
@classmethod
def _validate_dataloader(cls, data):
if not isinstance(data, mz.dataloader.DataLoader):
raise TypeError(
"Only accepts a `DataLoader` instance.")
@classmethod
def _validate_kwargs(cls, kwargs):
if not isinstance(kwargs, dict):
raise TypeError('Only accepts a `dict` instance.')
@classmethod
def _validate_mode(cls, mode):
if mode not in ('maximize', 'minimize'):
raise ValueError('`mode` should be one of `maximize`, `minimize`.')
@classmethod
def _validate_metric(cls, params, metric):
if metric not in params['task'].metrics:
raise ValueError('Target metric does not exist in the task.')
@classmethod
def _validate_num_runs(cls, num_runs):
if not isinstance(num_runs, int):
raise TypeError('Only accepts an `int` value.')
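# Usage sketch (illustrative only; `trainloader`, `validloader` and
# `embedding_matrix` are assumed to be prepared beforehand, e.g. as in the
# MatchZoo tutorials, and are not defined in this module):
#
#   model = mz.models.MatchLSTM()
#   model.params['task'] = mz.tasks.Ranking()
#   model.guess_and_fill_missing_params(verbose=0)
#   tuner = Tuner(
#       params=model.params,
#       trainloader=trainloader,
#       validloader=validloader,
#       embedding=embedding_matrix,
#       num_runs=5,
#   )
#   results = tuner.tune()
#   print(results['best']['score'])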
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/auto/tuner/tuner.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/auto/tuner/tuner.py",
"repo_id": "ContextualSP",
"token_count": 4419
}
| 273 |
"""SNLI data loader."""
import typing
from pathlib import Path
import pandas as pd
import keras
import matchzoo
_url = "https://nlp.stanford.edu/projects/snli/snli_1.0.zip"
def load_data(
stage: str = 'train',
task: str = 'classification',
target_label: str = 'entailment',
return_classes: bool = False,
    data_root: str = '',
    suffix: str = 'mask_classification.csv'
) -> typing.Union[matchzoo.DataPack, tuple]:
"""
    Load CFQ data.
:param stage: One of `train`, `dev`, and `test`. (default: `train`)
:param task: Could be one of `ranking`, `classification` or a
        :class:`matchzoo.engine.BaseTask` instance. (default: `classification`)
    :param target_label: If `ranking`, choose one of `entailment`,
`contradiction`, `neutral`, and `-` as the positive label.
(default: `entailment`)
:param return_classes: `True` to return classes for classification task,
`False` otherwise.
    :return: A DataPack unless `task` is `classification` and `return_classes`
is `True`: a tuple of `(DataPack, classes)` in that case.
"""
if stage not in ('train', 'dev', 'test', 'debug'):
raise ValueError(f"{stage} is not a valid stage."
f"Must be one of `train`, `dev`, and `test`.")
split = "mcd1"
file_path=f"{data_root}/{split}/{stage}/{stage}_{suffix}"
data_pack = _read_data(file_path, task, target_label)
if task == 'ranking':
task = matchzoo.tasks.Ranking()
if task == 'classification':
task = matchzoo.tasks.Classification()
if isinstance(task, matchzoo.tasks.Ranking):
if target_label not in ['entailment', 'contradiction', 'neutral', '-']:
raise ValueError(f"{target_label} is not a valid target label."
f"Must be one of `entailment`, `contradiction`, "
f"`neutral` and `-`.")
binary = (data_pack.relation['label'] == target_label).astype(float)
data_pack.relation['label'] = binary
return data_pack
elif isinstance(task, matchzoo.tasks.Classification):
classes = [True, False]
if return_classes:
return data_pack, classes
else:
return data_pack
else:
raise ValueError(f"{task} is not a valid task."
f"Must be one of `Ranking` and `Classification`.")
# def _download_data():
# ref_path = keras.utils.data_utils.get_file(
# 'snli', _url, extract=True,
# cache_dir=matchzoo.USER_DATA_DIR,
# cache_subdir='snli'
# )
# return Path(ref_path).parent.joinpath('snli_1.0')
def _read_data(path, task, target_label):
table = pd.read_csv(path, sep='\t')
df = pd.DataFrame({
'text_left': table['sentence1'],
'text_right': table['sentence2'],
'label': table['gold_label']
})
    df = df.dropna(axis=0, how='any').reset_index(drop=True)
filter_id = df[df['label'] == '-'].index.tolist()
df.drop(filter_id, inplace=True)
if task == 'ranking' or isinstance(task, matchzoo.tasks.Ranking):
if target_label not in ['entailment', 'contradiction', 'neutral']:
raise ValueError(f"{target_label} is not a valid target label."
f"Must be one of `entailment`, `contradiction`"
f" and `neutral`")
df['label'] = (df['label'] == target_label)
elif task == 'classification' or isinstance(
task, matchzoo.tasks.Classification):
classes = [True, False]
df['label'] = df['label'].apply(classes.index)
else:
raise ValueError(f"{task} is not a valid task."
f"Must be one of `Ranking` and `Classification`.")
return matchzoo.pack(df, task)
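# Example call (illustrative; `data_root` must point at a directory laid out as
# `<data_root>/mcd1/<stage>/<stage>_mask_classification.csv`):
#
#   train_pack, classes = load_data(
#       stage='train',
#       task='classification',
#       return_classes=True,
#       data_root='/path/to/cfq',
#   )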
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/datasets/cfq/load_data.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/datasets/cfq/load_data.py",
"repo_id": "ContextualSP",
"token_count": 1699
}
| 274 |
"""The rank cross entropy loss."""
import torch
from torch import nn
import torch.nn.functional as F
class RankCrossEntropyLoss(nn.Module):
"""Creates a criterion that measures rank cross entropy loss."""
__constants__ = ['num_neg']
def __init__(self, num_neg: int = 1):
"""
:class:`RankCrossEntropyLoss` constructor.
        :param num_neg: Number of negative instances in rank cross entropy loss.
"""
super().__init__()
self.num_neg = num_neg
def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor):
"""
Calculate rank cross entropy loss.
:param y_pred: Predicted result.
:param y_true: Label.
        :return: Rank cross entropy loss.
"""
logits = y_pred[::(self.num_neg + 1), :]
labels = y_true[::(self.num_neg + 1), :]
for neg_idx in range(self.num_neg):
neg_logits = y_pred[(neg_idx + 1)::(self.num_neg + 1), :]
neg_labels = y_true[(neg_idx + 1)::(self.num_neg + 1), :]
logits = torch.cat((logits, neg_logits), dim=-1)
labels = torch.cat((labels, neg_labels), dim=-1)
return -torch.mean(
torch.sum(
labels * torch.log(F.softmax(logits, dim=-1) + torch.finfo(float).eps),
dim=-1
)
)
@property
def num_neg(self):
"""`num_neg` getter."""
return self._num_neg
@num_neg.setter
def num_neg(self, value):
"""`num_neg` setter."""
self._num_neg = value
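# Layout sketch: with `num_neg=2`, consecutive rows of `y_pred`/`y_true` are
# grouped as [pos_1, neg_1a, neg_1b, pos_2, neg_2a, neg_2b, ...], so the strided
# slicing in `forward` regroups each positive with its negatives before the
# softmax over the (num_neg + 1)-way ranking decision. Toy example:
#
#   loss_fn = RankCrossEntropyLoss(num_neg=2)
#   y_pred = torch.tensor([[2.0], [0.5], [0.1], [1.5], [0.3], [0.2]])
#   y_true = torch.tensor([[1.0], [0.0], [0.0], [1.0], [0.0], [0.0]])
#   loss = loss_fn(y_pred, y_true)  # scalar tensor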
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/losses/rank_cross_entropy_loss.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/losses/rank_cross_entropy_loss.py",
"repo_id": "ContextualSP",
"token_count": 730
}
| 275 |
"""An implementation of Bert Model."""
import typing
import torch
import torch.nn as nn
from pytorch_transformers import BertModel
from matchzoo import preprocessors
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine.param import Param
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.base_preprocessor import BasePreprocessor
from matchzoo.engine import hyper_spaces
from matchzoo.dataloader import callbacks
from matchzoo.modules import BertModule
class Bert(BaseModel):
"""Bert Model."""
@classmethod
def get_default_params(cls) -> ParamTable:
""":return: model default parameters."""
params = super().get_default_params()
params.add(Param(name='mode', value='bert-base-uncased',
desc="Pretrained Bert model."))
params.add(Param(
'dropout_rate', 0.0,
hyper_space=hyper_spaces.quniform(
low=0.0, high=0.8, q=0.01),
desc="The dropout rate."
))
return params
@classmethod
def get_default_preprocessor(
cls,
mode: str = 'bert-base-uncased'
) -> BasePreprocessor:
""":return: Default preprocessor."""
return preprocessors.BertPreprocessor(mode=mode)
@classmethod
def get_default_padding_callback(
cls,
fixed_length_left: int = None,
fixed_length_right: int = None,
pad_value: typing.Union[int, str] = 0,
pad_mode: str = 'pre'
):
""":return: Default padding callback."""
return callbacks.BertPadding(
fixed_length_left=fixed_length_left,
fixed_length_right=fixed_length_right,
pad_value=pad_value,
pad_mode=pad_mode)
def build(self):
"""Build model structure."""
self.bert = BertModule(mode=self._params['mode'])
self.dropout = nn.Dropout(p=self._params['dropout_rate'])
if 'base' in self._params['mode']:
dim = 768
elif 'large' in self._params['mode']:
dim = 1024
self.out = self._make_output_layer(dim)
def forward(self, inputs):
"""Forward."""
input_left, input_right = inputs['text_left'], inputs['text_right']
bert_output = self.bert(input_left, input_right)[1]
out = self.out(self.dropout(bert_output))
return out
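# Construction sketch (illustrative; downloads pretrained weights through
# `pytorch_transformers` on first use):
#
#   import matchzoo as mz
#   model = Bert()
#   model.params['task'] = mz.tasks.Classification(num_classes=2)
#   model.params['mode'] = 'bert-base-uncased'
#   model.params['dropout_rate'] = 0.2
#   model.build()
#   preprocessor = Bert.get_default_preprocessor(mode='bert-base-uncased')
#   padding_callback = Bert.get_default_padding_callback()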
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/bert.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/bert.py",
"repo_id": "ContextualSP",
"token_count": 1023
}
| 276 |
"""An implementation of Match LSTM Model."""
import typing
import torch
import torch.nn as nn
from torch.nn import functional as F
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine.param import Param
from matchzoo.engine.base_model import BaseModel
from matchzoo.modules import MatchModule
from matchzoo.modules import StackedBRNN
class MatchLSTM(BaseModel):
"""
MatchLSTM Model.
https://github.com/shuohangwang/mprc/blob/master/qa/rankerReader.lua.
Examples:
>>> model = MatchLSTM()
>>> model.params['dropout'] = 0.2
>>> model.params['hidden_size'] = 200
>>> model.guess_and_fill_missing_params(verbose=0)
>>> model.build()
"""
@classmethod
def get_default_params(cls) -> ParamTable:
""":return: model default parameters."""
params = super().get_default_params(
with_embedding=True,
with_multi_layer_perceptron=False
)
params.add(Param(name='mask_value', value=0,
desc="The value to be masked from inputs."))
params.add(Param(name='dropout', value=0.2,
desc="Dropout rate."))
params.add(Param(name='hidden_size', value=200,
desc="Hidden size."))
params.add(Param(name='lstm_layer', value=1,
desc="Number of LSTM layers"))
params.add(Param(name='drop_lstm', value=False,
desc="Whether dropout LSTM."))
params.add(Param(name='concat_lstm', value=True,
desc="Whether concat intermediate outputs."))
params.add(Param(name='rnn_type', value='lstm',
desc="Choose rnn type, lstm or gru."))
return params
def build(self):
"""Instantiating layers."""
rnn_mapping = {'lstm': nn.LSTM, 'gru': nn.GRU}
self.embedding = self._make_default_embedding_layer()
self.dropout = nn.Dropout(p=self._params['dropout'])
        if self._params['concat_lstm']:
            lstm_layer = self._params['lstm_layer']
            lstm_size = self._params['hidden_size'] / lstm_layer
        else:
            lstm_size = self._params['hidden_size']
self.input_proj = StackedBRNN(
self._params['embedding_output_dim'],
int(lstm_size / 2),
self._params['lstm_layer'],
dropout_rate=self._params['dropout'],
dropout_output=self._params['drop_lstm'],
rnn_type=rnn_mapping[self._params['rnn_type'].lower()],
concat_layers=self._params['concat_lstm'])
self.match_module = MatchModule(
self._params['hidden_size'], dropout_rate=self._params['dropout'])
self.mlstm_module = StackedBRNN(
2 * self._params['hidden_size'],
int(lstm_size / 2),
self._params['lstm_layer'],
dropout_rate=self._params['dropout'],
dropout_output=self._params['drop_lstm'],
rnn_type=rnn_mapping[self._params['rnn_type'].lower()],
concat_layers=self._params['concat_lstm'])
self.classification = nn.Sequential(
nn.Dropout(
p=self._params['dropout']),
nn.Linear(
self._params['hidden_size'],
self._params['hidden_size']),
nn.Tanh())
self.out = self._make_output_layer(self._params['hidden_size'])
def forward(self, inputs):
"""Forward."""
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# D = embedding size
# L = `input_left` sequence length
# R = `input_right` sequence length
# H = hidden size
# [B, L], [B, R]
query, doc = inputs['text_left'].long(), inputs['text_right'].long()
# [B, L]
# [B, R]
query_mask = (query == self._params['mask_value'])
doc_mask = (doc == self._params['mask_value'])
# [B, L, D]
# [B, R, D]
query = self.embedding(query)
doc = self.embedding(doc)
# [B, L, D]
# [B, R, D]
query = self.dropout(query)
doc = self.dropout(doc)
# [B, L, H]
# [B, R, H]
query = self.input_proj(query, query_mask)
doc = self.input_proj(doc, doc_mask)
# [B, L, H]
match_out = self.match_module(
query, doc, doc_mask)
# [B, L, H]
mlstm_out = self.mlstm_module(match_out, query_mask)
# [B, H]
max_pool_rep, _ = mlstm_out.max(dim=1)
# [B, H]
hidden = self.classification(max_pool_rep)
# [B, num_classes]
out = self.out(hidden)
return out
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/matchlstm.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/matchlstm.py",
"repo_id": "ContextualSP",
"token_count": 2316
}
| 277 |
from . import units
from .naive_preprocessor import NaivePreprocessor
from .basic_preprocessor import BasicPreprocessor
from .bert_preprocessor import BertPreprocessor
def list_available() -> list:
from matchzoo.engine.base_preprocessor import BasePreprocessor
from matchzoo.utils import list_recursive_concrete_subclasses
return list_recursive_concrete_subclasses(BasePreprocessor)
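# Example: enumerate every concrete preprocessor currently implemented.
#
#   import matchzoo as mz
#   mz.preprocessors.list_available()
#   # -> [NaivePreprocessor, BasicPreprocessor, BertPreprocessor, ...]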
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/__init__.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/__init__.py",
"repo_id": "ContextualSP",
"token_count": 110
}
| 278 |
import abc
import typing
from .unit import Unit
class StatefulUnit(Unit, metaclass=abc.ABCMeta):
"""
Unit with inner state.
    Usually needs to be fit before transforming. All information gathered in the
    fit phase will be stored in its `context`.
"""
def __init__(self):
"""Initialization."""
self._context = {}
@property
def state(self):
"""
Get current context. Same as `unit.context`.
Deprecated since v2.2.0, and will be removed in the future.
        Use `unit.context` instead.
"""
return self._context
@property
def context(self):
"""Get current context. Same as `unit.state`."""
return self._context
@abc.abstractmethod
def fit(self, input_: typing.Any):
"""Abstract base method, need to be implemented in subclass."""
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/units/stateful_unit.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/units/stateful_unit.py",
"repo_id": "ContextualSP",
"token_count": 321
}
| 279 |
"""Early stopping."""
import typing
import torch
import numpy as np
class EarlyStopping:
"""
EarlyStopping stops training if no improvement after a given patience.
    :param patience: Number of epochs to wait for an improvement before
        stopping the training.
    :param should_decrease: Whether a smaller metric value counts as an
        improvement.
:param key: Key of metric to be compared.
"""
def __init__(
self,
patience: typing.Optional[int] = None,
should_decrease: bool = None,
key: typing.Any = None
):
"""Early stopping Constructor."""
self._patience = patience
self._key = key
self._best_so_far = 0
self._epochs_with_no_improvement = 0
self._is_best_so_far = False
self._early_stop = False
def state_dict(self) -> typing.Dict[str, typing.Any]:
"""A `Trainer` can use this to serialize the state."""
return {
'patience': self._patience,
'best_so_far': self._best_so_far,
'is_best_so_far': self._is_best_so_far,
'epochs_with_no_improvement': self._epochs_with_no_improvement,
}
def load_state_dict(
self,
state_dict: typing.Dict[str, typing.Any]
) -> None:
"""Hydrate a early stopping from a serialized state."""
self._patience = state_dict["patience"]
self._is_best_so_far = state_dict["is_best_so_far"]
self._best_so_far = state_dict["best_so_far"]
self._epochs_with_no_improvement = \
state_dict["epochs_with_no_improvement"]
def update(self, result: list):
"""Call function."""
score = result[self._key]
if score > self._best_so_far:
self._best_so_far = score
self._is_best_so_far = True
self._epochs_with_no_improvement = 0
else:
self._is_best_so_far = False
self._epochs_with_no_improvement += 1
@property
    def best_so_far(self) -> float:
        """Return the best score so far."""
return self._best_so_far
@property
def is_best_so_far(self) -> bool:
"""Returns true if it is the best so far."""
return self._is_best_so_far
@property
def should_stop_early(self) -> bool:
"""Returns true if improvement has stopped for long enough."""
if not self._patience:
return False
else:
return self._epochs_with_no_improvement >= self._patience
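# Usage sketch inside a training loop (names such as `evaluate` and
# `save_checkpoint` are placeholders, and the metric key is illustrative):
#
#   early_stopping = EarlyStopping(patience=3, key='ndcg@5')
#   for epoch in range(max_epochs):
#       result = evaluate(model, validloader)  # e.g. {'ndcg@5': 0.61, ...}
#       early_stopping.update(result)
#       if early_stopping.is_best_so_far:
#           save_checkpoint(model)
#       if early_stopping.should_stop_early:
#           break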
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/utils/early_stopping.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/utils/early_stopping.py",
"repo_id": "ContextualSP",
"token_count": 1107
}
| 280 |
import pytest
from matchzoo.engine.base_task import BaseTask
def test_base_task_instantiation():
with pytest.raises(TypeError):
BaseTask()
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tests/engine/test_base_task.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tests/engine/test_base_task.py",
"repo_id": "ContextualSP",
"token_count": 56
}
| 281 |
<jupyter_start><jupyter_code>import torch
import numpy as np
import pandas as pd
import matchzoo as mz
print('matchzoo version', mz.__version__)
classification_task = mz.tasks.Classification(num_classes=2)
classification_task.metrics = ['acc']
print("`classification_task` initialized with metrics", classification_task.metrics)
print('data loading ...')
train_pack_raw = mz.datasets.wiki_qa.load_data('train', task=classification_task)
dev_pack_raw = mz.datasets.wiki_qa.load_data('dev', task=classification_task)
test_pack_raw = mz.datasets.wiki_qa.load_data('test', task=classification_task)
print('data loaded as `train_pack_raw` `dev_pack_raw` `test_pack_raw`')<jupyter_output>data loading ...
data loaded as `train_pack_raw` `dev_pack_raw` `test_pack_raw`
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tutorials/classification/init.ipynb/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tutorials/classification/init.ipynb",
"repo_id": "ContextualSP",
"token_count": 270
}
| 282 |
<jupyter_start><jupyter_code>%run init.ipynb
ranking_task = mz.tasks.Ranking(losses=mz.losses.RankCrossEntropyLoss(num_neg=10))
ranking_task.metrics = [
mz.metrics.NormalizedDiscountedCumulativeGain(k=3),
mz.metrics.NormalizedDiscountedCumulativeGain(k=5),
mz.metrics.MeanAveragePrecision()
]
preprocessor = mz.models.MatchLSTM.get_default_preprocessor()
train_pack_processed = preprocessor.fit_transform(train_pack_raw)
dev_pack_processed = preprocessor.transform(dev_pack_raw)
test_pack_processed = preprocessor.transform(test_pack_raw)
preprocessor.context
glove_embedding = mz.datasets.embeddings.load_glove_embedding(dimension=300)
term_index = preprocessor.context['vocab_unit'].state['term_index']
embedding_matrix = glove_embedding.build_matrix(term_index)
l2_norm = np.sqrt((embedding_matrix * embedding_matrix).sum(axis=1))
embedding_matrix = embedding_matrix / l2_norm[:, np.newaxis]
trainset = mz.dataloader.Dataset(
data_pack=train_pack_processed,
mode='pair',
num_dup=5,
num_neg=10
)
testset = mz.dataloader.Dataset(
data_pack=test_pack_processed
)
padding_callback = mz.models.MatchLSTM.get_default_padding_callback()
trainloader = mz.dataloader.DataLoader(
dataset=trainset,
batch_size=20,
stage='train',
resample=True,
sort=False,
shuffle=True,
callback=padding_callback
)
testloader = mz.dataloader.DataLoader(
dataset=testset,
batch_size=20,
stage='dev',
sort=False,
shuffle=False,
callback=padding_callback
)
model = mz.models.MatchLSTM()
model.params['task'] = ranking_task
model.params['mask_value'] = 0
model.params['embedding'] = embedding_matrix
model.build()
print(model, sum(p.numel() for p in model.parameters() if p.requires_grad))
optimizer = torch.optim.Adadelta(model.parameters())
trainer = mz.trainers.Trainer(
model=model,
optimizer=optimizer,
trainloader=trainloader,
validloader=testloader,
validate_interval=None,
epochs=10
)
trainer.run()<jupyter_output><empty_output>
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tutorials/ranking/matchlstm.ipynb/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tutorials/ranking/matchlstm.ipynb",
"repo_id": "ContextualSP",
"token_count": 804
}
| 283 |
set model_file=checkpoints_sparc/sparc_concat_none_model
python -m allennlp.service.server_simple ^
--archive-path %model_file%/model.tar.gz ^
--predictor sparc ^
--include-package predictor.sparc_predictor ^
--include-package dataset_reader.sparc_reader ^
--include-package models.sparc_parser ^
--title "Contextual Semantic Parsing Demo" ^
--field-name question ^
--field-name database_id
|
ContextualSP/semantic_parsing_in_context/bash_files/windows/demo.bat/0
|
{
"file_path": "ContextualSP/semantic_parsing_in_context/bash_files/windows/demo.bat",
"repo_id": "ContextualSP",
"token_count": 157
}
| 284 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import json
import os
import random
import sys
import traceback
from typing import List, Dict, Iterable, Optional
import dill
import numpy as np
from allennlp.common.checks import ConfigurationError
from allennlp.data import DatasetReader, TokenIndexer, Field, Instance
from allennlp.data import Token
from allennlp.data.fields import TextField, ListField, IndexField, MetadataField, ArrayField
from context.copy_production_rule_field import CopyProductionRuleField
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.tokenizers import WordTokenizer, Tokenizer
from allennlp.data.tokenizers.word_splitter import SpacyWordSplitter, JustSpacesWordSplitter
from overrides import overrides
from spacy.symbols import ORTH, LEMMA
import multiprocessing as mp
import string
from context.db_context import SparcDBContext
from context.grammar import Grammar
from context.world import SparcWorld
from .util import SparcKnowledgeGraphField, index_entity_type
from constant import *
from .reader_queue import QIterable
import pickle
from tqdm import tqdm
from dataset_reader.util import diff_tree
@DatasetReader.register('sparc')
class SparcDatasetReader(DatasetReader):
def __init__(self,
lazy: bool = False,
utterance_tokenizer: Tokenizer = None,
utterance_token_indexers: Dict[str, TokenIndexer] = None,
# none, dis, concat
context_mode: str = ContextMode.context_independent,
copy_mode: str = CopyMode.no_copy,
# none, token, segment
# copy_mode: str = CopyMode.no_copy,
bert_mode: str = "v0",
num_workers: int = 1,
tables_file: str = None,
database_path: str = 'dataset\\database',
cache_method: str = 'pickle',
cache_mode: str = 'all',
load_cache: bool = True,
save_cache: bool = True,
loading_limit: int = -1,
# utilize how many context
maximum_history_len: int = 5,
memory_friend: bool = False):
super().__init__(lazy=lazy)
# we use spacy tokenizer as the default tokenizer
# default spacy tokenizer splits the common token 'id' to ['i', 'd'], we here write a manual fix for that
spacy_splitter = SpacyWordSplitter(pos_tags=True)
spacy_splitter.spacy.tokenizer.add_special_case(u'id', [{ORTH: u'id', LEMMA: u'id'}])
self._tokenizer = utterance_tokenizer or WordTokenizer(spacy_splitter)
self._indexer = utterance_token_indexers or {'tokens': SingleIdTokenIndexer(namespace='tokens')}
# space tokenizer is used for nonterminal tokenize
self._non_terminal_tokenizer = WordTokenizer(JustSpacesWordSplitter())
self._non_terminal_indexer = {'nonterminals': SingleIdTokenIndexer(namespace='nonterminals')}
self._table_file = tables_file
self._database_path = database_path
self._loading_limit = loading_limit
self._load_cache = load_cache
self._save_cache = save_cache
# determine the context mode
self._context_mode = context_mode
self._copy_mode = copy_mode
# v0: no bert
# v3: encode utterance with table jointly, add [CLS] and [SEP]
self._bert_mode = bert_mode
        # we do not need to handle the maximum length ourselves because AllenNLP takes care of it
self._maximum_seq_len = np.inf
self._number_workers = num_workers
        # dill, pickle
self._cache_method = cache_method
# overall, single
self._cache_mode = cache_mode
# maximum_context
self._maximum_history_len = maximum_history_len
        # if memory_friend is enabled, use a single sentence rather than a whole interaction as the basic unit of a batch
self._memory_friend = memory_friend
if memory_friend:
assert self._context_mode == "concat", "We only support to use less memory in concat mode since others" \
"depend on dynamic context such as context representations" \
"and generated SQLs, which assumes batching on interactions."
assert self._copy_mode == "none", "We only support to use less memory in concat mode since others" \
"depend on dynamic context such as context representations" \
"and generated SQLs, which assumes batching on interactions."
def build_instance(self, parameter) -> Iterable[Instance]:
# loading some examples
# if self._loading_limit == total_cnt:
# break
total_cnt, inter_ex = parameter
extension = 'bin' if self._cache_method == CacheMethod.dil else 'pkl'
cache_file = os.path.join(self._cache_dir, f'ins-{total_cnt}.{extension}')
if self._load_cache and os.path.exists(cache_file) and self._cache_mode == CacheMode.single:
if self._cache_method == CacheMethod.dil:
ins = dill.load(open(cache_file, 'rb'))
elif self._cache_method == CacheMethod.pick:
ins = pickle.load(open(cache_file, 'rb'))
else:
raise ConfigurationError("Not such cache method!")
            # only return instances that were built successfully (skip None).
if ins is not None:
# print(max([len(action.field_list) for action in ins.fields['action_sequence']]))
return ins
db_id = inter_ex['database_id']
inter_utter_list = [ex['utterance'] for ex in inter_ex['interaction']]
# use structural sql instead of plain tokens
sql_list = [ex['sql'] for ex in inter_ex['interaction']]
sql_query_list = [ex['query'] for ex in inter_ex['interaction']]
        # TODO: currently one interaction composes an instance; we should do a more careful design
try:
ins = self.text_to_instance(
utter_list=inter_utter_list,
db_id=db_id,
sql_list=sql_list,
sql_query_list=sql_query_list
)
if self._save_cache and self._cache_mode == CacheMode.single:
# save cache into file
if self._cache_method == CacheMethod.dil:
dill.dump(ins, open(cache_file, 'wb'))
elif self._cache_method == CacheMethod.pick:
pickle.dump(ins, open(cache_file, 'wb'))
if ins is not None:
return ins
except Exception as e:
print(f'Error in db_id: {db_id}, utterance: {inter_utter_list}')
exec_info = sys.exc_info()
traceback.print_exception(*exec_info)
@staticmethod
def _dill_load(file_path):
return dill.load(open(file_path, "rb"))
@overrides
def _read(self, file_path: str) -> Iterable[Instance]:
if not file_path.endswith(".json"):
raise ConfigurationError(f"The file path is not designed for SParC dataset {file_path}")
self._cache_dir = os.path.join('cache',
"_".join(file_path.split("\\")[-2:]) +
"_con[" + self._context_mode +
"]_bert[" + str(self._bert_mode) +
"]_cache[" + str(self._cache_mode) + "]")
if self._copy_mode != CopyMode.no_copy:
self._cache_dir += "_copy[" + str(self._copy_mode) + "]"
if self._memory_friend:
self._cache_dir += "_memory[true]"
extension = 'bin' if self._cache_method == CacheMethod.dil else 'pkl'
cache_all_file = os.path.join(self._cache_dir, f"cache_all.{extension}")
if self._load_cache:
if not os.path.exists(self._cache_dir):
os.makedirs(self._cache_dir)
elif self._cache_mode == CacheMode.all and os.path.exists(cache_all_file):
# read without multiple instance
instances = [ins for ins in pickle.load(open(cache_all_file, "rb")) if ins]
return instances
elif self._cache_mode == CacheMode.single and self._number_workers > 1:
instances = []
for ins in QIterable(output_queue_size=400,
epochs_per_read=1,
num_workers=self._number_workers,
call_back=SparcDatasetReader._dill_load,
file_path="{}\\ins-*.bin".format(self._cache_dir)):
if ins: instances.append(ins)
return instances
with open(file_path, "r", encoding="utf8") as data_file:
json_obj = json.load(data_file)
# list of interactions
assert isinstance(json_obj, list)
if self._cache_mode == CacheMode.all:
# write cache here
instances = []
# FIXME: we do not use multiprocessing in caching all
for json_ins in tqdm(enumerate(json_obj)):
ins = self.build_instance(json_ins)
if isinstance(ins, List):
instances.extend(ins)
else:
instances.append(ins)
if self._save_cache:
with open(cache_all_file, 'wb') as cache: # Use highest protocol for speed.
if self._cache_method == CacheMethod.pick:
pickle.dump(instances, cache, protocol=pickle.HIGHEST_PROTOCOL)
elif self._cache_method == CacheMethod.dil:
dill.dump(instances, cache)
return instances
else:
instances = []
# write cache inside build_instance
if self._number_workers > 1:
pool = mp.Pool(processes=self._number_workers)
for ins in tqdm(pool.imap(self.build_instance, enumerate(json_obj))):
if ins: instances.append(ins)
else:
for json_ins in tqdm(enumerate(json_obj)):
ins = self.build_instance(json_ins)
if ins: instances.append(ins)
return instances
@overrides
def text_to_instance(self,
utter_list: List[str],
db_id: str,
sql_list: Optional[List[Dict]] = None,
sql_query_list: Optional[List[Dict]] = None) -> Optional[Instance]:
# return invalid instances
if len(utter_list) == 0:
return None
entity_mask_fields = []
for ind, utter in enumerate(utter_list):
if utter[-1] not in string.punctuation:
utter_list[ind] += ' ' + random.choice(['.', '?'])
cached_global_utterance = utter_list[:]
cached_db_contexts = []
# expand all possible entities
for i in range(len(cached_global_utterance)):
tokenized_utterance = self._tokenizer.tokenize(" ".join(
cached_global_utterance[i - self._maximum_history_len: i + 1]).lower())
tokenized_utterance = [Token(text=t.text, lemma_=t.lemma_) if t.lemma_ != '-PRON-'
else Token(text=t.text, lemma_=t.text) for t in tokenized_utterance]
# unify the entity number for different utterances
temp_db_context = SparcDBContext(db_id=db_id,
utterance=tokenized_utterance,
tokenizer=self._tokenizer,
tables_file=self._table_file,
database_path=self._database_path,
bert_mode=self._bert_mode)
cur_entity_num = len(temp_db_context.knowledge_graph.entities)
cached_db_contexts.append(temp_db_context)
entity_mask_fields.append(ArrayField(np.array([1] * cur_entity_num)))
# from the first to the last
fields: Dict[str, Field] = {}
# world list
world_fields: List[Field] = []
# knowledge list
knowledge_fields: List[Field] = []
# record all utterances
utter_fields: List[Field] = []
# record all segments
segment_fields: List[Field] = []
# record all nonterminals
nonterminal_fields: List[Field] = []
# record all valid actions
valid_rules_fields: List[ListField] = []
# record the action sequence index
index_fields: List[ListField] = []
# record the action sequence (mixed with copy operation) index
index_with_copy_fields: List[ListField] = []
# record the entity type index
entity_type_fields: List[Field] = []
# sql_clauses metadata
schema_position_fields: List[Field] = []
past_utters = []
past_history_len = []
        # TODO: record all segment ids, which are supposed to be no larger than 5
past_segments = []
        # if sql_list is None, fill it with a default sql list to avoid failures
if sql_list is None:
new_sql_list = [Grammar.default_sql_clause() for _ in range(len(utter_list))]
else:
new_sql_list = sql_list
if sql_query_list is None:
new_sql_query_list = ['' for _ in range(len(utter_list))]
else:
new_sql_query_list = sql_query_list
utter_ind = 1
use_hard_token_as_segment = self._context_mode in [ContextMode.copy_hard_token,
ContextMode.concat_hard_token]
# record precedent action sequence, used in copy (either segment-level or token-level)
precedent_action_seq = None
index = 0
# allocate memory instances
memory_instances = []
for fol_utter, sql_clause, sql_query in zip(utter_list, new_sql_list, new_sql_query_list):
# tokenize history and so on
tokenized_utter = self._tokenizer.tokenize(fol_utter.lower())
# the extra END means the end of prediction on latent
tokenized_utter = [Token(text=t.text, lemma_=t.lemma_) if t.lemma_ != '-PRON-'
else Token(text=t.text, lemma_=t.text) for t in tokenized_utter]
            # TODO: cur_segment is only used in scenarios without joint training.
cur_segment = [utter_ind] * len(tokenized_utter)
            # TODO: by default we use tokens with context as data
if self._context_mode in [ContextMode.context_independent,
ContextMode.turn_model,
ContextMode.copy_hard_token]:
tokenized_utterance = tokenized_utter
segment_ids = np.array(cur_segment)
elif self._context_mode in [ContextMode.concat_previous,
ContextMode.concat_history,
ContextMode.concat_hard_token]:
tokenized_utterance = past_utters + tokenized_utter
segment_ids = np.array(past_segments + cur_segment)
else:
raise Exception("Not support for mode :{}".format(self._context_mode))
if self._context_mode == ContextMode.concat_previous:
# update past utterance, ignore the last element(which is the @END@ symbol)
past_utters = tokenized_utter
# add segments, start from 0 (no padding)
past_segments = cur_segment
past_history_len = [len(cur_segment)]
elif self._context_mode in [ContextMode.concat_history, ContextMode.concat_hard_token]:
# update past utterance, ignore the last element(which is the @END@ symbol)
past_utters.extend(tokenized_utter)
# add segments, start from 0 (no padding)
past_segments.extend(cur_segment)
past_history_len.append(len(cur_segment))
else:
past_utters = []
past_segments = []
db_context = cached_db_contexts[index]
assert len(past_segments) == len(past_utters)
table_field = SparcKnowledgeGraphField(db_context.knowledge_graph,
tokenized_utterance,
self._indexer,
bert_mode=db_context.bert_mode,
entity_tokens=db_context.entity_tokens,
include_in_vocab=False, # TODO: self._use_table_for_vocab,
max_table_tokens=None) # self._max_table_tokens)
if self._bert_mode == "v3":
# we prepare [SEP] before each entity text, and use the indexer to identify them automatically
# here we concat the utterance with database schemas together, and feed it into the BERT model.
schema_position = []
schema_tokens = []
utterance_len = len(tokenized_utterance)
for ind, entity_id in enumerate(db_context.knowledge_graph.entities):
entity_tokens = db_context.entity_tokens[ind]
# Utterance [SEP] Col1 [SEP] Col2 [SEP]
schema_tokens.extend([Token(text="[SEP]")])
schema_start = utterance_len + len(schema_tokens)
# currently we only add Table name and Col name
if entity_id.startswith("column"):
# add column
schema_tokens.extend(entity_tokens)
schema_end = utterance_len + len(schema_tokens)
elif entity_id.startswith("table"):
# add table
schema_tokens.extend(entity_tokens)
schema_end = utterance_len + len(schema_tokens)
else:
raise Exception("Currently we do not support encoding for other entities!")
schema_position.append([schema_start, schema_end])
tokenized_utterance = tokenized_utterance + schema_tokens
schema_position_fields.append(ArrayField(np.array(schema_position, dtype=np.int)))
# build world
world = SparcWorld(db_context=db_context,
sql_clause=sql_clause,
sql_query=sql_query)
entity_type_fields.append(ArrayField(index_entity_type(world=world)))
action_non_terminal, action_seq, all_valid_actions = world.get_action_sequence_and_all_actions()
            # the precedent state update must be executed after get_action_sequence_and_all_actions!
if precedent_action_seq is not None:
# assign precedent subtrees
world.update_precedent_state(precedent_action_seq,
extract_tree=not use_hard_token_as_segment)
# make precedent action under the nonterminal rules
world.update_copy_valid_action()
assert action_seq is not None
for action_rule in action_seq:
assert action_rule in all_valid_actions
# append utterance into utter field
utter_fields.append(TextField(tokenized_utterance, self._indexer))
segment_fields.append(ArrayField(segment_ids))
# tokenize non terminal
nonterminal_utter = ' '.join(action_non_terminal)
tokenized_nonterminal = self._non_terminal_tokenizer.tokenize(nonterminal_utter)
nonterminal_fields.append(TextField(tokenized_nonterminal, self._non_terminal_indexer))
# allocate new product file field
temp_rule_fields: List[CopyProductionRuleField] = []
temp_index_fields: List[IndexField] = []
for prod_rule in all_valid_actions:
# get rule's nonterminal name
nonterminal = prod_rule.nonterminal
field = CopyProductionRuleField(rule=str(prod_rule),
is_global_rule=prod_rule.is_global(),
is_copy_rule=False,
nonterminal=nonterminal)
temp_rule_fields.append(field)
single_rule_field = ListField(temp_rule_fields)
# add action sequence into list
action_map = {action.rule: i # type: ignore
for i, action in enumerate(single_rule_field.field_list)}
for prod_rule in action_seq:
field = IndexField(index=action_map[str(prod_rule)],
sequence_field=single_rule_field)
temp_index_fields.append(field)
single_index_field = ListField(temp_index_fields)
# update copy actions
if precedent_action_seq is not None:
# index copy rule
copy_rule_fields: List[CopyProductionRuleField] = []
copy_rule_dict = {}
for local_ind, prod_rule in enumerate(world.precedent_segment_seq):
# get nonterminal name
nonterminal = prod_rule.nonterminal
rule_repr = str(prod_rule)
field = CopyProductionRuleField(rule=rule_repr,
# the copy rule is appended dynamically
is_global_rule=False,
is_copy_rule=True,
nonterminal=nonterminal)
copy_rule_fields.append(field)
copy_rule_dict[local_ind] = prod_rule
# add rule into action_map
copy_rule_idx = len(action_map)
action_map[rule_repr] = copy_rule_idx
# use diff to find
# TODO: we do not use simplediff to avoid the following scenarios:
# Precedent: ... T -> department, Order -> des A limit, ...
# Follow: ... T -> department, Order -> des A limit, ...
# the same sub-sequence will be matched, but it is NOT a subtree!
action_seq_with_copy = diff_tree(precedent_action_seq,
action_seq,
copy_rule_dict,
ret_tree=not use_hard_token_as_segment)
# merge copy rule fields with temp rule fields
temp_rule_fields.extend(copy_rule_fields)
# update single rule_field
single_rule_field = ListField(temp_rule_fields)
temp_index_with_copy_fields: List[IndexField] = []
for prod_rule in action_seq_with_copy:
field = IndexField(index=action_map[str(prod_rule)],
sequence_field=single_rule_field)
temp_index_with_copy_fields.append(field)
single_index_with_copy_field = ListField(temp_index_with_copy_fields)
index_with_copy_fields.append(single_index_with_copy_field)
else:
index_with_copy_fields.append(single_index_field)
# record into the instance-level fields
valid_rules_fields.append(single_rule_field)
index_fields.append(single_index_field)
world_fields.append(MetadataField(world))
knowledge_fields.append(table_field)
# assign state
utter_ind += 1
precedent_action_seq = action_seq
if len(past_history_len) >= self._maximum_history_len:
pop_seq_len = past_history_len.pop(0)
past_segments = past_segments[pop_seq_len:]
past_utters = past_utters[pop_seq_len:]
past_segments = [segment_id - 1 for segment_id in past_segments]
# yield multiple instances by sentences
if self._memory_friend:
if self._bert_mode == "v3":
fields['schema_position'] = ListField(schema_position_fields)
fields['inter_utterance'] = ListField(utter_fields)
fields['inter_schema'] = ListField(knowledge_fields)
fields['inter_nonterminal'] = ListField(nonterminal_fields)
fields['inter_segment'] = ListField(segment_fields)
fields['valid_actions_list'] = ListField(valid_rules_fields)
fields['action_sequence'] = ListField(index_fields)
fields['action_sequence_with_copy'] = ListField(index_with_copy_fields)
fields['worlds'] = ListField(world_fields)
fields['entity_type'] = ListField(entity_type_fields)
# clear fields
schema_position_fields = []
utter_fields = []
knowledge_fields = []
nonterminal_fields = []
segment_fields = []
valid_rules_fields = []
index_fields = []
index_with_copy_fields = []
world_fields = []
entity_type_fields = []
# entity mask is prepared already
fields['entity_mask'] = ListField([entity_mask_fields[index]])
memory_instances.append(Instance(fields))
# update index
index += 1
if self._memory_friend:
return memory_instances
if self._bert_mode == "v3":
fields['schema_position'] = ListField(schema_position_fields)
# all utterances in one interaction
fields['inter_utterance'] = ListField(utter_fields)
fields['inter_schema'] = ListField(knowledge_fields)
fields['inter_nonterminal'] = ListField(nonterminal_fields)
fields['inter_segment'] = ListField(segment_fields)
fields['valid_actions_list'] = ListField(valid_rules_fields)
fields['action_sequence'] = ListField(index_fields)
fields['action_sequence_with_copy'] = ListField(index_with_copy_fields)
fields['worlds'] = ListField(world_fields)
fields['entity_type'] = ListField(entity_type_fields)
fields['entity_mask'] = ListField(entity_mask_fields)
# return the instance
return Instance(fields)
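# Construction sketch (paths and options are placeholders; in practice the
# reader is instantiated by AllenNLP from a jsonnet config):
#
#   reader = SparcDatasetReader(
#       tables_file='dataset/tables.json',
#       database_path='dataset/database',
#       context_mode=ContextMode.concat_history,
#       bert_mode='v0',
#       load_cache=False,
#       save_cache=False,
#   )
#   instances = reader.read('dataset/train.json')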
|
ContextualSP/semantic_parsing_in_context/dataset_reader/sparc_reader.py/0
|
{
"file_path": "ContextualSP/semantic_parsing_in_context/dataset_reader/sparc_reader.py",
"repo_id": "ContextualSP",
"token_count": 13494
}
| 285 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
The code body is borrowed from the allennlp package. We modify it to support our tree-level copy mechanism.
@Author: Qian Liu
"""
from collections import defaultdict
from typing import Any, Dict, List, Tuple
import torch
from allennlp.modules import Attention, FeedForward
from allennlp.nn import Activation
from overrides import overrides
from models.states_machine.grammar_based_state import GrammarBasedState
from models.transition_functions.basic_transition_function import BasicTransitionFunction
class LinkingTransitionFunction(BasicTransitionFunction):
"""
This transition function adds the ability to consider `linked` actions to the
``BasicTransitionFunction`` (which is just an LSTM decoder with attention). These actions are
potentially unseen at training time, so we need to handle them without requiring the action to
have an embedding. Instead, we rely on a `linking score` between each action and the words in
the question/utterance, and use these scores, along with the attention, to do something similar
to a copy mechanism when producing these actions.
When both linked and global (embedded) actions are available, we need some way to compare the
scores for these two sets of actions. The original WikiTableQuestion semantic parser just
concatenated the logits together before doing a joint softmax, but this is quite brittle,
because the logits might have quite different scales. So we have the option here of predicting
a mixture probability between two independently normalized distributions.
Parameters
----------
encoder_output_dim : ``int``
action_embedding_dim : ``int``
input_attention : ``Attention``
activation : ``Activation``, optional (default=relu)
The activation that gets applied to the decoder LSTM input and to the action query.
predict_start_type_separately : ``bool``, optional (default=True)
If ``True``, we will predict the initial action (which is typically the base type of the
logical form) using a different mechanism than our typical action decoder. We basically
just do a projection of the hidden state, and don't update the decoder RNN.
num_start_types : ``int``, optional (default=None)
If ``predict_start_type_separately`` is ``True``, this is the number of start types that
are in the grammar. We need this so we can construct parameters with the right shape.
This is unused if ``predict_start_type_separately`` is ``False``.
add_action_bias : ``bool``, optional (default=True)
If ``True``, there has been a bias dimension added to the embedding of each action, which
gets used when predicting the next action. We add a dimension of ones to our predicted
action vector in this case to account for that.
dropout : ``float`` (optional, default=0.0)
num_layers: ``int`` (optional, default=1)
The number of layers in the decoder LSTM.
"""
def __init__(self,
encoder_output_dim: int,
decoder_input_dim: int,
action_embedding_dim: int,
input_attention: Attention,
sql_attention: Attention = None,
sql_output_dim: int = 100,
activation: Activation = Activation.by_name('relu')(),
predict_start_type_separately: bool = True,
num_start_types: int = None,
add_action_bias: bool = True,
copy_gate: FeedForward = None,
dropout: float = 0.0,
num_layers: int = 1) -> None:
super().__init__(encoder_output_dim=encoder_output_dim,
decoder_input_dim=decoder_input_dim,
action_embedding_dim=action_embedding_dim,
input_attention=input_attention,
sql_attention=sql_attention,
sql_output_dim=sql_output_dim,
num_start_types=num_start_types,
activation=activation,
predict_start_type_separately=predict_start_type_separately,
add_action_bias=add_action_bias,
dropout=dropout,
num_layers=num_layers)
# control the copy gate
self._copy_gate = copy_gate
@overrides
def _compute_action_probabilities(self,
state: GrammarBasedState,
hidden_state: torch.Tensor,
attention_weights: torch.Tensor,
predicted_action_embeddings: torch.Tensor
) -> Dict[int, List[Tuple[int, Any, Any, Any, List[int]]]]:
# In this section we take our predicted action embedding and compare it to the available
# actions in our current state (which might be different for each group element). For
# computing action scores, we'll forget about doing batched / grouped computation, as it
# adds too much complexity and doesn't speed things up, anyway, with the operations we're
# doing here. This means we don't need any action masks, as we'll only get the right
# lengths for what we're computing.
group_size = len(state.batch_indices)
actions = state.get_valid_actions()
batch_results: Dict[int, List[Tuple[int, Any, Any, Any, List[int]]]] = defaultdict(list)
for group_index in range(group_size):
instance_actions = actions[group_index]
predicted_action_embedding = predicted_action_embeddings[group_index]
embedded_actions: List[int] = []
output_action_embeddings = None
embedded_action_logits = None
if not instance_actions:
action_ids = None
current_log_probs = float('inf')
else:
if 'global' in instance_actions:
action_embeddings, output_action_embeddings, embedded_actions = instance_actions['global']
# This is just a matrix product between a (num_actions, embedding_dim) matrix and an
# (embedding_dim, 1) matrix.
action_logits = action_embeddings.mm(predicted_action_embedding.unsqueeze(-1)).squeeze(-1)
action_ids = embedded_actions
                    # 'copy_seg' is designed to be compatible with global, not with linked
if 'copy_seg' in instance_actions:
# we should concat copy logits into action_logits
copy_action_encodings, output_copy_action_encodings, copy_action_ids = instance_actions[
'copy_seg']
copy_action_logits = copy_action_encodings.mm(predicted_action_embedding.unsqueeze(-1)).squeeze(
-1)
# concat logits with action_logits
action_logits = torch.cat([action_logits, copy_action_logits], dim=0)
output_action_embeddings = torch.cat([output_action_embeddings, output_copy_action_encodings],
dim=0)
action_ids = action_ids + copy_action_ids
elif 'linked' in instance_actions:
linking_scores, type_embeddings, linked_actions = instance_actions['linked']
action_ids = embedded_actions + linked_actions
# (num_question_tokens, 1)
linked_action_logits = linking_scores.mm(attention_weights[group_index].unsqueeze(-1)).squeeze(-1)
# The `output_action_embeddings` tensor gets used later as the input to the next
# decoder step. For linked actions, we don't have any action embedding, so we use
# the entity type instead.
if output_action_embeddings is not None:
output_action_embeddings = torch.cat([output_action_embeddings, type_embeddings], dim=0)
else:
output_action_embeddings = type_embeddings
                    # 'copy_seg' is designed to be compatible with global, not with linked
if embedded_action_logits is not None:
action_logits = torch.cat([embedded_action_logits, linked_action_logits], dim=-1)
else:
action_logits = linked_action_logits
# in hard token copy, the column could also be copied
if 'copy_seg' in instance_actions:
# we should concat copy logits into action_logits
copy_action_encodings, output_copy_action_encodings, copy_action_ids = instance_actions['copy_seg']
copy_action_logits = copy_action_encodings.mm(predicted_action_embedding.unsqueeze(-1)).squeeze(-1)
                        output_action_embeddings = torch.cat([output_action_embeddings, output_copy_action_encodings],
                                                             dim=0)
                        # concat logits with action_logits
                        action_logits = torch.cat([action_logits, copy_action_logits], dim=0)
action_ids = action_ids + copy_action_ids
else:
raise Exception("Not support for such an instance action")
# we will use copy gate to obtain the overall probability as:
# p = gen * action_prob + copy * copy_action_prob
if 'copy_token' in instance_actions:
copy_action_encodings, output_copy_action_encodings, copy_action_ids = instance_actions[
'copy_token']
copy_action_logits = copy_action_encodings.mm(predicted_action_embedding.unsqueeze(-1)).squeeze(-1)
copy_action_prob = torch.softmax(copy_action_logits, dim=0)
generate_action_prob = torch.softmax(action_logits, dim=0)
# align token id to generation ones
copy_to_gen_prob = torch.zeros(generate_action_prob.size(),
device=generate_action_prob.device).float()
for i in range(len(copy_action_ids)):
copy_action = copy_action_ids[i]
if copy_action in action_ids:
ind = action_ids.index(copy_action)
copy_to_gen_prob[ind] = copy_action_prob[i]
assert self._copy_gate is not None
# use copy_gate to calculate the copy gate
copy_gate = torch.sigmoid(self._copy_gate(hidden_state[group_index]))
action_prob = generate_action_prob * (1 - copy_gate) + copy_gate * copy_to_gen_prob
current_log_probs = torch.log(torch.clamp(action_prob, min=1e-10))
else:
current_log_probs = torch.log_softmax(action_logits, dim=-1)
# This is now the total score for each state after taking each action. We're going to
# sort by this later, so it's important that this is the total score, not just the
# score for the current action.
log_probs = state.score[group_index] + current_log_probs
batch_results[state.batch_indices[group_index]].append((group_index,
log_probs,
current_log_probs,
output_action_embeddings,
action_ids))
return batch_results
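# Gate mixture sketch for the `copy_token` branch above: with a generation
# distribution p_gen over the valid actions, a copy distribution p_copy aligned
# to the same action ids, and gate g = sigmoid(copy_gate(h)), the final
# distribution is p = (1 - g) * p_gen + g * p_copy. Toy numbers:
#
#   p_gen = torch.tensor([0.7, 0.2, 0.1])
#   p_copy = torch.tensor([0.0, 0.9, 0.1])
#   g = torch.tensor(0.25)
#   p = (1 - g) * p_gen + g * p_copy        # tensor([0.5250, 0.3750, 0.1000])
#   log_p = torch.log(torch.clamp(p, min=1e-10))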
|
ContextualSP/semantic_parsing_in_context/models/transition_functions/linking_transition_function.py/0
|
{
"file_path": "ContextualSP/semantic_parsing_in_context/models/transition_functions/linking_transition_function.py",
"repo_id": "ContextualSP",
"token_count": 5760
}
| 286 |
################################
# val: number(float)/string(str)/sql(dict)
# col_unit: (agg_id, col_id, isDistinct(bool))
# val_unit: (unit_op, col_unit1, col_unit2)
# table_unit: (table_type, col_unit/sql)
# cond_unit: (not_op, op_id, val_unit, val1, val2)
# condition: [cond_unit1, 'and'/'or', cond_unit2, ...]
# sql {
# 'select': (isDistinct(bool), [(agg_id, val_unit), (agg_id, val_unit), ...])
# 'from': {'table_units': [table_unit1, table_unit2, ...], 'conds': condition}
# 'where': condition
# 'groupBy': [col_unit1, col_unit2, ...]
# 'orderBy': ('asc'/'desc', [val_unit1, val_unit2, ...])
# 'having': condition
# 'limit': None/limit value
# 'intersect': None/sql
# 'except': None/sql
# 'union': None/sql
# }
################################
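# Illustrative instance of the structure above (column/table ids are made up):
# "SELECT name FROM student WHERE age > 20" roughly parses to
#
#   {
#       'select': (False, [(0, (0, (0, 1, False), None))]),          # agg 'none', col 1 = name
#       'from': {'table_units': [('table_unit', 0)], 'conds': []},   # table 0 = student
#       'where': [(False, 3, (0, (0, 2, False), None), 20.0, None)], # op_id 3 = '>', col 2 = age
#       'groupBy': [], 'orderBy': [], 'having': [],
#       'limit': None, 'intersect': None, 'except': None, 'union': None,
#   }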
import os, sys
import json
import sqlite3
import traceback
import argparse
from .process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
# Flag to disable value evaluation
DISABLE_VALUE = True
# Flag to disable distinct in select evaluation
DISABLE_DISTINCT = True
CLAUSE_KEYWORDS = (
"select",
"from",
"where",
"group",
"order",
"limit",
"intersect",
"union",
"except",
)
JOIN_KEYWORDS = ("join", "on", "as")
WHERE_OPS = (
"not",
"between",
"=",
">",
"<",
">=",
"<=",
"!=",
"in",
"like",
"is",
"exists",
)
UNIT_OPS = ("none", "-", "+", "*", "/")
AGG_OPS = ("none", "max", "min", "count", "sum", "avg")
TABLE_TYPE = {
"sql": "sql",
"table_unit": "table_unit",
}
COND_OPS = ("and", "or")
SQL_OPS = ("intersect", "union", "except")
ORDER_OPS = ("desc", "asc")
HARDNESS = {
"component1": ("where", "group", "order", "limit", "join", "or", "like"),
"component2": ("except", "union", "intersect"),
}
LEVELS = ["easy", "medium", "hard", "extra", "all"]
PARTIAL_TYPES = [
"select",
"select(no AGG)",
"where",
"where(no OP)",
"group(no Having)",
"group",
"order",
"and/or",
"IUEN",
"keywords",
]
def condition_has_or(conds):
return "or" in conds[1::2]
def condition_has_like(conds):
return WHERE_OPS.index("like") in [cond_unit[1] for cond_unit in conds[::2]]
def condition_has_sql(conds):
for cond_unit in conds[::2]:
val1, val2 = cond_unit[3], cond_unit[4]
if val1 is not None and type(val1) is dict:
return True
if val2 is not None and type(val2) is dict:
return True
return False
def val_has_op(val_unit):
return val_unit[0] != UNIT_OPS.index("none")
def has_agg(unit):
return unit[0] != AGG_OPS.index("none")
def accuracy(count, total):
if count == total:
return 1
return 0
def recall(count, total):
if count == total:
return 1
return 0
def F1(acc, rec):
if (acc + rec) == 0:
return 0
return (2.0 * acc * rec) / (acc + rec)
def get_scores(count, pred_total, label_total):
if pred_total != label_total:
return 0, 0, 0
elif count == pred_total:
return 1, 1, 1
return 0, 0, 0
def eval_sel(pred, label):
pred_sel = pred["select"][1]
label_sel = label["select"][1]
label_wo_agg = [unit[1] for unit in label_sel]
pred_total = len(pred_sel)
label_total = len(label_sel)
cnt = 0
cnt_wo_agg = 0
for unit in pred_sel:
if unit in label_sel:
cnt += 1
label_sel.remove(unit)
if unit[1] in label_wo_agg:
cnt_wo_agg += 1
label_wo_agg.remove(unit[1])
return label_total, pred_total, cnt, cnt_wo_agg
def eval_where(pred, label):
pred_conds = [unit for unit in pred["where"][::2]]
label_conds = [unit for unit in label["where"][::2]]
label_wo_agg = [unit[2] for unit in label_conds]
pred_total = len(pred_conds)
label_total = len(label_conds)
cnt = 0
cnt_wo_agg = 0
for unit in pred_conds:
if unit in label_conds:
cnt += 1
label_conds.remove(unit)
if unit[2] in label_wo_agg:
cnt_wo_agg += 1
label_wo_agg.remove(unit[2])
return label_total, pred_total, cnt, cnt_wo_agg
def eval_group(pred, label):
pred_cols = [unit[1] for unit in pred["groupBy"]]
label_cols = [unit[1] for unit in label["groupBy"]]
pred_total = len(pred_cols)
label_total = len(label_cols)
cnt = 0
pred_cols = [pred.split(".")[1] if "." in pred else pred for pred in pred_cols]
label_cols = [
label.split(".")[1] if "." in label else label for label in label_cols
]
for col in pred_cols:
if col in label_cols:
cnt += 1
label_cols.remove(col)
return label_total, pred_total, cnt
def eval_having(pred, label):
pred_total = label_total = cnt = 0
if len(pred["groupBy"]) > 0:
pred_total = 1
if len(label["groupBy"]) > 0:
label_total = 1
pred_cols = [unit[1] for unit in pred["groupBy"]]
label_cols = [unit[1] for unit in label["groupBy"]]
if (
pred_total == label_total == 1
and pred_cols == label_cols
and pred["having"] == label["having"]
):
cnt = 1
return label_total, pred_total, cnt
def eval_order(pred, label):
pred_total = label_total = cnt = 0
if len(pred["orderBy"]) > 0:
pred_total = 1
if len(label["orderBy"]) > 0:
label_total = 1
if (
len(label["orderBy"]) > 0
and pred["orderBy"] == label["orderBy"]
and (
(pred["limit"] is None and label["limit"] is None)
or (pred["limit"] is not None and label["limit"] is not None)
)
):
cnt = 1
return label_total, pred_total, cnt
def eval_and_or(pred, label):
pred_ao = pred["where"][1::2]
label_ao = label["where"][1::2]
pred_ao = set(pred_ao)
label_ao = set(label_ao)
if pred_ao == label_ao:
return 1, 1, 1
return len(pred_ao), len(label_ao), 0
def get_nestedSQL(sql):
nested = []
for cond_unit in sql["from"]["conds"][::2] + sql["where"][::2] + sql["having"][::2]:
if type(cond_unit[3]) is dict:
nested.append(cond_unit[3])
if type(cond_unit[4]) is dict:
nested.append(cond_unit[4])
if sql["intersect"] is not None:
nested.append(sql["intersect"])
if sql["except"] is not None:
nested.append(sql["except"])
if sql["union"] is not None:
nested.append(sql["union"])
return nested
def eval_nested(pred, label):
label_total = 0
pred_total = 0
cnt = 0
if pred is not None:
pred_total += 1
if label is not None:
label_total += 1
if pred is not None and label is not None:
partial_scores = Evaluator.eval_partial_match(pred, label)
cnt += Evaluator.eval_exact_match(pred, label, partial_scores)
return label_total, pred_total, cnt
def eval_IUEN(pred, label):
lt1, pt1, cnt1 = eval_nested(pred["intersect"], label["intersect"])
lt2, pt2, cnt2 = eval_nested(pred["except"], label["except"])
lt3, pt3, cnt3 = eval_nested(pred["union"], label["union"])
label_total = lt1 + lt2 + lt3
pred_total = pt1 + pt2 + pt3
cnt = cnt1 + cnt2 + cnt3
return label_total, pred_total, cnt
def get_keywords(sql):
res = set()
if len(sql["where"]) > 0:
res.add("where")
if len(sql["groupBy"]) > 0:
res.add("group")
if len(sql["having"]) > 0:
res.add("having")
if len(sql["orderBy"]) > 0:
res.add(sql["orderBy"][0])
res.add("order")
if sql["limit"] is not None:
res.add("limit")
if sql["except"] is not None:
res.add("except")
if sql["union"] is not None:
res.add("union")
if sql["intersect"] is not None:
res.add("intersect")
# or keyword
ao = sql["from"]["conds"][1::2] + sql["where"][1::2] + sql["having"][1::2]
if len([token for token in ao if token == "or"]) > 0:
res.add("or")
cond_units = sql["from"]["conds"][::2] + sql["where"][::2] + sql["having"][::2]
# not keyword
if len([cond_unit for cond_unit in cond_units if cond_unit[0]]) > 0:
res.add("not")
# in keyword
if (
len(
[
cond_unit
for cond_unit in cond_units
if cond_unit[1] == WHERE_OPS.index("in")
]
)
> 0
):
res.add("in")
# like keyword
if (
len(
[
cond_unit
for cond_unit in cond_units
if cond_unit[1] == WHERE_OPS.index("like")
]
)
> 0
):
res.add("like")
return res
def eval_keywords(pred, label):
pred_keywords = get_keywords(pred)
label_keywords = get_keywords(label)
pred_total = len(pred_keywords)
label_total = len(label_keywords)
cnt = 0
for k in pred_keywords:
if k in label_keywords:
cnt += 1
return label_total, pred_total, cnt
def count_agg(units):
return len([unit for unit in units if has_agg(unit)])
def count_component1(sql):
count = 0
if len(sql["where"]) > 0:
count += 1
if len(sql["groupBy"]) > 0:
count += 1
if len(sql["orderBy"]) > 0:
count += 1
if sql["limit"] is not None:
count += 1
if len(sql["from"]["table_units"]) > 0: # JOIN
count += len(sql["from"]["table_units"]) - 1
ao = sql["from"]["conds"][1::2] + sql["where"][1::2] + sql["having"][1::2]
count += len([token for token in ao if token == "or"])
cond_units = sql["from"]["conds"][::2] + sql["where"][::2] + sql["having"][::2]
count += len(
[
cond_unit
for cond_unit in cond_units
if cond_unit[1] == WHERE_OPS.index("like")
]
)
return count
def count_component2(sql):
nested = get_nestedSQL(sql)
return len(nested)
def count_others(sql):
count = 0
# number of aggregation
agg_count = count_agg(sql["select"][1])
agg_count += count_agg(sql["where"][::2])
agg_count += count_agg(sql["groupBy"])
if len(sql["orderBy"]) > 0:
agg_count += count_agg(
[unit[1] for unit in sql["orderBy"][1] if unit[1]]
+ [unit[2] for unit in sql["orderBy"][1] if unit[2]]
)
agg_count += count_agg(sql["having"])
if agg_count > 1:
count += 1
# number of select columns
if len(sql["select"][1]) > 1:
count += 1
# number of where conditions
if len(sql["where"]) > 1:
count += 1
# number of group by clauses
if len(sql["groupBy"]) > 1:
count += 1
return count
class Evaluator:
"""A simple evaluator"""
def __init__(self, db_dir, kmaps, etype):
self.db_dir = db_dir
self.kmaps = kmaps
self.etype = etype
self.db_paths = {}
self.schemas = {}
for db_name in self.kmaps.keys():
db_path = os.path.join(db_dir, db_name, db_name + ".sqlite")
self.db_paths[db_name] = db_path
self.schemas[db_name] = Schema(get_schema(db_path))
self.scores = {
level: {
"count": 0,
"partial": {
type_: {
"acc": 0.0,
"rec": 0.0,
"f1": 0.0,
"acc_count": 0,
"rec_count": 0,
}
for type_ in PARTIAL_TYPES
},
"exact": 0.0,
"exec": 0,
}
for level in LEVELS
}
def eval_hardness(self, sql):
count_comp1_ = count_component1(sql)
count_comp2_ = count_component2(sql)
count_others_ = count_others(sql)
if count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ == 0:
return "easy"
elif (count_others_ <= 2 and count_comp1_ <= 1 and count_comp2_ == 0) or (
count_comp1_ <= 2 and count_others_ < 2 and count_comp2_ == 0
):
return "medium"
elif (
(count_others_ > 2 and count_comp1_ <= 2 and count_comp2_ == 0)
or (2 < count_comp1_ <= 3 and count_others_ <= 2 and count_comp2_ == 0)
or (count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ <= 1)
):
return "hard"
else:
return "extra"
@classmethod
def eval_exact_match(cls, pred, label, partial_scores):
for _, score in list(partial_scores.items()):
if score["f1"] != 1:
return 0
if len(label["from"]["table_units"]) > 0:
label_tables = sorted(label["from"]["table_units"])
pred_tables = sorted(pred["from"]["table_units"])
return label_tables == pred_tables
return 1
@classmethod
def eval_partial_match(cls, pred, label):
res = {}
label_total, pred_total, cnt, cnt_wo_agg = eval_sel(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res["select"] = {
"acc": acc,
"rec": rec,
"f1": f1,
"label_total": label_total,
"pred_total": pred_total,
}
acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total)
res["select(no AGG)"] = {
"acc": acc,
"rec": rec,
"f1": f1,
"label_total": label_total,
"pred_total": pred_total,
}
label_total, pred_total, cnt, cnt_wo_agg = eval_where(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res["where"] = {
"acc": acc,
"rec": rec,
"f1": f1,
"label_total": label_total,
"pred_total": pred_total,
}
acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total)
res["where(no OP)"] = {
"acc": acc,
"rec": rec,
"f1": f1,
"label_total": label_total,
"pred_total": pred_total,
}
label_total, pred_total, cnt = eval_group(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res["group(no Having)"] = {
"acc": acc,
"rec": rec,
"f1": f1,
"label_total": label_total,
"pred_total": pred_total,
}
label_total, pred_total, cnt = eval_having(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res["group"] = {
"acc": acc,
"rec": rec,
"f1": f1,
"label_total": label_total,
"pred_total": pred_total,
}
label_total, pred_total, cnt = eval_order(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res["order"] = {
"acc": acc,
"rec": rec,
"f1": f1,
"label_total": label_total,
"pred_total": pred_total,
}
label_total, pred_total, cnt = eval_and_or(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res["and/or"] = {
"acc": acc,
"rec": rec,
"f1": f1,
"label_total": label_total,
"pred_total": pred_total,
}
label_total, pred_total, cnt = eval_IUEN(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res["IUEN"] = {
"acc": acc,
"rec": rec,
"f1": f1,
"label_total": label_total,
"pred_total": pred_total,
}
label_total, pred_total, cnt = eval_keywords(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res["keywords"] = {
"acc": acc,
"rec": rec,
"f1": f1,
"label_total": label_total,
"pred_total": pred_total,
}
return res
def evaluate_one(self, db_name, gold, predicted, question=None, index=None):
schema = self.schemas[db_name]
try:
g_sql = get_sql(schema, gold)
except:
g_sql = {
"except": None,
"from": {"conds": [], "table_units": []},
"groupBy": [],
"having": [],
"intersect": None,
"limit": None,
"orderBy": [],
"select": [False, []],
"union": None,
"where": [],
}
hardness = self.eval_hardness(g_sql)
self.scores[hardness]["count"] += 1
self.scores["all"]["count"] += 1
parse_error = False
try:
p_sql = get_sql(schema, predicted)
except:
# If p_sql is not valid, then we will use an empty sql to evaluate with the correct sql
p_sql = {
"except": None,
"from": {"conds": [], "table_units": []},
"groupBy": [],
"having": [],
"intersect": None,
"limit": None,
"orderBy": [],
"select": [False, []],
"union": None,
"where": [],
}
# TODO fix
parse_error = True
# rebuild sql for value evaluation
kmap = self.kmaps[db_name]
g_valid_col_units = build_valid_col_units(g_sql["from"]["table_units"], schema)
g_sql = rebuild_sql_val(g_sql)
g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap)
p_valid_col_units = build_valid_col_units(p_sql["from"]["table_units"], schema)
p_sql = rebuild_sql_val(p_sql)
p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap)
if self.etype in ["all", "exec"]:
self.scores[hardness]["exec"] += eval_exec_match(
self.db_paths[db_name], predicted, gold, p_sql, g_sql
)
if self.etype in ["all", "match"]:
partial_scores = self.eval_partial_match(p_sql, g_sql)
exact_score = self.eval_exact_match(p_sql, g_sql, partial_scores)
update_scores_match(self.scores, exact_score, hardness, partial_scores, PARTIAL_TYPES)
return {
"question": question,
"predicted": predicted,
"gold": gold,
"predicted_parse_error": parse_error,
"hardness": hardness,
"exact": exact_score,
"partial": partial_scores,
}
def finalize(self):
finalize(self.scores, self.etype, PARTIAL_TYPES)
def update_scores_match(scores, exact_score, hardness, partial_scores, partial_types):
scores[hardness]["exact"] += exact_score
scores["all"]["exact"] += exact_score
for type_ in partial_types:
if partial_scores[type_]["pred_total"] > 0:
scores[hardness]["partial"][type_]["acc"] += partial_scores[
type_
]["acc"]
scores[hardness]["partial"][type_]["acc_count"] += 1
if partial_scores[type_]["label_total"] > 0:
scores[hardness]["partial"][type_]["rec"] += partial_scores[
type_
]["rec"]
scores[hardness]["partial"][type_]["rec_count"] += 1
scores[hardness]["partial"][type_]["f1"] += partial_scores[type_][
"f1"
]
if partial_scores[type_]["pred_total"] > 0:
scores["all"]["partial"][type_]["acc"] += partial_scores[
type_
]["acc"]
scores["all"]["partial"][type_]["acc_count"] += 1
if partial_scores[type_]["label_total"] > 0:
scores["all"]["partial"][type_]["rec"] += partial_scores[
type_
]["rec"]
scores["all"]["partial"][type_]["rec_count"] += 1
scores["all"]["partial"][type_]["f1"] += partial_scores[type_][
"f1"
]
def finalize(scores, etype, partial_types):
for level in LEVELS:
if scores[level]["count"] == 0:
continue
if etype in ["all", "exec"]:
scores[level]["exec"] /= scores[level]["count"]
if etype in ["all", "match"]:
scores[level]["exact"] /= scores[level]["count"]
for type_ in partial_types:
if scores[level]["partial"][type_]["acc_count"] == 0:
scores[level]["partial"][type_]["acc"] = 0
else:
scores[level]["partial"][type_]["acc"] = (
scores[level]["partial"][type_]["acc"]
/ scores[level]["partial"][type_]["acc_count"]
* 1.0
)
if scores[level]["partial"][type_]["rec_count"] == 0:
scores[level]["partial"][type_]["rec"] = 0
else:
scores[level]["partial"][type_]["rec"] = (
scores[level]["partial"][type_]["rec"]
/ scores[level]["partial"][type_]["rec_count"]
* 1.0
)
if (
scores[level]["partial"][type_]["acc"] == 0
and scores[level]["partial"][type_]["rec"] == 0
):
scores[level]["partial"][type_]["f1"] = 1
else:
scores[level]["partial"][type_]["f1"] = (
2.0
* scores[level]["partial"][type_]["acc"]
* scores[level]["partial"][type_]["rec"]
/ (
scores[level]["partial"][type_]["rec"]
+ scores[level]["partial"][type_]["acc"]
)
)
def isValidSQL(sql, db):
conn = sqlite3.connect(db)
cursor = conn.cursor()
try:
cursor.execute(sql)
except:
return False
return True
def print_scores(scores, etype):
LEVELS = ["easy", "medium", "hard", "extra", "all"]
PARTIAL_TYPES = [
"select",
"select(no AGG)",
"where",
"where(no OP)",
"group(no Having)",
"group",
"order",
"and/or",
"IUEN",
"keywords",
]
print("{:20} {:20} {:20} {:20} {:20} {:20}".format("", *LEVELS))
counts = [scores[level]["count"] for level in LEVELS]
print("{:20} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d}".format("count", *counts))
if etype in ["all", "exec"]:
print("===================== EXECUTION ACCURACY =====================")
this_scores = [scores[level]["exec"] for level in LEVELS]
print(
"{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(
"execution", *this_scores
)
)
if etype in ["all", "match"]:
print("\n====================== EXACT MATCHING ACCURACY =====================")
exact_scores = [scores[level]["exact"] for level in LEVELS]
print(
"{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(
"exact match", *exact_scores
)
)
print("\n---------------------PARTIAL MATCHING ACCURACY----------------------")
for type_ in PARTIAL_TYPES:
this_scores = [scores[level]["partial"][type_]["acc"] for level in LEVELS]
print(
"{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(
type_, *this_scores
)
)
print("---------------------- PARTIAL MATCHING RECALL ----------------------")
for type_ in PARTIAL_TYPES:
this_scores = [scores[level]["partial"][type_]["rec"] for level in LEVELS]
print(
"{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(
type_, *this_scores
)
)
print("---------------------- PARTIAL MATCHING F1 --------------------------")
for type_ in PARTIAL_TYPES:
this_scores = [scores[level]["partial"][type_]["f1"] for level in LEVELS]
print(
"{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(
type_, *this_scores
)
)
def evaluate(gold, predict, db_dir, etype, kmaps):
with open(gold) as f:
glist = [l.strip().split("\t") for l in f.readlines() if len(l.strip()) > 0]
with open(predict) as f:
plist = [l.strip().split("\t") for l in f.readlines() if len(l.strip()) > 0]
# plist = [("select max(Share),min(Share) from performance where Type != 'terminal'", "orchestra")]
# glist = [("SELECT max(SHARE) , min(SHARE) FROM performance WHERE TYPE != 'Live final'", "orchestra")]
evaluator = Evaluator(db_dir, kmaps, etype)
results = []
for p, g in zip(plist, glist):
(predicted,) = p
gold, db_name = g
results.append(evaluator.evaluate_one(db_name, gold, predicted))
evaluator.finalize()
print_scores(evaluator.scores, etype)
return {
"per_item": results,
"total_scores": evaluator.scores,
}
def eval_exec_match(db, p_str, g_str, pred, gold):
"""
    Return True if the values produced by the predicted and gold queries match
    at the corresponding indices. Multiple col_unit (pairs) are currently not supported.
"""
conn = sqlite3.connect(db)
cursor = conn.cursor()
try:
cursor.execute(p_str)
p_res = cursor.fetchall()
except:
return False
cursor.execute(g_str)
q_res = cursor.fetchall()
def res_map(res, val_units):
rmap = {}
for idx, val_unit in enumerate(val_units):
key = (
tuple(val_unit[1])
if not val_unit[2]
else (val_unit[0], tuple(val_unit[1]), tuple(val_unit[2]))
)
rmap[key] = [r[idx] for r in res]
return rmap
p_val_units = [unit[1] for unit in pred["select"][1]]
q_val_units = [unit[1] for unit in gold["select"][1]]
return res_map(p_res, p_val_units) == res_map(q_res, q_val_units)
# Rebuild SQL functions for value evaluation
def rebuild_cond_unit_val(cond_unit):
if cond_unit is None or not DISABLE_VALUE:
return cond_unit
not_op, op_id, val_unit, val1, val2 = cond_unit
if type(val1) is not dict:
val1 = None
else:
val1 = rebuild_sql_val(val1)
if type(val2) is not dict:
val2 = None
else:
val2 = rebuild_sql_val(val2)
return not_op, op_id, val_unit, val1, val2
def rebuild_condition_val(condition):
if condition is None or not DISABLE_VALUE:
return condition
res = []
for idx, it in enumerate(condition):
if idx % 2 == 0:
res.append(rebuild_cond_unit_val(it))
else:
res.append(it)
return res
def rebuild_sql_val(sql):
if sql is None or not DISABLE_VALUE:
return sql
sql["from"]["conds"] = rebuild_condition_val(sql["from"]["conds"])
sql["having"] = rebuild_condition_val(sql["having"])
sql["where"] = rebuild_condition_val(sql["where"])
sql["intersect"] = rebuild_sql_val(sql["intersect"])
sql["except"] = rebuild_sql_val(sql["except"])
sql["union"] = rebuild_sql_val(sql["union"])
return sql
# Rebuild SQL functions for foreign key evaluation
def build_valid_col_units(table_units, schema):
col_ids = [
table_unit[1]
for table_unit in table_units
if table_unit[0] == TABLE_TYPE["table_unit"]
]
prefixs = [col_id[:-2] for col_id in col_ids]
valid_col_units = []
for value in list(schema.idMap.values()):
if "." in value and value[: value.index(".")] in prefixs:
valid_col_units.append(value)
return valid_col_units
def rebuild_col_unit_col(valid_col_units, col_unit, kmap):
if col_unit is None:
return col_unit
agg_id, col_id, distinct = col_unit
if col_id in kmap and col_id in valid_col_units:
col_id = kmap[col_id]
if DISABLE_DISTINCT:
distinct = None
return agg_id, col_id, distinct
def rebuild_val_unit_col(valid_col_units, val_unit, kmap):
if val_unit is None:
return val_unit
unit_op, col_unit1, col_unit2 = val_unit
col_unit1 = rebuild_col_unit_col(valid_col_units, col_unit1, kmap)
col_unit2 = rebuild_col_unit_col(valid_col_units, col_unit2, kmap)
return unit_op, col_unit1, col_unit2
def rebuild_table_unit_col(valid_col_units, table_unit, kmap):
if table_unit is None:
return table_unit
table_type, col_unit_or_sql = table_unit
if isinstance(col_unit_or_sql, tuple):
col_unit_or_sql = rebuild_col_unit_col(valid_col_units, col_unit_or_sql, kmap)
return table_type, col_unit_or_sql
def rebuild_cond_unit_col(valid_col_units, cond_unit, kmap):
if cond_unit is None:
return cond_unit
not_op, op_id, val_unit, val1, val2 = cond_unit
val_unit = rebuild_val_unit_col(valid_col_units, val_unit, kmap)
return not_op, op_id, val_unit, val1, val2
def rebuild_condition_col(valid_col_units, condition, kmap):
for idx in range(len(condition)):
if idx % 2 == 0:
condition[idx] = rebuild_cond_unit_col(
valid_col_units, condition[idx], kmap
)
return condition
def rebuild_select_col(valid_col_units, sel, kmap):
if sel is None:
return sel
distinct, _list = sel
new_list = []
for it in _list:
agg_id, val_unit = it
new_list.append((agg_id, rebuild_val_unit_col(valid_col_units, val_unit, kmap)))
if DISABLE_DISTINCT:
distinct = None
return distinct, new_list
def rebuild_from_col(valid_col_units, from_, kmap):
if from_ is None:
return from_
from_["table_units"] = [
rebuild_table_unit_col(valid_col_units, table_unit, kmap)
for table_unit in from_["table_units"]
]
from_["conds"] = rebuild_condition_col(valid_col_units, from_["conds"], kmap)
return from_
def rebuild_group_by_col(valid_col_units, group_by, kmap):
if group_by is None:
return group_by
return [
rebuild_col_unit_col(valid_col_units, col_unit, kmap) for col_unit in group_by
]
def rebuild_order_by_col(valid_col_units, order_by, kmap):
if order_by is None or len(order_by) == 0:
return order_by
direction, val_units = order_by
new_val_units = [
rebuild_val_unit_col(valid_col_units, val_unit, kmap) for val_unit in val_units
]
return direction, new_val_units
def rebuild_sql_col(valid_col_units, sql, kmap):
if sql is None:
return sql
sql["select"] = rebuild_select_col(valid_col_units, sql["select"], kmap)
sql["from"] = rebuild_from_col(valid_col_units, sql["from"], kmap)
sql["where"] = rebuild_condition_col(valid_col_units, sql["where"], kmap)
sql["groupBy"] = rebuild_group_by_col(valid_col_units, sql["groupBy"], kmap)
sql["orderBy"] = rebuild_order_by_col(valid_col_units, sql["orderBy"], kmap)
sql["having"] = rebuild_condition_col(valid_col_units, sql["having"], kmap)
sql["intersect"] = rebuild_sql_col(valid_col_units, sql["intersect"], kmap)
sql["except"] = rebuild_sql_col(valid_col_units, sql["except"], kmap)
sql["union"] = rebuild_sql_col(valid_col_units, sql["union"], kmap)
return sql
def build_foreign_key_map(entry):
cols_orig = entry["column_names_original"]
tables_orig = entry["table_names_original"]
# rebuild cols corresponding to idmap in Schema
cols = []
for col_orig in cols_orig:
if col_orig[0] >= 0:
t = tables_orig[col_orig[0]]
c = col_orig[1]
cols.append("__" + t.lower() + "." + c.lower() + "__")
else:
cols.append("__all__")
def keyset_in_list(k1, k2, k_list):
for k_set in k_list:
if k1 in k_set or k2 in k_set:
return k_set
new_k_set = set()
k_list.append(new_k_set)
return new_k_set
foreign_key_list = []
foreign_keys = entry["foreign_keys"]
for fkey in foreign_keys:
key1, key2 = fkey
key_set = keyset_in_list(key1, key2, foreign_key_list)
key_set.add(key1)
key_set.add(key2)
foreign_key_map = {}
for key_set in foreign_key_list:
sorted_list = sorted(list(key_set))
midx = sorted_list[0]
for idx in sorted_list:
foreign_key_map[cols[idx]] = cols[midx]
return foreign_key_map
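# Illustrative sketch (hypothetical schema, not taken from the Spider data):
#   entry = {
#       "table_names_original": ["singer", "concert"],
#       "column_names_original": [[-1, "*"], [0, "Singer_ID"], [1, "Singer_ID"]],
#       "foreign_keys": [[2, 1]],
#   }
#   build_foreign_key_map(entry)
#   # -> {"__singer.singer_id__": "__singer.singer_id__",
#   #     "__concert.singer_id__": "__singer.singer_id__"}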
def build_foreign_key_map_from_json(table):
with open(table) as f:
data = json.load(f)
tables = {}
for entry in data:
tables[entry["db_id"]] = build_foreign_key_map(entry)
return tables
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--gold", dest="gold", type=str)
parser.add_argument("--pred", dest="pred", type=str)
parser.add_argument("--db", dest="db", type=str)
parser.add_argument("--table", dest="table", type=str)
parser.add_argument("--etype", dest="etype", type=str)
parser.add_argument("--output")
args = parser.parse_args()
gold = args.gold
pred = args.pred
db_dir = args.db
table = args.table
etype = args.etype
assert etype in ["all", "exec", "match"], "Unknown evaluation method"
kmaps = build_foreign_key_map_from_json(table)
results = evaluate(gold, pred, db_dir, etype, kmaps)
if args.output:
with open(args.output, "w") as f:
json.dump(results, f)
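# Example invocation (hypothetical paths, assuming the usual Spider layout):
#   python evaluation.py --gold dev_gold.sql --pred pred.sql \
#       --db database/ --table tables.json --etype all --output scores.json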
|
ContextualSP/unified_parser_text_to_sql/third_party/spider/evaluation.py/0
|
{
"file_path": "ContextualSP/unified_parser_text_to_sql/third_party/spider/evaluation.py",
"repo_id": "ContextualSP",
"token_count": 16797
}
| 287 |
from .quantization import quantize, dequantize
__all__ = ['quantize', 'dequantize']
|
Cream/CDARTS/CDARTS_detection/mmcv/arraymisc/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/arraymisc/__init__.py",
"repo_id": "Cream",
"token_count": 27
}
| 288 |
import os.path as osp
import cv2
import numpy as np
from mmcv.opencv_info import USE_OPENCV2
from mmcv.utils import check_file_exist, is_str, mkdir_or_exist
if not USE_OPENCV2:
from cv2 import IMREAD_COLOR, IMREAD_GRAYSCALE, IMREAD_UNCHANGED
else:
from cv2 import CV_LOAD_IMAGE_COLOR as IMREAD_COLOR
from cv2 import CV_LOAD_IMAGE_GRAYSCALE as IMREAD_GRAYSCALE
from cv2 import CV_LOAD_IMAGE_UNCHANGED as IMREAD_UNCHANGED
imread_flags = {
'color': IMREAD_COLOR,
'grayscale': IMREAD_GRAYSCALE,
'unchanged': IMREAD_UNCHANGED
}
def imread(img_or_path, flag='color'):
"""Read an image.
Args:
img_or_path (ndarray or str): Either a numpy array or image path.
If it is a numpy array (loaded image), then it will be returned
as is.
flag (str): Flags specifying the color type of a loaded image,
candidates are `color`, `grayscale` and `unchanged`.
Returns:
ndarray: Loaded image array.
"""
if isinstance(img_or_path, np.ndarray):
return img_or_path
elif is_str(img_or_path):
flag = imread_flags[flag] if is_str(flag) else flag
check_file_exist(img_or_path,
'img file does not exist: {}'.format(img_or_path))
return cv2.imread(img_or_path, flag)
else:
raise TypeError('"img" must be a numpy array or a filename')
def imfrombytes(content, flag='color'):
"""Read an image from bytes.
Args:
content (bytes): Image bytes got from files or other streams.
flag (str): Same as :func:`imread`.
Returns:
ndarray: Loaded image array.
"""
img_np = np.frombuffer(content, np.uint8)
flag = imread_flags[flag] if is_str(flag) else flag
img = cv2.imdecode(img_np, flag)
return img
def imwrite(img, file_path, params=None, auto_mkdir=True):
"""Write image to file
Args:
img (ndarray): Image array to be written.
file_path (str): Image file path.
params (None or list): Same as opencv's :func:`imwrite` interface.
auto_mkdir (bool): If the parent folder of `file_path` does not exist,
whether to create it automatically.
Returns:
bool: Successful or not.
"""
if auto_mkdir:
dir_name = osp.abspath(osp.dirname(file_path))
mkdir_or_exist(dir_name)
return cv2.imwrite(file_path, img, params)
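# Minimal usage sketch (file paths below are hypothetical):
#   img = imread('demo/color.jpg')                # BGR ndarray
#   gray = imread('demo/color.jpg', 'grayscale')  # single-channel ndarray
#   imwrite(gray, '/tmp/out/gray.png')            # parent dir is created automatically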
|
Cream/CDARTS/CDARTS_detection/mmcv/image/io.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/image/io.py",
"repo_id": "Cream",
"token_count": 1026
}
| 289 |
# Copyright (c) Open-MMLab. All rights reserved.
import functools
import os
import subprocess
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
def init_dist(launcher, backend='nccl', **kwargs):
if mp.get_start_method(allow_none=True) is None:
mp.set_start_method('spawn')
if launcher == 'pytorch':
_init_dist_pytorch(backend, **kwargs)
elif launcher == 'mpi':
_init_dist_mpi(backend, **kwargs)
elif launcher == 'slurm':
_init_dist_slurm(backend, **kwargs)
else:
raise ValueError('Invalid launcher type: {}'.format(launcher))
def _init_dist_pytorch(backend, **kwargs):
# TODO: use local_rank instead of rank % num_gpus
rank = int(os.environ['RANK'])
num_gpus = torch.cuda.device_count()
torch.cuda.set_device(rank % num_gpus)
dist.init_process_group(backend=backend, **kwargs)
def _init_dist_mpi(backend, **kwargs):
raise NotImplementedError
def _init_dist_slurm(backend, port=29500, **kwargs):
proc_id = int(os.environ['SLURM_PROCID'])
ntasks = int(os.environ['SLURM_NTASKS'])
node_list = os.environ['SLURM_NODELIST']
num_gpus = torch.cuda.device_count()
torch.cuda.set_device(proc_id % num_gpus)
addr = subprocess.getoutput(
'scontrol show hostname {} | head -n1'.format(node_list))
os.environ['MASTER_PORT'] = str(port)
os.environ['MASTER_ADDR'] = addr
os.environ['WORLD_SIZE'] = str(ntasks)
os.environ['RANK'] = str(proc_id)
dist.init_process_group(backend=backend)
def get_dist_info():
if torch.__version__ < '1.0':
initialized = dist._initialized
else:
if dist.is_available():
initialized = dist.is_initialized()
else:
initialized = False
if initialized:
rank = dist.get_rank()
world_size = dist.get_world_size()
else:
rank = 0
world_size = 1
return rank, world_size
def master_only(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
rank, _ = get_dist_info()
if rank == 0:
return func(*args, **kwargs)
return wrapper
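# Usage sketch (assumes a distributed launcher has set RANK etc. in the environment):
#   init_dist('pytorch')              # or 'slurm' / 'mpi'
#   rank, world_size = get_dist_info()
#
#   @master_only
#   def log_something():
#       print('only executed on rank 0')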
|
Cream/CDARTS/CDARTS_detection/mmcv/runner/dist_utils.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/runner/dist_utils.py",
"repo_id": "Cream",
"token_count": 939
}
| 290 |
import multiprocessing
import torch
import mmcv
from .checkpoint import load_checkpoint
def worker_func(model_cls, model_kwargs, checkpoint, dataset, data_func,
gpu_id, idx_queue, result_queue):
model = model_cls(**model_kwargs)
load_checkpoint(model, checkpoint, map_location='cpu')
torch.cuda.set_device(gpu_id)
model.cuda()
model.eval()
with torch.no_grad():
while True:
idx = idx_queue.get()
data = dataset[idx]
result = model(**data_func(data, gpu_id))
result_queue.put((idx, result))
def parallel_test(model_cls,
model_kwargs,
checkpoint,
dataset,
data_func,
gpus,
workers_per_gpu=1):
"""Parallel testing on multiple GPUs.
Args:
model_cls (type): Model class type.
model_kwargs (dict): Arguments to init the model.
checkpoint (str): Checkpoint filepath.
dataset (:obj:`Dataset`): The dataset to be tested.
data_func (callable): The function that generates model inputs.
gpus (list[int]): GPU ids to be used.
workers_per_gpu (int): Number of processes on each GPU. It is possible
to run multiple workers on each GPU.
Returns:
list: Test results.
"""
ctx = multiprocessing.get_context('spawn')
idx_queue = ctx.Queue()
result_queue = ctx.Queue()
num_workers = len(gpus) * workers_per_gpu
workers = [
ctx.Process(
target=worker_func,
args=(model_cls, model_kwargs, checkpoint, dataset, data_func,
gpus[i % len(gpus)], idx_queue, result_queue))
for i in range(num_workers)
]
for w in workers:
w.daemon = True
w.start()
for i in range(len(dataset)):
idx_queue.put(i)
results = [None for _ in range(len(dataset))]
prog_bar = mmcv.ProgressBar(task_num=len(dataset))
for _ in range(len(dataset)):
idx, res = result_queue.get()
results[idx] = res
prog_bar.update()
print('\n')
for worker in workers:
worker.terminate()
return results
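# Usage sketch (MyDetector, cfg and dataset below are placeholders, not part of mmcv):
#   def data_func(data, gpu_id):
#       return dict(img=data['img'].cuda(gpu_id), return_loss=False)
#   results = parallel_test(MyDetector, dict(cfg=cfg), 'latest.pth',
#                           dataset, data_func, gpus=[0, 1], workers_per_gpu=2)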
|
Cream/CDARTS/CDARTS_detection/mmcv/runner/parallel_test.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/runner/parallel_test.py",
"repo_id": "Cream",
"token_count": 1032
}
| 291 |
#include <math.h>
#include <string.h>
using namespace std;
void FlowWarp(double* img, double* flow1, double* out, const int height,
const int width, const int channels, const int filling_value,
const int interpolateMode);
void BilinearInterpolate(const double* img, int width, int height, int channels,
double x, double y, double* out);
void NNInterpolate(const double* img, int width, int height, int channels,
double x, double y, double* out);
template <typename T>
inline T __min__(T a, T b) {
return a > b ? b : a;
}
template <typename T>
inline T __max__(T a, T b) {
return (a < b) ? b : a;
}
template <typename T>
inline T EnforceRange(const T x, const int MaxValue) {
return __min__(__max__(x, 0), MaxValue);
}
|
Cream/CDARTS/CDARTS_detection/mmcv/video/optflow_warp/flow_warp.hpp/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/video/optflow_warp/flow_warp.hpp",
"repo_id": "Cream",
"token_count": 325
}
| 292 |
import logging
import os
import random
import subprocess
import numpy as np
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from mmcv.runner import get_dist_info
def init_dist(launcher, backend='nccl', **kwargs):
if mp.get_start_method(allow_none=True) is None:
mp.set_start_method('spawn')
if launcher == 'pytorch':
_init_dist_pytorch(backend, **kwargs)
elif launcher == 'mpi':
_init_dist_mpi(backend, **kwargs)
elif launcher == 'slurm':
_init_dist_slurm(backend, **kwargs)
else:
raise ValueError('Invalid launcher type: {}'.format(launcher))
def _init_dist_pytorch(backend, **kwargs):
# TODO: use local_rank instead of rank % num_gpus
rank = int(os.environ['RANK'])
num_gpus = torch.cuda.device_count()
torch.cuda.set_device(rank % num_gpus)
dist.init_process_group(backend=backend, **kwargs)
def _init_dist_mpi(backend, **kwargs):
raise NotImplementedError
def _init_dist_slurm(backend, port=29500, **kwargs):
proc_id = int(os.environ['SLURM_PROCID'])
ntasks = int(os.environ['SLURM_NTASKS'])
node_list = os.environ['SLURM_NODELIST']
num_gpus = torch.cuda.device_count()
torch.cuda.set_device(proc_id % num_gpus)
addr = subprocess.getoutput(
'scontrol show hostname {} | head -n1'.format(node_list))
os.environ['MASTER_PORT'] = str(port)
os.environ['MASTER_ADDR'] = addr
os.environ['WORLD_SIZE'] = str(ntasks)
os.environ['RANK'] = str(proc_id)
dist.init_process_group(backend=backend)
def set_random_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def get_root_logger(log_level=logging.INFO):
logger = logging.getLogger()
if not logger.hasHandlers():
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(message)s',
level=log_level)
rank, _ = get_dist_info()
if rank != 0:
logger.setLevel('ERROR')
return logger
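# Typical call order in a training script (sketch; launcher-dependent):
#   set_random_seed(0)
#   init_dist('slurm', port=29500)          # or 'pytorch' with env vars set by the launcher
#   logger = get_root_logger(logging.INFO)  # non-zero ranks only log errors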
|
Cream/CDARTS/CDARTS_detection/mmdet/apis/env.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/apis/env.py",
"repo_id": "Cream",
"token_count": 879
}
| 293 |
import torch
def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False):
"""Calculate overlap between two set of bboxes.
If ``is_aligned`` is ``False``, then calculate the ious between each bbox
of bboxes1 and bboxes2, otherwise the ious between each aligned pair of
bboxes1 and bboxes2.
Args:
bboxes1 (Tensor): shape (m, 4)
bboxes2 (Tensor): shape (n, 4), if is_aligned is ``True``, then m and n
must be equal.
mode (str): "iou" (intersection over union) or iof (intersection over
foreground).
Returns:
ious(Tensor): shape (m, n) if is_aligned == False else shape (m, 1)
"""
assert mode in ['iou', 'iof']
rows = bboxes1.size(0)
cols = bboxes2.size(0)
if is_aligned:
assert rows == cols
if rows * cols == 0:
return bboxes1.new(rows, 1) if is_aligned else bboxes1.new(rows, cols)
if is_aligned:
lt = torch.max(bboxes1[:, :2], bboxes2[:, :2]) # [rows, 2]
rb = torch.min(bboxes1[:, 2:], bboxes2[:, 2:]) # [rows, 2]
wh = (rb - lt + 1).clamp(min=0) # [rows, 2]
overlap = wh[:, 0] * wh[:, 1]
area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (
bboxes1[:, 3] - bboxes1[:, 1] + 1)
if mode == 'iou':
area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (
bboxes2[:, 3] - bboxes2[:, 1] + 1)
ious = overlap / (area1 + area2 - overlap)
else:
ious = overlap / area1
else:
lt = torch.max(bboxes1[:, None, :2], bboxes2[:, :2]) # [rows, cols, 2]
rb = torch.min(bboxes1[:, None, 2:], bboxes2[:, 2:]) # [rows, cols, 2]
wh = (rb - lt + 1).clamp(min=0) # [rows, cols, 2]
overlap = wh[:, :, 0] * wh[:, :, 1]
area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (
bboxes1[:, 3] - bboxes1[:, 1] + 1)
if mode == 'iou':
area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (
bboxes2[:, 3] - bboxes2[:, 1] + 1)
ious = overlap / (area1[:, None] + area2 - overlap)
else:
ious = overlap / (area1[:, None])
return ious
|
Cream/CDARTS/CDARTS_detection/mmdet/core/bbox/geometry.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/bbox/geometry.py",
"repo_id": "Cream",
"token_count": 1094
}
| 294 |
from multiprocessing import Pool
import mmcv
import numpy as np
from terminaltables import AsciiTable
from mmdet.utils import print_log
from .bbox_overlaps import bbox_overlaps
from .class_names import get_classes
def average_precision(recalls, precisions, mode='area'):
"""Calculate average precision (for single or multiple scales).
Args:
recalls (ndarray): shape (num_scales, num_dets) or (num_dets, )
precisions (ndarray): shape (num_scales, num_dets) or (num_dets, )
mode (str): 'area' or '11points', 'area' means calculating the area
under precision-recall curve, '11points' means calculating
the average precision of recalls at [0, 0.1, ..., 1]
Returns:
float or ndarray: calculated average precision
"""
no_scale = False
if recalls.ndim == 1:
no_scale = True
recalls = recalls[np.newaxis, :]
precisions = precisions[np.newaxis, :]
assert recalls.shape == precisions.shape and recalls.ndim == 2
num_scales = recalls.shape[0]
ap = np.zeros(num_scales, dtype=np.float32)
if mode == 'area':
zeros = np.zeros((num_scales, 1), dtype=recalls.dtype)
ones = np.ones((num_scales, 1), dtype=recalls.dtype)
mrec = np.hstack((zeros, recalls, ones))
mpre = np.hstack((zeros, precisions, zeros))
for i in range(mpre.shape[1] - 1, 0, -1):
mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i])
for i in range(num_scales):
ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0]
ap[i] = np.sum(
(mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1])
elif mode == '11points':
for i in range(num_scales):
for thr in np.arange(0, 1 + 1e-3, 0.1):
precs = precisions[i, recalls[i, :] >= thr]
prec = precs.max() if precs.size > 0 else 0
ap[i] += prec
ap /= 11
else:
raise ValueError(
'Unrecognized mode, only "area" and "11points" are supported')
if no_scale:
ap = ap[0]
    return ap
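# Worked example (illustrative), single scale with 'area' mode:
#   recalls = np.array([0.5, 1.0])
#   precisions = np.array([1.0, 0.5])
#   average_precision(recalls, precisions)   # -> 0.75 (0.5 * 1.0 + 0.5 * 0.5)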
def tpfp_imagenet(det_bboxes,
gt_bboxes,
gt_bboxes_ignore=None,
default_iou_thr=0.5,
area_ranges=None):
"""Check if detected bboxes are true positive or false positive.
Args:
        det_bboxes (ndarray): Detected bboxes of this image, of shape (m, 5).
gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).
gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,
of shape (k, 4). Default: None
default_iou_thr (float): IoU threshold to be considered as matched for
medium and large bboxes (small ones have special rules).
Default: 0.5.
area_ranges (list[tuple] | None): Range of bbox areas to be evaluated,
in the format [(min1, max1), (min2, max2), ...]. Default: None.
Returns:
tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of
each array is (num_scales, m).
"""
# an indicator of ignored gts
gt_ignore_inds = np.concatenate(
(np.zeros(gt_bboxes.shape[0], dtype=np.bool),
np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool)))
# stack gt_bboxes and gt_bboxes_ignore for convenience
gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))
num_dets = det_bboxes.shape[0]
num_gts = gt_bboxes.shape[0]
if area_ranges is None:
area_ranges = [(None, None)]
num_scales = len(area_ranges)
    # tp and fp are of shape (num_scales, num_dets), each row is tp or fp
# of a certain scale.
tp = np.zeros((num_scales, num_dets), dtype=np.float32)
fp = np.zeros((num_scales, num_dets), dtype=np.float32)
if gt_bboxes.shape[0] == 0:
if area_ranges == [(None, None)]:
fp[...] = 1
else:
det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0] + 1) * (
det_bboxes[:, 3] - det_bboxes[:, 1] + 1)
for i, (min_area, max_area) in enumerate(area_ranges):
fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1
return tp, fp
ious = bbox_overlaps(det_bboxes, gt_bboxes - 1)
gt_w = gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1
gt_h = gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1
iou_thrs = np.minimum((gt_w * gt_h) / ((gt_w + 10.0) * (gt_h + 10.0)),
default_iou_thr)
# sort all detections by scores in descending order
sort_inds = np.argsort(-det_bboxes[:, -1])
for k, (min_area, max_area) in enumerate(area_ranges):
gt_covered = np.zeros(num_gts, dtype=bool)
# if no area range is specified, gt_area_ignore is all False
if min_area is None:
gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)
else:
gt_areas = gt_w * gt_h
gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)
for i in sort_inds:
max_iou = -1
matched_gt = -1
# find best overlapped available gt
for j in range(num_gts):
# different from PASCAL VOC: allow finding other gts if the
                # best overlapped ones are already matched by other det bboxes
if gt_covered[j]:
continue
elif ious[i, j] >= iou_thrs[j] and ious[i, j] > max_iou:
max_iou = ious[i, j]
matched_gt = j
# there are 4 cases for a det bbox:
# 1. it matches a gt, tp = 1, fp = 0
# 2. it matches an ignored gt, tp = 0, fp = 0
# 3. it matches no gt and within area range, tp = 0, fp = 1
# 4. it matches no gt but is beyond area range, tp = 0, fp = 0
if matched_gt >= 0:
gt_covered[matched_gt] = 1
if not (gt_ignore_inds[matched_gt]
or gt_area_ignore[matched_gt]):
tp[k, i] = 1
elif min_area is None:
fp[k, i] = 1
else:
bbox = det_bboxes[i, :4]
area = (bbox[2] - bbox[0] + 1) * (bbox[3] - bbox[1] + 1)
if area >= min_area and area < max_area:
fp[k, i] = 1
return tp, fp
def tpfp_default(det_bboxes,
gt_bboxes,
gt_bboxes_ignore=None,
iou_thr=0.5,
area_ranges=None):
"""Check if detected bboxes are true positive or false positive.
Args:
        det_bboxes (ndarray): Detected bboxes of this image, of shape (m, 5).
gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).
gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,
of shape (k, 4). Default: None
iou_thr (float): IoU threshold to be considered as matched.
Default: 0.5.
area_ranges (list[tuple] | None): Range of bbox areas to be evaluated,
in the format [(min1, max1), (min2, max2), ...]. Default: None.
Returns:
tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of
each array is (num_scales, m).
"""
# an indicator of ignored gts
gt_ignore_inds = np.concatenate(
(np.zeros(gt_bboxes.shape[0], dtype=np.bool),
np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool)))
# stack gt_bboxes and gt_bboxes_ignore for convenience
gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))
num_dets = det_bboxes.shape[0]
num_gts = gt_bboxes.shape[0]
if area_ranges is None:
area_ranges = [(None, None)]
num_scales = len(area_ranges)
    # tp and fp are of shape (num_scales, num_dets), each row is tp or fp of
# a certain scale
tp = np.zeros((num_scales, num_dets), dtype=np.float32)
fp = np.zeros((num_scales, num_dets), dtype=np.float32)
# if there is no gt bboxes in this image, then all det bboxes
# within area range are false positives
if gt_bboxes.shape[0] == 0:
if area_ranges == [(None, None)]:
fp[...] = 1
else:
det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0] + 1) * (
det_bboxes[:, 3] - det_bboxes[:, 1] + 1)
for i, (min_area, max_area) in enumerate(area_ranges):
fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1
return tp, fp
ious = bbox_overlaps(det_bboxes, gt_bboxes)
# for each det, the max iou with all gts
ious_max = ious.max(axis=1)
# for each det, which gt overlaps most with it
ious_argmax = ious.argmax(axis=1)
# sort all dets in descending order by scores
sort_inds = np.argsort(-det_bboxes[:, -1])
for k, (min_area, max_area) in enumerate(area_ranges):
gt_covered = np.zeros(num_gts, dtype=bool)
# if no area range is specified, gt_area_ignore is all False
if min_area is None:
gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)
else:
gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * (
gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1)
gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)
for i in sort_inds:
if ious_max[i] >= iou_thr:
matched_gt = ious_argmax[i]
if not (gt_ignore_inds[matched_gt]
or gt_area_ignore[matched_gt]):
if not gt_covered[matched_gt]:
gt_covered[matched_gt] = True
tp[k, i] = 1
else:
fp[k, i] = 1
# otherwise ignore this detected bbox, tp = 0, fp = 0
elif min_area is None:
fp[k, i] = 1
else:
bbox = det_bboxes[i, :4]
area = (bbox[2] - bbox[0] + 1) * (bbox[3] - bbox[1] + 1)
if area >= min_area and area < max_area:
fp[k, i] = 1
return tp, fp
def get_cls_results(det_results, annotations, class_id):
"""Get det results and gt information of a certain class.
Args:
det_results (list[list]): Same as `eval_map()`.
annotations (list[dict]): Same as `eval_map()`.
Returns:
tuple[list[np.ndarray]]: detected bboxes, gt bboxes, ignored gt bboxes
"""
cls_dets = [img_res[class_id] for img_res in det_results]
cls_gts = []
cls_gts_ignore = []
for ann in annotations:
gt_inds = ann['labels'] == (class_id + 1)
cls_gts.append(ann['bboxes'][gt_inds, :])
if ann.get('labels_ignore', None) is not None:
ignore_inds = ann['labels_ignore'] == (class_id + 1)
cls_gts_ignore.append(ann['bboxes_ignore'][ignore_inds, :])
else:
            cls_gts_ignore.append(np.empty((0, 4), dtype=np.float32))
return cls_dets, cls_gts, cls_gts_ignore
def eval_map(det_results,
annotations,
scale_ranges=None,
iou_thr=0.5,
dataset=None,
logger=None,
nproc=4):
"""Evaluate mAP of a dataset.
Args:
det_results (list[list]): [[cls1_det, cls2_det, ...], ...].
The outer list indicates images, and the inner list indicates
per-class detected bboxes.
annotations (list[dict]): Ground truth annotations where each item of
the list indicates an image. Keys of annotations are:
- "bboxes": numpy array of shape (n, 4)
- "labels": numpy array of shape (n, )
- "bboxes_ignore" (optional): numpy array of shape (k, 4)
- "labels_ignore" (optional): numpy array of shape (k, )
scale_ranges (list[tuple] | None): Range of scales to be evaluated,
in the format [(min1, max1), (min2, max2), ...]. A range of
(32, 64) means the area range between (32**2, 64**2).
Default: None.
iou_thr (float): IoU threshold to be considered as matched.
Default: 0.5.
dataset (list[str] | str | None): Dataset name or dataset classes,
            there are minor differences in metrics for different datasets, e.g.
"voc07", "imagenet_det", etc. Default: None.
logger (logging.Logger | str | None): The way to print the mAP
summary. See `mmdet.utils.print_log()` for details. Default: None.
nproc (int): Processes used for computing TP and FP.
Default: 4.
Returns:
tuple: (mAP, [dict, dict, ...])
"""
assert len(det_results) == len(annotations)
num_imgs = len(det_results)
num_scales = len(scale_ranges) if scale_ranges is not None else 1
num_classes = len(det_results[0]) # positive class num
area_ranges = ([(rg[0]**2, rg[1]**2) for rg in scale_ranges]
if scale_ranges is not None else None)
pool = Pool(nproc)
eval_results = []
for i in range(num_classes):
# get gt and det bboxes of this class
cls_dets, cls_gts, cls_gts_ignore = get_cls_results(
det_results, annotations, i)
# choose proper function according to datasets to compute tp and fp
if dataset in ['det', 'vid']:
tpfp_func = tpfp_imagenet
else:
tpfp_func = tpfp_default
# compute tp and fp for each image with multiple processes
tpfp = pool.starmap(
tpfp_func,
zip(cls_dets, cls_gts, cls_gts_ignore,
[iou_thr for _ in range(num_imgs)],
[area_ranges for _ in range(num_imgs)]))
tp, fp = tuple(zip(*tpfp))
# calculate gt number of each scale
# ignored gts or gts beyond the specific scale are not counted
num_gts = np.zeros(num_scales, dtype=int)
for j, bbox in enumerate(cls_gts):
if area_ranges is None:
num_gts[0] += bbox.shape[0]
else:
gt_areas = (bbox[:, 2] - bbox[:, 0] + 1) * (
bbox[:, 3] - bbox[:, 1] + 1)
for k, (min_area, max_area) in enumerate(area_ranges):
num_gts[k] += np.sum((gt_areas >= min_area)
& (gt_areas < max_area))
# sort all det bboxes by score, also sort tp and fp
cls_dets = np.vstack(cls_dets)
num_dets = cls_dets.shape[0]
sort_inds = np.argsort(-cls_dets[:, -1])
tp = np.hstack(tp)[:, sort_inds]
fp = np.hstack(fp)[:, sort_inds]
# calculate recall and precision with tp and fp
tp = np.cumsum(tp, axis=1)
fp = np.cumsum(fp, axis=1)
eps = np.finfo(np.float32).eps
recalls = tp / np.maximum(num_gts[:, np.newaxis], eps)
precisions = tp / np.maximum((tp + fp), eps)
# calculate AP
if scale_ranges is None:
recalls = recalls[0, :]
precisions = precisions[0, :]
num_gts = num_gts.item()
mode = 'area' if dataset != 'voc07' else '11points'
ap = average_precision(recalls, precisions, mode)
eval_results.append({
'num_gts': num_gts,
'num_dets': num_dets,
'recall': recalls,
'precision': precisions,
'ap': ap
})
if scale_ranges is not None:
# shape (num_classes, num_scales)
all_ap = np.vstack([cls_result['ap'] for cls_result in eval_results])
all_num_gts = np.vstack(
[cls_result['num_gts'] for cls_result in eval_results])
mean_ap = []
for i in range(num_scales):
if np.any(all_num_gts[:, i] > 0):
mean_ap.append(all_ap[all_num_gts[:, i] > 0, i].mean())
else:
mean_ap.append(0.0)
else:
aps = []
for cls_result in eval_results:
if cls_result['num_gts'] > 0:
aps.append(cls_result['ap'])
mean_ap = np.array(aps).mean().item() if aps else 0.0
print_map_summary(
mean_ap, eval_results, dataset, area_ranges, logger=logger)
return mean_ap, eval_results
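# Usage sketch (shapes are illustrative):
#   det_results[i][c] is an (n, 5) array of [x1, y1, x2, y2, score] for class c
#   annotations[i] is a dict with 'bboxes' (m, 4) and 'labels' (m, ), 1-based labels
#   mean_ap, per_class = eval_map(det_results, annotations, iou_thr=0.5, dataset='voc07')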
def print_map_summary(mean_ap,
results,
dataset=None,
scale_ranges=None,
logger=None):
"""Print mAP and results of each class.
A table will be printed to show the gts/dets/recall/AP of each class and
the mAP.
Args:
mean_ap (float): Calculated from `eval_map()`.
results (list[dict]): Calculated from `eval_map()`.
dataset (list[str] | str | None): Dataset name or dataset classes.
scale_ranges (list[tuple] | None): Range of scales to be evaluated.
logger (logging.Logger | str | None): The way to print the mAP
summary. See `mmdet.utils.print_log()` for details. Default: None.
"""
if logger == 'silent':
return
if isinstance(results[0]['ap'], np.ndarray):
num_scales = len(results[0]['ap'])
else:
num_scales = 1
if scale_ranges is not None:
assert len(scale_ranges) == num_scales
num_classes = len(results)
recalls = np.zeros((num_scales, num_classes), dtype=np.float32)
aps = np.zeros((num_scales, num_classes), dtype=np.float32)
num_gts = np.zeros((num_scales, num_classes), dtype=int)
for i, cls_result in enumerate(results):
if cls_result['recall'].size > 0:
recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1]
aps[:, i] = cls_result['ap']
num_gts[:, i] = cls_result['num_gts']
if dataset is None:
label_names = [str(i) for i in range(1, num_classes + 1)]
elif mmcv.is_str(dataset):
label_names = get_classes(dataset)
else:
label_names = dataset
if not isinstance(mean_ap, list):
mean_ap = [mean_ap]
header = ['class', 'gts', 'dets', 'recall', 'ap']
for i in range(num_scales):
if scale_ranges is not None:
print_log('Scale range {}'.format(scale_ranges[i]), logger=logger)
table_data = [header]
for j in range(num_classes):
row_data = [
label_names[j], num_gts[i, j], results[j]['num_dets'],
'{:.3f}'.format(recalls[i, j]), '{:.3f}'.format(aps[i, j])
]
table_data.append(row_data)
table_data.append(['mAP', '', '', '', '{:.3f}'.format(mean_ap[i])])
table = AsciiTable(table_data)
table.inner_footing_row_border = True
print_log('\n' + table.table, logger=logger)
|
Cream/CDARTS/CDARTS_detection/mmdet/core/evaluation/mean_ap.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/evaluation/mean_ap.py",
"repo_id": "Cream",
"token_count": 9370
}
| 295 |
import copy
from mmdet.utils import build_from_cfg
from .dataset_wrappers import ConcatDataset, RepeatDataset
from .registry import DATASETS
def _concat_dataset(cfg, default_args=None):
ann_files = cfg['ann_file']
img_prefixes = cfg.get('img_prefix', None)
seg_prefixes = cfg.get('seg_prefix', None)
proposal_files = cfg.get('proposal_file', None)
datasets = []
num_dset = len(ann_files)
for i in range(num_dset):
data_cfg = copy.deepcopy(cfg)
data_cfg['ann_file'] = ann_files[i]
if isinstance(img_prefixes, (list, tuple)):
data_cfg['img_prefix'] = img_prefixes[i]
if isinstance(seg_prefixes, (list, tuple)):
data_cfg['seg_prefix'] = seg_prefixes[i]
if isinstance(proposal_files, (list, tuple)):
data_cfg['proposal_file'] = proposal_files[i]
datasets.append(build_dataset(data_cfg, default_args))
return ConcatDataset(datasets)
def build_dataset(cfg, default_args=None):
if isinstance(cfg, (list, tuple)):
dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
elif cfg['type'] == 'RepeatDataset':
dataset = RepeatDataset(
build_dataset(cfg['dataset'], default_args), cfg['times'])
elif isinstance(cfg['ann_file'], (list, tuple)):
dataset = _concat_dataset(cfg, default_args)
else:
dataset = build_from_cfg(cfg, DATASETS, default_args)
return dataset
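# Example config (hypothetical paths/pipeline, following the mmdet config style):
#   train_cfg = dict(
#       type='RepeatDataset',
#       times=3,
#       dataset=dict(type='CocoDataset',
#                    ann_file='annotations/instances_train2017.json',
#                    img_prefix='train2017/',
#                    pipeline=train_pipeline))
#   dataset = build_dataset(train_cfg)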
|
Cream/CDARTS/CDARTS_detection/mmdet/datasets/builder.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/datasets/builder.py",
"repo_id": "Cream",
"token_count": 637
}
| 296 |
from collections.abc import Sequence
import matplotlib.pyplot as plt
import mmcv
import numpy as np
import torch
def to_tensor(data):
"""Convert objects of various python types to :obj:`torch.Tensor`.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`, :class:`int` and :class:`float`.
"""
if isinstance(data, torch.Tensor):
return data
elif isinstance(data, np.ndarray):
return torch.from_numpy(data)
elif isinstance(data, Sequence) and not mmcv.is_str(data):
return torch.tensor(data)
elif isinstance(data, int):
return torch.LongTensor([data])
elif isinstance(data, float):
return torch.FloatTensor([data])
else:
raise TypeError('type {} cannot be converted to tensor.'.format(
type(data)))
def random_scale(img_scales, mode='range'):
"""Randomly select a scale from a list of scales or scale ranges.
Args:
img_scales (list[tuple]): Image scale or scale range.
mode (str): "range" or "value".
Returns:
tuple: Sampled image scale.
"""
num_scales = len(img_scales)
if num_scales == 1: # fixed scale is specified
img_scale = img_scales[0]
elif num_scales == 2: # randomly sample a scale
if mode == 'range':
img_scale_long = [max(s) for s in img_scales]
img_scale_short = [min(s) for s in img_scales]
long_edge = np.random.randint(
min(img_scale_long),
max(img_scale_long) + 1)
short_edge = np.random.randint(
min(img_scale_short),
max(img_scale_short) + 1)
img_scale = (long_edge, short_edge)
elif mode == 'value':
img_scale = img_scales[np.random.randint(num_scales)]
else:
if mode != 'value':
raise ValueError(
'Only "value" mode supports more than 2 image scales')
img_scale = img_scales[np.random.randint(num_scales)]
return img_scale
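# Example (illustrative) of the two sampling modes:
#   random_scale([(1333, 800)])                        # always (1333, 800)
#   random_scale([(1333, 640), (1333, 800)], 'range')  # long edge 1333, short edge in [640, 800]
#   random_scale([(1333, 640), (1333, 800)], 'value')  # one of the two given tuples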
def show_ann(coco, img, ann_info):
plt.imshow(mmcv.bgr2rgb(img))
plt.axis('off')
coco.showAnns(ann_info)
plt.show()
|
Cream/CDARTS/CDARTS_detection/mmdet/datasets/utils.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/datasets/utils.py",
"repo_id": "Cream",
"token_count": 981
}
| 297 |
import logging
import torch
import torch.nn as nn
from torch.nn.modules.batchnorm import _BatchNorm
from mmcv.cnn import constant_init, kaiming_init
from .utils import load_checkpoint
from ..registry import BACKBONES
norm_cfg = {
'BN': nn.BatchNorm2d,
'SyncBN': nn.SyncBatchNorm,
'GN': nn.GroupNorm,
}
_norm = 'SyncBN'
norm_layer = norm_cfg[_norm]
blocks_key = [
'shufflenet_3x3',
'shufflenet_5x5',
'shufflenet_7x7',
'xception_3x3',
]
Blocks = {
'shufflenet_3x3': lambda prefix, in_channels, output_channels, base_mid_channels, stride, bn_training: conv1x1_dwconv_conv1x1(prefix, in_channels, output_channels, base_mid_channels, 3, stride, bn_training),
'shufflenet_5x5': lambda prefix, in_channels, output_channels, base_mid_channels, stride, bn_training: conv1x1_dwconv_conv1x1(prefix, in_channels, output_channels, base_mid_channels, 5, stride, bn_training),
'shufflenet_7x7': lambda prefix, in_channels, output_channels, base_mid_channels, stride, bn_training: conv1x1_dwconv_conv1x1(prefix, in_channels, output_channels, base_mid_channels, 7, stride, bn_training),
'xception_3x3': lambda prefix, in_channels, output_channels, base_mid_channels, stride, bn_training: xception(prefix, in_channels, output_channels, base_mid_channels, stride, bn_training),
}
def create_spatial_conv2d_group_bn_relu(prefix, in_channels, out_channels, kernel_size, stride, padding=0, dilation=1, groups=1,
bias=False, has_bn=True, has_relu=True, channel_shuffle=False, has_spatial_conv=True, has_spatial_conv_bn=True,
conv_name_fun=None, bn_name_fun=None, bn_training=True, fix_weights=False):
conv_name = prefix
if conv_name_fun:
conv_name = conv_name_fun(prefix)
layer = nn.Sequential()
if has_spatial_conv:
spatial_conv_name = conv_name + '_s'
layer.add_module(spatial_conv_name, nn.Conv2d(in_channels=in_channels, out_channels=in_channels,
kernel_size=kernel_size, stride=stride, padding=padding,
dilation=dilation, groups=in_channels, bias=bias))
if fix_weights:
pass
if has_spatial_conv_bn:
layer.add_module(spatial_conv_name + '_bn', norm_layer(in_channels))
if channel_shuffle:
pass
assert in_channels % groups == 0
assert out_channels % groups == 0
layer.add_module(conv_name, nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
kernel_size=1, stride=1, padding=0,
groups=groups, bias=bias))
if fix_weights:
pass
if has_bn:
bn_name = 'bn_' + prefix
if bn_name_fun:
bn_name = bn_name_fun(prefix)
layer.add_module(bn_name, norm_layer(out_channels))
if bn_training:
pass
if has_relu:
layer.add_module('relu' + prefix, nn.ReLU(inplace=True))
return layer
def conv1x1_dwconv_conv1x1(prefix, in_channels, out_channels, mid_channels, kernel_size, stride, bn_training=True):
mid_channels = int(mid_channels)
layer = list()
layer.append(create_spatial_conv2d_group_bn_relu(prefix=prefix + '_branch2a', in_channels=in_channels, out_channels=mid_channels,
kernel_size=-1, stride=1, padding=0, groups=1, has_bn=True, has_relu=True,
channel_shuffle=False, has_spatial_conv=False, has_spatial_conv_bn=False,
conv_name_fun=lambda p: 'interstellar' + p,
bn_name_fun=lambda p: 'bn' + p,
bn_training=bn_training))
layer.append(create_spatial_conv2d_group_bn_relu(prefix=prefix + '_branch2b', in_channels=mid_channels, out_channels=out_channels,
kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=1,
has_bn=True, has_relu=False, channel_shuffle=False, has_spatial_conv=True,
has_spatial_conv_bn=True,
conv_name_fun=lambda p: 'interstellar' + p,
bn_name_fun=lambda p: 'bn' + p,
bn_training=bn_training))
return nn.Sequential(*layer)
def xception(prefix, in_channels, out_channels, mid_channels, stride, bn_training=True):
mid_channels = int(mid_channels)
layer = list()
layer.append(create_spatial_conv2d_group_bn_relu(prefix=prefix + '_branch2a', in_channels=in_channels, out_channels=mid_channels,
kernel_size=3, stride=stride, padding=1, groups=1, has_bn=True, has_relu=True,
channel_shuffle=False, has_spatial_conv=True, has_spatial_conv_bn=True,
conv_name_fun=lambda p: 'interstellar' + p,
bn_name_fun=lambda p: 'bn' + p,
bn_training=bn_training))
layer.append(create_spatial_conv2d_group_bn_relu(prefix=prefix + '_branch2b', in_channels=mid_channels,
out_channels=mid_channels,
kernel_size=3, stride=1, padding=1, groups=1, has_bn=True,
has_relu=True,
channel_shuffle=False, has_spatial_conv=True,
has_spatial_conv_bn=True,
conv_name_fun=lambda p: 'interstellar' + p,
bn_name_fun=lambda p: 'bn' + p,
bn_training=bn_training))
layer.append(create_spatial_conv2d_group_bn_relu(prefix=prefix + '_branch2c', in_channels=mid_channels,
out_channels=out_channels,
kernel_size=3, stride=1, padding=1, groups=1, has_bn=True,
has_relu=False,
channel_shuffle=False, has_spatial_conv=True,
has_spatial_conv_bn=True,
conv_name_fun=lambda p: 'interstellar' + p,
bn_name_fun=lambda p: 'bn' + p,
bn_training=bn_training))
return nn.Sequential(*layer)
class ConvBNReLU(nn.Module):
def __init__(self, in_channel, out_channel, k_size, stride=1, padding=0, groups=1,
has_bn=True, has_relu=True, gaussian_init=False):
super(ConvBNReLU, self).__init__()
self.conv = nn.Conv2d(in_channel, out_channel, kernel_size=k_size,
stride=stride, padding=padding,
groups=groups, bias=True)
if gaussian_init:
nn.init.normal_(self.conv.weight.data, 0, 0.01)
if has_bn:
self.bn = norm_layer(out_channel)
self.has_bn = has_bn
self.has_relu = has_relu
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
if self.has_bn:
x = self.bn(x)
if self.has_relu:
x = self.relu(x)
return x
def channel_shuffle2(x):
channels = x.shape[1]
assert channels % 4 == 0
height = x.shape[2]
width = x.shape[3]
x = x.reshape(x.shape[0] * channels // 2, 2, height * width)
x = x.permute(1, 0, 2)
x = x.reshape(2, -1, channels // 2, height, width)
return x[0], x[1]
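# Note: channel_shuffle2 splits an (N, C, H, W) tensor (C must be divisible by 4,
# per the assert) into two (N, C // 2, H, W) tensors holding the even- and
# odd-indexed channels respectively. For example, a (2, 8, 4, 4) input yields two
# (2, 4, 4, 4) halves, used below as the identity and conv branches of a
# stride-1 ShuffleNetV2 block.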
class ShuffleNetV2BlockSearched(nn.Module):
def __init__(self, prefix, in_channels, out_channels, stride, base_mid_channels, i_th, architecture):
super(ShuffleNetV2BlockSearched, self).__init__()
op = blocks_key[architecture[i_th]]
self.ksize = int(op.split('_')[1][0])
self.stride = stride
if self.stride == 2:
self.conv = Blocks[op](prefix + '_' + op, in_channels, out_channels - in_channels, base_mid_channels, stride, True)
else:
self.conv = Blocks[op](prefix + '_' + op, in_channels // 2, out_channels // 2, base_mid_channels, stride, True)
if stride > 1:
self.proj_conv = create_spatial_conv2d_group_bn_relu(prefix + '_proj', in_channels, in_channels, self.ksize,
stride, self.ksize // 2,
has_bn=True, has_relu=True, channel_shuffle=False,
has_spatial_conv=True, has_spatial_conv_bn=True,
conv_name_fun=lambda p: 'interstellar' + p,
bn_name_fun=lambda p: 'bn' + p)
self.relu = nn.ReLU(inplace=True)
def forward(self, x_in):
if self.stride == 1:
x_proj, x = channel_shuffle2(x_in)
else:
x_proj = x_in
x = x_in
x_proj = self.proj_conv(x_proj)
x = self.relu(self.conv(x))
return torch.cat((x_proj, x), dim=1)
@BACKBONES.register_module
class DetNas(nn.Module):
def __init__(self, model_size='VOC_FPN_300M', out_indices=(3, 7, 15, 19), frozen_stages=-1):
super(DetNas, self).__init__()
print('Model size is {}.'.format(model_size))
self.out_indices = out_indices
        self.frozen_stages = frozen_stages
if model_size == 'COCO_FPN_3.8G':
architecture = [0, 0, 3, 1, 2, 1, 0, 2, 0, 3, 1, 2, 3, 3, 2, 0, 2, 1, 1, 3,
2, 0, 2, 2, 2, 1, 3, 1, 0, 3, 3, 3, 1, 3, 3, 3, 3, 3, 3, 3]
stage_repeats = [8, 8, 16, 8]
stage_out_channels = [-1, 72, 172, 432, 864, 1728, 1728]
elif model_size == 'COCO_FPN_1.3G':
architecture = [0, 0, 3, 1, 2, 1, 0, 2, 0, 3, 1, 2, 3, 3, 2, 0, 2, 1, 1, 3,
2, 0, 2, 2, 2, 1, 3, 1, 0, 3, 3, 3, 1, 3, 3, 3, 3, 3, 3, 3]
stage_repeats = [8, 8, 16, 8]
stage_out_channels = [-1, 48, 96, 240, 480, 960, 1024]
elif model_size == 'COCO_FPN_300M':
architecture = [2, 1, 2, 0, 2, 1, 1, 2, 3, 3, 1, 3, 0, 0, 3, 1, 3, 1, 3, 2]
stage_repeats = [4, 4, 8, 4]
stage_out_channels = [-1, 16, 64, 160, 320, 640, 1024]
elif model_size == 'COCO_RetinaNet_300M':
architecture = [2, 3, 1, 1, 3, 2, 1, 3, 3, 1, 1, 1, 3, 3, 2, 0, 3, 3, 3, 3]
stage_repeats = [4, 4, 8, 4]
stage_out_channels = [-1, 16, 64, 160, 320, 640, 1024]
elif model_size == 'VOC_FPN_300M':
architecture = [2, 1, 0, 3, 1, 3, 0, 3, 2, 0, 1, 1, 3, 3, 3, 3, 3, 3, 3, 1]
stage_repeats = [4, 4, 8, 4]
stage_out_channels = [-1, 16, 64, 160, 320, 640, 1024]
elif model_size == 'VOC_RetinaNet_300M':
architecture = [1, 3, 0, 0, 2, 3, 3, 3, 2, 3, 3, 3, 3, 2, 2, 0, 2, 3, 1, 1]
stage_repeats = [4, 4, 8, 4]
stage_out_channels = [-1, 16, 64, 160, 320, 640, 1024]
else:
raise NotImplementedError
self.first_conv = ConvBNReLU(in_channel=3, out_channel=stage_out_channels[1], k_size=3, stride=2, padding=1, gaussian_init=True)
self.features = list()
in_channels = stage_out_channels[1]
i_th = 0
for id_stage in range(1, len(stage_repeats) + 1):
out_channels = stage_out_channels[id_stage + 1]
repeats = stage_repeats[id_stage - 1]
for id_repeat in range(repeats):
prefix = str(id_stage) + chr(ord('a') + id_repeat)
stride = 1 if id_repeat > 0 else 2
self.features.append(ShuffleNetV2BlockSearched(prefix, in_channels=in_channels, out_channels=out_channels,
stride=stride, base_mid_channels=out_channels // 2, i_th=i_th,
architecture=architecture))
in_channels = out_channels
i_th += 1
self.features = nn.Sequential(*self.features)
if self.out_indices[-1] == len(self.features):
self.last_conv = ConvBNReLU(in_channel=in_channels, out_channel=stage_out_channels[-1], k_size=1, stride=1, padding=0)
# self.drop_out = nn.Dropout2d(p=0.2)
# self.global_pool = nn.AvgPool2d(7)
self._initialize_weights()
for m in self.modules():
if isinstance(m, nn.SyncBatchNorm):
m._specify_ddp_gpu_num(1)
self._freeze_stages()
def _freeze_stages(self):
if self.frozen_stages >= 0:
self.first_conv.bn.eval()
for m in [self.first_conv]:
for param in m.parameters():
param.requires_grad = False
for i in range(self.frozen_stages):
self.features[i].eval()
for param in self.features[i].parameters():
param.requires_grad = False
def _initialize_weights(self):
for name, m in self.named_modules():
if isinstance(m, nn.Conv2d):
if 'first' in name:
nn.init.normal_(m.weight, 0, 0.01)
else:
nn.init.normal_(m.weight, 0, 1.0 / m.weight.shape[1])
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, norm_layer):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0.0001)
nn.init.constant_(m.running_mean, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0.0001)
nn.init.constant_(m.running_mean, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
outs = []
x = self.first_conv(x)
for i in range(len(self.features)):
x = self.features[i](x)
if i in self.out_indices:
outs.append(x)
if self.out_indices[-1] == len(self.features):
x = self.last_conv(x)
outs.append(x)
# x = self.last_conv(x)
# x = self.drop_out(x)
# x = self.global_pool(x).view(x.size(0), -1)
return tuple(outs)
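# Hedged usage sketch (illustrative only, not an official example). It assumes
# the definitions earlier in this module (norm_layer, blocks_key, Blocks) and the
# mmdet/mmcv imports resolve, i.e. it runs inside the installed package rather
# than as a standalone script.
if __name__ == '__main__':
    import torch

    model = DetNas(model_size='VOC_FPN_300M', out_indices=(3, 7, 15, 19))
    model.init_weights(pretrained=None)
    model.eval()
    with torch.no_grad():
        feats = model(torch.randn(1, 3, 224, 224))
    # One feature map per entry in out_indices (strides 4, 8, 16 and 32).
    print([f.shape for f in feats])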
|
Cream/CDARTS/CDARTS_detection/mmdet/models/backbones/detnas.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/backbones/detnas.py",
"repo_id": "Cream",
"token_count": 9095
}
| 298 |
from .bbox_head import BBoxHead
from .convfc_bbox_head import ConvFCBBoxHead, SharedFCBBoxHead
from .double_bbox_head import DoubleConvFCBBoxHead
__all__ = [
'BBoxHead', 'ConvFCBBoxHead', 'SharedFCBBoxHead', 'DoubleConvFCBBoxHead'
]
|
Cream/CDARTS/CDARTS_detection/mmdet/models/bbox_heads/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/bbox_heads/__init__.py",
"repo_id": "Cream",
"token_count": 93
}
| 299 |
from .two_stage import TwoStageDetector
from ..registry import DETECTORS
import torch
from .. import builder
from mmdet.core import bbox2roi, bbox2result, build_assigner, build_sampler
@DETECTORS.register_module
class GridRCNN(TwoStageDetector):
"""Grid R-CNN.
This detector is the implementation of:
- Grid R-CNN (https://arxiv.org/abs/1811.12030)
- Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/1906.05688)
"""
def __init__(self,
backbone,
rpn_head,
bbox_roi_extractor,
bbox_head,
grid_roi_extractor,
grid_head,
train_cfg,
test_cfg,
neck=None,
shared_head=None,
pretrained=None):
assert grid_head is not None
super(GridRCNN, self).__init__(
backbone=backbone,
neck=neck,
shared_head=shared_head,
rpn_head=rpn_head,
bbox_roi_extractor=bbox_roi_extractor,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained)
if grid_roi_extractor is not None:
self.grid_roi_extractor = builder.build_roi_extractor(
grid_roi_extractor)
self.share_roi_extractor = False
else:
self.share_roi_extractor = True
self.grid_roi_extractor = self.bbox_roi_extractor
self.grid_head = builder.build_head(grid_head)
self.init_extra_weights()
def init_extra_weights(self):
self.grid_head.init_weights()
if not self.share_roi_extractor:
self.grid_roi_extractor.init_weights()
def _random_jitter(self, sampling_results, img_metas, amplitude=0.15):
"""Ramdom jitter positive proposals for training."""
for sampling_result, img_meta in zip(sampling_results, img_metas):
bboxes = sampling_result.pos_bboxes
random_offsets = bboxes.new_empty(bboxes.shape[0], 4).uniform_(
-amplitude, amplitude)
# before jittering
cxcy = (bboxes[:, 2:4] + bboxes[:, :2]) / 2
wh = (bboxes[:, 2:4] - bboxes[:, :2]).abs()
# after jittering
new_cxcy = cxcy + wh * random_offsets[:, :2]
new_wh = wh * (1 + random_offsets[:, 2:])
# xywh to xyxy
new_x1y1 = (new_cxcy - new_wh / 2)
new_x2y2 = (new_cxcy + new_wh / 2)
new_bboxes = torch.cat([new_x1y1, new_x2y2], dim=1)
# clip bboxes
max_shape = img_meta['img_shape']
if max_shape is not None:
new_bboxes[:, 0::2].clamp_(min=0, max=max_shape[1] - 1)
new_bboxes[:, 1::2].clamp_(min=0, max=max_shape[0] - 1)
sampling_result.pos_bboxes = new_bboxes
return sampling_results
def forward_train(self,
img,
img_meta,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
proposals=None):
x = self.extract_feat(img)
losses = dict()
# RPN forward and loss
if self.with_rpn:
rpn_outs = self.rpn_head(x)
rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta,
self.train_cfg.rpn)
rpn_losses = self.rpn_head.loss(
*rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
losses.update(rpn_losses)
proposal_cfg = self.train_cfg.get('rpn_proposal',
self.test_cfg.rpn)
proposal_inputs = rpn_outs + (img_meta, proposal_cfg)
proposal_list = self.rpn_head.get_bboxes(*proposal_inputs)
else:
proposal_list = proposals
if self.with_bbox:
# assign gts and sample proposals
bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner)
bbox_sampler = build_sampler(
self.train_cfg.rcnn.sampler, context=self)
num_imgs = img.size(0)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
for i in range(num_imgs):
assign_result = bbox_assigner.assign(proposal_list[i],
gt_bboxes[i],
gt_bboxes_ignore[i],
gt_labels[i])
sampling_result = bbox_sampler.sample(
assign_result,
proposal_list[i],
gt_bboxes[i],
gt_labels[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
sampling_results.append(sampling_result)
# bbox head forward and loss
rois = bbox2roi([res.bboxes for res in sampling_results])
# TODO: a more flexible way to decide which feature maps to use
bbox_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs], rois)
if self.with_shared_head:
bbox_feats = self.shared_head(bbox_feats)
cls_score, bbox_pred = self.bbox_head(bbox_feats)
bbox_targets = self.bbox_head.get_target(sampling_results,
gt_bboxes, gt_labels,
self.train_cfg.rcnn)
loss_bbox = self.bbox_head.loss(cls_score, bbox_pred,
*bbox_targets)
losses.update(loss_bbox)
# Grid head forward and loss
sampling_results = self._random_jitter(sampling_results, img_meta)
pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
grid_feats = self.grid_roi_extractor(
x[:self.grid_roi_extractor.num_inputs], pos_rois)
if self.with_shared_head:
grid_feats = self.shared_head(grid_feats)
# Accelerate training
max_sample_num_grid = self.train_cfg.rcnn.get('max_num_grid', 192)
sample_idx = torch.randperm(
grid_feats.shape[0])[:min(grid_feats.
shape[0], max_sample_num_grid)]
grid_feats = grid_feats[sample_idx]
grid_pred = self.grid_head(grid_feats)
grid_targets = self.grid_head.get_target(sampling_results,
self.train_cfg.rcnn)
grid_targets = grid_targets[sample_idx]
loss_grid = self.grid_head.loss(grid_pred, grid_targets)
losses.update(loss_grid)
return losses
def simple_test(self, img, img_meta, proposals=None, rescale=False):
"""Test without augmentation."""
assert self.with_bbox, "Bbox head must be implemented."
x = self.extract_feat(img)
proposal_list = self.simple_test_rpn(
x, img_meta, self.test_cfg.rpn) if proposals is None else proposals
det_bboxes, det_labels = self.simple_test_bboxes(
x, img_meta, proposal_list, self.test_cfg.rcnn, rescale=False)
# pack rois into bboxes
grid_rois = bbox2roi([det_bboxes[:, :4]])
grid_feats = self.grid_roi_extractor(
x[:len(self.grid_roi_extractor.featmap_strides)], grid_rois)
if grid_rois.shape[0] != 0:
self.grid_head.test_mode = True
grid_pred = self.grid_head(grid_feats)
det_bboxes = self.grid_head.get_bboxes(det_bboxes,
grid_pred['fused'],
img_meta)
if rescale:
det_bboxes[:, :4] /= img_meta[0]['scale_factor']
else:
det_bboxes = torch.Tensor([])
bbox_results = bbox2result(det_bboxes, det_labels,
self.bbox_head.num_classes)
return bbox_results
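# Hedged standalone sketch (illustrative only) of the box jitter performed in
# _random_jitter above. Box layout is (x1, y1, x2, y2); the 0.15 amplitude
# matches the method's default and the example box is made up.
if __name__ == '__main__':
    boxes = torch.tensor([[10., 20., 50., 80.]])
    amplitude = 0.15
    offsets = boxes.new_empty(boxes.shape[0], 4).uniform_(-amplitude, amplitude)
    cxcy = (boxes[:, 2:4] + boxes[:, :2]) / 2
    wh = (boxes[:, 2:4] - boxes[:, :2]).abs()
    new_cxcy = cxcy + wh * offsets[:, :2]  # shift the centre by up to 15% of w/h
    new_wh = wh * (1 + offsets[:, 2:])     # rescale width/height by up to 15%
    jittered = torch.cat([new_cxcy - new_wh / 2, new_cxcy + new_wh / 2], dim=1)
    print(jittered)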
|
Cream/CDARTS/CDARTS_detection/mmdet/models/detectors/grid_rcnn.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/detectors/grid_rcnn.py",
"repo_id": "Cream",
"token_count": 4677
}
| 300 |
import torch.nn as nn
import torch.nn.functional as F
from .utils import weighted_loss
from ..registry import LOSSES
mse_loss = weighted_loss(F.mse_loss)
@LOSSES.register_module
class MSELoss(nn.Module):
def __init__(self, reduction='mean', loss_weight=1.0):
super().__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, pred, target, weight=None, avg_factor=None):
loss = self.loss_weight * mse_loss(
pred,
target,
weight,
reduction=self.reduction,
avg_factor=avg_factor)
return loss
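# Hedged usage sketch (illustrative only). With this repo's weighted_loss
# decorator, `weight` scales each element-wise squared error before the 'mean'
# reduction, so the call below should print roughly 2/3 (errors 0, 4, 0 weighted
# by 1.0, 0.5, 1.0 and averaged over three elements).
if __name__ == '__main__':
    import torch

    criterion = MSELoss(reduction='mean', loss_weight=1.0)
    pred = torch.tensor([1.0, 2.0, 3.0])
    target = torch.tensor([1.0, 0.0, 3.0])
    weight = torch.tensor([1.0, 0.5, 1.0])
    print(criterion(pred, target, weight=weight))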
|
Cream/CDARTS/CDARTS_detection/mmdet/models/losses/mse_loss.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/losses/mse_loss.py",
"repo_id": "Cream",
"token_count": 277
}
| 301 |
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import kaiming_init, constant_init, xavier_init
from mmdet.core import auto_fp16
from ..registry import NECKS
from ..utils import ConvModule
@NECKS.register_module
class PAFPN(nn.Module):
r""" PAFPN Arch
lateral TD 3x3 BU
C5 --------> C5 P5 N5 N5
lateral
C4 --------> C4 P4 N4 N4
lateral
C3 --------> C3 P3 N3 N3
lateral
C2 --------> C2 P2 N2 N2
"""
def __init__(self,
in_channels,
out_channels,
num_outs,
start_level=0,
end_level=-1,
add_extra_convs=False,
extra_convs_on_inputs=True,
relu_before_extra_convs=False,
conv_cfg=None,
norm_cfg=None,
activation=None,
lateral_kernel=1,
fpn_kernel=3,
bottom_up_kernel=3,
pa_kernel=3):
super(PAFPN, self).__init__()
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.activation = activation
self.relu_before_extra_convs = relu_before_extra_convs
self.fp16_enabled = False
self.fpn_kernel = fpn_kernel
self.lateral_kernel = lateral_kernel
self.bottom_up_kernel = bottom_up_kernel
self.pa_kernel = pa_kernel
if end_level == -1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level < inputs, no extra level is allowed
self.backbone_end_level = end_level
assert end_level <= len(in_channels)
assert num_outs == end_level - start_level
self.start_level = start_level
self.end_level = end_level
self.add_extra_convs = add_extra_convs
self.extra_convs_on_inputs = extra_convs_on_inputs
self.lateral_convs = nn.ModuleList()
self.fpn_convs = nn.ModuleList()
self.bottom_up_convs = nn.ModuleList()
self.pa_convs = nn.ModuleList()
for i in range(self.start_level, self.backbone_end_level): # Faster [0,4]
l_conv = ConvModule(
in_channels[i], out_channels, lateral_kernel,
padding=(lateral_kernel-1)//2, conv_cfg=conv_cfg, norm_cfg=norm_cfg,
activation=None, inplace=True)
fpn_conv = ConvModule(
out_channels, out_channels, fpn_kernel,
padding=(fpn_kernel-1)//2, conv_cfg=conv_cfg, norm_cfg=norm_cfg,
activation=None, inplace=True)
self.lateral_convs.append(l_conv)
self.fpn_convs.append(fpn_conv)
for i in range(self.start_level, self.backbone_end_level - 1): # Faster [0,3]
if bottom_up_kernel > 0:
bottom_up_conv = ConvModule(
out_channels, out_channels, bottom_up_kernel, stride=2,
padding=(bottom_up_kernel-1)//2, conv_cfg=conv_cfg, norm_cfg=norm_cfg,
activation=activation, inplace=True)
self.bottom_up_convs.append(bottom_up_conv)
if pa_kernel > 0:
pa_conv = ConvModule(
out_channels, out_channels, pa_kernel,
padding=(pa_kernel-1)//2, conv_cfg=conv_cfg, norm_cfg=norm_cfg,
activation=activation, inplace=True)
self.pa_convs.append(pa_conv)
# add extra conv layers (e.g., RetinaNet)
extra_levels = num_outs - self.backbone_end_level + self.start_level
if add_extra_convs and extra_levels >= 1:
for i in range(extra_levels):
if i == 0 and self.extra_convs_on_inputs:
in_channels = self.in_channels[self.backbone_end_level - 1]
else:
in_channels = out_channels
extra_fpn_conv = ConvModule(
in_channels, out_channels, 3,
stride=2, padding=1, conv_cfg=conv_cfg,
norm_cfg=norm_cfg, activation=self.activation, inplace=True)
self.fpn_convs.append(extra_fpn_conv)
# default init_weights for conv(msra) and norm in ConvModule
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform')
@auto_fp16()
def forward(self, inputs):
# inputs [C2, C3, C4, C5]
assert len(inputs) == len(self.in_channels)
# build top-down laterals
laterals = [
lateral_conv(inputs[i + self.start_level])
for i, lateral_conv in enumerate(self.lateral_convs)
]
used_backbone_levels = len(laterals) # Faster rcnn:4
# Top-down path
for i in range(used_backbone_levels - 1, 0, -1):
laterals[i - 1] += F.interpolate(laterals[i], scale_factor=2, mode='nearest')
fpn_middle = [fpn_conv(laterals[i]) for i, fpn_conv in enumerate(self.fpn_convs)]
# Bottom-up path
# build outputs
if self.pa_kernel > 0:
outs = [fpn_middle[0]]
for i in range(0, self.backbone_end_level - self.start_level - 1): # Faster: [0,3]
if self.bottom_up_kernel > 0:
tmp = self.bottom_up_convs[i](outs[i]) + fpn_middle[i + 1]
else:
tmp = F.max_pool2d(outs[i], 2, stride=2) + fpn_middle[i + 1]
outs.append(self.pa_convs[i](tmp))
else:
outs = fpn_middle
# part 2: add extra levels
if self.num_outs > len(outs):
# use max pool to get more levels on top of outputs
# (e.g., Faster R-CNN, Mask R-CNN)
if not self.add_extra_convs:
for i in range(self.num_outs - used_backbone_levels):
outs.append(F.max_pool2d(outs[-1], 1, stride=2))
# add conv layers on top of original feature maps (RetinaNet)
else:
if self.extra_convs_on_inputs:
orig = inputs[self.backbone_end_level - 1]
outs.append(self.fpn_convs[used_backbone_levels](orig))
else:
outs.append(self.fpn_convs[used_backbone_levels](outs[-1]))
for i in range(used_backbone_levels + 1, self.num_outs):
if self.relu_before_extra_convs:
outs.append(self.fpn_convs[i](F.relu(outs[-1])))
else:
outs.append(self.fpn_convs[i](outs[-1]))
return tuple(outs)
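# Hedged usage sketch (illustrative only). It assumes the mmdet package context
# so that ConvModule and the NECKS registry import correctly; channel sizes and
# spatial shapes below are made up.
if __name__ == '__main__':
    import torch

    neck = PAFPN(in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5)
    neck.init_weights()
    feats = [
        torch.randn(1, 256, 64, 64),   # C2, stride 4
        torch.randn(1, 512, 32, 32),   # C3, stride 8
        torch.randn(1, 1024, 16, 16),  # C4, stride 16
        torch.randn(1, 2048, 8, 8),    # C5, stride 32
    ]
    outs = neck(feats)
    # Expect 5 maps: N2..N5 plus one extra level from the final max pooling.
    print([o.shape for o in outs])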
|
Cream/CDARTS/CDARTS_detection/mmdet/models/necks/fpn_panet.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/necks/fpn_panet.py",
"repo_id": "Cream",
"token_count": 3748
}
| 302 |
import math
import time
import torch
import torch.nn as nn
from torch.autograd import Function
import torch.nn.functional as F
# quantize for weights and activations
class Quantizer(Function):
    '''
    take a real value x in alpha*[0,1] or alpha*[-1,1]
    output a discrete-valued x in alpha*{0, 1/(2^k-1), ..., (2^k-1)/(2^k-1)} or the like,
    where k is nbit
    '''
@staticmethod
def forward(ctx, input, nbit, alpha=None, offset=None):
ctx.alpha = alpha
ctx.offset = offset
scale = (2 ** nbit - 1) if alpha is None else (2 ** nbit - 1) / alpha
ctx.scale = scale
return torch.round(input * scale) / scale if offset is None \
else (torch.round(input * scale) + torch.round(offset)) / scale
# if alpha is None:
# scale = 2 ** nbit - 1
# ctx.scale = scale
# if offset is None:
# return torch.round(input * scale) / scale
# else:
# return (torch.round(input * scale) + offset) / scale
# else:
# scale = (2 ** nbit - 1) / alpha
# if offset is None:
# return torch.round(input * scale) / scale
# else:
# ctx.save_for_backward(input, scale)
# return (torch.round(input * scale) + offset) / scale
@staticmethod
def backward(ctx, grad_output):
if ctx.offset is None:
return grad_output, None, None, None
else:
return grad_output, None, None, torch.sum(grad_output) / ctx.scale
def quantize(input, nbit, alpha=None, offset=None):
return Quantizer.apply(input, nbit, alpha, offset)
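# Example: with nbit=2 and alpha=None the scale is 2**2 - 1 = 3, so quantize maps
# an input value of 0.4 to round(1.2)/3 = 1/3 and 0.9 to round(2.7)/3 = 1.0, while
# backward() passes gradients through unchanged (a straight-through estimator).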
# standard sign with STE
class Signer(Function):
'''
take a real value x
output sign(x)
'''
@staticmethod
def forward(ctx, input):
return torch.sign(input)
@staticmethod
def backward(ctx, grad_output):
return grad_output
def sign(input):
return Signer.apply(input)
# sign in xnor-net for weights
class Xnor(Function):
'''
take a real value x
output sign(x_c) * E(|x_c|)
'''
@staticmethod
def forward(ctx, input):
return torch.sign(input) * torch.mean(torch.abs(input), dim=[1,2,3], keepdim=True)
@staticmethod
def backward(ctx, grad_output):
return grad_output
def xnor(input):
return Xnor.apply(input)
# sign in dorefa-net for weights
class ScaleSigner(Function):
'''
take a real value x
output sign(x) * E(|x|)
'''
@staticmethod
def forward(ctx, input):
return torch.sign(input) * torch.mean(torch.abs(input))
@staticmethod
def backward(ctx, grad_output):
return grad_output
def scale_sign(input):
return ScaleSigner.apply(input)
def dorefa_w(w, nbit_w, *args, **kwargs):
if nbit_w == 1:
w = scale_sign(w)
else:
w = torch.tanh(w)
w = w / (2 * torch.max(torch.abs(w))) + 0.5
w = 2 * quantize(w, nbit_w) - 1
return w
def wrpn_w(w, nbit_w, *args, **kwargs):
if nbit_w == 1:
w = scale_sign(w)
else:
w = quantize(torch.clamp(w, -1, 1), nbit_w - 1)
return w
def xnor_w(w, nbit_w=1, *args, **kwargs):
if nbit_w != 1:
raise ValueError('nbit_w must be 1 in XNOR-Net.')
return xnor(w)
def bireal_w(w, nbit_w=1, *args, **kwargs):
if nbit_w != 1:
raise ValueError('nbit_w must be 1 in Bi-Real-Net.')
return sign(w) * torch.mean(torch.abs(w.clone().detach()))
# dorefa quantize for activations
def dorefa_a(input, nbit_a, *args, **kwargs):
return quantize(torch.clamp(input, 0, 1.0), nbit_a, *args, **kwargs)
# PACT quantize for activations
def pact_a(input, nbit_a, alpha, *args, **kwargs):
x = 0.5*(torch.abs(input)-torch.abs(input-alpha)+alpha)
return quantize(x, nbit_a, alpha, *args, **kwargs)
# bi-real sign for activations
class BirealActivation(Function):
'''
take a real value x
output sign(x)
'''
@staticmethod
def forward(ctx, input, nbit_a=1):
ctx.save_for_backward(input)
return input.clamp(-1, 1).sign()
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
grad_input = (2 + 2 * input) * input.lt(0).float() + (2 - 2 * input) * input.ge(0).float()
grad_input = torch.clamp(grad_input, 0)
grad_input *= grad_output
return grad_input, None
def bireal_a(input, nbit_a=1, *args, **kwargs):
return BirealActivation.apply(input)
class QuantConv(nn.Conv2d):
# general QuantConv for quantized conv
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
super(QuantConv, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)
self.in_channels = in_channels
self.out_channels = out_channels
if bias:
self.bias = nn.Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_custome_parameters()
self.quant_config()
def quant_config(self, quant_name_w='dorefa', quant_name_a='dorefa', nbit_w=1, nbit_a=1, has_offset=False):
self.nbit_w = nbit_w
self.nbit_a = nbit_a
name_w_dict = {'dorefa': dorefa_w, 'pact': dorefa_w, 'wrpn': wrpn_w, 'xnor': xnor_w, 'bireal': bireal_w}
name_a_dict = {'dorefa': dorefa_a, 'pact': pact_a, 'wrpn': dorefa_a, 'xnor': dorefa_a, 'bireal': bireal_a}
self.quant_w = name_w_dict[quant_name_w]
self.quant_a = name_a_dict[quant_name_a]
if quant_name_a == 'pact':
self.alpha_a = nn.Parameter(torch.Tensor(1), requires_grad=True)
else:
self.register_parameter('alpha_a', None)
if quant_name_w == 'pact':
self.alpha_w = nn.Parameter(torch.Tensor(1), requires_grad=True)
else:
self.register_parameter('alpha_w', None)
if has_offset:
self.offset = nn.Parameter(torch.Tensor(1))
else:
self.register_parameter('offset', None)
# print(quant_name_w, quant_name_a, nbit_w, nbit_a)
if self.alpha_a is not None:
nn.init.constant_(self.alpha_a, 10)
if self.alpha_w is not None:
nn.init.constant_(self.alpha_w, 10)
if self.offset is not None:
nn.init.constant_(self.offset, 0)
def reset_custome_parameters(self):
nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
nn.init.constant_(self.bias, 0)
def forward(self, input):
# 0-bit: identity mapping
if self.nbit_w == 0 or self.nbit_a == 0:
diff_channels = self.out_channels - self.in_channels
if self.stride == 2 or self.stride == (2, 2):
x = F.pad(input[:, :, ::2, ::2], (0, 0, 0, 0, diff_channels//2, diff_channels-diff_channels//2), 'constant', 0)
return x
else:
x = F.pad(input, (0, 0, 0, 0, diff_channels//2, diff_channels-diff_channels//2), 'constant', 0)
return x
# w quan
if self.nbit_w < 32:
w = self.quant_w(self.weight, self.nbit_w, self.alpha_w, self.offset)
else:
w = self.weight
# a quan
if self.nbit_a < 32:
x = self.quant_a(input, self.nbit_a, self.alpha_a)
else:
x = F.relu(input)
x = F.conv2d(x, w, None, self.stride, self.padding, self.dilation, self.groups)
return x
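# Hedged usage sketch (illustrative only): this module only needs torch, so the
# sketch runs standalone; the 4-bit DoReFa settings below are made up.
if __name__ == '__main__':
    conv = QuantConv(16, 32, kernel_size=3, stride=1, padding=1, bias=False)
    conv.quant_config(quant_name_w='dorefa', quant_name_a='dorefa', nbit_w=4, nbit_a=4)
    x = torch.rand(1, 16, 8, 8)  # dorefa_a clamps activations to [0, 1] anyway
    y = conv(x)
    print(y.shape)               # expected: torch.Size([1, 32, 8, 8])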
|
Cream/CDARTS/CDARTS_detection/mmdet/models/utils/quant_conv.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/utils/quant_conv.py",
"repo_id": "Cream",
"token_count": 3794
}
| 303 |
from .context_block import ContextBlock
__all__ = [
'ContextBlock',
]
|
Cream/CDARTS/CDARTS_detection/mmdet/ops/gcb/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/gcb/__init__.py",
"repo_id": "Cream",
"token_count": 26
}
| 304 |
/* Generated by Cython 0.28.3 */
/* BEGIN: Cython Metadata
{
"distutils": {
"depends": [
"/home/work/anaconda3/lib/python3.6/site-packages/numpy/core/include/numpy/arrayobject.h",
"/home/work/anaconda3/lib/python3.6/site-packages/numpy/core/include/numpy/ufuncobject.h"
],
"extra_compile_args": {
"cc": [
"-Wno-unused-function",
"-Wno-write-strings"
],
"nvcc": [
"-c",
"--compiler-options",
"-fPIC"
]
},
"include_dirs": [
"/home/work/anaconda3/lib/python3.6/site-packages/numpy/core/include"
],
"language": "c++",
"name": "soft_nms_cpu",
"sources": [
"src/soft_nms_cpu.pyx"
]
},
"module_name": "soft_nms_cpu"
}
END: Cython Metadata */
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
#error Cython requires Python 2.6+ or Python 3.3+.
#else
#define CYTHON_ABI "0_28_3"
#define CYTHON_FUTURE_DIVISION 1
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#define __PYX_COMMA ,
#ifndef HAVE_LONG_LONG
#if PY_VERSION_HEX >= 0x02070000
#define HAVE_LONG_LONG
#endif
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 0
#undef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 0
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#if PY_VERSION_HEX < 0x03050000
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#undef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#undef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#elif defined(PYSTON_VERSION)
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
#define CYTHON_USE_PYTYPE_LOOKUP 1
#endif
#if PY_MAJOR_VERSION < 3
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#elif !defined(CYTHON_USE_PYLONG_INTERNALS)
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#ifndef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 1
#endif
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#if PY_VERSION_HEX < 0x030300F0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#elif !defined(CYTHON_USE_UNICODE_WRITER)
#define CYTHON_USE_UNICODE_WRITER 1
#endif
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#ifndef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 1
#endif
#ifndef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 1
#endif
#ifndef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT (0 && PY_VERSION_HEX >= 0x03050000)
#endif
#ifndef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
#endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#undef SHIFT
#undef BASE
#undef MASK
#endif
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
# if defined(__cplusplus)
template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
# else
# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifdef _MSC_VER
#ifndef _MSC_STDINT_H_
#if _MSC_VER < 1300
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
#else
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
#endif
#else
#include <stdint.h>
#endif
#ifndef CYTHON_FALLTHROUGH
#if defined(__cplusplus) && __cplusplus >= 201103L
#if __has_cpp_attribute(fallthrough)
#define CYTHON_FALLTHROUGH [[fallthrough]]
#elif __has_cpp_attribute(clang::fallthrough)
#define CYTHON_FALLTHROUGH [[clang::fallthrough]]
#elif __has_cpp_attribute(gnu::fallthrough)
#define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
#endif
#endif
#ifndef CYTHON_FALLTHROUGH
#if __has_attribute(fallthrough)
#define CYTHON_FALLTHROUGH __attribute__((fallthrough))
#else
#define CYTHON_FALLTHROUGH
#endif
#endif
#if defined(__clang__ ) && defined(__apple_build_version__)
#if __apple_build_version__ < 7000000
#undef CYTHON_FALLTHROUGH
#define CYTHON_FALLTHROUGH
#endif
#endif
#endif
#ifndef __cplusplus
#error "Cython files generated with the C++ option must be compiled with a C++ compiler."
#endif
#ifndef CYTHON_INLINE
#if defined(__clang__)
#define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
#else
#define CYTHON_INLINE inline
#endif
#endif
template<typename T>
void __Pyx_call_destructor(T& x) {
x.~T();
}
template<typename T>
class __Pyx_FakeReference {
public:
__Pyx_FakeReference() : ptr(NULL) { }
__Pyx_FakeReference(const T& ref) : ptr(const_cast<T*>(&ref)) { }
T *operator->() { return ptr; }
T *operator&() { return ptr; }
operator T&() { return *ptr; }
template<typename U> bool operator ==(U other) { return *ptr == other; }
template<typename U> bool operator !=(U other) { return *ptr != other; }
private:
T *ptr;
};
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
#endif
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
#define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func)\
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS)))))
#else
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_PYSTON
#define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
#else
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
#endif
#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#elif PY_VERSION_HEX >= 0x03060000
#define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
#elif PY_VERSION_HEX >= 0x03000000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#else
#define __Pyx_PyThreadState_Current _PyThreadState_Current
#endif
#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
#include "pythread.h"
#define Py_tss_NEEDS_INIT 0
typedef int Py_tss_t;
static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
*key = PyThread_create_key();
return 0; // PyThread_create_key reports success always
}
static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
*key = Py_tss_NEEDS_INIT;
return key;
}
static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
PyObject_Free(key);
}
static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
return *key != Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
PyThread_delete_key(*key);
*key = Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
return PyThread_set_key_value(*key, value);
}
static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
return PyThread_get_key_value(*key);
}
#endif // TSS (Thread Specific Storage) API
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
#else
#define __Pyx_PyDict_NewPresized(n) PyDict_New()
#endif
#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
#else
#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#else
#define CYTHON_PEP393_ENABLED 0
#define PyUnicode_1BYTE_KIND 1
#define PyUnicode_2BYTE_KIND 2
#define PyUnicode_4BYTE_KIND 4
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
#define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#define PyObject_Unicode PyObject_Str
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#if CYTHON_ASSUME_SAFE_MACROS
#define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
#else
#define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : (Py_INCREF(func), func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if CYTHON_USE_ASYNC_SLOTS
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#else
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#endif
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef __Pyx_PyAsyncMethodsStruct
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
float value;
memset(&value, 0xFF, sizeof(value));
return value;
}
#endif
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ \
__pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \
}
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__soft_nms_cpu
#define __PYX_HAVE_API__soft_nms_cpu
/* Early includes */
#include <string.h>
#include <stdio.h>
#include "numpy/arrayobject.h"
#include "numpy/ufuncobject.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
#define CYTHON_WITHOUT_ASSERTIONS
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
#define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
const Py_UNICODE *u_end = u;
while (*u_end++) ;
return (size_t)(u_end - u - 1);
}
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
#define __Pyx_PySequence_Tuple(obj)\
(likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
const char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
if (strcmp(default_encoding_c, "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (!ascii_chars_u) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
Py_DECREF(ascii_chars_u);
Py_DECREF(ascii_chars_b);
}
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
__PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c));
if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
static PyObject *__pyx_m = NULL;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime = NULL;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
/* Header.proto */
#if !defined(CYTHON_CCOMPLEX)
#if defined(__cplusplus)
#define CYTHON_CCOMPLEX 1
#elif defined(_Complex_I)
#define CYTHON_CCOMPLEX 1
#else
#define CYTHON_CCOMPLEX 0
#endif
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#include <complex>
#else
#include <complex.h>
#endif
#endif
#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
#undef _Complex_I
#define _Complex_I 1.0fj
#endif
static const char *__pyx_f[] = {
"src/soft_nms_cpu.pyx",
"__init__.pxd",
"type.pxd",
};
/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
const char* name;
struct __Pyx_StructField_* fields;
size_t size;
size_t arraysize[8];
int ndim;
char typegroup;
char is_unsigned;
int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
__Pyx_TypeInfo* type;
const char* name;
size_t offset;
} __Pyx_StructField;
typedef struct {
__Pyx_StructField* field;
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
__Pyx_StructField root;
__Pyx_BufFmt_StackElem* head;
size_t fmt_offset;
size_t new_count, enc_count;
size_t struct_alignment;
int is_complex;
char enc_type;
char new_packmode;
char enc_packmode;
char is_valid_array;
} __Pyx_BufFmt_Context;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":730
* # in Cython to enable them only on the right systems.
*
* ctypedef npy_int8 int8_t # <<<<<<<<<<<<<<
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
*/
typedef npy_int8 __pyx_t_5numpy_int8_t;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":731
*
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t # <<<<<<<<<<<<<<
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t
*/
typedef npy_int16 __pyx_t_5numpy_int16_t;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":732
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t # <<<<<<<<<<<<<<
* ctypedef npy_int64 int64_t
* #ctypedef npy_int96 int96_t
*/
typedef npy_int32 __pyx_t_5numpy_int32_t;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":733
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t # <<<<<<<<<<<<<<
* #ctypedef npy_int96 int96_t
* #ctypedef npy_int128 int128_t
*/
typedef npy_int64 __pyx_t_5numpy_int64_t;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":737
* #ctypedef npy_int128 int128_t
*
* ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<<
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
*/
typedef npy_uint8 __pyx_t_5numpy_uint8_t;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":738
*
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<<
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t
*/
typedef npy_uint16 __pyx_t_5numpy_uint16_t;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":739
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<<
* ctypedef npy_uint64 uint64_t
* #ctypedef npy_uint96 uint96_t
*/
typedef npy_uint32 __pyx_t_5numpy_uint32_t;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":740
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<<
* #ctypedef npy_uint96 uint96_t
* #ctypedef npy_uint128 uint128_t
*/
typedef npy_uint64 __pyx_t_5numpy_uint64_t;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":744
* #ctypedef npy_uint128 uint128_t
*
* ctypedef npy_float32 float32_t # <<<<<<<<<<<<<<
* ctypedef npy_float64 float64_t
* #ctypedef npy_float80 float80_t
*/
typedef npy_float32 __pyx_t_5numpy_float32_t;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":745
*
* ctypedef npy_float32 float32_t
* ctypedef npy_float64 float64_t # <<<<<<<<<<<<<<
* #ctypedef npy_float80 float80_t
* #ctypedef npy_float128 float128_t
*/
typedef npy_float64 __pyx_t_5numpy_float64_t;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":754
* # The int types are mapped a bit surprising --
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t
*/
typedef npy_long __pyx_t_5numpy_int_t;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":755
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong longlong_t
*
*/
typedef npy_longlong __pyx_t_5numpy_long_t;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":756
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_ulong uint_t
*/
typedef npy_longlong __pyx_t_5numpy_longlong_t;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":758
* ctypedef npy_longlong longlong_t
*
* ctypedef npy_ulong uint_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t
*/
typedef npy_ulong __pyx_t_5numpy_uint_t;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":759
*
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulonglong_t
*
*/
typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":760
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_intp intp_t
*/
typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":762
* ctypedef npy_ulonglong ulonglong_t
*
* ctypedef npy_intp intp_t # <<<<<<<<<<<<<<
* ctypedef npy_uintp uintp_t
*
*/
typedef npy_intp __pyx_t_5numpy_intp_t;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":763
*
* ctypedef npy_intp intp_t
* ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<<
*
* ctypedef npy_double float_t
*/
typedef npy_uintp __pyx_t_5numpy_uintp_t;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":765
* ctypedef npy_uintp uintp_t
*
* ctypedef npy_double float_t # <<<<<<<<<<<<<<
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t
*/
typedef npy_double __pyx_t_5numpy_float_t;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":766
*
* ctypedef npy_double float_t
* ctypedef npy_double double_t # <<<<<<<<<<<<<<
* ctypedef npy_longdouble longdouble_t
*
*/
typedef npy_double __pyx_t_5numpy_double_t;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":767
* ctypedef npy_double float_t
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cfloat cfloat_t
*/
typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
/* Declarations.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< float > __pyx_t_float_complex;
#else
typedef float _Complex __pyx_t_float_complex;
#endif
#else
typedef struct { float real, imag; } __pyx_t_float_complex;
#endif
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float);
/* Declarations.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< double > __pyx_t_double_complex;
#else
typedef double _Complex __pyx_t_double_complex;
#endif
#else
typedef struct { double real, imag; } __pyx_t_double_complex;
#endif
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double);
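/* The float/double complex helpers above are emitted because the .pyx file
 * cimports numpy; they are only needed by the generic numpy dtype-handling
 * code, not by the soft-NMS arithmetic itself, which works on plain floats.
 */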
/*--- Type declarations ---*/
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":769
* ctypedef npy_longdouble longdouble_t
*
* ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<<
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t
*/
typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":770
*
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<<
* ctypedef npy_clongdouble clongdouble_t
*
*/
typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":771
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cdouble complex_t
*/
typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":773
* ctypedef npy_clongdouble clongdouble_t
*
* ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew1(a):
*/
typedef npy_cdouble __pyx_t_5numpy_complex_t;
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
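/* CYTHON_REFNANNY is a debug-only reference-count tracer. In a normal build
 * the branch above is inactive, so __Pyx_RefNannySetupContext/FinishContext
 * expand to nothing and __Pyx_INCREF/__Pyx_DECREF are plain
 * Py_INCREF/Py_DECREF; the RefNanny calls scattered through the functions
 * below therefore add no overhead.
 */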
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
/* PyObjectGetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
const char* function_name);
/* ArgTypeTest.proto */
#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\
__Pyx__ArgTypeTest(obj, type, name, exact))
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);
/* IsLittleEndian.proto */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void);
/* BufferFormatCheck.proto */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type);
/* BufferGetAndValidate.proto */
#define __Pyx_GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)\
((obj == Py_None || obj == NULL) ?\
(__Pyx_ZeroBuffer(buf), 0) :\
__Pyx__GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack))
static int __Pyx__GetBufferAndValidate(Py_buffer* buf, PyObject* obj,
__Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
static void __Pyx_ZeroBuffer(Py_buffer* buf);
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
static Py_ssize_t __Pyx_minusones[] = { -1, -1, -1, -1, -1, -1, -1, -1 };
static Py_ssize_t __Pyx_zeros[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
/* PyCFunctionFastCall.proto */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
#endif
/* PyFunctionFastCall.proto */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
__Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
#endif
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* PyObjectCallNoArg.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
#else
#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL)
#endif
/* GetItemInt.proto */
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
(is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
__Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck);
/* GetModuleGlobalName.proto */
static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name);
/* ObjectGetItem.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key);
#else
#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key)
#endif
/* PyIntBinop.proto */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace);
#else
#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace)\
(inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2))
#endif
/* SetItemInt.proto */
#define __Pyx_SetItemInt(o, i, v, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_SetItemInt_Fast(o, (Py_ssize_t)i, v, is_list, wraparound, boundscheck) :\
(is_list ? (PyErr_SetString(PyExc_IndexError, "list assignment index out of range"), -1) :\
__Pyx_SetItemInt_Generic(o, to_py_func(i), v)))
static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v);
static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v,
int is_list, int wraparound, int boundscheck);
/* SliceObject.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(
PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop,
PyObject** py_start, PyObject** py_stop, PyObject** py_slice,
int has_cstart, int has_cstop, int wraparound);
/* PyThreadStateGet.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#define __Pyx_PyErr_Occurred() PyErr_Occurred()
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
#else
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#endif
#else
#define __Pyx_PyErr_Clear() PyErr_Clear()
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* DictGetItem.proto */
#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key);
#define __Pyx_PyObject_Dict_GetItem(obj, name)\
(likely(PyDict_CheckExact(obj)) ?\
__Pyx_PyDict_GetItem(obj, name) : PyObject_GetItem(obj, name))
#else
#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
#define __Pyx_PyObject_Dict_GetItem(obj, name) PyObject_GetItem(obj, name)
#endif
/* RaiseTooManyValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
/* RaiseNeedMoreValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
/* RaiseNoneIterError.proto */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
/* ExtTypeTest.proto */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
/* SaveResetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
#else
#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
#endif
/* PyErrExceptionMatches.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
#endif
/* GetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* CLineInTraceback.proto */
#ifdef CYTHON_CLINE_IN_TRACEBACK
#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
#else
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
#endif
/* CodeObjectCache.proto */
typedef struct {
PyCodeObject* code_object;
int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
/* BufferStructDeclare.proto */
typedef struct {
Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
size_t refcount;
Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
__Pyx_Buffer *rcbuffer;
char *data;
__Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_int(unsigned int value);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
/* RealImag.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#define __Pyx_CREAL(z) ((z).real())
#define __Pyx_CIMAG(z) ((z).imag())
#else
#define __Pyx_CREAL(z) (__real__(z))
#define __Pyx_CIMAG(z) (__imag__(z))
#endif
#else
#define __Pyx_CREAL(z) ((z).real)
#define __Pyx_CIMAG(z) ((z).imag)
#endif
#if defined(__cplusplus) && CYTHON_CCOMPLEX\
&& (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103)
#define __Pyx_SET_CREAL(z,x) ((z).real(x))
#define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
#else
#define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
#define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
#endif
/* Arithmetic.proto */
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq_float(a, b) ((a)==(b))
#define __Pyx_c_sum_float(a, b) ((a)+(b))
#define __Pyx_c_diff_float(a, b) ((a)-(b))
#define __Pyx_c_prod_float(a, b) ((a)*(b))
#define __Pyx_c_quot_float(a, b) ((a)/(b))
#define __Pyx_c_neg_float(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero_float(z) ((z)==(float)0)
#define __Pyx_c_conj_float(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs_float(z) (::std::abs(z))
#define __Pyx_c_pow_float(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero_float(z) ((z)==0)
#define __Pyx_c_conj_float(z) (conjf(z))
#if 1
#define __Pyx_c_abs_float(z) (cabsf(z))
#define __Pyx_c_pow_float(a, b) (cpowf(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex);
static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex);
#if 1
static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex);
#endif
#endif
/* Arithmetic.proto */
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq_double(a, b) ((a)==(b))
#define __Pyx_c_sum_double(a, b) ((a)+(b))
#define __Pyx_c_diff_double(a, b) ((a)-(b))
#define __Pyx_c_prod_double(a, b) ((a)*(b))
#define __Pyx_c_quot_double(a, b) ((a)/(b))
#define __Pyx_c_neg_double(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero_double(z) ((z)==(double)0)
#define __Pyx_c_conj_double(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs_double(z) (::std::abs(z))
#define __Pyx_c_pow_double(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero_double(z) ((z)==0)
#define __Pyx_c_conj_double(z) (conj(z))
#if 1
#define __Pyx_c_abs_double(z) (cabs(z))
#define __Pyx_c_pow_double(a, b) (cpow(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex);
static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex);
#if 1
static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex);
#endif
#endif
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value);
/* CIntFromPy.proto */
static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* FastTypeChecks.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
#else
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
#endif
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* PyIdentifierFromString.proto */
#if !defined(__Pyx_PyIdentifier_FromString)
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s)
#else
#define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s)
#endif
#endif
/* ModuleImport.proto */
static PyObject *__Pyx_ImportModule(const char *name);
/* TypeImport.proto */
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
/* Module declarations from 'cpython.buffer' */
/* Module declarations from 'libc.string' */
/* Module declarations from 'libc.stdio' */
/* Module declarations from '__builtin__' */
/* Module declarations from 'cpython.type' */
static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
/* Module declarations from 'cpython' */
/* Module declarations from 'cpython.object' */
/* Module declarations from 'cpython.ref' */
/* Module declarations from 'cpython.mem' */
/* Module declarations from 'numpy' */
/* Module declarations from 'numpy' */
static PyTypeObject *__pyx_ptype_5numpy_dtype = 0;
static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0;
static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0;
static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0;
static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0;
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/
/* Module declarations from 'soft_nms_cpu' */
static CYTHON_INLINE __pyx_t_5numpy_float32_t __pyx_f_12soft_nms_cpu_max(__pyx_t_5numpy_float32_t, __pyx_t_5numpy_float32_t); /*proto*/
static CYTHON_INLINE __pyx_t_5numpy_float32_t __pyx_f_12soft_nms_cpu_min(__pyx_t_5numpy_float32_t, __pyx_t_5numpy_float32_t); /*proto*/
static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 };
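/* __Pyx_TypeInfo_float describes the element type of the boxes_in buffer
 * declared as `np.ndarray[float, ndim=2]` in soft_nms_cpu.pyx (typegroup 'R'
 * = real floating point, size sizeof(float)); the 2-D requirement itself is
 * enforced later by the __Pyx_GetBufferAndValidate() call, not by this
 * struct.
 */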
#define __Pyx_MODULE_NAME "soft_nms_cpu"
extern int __pyx_module_is_main_soft_nms_cpu;
int __pyx_module_is_main_soft_nms_cpu = 0;
/* Implementation of 'soft_nms_cpu' */
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_RuntimeError;
static PyObject *__pyx_builtin_ImportError;
static const char __pyx_k_N[] = "N";
static const char __pyx_k_i[] = "i";
static const char __pyx_k_s[] = "s";
static const char __pyx_k_ih[] = "ih";
static const char __pyx_k_iw[] = "iw";
static const char __pyx_k_np[] = "np";
static const char __pyx_k_ov[] = "ov";
static const char __pyx_k_ti[] = "ti";
static const char __pyx_k_ts[] = "ts";
static const char __pyx_k_ua[] = "ua";
static const char __pyx_k_x1[] = "x1";
static const char __pyx_k_x2[] = "x2";
static const char __pyx_k_y1[] = "y1";
static const char __pyx_k_y2[] = "y2";
static const char __pyx_k_exp[] = "exp";
static const char __pyx_k_pos[] = "pos";
static const char __pyx_k_tx1[] = "tx1";
static const char __pyx_k_tx2[] = "tx2";
static const char __pyx_k_ty1[] = "ty1";
static const char __pyx_k_ty2[] = "ty2";
static const char __pyx_k_area[] = "area";
static const char __pyx_k_copy[] = "copy";
static const char __pyx_k_inds[] = "inds";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_boxes[] = "boxes";
static const char __pyx_k_numpy[] = "numpy";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_shape[] = "shape";
static const char __pyx_k_sigma[] = "sigma";
static const char __pyx_k_arange[] = "arange";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_maxpos[] = "maxpos";
static const char __pyx_k_method[] = "method";
static const char __pyx_k_weight[] = "weight";
static const char __pyx_k_iou_thr[] = "iou_thr";
static const char __pyx_k_box_area[] = "box_area";
static const char __pyx_k_boxes_in[] = "boxes_in";
static const char __pyx_k_maxscore[] = "maxscore";
static const char __pyx_k_min_score[] = "min_score";
static const char __pyx_k_ValueError[] = "ValueError";
static const char __pyx_k_ImportError[] = "ImportError";
static const char __pyx_k_RuntimeError[] = "RuntimeError";
static const char __pyx_k_soft_nms_cpu[] = "soft_nms_cpu";
static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
static const char __pyx_k_src_soft_nms_cpu_pyx[] = "src/soft_nms_cpu.pyx";
static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous";
static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import";
static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)";
static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd";
static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported";
static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous";
static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import";
static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short.";
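/* The __pyx_k_* byte arrays above are the raw C literals from which the
 * interned Python string objects declared below are created at module-init
 * time: argument and local-variable names of soft_nms_cpu (boxes_in,
 * iou_thr, sigma, ...), error-message texts, and the module / source-file
 * names.
 */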
static PyObject *__pyx_kp_u_Format_string_allocated_too_shor;
static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2;
static PyObject *__pyx_n_s_ImportError;
static PyObject *__pyx_n_s_N;
static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor;
static PyObject *__pyx_n_s_RuntimeError;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_n_s_arange;
static PyObject *__pyx_n_s_area;
static PyObject *__pyx_n_s_box_area;
static PyObject *__pyx_n_s_boxes;
static PyObject *__pyx_n_s_boxes_in;
static PyObject *__pyx_n_s_cline_in_traceback;
static PyObject *__pyx_n_s_copy;
static PyObject *__pyx_n_s_exp;
static PyObject *__pyx_n_s_i;
static PyObject *__pyx_n_s_ih;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_inds;
static PyObject *__pyx_n_s_iou_thr;
static PyObject *__pyx_n_s_iw;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_maxpos;
static PyObject *__pyx_n_s_maxscore;
static PyObject *__pyx_n_s_method;
static PyObject *__pyx_n_s_min_score;
static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous;
static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou;
static PyObject *__pyx_n_s_np;
static PyObject *__pyx_n_s_numpy;
static PyObject *__pyx_kp_u_numpy_core_multiarray_failed_to;
static PyObject *__pyx_kp_u_numpy_core_umath_failed_to_impor;
static PyObject *__pyx_n_s_ov;
static PyObject *__pyx_n_s_pos;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_s;
static PyObject *__pyx_n_s_shape;
static PyObject *__pyx_n_s_sigma;
static PyObject *__pyx_n_s_soft_nms_cpu;
static PyObject *__pyx_kp_s_src_soft_nms_cpu_pyx;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_n_s_ti;
static PyObject *__pyx_n_s_ts;
static PyObject *__pyx_n_s_tx1;
static PyObject *__pyx_n_s_tx2;
static PyObject *__pyx_n_s_ty1;
static PyObject *__pyx_n_s_ty2;
static PyObject *__pyx_n_s_ua;
static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd;
static PyObject *__pyx_n_s_weight;
static PyObject *__pyx_n_s_x1;
static PyObject *__pyx_n_s_x2;
static PyObject *__pyx_n_s_y1;
static PyObject *__pyx_n_s_y2;
static PyObject *__pyx_pf_12soft_nms_cpu_soft_nms_cpu(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_boxes_in, float __pyx_v_iou_thr, unsigned int __pyx_v_method, float __pyx_v_sigma, float __pyx_v_min_score); /* proto */
static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */
static PyObject *__pyx_int_0;
static PyObject *__pyx_int_1;
static PyObject *__pyx_int_2;
static PyObject *__pyx_int_3;
static PyObject *__pyx_int_4;
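/* Cached small-integer constants 0..4: in the function body below they act
 * as column indices into the boxes array, i.e. x1, y1, x2, y2 and the score
 * column accessed as boxes[i, 4].
 */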
static PyObject *__pyx_tuple_;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__3;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_tuple__8;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_tuple__10;
static PyObject *__pyx_codeobj__11;
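/* __pyx_tuple_ .. __pyx_tuple__10 and __pyx_codeobj__11 are filled in by the
 * cached-constants initialisation code later in the file; they presumably
 * hold the argument tuples for the ValueError/RuntimeError/ImportError
 * messages declared above plus the variable-name tuple and code object used
 * to build the soft_nms_cpu function.
 */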
/* Late includes */
/* "soft_nms_cpu.pyx":15
*
*
* cdef inline np.float32_t max(np.float32_t a, np.float32_t b): # <<<<<<<<<<<<<<
* return a if a >= b else b
*
*/
static CYTHON_INLINE __pyx_t_5numpy_float32_t __pyx_f_12soft_nms_cpu_max(__pyx_t_5numpy_float32_t __pyx_v_a, __pyx_t_5numpy_float32_t __pyx_v_b) {
__pyx_t_5numpy_float32_t __pyx_r;
__Pyx_RefNannyDeclarations
__pyx_t_5numpy_float32_t __pyx_t_1;
__Pyx_RefNannySetupContext("max", 0);
/* "soft_nms_cpu.pyx":16
*
* cdef inline np.float32_t max(np.float32_t a, np.float32_t b):
* return a if a >= b else b # <<<<<<<<<<<<<<
*
* cdef inline np.float32_t min(np.float32_t a, np.float32_t b):
*/
if (((__pyx_v_a >= __pyx_v_b) != 0)) {
__pyx_t_1 = __pyx_v_a;
} else {
__pyx_t_1 = __pyx_v_b;
}
__pyx_r = __pyx_t_1;
goto __pyx_L0;
/* "soft_nms_cpu.pyx":15
*
*
* cdef inline np.float32_t max(np.float32_t a, np.float32_t b): # <<<<<<<<<<<<<<
* return a if a >= b else b
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "soft_nms_cpu.pyx":18
* return a if a >= b else b
*
* cdef inline np.float32_t min(np.float32_t a, np.float32_t b): # <<<<<<<<<<<<<<
* return a if a <= b else b
*
*/
static CYTHON_INLINE __pyx_t_5numpy_float32_t __pyx_f_12soft_nms_cpu_min(__pyx_t_5numpy_float32_t __pyx_v_a, __pyx_t_5numpy_float32_t __pyx_v_b) {
__pyx_t_5numpy_float32_t __pyx_r;
__Pyx_RefNannyDeclarations
__pyx_t_5numpy_float32_t __pyx_t_1;
__Pyx_RefNannySetupContext("min", 0);
/* "soft_nms_cpu.pyx":19
*
* cdef inline np.float32_t min(np.float32_t a, np.float32_t b):
* return a if a <= b else b # <<<<<<<<<<<<<<
*
*
*/
if (((__pyx_v_a <= __pyx_v_b) != 0)) {
__pyx_t_1 = __pyx_v_a;
} else {
__pyx_t_1 = __pyx_v_b;
}
__pyx_r = __pyx_t_1;
goto __pyx_L0;
/* "soft_nms_cpu.pyx":18
* return a if a >= b else b
*
* cdef inline np.float32_t min(np.float32_t a, np.float32_t b): # <<<<<<<<<<<<<<
* return a if a <= b else b
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
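/* The two inline helpers above are the scalar max()/min() used by the
 * overlap test between the current top-scoring box (tx1,ty1,tx2,ty2) and a
 * candidate box (x1,y1,x2,y2). In the reference Soft-NMS implementation that
 * test is, roughly:
 *
 *     iw = min(tx2, x2) - max(tx1, x1) + 1
 *     ih = min(ty2, y2) - max(ty1, y1) + 1
 *     ua = (tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih
 *     ov = iw * ih / ua          (only evaluated when iw > 0 and ih > 0)
 *
 * The generated code for that computation appears later in this file; the
 * sketch above is only an approximation based on the variable names (iw, ih,
 * ua, ov, area) declared for this module.
 */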
/* "soft_nms_cpu.pyx":22
*
*
* def soft_nms_cpu( # <<<<<<<<<<<<<<
* np.ndarray[float, ndim=2] boxes_in,
* float iou_thr,
*/
/* Python wrapper */
static PyObject *__pyx_pw_12soft_nms_cpu_1soft_nms_cpu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_12soft_nms_cpu_1soft_nms_cpu = {"soft_nms_cpu", (PyCFunction)__pyx_pw_12soft_nms_cpu_1soft_nms_cpu, METH_VARARGS|METH_KEYWORDS, 0};
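/* The wrapper below is the function actually exposed to Python as
 * soft_nms_cpu() (METH_VARARGS|METH_KEYWORDS): it unpacks positional and
 * keyword arguments, applies defaults, type-checks boxes_in, and then
 * dispatches to the real implementation
 * __pyx_pf_12soft_nms_cpu_soft_nms_cpu().
 */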
static PyObject *__pyx_pw_12soft_nms_cpu_1soft_nms_cpu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyArrayObject *__pyx_v_boxes_in = 0;
float __pyx_v_iou_thr;
unsigned int __pyx_v_method;
float __pyx_v_sigma;
float __pyx_v_min_score;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("soft_nms_cpu (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_boxes_in,&__pyx_n_s_iou_thr,&__pyx_n_s_method,&__pyx_n_s_sigma,&__pyx_n_s_min_score,0};
PyObject* values[5] = {0,0,0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_boxes_in)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_iou_thr)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("soft_nms_cpu", 0, 2, 5, 1); __PYX_ERR(0, 22, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_method);
if (value) { values[2] = value; kw_args--; }
}
CYTHON_FALLTHROUGH;
case 3:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_sigma);
if (value) { values[3] = value; kw_args--; }
}
CYTHON_FALLTHROUGH;
case 4:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_min_score);
if (value) { values[4] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "soft_nms_cpu") < 0)) __PYX_ERR(0, 22, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_boxes_in = ((PyArrayObject *)values[0]);
__pyx_v_iou_thr = __pyx_PyFloat_AsFloat(values[1]); if (unlikely((__pyx_v_iou_thr == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 24, __pyx_L3_error)
if (values[2]) {
__pyx_v_method = __Pyx_PyInt_As_unsigned_int(values[2]); if (unlikely((__pyx_v_method == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 25, __pyx_L3_error)
} else {
__pyx_v_method = ((unsigned int)1);
}
if (values[3]) {
__pyx_v_sigma = __pyx_PyFloat_AsFloat(values[3]); if (unlikely((__pyx_v_sigma == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 26, __pyx_L3_error)
} else {
__pyx_v_sigma = ((float)0.5);
}
if (values[4]) {
__pyx_v_min_score = __pyx_PyFloat_AsFloat(values[4]); if (unlikely((__pyx_v_min_score == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 27, __pyx_L3_error)
} else {
__pyx_v_min_score = ((float)0.001);
}
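/* Defaults mirrored from the .pyx signature: method falls back to 1, sigma
 * to 0.5 and min_score to 0.001 when not supplied; boxes_in and iou_thr are
 * required (the argument-count checks above allow between 2 and 5
 * positional arguments).
 */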
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("soft_nms_cpu", 0, 2, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 22, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("soft_nms_cpu.soft_nms_cpu", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_boxes_in), __pyx_ptype_5numpy_ndarray, 1, "boxes_in", 0))) __PYX_ERR(0, 23, __pyx_L1_error)
__pyx_r = __pyx_pf_12soft_nms_cpu_soft_nms_cpu(__pyx_self, __pyx_v_boxes_in, __pyx_v_iou_thr, __pyx_v_method, __pyx_v_sigma, __pyx_v_min_score);
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_12soft_nms_cpu_soft_nms_cpu(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_boxes_in, float __pyx_v_iou_thr, unsigned int __pyx_v_method, float __pyx_v_sigma, float __pyx_v_min_score) {
PyObject *__pyx_v_boxes = NULL;
unsigned int __pyx_v_N;
float __pyx_v_iw;
float __pyx_v_ih;
float __pyx_v_ua;
int __pyx_v_pos;
float __pyx_v_maxscore;
int __pyx_v_maxpos;
float __pyx_v_x1;
float __pyx_v_x2;
float __pyx_v_y1;
float __pyx_v_y2;
float __pyx_v_tx1;
float __pyx_v_tx2;
float __pyx_v_ty1;
float __pyx_v_ty2;
float __pyx_v_ts;
float __pyx_v_area;
float __pyx_v_weight;
float __pyx_v_ov;
PyObject *__pyx_v_inds = NULL;
PyObject *__pyx_v_i = NULL;
PyObject *__pyx_v_ti = NULL;
CYTHON_UNUSED PyObject *__pyx_v_s = NULL;
__Pyx_LocalBuf_ND __pyx_pybuffernd_boxes_in;
__Pyx_Buffer __pyx_pybuffer_boxes_in;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
unsigned int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
Py_ssize_t __pyx_t_7;
PyObject *(*__pyx_t_8)(PyObject *);
float __pyx_t_9;
int __pyx_t_10;
int __pyx_t_11;
PyObject *__pyx_t_12 = NULL;
long __pyx_t_13;
__Pyx_RefNannySetupContext("soft_nms_cpu", 0);
__pyx_pybuffer_boxes_in.pybuffer.buf = NULL;
__pyx_pybuffer_boxes_in.refcount = 0;
__pyx_pybuffernd_boxes_in.data = NULL;
__pyx_pybuffernd_boxes_in.rcbuffer = &__pyx_pybuffer_boxes_in;
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_boxes_in.rcbuffer->pybuffer, (PyObject*)__pyx_v_boxes_in, &__Pyx_TypeInfo_float, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 22, __pyx_L1_error)
}
__pyx_pybuffernd_boxes_in.diminfo[0].strides = __pyx_pybuffernd_boxes_in.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_boxes_in.diminfo[0].shape = __pyx_pybuffernd_boxes_in.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_boxes_in.diminfo[1].strides = __pyx_pybuffernd_boxes_in.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_boxes_in.diminfo[1].shape = __pyx_pybuffernd_boxes_in.rcbuffer->pybuffer.shape[1];
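/* Buffer acquisition for boxes_in: PyBUF_FORMAT|PyBUF_STRIDES requests a
 * strided 2-D float buffer, and the strides/shape of both dimensions are
 * cached in diminfo so that later element accesses do not have to re-read
 * the Py_buffer struct.
 */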
/* "soft_nms_cpu.pyx":29
* float min_score=0.001,
* ):
* boxes = boxes_in.copy() # <<<<<<<<<<<<<<
* cdef unsigned int N = boxes.shape[0]
* cdef float iw, ih, box_area
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_boxes_in), __pyx_n_s_copy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 29, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_3)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
if (__pyx_t_3) {
__pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 29, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else {
__pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 29, __pyx_L1_error)
}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_boxes = __pyx_t_1;
__pyx_t_1 = 0;
/* "soft_nms_cpu.pyx":30
* ):
* boxes = boxes_in.copy()
* cdef unsigned int N = boxes.shape[0] # <<<<<<<<<<<<<<
* cdef float iw, ih, box_area
* cdef float ua
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_boxes, __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 30, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 30, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_4 = __Pyx_PyInt_As_unsigned_int(__pyx_t_2); if (unlikely((__pyx_t_4 == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 30, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_N = __pyx_t_4;
/* "soft_nms_cpu.pyx":33
* cdef float iw, ih, box_area
* cdef float ua
* cdef int pos = 0 # <<<<<<<<<<<<<<
* cdef float maxscore = 0
* cdef int maxpos = 0
*/
__pyx_v_pos = 0;
/* "soft_nms_cpu.pyx":34
* cdef float ua
* cdef int pos = 0
* cdef float maxscore = 0 # <<<<<<<<<<<<<<
* cdef int maxpos = 0
* cdef float x1, x2, y1, y2, tx1, tx2, ty1, ty2, ts, area, weight, ov
*/
__pyx_v_maxscore = 0.0;
/* "soft_nms_cpu.pyx":35
* cdef int pos = 0
* cdef float maxscore = 0
* cdef int maxpos = 0 # <<<<<<<<<<<<<<
* cdef float x1, x2, y1, y2, tx1, tx2, ty1, ty2, ts, area, weight, ov
* inds = np.arange(N)
*/
__pyx_v_maxpos = 0;
/* "soft_nms_cpu.pyx":37
* cdef int maxpos = 0
* cdef float x1, x2, y1, y2, tx1, tx2, ty1, ty2, ts, area, weight, ov
* inds = np.arange(N) # <<<<<<<<<<<<<<
*
* for i in range(N):
*/
__pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 37, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_arange); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 37, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyInt_From_unsigned_int(__pyx_v_N); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 37, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
if (!__pyx_t_5) {
__pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 37, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_GOTREF(__pyx_t_2);
} else {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_3)) {
PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_1};
__pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 37, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) {
PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_1};
__pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 37, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
{
__pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 37, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL;
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 37, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
}
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_inds = __pyx_t_2;
__pyx_t_2 = 0;
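/* inds = np.arange(N) gives every input box its original index; as rows of
 * `boxes` are reordered during the selection step (and, presumably, pruned
 * later in the function), this array lets the caller map surviving rows
 * back to positions in boxes_in.
 */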
/* "soft_nms_cpu.pyx":39
* inds = np.arange(N)
*
* for i in range(N): # <<<<<<<<<<<<<<
* maxscore = boxes[i, 4]
* maxpos = i
*/
__pyx_t_2 = __Pyx_PyInt_From_unsigned_int(__pyx_v_N); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 39, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_range, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 39, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
if (likely(PyList_CheckExact(__pyx_t_3)) || PyTuple_CheckExact(__pyx_t_3)) {
__pyx_t_2 = __pyx_t_3; __Pyx_INCREF(__pyx_t_2); __pyx_t_7 = 0;
__pyx_t_8 = NULL;
} else {
__pyx_t_7 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 39, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_8 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 39, __pyx_L1_error)
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
for (;;) {
if (likely(!__pyx_t_8)) {
if (likely(PyList_CheckExact(__pyx_t_2))) {
if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_7); __Pyx_INCREF(__pyx_t_3); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(0, 39, __pyx_L1_error)
#else
__pyx_t_3 = PySequence_ITEM(__pyx_t_2, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 39, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
#endif
} else {
if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_7); __Pyx_INCREF(__pyx_t_3); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(0, 39, __pyx_L1_error)
#else
__pyx_t_3 = PySequence_ITEM(__pyx_t_2, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 39, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
#endif
}
} else {
__pyx_t_3 = __pyx_t_8(__pyx_t_2);
if (unlikely(!__pyx_t_3)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(0, 39, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_3);
}
__Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_3);
__pyx_t_3 = 0;
/* "soft_nms_cpu.pyx":40
*
* for i in range(N):
* maxscore = boxes[i, 4] # <<<<<<<<<<<<<<
* maxpos = i
*
*/
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 40, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_i);
__Pyx_GIVEREF(__pyx_v_i);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_i);
__Pyx_INCREF(__pyx_int_4);
__Pyx_GIVEREF(__pyx_int_4);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_4);
__pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 40, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_6); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 40, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_v_maxscore = __pyx_t_9;
/* "soft_nms_cpu.pyx":41
* for i in range(N):
* maxscore = boxes[i, 4]
* maxpos = i # <<<<<<<<<<<<<<
*
* tx1 = boxes[i, 0]
*/
__pyx_t_10 = __Pyx_PyInt_As_int(__pyx_v_i); if (unlikely((__pyx_t_10 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 41, __pyx_L1_error)
__pyx_v_maxpos = __pyx_t_10;
/* "soft_nms_cpu.pyx":43
* maxpos = i
*
* tx1 = boxes[i, 0] # <<<<<<<<<<<<<<
* ty1 = boxes[i, 1]
* tx2 = boxes[i, 2]
*/
__pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 43, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_INCREF(__pyx_v_i);
__Pyx_GIVEREF(__pyx_v_i);
PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_i);
__Pyx_INCREF(__pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_0);
__pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 43, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 43, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_tx1 = __pyx_t_9;
/* "soft_nms_cpu.pyx":44
*
* tx1 = boxes[i, 0]
* ty1 = boxes[i, 1] # <<<<<<<<<<<<<<
* tx2 = boxes[i, 2]
* ty2 = boxes[i, 3]
*/
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 44, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_i);
__Pyx_GIVEREF(__pyx_v_i);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_i);
__Pyx_INCREF(__pyx_int_1);
__Pyx_GIVEREF(__pyx_int_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_1);
__pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 44, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_6); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 44, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_v_ty1 = __pyx_t_9;
/* "soft_nms_cpu.pyx":45
* tx1 = boxes[i, 0]
* ty1 = boxes[i, 1]
* tx2 = boxes[i, 2] # <<<<<<<<<<<<<<
* ty2 = boxes[i, 3]
* ts = boxes[i, 4]
*/
__pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 45, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_INCREF(__pyx_v_i);
__Pyx_GIVEREF(__pyx_v_i);
PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_i);
__Pyx_INCREF(__pyx_int_2);
__Pyx_GIVEREF(__pyx_int_2);
PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_2);
__pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 45, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 45, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_tx2 = __pyx_t_9;
/* "soft_nms_cpu.pyx":46
* ty1 = boxes[i, 1]
* tx2 = boxes[i, 2]
* ty2 = boxes[i, 3] # <<<<<<<<<<<<<<
* ts = boxes[i, 4]
* ti = inds[i]
*/
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 46, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_i);
__Pyx_GIVEREF(__pyx_v_i);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_i);
__Pyx_INCREF(__pyx_int_3);
__Pyx_GIVEREF(__pyx_int_3);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_3);
__pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 46, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_6); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 46, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_v_ty2 = __pyx_t_9;
/* "soft_nms_cpu.pyx":47
* tx2 = boxes[i, 2]
* ty2 = boxes[i, 3]
* ts = boxes[i, 4] # <<<<<<<<<<<<<<
* ti = inds[i]
*
*/
__pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 47, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_INCREF(__pyx_v_i);
__Pyx_GIVEREF(__pyx_v_i);
PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_i);
__Pyx_INCREF(__pyx_int_4);
__Pyx_GIVEREF(__pyx_int_4);
PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_4);
__pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 47, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 47, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_ts = __pyx_t_9;
/* "soft_nms_cpu.pyx":48
* ty2 = boxes[i, 3]
* ts = boxes[i, 4]
* ti = inds[i] # <<<<<<<<<<<<<<
*
* pos = i + 1
*/
__pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_inds, __pyx_v_i); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 48, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_XDECREF_SET(__pyx_v_ti, __pyx_t_3);
__pyx_t_3 = 0;
/* "soft_nms_cpu.pyx":50
* ti = inds[i]
*
* pos = i + 1 # <<<<<<<<<<<<<<
* # get max box
* while pos < N:
*/
__pyx_t_3 = __Pyx_PyInt_AddObjC(__pyx_v_i, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 50, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_10 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_10 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 50, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_pos = __pyx_t_10;
/* "soft_nms_cpu.pyx":52
* pos = i + 1
* # get max box
* while pos < N: # <<<<<<<<<<<<<<
* if maxscore < boxes[pos, 4]:
* maxscore = boxes[pos, 4]
*/
while (1) {
__pyx_t_11 = ((__pyx_v_pos < __pyx_v_N) != 0);
if (!__pyx_t_11) break;
/* "soft_nms_cpu.pyx":53
* # get max box
* while pos < N:
* if maxscore < boxes[pos, 4]: # <<<<<<<<<<<<<<
* maxscore = boxes[pos, 4]
* maxpos = pos
*/
__pyx_t_3 = PyFloat_FromDouble(__pyx_v_maxscore); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 53, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 53, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 53, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_6);
__Pyx_INCREF(__pyx_int_4);
__Pyx_GIVEREF(__pyx_int_4);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_4);
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 53, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyObject_RichCompare(__pyx_t_3, __pyx_t_6, Py_LT); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 53, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 53, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (__pyx_t_11) {
/* "soft_nms_cpu.pyx":54
* while pos < N:
* if maxscore < boxes[pos, 4]:
* maxscore = boxes[pos, 4] # <<<<<<<<<<<<<<
* maxpos = pos
* pos = pos + 1
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 54, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 54, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1);
__Pyx_INCREF(__pyx_int_4);
__Pyx_GIVEREF(__pyx_int_4);
PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_4);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 54, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_1); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 54, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_maxscore = __pyx_t_9;
/* "soft_nms_cpu.pyx":55
* if maxscore < boxes[pos, 4]:
* maxscore = boxes[pos, 4]
* maxpos = pos # <<<<<<<<<<<<<<
* pos = pos + 1
*
*/
__pyx_v_maxpos = __pyx_v_pos;
/* "soft_nms_cpu.pyx":53
* # get max box
* while pos < N:
* if maxscore < boxes[pos, 4]: # <<<<<<<<<<<<<<
* maxscore = boxes[pos, 4]
* maxpos = pos
*/
}
/* "soft_nms_cpu.pyx":56
* maxscore = boxes[pos, 4]
* maxpos = pos
* pos = pos + 1 # <<<<<<<<<<<<<<
*
* # add max box as a detection
*/
__pyx_v_pos = (__pyx_v_pos + 1);
}
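  /* End of the "get max box" scan: maxscore/maxpos now hold the highest
   * score among rows [i, N) and the row index where it was found
   * (maxscore/maxpos start at row i itself, per the quoted .pyx source). */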
/* "soft_nms_cpu.pyx":59
*
* # add max box as a detection
* boxes[i, 0] = boxes[maxpos, 0] # <<<<<<<<<<<<<<
* boxes[i, 1] = boxes[maxpos, 1]
* boxes[i, 2] = boxes[maxpos, 2]
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 59, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 59, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1);
__Pyx_INCREF(__pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_0);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 59, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 59, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_INCREF(__pyx_v_i);
__Pyx_GIVEREF(__pyx_v_i);
PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_i);
__Pyx_INCREF(__pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_0);
if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_6, __pyx_t_1) < 0)) __PYX_ERR(0, 59, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "soft_nms_cpu.pyx":60
* # add max box as a detection
* boxes[i, 0] = boxes[maxpos, 0]
* boxes[i, 1] = boxes[maxpos, 1] # <<<<<<<<<<<<<<
* boxes[i, 2] = boxes[maxpos, 2]
* boxes[i, 3] = boxes[maxpos, 3]
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 60, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 60, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1);
__Pyx_INCREF(__pyx_int_1);
__Pyx_GIVEREF(__pyx_int_1);
PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_1);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 60, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 60, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_INCREF(__pyx_v_i);
__Pyx_GIVEREF(__pyx_v_i);
PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_i);
__Pyx_INCREF(__pyx_int_1);
__Pyx_GIVEREF(__pyx_int_1);
PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_1);
if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_6, __pyx_t_1) < 0)) __PYX_ERR(0, 60, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "soft_nms_cpu.pyx":61
* boxes[i, 0] = boxes[maxpos, 0]
* boxes[i, 1] = boxes[maxpos, 1]
* boxes[i, 2] = boxes[maxpos, 2] # <<<<<<<<<<<<<<
* boxes[i, 3] = boxes[maxpos, 3]
* boxes[i, 4] = boxes[maxpos, 4]
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1);
__Pyx_INCREF(__pyx_int_2);
__Pyx_GIVEREF(__pyx_int_2);
PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_2);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_INCREF(__pyx_v_i);
__Pyx_GIVEREF(__pyx_v_i);
PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_i);
__Pyx_INCREF(__pyx_int_2);
__Pyx_GIVEREF(__pyx_int_2);
PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_2);
if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_6, __pyx_t_1) < 0)) __PYX_ERR(0, 61, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "soft_nms_cpu.pyx":62
* boxes[i, 1] = boxes[maxpos, 1]
* boxes[i, 2] = boxes[maxpos, 2]
* boxes[i, 3] = boxes[maxpos, 3] # <<<<<<<<<<<<<<
* boxes[i, 4] = boxes[maxpos, 4]
* inds[i] = inds[maxpos]
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1);
__Pyx_INCREF(__pyx_int_3);
__Pyx_GIVEREF(__pyx_int_3);
PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_3);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_INCREF(__pyx_v_i);
__Pyx_GIVEREF(__pyx_v_i);
PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_i);
__Pyx_INCREF(__pyx_int_3);
__Pyx_GIVEREF(__pyx_int_3);
PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_3);
if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_6, __pyx_t_1) < 0)) __PYX_ERR(0, 62, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "soft_nms_cpu.pyx":63
* boxes[i, 2] = boxes[maxpos, 2]
* boxes[i, 3] = boxes[maxpos, 3]
* boxes[i, 4] = boxes[maxpos, 4] # <<<<<<<<<<<<<<
* inds[i] = inds[maxpos]
*
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 63, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 63, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1);
__Pyx_INCREF(__pyx_int_4);
__Pyx_GIVEREF(__pyx_int_4);
PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_4);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 63, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 63, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_INCREF(__pyx_v_i);
__Pyx_GIVEREF(__pyx_v_i);
PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_i);
__Pyx_INCREF(__pyx_int_4);
__Pyx_GIVEREF(__pyx_int_4);
PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_4);
if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_6, __pyx_t_1) < 0)) __PYX_ERR(0, 63, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "soft_nms_cpu.pyx":64
* boxes[i, 3] = boxes[maxpos, 3]
* boxes[i, 4] = boxes[maxpos, 4]
* inds[i] = inds[maxpos] # <<<<<<<<<<<<<<
*
* # swap ith box with position of max box
*/
__pyx_t_1 = __Pyx_GetItemInt(__pyx_v_inds, __pyx_v_maxpos, int, 1, __Pyx_PyInt_From_int, 0, 1, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 64, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (unlikely(PyObject_SetItem(__pyx_v_inds, __pyx_v_i, __pyx_t_1) < 0)) __PYX_ERR(0, 64, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "soft_nms_cpu.pyx":67
*
* # swap ith box with position of max box
* boxes[maxpos, 0] = tx1 # <<<<<<<<<<<<<<
* boxes[maxpos, 1] = ty1
* boxes[maxpos, 2] = tx2
*/
__pyx_t_1 = PyFloat_FromDouble(__pyx_v_tx1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 67, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 67, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 67, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6);
__Pyx_INCREF(__pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0);
__pyx_t_6 = 0;
if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_3, __pyx_t_1) < 0)) __PYX_ERR(0, 67, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "soft_nms_cpu.pyx":68
* # swap ith box with position of max box
* boxes[maxpos, 0] = tx1
* boxes[maxpos, 1] = ty1 # <<<<<<<<<<<<<<
* boxes[maxpos, 2] = tx2
* boxes[maxpos, 3] = ty2
*/
__pyx_t_1 = PyFloat_FromDouble(__pyx_v_ty1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 68, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 68, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 68, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3);
__Pyx_INCREF(__pyx_int_1);
__Pyx_GIVEREF(__pyx_int_1);
PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_1);
__pyx_t_3 = 0;
if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_6, __pyx_t_1) < 0)) __PYX_ERR(0, 68, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "soft_nms_cpu.pyx":69
* boxes[maxpos, 0] = tx1
* boxes[maxpos, 1] = ty1
* boxes[maxpos, 2] = tx2 # <<<<<<<<<<<<<<
* boxes[maxpos, 3] = ty2
* boxes[maxpos, 4] = ts
*/
__pyx_t_1 = PyFloat_FromDouble(__pyx_v_tx2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 69, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 69, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 69, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6);
__Pyx_INCREF(__pyx_int_2);
__Pyx_GIVEREF(__pyx_int_2);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_2);
__pyx_t_6 = 0;
if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_3, __pyx_t_1) < 0)) __PYX_ERR(0, 69, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "soft_nms_cpu.pyx":70
* boxes[maxpos, 1] = ty1
* boxes[maxpos, 2] = tx2
* boxes[maxpos, 3] = ty2 # <<<<<<<<<<<<<<
* boxes[maxpos, 4] = ts
* inds[maxpos] = ti
*/
__pyx_t_1 = PyFloat_FromDouble(__pyx_v_ty2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 70, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 70, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 70, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3);
__Pyx_INCREF(__pyx_int_3);
__Pyx_GIVEREF(__pyx_int_3);
PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_3);
__pyx_t_3 = 0;
if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_6, __pyx_t_1) < 0)) __PYX_ERR(0, 70, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "soft_nms_cpu.pyx":71
* boxes[maxpos, 2] = tx2
* boxes[maxpos, 3] = ty2
* boxes[maxpos, 4] = ts # <<<<<<<<<<<<<<
* inds[maxpos] = ti
*
*/
__pyx_t_1 = PyFloat_FromDouble(__pyx_v_ts); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6);
__Pyx_INCREF(__pyx_int_4);
__Pyx_GIVEREF(__pyx_int_4);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_4);
__pyx_t_6 = 0;
if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_3, __pyx_t_1) < 0)) __PYX_ERR(0, 71, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "soft_nms_cpu.pyx":72
* boxes[maxpos, 3] = ty2
* boxes[maxpos, 4] = ts
* inds[maxpos] = ti # <<<<<<<<<<<<<<
*
* tx1 = boxes[i, 0]
*/
if (unlikely(__Pyx_SetItemInt(__pyx_v_inds, __pyx_v_maxpos, __pyx_v_ti, int, 1, __Pyx_PyInt_From_int, 0, 1, 0) < 0)) __PYX_ERR(0, 72, __pyx_L1_error)
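    /* Swap complete: the highest-scoring remaining detection has been
     * copied into row i (and inds[i]), while the previous row-i values
     * saved in tx1..ts / ti were written back to row maxpos. */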
/* "soft_nms_cpu.pyx":74
* inds[maxpos] = ti
*
* tx1 = boxes[i, 0] # <<<<<<<<<<<<<<
* ty1 = boxes[i, 1]
* tx2 = boxes[i, 2]
*/
__pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 74, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v_i);
__Pyx_GIVEREF(__pyx_v_i);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_i);
__Pyx_INCREF(__pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_0);
__pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 74, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 74, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_tx1 = __pyx_t_9;
/* "soft_nms_cpu.pyx":75
*
* tx1 = boxes[i, 0]
* ty1 = boxes[i, 1] # <<<<<<<<<<<<<<
* tx2 = boxes[i, 2]
* ty2 = boxes[i, 3]
*/
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 75, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_i);
__Pyx_GIVEREF(__pyx_v_i);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_i);
__Pyx_INCREF(__pyx_int_1);
__Pyx_GIVEREF(__pyx_int_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_1);
__pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 75, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_1); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 75, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_ty1 = __pyx_t_9;
/* "soft_nms_cpu.pyx":76
* tx1 = boxes[i, 0]
* ty1 = boxes[i, 1]
* tx2 = boxes[i, 2] # <<<<<<<<<<<<<<
* ty2 = boxes[i, 3]
* ts = boxes[i, 4]
*/
__pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 76, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v_i);
__Pyx_GIVEREF(__pyx_v_i);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_i);
__Pyx_INCREF(__pyx_int_2);
__Pyx_GIVEREF(__pyx_int_2);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_2);
__pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 76, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 76, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_tx2 = __pyx_t_9;
/* "soft_nms_cpu.pyx":77
* ty1 = boxes[i, 1]
* tx2 = boxes[i, 2]
* ty2 = boxes[i, 3] # <<<<<<<<<<<<<<
* ts = boxes[i, 4]
*
*/
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 77, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_i);
__Pyx_GIVEREF(__pyx_v_i);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_i);
__Pyx_INCREF(__pyx_int_3);
__Pyx_GIVEREF(__pyx_int_3);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_3);
__pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 77, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_1); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 77, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_ty2 = __pyx_t_9;
/* "soft_nms_cpu.pyx":78
* tx2 = boxes[i, 2]
* ty2 = boxes[i, 3]
* ts = boxes[i, 4] # <<<<<<<<<<<<<<
*
* pos = i + 1
*/
__pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 78, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v_i);
__Pyx_GIVEREF(__pyx_v_i);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_i);
__Pyx_INCREF(__pyx_int_4);
__Pyx_GIVEREF(__pyx_int_4);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_4);
__pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 78, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 78, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_ts = __pyx_t_9;
/* "soft_nms_cpu.pyx":80
* ts = boxes[i, 4]
*
* pos = i + 1 # <<<<<<<<<<<<<<
* # NMS iterations, note that N changes if detection boxes fall below
* # threshold
*/
__pyx_t_3 = __Pyx_PyInt_AddObjC(__pyx_v_i, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 80, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_10 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_10 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 80, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_pos = __pyx_t_10;
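    /* Suppression pass: pos restarts at i + 1 and walks the remaining
     * rows. N shrinks inside the loop whenever a rescored box falls
     * below min_score (see the quoted .pyx comment above). */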
/* "soft_nms_cpu.pyx":83
* # NMS iterations, note that N changes if detection boxes fall below
* # threshold
* while pos < N: # <<<<<<<<<<<<<<
* x1 = boxes[pos, 0]
* y1 = boxes[pos, 1]
*/
while (1) {
__pyx_t_11 = ((__pyx_v_pos < __pyx_v_N) != 0);
if (!__pyx_t_11) break;
/* "soft_nms_cpu.pyx":84
* # threshold
* while pos < N:
* x1 = boxes[pos, 0] # <<<<<<<<<<<<<<
* y1 = boxes[pos, 1]
* x2 = boxes[pos, 2]
*/
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 84, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 84, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3);
__Pyx_INCREF(__pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_0);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 84, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 84, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_x1 = __pyx_t_9;
/* "soft_nms_cpu.pyx":85
* while pos < N:
* x1 = boxes[pos, 0]
* y1 = boxes[pos, 1] # <<<<<<<<<<<<<<
* x2 = boxes[pos, 2]
* y2 = boxes[pos, 3]
*/
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 85, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 85, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3);
__Pyx_INCREF(__pyx_int_1);
__Pyx_GIVEREF(__pyx_int_1);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_1);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 85, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 85, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_y1 = __pyx_t_9;
/* "soft_nms_cpu.pyx":86
* x1 = boxes[pos, 0]
* y1 = boxes[pos, 1]
* x2 = boxes[pos, 2] # <<<<<<<<<<<<<<
* y2 = boxes[pos, 3]
* s = boxes[pos, 4]
*/
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 86, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 86, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3);
__Pyx_INCREF(__pyx_int_2);
__Pyx_GIVEREF(__pyx_int_2);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_2);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 86, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 86, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_x2 = __pyx_t_9;
/* "soft_nms_cpu.pyx":87
* y1 = boxes[pos, 1]
* x2 = boxes[pos, 2]
* y2 = boxes[pos, 3] # <<<<<<<<<<<<<<
* s = boxes[pos, 4]
*
*/
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 87, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 87, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3);
__Pyx_INCREF(__pyx_int_3);
__Pyx_GIVEREF(__pyx_int_3);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_3);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 87, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 87, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_y2 = __pyx_t_9;
/* "soft_nms_cpu.pyx":88
* x2 = boxes[pos, 2]
* y2 = boxes[pos, 3]
* s = boxes[pos, 4] # <<<<<<<<<<<<<<
*
* area = (x2 - x1 + 1) * (y2 - y1 + 1)
*/
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 88, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 88, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3);
__Pyx_INCREF(__pyx_int_4);
__Pyx_GIVEREF(__pyx_int_4);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_4);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 88, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF_SET(__pyx_v_s, __pyx_t_3);
__pyx_t_3 = 0;
/* "soft_nms_cpu.pyx":90
* s = boxes[pos, 4]
*
* area = (x2 - x1 + 1) * (y2 - y1 + 1) # <<<<<<<<<<<<<<
* iw = (min(tx2, x2) - max(tx1, x1) + 1)
* if iw > 0:
*/
__pyx_v_area = (((__pyx_v_x2 - __pyx_v_x1) + 1.0) * ((__pyx_v_y2 - __pyx_v_y1) + 1.0));
/* "soft_nms_cpu.pyx":91
*
* area = (x2 - x1 + 1) * (y2 - y1 + 1)
* iw = (min(tx2, x2) - max(tx1, x1) + 1) # <<<<<<<<<<<<<<
* if iw > 0:
* ih = (min(ty2, y2) - max(ty1, y1) + 1)
*/
__pyx_v_iw = ((__pyx_f_12soft_nms_cpu_min(__pyx_v_tx2, __pyx_v_x2) - __pyx_f_12soft_nms_cpu_max(__pyx_v_tx1, __pyx_v_x1)) + 1.0);
/* "soft_nms_cpu.pyx":92
* area = (x2 - x1 + 1) * (y2 - y1 + 1)
* iw = (min(tx2, x2) - max(tx1, x1) + 1)
* if iw > 0: # <<<<<<<<<<<<<<
* ih = (min(ty2, y2) - max(ty1, y1) + 1)
* if ih > 0:
*/
__pyx_t_11 = ((__pyx_v_iw > 0.0) != 0);
if (__pyx_t_11) {
/* "soft_nms_cpu.pyx":93
* iw = (min(tx2, x2) - max(tx1, x1) + 1)
* if iw > 0:
* ih = (min(ty2, y2) - max(ty1, y1) + 1) # <<<<<<<<<<<<<<
* if ih > 0:
* ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih)
*/
__pyx_v_ih = ((__pyx_f_12soft_nms_cpu_min(__pyx_v_ty2, __pyx_v_y2) - __pyx_f_12soft_nms_cpu_max(__pyx_v_ty1, __pyx_v_y1)) + 1.0);
/* "soft_nms_cpu.pyx":94
* if iw > 0:
* ih = (min(ty2, y2) - max(ty1, y1) + 1)
* if ih > 0: # <<<<<<<<<<<<<<
* ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih)
* ov = iw * ih / ua # iou between max box and detection box
*/
__pyx_t_11 = ((__pyx_v_ih > 0.0) != 0);
if (__pyx_t_11) {
/* "soft_nms_cpu.pyx":95
* ih = (min(ty2, y2) - max(ty1, y1) + 1)
* if ih > 0:
* ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih) # <<<<<<<<<<<<<<
* ov = iw * ih / ua # iou between max box and detection box
*
*/
__pyx_v_ua = ((double)(((((__pyx_v_tx2 - __pyx_v_tx1) + 1.0) * ((__pyx_v_ty2 - __pyx_v_ty1) + 1.0)) + __pyx_v_area) - (__pyx_v_iw * __pyx_v_ih)));
/* "soft_nms_cpu.pyx":96
* if ih > 0:
* ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih)
* ov = iw * ih / ua # iou between max box and detection box # <<<<<<<<<<<<<<
*
* if method == 1: # linear
*/
__pyx_t_9 = (__pyx_v_iw * __pyx_v_ih);
if (unlikely(__pyx_v_ua == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
__PYX_ERR(0, 96, __pyx_L1_error)
}
__pyx_v_ov = (__pyx_t_9 / __pyx_v_ua);
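          /* ov is the IoU between the max box (tx1..ty2) and the candidate
           * at pos: iw/ih are the overlap width/height (the +1 terms treat
           * coordinates as inclusive pixel indices) and ua is the union
           * area. Illustrative case (not from the source): identical boxes
           * give iw*ih == ua, hence ov == 1.0. */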
/* "soft_nms_cpu.pyx":98
* ov = iw * ih / ua # iou between max box and detection box
*
* if method == 1: # linear # <<<<<<<<<<<<<<
* if ov > iou_thr:
* weight = 1 - ov
*/
switch (__pyx_v_method) {
case 1:
/* "soft_nms_cpu.pyx":99
*
* if method == 1: # linear
* if ov > iou_thr: # <<<<<<<<<<<<<<
* weight = 1 - ov
* else:
*/
__pyx_t_11 = ((__pyx_v_ov > __pyx_v_iou_thr) != 0);
if (__pyx_t_11) {
/* "soft_nms_cpu.pyx":100
* if method == 1: # linear
* if ov > iou_thr:
* weight = 1 - ov # <<<<<<<<<<<<<<
* else:
* weight = 1
*/
__pyx_v_weight = (1.0 - __pyx_v_ov);
/* "soft_nms_cpu.pyx":99
*
* if method == 1: # linear
* if ov > iou_thr: # <<<<<<<<<<<<<<
* weight = 1 - ov
* else:
*/
goto __pyx_L12;
}
/* "soft_nms_cpu.pyx":102
* weight = 1 - ov
* else:
* weight = 1 # <<<<<<<<<<<<<<
* elif method == 2: # gaussian
* weight = np.exp(-(ov * ov) / sigma)
*/
/*else*/ {
__pyx_v_weight = 1.0;
}
__pyx_L12:;
/* "soft_nms_cpu.pyx":98
* ov = iw * ih / ua # iou between max box and detection box
*
* if method == 1: # linear # <<<<<<<<<<<<<<
* if ov > iou_thr:
* weight = 1 - ov
*/
break;
/* "soft_nms_cpu.pyx":103
* else:
* weight = 1
* elif method == 2: # gaussian # <<<<<<<<<<<<<<
* weight = np.exp(-(ov * ov) / sigma)
* else: # original NMS
*/
case 2:
/* "soft_nms_cpu.pyx":104
* weight = 1
* elif method == 2: # gaussian
* weight = np.exp(-(ov * ov) / sigma) # <<<<<<<<<<<<<<
* else: # original NMS
* if ov > iou_thr:
*/
__pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 104, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_exp); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 104, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_9 = (-(__pyx_v_ov * __pyx_v_ov));
if (unlikely(__pyx_v_sigma == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
__PYX_ERR(0, 104, __pyx_L1_error)
}
__pyx_t_1 = PyFloat_FromDouble((__pyx_t_9 / __pyx_v_sigma)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 104, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_6, function);
}
}
if (!__pyx_t_5) {
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 104, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_GOTREF(__pyx_t_3);
} else {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_1};
__pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 104, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_1};
__pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 104, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
{
__pyx_t_12 = PyTuple_New(1+1); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 104, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_12);
__Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_5); __pyx_t_5 = NULL;
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_12, 0+1, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_12, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 104, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
}
}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 104, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_weight = __pyx_t_9;
/* "soft_nms_cpu.pyx":103
* else:
* weight = 1
* elif method == 2: # gaussian # <<<<<<<<<<<<<<
* weight = np.exp(-(ov * ov) / sigma)
* else: # original NMS
*/
break;
default:
/* "soft_nms_cpu.pyx":106
* weight = np.exp(-(ov * ov) / sigma)
* else: # original NMS
* if ov > iou_thr: # <<<<<<<<<<<<<<
* weight = 0
* else:
*/
__pyx_t_11 = ((__pyx_v_ov > __pyx_v_iou_thr) != 0);
if (__pyx_t_11) {
/* "soft_nms_cpu.pyx":107
* else: # original NMS
* if ov > iou_thr:
* weight = 0 # <<<<<<<<<<<<<<
* else:
* weight = 1
*/
__pyx_v_weight = 0.0;
/* "soft_nms_cpu.pyx":106
* weight = np.exp(-(ov * ov) / sigma)
* else: # original NMS
* if ov > iou_thr: # <<<<<<<<<<<<<<
* weight = 0
* else:
*/
goto __pyx_L13;
}
/* "soft_nms_cpu.pyx":109
* weight = 0
* else:
* weight = 1 # <<<<<<<<<<<<<<
*
* boxes[pos, 4] = weight * boxes[pos, 4]
*/
/*else*/ {
__pyx_v_weight = 1.0;
}
__pyx_L13:;
break;
}
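          /* Decay weight chosen per `method`:
           *   method == 1 (linear):   weight = 1 - ov if ov > iou_thr else 1
           *   method == 2 (gaussian): weight = exp(-ov*ov / sigma)
           *   otherwise (hard NMS):   weight = 0 if ov > iou_thr else 1
           * Illustrative values (not from the source): ov = 0.8,
           * iou_thr = 0.3, sigma = 0.5 give weights 0.2, exp(-1.28) ~= 0.28
           * and 0, respectively. */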
/* "soft_nms_cpu.pyx":111
* weight = 1
*
* boxes[pos, 4] = weight * boxes[pos, 4] # <<<<<<<<<<<<<<
*
* # if box score falls below threshold, discard the box by
*/
__pyx_t_3 = PyFloat_FromDouble(__pyx_v_weight); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 111, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 111, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 111, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_12);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_6);
__Pyx_INCREF(__pyx_int_4);
__Pyx_GIVEREF(__pyx_int_4);
PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_int_4);
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_12); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 111, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
__pyx_t_12 = PyNumber_Multiply(__pyx_t_3, __pyx_t_6); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 111, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_12);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 111, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 111, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6);
__Pyx_INCREF(__pyx_int_4);
__Pyx_GIVEREF(__pyx_int_4);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_4);
__pyx_t_6 = 0;
if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_3, __pyx_t_12) < 0)) __PYX_ERR(0, 111, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
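        /* The candidate's score is decayed in place
         * (boxes[pos, 4] *= weight); with hard NMS a weight of 0 zeroes
         * the score, so the box is dropped by the min_score check below. */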
/* "soft_nms_cpu.pyx":115
* # if box score falls below threshold, discard the box by
* # swapping with last box update N
* if boxes[pos, 4] < min_score: # <<<<<<<<<<<<<<
* boxes[pos, 0] = boxes[N-1, 0]
* boxes[pos, 1] = boxes[N-1, 1]
*/
__pyx_t_12 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 115, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_12);
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 115, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_12);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_12);
__Pyx_INCREF(__pyx_int_4);
__Pyx_GIVEREF(__pyx_int_4);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_4);
__pyx_t_12 = 0;
__pyx_t_12 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_3); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 115, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_12);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyFloat_FromDouble(__pyx_v_min_score); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 115, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_6 = PyObject_RichCompare(__pyx_t_12, __pyx_t_3, Py_LT); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 115, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 115, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (__pyx_t_11) {
/* "soft_nms_cpu.pyx":116
* # swapping with last box update N
* if boxes[pos, 4] < min_score:
* boxes[pos, 0] = boxes[N-1, 0] # <<<<<<<<<<<<<<
* boxes[pos, 1] = boxes[N-1, 1]
* boxes[pos, 2] = boxes[N-1, 2]
*/
__pyx_t_6 = __Pyx_PyInt_From_long((__pyx_v_N - 1)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6);
__Pyx_INCREF(__pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0);
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_12);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_3);
__Pyx_INCREF(__pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_int_0);
__pyx_t_3 = 0;
if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_12, __pyx_t_6) < 0)) __PYX_ERR(0, 116, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
/* "soft_nms_cpu.pyx":117
* if boxes[pos, 4] < min_score:
* boxes[pos, 0] = boxes[N-1, 0]
* boxes[pos, 1] = boxes[N-1, 1] # <<<<<<<<<<<<<<
* boxes[pos, 2] = boxes[N-1, 2]
* boxes[pos, 3] = boxes[N-1, 3]
*/
__pyx_t_6 = __Pyx_PyInt_From_long((__pyx_v_N - 1)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 117, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 117, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_12);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_6);
__Pyx_INCREF(__pyx_int_1);
__Pyx_GIVEREF(__pyx_int_1);
PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_int_1);
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_12); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 117, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
__pyx_t_12 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 117, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_12);
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 117, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_12);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_12);
__Pyx_INCREF(__pyx_int_1);
__Pyx_GIVEREF(__pyx_int_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_1);
__pyx_t_12 = 0;
if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_3, __pyx_t_6) < 0)) __PYX_ERR(0, 117, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
/* "soft_nms_cpu.pyx":118
* boxes[pos, 0] = boxes[N-1, 0]
* boxes[pos, 1] = boxes[N-1, 1]
* boxes[pos, 2] = boxes[N-1, 2] # <<<<<<<<<<<<<<
* boxes[pos, 3] = boxes[N-1, 3]
* boxes[pos, 4] = boxes[N-1, 4]
*/
__pyx_t_6 = __Pyx_PyInt_From_long((__pyx_v_N - 1)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 118, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 118, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6);
__Pyx_INCREF(__pyx_int_2);
__Pyx_GIVEREF(__pyx_int_2);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_2);
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 118, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 118, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 118, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_12);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_3);
__Pyx_INCREF(__pyx_int_2);
__Pyx_GIVEREF(__pyx_int_2);
PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_int_2);
__pyx_t_3 = 0;
if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_12, __pyx_t_6) < 0)) __PYX_ERR(0, 118, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
/* "soft_nms_cpu.pyx":119
* boxes[pos, 1] = boxes[N-1, 1]
* boxes[pos, 2] = boxes[N-1, 2]
* boxes[pos, 3] = boxes[N-1, 3] # <<<<<<<<<<<<<<
* boxes[pos, 4] = boxes[N-1, 4]
* inds[pos] = inds[N - 1]
*/
__pyx_t_6 = __Pyx_PyInt_From_long((__pyx_v_N - 1)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 119, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 119, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_12);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_6);
__Pyx_INCREF(__pyx_int_3);
__Pyx_GIVEREF(__pyx_int_3);
PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_int_3);
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_12); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 119, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
__pyx_t_12 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 119, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_12);
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 119, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_12);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_12);
__Pyx_INCREF(__pyx_int_3);
__Pyx_GIVEREF(__pyx_int_3);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_3);
__pyx_t_12 = 0;
if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_3, __pyx_t_6) < 0)) __PYX_ERR(0, 119, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
/* "soft_nms_cpu.pyx":120
* boxes[pos, 2] = boxes[N-1, 2]
* boxes[pos, 3] = boxes[N-1, 3]
* boxes[pos, 4] = boxes[N-1, 4] # <<<<<<<<<<<<<<
* inds[pos] = inds[N - 1]
* N = N - 1
*/
__pyx_t_6 = __Pyx_PyInt_From_long((__pyx_v_N - 1)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 120, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 120, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6);
__Pyx_INCREF(__pyx_int_4);
__Pyx_GIVEREF(__pyx_int_4);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_4);
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 120, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 120, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 120, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_12);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_3);
__Pyx_INCREF(__pyx_int_4);
__Pyx_GIVEREF(__pyx_int_4);
PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_int_4);
__pyx_t_3 = 0;
if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_12, __pyx_t_6) < 0)) __PYX_ERR(0, 120, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
/* "soft_nms_cpu.pyx":121
* boxes[pos, 3] = boxes[N-1, 3]
* boxes[pos, 4] = boxes[N-1, 4]
* inds[pos] = inds[N - 1] # <<<<<<<<<<<<<<
* N = N - 1
* pos = pos - 1
*/
__pyx_t_13 = (__pyx_v_N - 1);
__pyx_t_6 = __Pyx_GetItemInt(__pyx_v_inds, __pyx_t_13, long, 1, __Pyx_PyInt_From_long, 0, 1, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 121, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
if (unlikely(__Pyx_SetItemInt(__pyx_v_inds, __pyx_v_pos, __pyx_t_6, int, 1, __Pyx_PyInt_From_int, 0, 1, 0) < 0)) __PYX_ERR(0, 121, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
/* "soft_nms_cpu.pyx":122
* boxes[pos, 4] = boxes[N-1, 4]
* inds[pos] = inds[N - 1]
* N = N - 1 # <<<<<<<<<<<<<<
* pos = pos - 1
*
*/
__pyx_v_N = (__pyx_v_N - 1);
/* "soft_nms_cpu.pyx":123
* inds[pos] = inds[N - 1]
* N = N - 1
* pos = pos - 1 # <<<<<<<<<<<<<<
*
* pos = pos + 1
*/
__pyx_v_pos = (__pyx_v_pos - 1);
/* "soft_nms_cpu.pyx":115
* # if box score falls below threshold, discard the box by
* # swapping with last box update N
* if boxes[pos, 4] < min_score: # <<<<<<<<<<<<<<
* boxes[pos, 0] = boxes[N-1, 0]
* boxes[pos, 1] = boxes[N-1, 1]
*/
}
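        /* Discard-by-swap: a box whose decayed score fell below min_score
         * is overwritten with the last live row (N-1), N is decremented,
         * and pos is decremented so the swapped-in row is examined on the
         * next iteration of the while loop. */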
/* "soft_nms_cpu.pyx":94
* if iw > 0:
* ih = (min(ty2, y2) - max(ty1, y1) + 1)
* if ih > 0: # <<<<<<<<<<<<<<
* ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih)
* ov = iw * ih / ua # iou between max box and detection box
*/
}
/* "soft_nms_cpu.pyx":92
* area = (x2 - x1 + 1) * (y2 - y1 + 1)
* iw = (min(tx2, x2) - max(tx1, x1) + 1)
* if iw > 0: # <<<<<<<<<<<<<<
* ih = (min(ty2, y2) - max(ty1, y1) + 1)
* if ih > 0:
*/
}
/* "soft_nms_cpu.pyx":125
* pos = pos - 1
*
* pos = pos + 1 # <<<<<<<<<<<<<<
*
* return boxes[:N], inds[:N]
*/
__pyx_v_pos = (__pyx_v_pos + 1);
}
/* "soft_nms_cpu.pyx":39
* inds = np.arange(N)
*
* for i in range(N): # <<<<<<<<<<<<<<
* maxscore = boxes[i, 4]
* maxpos = i
*/
}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "soft_nms_cpu.pyx":127
* pos = pos + 1
*
* return boxes[:N], inds[:N] # <<<<<<<<<<<<<<
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyObject_GetSlice(__pyx_v_boxes, 0, __pyx_v_N, NULL, NULL, NULL, 0, 1, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_6 = __Pyx_PyObject_GetSlice(__pyx_v_inds, 0, __pyx_v_N, NULL, NULL, NULL, 0, 1, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_12);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_t_6);
__pyx_t_2 = 0;
__pyx_t_6 = 0;
__pyx_r = __pyx_t_12;
__pyx_t_12 = 0;
goto __pyx_L0;
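  /* Return value: the first N surviving rows of `boxes` (x1, y1, x2, y2,
   * rescored score) together with `inds[:N]`, the original indices of
   * those detections. */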
/* "soft_nms_cpu.pyx":22
*
*
* def soft_nms_cpu( # <<<<<<<<<<<<<<
* np.ndarray[float, ndim=2] boxes_in,
* float iou_thr,
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_12);
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_boxes_in.rcbuffer->pybuffer);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("soft_nms_cpu.soft_nms_cpu", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_boxes_in.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XDECREF(__pyx_v_boxes);
__Pyx_XDECREF(__pyx_v_inds);
__Pyx_XDECREF(__pyx_v_i);
__Pyx_XDECREF(__pyx_v_ti);
__Pyx_XDECREF(__pyx_v_s);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":215
* # experimental exception made for __getbuffer__ and __releasebuffer__
* # -- the details of this may change.
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
* # This implementation of getbuffer is geared towards Cython
* # requirements, and does not yet fulfill the PEP.
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_i;
int __pyx_v_ndim;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
int __pyx_v_t;
char *__pyx_v_f;
PyArray_Descr *__pyx_v_descr = 0;
int __pyx_v_offset;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
PyObject *__pyx_t_7 = NULL;
char *__pyx_t_8;
if (__pyx_v_info == NULL) {
PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
return -1;
}
__Pyx_RefNannySetupContext("__getbuffer__", 0);
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":222
*
* cdef int i, ndim
* cdef int endian_detector = 1 # <<<<<<<<<<<<<<
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
*
*/
__pyx_v_endian_detector = 1;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":223
* cdef int i, ndim
* cdef int endian_detector = 1
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
*
* ndim = PyArray_NDIM(self)
*/
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":225
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
*
* ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
*/
__pyx_v_ndim = PyArray_NDIM(__pyx_v_self);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":227
* ndim = PyArray_NDIM(self)
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous")
*/
__pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L4_bool_binop_done;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":228
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<<
* raise ValueError(u"ndarray is not C contiguous")
*
*/
__pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":227
* ndim = PyArray_NDIM(self)
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous")
*/
if (unlikely(__pyx_t_1)) {
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":229
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 229, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 229, __pyx_L1_error)
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":227
* ndim = PyArray_NDIM(self)
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous")
*/
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":231
* raise ValueError(u"ndarray is not C contiguous")
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous")
*/
__pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L7_bool_binop_done;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":232
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<<
* raise ValueError(u"ndarray is not Fortran contiguous")
*
*/
__pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L7_bool_binop_done:;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":231
* raise ValueError(u"ndarray is not C contiguous")
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous")
*/
if (unlikely(__pyx_t_1)) {
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<<
*
* info.buf = PyArray_DATA(self)
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 233, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 233, __pyx_L1_error)
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":231
* raise ValueError(u"ndarray is not C contiguous")
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous")
*/
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":235
* raise ValueError(u"ndarray is not Fortran contiguous")
*
* info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<<
* info.ndim = ndim
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
*/
__pyx_v_info->buf = PyArray_DATA(__pyx_v_self);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":236
*
* info.buf = PyArray_DATA(self)
* info.ndim = ndim # <<<<<<<<<<<<<<
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* # Allocate new buffer for strides and shape info.
*/
__pyx_v_info->ndim = __pyx_v_ndim;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237
* info.buf = PyArray_DATA(self)
* info.ndim = ndim
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
*/
__pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
if (__pyx_t_1) {
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":240
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
* info.strides = <Py_ssize_t*>PyObject_Malloc(sizeof(Py_ssize_t) * 2 * <size_t>ndim) # <<<<<<<<<<<<<<
* info.shape = info.strides + ndim
* for i in range(ndim):
*/
__pyx_v_info->strides = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * 2) * ((size_t)__pyx_v_ndim))));
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":241
* # This is allocated as one block, strides first.
* info.strides = <Py_ssize_t*>PyObject_Malloc(sizeof(Py_ssize_t) * 2 * <size_t>ndim)
* info.shape = info.strides + ndim # <<<<<<<<<<<<<<
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i]
*/
__pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":242
* info.strides = <Py_ssize_t*>PyObject_Malloc(sizeof(Py_ssize_t) * 2 * <size_t>ndim)
* info.shape = info.strides + ndim
* for i in range(ndim): # <<<<<<<<<<<<<<
* info.strides[i] = PyArray_STRIDES(self)[i]
* info.shape[i] = PyArray_DIMS(self)[i]
*/
__pyx_t_4 = __pyx_v_ndim;
__pyx_t_5 = __pyx_t_4;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":243
* info.shape = info.strides + ndim
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<<
* info.shape[i] = PyArray_DIMS(self)[i]
* else:
*/
(__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":244
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i]
* info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<<
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
*/
(__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]);
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237
* info.buf = PyArray_DATA(self)
* info.ndim = ndim
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
*/
goto __pyx_L9;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":246
* info.shape[i] = PyArray_DIMS(self)[i]
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<<
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL
*/
/*else*/ {
__pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self));
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":247
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
* info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<<
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self)
*/
__pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self));
}
__pyx_L9:;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":248
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL # <<<<<<<<<<<<<<
* info.itemsize = PyArray_ITEMSIZE(self)
* info.readonly = not PyArray_ISWRITEABLE(self)
*/
__pyx_v_info->suboffsets = NULL;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":249
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<<
* info.readonly = not PyArray_ISWRITEABLE(self)
*
*/
__pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":250
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self)
* info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<<
*
* cdef int t
*/
__pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0));
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":253
*
* cdef int t
* cdef char* f = NULL # <<<<<<<<<<<<<<
* cdef dtype descr = self.descr
* cdef int offset
*/
__pyx_v_f = NULL;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":254
* cdef int t
* cdef char* f = NULL
* cdef dtype descr = self.descr # <<<<<<<<<<<<<<
* cdef int offset
*
*/
__pyx_t_3 = ((PyObject *)__pyx_v_self->descr);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_descr = ((PyArray_Descr *)__pyx_t_3);
__pyx_t_3 = 0;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":257
* cdef int offset
*
* info.obj = self # <<<<<<<<<<<<<<
*
* if not PyDataType_HASFIELDS(descr):
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":259
* info.obj = self
*
* if not PyDataType_HASFIELDS(descr): # <<<<<<<<<<<<<<
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or
*/
__pyx_t_1 = ((!(PyDataType_HASFIELDS(__pyx_v_descr) != 0)) != 0);
if (__pyx_t_1) {
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":260
*
* if not PyDataType_HASFIELDS(descr):
* t = descr.type_num # <<<<<<<<<<<<<<
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)):
*/
__pyx_t_4 = __pyx_v_descr->type_num;
__pyx_v_t = __pyx_t_4;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":261
* if not PyDataType_HASFIELDS(descr):
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
__pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0);
if (!__pyx_t_2) {
goto __pyx_L15_next_or;
} else {
}
__pyx_t_2 = (__pyx_v_little_endian != 0);
if (!__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L14_bool_binop_done;
}
__pyx_L15_next_or:;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":262
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<<
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b"
*/
__pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L14_bool_binop_done;
}
__pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L14_bool_binop_done:;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":261
* if not PyDataType_HASFIELDS(descr):
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
if (unlikely(__pyx_t_1)) {
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":263
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 263, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 263, __pyx_L1_error)
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":261
* if not PyDataType_HASFIELDS(descr):
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
}
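/* NOTE: the switch below maps NumPy type numbers to the single-character
 * struct/PEP 3118 format codes that buffer consumers expect in
 * Py_buffer.format (e.g. NPY_INT -> "i", NPY_CDOUBLE -> "Zd"). It is only
 * reached for scalar (non-structured) dtypes; structured dtypes take the
 * else branch further below and build their format string through
 * _util_dtypestring(). */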
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":264
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<<
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h"
*/
switch (__pyx_v_t) {
case NPY_BYTE:
__pyx_v_f = ((char *)"b");
break;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":265
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<<
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H"
*/
case NPY_UBYTE:
__pyx_v_f = ((char *)"B");
break;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":266
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<<
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i"
*/
case NPY_SHORT:
__pyx_v_f = ((char *)"h");
break;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":267
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<<
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I"
*/
case NPY_USHORT:
__pyx_v_f = ((char *)"H");
break;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":268
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<<
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l"
*/
case NPY_INT:
__pyx_v_f = ((char *)"i");
break;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":269
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<<
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L"
*/
case NPY_UINT:
__pyx_v_f = ((char *)"I");
break;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":270
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<<
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q"
*/
case NPY_LONG:
__pyx_v_f = ((char *)"l");
break;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":271
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<<
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q"
*/
case NPY_ULONG:
__pyx_v_f = ((char *)"L");
break;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<<
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f"
*/
case NPY_LONGLONG:
__pyx_v_f = ((char *)"q");
break;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":273
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<<
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d"
*/
case NPY_ULONGLONG:
__pyx_v_f = ((char *)"Q");
break;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<<
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g"
*/
case NPY_FLOAT:
__pyx_v_f = ((char *)"f");
break;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":275
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<<
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf"
*/
case NPY_DOUBLE:
__pyx_v_f = ((char *)"d");
break;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<<
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd"
*/
case NPY_LONGDOUBLE:
__pyx_v_f = ((char *)"g");
break;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":277
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<<
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg"
*/
case NPY_CFLOAT:
__pyx_v_f = ((char *)"Zf");
break;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":278
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<<
* elif t == NPY_CLONGDOUBLE: f = "Zg"
* elif t == NPY_OBJECT: f = "O"
*/
case NPY_CDOUBLE:
__pyx_v_f = ((char *)"Zd");
break;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":279
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<<
* elif t == NPY_OBJECT: f = "O"
* else:
*/
case NPY_CLONGDOUBLE:
__pyx_v_f = ((char *)"Zg");
break;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":280
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg"
* elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<<
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
*/
case NPY_OBJECT:
__pyx_v_f = ((char *)"O");
break;
default:
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":282
* elif t == NPY_OBJECT: f = "O"
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
* info.format = f
* return
*/
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 282, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_7 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 282, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_7); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 282, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 282, __pyx_L1_error)
break;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":283
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* info.format = f # <<<<<<<<<<<<<<
* return
* else:
*/
__pyx_v_info->format = __pyx_v_f;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":284
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* info.format = f
* return # <<<<<<<<<<<<<<
* else:
* info.format = <char*>PyObject_Malloc(_buffer_format_string_len)
*/
__pyx_r = 0;
goto __pyx_L0;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":259
* info.obj = self
*
* if not PyDataType_HASFIELDS(descr): # <<<<<<<<<<<<<<
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or
*/
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":286
* return
* else:
* info.format = <char*>PyObject_Malloc(_buffer_format_string_len) # <<<<<<<<<<<<<<
* info.format[0] = c'^' # Native data types, manual alignment
* offset = 0
*/
/*else*/ {
__pyx_v_info->format = ((char *)PyObject_Malloc(0xFF));
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":287
* else:
* info.format = <char*>PyObject_Malloc(_buffer_format_string_len)
* info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<<
* offset = 0
* f = _util_dtypestring(descr, info.format + 1,
*/
(__pyx_v_info->format[0]) = '^';
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":288
* info.format = <char*>PyObject_Malloc(_buffer_format_string_len)
* info.format[0] = c'^' # Native data types, manual alignment
* offset = 0 # <<<<<<<<<<<<<<
* f = _util_dtypestring(descr, info.format + 1,
* info.format + _buffer_format_string_len,
*/
__pyx_v_offset = 0;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":289
* info.format[0] = c'^' # Native data types, manual alignment
* offset = 0
* f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<<
* info.format + _buffer_format_string_len,
* &offset)
*/
__pyx_t_8 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_8 == ((char *)NULL))) __PYX_ERR(1, 289, __pyx_L1_error)
__pyx_v_f = __pyx_t_8;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":292
* info.format + _buffer_format_string_len,
* &offset)
* f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<<
*
* def __releasebuffer__(ndarray self, Py_buffer* info):
*/
(__pyx_v_f[0]) = '\x00';
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":215
* # experimental exception made for __getbuffer__ and __releasebuffer__
* # -- the details of this may change.
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
* # This implementation of getbuffer is geared towards Cython
* # requirements, and does not yet fulfill the PEP.
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_descr);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
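/* Illustrative sketch (not part of the Cython output above; the helper name
 * is hypothetical, kept under #if 0 so it does not affect the generated
 * module): a minimal C consumer of the buffer that __getbuffer__ fills in.
 * PyObject_GetBuffer / PyBuffer_Release are the standard CPython
 * buffer-protocol entry points that dispatch to the slots implemented here. */
#if 0
static int peek_ndarray_buffer(PyObject *arr)
{
    Py_buffer view;
    if (PyObject_GetBuffer(arr, &view, PyBUF_FULL_RO) < 0)
        return -1;                      /* exception already set */
    /* view.format holds a struct-style code such as "i" or "Zd";
     * view.ndim / view.shape / view.strides / view.itemsize describe the
     * memory layout filled in by __getbuffer__ above. */
    int ndim = view.ndim;
    PyBuffer_Release(&view);            /* triggers __releasebuffer__ */
    return ndim;
}
#endif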
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":294
* f[0] = c'\0' # Terminate format string
*
* def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
* if PyArray_HASFIELDS(self):
* PyObject_Free(info.format)
*/
/* Python wrapper */
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0);
__pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__releasebuffer__", 0);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":295
*
* def __releasebuffer__(ndarray self, Py_buffer* info):
* if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<<
* PyObject_Free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
*/
__pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0);
if (__pyx_t_1) {
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":296
* def __releasebuffer__(ndarray self, Py_buffer* info):
* if PyArray_HASFIELDS(self):
* PyObject_Free(info.format) # <<<<<<<<<<<<<<
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* PyObject_Free(info.strides)
*/
PyObject_Free(__pyx_v_info->format);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":295
*
* def __releasebuffer__(ndarray self, Py_buffer* info):
* if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<<
* PyObject_Free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
*/
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":297
* if PyArray_HASFIELDS(self):
* PyObject_Free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* PyObject_Free(info.strides)
* # info.shape was stored after info.strides in the same block
*/
__pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
if (__pyx_t_1) {
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":298
* PyObject_Free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* PyObject_Free(info.strides) # <<<<<<<<<<<<<<
* # info.shape was stored after info.strides in the same block
*
*/
PyObject_Free(__pyx_v_info->strides);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":297
* if PyArray_HASFIELDS(self):
* PyObject_Free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* PyObject_Free(info.strides)
* # info.shape was stored after info.strides in the same block
*/
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":294
* f[0] = c'\0' # Terminate format string
*
* def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
* if PyArray_HASFIELDS(self):
* PyObject_Free(info.format)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
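/* NOTE: __releasebuffer__ above frees only what __getbuffer__ allocated:
 * the format string when the array's dtype is structured
 * (PyArray_HASFIELDS), and the combined strides+shape block when
 * sizeof(npy_intp) differs from sizeof(Py_ssize_t). The
 * PyArray_MultiIterNew1..5 helpers that follow are thin inline wrappers
 * over NumPy's variadic PyArray_MultiIterNew(), the C-level counterpart of
 * np.broadcast(). */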
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":775
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":776
*
* cdef inline object PyArray_MultiIterNew1(a):
* return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew2(a, b):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 776, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":775
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":778
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":779
*
* cdef inline object PyArray_MultiIterNew2(a, b):
* return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew3(a, b, c):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 779, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":778
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":781
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":782
*
* cdef inline object PyArray_MultiIterNew3(a, b, c):
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 782, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":781
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":784
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":785
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d):
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 785, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":784
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":787
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":788
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<<
*
* cdef inline tuple PyDataType_SHAPE(dtype d):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 788, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":787
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
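/* Illustrative sketch (hypothetical helper, kept under #if 0): using the
 * same NumPy C-API call as the wrappers above to set up a broadcast
 * iterator over two operands, analogous to np.broadcast(a, b) in Python. */
#if 0
static PyObject *broadcast_pair(PyObject *a, PyObject *b)
{
    /* Equivalent to the inline PyArray_MultiIterNew2(a, b) defined above. */
    return PyArray_MultiIterNew(2, (void *)a, (void *)b);
}
#endif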
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":790
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<<
* if PyDataType_HASSUBARRAY(d):
* return <tuple>d.subarray.shape
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("PyDataType_SHAPE", 0);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":791
*
* cdef inline tuple PyDataType_SHAPE(dtype d):
* if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<<
* return <tuple>d.subarray.shape
* else:
*/
__pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0);
if (__pyx_t_1) {
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":792
* cdef inline tuple PyDataType_SHAPE(dtype d):
* if PyDataType_HASSUBARRAY(d):
* return <tuple>d.subarray.shape # <<<<<<<<<<<<<<
* else:
* return ()
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape));
__pyx_r = ((PyObject*)__pyx_v_d->subarray->shape);
goto __pyx_L0;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":791
*
* cdef inline tuple PyDataType_SHAPE(dtype d):
* if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<<
* return <tuple>d.subarray.shape
* else:
*/
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":794
* return <tuple>d.subarray.shape
* else:
* return () # <<<<<<<<<<<<<<
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_empty_tuple);
__pyx_r = __pyx_empty_tuple;
goto __pyx_L0;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":790
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<<
* if PyDataType_HASSUBARRAY(d):
* return <tuple>d.subarray.shape
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
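/* NOTE: _util_dtypestring below is the recursive worker that __getbuffer__
 * uses for structured dtypes. For each field it emits "x" pad bytes up to
 * the field's offset, then either a single type code (or "Zf"/"Zd"/"Zg" for
 * complex types) for a scalar child, or recurses into a nested structured
 * child. The caller already wrote the leading "^" (native byte order, no
 * alignment) and terminates the string with '\0' after the call; this
 * function raises RuntimeError if the preallocated buffer is too short and
 * ValueError for non-native byte orders. */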
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":796
* return ()
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
* # Recursive utility function used in __getbuffer__ to get format
* # string. The new location in the format string is returned.
*/
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) {
PyArray_Descr *__pyx_v_child = 0;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
PyObject *__pyx_v_fields = 0;
PyObject *__pyx_v_childname = NULL;
PyObject *__pyx_v_new_offset = NULL;
PyObject *__pyx_v_t = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
long __pyx_t_8;
char *__pyx_t_9;
__Pyx_RefNannySetupContext("_util_dtypestring", 0);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":801
*
* cdef dtype child
* cdef int endian_detector = 1 # <<<<<<<<<<<<<<
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
* cdef tuple fields
*/
__pyx_v_endian_detector = 1;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":802
* cdef dtype child
* cdef int endian_detector = 1
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
* cdef tuple fields
*
*/
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":805
* cdef tuple fields
*
* for childname in descr.names: # <<<<<<<<<<<<<<
* fields = descr.fields[childname]
* child, new_offset = fields
*/
if (unlikely(__pyx_v_descr->names == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
__PYX_ERR(1, 805, __pyx_L1_error)
}
__pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
for (;;) {
if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 805, __pyx_L1_error)
#else
__pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 805, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
#endif
__Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3);
__pyx_t_3 = 0;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":806
*
* for childname in descr.names:
* fields = descr.fields[childname] # <<<<<<<<<<<<<<
* child, new_offset = fields
*
*/
if (unlikely(__pyx_v_descr->fields == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 806, __pyx_L1_error)
}
__pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 806, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 806, __pyx_L1_error)
__Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3));
__pyx_t_3 = 0;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":807
* for childname in descr.names:
* fields = descr.fields[childname]
* child, new_offset = fields # <<<<<<<<<<<<<<
*
* if (end - f) - <int>(new_offset - offset[0]) < 15:
*/
if (likely(__pyx_v_fields != Py_None)) {
PyObject* sequence = __pyx_v_fields;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 807, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
#else
__pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 807, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 807, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
#endif
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 807, __pyx_L1_error)
}
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 807, __pyx_L1_error)
__Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3));
__pyx_t_3 = 0;
__Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4);
__pyx_t_4 = 0;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":809
* child, new_offset = fields
*
* if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
*/
__pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 809, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 809, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 809, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0);
if (unlikely(__pyx_t_6)) {
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":810
*
* if (end - f) - <int>(new_offset - offset[0]) < 15:
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<<
*
* if ((child.byteorder == c'>' and little_endian) or
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 810, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 810, __pyx_L1_error)
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":809
* child, new_offset = fields
*
* if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
*/
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":812
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
__pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0);
if (!__pyx_t_7) {
goto __pyx_L8_next_or;
} else {
}
__pyx_t_7 = (__pyx_v_little_endian != 0);
if (!__pyx_t_7) {
} else {
__pyx_t_6 = __pyx_t_7;
goto __pyx_L7_bool_binop_done;
}
__pyx_L8_next_or:;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":813
*
* if ((child.byteorder == c'>' and little_endian) or
* (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<<
* raise ValueError(u"Non-native byte order not supported")
* # One could encode it in the format string and have Cython
*/
__pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0);
if (__pyx_t_7) {
} else {
__pyx_t_6 = __pyx_t_7;
goto __pyx_L7_bool_binop_done;
}
__pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0);
__pyx_t_6 = __pyx_t_7;
__pyx_L7_bool_binop_done:;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":812
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
if (unlikely(__pyx_t_6)) {
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":814
* if ((child.byteorder == c'>' and little_endian) or
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* # One could encode it in the format string and have Cython
* # complain instead, BUT: < and > in format strings also imply
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 814, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 814, __pyx_L1_error)
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":812
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":824
*
* # Output padding bytes
* while offset[0] < new_offset: # <<<<<<<<<<<<<<
* f[0] = 120 # "x"; pad byte
* f += 1
*/
while (1) {
__pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 824, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 824, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 824, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (!__pyx_t_6) break;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825
* # Output padding bytes
* while offset[0] < new_offset:
* f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<<
* f += 1
* offset[0] += 1
*/
(__pyx_v_f[0]) = 0x78;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":826
* while offset[0] < new_offset:
* f[0] = 120 # "x"; pad byte
* f += 1 # <<<<<<<<<<<<<<
* offset[0] += 1
*
*/
__pyx_v_f = (__pyx_v_f + 1);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827
* f[0] = 120 # "x"; pad byte
* f += 1
* offset[0] += 1 # <<<<<<<<<<<<<<
*
* offset[0] += child.itemsize
*/
__pyx_t_8 = 0;
(__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1);
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":829
* offset[0] += 1
*
* offset[0] += child.itemsize # <<<<<<<<<<<<<<
*
* if not PyDataType_HASFIELDS(child):
*/
__pyx_t_8 = 0;
(__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":831
* offset[0] += child.itemsize
*
* if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<<
* t = child.type_num
* if end - f < 5:
*/
__pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0);
if (__pyx_t_6) {
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":832
*
* if not PyDataType_HASFIELDS(child):
* t = child.type_num # <<<<<<<<<<<<<<
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.")
*/
__pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 832, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4);
__pyx_t_4 = 0;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":833
* if not PyDataType_HASFIELDS(child):
* t = child.type_num
* if end - f < 5: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short.")
*
*/
__pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0);
if (unlikely(__pyx_t_6)) {
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":834
* t = child.type_num
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<<
*
* # Until ticket #99 is fixed, use integers to avoid warnings
*/
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 834, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__PYX_ERR(1, 834, __pyx_L1_error)
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":833
* if not PyDataType_HASFIELDS(child):
* t = child.type_num
* if end - f < 5: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short.")
*
*/
}
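/* NOTE: t is a boxed Python object here (child.type_num), so each branch
 * below compares it with PyObject_RichCompare against the boxed NPY_* enum
 * value and then stores the raw character code (98 == 'b', 104 == 'h',
 * 90 == 'Z', ...) into the format buffer -- mirroring the switch used for
 * scalar dtypes in __getbuffer__ above. */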
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":837
*
* # Until ticket #99 is fixed, use integers to avoid warnings
* if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<<
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 837, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 837, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 837, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 98;
goto __pyx_L15;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":838
* # Until ticket #99 is fixed, use integers to avoid warnings
* if t == NPY_BYTE: f[0] = 98 #"b"
* elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<<
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 838, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 838, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 838, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 66;
goto __pyx_L15;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":839
* if t == NPY_BYTE: f[0] = 98 #"b"
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<<
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 839, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 839, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 839, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x68;
goto __pyx_L15;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":840
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<<
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 840, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 840, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 840, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 72;
goto __pyx_L15;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":841
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<<
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 841, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 841, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 841, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x69;
goto __pyx_L15;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":842
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<<
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 842, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 842, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 842, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 73;
goto __pyx_L15;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":843
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<<
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 843, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 843, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 843, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x6C;
goto __pyx_L15;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":844
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<<
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 844, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 844, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 844, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 76;
goto __pyx_L15;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":845
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<<
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 845, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 845, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 845, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x71;
goto __pyx_L15;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":846
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<<
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 846, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 846, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 846, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 81;
goto __pyx_L15;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":847
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<<
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 847, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 847, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 847, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x66;
goto __pyx_L15;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":848
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<<
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 848, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 848, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 848, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x64;
goto __pyx_L15;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":849
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<<
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 849, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 849, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 849, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x67;
goto __pyx_L15;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":850
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<<
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 850, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 850, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 850, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 0x66;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":851
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<<
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
* elif t == NPY_OBJECT: f[0] = 79 #"O"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 851, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 851, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 851, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 0x64;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":852
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<<
* elif t == NPY_OBJECT: f[0] = 79 #"O"
* else:
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 852, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 852, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 852, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 0x67;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":853
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
* elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<<
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 853, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 853, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 853, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (likely(__pyx_t_6)) {
(__pyx_v_f[0]) = 79;
goto __pyx_L15;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":855
* elif t == NPY_OBJECT: f[0] = 79 #"O"
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
* f += 1
* else:
*/
/*else*/ {
__pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 855, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 855, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__PYX_ERR(1, 855, __pyx_L1_error)
}
__pyx_L15:;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":856
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* f += 1 # <<<<<<<<<<<<<<
* else:
* # Cython ignores struct boundary information ("T{...}"),
*/
__pyx_v_f = (__pyx_v_f + 1);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":831
* offset[0] += child.itemsize
*
* if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<<
* t = child.type_num
* if end - f < 5:
*/
goto __pyx_L13;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":860
* # Cython ignores struct boundary information ("T{...}"),
* # so don't output it
* f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<<
* return f
*
*/
/*else*/ {
__pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 860, __pyx_L1_error)
__pyx_v_f = __pyx_t_9;
}
__pyx_L13:;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":805
* cdef tuple fields
*
* for childname in descr.names: # <<<<<<<<<<<<<<
* fields = descr.fields[childname]
* child, new_offset = fields
*/
}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":861
* # so don't output it
* f = _util_dtypestring(child, f, end, offset)
* return f # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_f;
goto __pyx_L0;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":796
* return ()
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
* # Recursive utility function used in __getbuffer__ to get format
* # string. The new location in the format string is returned.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_child);
__Pyx_XDECREF(__pyx_v_fields);
__Pyx_XDECREF(__pyx_v_childname);
__Pyx_XDECREF(__pyx_v_new_offset);
__Pyx_XDECREF(__pyx_v_t);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":977
*
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* cdef PyObject* baseptr
* if base is None:
*/
static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) {
PyObject *__pyx_v_baseptr;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
__Pyx_RefNannySetupContext("set_array_base", 0);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":979
* cdef inline void set_array_base(ndarray arr, object base):
* cdef PyObject* baseptr
* if base is None: # <<<<<<<<<<<<<<
* baseptr = NULL
* else:
*/
__pyx_t_1 = (__pyx_v_base == Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":980
* cdef PyObject* baseptr
* if base is None:
* baseptr = NULL # <<<<<<<<<<<<<<
* else:
* Py_INCREF(base) # important to do this before decref below!
*/
__pyx_v_baseptr = NULL;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":979
* cdef inline void set_array_base(ndarray arr, object base):
* cdef PyObject* baseptr
* if base is None: # <<<<<<<<<<<<<<
* baseptr = NULL
* else:
*/
goto __pyx_L3;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":982
* baseptr = NULL
* else:
* Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<<
* baseptr = <PyObject*>base
* Py_XDECREF(arr.base)
*/
/*else*/ {
Py_INCREF(__pyx_v_base);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":983
* else:
* Py_INCREF(base) # important to do this before decref below!
* baseptr = <PyObject*>base # <<<<<<<<<<<<<<
* Py_XDECREF(arr.base)
* arr.base = baseptr
*/
__pyx_v_baseptr = ((PyObject *)__pyx_v_base);
}
__pyx_L3:;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":984
* Py_INCREF(base) # important to do this before decref below!
* baseptr = <PyObject*>base
* Py_XDECREF(arr.base) # <<<<<<<<<<<<<<
* arr.base = baseptr
*
*/
Py_XDECREF(__pyx_v_arr->base);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":985
* baseptr = <PyObject*>base
* Py_XDECREF(arr.base)
* arr.base = baseptr # <<<<<<<<<<<<<<
*
* cdef inline object get_array_base(ndarray arr):
*/
__pyx_v_arr->base = __pyx_v_baseptr;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":977
*
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* cdef PyObject* baseptr
* if base is None:
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":987
* arr.base = baseptr
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* if arr.base is NULL:
* return None
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("get_array_base", 0);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":988
*
* cdef inline object get_array_base(ndarray arr):
* if arr.base is NULL: # <<<<<<<<<<<<<<
* return None
* else:
*/
__pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0);
if (__pyx_t_1) {
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":989
* cdef inline object get_array_base(ndarray arr):
* if arr.base is NULL:
* return None # <<<<<<<<<<<<<<
* else:
* return <object>arr.base
*/
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":988
*
* cdef inline object get_array_base(ndarray arr):
* if arr.base is NULL: # <<<<<<<<<<<<<<
* return None
* else:
*/
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":991
* return None
* else:
* return <object>arr.base # <<<<<<<<<<<<<<
*
*
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_arr->base));
__pyx_r = ((PyObject *)__pyx_v_arr->base);
goto __pyx_L0;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":987
* arr.base = baseptr
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* if arr.base is NULL:
* return None
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":996
* # Versions of the import_* functions which are more suitable for
* # Cython code.
* cdef inline int import_array() except -1: # <<<<<<<<<<<<<<
* try:
* _import_array()
*/
static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
__Pyx_RefNannySetupContext("import_array", 0);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":997
* # Cython code.
* cdef inline int import_array() except -1:
* try: # <<<<<<<<<<<<<<
* _import_array()
* except Exception:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":998
* cdef inline int import_array() except -1:
* try:
* _import_array() # <<<<<<<<<<<<<<
* except Exception:
* raise ImportError("numpy.core.multiarray failed to import")
*/
__pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 998, __pyx_L3_error)
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":997
* # Cython code.
* cdef inline int import_array() except -1:
* try: # <<<<<<<<<<<<<<
* _import_array()
* except Exception:
*/
}
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":999
* try:
* _import_array()
* except Exception: # <<<<<<<<<<<<<<
* raise ImportError("numpy.core.multiarray failed to import")
*
*/
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 999, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1000
* _import_array()
* except Exception:
* raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_umath() except -1:
*/
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1000, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(1, 1000, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":997
* # Cython code.
* cdef inline int import_array() except -1:
* try: # <<<<<<<<<<<<<<
* _import_array()
* except Exception:
*/
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":996
* # Versions of the import_* functions which are more suitable for
* # Cython code.
* cdef inline int import_array() except -1: # <<<<<<<<<<<<<<
* try:
* _import_array()
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1002
* raise ImportError("numpy.core.multiarray failed to import")
*
* cdef inline int import_umath() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
__Pyx_RefNannySetupContext("import_umath", 0);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1003
*
* cdef inline int import_umath() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1004
* cdef inline int import_umath() except -1:
* try:
* _import_umath() # <<<<<<<<<<<<<<
* except Exception:
* raise ImportError("numpy.core.umath failed to import")
*/
__pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1004, __pyx_L3_error)
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1003
*
* cdef inline int import_umath() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
}
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1005
* try:
* _import_umath()
* except Exception: # <<<<<<<<<<<<<<
* raise ImportError("numpy.core.umath failed to import")
*
*/
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1005, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1006
* _import_umath()
* except Exception:
* raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_ufunc() except -1:
*/
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1006, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(1, 1006, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1003
*
* cdef inline int import_umath() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1002
* raise ImportError("numpy.core.multiarray failed to import")
*
* cdef inline int import_umath() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1008
* raise ImportError("numpy.core.umath failed to import")
*
* cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
__Pyx_RefNannySetupContext("import_ufunc", 0);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1009
*
* cdef inline int import_ufunc() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010
* cdef inline int import_ufunc() except -1:
* try:
* _import_umath() # <<<<<<<<<<<<<<
* except Exception:
* raise ImportError("numpy.core.umath failed to import")
*/
__pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1010, __pyx_L3_error)
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1009
*
* cdef inline int import_ufunc() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
}
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1011
* try:
* _import_umath()
* except Exception: # <<<<<<<<<<<<<<
* raise ImportError("numpy.core.umath failed to import")
*/
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1011, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1012
* _import_umath()
* except Exception:
* raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
*/
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1012, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(1, 1012, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1009
*
* cdef inline int import_ufunc() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1008
* raise ImportError("numpy.core.umath failed to import")
*
* cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
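/* Note: the three helpers above (import_array / import_umath / import_ufunc)
   wrap NumPy's C-API initialisation; each converts a failed _import_array()
   or _import_umath() call into a Python ImportError, so loading this
   extension fails cleanly instead of crashing later on a missing NumPy
   C-API table. */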
static PyMethodDef __pyx_methods[] = {
{0, 0, 0, 0}
};
#if PY_MAJOR_VERSION >= 3
#if CYTHON_PEP489_MULTI_PHASE_INIT
static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
static int __pyx_pymod_exec_soft_nms_cpu(PyObject* module); /*proto*/
static PyModuleDef_Slot __pyx_moduledef_slots[] = {
{Py_mod_create, (void*)__pyx_pymod_create},
{Py_mod_exec, (void*)__pyx_pymod_exec_soft_nms_cpu},
{0, NULL}
};
#endif
static struct PyModuleDef __pyx_moduledef = {
PyModuleDef_HEAD_INIT,
"soft_nms_cpu",
0, /* m_doc */
#if CYTHON_PEP489_MULTI_PHASE_INIT
0, /* m_size */
#else
-1, /* m_size */
#endif
__pyx_methods /* m_methods */,
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_moduledef_slots, /* m_slots */
#else
NULL, /* m_reload */
#endif
NULL, /* m_traverse */
NULL, /* m_clear */
NULL /* m_free */
};
#endif
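/* Interned-string table: each entry pairs a module-level PyObject* slot
   (__pyx_n_s_* for identifier names, __pyx_kp_u_* for unicode literals such
   as error messages) with its C literal and length. __Pyx_InitStrings() in
   __Pyx_InitGlobals() below fills these slots once at module initialisation;
   the meaning of the trailing flag columns follows the __Pyx_StringTabEntry
   layout declared earlier in this file. */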
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0},
{&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0},
{&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1},
{&__pyx_n_s_N, __pyx_k_N, sizeof(__pyx_k_N), 0, 0, 1, 1},
{&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0},
{&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1},
{&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
{&__pyx_n_s_arange, __pyx_k_arange, sizeof(__pyx_k_arange), 0, 0, 1, 1},
{&__pyx_n_s_area, __pyx_k_area, sizeof(__pyx_k_area), 0, 0, 1, 1},
{&__pyx_n_s_box_area, __pyx_k_box_area, sizeof(__pyx_k_box_area), 0, 0, 1, 1},
{&__pyx_n_s_boxes, __pyx_k_boxes, sizeof(__pyx_k_boxes), 0, 0, 1, 1},
{&__pyx_n_s_boxes_in, __pyx_k_boxes_in, sizeof(__pyx_k_boxes_in), 0, 0, 1, 1},
{&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
{&__pyx_n_s_copy, __pyx_k_copy, sizeof(__pyx_k_copy), 0, 0, 1, 1},
{&__pyx_n_s_exp, __pyx_k_exp, sizeof(__pyx_k_exp), 0, 0, 1, 1},
{&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1},
{&__pyx_n_s_ih, __pyx_k_ih, sizeof(__pyx_k_ih), 0, 0, 1, 1},
{&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
{&__pyx_n_s_inds, __pyx_k_inds, sizeof(__pyx_k_inds), 0, 0, 1, 1},
{&__pyx_n_s_iou_thr, __pyx_k_iou_thr, sizeof(__pyx_k_iou_thr), 0, 0, 1, 1},
{&__pyx_n_s_iw, __pyx_k_iw, sizeof(__pyx_k_iw), 0, 0, 1, 1},
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
{&__pyx_n_s_maxpos, __pyx_k_maxpos, sizeof(__pyx_k_maxpos), 0, 0, 1, 1},
{&__pyx_n_s_maxscore, __pyx_k_maxscore, sizeof(__pyx_k_maxscore), 0, 0, 1, 1},
{&__pyx_n_s_method, __pyx_k_method, sizeof(__pyx_k_method), 0, 0, 1, 1},
{&__pyx_n_s_min_score, __pyx_k_min_score, sizeof(__pyx_k_min_score), 0, 0, 1, 1},
{&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0},
{&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0},
{&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1},
{&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
{&__pyx_kp_u_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 1, 0, 0},
{&__pyx_kp_u_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 1, 0, 0},
{&__pyx_n_s_ov, __pyx_k_ov, sizeof(__pyx_k_ov), 0, 0, 1, 1},
{&__pyx_n_s_pos, __pyx_k_pos, sizeof(__pyx_k_pos), 0, 0, 1, 1},
{&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
{&__pyx_n_s_s, __pyx_k_s, sizeof(__pyx_k_s), 0, 0, 1, 1},
{&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1},
{&__pyx_n_s_sigma, __pyx_k_sigma, sizeof(__pyx_k_sigma), 0, 0, 1, 1},
{&__pyx_n_s_soft_nms_cpu, __pyx_k_soft_nms_cpu, sizeof(__pyx_k_soft_nms_cpu), 0, 0, 1, 1},
{&__pyx_kp_s_src_soft_nms_cpu_pyx, __pyx_k_src_soft_nms_cpu_pyx, sizeof(__pyx_k_src_soft_nms_cpu_pyx), 0, 0, 1, 0},
{&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
{&__pyx_n_s_ti, __pyx_k_ti, sizeof(__pyx_k_ti), 0, 0, 1, 1},
{&__pyx_n_s_ts, __pyx_k_ts, sizeof(__pyx_k_ts), 0, 0, 1, 1},
{&__pyx_n_s_tx1, __pyx_k_tx1, sizeof(__pyx_k_tx1), 0, 0, 1, 1},
{&__pyx_n_s_tx2, __pyx_k_tx2, sizeof(__pyx_k_tx2), 0, 0, 1, 1},
{&__pyx_n_s_ty1, __pyx_k_ty1, sizeof(__pyx_k_ty1), 0, 0, 1, 1},
{&__pyx_n_s_ty2, __pyx_k_ty2, sizeof(__pyx_k_ty2), 0, 0, 1, 1},
{&__pyx_n_s_ua, __pyx_k_ua, sizeof(__pyx_k_ua), 0, 0, 1, 1},
{&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0},
{&__pyx_n_s_weight, __pyx_k_weight, sizeof(__pyx_k_weight), 0, 0, 1, 1},
{&__pyx_n_s_x1, __pyx_k_x1, sizeof(__pyx_k_x1), 0, 0, 1, 1},
{&__pyx_n_s_x2, __pyx_k_x2, sizeof(__pyx_k_x2), 0, 0, 1, 1},
{&__pyx_n_s_y1, __pyx_k_y1, sizeof(__pyx_k_y1), 0, 0, 1, 1},
{&__pyx_n_s_y2, __pyx_k_y2, sizeof(__pyx_k_y2), 0, 0, 1, 1},
{0, 0, 0, 0, 0, 0, 0}
};
static int __Pyx_InitCachedBuiltins(void) {
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 39, __pyx_L1_error)
__pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 229, __pyx_L1_error)
__pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 810, __pyx_L1_error)
__pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(1, 1000, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
static int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":229
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
*/
__pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple_)) __PYX_ERR(1, 229, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple_);
__Pyx_GIVEREF(__pyx_tuple_);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<<
*
* info.buf = PyArray_DATA(self)
*/
__pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 233, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__2);
__Pyx_GIVEREF(__pyx_tuple__2);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":263
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
*/
__pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 263, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__3);
__Pyx_GIVEREF(__pyx_tuple__3);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":810
*
* if (end - f) - <int>(new_offset - offset[0]) < 15:
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<<
*
* if ((child.byteorder == c'>' and little_endian) or
*/
__pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 810, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__4);
__Pyx_GIVEREF(__pyx_tuple__4);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":814
* if ((child.byteorder == c'>' and little_endian) or
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* # One could encode it in the format string and have Cython
* # complain instead, BUT: < and > in format strings also imply
*/
__pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 814, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__5);
__Pyx_GIVEREF(__pyx_tuple__5);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":834
* t = child.type_num
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<<
*
* # Until ticket #99 is fixed, use integers to avoid warnings
*/
__pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 834, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__6);
__Pyx_GIVEREF(__pyx_tuple__6);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1000
* _import_array()
* except Exception:
* raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_umath() except -1:
*/
__pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_u_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 1000, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__7);
__Pyx_GIVEREF(__pyx_tuple__7);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1006
* _import_umath()
* except Exception:
* raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_ufunc() except -1:
*/
__pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_u_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 1006, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__8);
__Pyx_GIVEREF(__pyx_tuple__8);
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1012
* _import_umath()
* except Exception:
* raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
*/
__pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_u_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 1012, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__9);
__Pyx_GIVEREF(__pyx_tuple__9);
/* "soft_nms_cpu.pyx":22
*
*
* def soft_nms_cpu( # <<<<<<<<<<<<<<
* np.ndarray[float, ndim=2] boxes_in,
* float iou_thr,
*/
__pyx_tuple__10 = PyTuple_Pack(30, __pyx_n_s_boxes_in, __pyx_n_s_iou_thr, __pyx_n_s_method, __pyx_n_s_sigma, __pyx_n_s_min_score, __pyx_n_s_boxes, __pyx_n_s_N, __pyx_n_s_iw, __pyx_n_s_ih, __pyx_n_s_box_area, __pyx_n_s_ua, __pyx_n_s_pos, __pyx_n_s_maxscore, __pyx_n_s_maxpos, __pyx_n_s_x1, __pyx_n_s_x2, __pyx_n_s_y1, __pyx_n_s_y2, __pyx_n_s_tx1, __pyx_n_s_tx2, __pyx_n_s_ty1, __pyx_n_s_ty2, __pyx_n_s_ts, __pyx_n_s_area, __pyx_n_s_weight, __pyx_n_s_ov, __pyx_n_s_inds, __pyx_n_s_i, __pyx_n_s_ti, __pyx_n_s_s); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__10);
__Pyx_GIVEREF(__pyx_tuple__10);
__pyx_codeobj__11 = (PyObject*)__Pyx_PyCode_New(5, 0, 30, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__10, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_soft_nms_cpu_pyx, __pyx_n_s_soft_nms_cpu, 22, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__11)) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
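/* The cached constants built above are the single-element argument tuples
   used by the ValueError/RuntimeError/ImportError raises in the numpy buffer
   support code, plus the varnames tuple (__pyx_tuple__10) and code object
   (__pyx_codeobj__11) used to create the soft_nms_cpu Python function during
   module execution below. */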
static int __Pyx_InitGlobals(void) {
if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
__pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_3 = PyInt_FromLong(3); if (unlikely(!__pyx_int_3)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_4 = PyInt_FromLong(4); if (unlikely(!__pyx_int_4)) __PYX_ERR(0, 1, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
static int __Pyx_modinit_global_init_code(void); /*proto*/
static int __Pyx_modinit_variable_export_code(void); /*proto*/
static int __Pyx_modinit_function_export_code(void); /*proto*/
static int __Pyx_modinit_type_init_code(void); /*proto*/
static int __Pyx_modinit_type_import_code(void); /*proto*/
static int __Pyx_modinit_variable_import_code(void); /*proto*/
static int __Pyx_modinit_function_import_code(void); /*proto*/
static int __Pyx_modinit_global_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
/*--- Global init code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_variable_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
/*--- Variable export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
/*--- Function export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_type_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
/*--- Type init code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_type_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
/*--- Type import code ---*/
__pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type",
#if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000
sizeof(PyTypeObject),
#else
sizeof(PyHeapTypeObject),
#endif
0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) __PYX_ERR(2, 9, __pyx_L1_error)
__pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) __PYX_ERR(1, 164, __pyx_L1_error)
__pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) __PYX_ERR(1, 186, __pyx_L1_error)
__pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) __PYX_ERR(1, 190, __pyx_L1_error)
__pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) __PYX_ERR(1, 199, __pyx_L1_error)
__pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) __PYX_ERR(1, 872, __pyx_L1_error)
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
static int __Pyx_modinit_variable_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
/*--- Variable import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
/*--- Function import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
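/* Of the __Pyx_modinit_* helpers above, only the type-import step does real
   work for this module: it imports NumPy's dtype, flatiter, broadcast,
   ndarray and ufunc type objects and checks their struct sizes so the
   buffer/ndarray code can use them safely. The remaining helpers are empty
   placeholders emitted by Cython for every module. */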
#if PY_MAJOR_VERSION < 3
#ifdef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC void
#else
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#endif
#else
#ifdef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC PyObject *
#else
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#endif
#endif
#ifndef CYTHON_SMALL_CODE
#if defined(__clang__)
#define CYTHON_SMALL_CODE
#elif defined(__GNUC__) && (!(defined(__cplusplus)) || (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 4)))
#define CYTHON_SMALL_CODE __attribute__((optimize("Os")))
#else
#define CYTHON_SMALL_CODE
#endif
#endif
#if PY_MAJOR_VERSION < 3
__Pyx_PyMODINIT_FUNC initsoft_nms_cpu(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC initsoft_nms_cpu(void)
#else
__Pyx_PyMODINIT_FUNC PyInit_soft_nms_cpu(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC PyInit_soft_nms_cpu(void)
#if CYTHON_PEP489_MULTI_PHASE_INIT
{
return PyModuleDef_Init(&__pyx_moduledef);
}
static int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name) {
PyObject *value = PyObject_GetAttrString(spec, from_name);
int result = 0;
if (likely(value)) {
result = PyDict_SetItemString(moddict, to_name, value);
Py_DECREF(value);
} else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
} else {
result = -1;
}
return result;
}
static PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
PyObject *module = NULL, *moddict, *modname;
if (__pyx_m)
return __Pyx_NewRef(__pyx_m);
modname = PyObject_GetAttrString(spec, "name");
if (unlikely(!modname)) goto bad;
module = PyModule_NewObject(modname);
Py_DECREF(modname);
if (unlikely(!module)) goto bad;
moddict = PyModule_GetDict(module);
if (unlikely(!moddict)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__") < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__") < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__") < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__") < 0)) goto bad;
return module;
bad:
Py_XDECREF(module);
return NULL;
}
static int __pyx_pymod_exec_soft_nms_cpu(PyObject *__pyx_pyinit_module)
#endif
#endif
{
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannyDeclarations
#if CYTHON_PEP489_MULTI_PHASE_INIT
if (__pyx_m && __pyx_m == __pyx_pyinit_module) return 0;
#elif PY_MAJOR_VERSION >= 3
if (__pyx_m) return __Pyx_NewRef(__pyx_m);
#endif
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_soft_nms_cpu(void)", 0);
if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pyx_CyFunction_USED
if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Coroutine_USED
if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_AsyncGen_USED
if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_StopAsyncIteration_USED
if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
#ifdef WITH_THREAD /* Python build with threading support? */
PyEval_InitThreads();
#endif
#endif
/*--- Module creation code ---*/
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_m = __pyx_pyinit_module;
Py_INCREF(__pyx_m);
#else
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("soft_nms_cpu", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
#if CYTHON_COMPILING_IN_PYPY
Py_INCREF(__pyx_b);
#endif
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
/*--- Initialize various global constants etc. ---*/
if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
if (__pyx_module_is_main_soft_nms_cpu) {
if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
}
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
if (!PyDict_GetItemString(modules, "soft_nms_cpu")) {
if (unlikely(PyDict_SetItemString(modules, "soft_nms_cpu", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
}
}
#endif
/*--- Builtin init code ---*/
if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Constants init code ---*/
if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Global type/function init code ---*/
(void)__Pyx_modinit_global_init_code();
(void)__Pyx_modinit_variable_export_code();
(void)__Pyx_modinit_function_export_code();
(void)__Pyx_modinit_type_init_code();
if (unlikely(__Pyx_modinit_type_import_code() != 0)) goto __pyx_L1_error;
(void)__Pyx_modinit_variable_import_code();
(void)__Pyx_modinit_function_import_code();
/*--- Execution code ---*/
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/* "soft_nms_cpu.pyx":11
* # cython: language_level=3, boundscheck=False
*
* import numpy as np # <<<<<<<<<<<<<<
* cimport numpy as np
*
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 11, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "soft_nms_cpu.pyx":22
*
*
* def soft_nms_cpu( # <<<<<<<<<<<<<<
* np.ndarray[float, ndim=2] boxes_in,
* float iou_thr,
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_12soft_nms_cpu_1soft_nms_cpu, NULL, __pyx_n_s_soft_nms_cpu); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_soft_nms_cpu, __pyx_t_1) < 0) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
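/* Hypothetical usage sketch of the built extension, for orientation only.
   The parameter names come from __pyx_tuple__10 above; the keyword defaults
   for method/sigma/min_score and the (boxes, inds) return shape are
   assumptions that are not visible in this generated file:

       import numpy as np
       from soft_nms_cpu import soft_nms_cpu
       dets = np.array([[10., 10., 50., 50., 0.9],
                        [12., 12., 48., 48., 0.8]], dtype=np.float32)
       new_dets, keep = soft_nms_cpu(dets, iou_thr=0.5)
*/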
/* "soft_nms_cpu.pyx":1
* # ---------------------------------------------------------- # <<<<<<<<<<<<<<
* # Soft-NMS: Improving Object Detection With One Line of Code
* # Copyright (c) University of Maryland, College Park
*/
__pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1008
* raise ImportError("numpy.core.umath failed to import")
*
* cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init soft_nms_cpu", 0, __pyx_lineno, __pyx_filename);
}
Py_DECREF(__pyx_m); __pyx_m = 0;
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init soft_nms_cpu");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if CYTHON_PEP489_MULTI_PHASE_INIT
return (__pyx_m != NULL) ? 0 : -1;
#elif PY_MAJOR_VERSION >= 3
return __pyx_m;
#else
return;
#endif
}
/* --- Runtime support code --- */
/* Refnanny */
#if CYTHON_REFNANNY
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
PyObject *m = NULL, *p = NULL;
void *r = NULL;
m = PyImport_ImportModule((char *)modname);
if (!m) goto end;
p = PyObject_GetAttrString(m, (char *)"RefNannyAPI");
if (!p) goto end;
r = PyLong_AsVoidPtr(p);
end:
Py_XDECREF(p);
Py_XDECREF(m);
return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
/* PyObjectGetAttrStr */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
if (likely(tp->tp_getattr))
return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
return PyObject_GetAttr(obj, attr_name);
}
#endif
/* GetBuiltinName */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
if (unlikely(!result)) {
PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
"name '%U' is not defined", name);
#else
"name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
}
return result;
}
/* RaiseArgTupleInvalid */
static void __Pyx_RaiseArgtupleInvalid(
const char* func_name,
int exact,
Py_ssize_t num_min,
Py_ssize_t num_max,
Py_ssize_t num_found)
{
Py_ssize_t num_expected;
const char *more_or_less;
if (num_found < num_min) {
num_expected = num_min;
more_or_less = "at least";
} else {
num_expected = num_max;
more_or_less = "at most";
}
if (exact) {
more_or_less = "exactly";
}
PyErr_Format(PyExc_TypeError,
"%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
func_name, more_or_less, num_expected,
(num_expected == 1) ? "" : "s", num_found);
}
/* RaiseDoubleKeywords */
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
"%s() got multiple values for keyword argument '%s'", func_name,
PyString_AsString(kw_name));
#endif
}
/* ParseKeywords */
static int __Pyx_ParseOptionalKeywords(
PyObject *kwds,
PyObject **argnames[],
PyObject *kwds2,
PyObject *values[],
Py_ssize_t num_pos_args,
const char* function_name)
{
PyObject *key = 0, *value = 0;
Py_ssize_t pos = 0;
PyObject*** name;
PyObject*** first_kw_arg = argnames + num_pos_args;
while (PyDict_Next(kwds, &pos, &key, &value)) {
name = first_kw_arg;
while (*name && (**name != key)) name++;
if (*name) {
values[name-argnames] = value;
continue;
}
name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) {
while (*name) {
if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
&& _PyString_Eq(**name, key)) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
if ((**argname == key) || (
(CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
&& _PyString_Eq(**argname, key))) {
goto arg_passed_twice;
}
argname++;
}
}
} else
#endif
if (likely(PyUnicode_Check(key))) {
while (*name) {
int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
PyUnicode_Compare(**name, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
int cmp = (**argname == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
PyUnicode_Compare(**argname, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) goto arg_passed_twice;
argname++;
}
}
} else
goto invalid_keyword_type;
if (kwds2) {
if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
} else {
goto invalid_keyword;
}
}
return 0;
arg_passed_twice:
__Pyx_RaiseDoubleKeywordsError(function_name, key);
goto bad;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%.200s() keywords must be strings", function_name);
goto bad;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%.200s() got an unexpected keyword argument '%.200s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
bad:
return -1;
}
/* ArgTypeTest */
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact)
{
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
else if (exact) {
#if PY_MAJOR_VERSION == 2
if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
#endif
}
else {
if (likely(__Pyx_TypeCheck(obj, type))) return 1;
}
PyErr_Format(PyExc_TypeError,
"Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
name, type->tp_name, Py_TYPE(obj)->tp_name);
return 0;
}
/* IsLittleEndian */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void)
{
union {
uint32_t u32;
uint8_t u8[4];
} S;
S.u32 = 0x01020304;
return S.u8[0] == 4;
}
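/* Endianness probe: 0x01020304 is written through the 32-bit union member
   and the lowest-addressed byte is inspected; it reads back as 0x04 only on
   a little-endian target, so the function returns non-zero exactly there. */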
/* BufferFormatCheck */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type) {
stack[0].field = &ctx->root;
stack[0].parent_offset = 0;
ctx->root.type = type;
ctx->root.name = "buffer dtype";
ctx->root.offset = 0;
ctx->head = stack;
ctx->head->field = &ctx->root;
ctx->fmt_offset = 0;
ctx->head->parent_offset = 0;
ctx->new_packmode = '@';
ctx->enc_packmode = '@';
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->is_complex = 0;
ctx->is_valid_array = 0;
ctx->struct_alignment = 0;
while (type->typegroup == 'S') {
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = 0;
type = type->fields->type;
}
}
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
int count;
const char* t = *ts;
if (*t < '0' || *t > '9') {
return -1;
} else {
count = *t++ - '0';
    while (*t >= '0' && *t <= '9') {
count *= 10;
count += *t++ - '0';
}
}
*ts = t;
return count;
}
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
int number = __Pyx_BufFmt_ParseNumber(ts);
if (number == -1)
PyErr_Format(PyExc_ValueError,\
"Does not understand character buffer dtype format string ('%c')", **ts);
return number;
}
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
PyErr_Format(PyExc_ValueError,
"Unexpected format string character: '%c'", ch);
}
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
switch (ch) {
case 'c': return "'char'";
case 'b': return "'signed char'";
case 'B': return "'unsigned char'";
case 'h': return "'short'";
case 'H': return "'unsigned short'";
case 'i': return "'int'";
case 'I': return "'unsigned int'";
case 'l': return "'long'";
case 'L': return "'unsigned long'";
case 'q': return "'long long'";
case 'Q': return "'unsigned long long'";
case 'f': return (is_complex ? "'complex float'" : "'float'");
case 'd': return (is_complex ? "'complex double'" : "'double'");
case 'g': return (is_complex ? "'complex long double'" : "'long double'");
case 'T': return "a struct";
case 'O': return "Python object";
case 'P': return "a pointer";
case 's': case 'p': return "a string";
case 0: return "end";
default: return "unparseable format string";
}
}
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return 2;
case 'i': case 'I': case 'l': case 'L': return 4;
case 'q': case 'Q': return 8;
case 'f': return (is_complex ? 8 : 4);
case 'd': return (is_complex ? 16 : 8);
case 'g': {
      PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').");
return 0;
}
case 'O': case 'P': return sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
switch (ch) {
case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(short);
case 'i': case 'I': return sizeof(int);
case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(float) * (is_complex ? 2 : 1);
case 'd': return sizeof(double) * (is_complex ? 2 : 1);
case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
case 'O': case 'P': return sizeof(void*);
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
/* These are for computing the padding at the end of the struct to align
   on the first member of the struct. This will probably be the same as above,
but we don't have any guarantees.
*/
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
switch (ch) {
case 'c':
return 'H';
case 'b': case 'h': case 'i':
case 'l': case 'q': case 's': case 'p':
return 'I';
case 'B': case 'H': case 'I': case 'L': case 'Q':
return 'U';
case 'f': case 'd': case 'g':
return (is_complex ? 'C' : 'R');
case 'O':
return 'O';
case 'P':
return 'P';
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
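/* The single-letter groups returned above are Cython's buffer type groups as
   consumed by the checks in __Pyx_BufFmt_ProcessTypeChunk below: 'I' signed
   integers, 'U' unsigned integers, 'R' real floating point, 'C' complex,
   'H' char, 'O' Python object, 'P' pointer (and 'S', tested in
   __Pyx_BufFmt_Init, marks a struct/record type). */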
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
if (ctx->head == NULL || ctx->head->field == &ctx->root) {
const char* expected;
const char* quote;
if (ctx->head == NULL) {
expected = "end";
quote = "";
} else {
expected = ctx->head->field->type->name;
quote = "'";
}
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected %s%s%s but got %s",
quote, expected, quote,
__Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
} else {
__Pyx_StructField* field = ctx->head->field;
__Pyx_StructField* parent = (ctx->head - 1)->field;
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
parent->type->name, field->name);
}
}
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
char group;
size_t size, offset, arraysize = 1;
if (ctx->enc_type == 0) return 0;
if (ctx->head->field->type->arraysize[0]) {
int i, ndim = 0;
if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
ctx->is_valid_array = ctx->head->field->type->ndim == 1;
ndim = 1;
if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %zu",
ctx->head->field->type->arraysize[0], ctx->enc_count);
return -1;
}
}
if (!ctx->is_valid_array) {
PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
ctx->head->field->type->ndim, ndim);
return -1;
}
for (i = 0; i < ctx->head->field->type->ndim; i++) {
arraysize *= ctx->head->field->type->arraysize[i];
}
ctx->is_valid_array = 0;
ctx->enc_count = 1;
}
group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
do {
__Pyx_StructField* field = ctx->head->field;
__Pyx_TypeInfo* type = field->type;
if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
} else {
size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
}
if (ctx->enc_packmode == '@') {
size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
size_t align_mod_offset;
if (align_at == 0) return -1;
align_mod_offset = ctx->fmt_offset % align_at;
if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
if (ctx->struct_alignment == 0)
ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
ctx->is_complex);
}
if (type->size != size || type->typegroup != group) {
if (type->typegroup == 'C' && type->fields != NULL) {
size_t parent_offset = ctx->head->parent_offset + field->offset;
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = parent_offset;
continue;
}
if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
} else {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
}
offset = ctx->head->parent_offset + field->offset;
if (ctx->fmt_offset != offset) {
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
(Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
return -1;
}
ctx->fmt_offset += size;
if (arraysize)
ctx->fmt_offset += (arraysize - 1) * size;
--ctx->enc_count;
while (1) {
if (field == &ctx->root) {
ctx->head = NULL;
if (ctx->enc_count != 0) {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
break;
}
ctx->head->field = ++field;
if (field->type == NULL) {
--ctx->head;
field = ctx->head->field;
continue;
} else if (field->type->typegroup == 'S') {
size_t parent_offset = ctx->head->parent_offset + field->offset;
if (field->type->fields->type == NULL) continue;
field = field->type->fields;
++ctx->head;
ctx->head->field = field;
ctx->head->parent_offset = parent_offset;
break;
} else {
break;
}
}
} while (ctx->enc_count);
ctx->enc_type = 0;
ctx->is_complex = 0;
return 0;
}
static PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
const char *ts = *tsp;
int i = 0, number;
int ndim = ctx->head->field->type->ndim;
++ts;
if (ctx->new_count != 1) {
PyErr_SetString(PyExc_ValueError,
"Cannot handle repeated arrays in format string");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
while (*ts && *ts != ')') {
switch (*ts) {
      case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': ++ts; continue; /* advance past whitespace to avoid looping forever */
default: break;
}
number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
return PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %d",
ctx->head->field->type->arraysize[i], number);
if (*ts != ',' && *ts != ')')
return PyErr_Format(PyExc_ValueError,
"Expected a comma in format string, got '%c'", *ts);
if (*ts == ',') ts++;
i++;
}
if (i != ndim)
return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
ctx->head->field->type->ndim, i);
if (!*ts) {
PyErr_SetString(PyExc_ValueError,
"Unexpected end of format string, expected ')'");
return NULL;
}
ctx->is_valid_array = 1;
ctx->new_count = 1;
*tsp = ++ts;
return Py_None;
}
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
int got_Z = 0;
while (1) {
switch(*ts) {
case 0:
if (ctx->enc_type != 0 && ctx->head == NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
if (ctx->head != NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
return ts;
case ' ':
case '\r':
case '\n':
++ts;
break;
case '<':
if (!__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '>':
case '!':
if (__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '=':
case '@':
case '^':
ctx->new_packmode = *ts++;
break;
case 'T':
{
const char* ts_after_sub;
size_t i, struct_count = ctx->new_count;
size_t struct_alignment = ctx->struct_alignment;
ctx->new_count = 1;
++ts;
if (*ts != '{') {
PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
ctx->enc_count = 0;
ctx->struct_alignment = 0;
++ts;
ts_after_sub = ts;
for (i = 0; i != struct_count; ++i) {
ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
if (!ts_after_sub) return NULL;
}
ts = ts_after_sub;
if (struct_alignment) ctx->struct_alignment = struct_alignment;
}
break;
case '}':
{
size_t alignment = ctx->struct_alignment;
++ts;
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
if (alignment && ctx->fmt_offset % alignment) {
ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
}
}
return ts;
case 'x':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->fmt_offset += ctx->new_count;
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->enc_packmode = ctx->new_packmode;
++ts;
break;
case 'Z':
got_Z = 1;
++ts;
if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
__Pyx_BufFmt_RaiseUnexpectedChar('Z');
return NULL;
}
CYTHON_FALLTHROUGH;
case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
case 'l': case 'L': case 'q': case 'Q':
case 'f': case 'd': case 'g':
case 'O': case 'p':
if (ctx->enc_type == *ts && got_Z == ctx->is_complex &&
ctx->enc_packmode == ctx->new_packmode) {
ctx->enc_count += ctx->new_count;
ctx->new_count = 1;
got_Z = 0;
++ts;
break;
}
CYTHON_FALLTHROUGH;
case 's':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_count = ctx->new_count;
ctx->enc_packmode = ctx->new_packmode;
ctx->enc_type = *ts;
ctx->is_complex = got_Z;
++ts;
ctx->new_count = 1;
got_Z = 0;
break;
case ':':
++ts;
while(*ts != ':') ++ts;
++ts;
break;
case '(':
if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
break;
default:
{
int number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
ctx->new_count = (size_t)number;
}
}
}
}
/* BufferGetAndValidate */
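/* The helpers below acquire a Py_buffer via __Pyx_GetBuffer and validate it
   against the Cython-side __Pyx_TypeInfo: the number of dimensions, the struct
   format string (unless `cast` is set) and the item size must all match,
   otherwise the buffer is released and -1 is returned with a ValueError set. */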
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) {
if (unlikely(info->buf == NULL)) return;
if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL;
__Pyx_ReleaseBuffer(info);
}
static void __Pyx_ZeroBuffer(Py_buffer* buf) {
buf->buf = NULL;
buf->obj = NULL;
buf->strides = __Pyx_zeros;
buf->shape = __Pyx_zeros;
buf->suboffsets = __Pyx_minusones;
}
static int __Pyx__GetBufferAndValidate(
Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags,
int nd, int cast, __Pyx_BufFmt_StackElem* stack)
{
buf->buf = NULL;
if (unlikely(__Pyx_GetBuffer(obj, buf, flags) == -1)) {
__Pyx_ZeroBuffer(buf);
return -1;
}
if (unlikely(buf->ndim != nd)) {
PyErr_Format(PyExc_ValueError,
"Buffer has wrong number of dimensions (expected %d, got %d)",
nd, buf->ndim);
goto fail;
}
if (!cast) {
__Pyx_BufFmt_Context ctx;
__Pyx_BufFmt_Init(&ctx, stack, dtype);
if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
}
if (unlikely((unsigned)buf->itemsize != dtype->size)) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)",
buf->itemsize, (buf->itemsize > 1) ? "s" : "",
dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : "");
goto fail;
}
if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones;
return 0;
fail:;
__Pyx_SafeReleaseBuffer(buf);
return -1;
}
/* PyCFunctionFastCall */
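/* Calls a METH_FASTCALL C function directly with a C array of arguments,
   avoiding the intermediate argument tuple; the keyword-capable variant is
   invoked with NULL for the (empty) keyword-name tuple. */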
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
PyCFunction meth = PyCFunction_GET_FUNCTION(func);
PyObject *self = PyCFunction_GET_SELF(func);
int flags = PyCFunction_GET_FLAGS(func);
assert(PyCFunction_Check(func));
assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS)));
assert(nargs >= 0);
assert(nargs == 0 || args != NULL);
/* _PyCFunction_FastCallDict() must not be called with an exception set,
because it may clear it (directly or indirectly) and so the
caller loses its exception */
assert(!PyErr_Occurred());
if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
return (*((__Pyx_PyCFunctionFastWithKeywords)meth)) (self, args, nargs, NULL);
} else {
return (*((__Pyx_PyCFunctionFast)meth)) (self, args, nargs);
}
}
#endif
/* PyFunctionFastCall */
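/* Fast path for calling pure-Python functions: when the code object has no
   keyword-only arguments, no free variables, and either exact positional
   arguments or only defaults, a frame is created and evaluated directly;
   otherwise the call falls back to PyEval_EvalCodeEx with the keywords
   unpacked into a flat tuple. */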
#if CYTHON_FAST_PYCALL
#include "frameobject.h"
static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
PyObject *globals) {
PyFrameObject *f;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject **fastlocals;
Py_ssize_t i;
PyObject *result;
assert(globals != NULL);
/* XXX Perhaps we should create a specialized
PyFrame_New() that doesn't take locals, but does
take builtins without sanity checking them.
*/
assert(tstate != NULL);
f = PyFrame_New(tstate, co, globals, NULL);
if (f == NULL) {
return NULL;
}
fastlocals = f->f_localsplus;
for (i = 0; i < na; i++) {
Py_INCREF(*args);
fastlocals[i] = *args++;
}
result = PyEval_EvalFrameEx(f,0);
++tstate->recursion_depth;
Py_DECREF(f);
--tstate->recursion_depth;
return result;
}
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs) {
PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
PyObject *globals = PyFunction_GET_GLOBALS(func);
PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
PyObject *closure;
#if PY_MAJOR_VERSION >= 3
PyObject *kwdefs;
#endif
PyObject *kwtuple, **k;
PyObject **d;
Py_ssize_t nd;
Py_ssize_t nk;
PyObject *result;
assert(kwargs == NULL || PyDict_Check(kwargs));
nk = kwargs ? PyDict_Size(kwargs) : 0;
if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
return NULL;
}
if (
#if PY_MAJOR_VERSION >= 3
co->co_kwonlyargcount == 0 &&
#endif
likely(kwargs == NULL || nk == 0) &&
co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
if (argdefs == NULL && co->co_argcount == nargs) {
result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
goto done;
}
else if (nargs == 0 && argdefs != NULL
&& co->co_argcount == Py_SIZE(argdefs)) {
/* function called with no arguments, but all parameters have
           a default value: use default values as arguments. */
args = &PyTuple_GET_ITEM(argdefs, 0);
result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
goto done;
}
}
if (kwargs != NULL) {
Py_ssize_t pos, i;
kwtuple = PyTuple_New(2 * nk);
if (kwtuple == NULL) {
result = NULL;
goto done;
}
k = &PyTuple_GET_ITEM(kwtuple, 0);
pos = i = 0;
while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
Py_INCREF(k[i]);
Py_INCREF(k[i+1]);
i += 2;
}
nk = i / 2;
}
else {
kwtuple = NULL;
k = NULL;
}
closure = PyFunction_GET_CLOSURE(func);
#if PY_MAJOR_VERSION >= 3
kwdefs = PyFunction_GET_KW_DEFAULTS(func);
#endif
if (argdefs != NULL) {
d = &PyTuple_GET_ITEM(argdefs, 0);
nd = Py_SIZE(argdefs);
}
else {
d = NULL;
nd = 0;
}
#if PY_MAJOR_VERSION >= 3
result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
args, nargs,
k, (int)nk,
d, (int)nd, kwdefs, closure);
#else
result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
args, nargs,
k, (int)nk,
d, (int)nd, closure);
#endif
Py_XDECREF(kwtuple);
done:
Py_LeaveRecursiveCall();
return result;
}
#endif
#endif
/* PyObjectCall */
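/* Invokes the type's tp_call slot directly (with the standard recursion
   guard), falling back to PyObject_Call when the slot is missing, and turns a
   NULL result without a pending exception into a SystemError. */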
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = func->ob_type->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = (*call)(func, arg, kw);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyObjectCallMethO */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
PyObject *self, *result;
PyCFunction cfunc;
cfunc = PyCFunction_GET_FUNCTION(func);
self = PyCFunction_GET_SELF(func);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = cfunc(self, arg);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyObjectCallOneArg */
#if CYTHON_COMPILING_IN_CPYTHON
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_New(1);
if (unlikely(!args)) return NULL;
Py_INCREF(arg);
PyTuple_SET_ITEM(args, 0, arg);
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(func)) {
return __Pyx_PyFunction_FastCall(func, &arg, 1);
}
#endif
if (likely(PyCFunction_Check(func))) {
if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
return __Pyx_PyObject_CallMethO(func, arg);
#if CYTHON_FAST_PYCCALL
} else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) {
return __Pyx_PyCFunction_FastCall(func, &arg, 1);
#endif
}
}
return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_Pack(1, arg);
if (unlikely(!args)) return NULL;
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
#endif
/* PyObjectCallNoArg */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(func)) {
return __Pyx_PyFunction_FastCall(func, NULL, 0);
}
#endif
#ifdef __Pyx_CyFunction_USED
if (likely(PyCFunction_Check(func) || __Pyx_TypeCheck(func, __pyx_CyFunctionType))) {
#else
if (likely(PyCFunction_Check(func))) {
#endif
if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) {
return __Pyx_PyObject_CallMethO(func, NULL);
}
}
return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL);
}
#endif
/* GetItemInt */
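/* Integer indexing helpers: exact lists and tuples are read through the
   PyList_GET_ITEM / PyTuple_GET_ITEM macros with optional wraparound and
   bounds checks, other sequences go through the sq_item slot, and everything
   else falls back to PyObject_GetItem with a boxed index. */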
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
PyObject *r;
if (!j) return NULL;
r = PyObject_GetItem(o, j);
Py_DECREF(j);
return r;
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyList_GET_SIZE(o);
}
if ((!boundscheck) || likely((0 <= wrapped_i) & (wrapped_i < PyList_GET_SIZE(o)))) {
PyObject *r = PyList_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyTuple_GET_SIZE(o);
}
if ((!boundscheck) || likely((0 <= wrapped_i) & (wrapped_i < PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
if (is_list || PyList_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) {
PyObject *r = PyList_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
}
else if (PyTuple_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
} else {
PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
if (likely(m && m->sq_item)) {
if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
Py_ssize_t l = m->sq_length(o);
if (likely(l >= 0)) {
i += l;
} else {
if (!PyErr_ExceptionMatches(PyExc_OverflowError))
return NULL;
PyErr_Clear();
}
}
return m->sq_item(o, i);
}
}
#else
if (is_list || PySequence_Check(o)) {
return PySequence_GetItem(o, i);
}
#endif
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
/* GetModuleGlobalName */
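/* Looks a name up in the module dict (__pyx_d) first, using the cached hash of
   the interned name string where CPython exposes _PyDict_GetItem_KnownHash,
   and falls back to the builtins via __Pyx_GetBuiltinName when not found. */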
static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) {
PyObject *result;
#if !CYTHON_AVOID_BORROWED_REFS
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash);
if (likely(result)) {
Py_INCREF(result);
} else if (unlikely(PyErr_Occurred())) {
result = NULL;
} else {
#else
result = PyDict_GetItem(__pyx_d, name);
if (likely(result)) {
Py_INCREF(result);
} else {
#endif
#else
result = PyObject_GetItem(__pyx_d, name);
if (!result) {
PyErr_Clear();
#endif
result = __Pyx_GetBuiltinName(name);
}
return result;
}
/* ObjectGetItem */
#if CYTHON_USE_TYPE_SLOTS
static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) {
PyObject *runerr;
Py_ssize_t key_value;
PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence;
if (unlikely(!(m && m->sq_item))) {
PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name);
return NULL;
}
key_value = __Pyx_PyIndex_AsSsize_t(index);
if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) {
return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1);
}
if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) {
PyErr_Clear();
PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name);
}
return NULL;
}
static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) {
PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping;
if (likely(m && m->mp_subscript)) {
return m->mp_subscript(obj, key);
}
return __Pyx_PyObject_GetIndex(obj, key);
}
#endif
/* PyIntBinop */
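/* Specialised addition of a Python integer and a compile-time C constant:
   small PyLong values (up to four digits) are unpacked into a C long or
   long long and added without intermediate objects, floats are handled
   inline, and anything else falls back to PyNumber_Add / PyNumber_InPlaceAdd. */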
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED int inplace) {
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(op1))) {
const long b = intval;
long x;
long a = PyInt_AS_LONG(op1);
x = (long)((unsigned long)a + b);
if (likely((x^a) >= 0 || (x^b) >= 0))
return PyInt_FromLong(x);
return PyLong_Type.tp_as_number->nb_add(op1, op2);
}
#endif
#if CYTHON_USE_PYLONG_INTERNALS
if (likely(PyLong_CheckExact(op1))) {
const long b = intval;
long a, x;
#ifdef HAVE_LONG_LONG
const PY_LONG_LONG llb = intval;
PY_LONG_LONG lla, llx;
#endif
const digit* digits = ((PyLongObject*)op1)->ob_digit;
const Py_ssize_t size = Py_SIZE(op1);
if (likely(__Pyx_sst_abs(size) <= 1)) {
a = likely(size) ? digits[0] : 0;
if (size == -1) a = -a;
} else {
switch (size) {
case -2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case -3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case -4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
default: return PyLong_Type.tp_as_number->nb_add(op1, op2);
}
}
x = a + b;
return PyLong_FromLong(x);
#ifdef HAVE_LONG_LONG
long_long:
llx = lla + llb;
return PyLong_FromLongLong(llx);
#endif
}
#endif
if (PyFloat_CheckExact(op1)) {
const long b = intval;
double a = PyFloat_AS_DOUBLE(op1);
double result;
PyFPE_START_PROTECT("add", return NULL)
result = ((double)a) + (double)b;
PyFPE_END_PROTECT(result)
return PyFloat_FromDouble(result);
}
return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2);
}
#endif
/* SetItemInt */
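/* Integer item assignment: exact lists are written through PyList_SET_ITEM
   (releasing the old reference), other sequences go through sq_ass_item with
   optional wraparound of negative indices, and the generic fallback boxes the
   index and calls PyObject_SetItem. */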
static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) {
int r;
if (!j) return -1;
r = PyObject_SetItem(o, j, v);
Py_DECREF(j);
return r;
}
static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, int is_list,
CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
if (is_list || PyList_CheckExact(o)) {
Py_ssize_t n = (!wraparound) ? i : ((likely(i >= 0)) ? i : i + PyList_GET_SIZE(o));
if ((!boundscheck) || likely((n >= 0) & (n < PyList_GET_SIZE(o)))) {
PyObject* old = PyList_GET_ITEM(o, n);
Py_INCREF(v);
PyList_SET_ITEM(o, n, v);
Py_DECREF(old);
return 1;
}
} else {
PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
if (likely(m && m->sq_ass_item)) {
if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
Py_ssize_t l = m->sq_length(o);
if (likely(l >= 0)) {
i += l;
} else {
if (!PyErr_ExceptionMatches(PyExc_OverflowError))
return -1;
PyErr_Clear();
}
}
return m->sq_ass_item(o, i, v);
}
}
#else
#if CYTHON_COMPILING_IN_PYPY
if (is_list || (PySequence_Check(o) && !PyDict_Check(o))) {
#else
if (is_list || PySequence_Check(o)) {
#endif
return PySequence_SetItem(o, i, v);
}
#endif
return __Pyx_SetItemInt_Generic(o, PyInt_FromSsize_t(i), v);
}
/* SliceObject */
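/* Slices an object given C start/stop values and/or Python index objects: on
   Python 2 the sq_slice slot is used where available (with manual wraparound
   of negative indices); otherwise a slice object is built and passed to
   mp_subscript or PyObject_GetItem. */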
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj,
Py_ssize_t cstart, Py_ssize_t cstop,
PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice,
int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) {
#if CYTHON_USE_TYPE_SLOTS
PyMappingMethods* mp;
#if PY_MAJOR_VERSION < 3
PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence;
if (likely(ms && ms->sq_slice)) {
if (!has_cstart) {
if (_py_start && (*_py_start != Py_None)) {
cstart = __Pyx_PyIndex_AsSsize_t(*_py_start);
if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad;
} else
cstart = 0;
}
if (!has_cstop) {
if (_py_stop && (*_py_stop != Py_None)) {
cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop);
if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad;
} else
cstop = PY_SSIZE_T_MAX;
}
if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) {
Py_ssize_t l = ms->sq_length(obj);
if (likely(l >= 0)) {
if (cstop < 0) {
cstop += l;
if (cstop < 0) cstop = 0;
}
if (cstart < 0) {
cstart += l;
if (cstart < 0) cstart = 0;
}
} else {
if (!PyErr_ExceptionMatches(PyExc_OverflowError))
goto bad;
PyErr_Clear();
}
}
return ms->sq_slice(obj, cstart, cstop);
}
#endif
mp = Py_TYPE(obj)->tp_as_mapping;
if (likely(mp && mp->mp_subscript))
#endif
{
PyObject* result;
PyObject *py_slice, *py_start, *py_stop;
if (_py_slice) {
py_slice = *_py_slice;
} else {
PyObject* owned_start = NULL;
PyObject* owned_stop = NULL;
if (_py_start) {
py_start = *_py_start;
} else {
if (has_cstart) {
owned_start = py_start = PyInt_FromSsize_t(cstart);
if (unlikely(!py_start)) goto bad;
} else
py_start = Py_None;
}
if (_py_stop) {
py_stop = *_py_stop;
} else {
if (has_cstop) {
owned_stop = py_stop = PyInt_FromSsize_t(cstop);
if (unlikely(!py_stop)) {
Py_XDECREF(owned_start);
goto bad;
}
} else
py_stop = Py_None;
}
py_slice = PySlice_New(py_start, py_stop, Py_None);
Py_XDECREF(owned_start);
Py_XDECREF(owned_stop);
if (unlikely(!py_slice)) goto bad;
}
#if CYTHON_USE_TYPE_SLOTS
result = mp->mp_subscript(obj, py_slice);
#else
result = PyObject_GetItem(obj, py_slice);
#endif
if (!_py_slice) {
Py_DECREF(py_slice);
}
return result;
}
PyErr_Format(PyExc_TypeError,
"'%.200s' object is unsliceable", Py_TYPE(obj)->tp_name);
bad:
return NULL;
}
/* PyErrFetchRestore */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
tmp_type = tstate->curexc_type;
tmp_value = tstate->curexc_value;
tmp_tb = tstate->curexc_traceback;
tstate->curexc_type = type;
tstate->curexc_value = value;
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
*type = tstate->curexc_type;
*value = tstate->curexc_value;
*tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
}
#endif
/* RaiseException */
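/* Implements the raise statement: normalises the type/value/traceback triple,
   instantiates exception classes when a bare class is raised, attaches an
   explicit __cause__ on Python 3, and installs the supplied traceback. */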
#if PY_MAJOR_VERSION < 3
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
CYTHON_UNUSED PyObject *cause) {
__Pyx_PyThreadState_declare
Py_XINCREF(type);
if (!value || value == Py_None)
value = NULL;
else
Py_INCREF(value);
if (!tb || tb == Py_None)
tb = NULL;
else {
Py_INCREF(tb);
if (!PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto raise_error;
}
}
if (PyType_Check(type)) {
#if CYTHON_COMPILING_IN_PYPY
if (!value) {
Py_INCREF(Py_None);
value = Py_None;
}
#endif
PyErr_NormalizeException(&type, &value, &tb);
} else {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto raise_error;
}
value = type;
type = (PyObject*) Py_TYPE(type);
Py_INCREF(type);
if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto raise_error;
}
}
__Pyx_PyThreadState_assign
__Pyx_ErrRestore(type, value, tb);
return;
raise_error:
Py_XDECREF(value);
Py_XDECREF(type);
Py_XDECREF(tb);
return;
}
#else
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
PyObject* owned_instance = NULL;
if (tb == Py_None) {
tb = 0;
} else if (tb && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto bad;
}
if (value == Py_None)
value = 0;
if (PyExceptionInstance_Check(type)) {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto bad;
}
value = type;
type = (PyObject*) Py_TYPE(value);
} else if (PyExceptionClass_Check(type)) {
PyObject *instance_class = NULL;
if (value && PyExceptionInstance_Check(value)) {
instance_class = (PyObject*) Py_TYPE(value);
if (instance_class != type) {
int is_subclass = PyObject_IsSubclass(instance_class, type);
if (!is_subclass) {
instance_class = NULL;
} else if (unlikely(is_subclass == -1)) {
goto bad;
} else {
type = instance_class;
}
}
}
if (!instance_class) {
PyObject *args;
if (!value)
args = PyTuple_New(0);
else if (PyTuple_Check(value)) {
Py_INCREF(value);
args = value;
} else
args = PyTuple_Pack(1, value);
if (!args)
goto bad;
owned_instance = PyObject_Call(type, args, NULL);
Py_DECREF(args);
if (!owned_instance)
goto bad;
value = owned_instance;
if (!PyExceptionInstance_Check(value)) {
PyErr_Format(PyExc_TypeError,
"calling %R should have returned an instance of "
"BaseException, not %R",
type, Py_TYPE(value));
goto bad;
}
}
} else {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto bad;
}
if (cause) {
PyObject *fixed_cause;
if (cause == Py_None) {
fixed_cause = NULL;
} else if (PyExceptionClass_Check(cause)) {
fixed_cause = PyObject_CallObject(cause, NULL);
if (fixed_cause == NULL)
goto bad;
} else if (PyExceptionInstance_Check(cause)) {
fixed_cause = cause;
Py_INCREF(fixed_cause);
} else {
PyErr_SetString(PyExc_TypeError,
"exception causes must derive from "
"BaseException");
goto bad;
}
PyException_SetCause(value, fixed_cause);
}
PyErr_SetObject(type, value);
if (tb) {
#if CYTHON_COMPILING_IN_PYPY
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
Py_INCREF(tb);
PyErr_Restore(tmp_type, tmp_value, tb);
Py_XDECREF(tmp_tb);
#else
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject* tmp_tb = tstate->curexc_traceback;
if (tb != tmp_tb) {
Py_INCREF(tb);
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_tb);
}
#endif
}
bad:
Py_XDECREF(owned_instance);
return;
}
#endif
/* DictGetItem */
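/* PyDict_GetItem replacement that preserves errors raised during hashing or
   comparison and raises KeyError for the key itself when it is simply absent. */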
#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
PyObject *value;
value = PyDict_GetItemWithError(d, key);
if (unlikely(!value)) {
if (!PyErr_Occurred()) {
PyObject* args = PyTuple_Pack(1, key);
if (likely(args))
PyErr_SetObject(PyExc_KeyError, args);
Py_XDECREF(args);
}
return NULL;
}
Py_INCREF(value);
return value;
}
#endif
/* RaiseTooManyValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
PyErr_Format(PyExc_ValueError,
"too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/* RaiseNeedMoreValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
PyErr_Format(PyExc_ValueError,
"need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
index, (index == 1) ? "" : "s");
}
/* RaiseNoneIterError */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* ExtTypeTest */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
if (likely(__Pyx_TypeCheck(obj, type)))
return 1;
PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
Py_TYPE(obj)->tp_name, type->tp_name);
return 0;
}
/* SaveResetException */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#if PY_VERSION_HEX >= 0x030700A2
*type = tstate->exc_state.exc_type;
*value = tstate->exc_state.exc_value;
*tb = tstate->exc_state.exc_traceback;
#else
*type = tstate->exc_type;
*value = tstate->exc_value;
*tb = tstate->exc_traceback;
#endif
Py_XINCREF(*type);
Py_XINCREF(*value);
Py_XINCREF(*tb);
}
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if PY_VERSION_HEX >= 0x030700A2
tmp_type = tstate->exc_state.exc_type;
tmp_value = tstate->exc_state.exc_value;
tmp_tb = tstate->exc_state.exc_traceback;
tstate->exc_state.exc_type = type;
tstate->exc_state.exc_value = value;
tstate->exc_state.exc_traceback = tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = type;
tstate->exc_value = value;
tstate->exc_traceback = tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
#endif
/* PyErrExceptionMatches */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
PyObject *exc_type = tstate->curexc_type;
if (exc_type == err) return 1;
if (unlikely(!exc_type)) return 0;
if (unlikely(PyTuple_Check(err)))
return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
}
#endif
/* GetException */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) {
#endif
PyObject *local_type, *local_value, *local_tb;
#if CYTHON_FAST_THREAD_STATE
PyObject *tmp_type, *tmp_value, *tmp_tb;
local_type = tstate->curexc_type;
local_value = tstate->curexc_value;
local_tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
#else
PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
PyErr_NormalizeException(&local_type, &local_value, &local_tb);
#if CYTHON_FAST_THREAD_STATE
if (unlikely(tstate->curexc_type))
#else
if (unlikely(PyErr_Occurred()))
#endif
goto bad;
#if PY_MAJOR_VERSION >= 3
if (local_tb) {
if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
goto bad;
}
#endif
Py_XINCREF(local_tb);
Py_XINCREF(local_type);
Py_XINCREF(local_value);
*type = local_type;
*value = local_value;
*tb = local_tb;
#if CYTHON_FAST_THREAD_STATE
#if PY_VERSION_HEX >= 0x030700A2
tmp_type = tstate->exc_state.exc_type;
tmp_value = tstate->exc_state.exc_value;
tmp_tb = tstate->exc_state.exc_traceback;
tstate->exc_state.exc_type = local_type;
tstate->exc_state.exc_value = local_value;
tstate->exc_state.exc_traceback = local_tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = local_type;
tstate->exc_value = local_value;
tstate->exc_traceback = local_tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
#else
PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
return 0;
bad:
*type = 0;
*value = 0;
*tb = 0;
Py_XDECREF(local_type);
Py_XDECREF(local_value);
Py_XDECREF(local_tb);
return -1;
}
/* Import */
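/* Wrapper around __import__ / PyImport_ImportModuleLevelObject that supplies
   the module globals and from-list; on Python 3 a level -1 import first tries
   a package-relative lookup and retries as an absolute import if that fails. */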
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
#if PY_MAJOR_VERSION < 3
PyObject *py_import;
py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
if (!py_import)
goto bad;
#endif
if (from_list)
list = from_list;
else {
empty_list = PyList_New(0);
if (!empty_list)
goto bad;
list = empty_list;
}
global_dict = PyModule_GetDict(__pyx_m);
if (!global_dict)
goto bad;
empty_dict = PyDict_New();
if (!empty_dict)
goto bad;
{
#if PY_MAJOR_VERSION >= 3
if (level == -1) {
if (strchr(__Pyx_MODULE_NAME, '.')) {
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, 1);
if (!module) {
if (!PyErr_ExceptionMatches(PyExc_ImportError))
goto bad;
PyErr_Clear();
}
}
level = 0;
}
#endif
if (!module) {
#if PY_MAJOR_VERSION < 3
PyObject *py_level = PyInt_FromLong(level);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, NULL);
Py_DECREF(py_level);
#else
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, level);
#endif
}
}
bad:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(py_import);
#endif
Py_XDECREF(empty_list);
Py_XDECREF(empty_dict);
return module;
}
/* CLineInTraceback */
#ifndef CYTHON_CLINE_IN_TRACEBACK
static int __Pyx_CLineForTraceback(CYTHON_UNUSED PyThreadState *tstate, int c_line) {
PyObject *use_cline;
PyObject *ptype, *pvalue, *ptraceback;
#if CYTHON_COMPILING_IN_CPYTHON
PyObject **cython_runtime_dict;
#endif
if (unlikely(!__pyx_cython_runtime)) {
return c_line;
}
__Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
#if CYTHON_COMPILING_IN_CPYTHON
cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
if (likely(cython_runtime_dict)) {
use_cline = __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback);
} else
#endif
{
PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
if (use_cline_obj) {
use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
Py_DECREF(use_cline_obj);
} else {
PyErr_Clear();
use_cline = NULL;
}
}
if (!use_cline) {
c_line = 0;
PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
}
else if (PyObject_Not(use_cline) != 0) {
c_line = 0;
}
__Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
return c_line;
}
#endif
/* CodeObjectCache */
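/* Small sorted cache mapping line numbers to PyCodeObject*: lookups use a
   binary search over the entry array, and insertions grow the array in blocks
   of 64 entries. Used to avoid recreating code objects for tracebacks. */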
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
int start = 0, mid = 0, end = count - 1;
if (end >= 0 && code_line > entries[end].code_line) {
return count;
}
while (start < end) {
mid = start + (end - start) / 2;
if (code_line < entries[mid].code_line) {
end = mid;
} else if (code_line > entries[mid].code_line) {
start = mid + 1;
} else {
return mid;
}
}
if (code_line <= entries[mid].code_line) {
return mid;
} else {
return mid + 1;
}
}
static PyCodeObject *__pyx_find_code_object(int code_line) {
PyCodeObject* code_object;
int pos;
if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
return NULL;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
return NULL;
}
code_object = __pyx_code_cache.entries[pos].code_object;
Py_INCREF(code_object);
return code_object;
}
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
int pos, i;
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
if (unlikely(!code_line)) {
return;
}
if (unlikely(!entries)) {
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
if (likely(entries)) {
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = 64;
__pyx_code_cache.count = 1;
entries[0].code_line = code_line;
entries[0].code_object = code_object;
Py_INCREF(code_object);
}
return;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
PyCodeObject* tmp = entries[pos].code_object;
entries[pos].code_object = code_object;
Py_DECREF(tmp);
return;
}
if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
int new_max = __pyx_code_cache.max_count + 64;
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
__pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry));
if (unlikely(!entries)) {
return;
}
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = new_max;
}
for (i=__pyx_code_cache.count; i>pos; i--) {
entries[i] = entries[i-1];
}
entries[pos].code_line = code_line;
entries[pos].code_object = code_object;
__pyx_code_cache.count++;
Py_INCREF(code_object);
}
/* AddTraceback */
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyObject *py_srcfile = 0;
PyObject *py_funcname = 0;
#if PY_MAJOR_VERSION < 3
py_srcfile = PyString_FromString(filename);
#else
py_srcfile = PyUnicode_FromString(filename);
#endif
if (!py_srcfile) goto bad;
if (c_line) {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#else
py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#endif
}
else {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromString(funcname);
#else
py_funcname = PyUnicode_FromString(funcname);
#endif
}
if (!py_funcname) goto bad;
py_code = __Pyx_PyCode_New(
0,
0,
0,
0,
0,
__pyx_empty_bytes, /*PyObject *code,*/
__pyx_empty_tuple, /*PyObject *consts,*/
__pyx_empty_tuple, /*PyObject *names,*/
__pyx_empty_tuple, /*PyObject *varnames,*/
__pyx_empty_tuple, /*PyObject *freevars,*/
__pyx_empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
py_line,
__pyx_empty_bytes /*PyObject *lnotab*/
);
Py_DECREF(py_srcfile);
Py_DECREF(py_funcname);
return py_code;
bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyFrameObject *py_frame = 0;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
if (c_line) {
c_line = __Pyx_CLineForTraceback(tstate, c_line);
}
py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
if (!py_code) {
py_code = __Pyx_CreateCodeObjectForTraceback(
funcname, c_line, py_line, filename);
if (!py_code) goto bad;
__pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
}
py_frame = PyFrame_New(
tstate, /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
__pyx_d, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
__Pyx_PyFrame_SetLineNumber(py_frame, py_line);
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags);
PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
return -1;
}
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
PyObject *obj = view->obj;
if (!obj) return;
if (PyObject_CheckBuffer(obj)) {
PyBuffer_Release(view);
return;
}
if ((0)) {}
else if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view);
view->obj = NULL;
Py_DECREF(obj);
}
#endif
/* CIntFromPyVerify */
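/* Helper macro for the CIntFromPy conversions: verifies that a value obtained
   from a wider conversion function still fits into the target C type, jumping
   to raise_overflow / raise_neg_overflow when it does not. */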
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
{\
func_type value = func_value;\
if (sizeof(target_type) < sizeof(func_type)) {\
if (unlikely(value != (func_type) (target_type) value)) {\
func_type zero = 0;\
if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
return (target_type) -1;\
if (is_unsigned && unlikely(value < zero))\
goto raise_neg_overflow;\
else\
goto raise_overflow;\
}\
}\
return (target_type) value;\
}
/* CIntToPy */
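/* The CIntToPy conversions below pick, via compile-time sizeof comparisons,
   the narrowest CPython constructor that can represent the C type, and fall
   back to _PyLong_FromByteArray for anything wider. */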
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
const long neg_one = (long) -1, const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(long) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(long) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(long) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(long),
little, !is_unsigned);
}
}
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_int(unsigned int value) {
const unsigned int neg_one = (unsigned int) -1, const_zero = (unsigned int) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(unsigned int) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(unsigned int) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(unsigned int) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(unsigned int) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(unsigned int) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(unsigned int),
little, !is_unsigned);
}
}
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
const int neg_one = (int) -1, const_zero = (int) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(int) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(int) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(int) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(int),
little, !is_unsigned);
}
}
/* Declarations */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
return ::std::complex< float >(x, y);
}
#else
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
return x + y*(__pyx_t_float_complex)_Complex_I;
}
#endif
#else
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
__pyx_t_float_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
/* Arithmetic */
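/* Fallback C implementations of float complex arithmetic, used when no native
   complex type is available: division scales by the larger-magnitude component
   of the divisor (Smith's algorithm) to limit overflow, and pow works through
   the polar form for non-trivial exponents. */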
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
#if 1
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
if (b.imag == 0) {
return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real);
} else if (fabsf(b.real) >= fabsf(b.imag)) {
if (b.real == 0 && b.imag == 0) {
return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag);
} else {
float r = b.imag / b.real;
float s = 1.0 / (b.real + b.imag * r);
return __pyx_t_float_complex_from_parts(
(a.real + a.imag * r) * s, (a.imag - a.real * r) * s);
}
} else {
float r = b.real / b.imag;
float s = 1.0 / (b.imag + b.real * r);
return __pyx_t_float_complex_from_parts(
(a.real * r + a.imag) * s, (a.imag * r - a.real) * s);
}
}
#else
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
if (b.imag == 0) {
return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real);
} else {
float denom = b.real * b.real + b.imag * b.imag;
return __pyx_t_float_complex_from_parts(
(a.real * b.real + a.imag * b.imag) / denom,
(a.imag * b.real - a.real * b.imag) / denom);
}
}
#endif
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrtf(z.real*z.real + z.imag*z.imag);
#else
return hypotf(z.real, z.imag);
#endif
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
float r, lnr, theta, z_r, z_theta;
if (b.imag == 0 && b.real == (int)b.real) {
if (b.real < 0) {
float denom = a.real * a.real + a.imag * a.imag;
a.real = a.real / denom;
a.imag = -a.imag / denom;
b.real = -b.real;
}
switch ((int)b.real) {
case 0:
z.real = 1;
z.imag = 0;
return z;
case 1:
return a;
case 2:
                    return __Pyx_c_prod_float(a, a);
case 3:
z = __Pyx_c_prod_float(a, a);
return __Pyx_c_prod_float(z, a);
case 4:
z = __Pyx_c_prod_float(a, a);
return __Pyx_c_prod_float(z, z);
}
}
if (a.imag == 0) {
if (a.real == 0) {
return a;
} else if (b.imag == 0) {
z.real = powf(a.real, b.real);
z.imag = 0;
return z;
} else if (a.real > 0) {
r = a.real;
theta = 0;
} else {
r = -a.real;
theta = atan2f(0, -1);
}
} else {
r = __Pyx_c_abs_float(a);
theta = atan2f(a.imag, a.real);
}
lnr = logf(r);
z_r = expf(lnr * b.real - theta * b.imag);
z_theta = theta * b.real + lnr * b.imag;
z.real = z_r * cosf(z_theta);
z.imag = z_r * sinf(z_theta);
return z;
}
#endif
#endif
/* Declarations */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return ::std::complex< double >(x, y);
}
#else
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return x + y*(__pyx_t_double_complex)_Complex_I;
}
#endif
#else
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
__pyx_t_double_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
/* Arithmetic */
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
#if 1
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
if (b.imag == 0) {
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real);
} else if (fabs(b.real) >= fabs(b.imag)) {
if (b.real == 0 && b.imag == 0) {
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag);
} else {
double r = b.imag / b.real;
double s = 1.0 / (b.real + b.imag * r);
return __pyx_t_double_complex_from_parts(
(a.real + a.imag * r) * s, (a.imag - a.real * r) * s);
}
} else {
double r = b.real / b.imag;
double s = 1.0 / (b.imag + b.real * r);
return __pyx_t_double_complex_from_parts(
(a.real * r + a.imag) * s, (a.imag * r - a.real) * s);
}
}
#else
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
if (b.imag == 0) {
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real);
} else {
double denom = b.real * b.real + b.imag * b.imag;
return __pyx_t_double_complex_from_parts(
(a.real * b.real + a.imag * b.imag) / denom,
(a.imag * b.real - a.real * b.imag) / denom);
}
}
#endif
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrt(z.real*z.real + z.imag*z.imag);
#else
return hypot(z.real, z.imag);
#endif
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
double r, lnr, theta, z_r, z_theta;
if (b.imag == 0 && b.real == (int)b.real) {
if (b.real < 0) {
double denom = a.real * a.real + a.imag * a.imag;
a.real = a.real / denom;
a.imag = -a.imag / denom;
b.real = -b.real;
}
switch ((int)b.real) {
case 0:
z.real = 1;
z.imag = 0;
return z;
case 1:
return a;
case 2:
return __Pyx_c_prod_double(a, a);
case 3:
z = __Pyx_c_prod_double(a, a);
return __Pyx_c_prod_double(z, a);
case 4:
z = __Pyx_c_prod_double(a, a);
return __Pyx_c_prod_double(z, z);
}
}
if (a.imag == 0) {
if (a.real == 0) {
return a;
} else if (b.imag == 0) {
z.real = pow(a.real, b.real);
z.imag = 0;
return z;
} else if (a.real > 0) {
r = a.real;
theta = 0;
} else {
r = -a.real;
theta = atan2(0, -1);
}
} else {
r = __Pyx_c_abs_double(a);
theta = atan2(a.imag, a.real);
}
lnr = log(r);
z_r = exp(lnr * b.real - theta * b.imag);
z_theta = theta * b.real + lnr * b.imag;
z.real = z_r * cos(z_theta);
z.imag = z_r * sin(z_theta);
return z;
}
#endif
#endif
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) {
const enum NPY_TYPES neg_one = (enum NPY_TYPES) -1, const_zero = (enum NPY_TYPES) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(enum NPY_TYPES) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(enum NPY_TYPES) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES),
little, !is_unsigned);
}
}
/* CIntFromPy */
static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *x) {
const unsigned int neg_one = (unsigned int) -1, const_zero = (unsigned int) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(unsigned int) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(unsigned int, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (unsigned int) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (unsigned int) 0;
case 1: __PYX_VERIFY_RETURN_INT(unsigned int, digit, digits[0])
case 2:
if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(unsigned int) >= 2 * PyLong_SHIFT) {
return (unsigned int) (((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(unsigned int) >= 3 * PyLong_SHIFT) {
return (unsigned int) (((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(unsigned int) >= 4 * PyLong_SHIFT) {
return (unsigned int) (((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (unsigned int) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(unsigned int) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(unsigned int) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (unsigned int) 0;
case -1: __PYX_VERIFY_RETURN_INT(unsigned int, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(unsigned int, digit, +digits[0])
case -2:
if (8 * sizeof(unsigned int) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) {
return (unsigned int) (((unsigned int)-1)*(((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) {
return (unsigned int) ((((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) {
return (unsigned int) (((unsigned int)-1)*(((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) {
return (unsigned int) ((((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) {
return (unsigned int) (((unsigned int)-1)*(((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) {
return (unsigned int) ((((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])));
}
}
break;
}
#endif
if (sizeof(unsigned int) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(unsigned int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(unsigned int) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(unsigned int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
unsigned int val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (unsigned int) -1;
}
} else {
unsigned int val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (unsigned int) -1;
val = __Pyx_PyInt_As_unsigned_int(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to unsigned int");
return (unsigned int) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to unsigned int");
return (unsigned int) -1;
}
/* CIntFromPy */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
const int neg_one = (int) -1, const_zero = (int) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(int) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (int) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (int) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(int) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
case -2:
if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
}
#endif
if (sizeof(int) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
int val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (int) -1;
}
} else {
int val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (int) -1;
val = __Pyx_PyInt_As_int(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to int");
return (int) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to int");
return (int) -1;
}
/* CIntFromPy */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
const long neg_one = (long) -1, const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(long) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (long) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (long) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(long) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
case -2:
if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
}
#endif
if (sizeof(long) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
long val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (long) -1;
}
} else {
long val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (long) -1;
val = __Pyx_PyInt_As_long(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to long");
return (long) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long) -1;
}
/* FastTypeChecks */
#if CYTHON_COMPILING_IN_CPYTHON
static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
while (a) {
a = a->tp_base;
if (a == b)
return 1;
}
return b == &PyBaseObject_Type;
}
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
PyObject *mro;
if (a == b) return 1;
mro = a->tp_mro;
if (likely(mro)) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(mro);
for (i = 0; i < n; i++) {
if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
return 1;
}
return 0;
}
return __Pyx_InBases(a, b);
}
#if PY_MAJOR_VERSION == 2
static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
PyObject *exception, *value, *tb;
int res;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&exception, &value, &tb);
res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
if (!res) {
res = PyObject_IsSubclass(err, exc_type2);
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
}
__Pyx_ErrRestore(exception, value, tb);
return res;
}
#else
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
if (!res) {
res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
}
return res;
}
#endif
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
if (likely(err == exc_type)) return 1;
if (likely(PyExceptionClass_Check(err))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
}
return PyErr_GivenExceptionMatches(err, exc_type);
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
if (likely(err == exc_type1 || err == exc_type2)) return 1;
if (likely(PyExceptionClass_Check(err))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
}
return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
}
#endif
/* CheckBinaryVersion */
static int __Pyx_check_binary_version(void) {
char ctversion[4], rtversion[4];
PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
char message[200];
PyOS_snprintf(message, sizeof(message),
"compiletime version %s of module '%.100s' "
"does not match runtime version %s",
ctversion, __Pyx_MODULE_NAME, rtversion);
return PyErr_WarnEx(NULL, message, 1);
}
return 0;
}
/* ModuleImport */
#ifndef __PYX_HAVE_RT_ImportModule
#define __PYX_HAVE_RT_ImportModule
static PyObject *__Pyx_ImportModule(const char *name) {
PyObject *py_name = 0;
PyObject *py_module = 0;
py_name = __Pyx_PyIdentifier_FromString(name);
if (!py_name)
goto bad;
py_module = PyImport_Import(py_name);
Py_DECREF(py_name);
return py_module;
bad:
Py_XDECREF(py_name);
return 0;
}
#endif
/* TypeImport */
#ifndef __PYX_HAVE_RT_ImportType
#define __PYX_HAVE_RT_ImportType
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name,
size_t size, int strict)
{
PyObject *py_module = 0;
PyObject *result = 0;
PyObject *py_name = 0;
char warning[200];
Py_ssize_t basicsize;
#ifdef Py_LIMITED_API
PyObject *py_basicsize;
#endif
py_module = __Pyx_ImportModule(module_name);
if (!py_module)
goto bad;
py_name = __Pyx_PyIdentifier_FromString(class_name);
if (!py_name)
goto bad;
result = PyObject_GetAttr(py_module, py_name);
Py_DECREF(py_name);
py_name = 0;
Py_DECREF(py_module);
py_module = 0;
if (!result)
goto bad;
if (!PyType_Check(result)) {
PyErr_Format(PyExc_TypeError,
"%.200s.%.200s is not a type object",
module_name, class_name);
goto bad;
}
#ifndef Py_LIMITED_API
basicsize = ((PyTypeObject *)result)->tp_basicsize;
#else
py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
if (!py_basicsize)
goto bad;
basicsize = PyLong_AsSsize_t(py_basicsize);
Py_DECREF(py_basicsize);
py_basicsize = 0;
if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
goto bad;
#endif
if (!strict && (size_t)basicsize > size) {
PyOS_snprintf(warning, sizeof(warning),
"%s.%s size changed, may indicate binary incompatibility. Expected %zd, got %zd",
module_name, class_name, basicsize, size);
if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
}
else if ((size_t)basicsize != size) {
PyErr_Format(PyExc_ValueError,
"%.200s.%.200s has the wrong size, try recompiling. Expected %zd, got %zd",
module_name, class_name, basicsize, size);
goto bad;
}
return (PyTypeObject *)result;
bad:
Py_XDECREF(py_module);
Py_XDECREF(result);
return NULL;
}
#endif
/* InitStrings */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
if (PyObject_Hash(*t->p) == -1)
return -1;
++t;
}
return 0;
}
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
Py_ssize_t ignore;
return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
#if !CYTHON_PEP393_ENABLED
static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
char* defenc_c;
PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
if (!defenc) return NULL;
defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
{
char* end = defenc_c + PyBytes_GET_SIZE(defenc);
char* c;
for (c = defenc_c; c < end; c++) {
if ((unsigned char) (*c) >= 128) {
PyUnicode_AsASCIIString(o);
return NULL;
}
}
}
#endif
*length = PyBytes_GET_SIZE(defenc);
return defenc_c;
}
#else
static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
if (likely(PyUnicode_IS_ASCII(o))) {
*length = PyUnicode_GET_LENGTH(o);
return PyUnicode_AsUTF8(o);
} else {
PyUnicode_AsASCIIString(o);
return NULL;
}
#else
return PyUnicode_AsUTF8AndSize(o, length);
#endif
}
#endif
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
__Pyx_sys_getdefaultencoding_not_ascii &&
#endif
PyUnicode_Check(o)) {
return __Pyx_PyUnicode_AsStringAndSize(o, length);
} else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
{
char* result;
int r = PyBytes_AsStringAndSize(o, &result, length);
if (unlikely(r < 0)) {
return NULL;
} else {
return result;
}
}
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
int is_true = x == Py_True;
if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
else return PyObject_IsTrue(x);
}
static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
#if PY_MAJOR_VERSION >= 3
if (PyLong_Check(result)) {
if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
"__int__ returned non-int (type %.200s). "
"The ability to return an instance of a strict subclass of int "
"is deprecated, and may be removed in a future version of Python.",
Py_TYPE(result)->tp_name)) {
Py_DECREF(result);
return NULL;
}
return result;
}
#endif
PyErr_Format(PyExc_TypeError,
"__%.4s__ returned non-%.4s (type %.200s)",
type_name, type_name, Py_TYPE(result)->tp_name);
Py_DECREF(result);
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
PyNumberMethods *m;
#endif
const char *name = NULL;
PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x) || PyLong_Check(x)))
#else
if (likely(PyLong_Check(x)))
#endif
return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = m->nb_int(x);
}
else if (m && m->nb_long) {
name = "long";
res = m->nb_long(x);
}
#else
if (likely(m && m->nb_int)) {
name = "int";
res = m->nb_int(x);
}
#endif
#else
if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
res = PyNumber_Int(x);
}
#endif
if (likely(res)) {
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
#else
if (unlikely(!PyLong_CheckExact(res))) {
#endif
return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject *x;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(b))) {
if (sizeof(Py_ssize_t) >= sizeof(long))
return PyInt_AS_LONG(b);
else
return PyInt_AsSsize_t(x);
}
#endif
if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)b)->ob_digit;
const Py_ssize_t size = Py_SIZE(b);
if (likely(__Pyx_sst_abs(size) <= 1)) {
ival = likely(size) ? digits[0] : 0;
if (size == -1) ival = -ival;
return ival;
} else {
switch (size) {
case 2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
}
}
#endif
return PyLong_AsSsize_t(b);
}
x = PyNumber_Index(b);
if (!x) return -1;
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
}
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
|
Cream/CDARTS/CDARTS_detection/mmdet/ops/nms/src/soft_nms_cpu.cpp/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/nms/src/soft_nms_cpu.cpp",
"repo_id": "Cream",
"token_count": 224293
}
| 305 |
# Modified from flops-counter.pytorch by Vladislav Sovrasov
# original repo: https://github.com/sovrasov/flops-counter.pytorch
# MIT License
# Copyright (c) 2018 Vladislav Sovrasov
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import numpy as np
import torch
import torch.nn as nn
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.modules.conv import _ConvNd, _ConvTransposeMixin
from torch.nn.modules.pooling import (_AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd,
_AvgPoolNd, _MaxPoolNd)
CONV_TYPES = (_ConvNd, )
DECONV_TYPES = (_ConvTransposeMixin, )
LINEAR_TYPES = (nn.Linear, )
POOLING_TYPES = (_AvgPoolNd, _MaxPoolNd, _AdaptiveAvgPoolNd,
_AdaptiveMaxPoolNd)
RELU_TYPES = (nn.ReLU, nn.PReLU, nn.ELU, nn.LeakyReLU, nn.ReLU6)
BN_TYPES = (_BatchNorm, )
UPSAMPLE_TYPES = (nn.Upsample, )
SUPPORTED_TYPES = (
CONV_TYPES + DECONV_TYPES + LINEAR_TYPES + POOLING_TYPES + RELU_TYPES +
BN_TYPES + UPSAMPLE_TYPES)
def get_model_complexity_info(model,
input_res,
print_per_layer_stat=True,
as_strings=True,
input_constructor=None,
ost=sys.stdout):
assert type(input_res) is tuple
assert len(input_res) >= 2
flops_model = add_flops_counting_methods(model)
flops_model.eval().start_flops_count()
if input_constructor:
input = input_constructor(input_res)
_ = flops_model(**input)
else:
batch = torch.ones(()).new_empty(
(1, *input_res),
dtype=next(flops_model.parameters()).dtype,
device=next(flops_model.parameters()).device)
flops_model(batch)
if print_per_layer_stat:
print_model_with_flops(flops_model, ost=ost)
flops_count = flops_model.compute_average_flops_cost()
params_count = get_model_parameters_number(flops_model)
flops_model.stop_flops_count()
if as_strings:
return flops_to_string(flops_count), params_to_string(params_count)
return flops_count, params_count
def flops_to_string(flops, units='GMac', precision=2):
if units is None:
if flops // 10**9 > 0:
return str(round(flops / 10.**9, precision)) + ' GMac'
elif flops // 10**6 > 0:
return str(round(flops / 10.**6, precision)) + ' MMac'
elif flops // 10**3 > 0:
return str(round(flops / 10.**3, precision)) + ' KMac'
else:
return str(flops) + ' Mac'
else:
if units == 'GMac':
return str(round(flops / 10.**9, precision)) + ' ' + units
elif units == 'MMac':
return str(round(flops / 10.**6, precision)) + ' ' + units
elif units == 'KMac':
return str(round(flops / 10.**3, precision)) + ' ' + units
else:
return str(flops) + ' Mac'
def params_to_string(params_num):
"""converting number to string
:param float params_num: number
:returns str: number
>>> params_to_string(1e9)
'1000.0 M'
>>> params_to_string(2e5)
'200.0 k'
>>> params_to_string(3e-9)
'3e-09'
"""
if params_num // 10**6 > 0:
return str(round(params_num / 10**6, 2)) + ' M'
elif params_num // 10**3:
return str(round(params_num / 10**3, 2)) + ' k'
else:
return str(params_num)
def print_model_with_flops(model, units='GMac', precision=3, ost=sys.stdout):
total_flops = model.compute_average_flops_cost()
def accumulate_flops(self):
if is_supported_instance(self):
return self.__flops__ / model.__batch_counter__
else:
sum = 0
for m in self.children():
sum += m.accumulate_flops()
return sum
def flops_repr(self):
accumulated_flops_cost = self.accumulate_flops()
return ', '.join([
flops_to_string(
accumulated_flops_cost, units=units, precision=precision),
'{:.3%} MACs'.format(accumulated_flops_cost / total_flops),
self.original_extra_repr()
])
def add_extra_repr(m):
m.accumulate_flops = accumulate_flops.__get__(m)
flops_extra_repr = flops_repr.__get__(m)
if m.extra_repr != flops_extra_repr:
m.original_extra_repr = m.extra_repr
m.extra_repr = flops_extra_repr
assert m.extra_repr != m.original_extra_repr
def del_extra_repr(m):
if hasattr(m, 'original_extra_repr'):
m.extra_repr = m.original_extra_repr
del m.original_extra_repr
if hasattr(m, 'accumulate_flops'):
del m.accumulate_flops
model.apply(add_extra_repr)
print(model, file=ost)
model.apply(del_extra_repr)
def get_model_parameters_number(model):
params_num = sum(p.numel() for p in model.parameters() if p.requires_grad)
return params_num
def add_flops_counting_methods(net_main_module):
# adding additional methods to the existing module object,
# this is done this way so that each function has access to self object
net_main_module.start_flops_count = start_flops_count.__get__(
net_main_module)
net_main_module.stop_flops_count = stop_flops_count.__get__(
net_main_module)
net_main_module.reset_flops_count = reset_flops_count.__get__(
net_main_module)
net_main_module.compute_average_flops_cost = \
compute_average_flops_cost.__get__(net_main_module)
net_main_module.reset_flops_count()
# Adding variables necessary for masked flops computation
net_main_module.apply(add_flops_mask_variable_or_reset)
return net_main_module
def compute_average_flops_cost(self):
"""
A method that will be available after add_flops_counting_methods() is
called on a desired net object.
Returns current mean flops consumption per image.
"""
batches_count = self.__batch_counter__
flops_sum = 0
for module in self.modules():
if is_supported_instance(module):
flops_sum += module.__flops__
return flops_sum / batches_count
def start_flops_count(self):
"""
A method that will be available after add_flops_counting_methods() is
called on a desired net object.
Activates the computation of mean flops consumption per image.
Call it before you run the network.
"""
add_batch_counter_hook_function(self)
self.apply(add_flops_counter_hook_function)
def stop_flops_count(self):
"""
A method that will be available after add_flops_counting_methods() is
called on a desired net object.
Stops computing the mean flops consumption per image.
Call whenever you want to pause the computation.
"""
remove_batch_counter_hook_function(self)
self.apply(remove_flops_counter_hook_function)
def reset_flops_count(self):
"""
A method that will be available after add_flops_counting_methods() is
called on a desired net object.
Resets statistics computed so far.
"""
add_batch_counter_variables_or_reset(self)
self.apply(add_flops_counter_variable_or_reset)
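# --- Usage sketch (added for illustration; not part of the original file) ---
# Hedged outline of the lower-level counting API that get_model_complexity_info
# wraps; `net` and `images` below are placeholders, not names defined here.
#
#   net = add_flops_counting_methods(net)
#   net.eval().start_flops_count()
#   _ = net(images)                                # one or more forward passes
#   avg_flops = net.compute_average_flops_cost()   # mean MACs per input image
#   net.stop_flops_count()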
def add_flops_mask(module, mask):
def add_flops_mask_func(module):
if isinstance(module, torch.nn.Conv2d):
module.__mask__ = mask
module.apply(add_flops_mask_func)
def remove_flops_mask(module):
module.apply(add_flops_mask_variable_or_reset)
def is_supported_instance(module):
if isinstance(module, SUPPORTED_TYPES):
return True
else:
return False
def empty_flops_counter_hook(module, input, output):
module.__flops__ += 0
def upsample_flops_counter_hook(module, input, output):
output_size = output[0]
batch_size = output_size.shape[0]
output_elements_count = batch_size
for val in output_size.shape[1:]:
output_elements_count *= val
module.__flops__ += int(output_elements_count)
def relu_flops_counter_hook(module, input, output):
active_elements_count = output.numel()
module.__flops__ += int(active_elements_count)
def linear_flops_counter_hook(module, input, output):
input = input[0]
batch_size = input.shape[0]
module.__flops__ += int(batch_size * input.shape[1] * output.shape[1])
def pool_flops_counter_hook(module, input, output):
input = input[0]
module.__flops__ += int(np.prod(input.shape))
def bn_flops_counter_hook(module, input, output):
input = input[0]
batch_flops = np.prod(input.shape)
if module.affine:
batch_flops *= 2
module.__flops__ += int(batch_flops)
def deconv_flops_counter_hook(conv_module, input, output):
# Can have multiple inputs, getting the first one
input = input[0]
batch_size = input.shape[0]
input_height, input_width = input.shape[2:]
kernel_height, kernel_width = conv_module.kernel_size
in_channels = conv_module.in_channels
out_channels = conv_module.out_channels
groups = conv_module.groups
filters_per_channel = out_channels // groups
conv_per_position_flops = (
kernel_height * kernel_width * in_channels * filters_per_channel)
active_elements_count = batch_size * input_height * input_width
overall_conv_flops = conv_per_position_flops * active_elements_count
bias_flops = 0
if conv_module.bias is not None:
output_height, output_width = output.shape[2:]
        bias_flops = out_channels * batch_size * output_height * output_width
overall_flops = overall_conv_flops + bias_flops
conv_module.__flops__ += int(overall_flops)
def conv_flops_counter_hook(conv_module, input, output):
# Can have multiple inputs, getting the first one
input = input[0]
batch_size = input.shape[0]
output_dims = list(output.shape[2:])
kernel_dims = list(conv_module.kernel_size)
in_channels = conv_module.in_channels
out_channels = conv_module.out_channels
groups = conv_module.groups
filters_per_channel = out_channels // groups
conv_per_position_flops = np.prod(
kernel_dims) * in_channels * filters_per_channel
active_elements_count = batch_size * np.prod(output_dims)
if conv_module.__mask__ is not None:
# (b, 1, h, w)
output_height, output_width = output.shape[2:]
flops_mask = conv_module.__mask__.expand(batch_size, 1, output_height,
output_width)
active_elements_count = flops_mask.sum()
overall_conv_flops = conv_per_position_flops * active_elements_count
bias_flops = 0
if conv_module.bias is not None:
bias_flops = out_channels * active_elements_count
overall_flops = overall_conv_flops + bias_flops
conv_module.__flops__ += int(overall_flops)
def batch_counter_hook(module, input, output):
batch_size = 1
if len(input) > 0:
# Can have multiple inputs, getting the first one
input = input[0]
batch_size = len(input)
else:
print('Warning! No positional inputs found for a module, '
'assuming batch size is 1.')
module.__batch_counter__ += batch_size
def add_batch_counter_variables_or_reset(module):
module.__batch_counter__ = 0
def add_batch_counter_hook_function(module):
if hasattr(module, '__batch_counter_handle__'):
return
handle = module.register_forward_hook(batch_counter_hook)
module.__batch_counter_handle__ = handle
def remove_batch_counter_hook_function(module):
if hasattr(module, '__batch_counter_handle__'):
module.__batch_counter_handle__.remove()
del module.__batch_counter_handle__
def add_flops_counter_variable_or_reset(module):
if is_supported_instance(module):
module.__flops__ = 0
def add_flops_counter_hook_function(module):
if is_supported_instance(module):
if hasattr(module, '__flops_handle__'):
return
if isinstance(module, CONV_TYPES):
handle = module.register_forward_hook(conv_flops_counter_hook)
elif isinstance(module, RELU_TYPES):
handle = module.register_forward_hook(relu_flops_counter_hook)
elif isinstance(module, LINEAR_TYPES):
handle = module.register_forward_hook(linear_flops_counter_hook)
elif isinstance(module, POOLING_TYPES):
handle = module.register_forward_hook(pool_flops_counter_hook)
elif isinstance(module, BN_TYPES):
handle = module.register_forward_hook(bn_flops_counter_hook)
elif isinstance(module, UPSAMPLE_TYPES):
handle = module.register_forward_hook(upsample_flops_counter_hook)
elif isinstance(module, DECONV_TYPES):
handle = module.register_forward_hook(deconv_flops_counter_hook)
else:
handle = module.register_forward_hook(empty_flops_counter_hook)
module.__flops_handle__ = handle
def remove_flops_counter_hook_function(module):
if is_supported_instance(module):
if hasattr(module, '__flops_handle__'):
module.__flops_handle__.remove()
del module.__flops_handle__
# --- Masked flops counting
# Also being run in the initialization
def add_flops_mask_variable_or_reset(module):
if is_supported_instance(module):
module.__mask__ = None
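# --- Usage sketch (added for illustration; not part of the original file) ---
# Minimal, hedged example of the typical call. torchvision is assumed to be
# installed only to provide a demo model; any nn.Module works, and the exact
# printed values are not asserted here.
if __name__ == '__main__':
    import torchvision.models as models
    demo_net = models.resnet18()
    demo_flops, demo_params = get_model_complexity_info(
        demo_net, (3, 224, 224), as_strings=True, print_per_layer_stat=False)
    print('Flops: {}, Params: {}'.format(demo_flops, demo_params))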
|
Cream/CDARTS/CDARTS_detection/mmdet/utils/flops_counter.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/utils/flops_counter.py",
"repo_id": "Cream",
"token_count": 5977
}
| 306 |
from argparse import ArgumentParser
import mmcv
import numpy as np
from mmdet import datasets
from mmdet.core import eval_map
def voc_eval(result_file, dataset, iou_thr=0.5):
det_results = mmcv.load(result_file)
gt_bboxes = []
gt_labels = []
gt_ignore = []
for i in range(len(dataset)):
ann = dataset.get_ann_info(i)
bboxes = ann['bboxes']
labels = ann['labels']
if 'bboxes_ignore' in ann:
ignore = np.concatenate([
np.zeros(bboxes.shape[0], dtype=np.bool),
np.ones(ann['bboxes_ignore'].shape[0], dtype=np.bool)
])
gt_ignore.append(ignore)
bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
labels = np.concatenate([labels, ann['labels_ignore']])
gt_bboxes.append(bboxes)
gt_labels.append(labels)
if not gt_ignore:
        gt_ignore = None
if hasattr(dataset, 'year') and dataset.year == 2007:
dataset_name = 'voc07'
else:
dataset_name = dataset.CLASSES
eval_map(
det_results,
gt_bboxes,
gt_labels,
gt_ignore=gt_ignore,
scale_ranges=None,
iou_thr=iou_thr,
dataset=dataset_name,
print_summary=True)
def main():
parser = ArgumentParser(description='VOC Evaluation')
parser.add_argument('result', help='result file path')
parser.add_argument('config', help='config file path')
parser.add_argument(
'--iou-thr',
type=float,
default=0.5,
help='IoU threshold for evaluation')
args = parser.parse_args()
cfg = mmcv.Config.fromfile(args.config)
test_dataset = mmcv.runner.obj_from_dict(cfg.data.test, datasets)
voc_eval(args.result, test_dataset, args.iou_thr)
if __name__ == '__main__':
main()
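# --- Usage sketch (added for illustration; not part of the original file) ---
# Typical invocation from the repository root; the result pickle is produced
# by tools/test.py and both paths below are placeholders, not verified files.
#
#   python tools/voc_eval.py work_dirs/voc_results.pkl \
#       configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py --iou-thr 0.5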
|
Cream/CDARTS/CDARTS_detection/tools/voc_eval.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/tools/voc_eval.py",
"repo_id": "Cream",
"token_count": 871
}
| 307 |
from .base_dataset import BaseDataset
from .cityscapes import Cityscapes
from .cityscapes_panoptic import CityscapesPanoptic
from .coco_panoptic import COCOPanoptic
|
Cream/CDARTS/CDARTS_segmentation/dataloaders/segdatasets/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/dataloaders/segdatasets/__init__.py",
"repo_id": "Cream",
"token_count": 56
}
| 308 |
# ------------------------------------------------------------------------------
# Builds dataloader.
# Written by Bowen Cheng ([email protected])
# ------------------------------------------------------------------------------
import logging
import torch
import numpy as np
from .datasets import Cityscapes, CityscapesPanoptic, COCOPanoptic
from . import samplers
from segmentation.utils.comm import get_world_size
from segmentation.utils.env import seed_all_rng
def build_dataset_from_cfg(config, is_train=True):
"""Builds dataset from configuration file.
Args:
config: the configuration file.
is_train: Bool, training or testing, it automatically handles data augmentation.
Returns:
A torch Dataset.
"""
dataset_map = {
'cityscapes': Cityscapes,
'cityscapes_panoptic': CityscapesPanoptic,
'coco_panoptic': COCOPanoptic,
}
dataset_cfg = {
'cityscapes': dict(
root=config.DATASET.ROOT,
split=config.DATASET.TRAIN_SPLIT if is_train else config.DATASET.TEST_SPLIT,
is_train=is_train,
crop_size=config.DATASET.CROP_SIZE if is_train else config.TEST.CROP_SIZE,
mirror=config.DATASET.MIRROR,
min_scale=config.DATASET.MIN_SCALE,
max_scale=config.DATASET.MAX_SCALE,
scale_step_size=config.DATASET.SCALE_STEP_SIZE,
mean=config.DATASET.MEAN,
std=config.DATASET.STD
),
'cityscapes_panoptic': dict(
root=config.DATASET.ROOT,
split=config.DATASET.TRAIN_SPLIT if is_train else config.DATASET.TEST_SPLIT,
is_train=is_train,
crop_size=config.DATASET.CROP_SIZE if is_train else config.TEST.CROP_SIZE,
mirror=config.DATASET.MIRROR,
min_scale=config.DATASET.MIN_SCALE,
max_scale=config.DATASET.MAX_SCALE,
scale_step_size=config.DATASET.SCALE_STEP_SIZE,
mean=config.DATASET.MEAN,
std=config.DATASET.STD,
semantic_only=config.DATASET.SEMANTIC_ONLY,
ignore_stuff_in_offset=config.DATASET.IGNORE_STUFF_IN_OFFSET,
small_instance_area=config.DATASET.SMALL_INSTANCE_AREA,
small_instance_weight=config.DATASET.SMALL_INSTANCE_WEIGHT
),
'coco_panoptic': dict(
root=config.DATASET.ROOT,
split=config.DATASET.TRAIN_SPLIT if is_train else config.DATASET.TEST_SPLIT,
min_resize_value=config.DATASET.MIN_RESIZE_VALUE,
max_resize_value=config.DATASET.MAX_RESIZE_VALUE,
resize_factor=config.DATASET.RESIZE_FACTOR,
is_train=is_train,
crop_size=config.DATASET.CROP_SIZE if is_train else config.TEST.CROP_SIZE,
mirror=config.DATASET.MIRROR,
min_scale=config.DATASET.MIN_SCALE,
max_scale=config.DATASET.MAX_SCALE,
scale_step_size=config.DATASET.SCALE_STEP_SIZE,
mean=config.DATASET.MEAN,
std=config.DATASET.STD,
semantic_only=config.DATASET.SEMANTIC_ONLY,
ignore_stuff_in_offset=config.DATASET.IGNORE_STUFF_IN_OFFSET,
small_instance_area=config.DATASET.SMALL_INSTANCE_AREA,
small_instance_weight=config.DATASET.SMALL_INSTANCE_WEIGHT
),
}
dataset = dataset_map[config.DATASET.DATASET](
**dataset_cfg[config.DATASET.DATASET]
)
return dataset
def build_train_loader_from_cfg(config):
"""Builds dataloader from configuration file.
Args:
config: the configuration file.
Returns:
A torch Dataloader.
"""
num_workers = get_world_size()
images_per_batch = config.TRAIN.IMS_PER_BATCH
assert (
images_per_batch % num_workers == 0
), "TRAIN.IMS_PER_BATCH ({}) must be divisible by the number of workers ({}).".format(
images_per_batch, num_workers
)
assert (
images_per_batch >= num_workers
), "TRAIN.IMS_PER_BATCH ({}) must be larger than the number of workers ({}).".format(
images_per_batch, num_workers
)
images_per_worker = images_per_batch // num_workers
dataset = build_dataset_from_cfg(config, is_train=True)
sampler_name = config.DATALOADER.SAMPLER_TRAIN
logger = logging.getLogger(__name__)
logger.info("Using training sampler {}".format(sampler_name))
if sampler_name == "TrainingSampler":
sampler = samplers.TrainingSampler(len(dataset), shuffle=config.DATALOADER.TRAIN_SHUFFLE)
else:
raise ValueError("Unknown training sampler: {}".format(sampler_name))
batch_sampler = torch.utils.data.sampler.BatchSampler(
sampler, images_per_worker, drop_last=True
)
# drop_last so the batch always have the same size
data_loader = torch.utils.data.DataLoader(
dataset,
num_workers=config.DATALOADER.NUM_WORKERS,
batch_sampler=batch_sampler,
worker_init_fn=worker_init_reset_seed,
)
return data_loader
def build_test_loader_from_cfg(config):
"""Builds dataloader from configuration file.
Args:
config: the configuration file.
Returns:
A torch Dataloader.
"""
dataset = build_dataset_from_cfg(config, is_train=False)
sampler = samplers.InferenceSampler(len(dataset))
# Always use 1 image per worker during inference since this is the
# standard when reporting inference time in papers.
batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, 1, drop_last=False)
data_loader = torch.utils.data.DataLoader(
dataset,
num_workers=config.DATALOADER.NUM_WORKERS,
batch_sampler=batch_sampler,
)
return data_loader
def worker_init_reset_seed(worker_id):
seed_all_rng(np.random.randint(2 ** 31) + worker_id)
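# --- Usage sketch (added for illustration; not part of the original file) ---
# Hedged example of the intended call pattern; `config` is the project's
# yacs-style CfgNode, loaded elsewhere (the exact loading call is an
# assumption and is not shown here).
#
#   train_loader = build_train_loader_from_cfg(config)
#   test_loader = build_test_loader_from_cfg(config)
#   for batch in train_loader:
#       images = batch['image']   # available keys depend on the dataset class
#       ...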
|
Cream/CDARTS/CDARTS_segmentation/segmentation/data/build.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/data/build.py",
"repo_id": "Cream",
"token_count": 2649
}
| 309 |
# ------------------------------------------------------------------------------
# Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/evaluation/panoptic_evaluation.py
# Modified by Bowen Cheng ([email protected])
# ------------------------------------------------------------------------------
import logging
from collections import OrderedDict
import os
import json
import numpy as np
from tabulate import tabulate
from fvcore.common.file_io import PathManager
from segmentation.utils import save_annotation
logger = logging.getLogger(__name__)
class COCOPanopticEvaluator:
"""
Evaluate panoptic segmentation
"""
def __init__(self, output_dir=None, train_id_to_eval_id=None, label_divisor=256, void_label=65280,
gt_dir='./datasets/coco', split='val2017', num_classes=133):
"""
Args:
corresponding pixels should be ignored.
output_dir (str): an output directory to dump results.
train_id_to_eval_id (list): maps training id to evaluation id.
label_divisor (int):
void_label (int):
gt_dir (str): path to ground truth annotations.
split (str): evaluation split.
num_classes (int): number of classes.
"""
if output_dir is None:
raise ValueError('Must provide a output directory.')
self._output_dir = output_dir
if self._output_dir:
PathManager.mkdirs(self._output_dir)
self._panoptic_dir = os.path.join(self._output_dir, 'predictions')
if self._panoptic_dir:
PathManager.mkdirs(self._panoptic_dir)
self._predictions = []
self._predictions_json = os.path.join(output_dir, 'predictions.json')
self._train_id_to_eval_id = train_id_to_eval_id
self._label_divisor = label_divisor
self._void_label = void_label
self._num_classes = num_classes
self._logger = logging.getLogger(__name__)
self._gt_json_file = os.path.join(gt_dir, 'annotations', 'panoptic_{}.json'.format(split))
self._gt_folder = os.path.join(gt_dir, 'annotations', 'panoptic_{}'.format(split))
self._pred_json_file = os.path.join(output_dir, 'predictions.json')
self._pred_folder = self._panoptic_dir
def update(self, panoptic, image_filename=None, image_id=None):
from panopticapi.utils import id2rgb
if image_filename is None:
raise ValueError('Need to provide image_filename.')
if image_id is None:
raise ValueError('Need to provide image_id.')
# Change void region.
panoptic[panoptic == self._void_label] = 0
segments_info = []
for pan_lab in np.unique(panoptic):
pred_class = pan_lab // self._label_divisor
if self._train_id_to_eval_id is not None:
pred_class = self._train_id_to_eval_id[pred_class]
segments_info.append(
{
'id': int(pan_lab),
'category_id': int(pred_class),
}
)
save_annotation(id2rgb(panoptic), self._panoptic_dir, image_filename, add_colormap=False)
self._predictions.append(
{
'image_id': int(image_id),
'file_name': image_filename + '.png',
'segments_info': segments_info,
}
)
def evaluate(self):
from panopticapi.evaluation import pq_compute
gt_json_file = self._gt_json_file
gt_folder = self._gt_folder
pred_json_file = self._pred_json_file
pred_folder = self._pred_folder
with open(gt_json_file, "r") as f:
json_data = json.load(f)
json_data["annotations"] = self._predictions
with PathManager.open(self._predictions_json, "w") as f:
f.write(json.dumps(json_data))
pq_res = pq_compute(gt_json_file, pred_json_file, gt_folder, pred_folder)
res = {}
res["PQ"] = 100 * pq_res["All"]["pq"]
res["SQ"] = 100 * pq_res["All"]["sq"]
res["RQ"] = 100 * pq_res["All"]["rq"]
res["PQ_th"] = 100 * pq_res["Things"]["pq"]
res["SQ_th"] = 100 * pq_res["Things"]["sq"]
res["RQ_th"] = 100 * pq_res["Things"]["rq"]
res["PQ_st"] = 100 * pq_res["Stuff"]["pq"]
res["SQ_st"] = 100 * pq_res["Stuff"]["sq"]
res["RQ_st"] = 100 * pq_res["Stuff"]["rq"]
results = OrderedDict({"panoptic_seg": res})
self._logger.info(results)
_print_panoptic_results(pq_res)
return results
def _print_panoptic_results(pq_res):
headers = ["", "PQ", "SQ", "RQ", "#categories"]
data = []
for name in ["All", "Things", "Stuff"]:
row = [name] + [pq_res[name][k] * 100 for k in ["pq", "sq", "rq"]] + [pq_res[name]["n"]]
data.append(row)
table = tabulate(
data, headers=headers, tablefmt="pipe", floatfmt=".3f", stralign="center", numalign="center"
)
logger.info("Panoptic Evaluation Results:\n" + table)
|
Cream/CDARTS/CDARTS_segmentation/segmentation/evaluation/coco_panoptic.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/evaluation/coco_panoptic.py",
"repo_id": "Cream",
"token_count": 2325
}
| 310 |