code (string, lengths 31–1.05M) | apis (list) | extract_api (string, lengths 97–1.91M) |
---|---|---|
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
from typing import Any, List, Optional
import numpy as np
import pytest
import torch
from torch.cuda.amp import GradScaler
from torch.nn import Identity
from InnerEye.Common import common_util
from InnerEye.Common.common_util import MetricsDataframeLoggers
from InnerEye.Common.output_directories import OutputFolderForTests
from InnerEye.ML.common import ModelExecutionMode
from InnerEye.ML.config import SegmentationModelBase
from InnerEye.ML.configs.classification.DummyClassification import DummyClassification
from InnerEye.ML.deep_learning_config import DeepLearningConfig
from InnerEye.ML.model_training import model_train
from InnerEye.ML.model_training_steps import ModelTrainingStepsForScalarModel, TrainValidateParameters, \
get_scalar_model_inputs_and_labels
from InnerEye.ML.models.architectures.base_model import BaseModel, CropSizeConstraints
from InnerEye.ML.models.parallel.data_parallel import DataParallelModel
from InnerEye.ML.pipelines.forward_pass import SegmentationForwardPass
from InnerEye.ML.utils import ml_util
from InnerEye.ML.utils.device_aware_module import DeviceAwareModule
from InnerEye.ML.utils.io_util import ImageDataType
from InnerEye.ML.utils.metrics_util import SummaryWriters
from InnerEye.ML.utils.model_util import ModelAndInfo
from Tests.ML.configs.ClassificationModelForTesting import ClassificationModelForTesting
from Tests.ML.models.architectures.DummyScalarModel import DummyScalarModel
from Tests.ML.util import machine_has_gpu, no_gpu_available
from Tests.ML.util import get_default_checkpoint_handler
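# A minimal Conv3d -> ConvTranspose3d model used as a stand-in for a full segmentation network in the tests below.
# If insert_value_in_output is set, that value is written into part of the output tensor on every forward pass.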
class SimpleModel(BaseModel):
def __init__(self, input_channels: int, channels: list, n_classes: int, kernel_size: int,
insert_value_in_output: Optional[float] = None,
crop_size_constraints: CropSizeConstraints = None):
super().__init__(input_channels=input_channels, name="SimpleModel", crop_size_constraints=crop_size_constraints)
self.channels = channels
self.n_classes = n_classes
self.kernel_size = kernel_size
self.insert_value_in_output = insert_value_in_output
self._model = torch.nn.Sequential(
torch.nn.Conv3d(input_channels, channels[0], kernel_size=self.kernel_size),
torch.nn.ConvTranspose3d(channels[0], n_classes, kernel_size=self.kernel_size)
)
def forward(self, x: Any) -> Any: # type: ignore
x = self._model(x)
if self.insert_value_in_output:
x[..., 0] = self.insert_value_in_output
return x
def get_all_child_layers(self) -> List[torch.nn.Module]:
return list(self._model.children())
@pytest.mark.parametrize("value_to_insert", [1.0, np.NaN, np.Inf])
@pytest.mark.parametrize("in_training_mode", [True, False])
def test_anomaly_detection(value_to_insert: float, in_training_mode: bool) -> None:
"""
Test anomaly detection for the segmentation forward pass.
    :param value_to_insert: The value to insert in the image (nan, inf, or a valid float)
:param in_training_mode: If true, run the segmentation forward pass in training mode, otherwise use the
settings for running on the validation set.
:return:
"""
image_size = [1, 1, 4, 4, 4]
labels_size = [1, 2, 4, 4, 4]
mask_size = [1, 4, 4, 4]
crop_size = (4, 4, 4)
inference_stride_size = (2, 2, 2)
ground_truth_ids = ["Lung"]
# image to run inference on
image = torch.from_numpy(np.random.uniform(size=image_size).astype(ImageDataType.IMAGE.value))
# labels for criterion
labels = torch.from_numpy(np.random.uniform(size=labels_size).astype(ImageDataType.SEGMENTATION.value))
# create a random mask if required
mask = torch.from_numpy((np.round(np.random.uniform(size=mask_size)).astype(dtype=ImageDataType.MASK.value)))
config = SegmentationModelBase(
crop_size=crop_size,
inference_stride_size=inference_stride_size,
image_channels=["ct"],
ground_truth_ids=ground_truth_ids,
should_validate=False,
detect_anomaly=True
)
model_and_info = ModelAndInfo(config=config, model_execution_mode=ModelExecutionMode.TRAIN,
checkpoint_path=None)
model_and_info._model: BaseModel = SimpleModel(1, [1], 2, 2) # type: ignore
model_and_info.create_summary_and_adjust_model_for_gpus()
model_and_info.try_create_optimizer_and_load_from_checkpoint()
config.use_gpu = False
model = model_and_info.model
optimizer = model_and_info.optimizer
# Create the loss criterion
criterion = lambda x, y: torch.tensor(value_to_insert, requires_grad=True)
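    # This criterion ignores its inputs and always returns value_to_insert, so a NaN/Inf loss can be injected
    # deterministically into the forward pass below.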
pipeline = SegmentationForwardPass(model,
config,
batch_size=1,
optimizer=optimizer,
in_training_mode=in_training_mode,
criterion=criterion)
image[0, 0, 0, 0, 0] = value_to_insert
if np.isnan(value_to_insert) or np.isinf(value_to_insert):
with pytest.raises(RuntimeError) as ex:
pipeline.forward_pass_patches(patches=image, mask=mask, labels=labels)
assert f"loss computation returned {value_to_insert}" in str(ex)
else:
pipeline.forward_pass_patches(patches=image, mask=mask, labels=labels)
@pytest.mark.gpu
@pytest.mark.skipif(no_gpu_available, reason="Testing AMP requires a GPU")
@pytest.mark.parametrize("use_model_parallel", [False, True])
@pytest.mark.parametrize("use_mixed_precision", [False, True])
@pytest.mark.parametrize("execution_mode", [ModelExecutionMode.TRAIN, ModelExecutionMode.TEST])
def test_amp_activated(use_model_parallel: bool,
execution_mode: ModelExecutionMode,
use_mixed_precision: bool) -> None:
"""
    Tests the mixed precision flag and the model parallel flag.
"""
assert machine_has_gpu, "This test must be executed on a GPU machine."
assert torch.cuda.device_count() > 1, "This test must be executed on a multi-GPU machine"
# image, labels, and mask to run forward and backward passes
image = torch.from_numpy(np.random.uniform(size=[1, 1, 4, 4, 4]).astype(ImageDataType.IMAGE.value))
labels = torch.from_numpy(np.random.uniform(size=[1, 2, 4, 4, 4]).astype(ImageDataType.SEGMENTATION.value))
mask = torch.from_numpy((np.round(np.random.uniform(size=[1, 4, 4, 4])).astype(dtype=ImageDataType.MASK.value)))
crop_size = (4, 4, 4)
model_config = SegmentationModelBase(crop_size=crop_size,
image_channels=["ct"],
ground_truth_ids=["Lung"],
use_mixed_precision=use_mixed_precision,
use_model_parallel=use_model_parallel,
should_validate=False)
assert model_config.use_gpu
model_and_info = ModelAndInfo(config=model_config, model_execution_mode=execution_mode,
checkpoint_path=None)
model_and_info._model = SimpleModel(1, [1], 2, 2) # type: ignore
# Move the model to the GPU. This is mostly to avoid issues with AMP, which has trouble
# with first using a GPU model and later using a CPU-based one.
try:
model_and_info.create_summary_and_adjust_model_for_gpus()
except NotImplementedError as ex:
if use_model_parallel:
# The SimpleModel does not implement model partitioning, and should hence fail at this step.
assert "Model partitioning is not implemented" in str(ex)
return
else:
raise ValueError(f"Expected this call to succeed, but got: {ex}")
model_and_info.try_create_optimizer_and_load_from_checkpoint()
model = model_and_info.model
optimizer = model_and_info.optimizer
# This is the same logic spelt out in adjust_model_for_gpus
use_data_parallel = (execution_mode == ModelExecutionMode.TRAIN) or (not use_model_parallel)
if use_data_parallel:
assert isinstance(model, DataParallelModel)
gradient_scaler = GradScaler() if use_mixed_precision else None
criterion = lambda x, y: torch.tensor([0.0], requires_grad=True).cuda()
pipeline = SegmentationForwardPass(model,
model_config,
batch_size=1,
optimizer=optimizer,
gradient_scaler=gradient_scaler,
criterion=criterion)
logits, _ = pipeline._compute_loss(image, labels)
# When using DataParallel, we expect to get a list of tensors back, one per GPU.
if use_data_parallel:
assert isinstance(logits, list)
first_logit = logits[0]
else:
first_logit = logits
if use_mixed_precision:
assert first_logit.dtype == torch.float16
else:
assert first_logit.dtype == torch.float32
# Verify that forward and backward passes do not throw an exception
pipeline._forward_pass(patches=image, mask=mask, labels=labels)
@pytest.mark.skipif(common_util.is_windows(), reason="Has issues on windows build")
@pytest.mark.cpu_and_gpu
@pytest.mark.parametrize("use_gpu_override", [False, True])
def test_use_gpu_flag(use_gpu_override: bool) -> None:
config = DeepLearningConfig(should_validate=False)
    # On a config where use_gpu has not been explicitly overridden, the use_gpu flag should return True exactly
    # when a GPU is actually present.
assert config.use_gpu == machine_has_gpu
if machine_has_gpu:
# If a GPU is present, the use_gpu flag should exactly return whatever the override says
# (we can run in CPU mode even on a GPU)
config.use_gpu = use_gpu_override
assert config.use_gpu == use_gpu_override
else:
if use_gpu_override:
# We are on a machine without a GPU, but the override says we should use the GPU: fail.
with pytest.raises(ValueError) as ex:
config.use_gpu = use_gpu_override
assert "use_gpu to True if there is not CUDA capable GPU present" in str(ex)
else:
config.use_gpu = use_gpu_override
assert config.use_gpu == use_gpu_override
@pytest.mark.azureml
def test_mean_teacher_model(test_output_dirs: OutputFolderForTests) -> None:
"""
Test training and weight updates of the mean teacher model computation.
"""
def _get_parameters_of_model(model: DeviceAwareModule) -> Any:
"""
Returns the iterator of model parameters
"""
if isinstance(model, DataParallelModel):
return model.module.parameters()
else:
return model.parameters()
config = DummyClassification()
config.set_output_to(test_output_dirs.root_dir)
checkpoint_handler = get_default_checkpoint_handler(model_config=config,
project_root=test_output_dirs.root_dir)
config.num_epochs = 1
    # Set train batch size to be arbitrarily big to ensure we have only one training step
# i.e. one mean teacher update.
config.train_batch_size = 100
# Train without mean teacher
model_train(config, checkpoint_handler=checkpoint_handler)
# Retrieve the weight after one epoch
model_and_info = ModelAndInfo(config=config, model_execution_mode=ModelExecutionMode.TEST,
checkpoint_path=config.get_path_to_checkpoint(epoch=1))
model_and_info.try_create_model_and_load_from_checkpoint()
model = model_and_info.model
model_weight = next(_get_parameters_of_model(model))
# Get the starting weight of the mean teacher model
ml_util.set_random_seed(config.get_effective_random_seed())
model_and_info_mean_teacher = ModelAndInfo(config=config,
model_execution_mode=ModelExecutionMode.TEST,
checkpoint_path=None)
model_and_info_mean_teacher.try_create_model_and_load_from_checkpoint()
model_and_info_mean_teacher.try_create_mean_teacher_model_and_load_from_checkpoint()
mean_teach_model = model_and_info_mean_teacher.mean_teacher_model
assert mean_teach_model is not None # for mypy
initial_weight_mean_teacher_model = next(_get_parameters_of_model(mean_teach_model))
# Now train with mean teacher and check the update of the weight
alpha = 0.999
config.mean_teacher_alpha = alpha
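    # With mean_teacher_alpha set, the teacher weights are an exponential moving average of the student weights:
    # teacher_new = alpha * teacher_old + (1 - alpha) * student, which is what the final assert below checks.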
model_train(config, checkpoint_handler=checkpoint_handler)
# Retrieve weight of mean teacher model saved in the checkpoint
model_and_info_mean_teacher = ModelAndInfo(config=config, model_execution_mode=ModelExecutionMode.TEST,
checkpoint_path=config.get_path_to_checkpoint(1))
model_and_info_mean_teacher.try_create_mean_teacher_model_and_load_from_checkpoint()
mean_teacher_model = model_and_info_mean_teacher.mean_teacher_model
assert mean_teacher_model is not None # for mypy
result_weight = next(_get_parameters_of_model(mean_teacher_model))
# Retrieve the associated student weight
model_and_info_mean_teacher.try_create_model_and_load_from_checkpoint()
student_model = model_and_info_mean_teacher.model
student_model_weight = next(_get_parameters_of_model(student_model))
# Assert that the student weight corresponds to the weight of a simple training without mean teacher
# computation
assert student_model_weight.allclose(model_weight)
# Check the update of the parameters
assert torch.all(alpha * initial_weight_mean_teacher_model + (1 - alpha) * student_model_weight == result_weight)
@pytest.mark.gpu
@pytest.mark.skipif(no_gpu_available, reason="Testing AMP requires a GPU")
@pytest.mark.parametrize("use_mixed_precision", [False, True])
@pytest.mark.parametrize("execution_mode", [ModelExecutionMode.TRAIN, ModelExecutionMode.VAL])
def test_amp_and_parallel_for_scalar_models(test_output_dirs: OutputFolderForTests,
execution_mode: ModelExecutionMode,
use_mixed_precision: bool) -> None:
"""
    Tests the mixed precision flag and data parallel for scalar models.
"""
class ClassificationModelWithIdentity(ClassificationModelForTesting):
def create_model(self) -> Any:
return DummyScalarModel(expected_image_size_zyx=config.expected_image_size_zyx,
activation=Identity(),
use_mixed_precision=use_mixed_precision)
assert machine_has_gpu, "This test must be executed on a GPU machine."
assert torch.cuda.device_count() > 1, "This test must be executed on a multi-GPU machine"
config = ClassificationModelWithIdentity()
config.use_mixed_precision = use_mixed_precision
model_and_info = ModelAndInfo(config=config, model_execution_mode=execution_mode,
checkpoint_path=None)
model_and_info.try_create_model_load_from_checkpoint_and_adjust()
model = model_and_info.model
# This is the same logic spelt out in update_model_for_multiple_gpu
# execution_mode == ModelExecutionMode.TRAIN or (not use_model_parallel), which is always True in our case
use_data_parallel = True
if use_data_parallel:
assert isinstance(model, DataParallelModel)
data_loaders = config.create_data_loaders()
gradient_scaler = GradScaler() if use_mixed_precision else None
train_val_parameters: TrainValidateParameters = TrainValidateParameters(
model=model,
data_loader=data_loaders[execution_mode],
in_training_mode=execution_mode == ModelExecutionMode.TRAIN,
gradient_scaler=gradient_scaler,
dataframe_loggers=MetricsDataframeLoggers(test_output_dirs.root_dir),
summary_writers=SummaryWriters(train=None, val=None) # type: ignore
)
training_steps = ModelTrainingStepsForScalarModel(config, train_val_parameters)
sample = list(data_loaders[execution_mode])[0]
model_input = get_scalar_model_inputs_and_labels(config, model, sample)
logits, posteriors, loss = training_steps._compute_model_output_and_loss(model_input)
# When using DataParallel, we expect to get a list of tensors back, one per GPU.
if use_data_parallel:
assert isinstance(logits, list)
first_logit = logits[0]
else:
first_logit = logits
if use_mixed_precision:
assert first_logit.dtype == torch.float16
assert posteriors.dtype == torch.float16
# BCEWithLogitsLoss outputs float32, even with float16 args
assert loss.dtype == torch.float32
else:
assert first_logit.dtype == torch.float32
assert posteriors.dtype == torch.float32
assert loss.dtype == torch.float32
    # Verify that the forward pass does not throw. It would, for example, if it failed to gather tensors or did
    # not convert float16 back to float32
_, _, _ = training_steps._compute_model_output_and_loss(model_input)
|
[
"InnerEye.Common.common_util.MetricsDataframeLoggers",
"torch.nn.ConvTranspose3d",
"InnerEye.ML.config.SegmentationModelBase",
"numpy.isnan",
"torch.cuda.device_count",
"pytest.mark.skipif",
"pytest.mark.parametrize",
"InnerEye.ML.utils.model_util.ModelAndInfo",
"torch.nn.Conv3d",
"InnerEye.ML.deep_learning_config.DeepLearningConfig",
"pytest.raises",
"InnerEye.ML.utils.metrics_util.SummaryWriters",
"InnerEye.ML.configs.classification.DummyClassification.DummyClassification",
"InnerEye.ML.model_training_steps.get_scalar_model_inputs_and_labels",
"InnerEye.ML.pipelines.forward_pass.SegmentationForwardPass",
"numpy.isinf",
"torch.cuda.amp.GradScaler",
"torch.nn.Identity",
"InnerEye.ML.model_training.model_train",
"numpy.random.uniform",
"InnerEye.Common.common_util.is_windows",
"InnerEye.ML.model_training_steps.ModelTrainingStepsForScalarModel",
"Tests.ML.util.get_default_checkpoint_handler",
"torch.tensor",
"torch.all"
] |
[((2985, 3050), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""value_to_insert"""', '[1.0, np.NaN, np.Inf]'], {}), "('value_to_insert', [1.0, np.NaN, np.Inf])\n", (3008, 3050), False, 'import pytest\n'), ((3052, 3110), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""in_training_mode"""', '[True, False]'], {}), "('in_training_mode', [True, False])\n", (3075, 3110), False, 'import pytest\n'), ((5744, 5817), 'pytest.mark.skipif', 'pytest.mark.skipif', (['no_gpu_available'], {'reason': '"""Testing AMP requires a GPU"""'}), "(no_gpu_available, reason='Testing AMP requires a GPU')\n", (5762, 5817), False, 'import pytest\n'), ((5819, 5879), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_model_parallel"""', '[False, True]'], {}), "('use_model_parallel', [False, True])\n", (5842, 5879), False, 'import pytest\n'), ((5881, 5942), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_mixed_precision"""', '[False, True]'], {}), "('use_mixed_precision', [False, True])\n", (5904, 5942), False, 'import pytest\n'), ((5944, 6042), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""execution_mode"""', '[ModelExecutionMode.TRAIN, ModelExecutionMode.TEST]'], {}), "('execution_mode', [ModelExecutionMode.TRAIN,\n ModelExecutionMode.TEST])\n", (5967, 6042), False, 'import pytest\n'), ((9677, 9735), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_gpu_override"""', '[False, True]'], {}), "('use_gpu_override', [False, True])\n", (9700, 9735), False, 'import pytest\n'), ((14214, 14287), 'pytest.mark.skipif', 'pytest.mark.skipif', (['no_gpu_available'], {'reason': '"""Testing AMP requires a GPU"""'}), "(no_gpu_available, reason='Testing AMP requires a GPU')\n", (14232, 14287), False, 'import pytest\n'), ((14289, 14350), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_mixed_precision"""', '[False, True]'], {}), "('use_mixed_precision', [False, True])\n", (14312, 14350), False, 'import pytest\n'), ((14352, 14449), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""execution_mode"""', '[ModelExecutionMode.TRAIN, ModelExecutionMode.VAL]'], {}), "('execution_mode', [ModelExecutionMode.TRAIN,\n ModelExecutionMode.VAL])\n", (14375, 14449), False, 'import pytest\n'), ((4164, 4363), 'InnerEye.ML.config.SegmentationModelBase', 'SegmentationModelBase', ([], {'crop_size': 'crop_size', 'inference_stride_size': 'inference_stride_size', 'image_channels': "['ct']", 'ground_truth_ids': 'ground_truth_ids', 'should_validate': '(False)', 'detect_anomaly': '(True)'}), "(crop_size=crop_size, inference_stride_size=\n inference_stride_size, image_channels=['ct'], ground_truth_ids=\n ground_truth_ids, should_validate=False, detect_anomaly=True)\n", (4185, 4363), False, 'from InnerEye.ML.config import SegmentationModelBase\n'), ((4430, 4530), 'InnerEye.ML.utils.model_util.ModelAndInfo', 'ModelAndInfo', ([], {'config': 'config', 'model_execution_mode': 'ModelExecutionMode.TRAIN', 'checkpoint_path': 'None'}), '(config=config, model_execution_mode=ModelExecutionMode.TRAIN,\n checkpoint_path=None)\n', (4442, 4530), False, 'from InnerEye.ML.utils.model_util import ModelAndInfo\n'), ((5000, 5133), 'InnerEye.ML.pipelines.forward_pass.SegmentationForwardPass', 'SegmentationForwardPass', (['model', 'config'], {'batch_size': '(1)', 'optimizer': 'optimizer', 'in_training_mode': 'in_training_mode', 'criterion': 'criterion'}), '(model, config, batch_size=1, optimizer=optimizer,\n in_training_mode=in_training_mode, criterion=criterion)\n', (5023, 5133), 
False, 'from InnerEye.ML.pipelines.forward_pass import SegmentationForwardPass\n'), ((6898, 7101), 'InnerEye.ML.config.SegmentationModelBase', 'SegmentationModelBase', ([], {'crop_size': 'crop_size', 'image_channels': "['ct']", 'ground_truth_ids': "['Lung']", 'use_mixed_precision': 'use_mixed_precision', 'use_model_parallel': 'use_model_parallel', 'should_validate': '(False)'}), "(crop_size=crop_size, image_channels=['ct'],\n ground_truth_ids=['Lung'], use_mixed_precision=use_mixed_precision,\n use_model_parallel=use_model_parallel, should_validate=False)\n", (6919, 7101), False, 'from InnerEye.ML.config import SegmentationModelBase\n'), ((7352, 7448), 'InnerEye.ML.utils.model_util.ModelAndInfo', 'ModelAndInfo', ([], {'config': 'model_config', 'model_execution_mode': 'execution_mode', 'checkpoint_path': 'None'}), '(config=model_config, model_execution_mode=execution_mode,\n checkpoint_path=None)\n', (7364, 7448), False, 'from InnerEye.ML.utils.model_util import ModelAndInfo\n'), ((8682, 8820), 'InnerEye.ML.pipelines.forward_pass.SegmentationForwardPass', 'SegmentationForwardPass', (['model', 'model_config'], {'batch_size': '(1)', 'optimizer': 'optimizer', 'gradient_scaler': 'gradient_scaler', 'criterion': 'criterion'}), '(model, model_config, batch_size=1, optimizer=\n optimizer, gradient_scaler=gradient_scaler, criterion=criterion)\n', (8705, 8820), False, 'from InnerEye.ML.pipelines.forward_pass import SegmentationForwardPass\n'), ((9804, 9845), 'InnerEye.ML.deep_learning_config.DeepLearningConfig', 'DeepLearningConfig', ([], {'should_validate': '(False)'}), '(should_validate=False)\n', (9822, 9845), False, 'from InnerEye.ML.deep_learning_config import DeepLearningConfig\n'), ((9587, 9611), 'InnerEye.Common.common_util.is_windows', 'common_util.is_windows', ([], {}), '()\n', (9609, 9611), False, 'from InnerEye.Common import common_util\n'), ((11226, 11247), 'InnerEye.ML.configs.classification.DummyClassification.DummyClassification', 'DummyClassification', ([], {}), '()\n', (11245, 11247), False, 'from InnerEye.ML.configs.classification.DummyClassification import DummyClassification\n'), ((11325, 11421), 'Tests.ML.util.get_default_checkpoint_handler', 'get_default_checkpoint_handler', ([], {'model_config': 'config', 'project_root': 'test_output_dirs.root_dir'}), '(model_config=config, project_root=\n test_output_dirs.root_dir)\n', (11355, 11421), False, 'from Tests.ML.util import get_default_checkpoint_handler\n'), ((11695, 11753), 'InnerEye.ML.model_training.model_train', 'model_train', (['config'], {'checkpoint_handler': 'checkpoint_handler'}), '(config, checkpoint_handler=checkpoint_handler)\n', (11706, 11753), False, 'from InnerEye.ML.model_training import model_train\n'), ((12291, 12390), 'InnerEye.ML.utils.model_util.ModelAndInfo', 'ModelAndInfo', ([], {'config': 'config', 'model_execution_mode': 'ModelExecutionMode.TEST', 'checkpoint_path': 'None'}), '(config=config, model_execution_mode=ModelExecutionMode.TEST,\n checkpoint_path=None)\n', (12303, 12390), False, 'from InnerEye.ML.utils.model_util import ModelAndInfo\n'), ((12988, 13046), 'InnerEye.ML.model_training.model_train', 'model_train', (['config'], {'checkpoint_handler': 'checkpoint_handler'}), '(config, checkpoint_handler=checkpoint_handler)\n', (12999, 13046), False, 'from InnerEye.ML.model_training import model_train\n'), ((14087, 14197), 'torch.all', 'torch.all', (['(alpha * initial_weight_mean_teacher_model + (1 - alpha) *\n student_model_weight == result_weight)'], {}), '(alpha * initial_weight_mean_teacher_model + 
(1 - alpha) *\n student_model_weight == result_weight)\n', (14096, 14197), False, 'import torch\n'), ((15410, 15500), 'InnerEye.ML.utils.model_util.ModelAndInfo', 'ModelAndInfo', ([], {'config': 'config', 'model_execution_mode': 'execution_mode', 'checkpoint_path': 'None'}), '(config=config, model_execution_mode=execution_mode,\n checkpoint_path=None)\n', (15422, 15500), False, 'from InnerEye.ML.utils.model_util import ModelAndInfo\n'), ((16481, 16543), 'InnerEye.ML.model_training_steps.ModelTrainingStepsForScalarModel', 'ModelTrainingStepsForScalarModel', (['config', 'train_val_parameters'], {}), '(config, train_val_parameters)\n', (16513, 16543), False, 'from InnerEye.ML.model_training_steps import ModelTrainingStepsForScalarModel, TrainValidateParameters, get_scalar_model_inputs_and_labels\n'), ((16613, 16670), 'InnerEye.ML.model_training_steps.get_scalar_model_inputs_and_labels', 'get_scalar_model_inputs_and_labels', (['config', 'model', 'sample'], {}), '(config, model, sample)\n', (16647, 16670), False, 'from InnerEye.ML.model_training_steps import ModelTrainingStepsForScalarModel, TrainValidateParameters, get_scalar_model_inputs_and_labels\n'), ((4935, 4984), 'torch.tensor', 'torch.tensor', (['value_to_insert'], {'requires_grad': '(True)'}), '(value_to_insert, requires_grad=True)\n', (4947, 4984), False, 'import torch\n'), ((5375, 5400), 'numpy.isnan', 'np.isnan', (['value_to_insert'], {}), '(value_to_insert)\n', (5383, 5400), True, 'import numpy as np\n'), ((5404, 5429), 'numpy.isinf', 'np.isinf', (['value_to_insert'], {}), '(value_to_insert)\n', (5412, 5429), True, 'import numpy as np\n'), ((6370, 6395), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (6393, 6395), False, 'import torch\n'), ((8545, 8557), 'torch.cuda.amp.GradScaler', 'GradScaler', ([], {}), '()\n', (8555, 8557), False, 'from torch.cuda.amp import GradScaler\n'), ((15205, 15230), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (15228, 15230), False, 'import torch\n'), ((15995, 16007), 'torch.cuda.amp.GradScaler', 'GradScaler', ([], {}), '()\n', (16005, 16007), False, 'from torch.cuda.amp import GradScaler\n'), ((2508, 2582), 'torch.nn.Conv3d', 'torch.nn.Conv3d', (['input_channels', 'channels[0]'], {'kernel_size': 'self.kernel_size'}), '(input_channels, channels[0], kernel_size=self.kernel_size)\n', (2523, 2582), False, 'import torch\n'), ((2596, 2674), 'torch.nn.ConvTranspose3d', 'torch.nn.ConvTranspose3d', (['channels[0]', 'n_classes'], {'kernel_size': 'self.kernel_size'}), '(channels[0], n_classes, kernel_size=self.kernel_size)\n', (2620, 2674), False, 'import torch\n'), ((5444, 5471), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (5457, 5471), False, 'import pytest\n'), ((16325, 16375), 'InnerEye.Common.common_util.MetricsDataframeLoggers', 'MetricsDataframeLoggers', (['test_output_dirs.root_dir'], {}), '(test_output_dirs.root_dir)\n', (16348, 16375), False, 'from InnerEye.Common.common_util import MetricsDataframeLoggers\n'), ((16401, 16437), 'InnerEye.ML.utils.metrics_util.SummaryWriters', 'SummaryWriters', ([], {'train': 'None', 'val': 'None'}), '(train=None, val=None)\n', (16415, 16437), False, 'from InnerEye.ML.utils.metrics_util import SummaryWriters\n'), ((3792, 3826), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'image_size'}), '(size=image_size)\n', (3809, 3826), True, 'import numpy as np\n'), ((3919, 3954), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'labels_size'}), '(size=labels_size)\n', 
(3936, 3954), True, 'import numpy as np\n'), ((6547, 6586), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[1, 1, 4, 4, 4]'}), '(size=[1, 1, 4, 4, 4])\n', (6564, 6586), True, 'import numpy as np\n'), ((6652, 6691), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[1, 2, 4, 4, 4]'}), '(size=[1, 2, 4, 4, 4])\n', (6669, 6691), True, 'import numpy as np\n'), ((8620, 8659), 'torch.tensor', 'torch.tensor', (['[0.0]'], {'requires_grad': '(True)'}), '([0.0], requires_grad=True)\n', (8632, 8659), False, 'import torch\n'), ((10447, 10472), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10460, 10472), False, 'import pytest\n'), ((4074, 4107), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'mask_size'}), '(size=mask_size)\n', (4091, 4107), True, 'import numpy as np\n'), ((6772, 6808), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[1, 4, 4, 4]'}), '(size=[1, 4, 4, 4])\n', (6789, 6808), True, 'import numpy as np\n'), ((15029, 15039), 'torch.nn.Identity', 'Identity', ([], {}), '()\n', (15037, 15039), False, 'from torch.nn import Identity\n')]
|
# generate overexposure samples from clear images
# author: @LucasX
import argparse
import os
import random
from multiprocessing import Queue, Process
import cv2
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('-orig_dir', type=str,
default='C:/Users/LucasX/Desktop/ShelfExposure/normal')
parser.add_argument('-outpur_dir', type=str,
default='C:/Users/LucasX/Desktop/ShelfExposure/exposure')
parser.add_argument('-alpha', type=float, default=2.0)
parser.add_argument('-beta', type=float, default=0.0)
parser.add_argument('-procs', type=int, default=2)
parser.add_argument('-show', type=bool, default=False)
args = vars(parser.parse_args())
print('-' * 100)
for key, value in args.items():
print('%s = %s' % (key, value))
print('-' * 100)
def modify_img_saturation(img_f):
"""
modify image saturation to imitate overexposure effect
:param img_f:
:return:
"""
if img_f.endswith('.jpg') or img_f.endswith('.png') or img_f.endswith('.jpeg'):
if not os.path.exists(args['outpur_dir']):
os.makedirs(args['outpur_dir'])
image = cv2.imread(img_f)
overexposure_image = np.zeros(image.shape, image.dtype)
# alpha = args['alpha']
alpha = random.randint(2, 10)
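        # Each pixel is transformed as clip(alpha * pixel + beta, 0, 255); alpha > 1 brightens the image to
        # imitate overexposure.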
for y in range(image.shape[0]):
for x in range(image.shape[1]):
for c in range(image.shape[2]):
overexposure_image[y, x, c] = np.clip(alpha * image[y, x, c] + args['beta'], 0, 255)
if args['show'] and overexposure_image is not None:
cv2.imshow('overexposure_image', overexposure_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite(os.path.join(args['outpur_dir'], os.path.basename(img_f)), overexposure_image)
print('[INFO] generate overexposure image {} successfully'.format(os.path.basename(img_f)))
def multi_proc_modify_img_saturation(imgs_queue):
"""
modify image saturation to imitate overexposure effect in multi-processing mode
:param imgs_queue:
:return:
"""
if not imgs_queue.empty():
img_f = imgs_queue.get()
if img_f.endswith('.jpg') or img_f.endswith('.png') or img_f.endswith('.jpeg'):
if not os.path.exists(args['outpur_dir']):
os.makedirs(args['outpur_dir'])
image = cv2.imread(img_f)
overexposure_image = np.zeros(image.shape, image.dtype)
# alpha = args['alpha']
alpha = random.randint(2, 10)
for y in range(image.shape[0]):
for x in range(image.shape[1]):
for c in range(image.shape[2]):
overexposure_image[y, x, c] = np.clip(alpha * image[y, x, c] + args['beta'], 0, 255)
if args['show'] and overexposure_image is not None:
cv2.imshow('overexposure_image', overexposure_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite(os.path.join(args['outpur_dir'], os.path.basename(img_f)), overexposure_image)
print('[INFO] generate overexposure image {} successfully'.format(os.path.basename(img_f)))
if __name__ == '__main__':
    # multi-process version
imgs_queue = Queue()
for img_f in os.listdir(args['orig_dir']):
imgs_queue.put(os.path.join(args['orig_dir'], img_f))
for i in range(args['procs']):
Process(target=multi_proc_modify_img_saturation, args=(imgs_queue,)).start()
# single-thread processing version
# for img_f in os.listdir(args['orig_dir']):
# modify_img_saturation(os.path.join(args['orig_dir'], img_f))
|
[
"argparse.ArgumentParser",
"random.randint",
"os.makedirs",
"cv2.waitKey",
"cv2.destroyAllWindows",
"os.path.basename",
"numpy.zeros",
"os.path.exists",
"numpy.clip",
"cv2.imread",
"multiprocessing.Queue",
"multiprocessing.Process",
"cv2.imshow",
"os.path.join",
"os.listdir"
] |
[((192, 217), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (215, 217), False, 'import argparse\n'), ((3309, 3316), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (3314, 3316), False, 'from multiprocessing import Queue, Process\n'), ((3334, 3362), 'os.listdir', 'os.listdir', (["args['orig_dir']"], {}), "(args['orig_dir'])\n", (3344, 3362), False, 'import os\n'), ((1148, 1165), 'cv2.imread', 'cv2.imread', (['img_f'], {}), '(img_f)\n', (1158, 1165), False, 'import cv2\n'), ((1196, 1230), 'numpy.zeros', 'np.zeros', (['image.shape', 'image.dtype'], {}), '(image.shape, image.dtype)\n', (1204, 1230), True, 'import numpy as np\n'), ((1279, 1300), 'random.randint', 'random.randint', (['(2)', '(10)'], {}), '(2, 10)\n', (1293, 1300), False, 'import random\n'), ((1052, 1086), 'os.path.exists', 'os.path.exists', (["args['outpur_dir']"], {}), "(args['outpur_dir'])\n", (1066, 1086), False, 'import os\n'), ((1100, 1131), 'os.makedirs', 'os.makedirs', (["args['outpur_dir']"], {}), "(args['outpur_dir'])\n", (1111, 1131), False, 'import os\n'), ((1613, 1665), 'cv2.imshow', 'cv2.imshow', (['"""overexposure_image"""', 'overexposure_image'], {}), "('overexposure_image', overexposure_image)\n", (1623, 1665), False, 'import cv2\n'), ((1678, 1692), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1689, 1692), False, 'import cv2\n'), ((1705, 1728), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1726, 1728), False, 'import cv2\n'), ((2392, 2409), 'cv2.imread', 'cv2.imread', (['img_f'], {}), '(img_f)\n', (2402, 2409), False, 'import cv2\n'), ((2444, 2478), 'numpy.zeros', 'np.zeros', (['image.shape', 'image.dtype'], {}), '(image.shape, image.dtype)\n', (2452, 2478), True, 'import numpy as np\n'), ((2535, 2556), 'random.randint', 'random.randint', (['(2)', '(10)'], {}), '(2, 10)\n', (2549, 2556), False, 'import random\n'), ((3387, 3424), 'os.path.join', 'os.path.join', (["args['orig_dir']", 'img_f'], {}), "(args['orig_dir'], img_f)\n", (3399, 3424), False, 'import os\n'), ((1783, 1806), 'os.path.basename', 'os.path.basename', (['img_f'], {}), '(img_f)\n', (1799, 1806), False, 'import os\n'), ((1903, 1926), 'os.path.basename', 'os.path.basename', (['img_f'], {}), '(img_f)\n', (1919, 1926), False, 'import os\n'), ((2288, 2322), 'os.path.exists', 'os.path.exists', (["args['outpur_dir']"], {}), "(args['outpur_dir'])\n", (2302, 2322), False, 'import os\n'), ((2340, 2371), 'os.makedirs', 'os.makedirs', (["args['outpur_dir']"], {}), "(args['outpur_dir'])\n", (2351, 2371), False, 'import os\n'), ((2893, 2945), 'cv2.imshow', 'cv2.imshow', (['"""overexposure_image"""', 'overexposure_image'], {}), "('overexposure_image', overexposure_image)\n", (2903, 2945), False, 'import cv2\n'), ((2962, 2976), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2973, 2976), False, 'import cv2\n'), ((2993, 3016), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3014, 3016), False, 'import cv2\n'), ((3470, 3538), 'multiprocessing.Process', 'Process', ([], {'target': 'multi_proc_modify_img_saturation', 'args': '(imgs_queue,)'}), '(target=multi_proc_modify_img_saturation, args=(imgs_queue,))\n', (3477, 3538), False, 'from multiprocessing import Queue, Process\n'), ((1485, 1539), 'numpy.clip', 'np.clip', (["(alpha * image[y, x, c] + args['beta'])", '(0)', '(255)'], {}), "(alpha * image[y, x, c] + args['beta'], 0, 255)\n", (1492, 1539), True, 'import numpy as np\n'), ((3075, 3098), 'os.path.basename', 'os.path.basename', (['img_f'], {}), '(img_f)\n', (3091, 3098), 
False, 'import os\n'), ((3199, 3222), 'os.path.basename', 'os.path.basename', (['img_f'], {}), '(img_f)\n', (3215, 3222), False, 'import os\n'), ((2757, 2811), 'numpy.clip', 'np.clip', (["(alpha * image[y, x, c] + args['beta'])", '(0)', '(255)'], {}), "(alpha * image[y, x, c] + args['beta'], 0, 255)\n", (2764, 2811), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 28 16:40:41 2020
<NAME>
<EMAIL>
BME Bogazici University
Istanbul / Uskudar
@author: abas
"""
import numpy as np
import pytorch_lightning as pl
import torch
import torchvision
from torch.utils.data import Dataset, DataLoader
from preprocess import transformation,scaler,normalizer,smoother
import pandas as pd
from scipy.fft import fft,ifft
class dataset(Dataset):
""" This function initializes the dataset.
Args:
path (string): Input path
        gtpath (string, optional): Path for ground truths. Defaults to None
        responseCol (int, optional): If the input dataset contains the response, set this to its column index. Note: 0 for the first column, 1 for the second, as in Python indexing.
phase (str, optional): Defaults to 'train'.
preprocess (bool, optional): Switch for preprocess. Defaults to True.
smooth (bool, optional): Switch for smoothing. Defaults to True.
normalise (bool, optional): Switch for normalise. Defaults to True.
transform (bool, optional): Switch for yeo-johnson power transformation. Defaults to True.
"""
def __init__(self,path,gtpath=None,responseCol=-1,phase='train',preprocess=True,smooth=True,normalise=True,transform=True):
self.normalise=normalise
self.exc=pd.read_excel(path)
self.phase=phase
self.smooth=smooth
self.normalise=normalise
self.transform=transform
self.preprocess=preprocess
if gtpath is not None:
self.response=np.load(gtpath)
else:
self.response=np.array(self.exc.iloc[:,responseCol])
self.excarr=np.array(self.exc.drop(self.exc.columns[[responseCol]],axis=1))
if phase=='train':
self.excarr=np.array(self.exc)
if self.preprocess:
self.excarr=normalizer(self.excarr)
self.excarr=smoother(self.excarr)
self.excarr,self.scale=scaler(self.excarr)
self.excarr,self.transformater=transformation(self.excarr)
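            # Preprocessing chain: normalise, smooth, scale, then apply the Yeo-Johnson power transform;
            # the fitted scaler and transformer are kept as attributes for later use.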
def __len__(self):
return len(self.excarr)
def __getitem__(self,idx=None):
spectrum=self.excarr[idx,:]
response=self.response[idx]
age=torch.tensor(self.exc.iloc[idx,1]).type(torch.float32)
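        # Each sample is returned as (age, spectrum with a leading channel dimension, response label).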
return age,torch.tensor(spectrum).type(torch.float32).unsqueeze(0),torch.tensor(response).type(torch.long)
|
[
"preprocess.normalizer",
"numpy.load",
"preprocess.scaler",
"pandas.read_excel",
"preprocess.transformation",
"numpy.array",
"torch.tensor",
"preprocess.smoother"
] |
[((1431, 1450), 'pandas.read_excel', 'pd.read_excel', (['path'], {}), '(path)\n', (1444, 1450), True, 'import pandas as pd\n'), ((1661, 1676), 'numpy.load', 'np.load', (['gtpath'], {}), '(gtpath)\n', (1668, 1676), True, 'import numpy as np\n'), ((1717, 1756), 'numpy.array', 'np.array', (['self.exc.iloc[:, responseCol]'], {}), '(self.exc.iloc[:, responseCol])\n', (1725, 1756), True, 'import numpy as np\n'), ((1913, 1931), 'numpy.array', 'np.array', (['self.exc'], {}), '(self.exc)\n', (1921, 1931), True, 'import numpy as np\n'), ((2005, 2028), 'preprocess.normalizer', 'normalizer', (['self.excarr'], {}), '(self.excarr)\n', (2015, 2028), False, 'from preprocess import transformation, scaler, normalizer, smoother\n'), ((2057, 2078), 'preprocess.smoother', 'smoother', (['self.excarr'], {}), '(self.excarr)\n', (2065, 2078), False, 'from preprocess import transformation, scaler, normalizer, smoother\n'), ((2118, 2137), 'preprocess.scaler', 'scaler', (['self.excarr'], {}), '(self.excarr)\n', (2124, 2137), False, 'from preprocess import transformation, scaler, normalizer, smoother\n'), ((2185, 2212), 'preprocess.transformation', 'transformation', (['self.excarr'], {}), '(self.excarr)\n', (2199, 2212), False, 'from preprocess import transformation, scaler, normalizer, smoother\n'), ((2438, 2473), 'torch.tensor', 'torch.tensor', (['self.exc.iloc[idx, 1]'], {}), '(self.exc.iloc[idx, 1])\n', (2450, 2473), False, 'import torch\n'), ((2577, 2599), 'torch.tensor', 'torch.tensor', (['response'], {}), '(response)\n', (2589, 2599), False, 'import torch\n'), ((2521, 2543), 'torch.tensor', 'torch.tensor', (['spectrum'], {}), '(spectrum)\n', (2533, 2543), False, 'import torch\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: lry
"""
import scipy.io as sio
import numpy as np
sep = [20,40,61,103] # please pay attention to this separation, set according to the spectral response function of different sensors.
# setting global parameters
patch_size = 16
patch_size1 = 16
patch_size2 = 16
patch_num = 2
batch_size = patch_size*patch_size*patch_num
gap = 12 # the interval between two adjacent patches, must be smaller than 'patch_size'
sigmaInit = 0.01
lastTrain = 0 # continue the former training
Pretrain = 500 # the number of iteration for pretraining
Maxiter = 3000 # the max iterations for training # try 2000 3000 4000
step = 100 # save the model every "step" iterations
learning_rate = 0.0001
max_grad_norm = 0.1
# saving path
path = './result_fusion'
filename = "../processed_data/.."
print("Loading data")
data = sio.loadmat(filename)
Xl_3d = data['xl']
Xl_bicubic = data['xl_bicubic'] # this is the bibubic-interpolated xl image acquired with matlab
Xg_3d = data['xg']
scale = data['scale'][0][0]
Trans_data = data['P']
N1,N2,dimX = data['xh'].shape
s1,s2 = data['xl'].shape[0], data['xl'].shape[1]
dimXg = Xg_3d.shape[2]
Xl_2d = np.reshape(Xl_3d,[-1,dimX])
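# Flatten the low-resolution image into a 2-D array with one row per pixel and one column per spectral band.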
num = s1*s2 # the number of low-resolution pixels
f2_1 = 9 # 1D filter size
f3_1 = 5 # 2D filter size
hidden_size_local = 30
hidden_size_global = 20
gener_hidden_size = 20
enc_q = enc_k = hidden_size_global * 2
enc_v = hidden_size_global * 2
enc_k_z = enc_v_z = 30
dec_q = 30
filter_num_1 = 20
filter_num_2 = 20
filter_num_3 = hidden_size_global*2*dimXg #### hidden_size_global*2*dimXg
f_1 = 5
f_2 = 3
f_3 = 1
H3_1 = dimX
dimZ = dimXg*hidden_size_global # dimension of z
|
[
"numpy.reshape",
"scipy.io.loadmat"
] |
[((935, 956), 'scipy.io.loadmat', 'sio.loadmat', (['filename'], {}), '(filename)\n', (946, 956), True, 'import scipy.io as sio\n'), ((1262, 1291), 'numpy.reshape', 'np.reshape', (['Xl_3d', '[-1, dimX]'], {}), '(Xl_3d, [-1, dimX])\n', (1272, 1291), True, 'import numpy as np\n')]
|
# %%
import pandas as pd
import numpy as np
from datetime import datetime
import os
import pickle
import matplotlib.pyplot as plt
import scipy.special as sc
from scipy.stats import norm
from scipy.stats import lognorm
import copy
exec(open('../env_vars.py').read())
dir_data = os.environ['dir_data']
dir_picklejar = os.environ['dir_picklejar']
dir_code_methods = os.environ['dir_code_methods']
# %%
# Output of this script is the data frame data_day_limits
exec(open(os.path.join(os.path.realpath(dir_code_methods), 'setup-day-limits.py')).read())
# %%
execute_test = False
if execute_test:
# Sanity check: are there any duplicates in the hours_since_start_day column?
for participant in dict_knitted_with_puffmarker.keys():
for days in dict_knitted_with_puffmarker[participant].keys():
current_data = dict_knitted_with_puffmarker[participant][days]
if len(current_data.index) > 0:
which_idx_dup = current_data['hours_since_start_day'].duplicated()
which_idx_dup = np.array(which_idx_dup)
if np.sum(which_idx_dup*1.)>0:
print((participant, days, np.cumsum(which_idx_dup))) # prints those participant-days with duplicates
# found: 1 selfreport and 1 random ema with exactly the same hours_since_start_day
# the selfreport will eventually be dropped since when_smoke=4
# %%
# Test out the function
execute_test = False
if execute_test:
use_this_id = None
use_this_days = None
# Test out the function latent_poisson_process_ex1
# pre-quit
print(latent_poisson_process_ex1(latent_dict = latent_data[use_this_id][use_this_days], params = {'lambda': 0.14}))
# post-quit
print(latent_poisson_process_ex1(latent_dict = latent_data[use_this_id][use_this_days], params = {'lambda': 0.14}))
# %%
# Test out the function
execute_test = False
if execute_test:
use_this_id = None
use_this_days = None
# Test out the function latent_poisson_process_ex2
# pre-quit
print(latent_poisson_process_ex2(latent_dict = latent_data[use_this_id][use_this_days], params = {'lambda_prequit': 0.14, 'lambda_postquit': 0.75}))
# post-quit
print(latent_poisson_process_ex2(latent_dict = latent_data[use_this_id][use_this_days], params = {'lambda_prequit': 0.14, 'lambda_postquit': 0.75}))
# %%
# Test out the class
execute_test = False
if execute_test:
tmp_latent_data = copy.deepcopy(latent_data)
lat_pp_ex1 = latent(data=tmp_latent_data, model=latent_poisson_process_ex1, params = {'lambda': 0.14})
print(lat_pp_ex1.model)
print(lat_pp_ex1.params)
print(lat_pp_ex1.compute_total_pp(use_params = None))
lat_pp_ex1.update_params(new_params = {'lambda': 0.77})
print(lat_pp_ex1.model)
print(lat_pp_ex1.params)
print(lat_pp_ex1.compute_total_pp(use_params = None))
# %%
# Another test on the class
execute_test = False
if execute_test:
tmp_latent_data = copy.deepcopy(latent_data)
lat_pp_ex2 = latent(data=tmp_latent_data, model=latent_poisson_process_ex2, params = {'lambda_prequit': 0.14, 'lambda_postquit': 0.75})
print(lat_pp_ex2.model)
print(lat_pp_ex2.params)
print(lat_pp_ex2.compute_total_pp(use_params = None))
lat_pp_ex2.update_params(new_params = {'lambda_prequit': 0.05, 'lambda_postquit': 0.25})
print(lat_pp_ex2.model)
print(lat_pp_ex2.params)
print(lat_pp_ex2.compute_total_pp(use_params = None))
# %%
# Test out the function
execute_test = False
if execute_test:
use_participant = None
use_days = None
tmp_clean_data = copy.deepcopy(clean_data[use_participant][use_days]) # keep clean_data[use_participant][use_days] untouched
tmp_latent_data = copy.deepcopy(latent_data[use_participant][use_days]) # keep latent_data[use_participant][use_days] untouched
tmp_clean_data, tmp_latent_data = matching(observed_dict = tmp_clean_data, latent_dict = tmp_latent_data)
print(tmp_clean_data)
print(tmp_latent_data)
print(clean_data[use_participant][use_days]) # Check that this object remains unmodified
print(latent_data[use_participant][use_days]) # Check that this object remains unmodified
# %%
# Test out the function
execute_test = False
if execute_test:
use_participant = None
use_days = None
tmp_clean_data = copy.deepcopy(clean_data[use_participant][use_days]) # keep clean_data[use_participant][use_days] untouched
tmp_latent_data = copy.deepcopy(latent_data[use_participant][use_days])
if len(tmp_latent_data['matched']) > 0:
res = selfreport_mem(observed_dict = tmp_clean_data, latent_dict = tmp_latent_data)
print(res)
# %%
# Test out the function
execute_test = False
if execute_test:
use_participant = None
use_days = None
tmp_clean_data = copy.deepcopy(clean_data[use_participant][use_days]) # keep clean_data[use_participant][use_days] untouched
tmp_latent_data = copy.deepcopy(latent_data[use_participant][use_days])
res = selfreport_mem_total(observed_dict = tmp_clean_data, latent_dict = tmp_latent_data, params = {'p':0.9})
print(res)
# %%
# Another test of the function
execute_test = False
if execute_test:
tmp_clean_data = copy.deepcopy(clean_data) # keep clean_data untouched
tmp_latent_data = copy.deepcopy(latent_data) # keep latent_data untouched
# Sanity check: are there observed events which are NOT matched to latent events?
all_matched = True
for use_this_id in tmp_clean_data.keys():
for use_this_days in tmp_clean_data[use_this_id].keys():
observed = tmp_clean_data[use_this_id][use_this_days]
latent = tmp_latent_data[use_this_id][use_this_days]
res = selfreport_mem_total(observed_dict = observed, latent_dict = latent, params = {'p':0.9})
if res== -np.inf:
all_matched = False
print(("NOT all matched", use_this_id, use_this_days, res))
if all_matched:
print("all observed events are matched to latent events")
# %%
# Test out the class
execute_test = False
if execute_test:
tmp_clean_data = copy.deepcopy(clean_data) # keep clean_data untouched
tmp_latent_data = copy.deepcopy(latent_data) # keep latent_data untouched
sr_mem = measurement_model(data=tmp_clean_data, model=selfreport_mem_total, latent = tmp_latent_data, model_params={'p':0.9})
print(sr_mem.model_params)
print(sr_mem.compute_total_mem())
sr_mem.update_params(new_params = {'p':0.4})
print(sr_mem.model_params)
print(sr_mem.compute_total_mem())
|
[
"copy.deepcopy",
"numpy.sum",
"os.path.realpath",
"numpy.cumsum",
"numpy.array"
] |
[((2457, 2483), 'copy.deepcopy', 'copy.deepcopy', (['latent_data'], {}), '(latent_data)\n', (2470, 2483), False, 'import copy\n'), ((2978, 3004), 'copy.deepcopy', 'copy.deepcopy', (['latent_data'], {}), '(latent_data)\n', (2991, 3004), False, 'import copy\n'), ((3608, 3660), 'copy.deepcopy', 'copy.deepcopy', (['clean_data[use_participant][use_days]'], {}), '(clean_data[use_participant][use_days])\n', (3621, 3660), False, 'import copy\n'), ((3739, 3792), 'copy.deepcopy', 'copy.deepcopy', (['latent_data[use_participant][use_days]'], {}), '(latent_data[use_participant][use_days])\n', (3752, 3792), False, 'import copy\n'), ((4341, 4393), 'copy.deepcopy', 'copy.deepcopy', (['clean_data[use_participant][use_days]'], {}), '(clean_data[use_participant][use_days])\n', (4354, 4393), False, 'import copy\n'), ((4472, 4525), 'copy.deepcopy', 'copy.deepcopy', (['latent_data[use_participant][use_days]'], {}), '(latent_data[use_participant][use_days])\n', (4485, 4525), False, 'import copy\n'), ((4820, 4872), 'copy.deepcopy', 'copy.deepcopy', (['clean_data[use_participant][use_days]'], {}), '(clean_data[use_participant][use_days])\n', (4833, 4872), False, 'import copy\n'), ((4951, 5004), 'copy.deepcopy', 'copy.deepcopy', (['latent_data[use_participant][use_days]'], {}), '(latent_data[use_participant][use_days])\n', (4964, 5004), False, 'import copy\n'), ((5231, 5256), 'copy.deepcopy', 'copy.deepcopy', (['clean_data'], {}), '(clean_data)\n', (5244, 5256), False, 'import copy\n'), ((5308, 5334), 'copy.deepcopy', 'copy.deepcopy', (['latent_data'], {}), '(latent_data)\n', (5321, 5334), False, 'import copy\n'), ((6144, 6169), 'copy.deepcopy', 'copy.deepcopy', (['clean_data'], {}), '(clean_data)\n', (6157, 6169), False, 'import copy\n'), ((6221, 6247), 'copy.deepcopy', 'copy.deepcopy', (['latent_data'], {}), '(latent_data)\n', (6234, 6247), False, 'import copy\n'), ((1037, 1060), 'numpy.array', 'np.array', (['which_idx_dup'], {}), '(which_idx_dup)\n', (1045, 1060), True, 'import numpy as np\n'), ((482, 516), 'os.path.realpath', 'os.path.realpath', (['dir_code_methods'], {}), '(dir_code_methods)\n', (498, 516), False, 'import os\n'), ((1080, 1107), 'numpy.sum', 'np.sum', (['(which_idx_dup * 1.0)'], {}), '(which_idx_dup * 1.0)\n', (1086, 1107), True, 'import numpy as np\n'), ((1154, 1178), 'numpy.cumsum', 'np.cumsum', (['which_idx_dup'], {}), '(which_idx_dup)\n', (1163, 1178), True, 'import numpy as np\n')]
|
from .alphabet import protein_alphabet, dna_alphabet, rna_alphabet
from .alignment import Alignment, ReferenceMapping
import numpy as np
from Bio import pairwise2
from Bio.SubsMat import MatrixInfo
def _get_substitution_matrix(alphabet):
""" Return a tuple with default parameters `(substitution_matrix, gap_open, gap_extend) for the given alphabet. """
if alphabet == protein_alphabet:
return MatrixInfo.blosum50, -8, -8
elif alphabet == dna_alphabet:
return ({
('A', 'A'): 5,
('C', 'A'): -4, ('C', 'C'): 5,
('G', 'A'): -4, ('G', 'C'): -4, ('G', 'G'): 5,
('T', 'A'): -4, ('T', 'C'): -4, ('T', 'G'): -4, ('T', 'T'): 5
}, -2, -0.5)
elif alphabet == rna_alphabet:
return ({
('A', 'A'): 5,
('C', 'A'): -4, ('C', 'C'): 5,
('G', 'A'): -4, ('G', 'C'): -4, ('G', 'G'): 5,
('U', 'A'): -4, ('U', 'C'): -4, ('U', 'G'): -4, ('U', 'U'): 5
}, -2, -0.5)
else:
raise ValueError('explicit substitution_matrix missing on alignment with alphabet that is not protein, dna,'
' or rna')
def search(align, seq, move_to_top=False, substitution_matrix=None, gap_open=None, gap_extend=None):
""" Searches for the best match to the given sequence in the given alignment, and returns its index.
If `move_to_top == True`, the sequence is swapped with the first alignment sequence. The return value remains
the position of the sequence before it got moved.
The default substitution matrix is BLOSUM50 for proteins and NUC.4.4 for DNA. A version of NUC.4.4 with T replaced
by U is used by default for RNA. For any other alphabets, a substitution matrix needs to be specified (that is a
dict from pairs of letters to scores).
The function currently does not work on multi-alphabet alignments.
"""
if len(align.alphabets) == 0 or len(align) == 0:
raise ValueError('search on empty alignment.')
if len(align.alphabets) > 1:
raise ValueError('search not implemented on multi-alphabet alignments.')
if len(seq) == 0:
raise ValueError('search with empty sequence.')
alphabet = align.alphabets[0][0]
if substitution_matrix is None:
substitution_matrix, default_gap_open, default_gap_extend = _get_substitution_matrix(alphabet)
if gap_open is None:
gap_open = default_gap_open
if gap_extend is None:
gap_extend = default_gap_extend
# make sure the sequence is a string
seq = ''.join(seq)
# turn alignment into sequence of strings, stripping gaps
if not alphabet.has_gap:
raise ValueError('search requires the alignment alphabet to have a gap.')
gap_char = alphabet[0]
align_seqs = [''.join(x for x in _ if x != gap_char) for _ in np.asarray(align[:, :])]
scores = []
for i, align_seq in enumerate(align_seqs):
scores.append(pairwise2.align.globalds(seq, align_seq, substitution_matrix,
gap_open, gap_extend, one_alignment_only=True, score_only=True,
penalize_end_gaps=False))
# find the highest scoring sequence
best_id = np.argmax(scores)
# swap to first position?
if move_to_top:
align.swap(0, best_id)
return best_id
def filter_rows(align, max_gaps=0.5):
""" Return a new alignment where rows that have too many gaps are removed (a fraction larger than max_gaps). """
if len(align) == 0:
return Alignment()
gap_structure = align.get_gap_structure()
gap_fractions = np.mean(gap_structure, axis=1)
mask = (gap_fractions <= max_gaps)
return align[mask]
def align_to_sequence(align, seq, ref_idx_names=None, truncate=False, force_idx=None):
""" Set the reference mapping for the alignment according to the given sequence.
By default, the function searches for the best match to `seq` within the alignment, and uses this match to infer a
mapping between alignment columns and positions in `seq`. Columns that do not match any position in `seq` are marked
with `None`. If `truncate` is `True`, the columns that do not have a match in `seq` are removed.
If `force_idx` is set, the search is not done, and only the sequence at that position is used for the matching.
By default, the positions in `seq` are numbered consecutively, starting from 0. If `ref_idx_names` is given,
position `i` in `seq` will have name `ref_idx_names[i]`, and these names will be used in the reference mapping that
will be attached to the alignment.
Currently this only works for single alphabet alignments, and the alphabet needs to be protein, DNA, or RNA.
The position of the matched sequence in the alignment, and the accuracy of the match, are returned in a dictionary.
"""
if len(align) == 0:
# nothing to do
return align
if len(align.alphabets) > 1:
raise ValueError('align_to_sequence not implemented on multi-alphabet alignments.')
alphabet = align.alphabets[0][0]
substitution_matrix, gap_open, gap_extend = _get_substitution_matrix(alphabet)
if force_idx is None:
# find the best matching sequence
force_idx = search(align, seq, substitution_matrix=substitution_matrix,
gap_open=gap_open, gap_extend=gap_extend)
# find the best match
gap_ch = alphabet[0]
# need the alignment sequence without gaps
align_seq = np.asarray(align.data[force_idx])[0]
align_gap_mask = (align_seq == gap_ch)
align_seq_no_gaps = align_seq[~align_gap_mask]
align_seq_no_gaps_as_str = ''.join(align_seq_no_gaps)
seq = ''.join(seq)
p_al = pairwise2.align.globalds(seq, align_seq_no_gaps_as_str, substitution_matrix, gap_open, gap_extend,
penalize_end_gaps=False)
# this will be the mapping from indices in alignment to indices in `seq`
ref_idxs = np.asarray([None for _ in range(len(align_seq))])
# the ungapped positions in p_al[0][0] correspond to positions in the reference sequence
# let's label them
p_al_ref_idxs = np.asarray([None for _ in range(len(p_al[0][0]))])
p_al_ref_idxs[np.asarray(list(p_al[0][0])) != gap_ch] = list(range(len(seq)))
# now the ungapped positions in p_al[0][1] correspond to ungapped positions in the alignment sequence
ref_idxs[~align_gap_mask] = p_al_ref_idxs[np.asarray(list(p_al[0][1])) != gap_ch]
# calculate some details
details = {'align_accuracy': np.mean(
[a == b for a, b in zip(p_al[0][0], p_al[0][1]) if a != gap_ch and b != gap_ch]),
'idx': force_idx}
# do we want to truncate the alignment?
if truncate:
# noinspection PyComparisonWithNone
truncate_mask = (ref_idxs != None)
align.truncate_columns(truncate_mask, in_place=True)
ref_idxs = ref_idxs[truncate_mask]
if ref_idx_names is not None:
ref_seq = [ref_idx_names[_] if _ is not None else None for _ in ref_idxs]
else:
ref_seq = ref_idxs
align.reference = ReferenceMapping(list(ref_seq))
return details
|
[
"numpy.asarray",
"Bio.pairwise2.align.globalds",
"numpy.mean",
"numpy.argmax"
] |
[((3260, 3277), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (3269, 3277), True, 'import numpy as np\n'), ((3655, 3685), 'numpy.mean', 'np.mean', (['gap_structure'], {'axis': '(1)'}), '(gap_structure, axis=1)\n', (3662, 3685), True, 'import numpy as np\n'), ((5770, 5897), 'Bio.pairwise2.align.globalds', 'pairwise2.align.globalds', (['seq', 'align_seq_no_gaps_as_str', 'substitution_matrix', 'gap_open', 'gap_extend'], {'penalize_end_gaps': '(False)'}), '(seq, align_seq_no_gaps_as_str, substitution_matrix,\n gap_open, gap_extend, penalize_end_gaps=False)\n', (5794, 5897), False, 'from Bio import pairwise2\n'), ((5546, 5579), 'numpy.asarray', 'np.asarray', (['align.data[force_idx]'], {}), '(align.data[force_idx])\n', (5556, 5579), True, 'import numpy as np\n'), ((2848, 2871), 'numpy.asarray', 'np.asarray', (['align[:, :]'], {}), '(align[:, :])\n', (2858, 2871), True, 'import numpy as np\n'), ((2959, 3118), 'Bio.pairwise2.align.globalds', 'pairwise2.align.globalds', (['seq', 'align_seq', 'substitution_matrix', 'gap_open', 'gap_extend'], {'one_alignment_only': '(True)', 'score_only': '(True)', 'penalize_end_gaps': '(False)'}), '(seq, align_seq, substitution_matrix, gap_open,\n gap_extend, one_alignment_only=True, score_only=True, penalize_end_gaps\n =False)\n', (2983, 3118), False, 'from Bio import pairwise2\n')]
|
import numpy as np
from ._Epsilon import Epsilon
class UCB1(Epsilon):
"""
Agente que soluciona el problema del el Bandido Multibrazo
(Multi-Armed Bandit) mediante el uso de una estrategia UCB1
Parámetros
----------
bandits : array of Bandit
Vector con los bandidos con los que se debe jugar
Métodos
-------
run :
Realiza una serie de tiradas con los bandidos seleccionados
por el algoritmo
update:
Actualiza los valores adicionales después de una tirada
select :
Selecciona un bandido para jugar en la próxima tirada
average_reward :
Obtención de la recompensa promedio
plot :
Representación gráfica del histórico de tiradas
References
----------
<NAME>, <NAME>, and <NAME>. "A survey of online
experiment design with the stochastic multi-armed bandit." arXiv preprint
arXiv:1510.00757 (2015).
"""
def select(self):
total = len(self._rewards)
if total < self._num_bandits:
bandit = total
else:
ucb = [0] * self._num_bandits
for i in range(self._num_bandits):
ucb[i] = self._mean[i] + np.sqrt(2 * np.log(total) / self._plays[i])
max_bandits = np.where(ucb == np.max(ucb))[0]
bandit = np.random.choice(max_bandits)
return bandit
class UCB2(Epsilon):
"""
Agente que soluciona el problema del el Bandido Multibrazo
(Multi-Armed Bandit) mediante el uso de una estrategia UCB2
Parámetros
----------
bandits : array of Bandit
Vector con los bandidos con los que se debe jugar
alpha : float
Parámetro que se influye en el ratio de aprendizaje del algoritmo
Métodos
-------
run :
Realiza una serie de tiradas con los bandidos seleccionados
por el algoritmo
update:
Actualiza los valores adicionales después de una tirada
select :
Selecciona un bandido para jugar en la próxima tirada
average_reward :
Obtención de la recompensa promedio
plot :
Representación gráfica del histórico de tiradas
References
----------
<NAME>, <NAME>, and <NAME>. "A survey of online
experiment design with the stochastic multi-armed bandit." arXiv preprint
arXiv:1510.00757 (2015).
"""
def __init__(self, bandits, alpha=0.1):
self.alpha = alpha
self._mean = [0] * len(bandits)
super(UCB2, self).__init__(bandits)
def select(self):
total = len(self._rewards)
if total == 0:
bandit = np.random.choice(self._num_bandits)
else:
            ucb = [0] * self._num_bandits
            for i in range(self._num_bandits):
try:
tau = int(np.ceil((1 + self.alpha) ** self._plays[i]))
if np.log(np.e * total / tau) > 0:
bonus = np.sqrt((1. + self.alpha) * np.log(np.e * total / tau) / (2 * tau))
else:
bonus = 0
except:
bonus = 0
if np.isnan(bonus):
ucb[i] = self._mean[i]
else:
ucb[i] = self._mean[i] + bonus
max_bandits = np.where(ucb == np.max(ucb))[0]
bandit = np.random.choice(max_bandits)
return bandit
class UCB1Tuned(Epsilon):
"""
Agente que soluciona el problema del el Bandido Multibrazo
(Multi-Armed Bandit) mediante el uso de una estrategia UCB1-Tuned
Parámetros
----------
bandits : array of Bandit
Vector con los bandidos con los que se debe jugar
Métodos
-------
run :
Realiza una serie de tiradas con los bandidos seleccionados
por el algoritmo
update:
Actualiza los valores adicionales después de una tirada
select :
Selecciona un bandido para jugar en la próxima tirada
average_reward :
Obtención de la recompensa promedio
plot :
Representación gráfica del histórico de tiradas
References
----------
<NAME>, <NAME>, and <NAME>. "A survey of online
experiment design with the stochastic multi-armed bandit." arXiv preprint
arXiv:1510.00757 (2015).
"""
def __init__(self, bandits):
self._mean2 = [0] * len(bandits)
super(UCB1Tuned, self).__init__(bandits)
def update(self, bandit, reward):
        # Update the running mean of the squared rewards
self._mean2[bandit] = (1 - 1.0/self._plays[bandit]) * self._mean2[bandit] \
+ 1.0/self._plays[bandit] * reward ** 2
def select(self):
total = len(self._rewards)
if total == 0:
bandit = np.random.choice(self._num_bandits)
else:
ucb = [0] * self._num_bandits
for i in range(self._num_bandits):
if self._plays[i] == 0:
v = self._mean2[i] - self._mean[i] ** 2 + np.sqrt(2 * np.log(total))
else:
v = self._mean2[i] - self._mean[i] ** 2 + np.sqrt(2 * np.log(total) / self._plays[i])
ucb[i] = self._mean[i] + np.sqrt(np.log(total) * np.min([1/4, v]))
max_bandits = np.where(ucb == np.max(ucb))[0]
bandit = np.random.choice(max_bandits)
return bandit
class UCBNormal(Epsilon):
"""
Agente que soluciona el problema del el Bandido Multibrazo
(Multi-Armed Bandit) mediante el uso de una estrategia UCB-Normal
Parámetros
----------
bandits : array of Bandit
Vector con los bandidos con los que se debe jugar
Métodos
-------
run :
Realiza una serie de tiradas con los bandidos seleccionados
por el algoritmo
update:
Actualiza los valores adicionales después de una tirada
select :
Selecciona un bandido para jugar en la próxima tirada
average_reward :
Obtención de la recompensa promedio
plot :
Representación gráfica del histórico de tiradas
References
----------
<NAME>, <NAME>, and <NAME>. "A survey of online
experiment design with the stochastic multi-armed bandit." arXiv preprint
arXiv:1510.00757 (2015).
"""
def __init__(self, bandits):
self._rewards2 = [0] * len(bandits)
super(UCBNormal, self).__init__(bandits)
def update(self, bandit, reward):
self._rewards2[bandit] += reward ** 2
def select(self):
total = len(self._rewards)
        # Minimum number of times each bandit must be played
if total > 0:
min_plays = np.ceil(8 * np.log(total))
else:
min_plays = 1
        # If any bandit has not been played the minimum number of times, select it
if np.any(np.array(self._plays) < min_plays):
min_bandit = np.where(np.array(self._plays) < min_plays)[0]
bandit = np.random.choice(min_bandit)
else:
ucb = [0] * self._num_bandits
for i in range(self._num_bandits):
if self._plays[i] > 1:
bonus = 16 * (self._rewards2[i] - self._plays[i] * self._mean[i]**2) / (self._plays[i] - 1)
bonus *= np.log(total - 1) / self._plays[i]
bonus = np.sqrt(bonus)
ucb[i] = self._mean[i] + bonus
else:
ucb[i] = self._mean[i]
max_bandits = np.where(ucb == np.max(ucb))[0]
bandit = np.random.choice(max_bandits)
return bandit
class UCBV(Epsilon):
"""
Agente que soluciona el problema del el Bandido Multibrazo
(Multi-Armed Bandit) mediante el uso de una estrategia UCBV
Parámetros
----------
bandits : array of Bandit
Vector con los bandidos con los que se debe jugar
b : float
Hiperparámetro para seleccionar el ration de aprendizaje
Métodos
-------
run :
Realiza una serie de tiradas con los bandidos seleccionados
por el algoritmo
update:
Actualiza los valores adicionales después de una tirada
select :
Selecciona un bandido para jugar en la próxima tirada
average_reward :
Obtención de la recompensa promedio
plot :
Representación gráfica del histórico de tiradas
References
----------
<NAME>, <NAME>, and <NAME>.
"Exploration-exploitation trade-off using variance estimates in multi-armed
bandits." Theoretical Computer Science, Volume 410, Issue 19, 28 April 2009,
Pages 1876-1902 (https://doi.org/10.1016/j.tcs.2009.01.016)
"""
def __init__(self, bandits, b=3):
self.b = b
self._mean2 = [0] * len(bandits)
super(UCBV, self).__init__(bandits)
def update(self, bandit, reward):
self._mean2[bandit] += reward**2
def select(self):
num_bandits = len(self.bandits)
total = len(self._rewards)
if total < num_bandits:
bandit = total
else:
ucb = [0] * num_bandits
for i in range(num_bandits):
var = self._mean2[i] / self._plays[i] - self._mean[i]**2
ucb[i] = self._mean[i]
ucb[i] += np.sqrt(2 * var * np.log(total) / self._plays[i])
ucb[i] += self.b * np.log(total) / self._plays[i]
max_bandits = np.where(ucb == np.max(ucb))[0]
            bandit = np.random.choice(max_bandits)
        return bandit
|
[
"numpy.log",
"numpy.ceil",
"numpy.isnan",
"numpy.max",
"numpy.min",
"numpy.array",
"numpy.random.choice",
"numpy.sqrt"
] |
[((1368, 1397), 'numpy.random.choice', 'np.random.choice', (['max_bandits'], {}), '(max_bandits)\n', (1384, 1397), True, 'import numpy as np\n'), ((2717, 2752), 'numpy.random.choice', 'np.random.choice', (['self._num_bandits'], {}), '(self._num_bandits)\n', (2733, 2752), True, 'import numpy as np\n'), ((3484, 3513), 'numpy.random.choice', 'np.random.choice', (['max_bandits'], {}), '(max_bandits)\n', (3500, 3513), True, 'import numpy as np\n'), ((4978, 5013), 'numpy.random.choice', 'np.random.choice', (['self._num_bandits'], {}), '(self._num_bandits)\n', (4994, 5013), True, 'import numpy as np\n'), ((5571, 5600), 'numpy.random.choice', 'np.random.choice', (['max_bandits'], {}), '(max_bandits)\n', (5587, 5600), True, 'import numpy as np\n'), ((7279, 7307), 'numpy.random.choice', 'np.random.choice', (['min_bandit'], {}), '(min_bandit)\n', (7295, 7307), True, 'import numpy as np\n'), ((7898, 7927), 'numpy.random.choice', 'np.random.choice', (['max_bandits'], {}), '(max_bandits)\n', (7914, 7927), True, 'import numpy as np\n'), ((9943, 9972), 'numpy.random.choice', 'np.random.choice', (['max_bandits'], {}), '(max_bandits)\n', (9959, 9972), True, 'import numpy as np\n'), ((3262, 3277), 'numpy.isnan', 'np.isnan', (['bonus'], {}), '(bonus)\n', (3270, 3277), True, 'import numpy as np\n'), ((7150, 7171), 'numpy.array', 'np.array', (['self._plays'], {}), '(self._plays)\n', (7158, 7171), True, 'import numpy as np\n'), ((6982, 6995), 'numpy.log', 'np.log', (['total'], {}), '(total)\n', (6988, 6995), True, 'import numpy as np\n'), ((7667, 7681), 'numpy.sqrt', 'np.sqrt', (['bonus'], {}), '(bonus)\n', (7674, 7681), True, 'import numpy as np\n'), ((1331, 1342), 'numpy.max', 'np.max', (['ucb'], {}), '(ucb)\n', (1337, 1342), True, 'import numpy as np\n'), ((2908, 2951), 'numpy.ceil', 'np.ceil', (['((1 + self.alpha) ** self._plays[i])'], {}), '((1 + self.alpha) ** self._plays[i])\n', (2915, 2951), True, 'import numpy as np\n'), ((2976, 3002), 'numpy.log', 'np.log', (['(np.e * total / tau)'], {}), '(np.e * total / tau)\n', (2982, 3002), True, 'import numpy as np\n'), ((3447, 3458), 'numpy.max', 'np.max', (['ucb'], {}), '(ucb)\n', (3453, 3458), True, 'import numpy as np\n'), ((5534, 5545), 'numpy.max', 'np.max', (['ucb'], {}), '(ucb)\n', (5540, 5545), True, 'import numpy as np\n'), ((7220, 7241), 'numpy.array', 'np.array', (['self._plays'], {}), '(self._plays)\n', (7228, 7241), True, 'import numpy as np\n'), ((7604, 7621), 'numpy.log', 'np.log', (['(total - 1)'], {}), '(total - 1)\n', (7610, 7621), True, 'import numpy as np\n'), ((7861, 7872), 'numpy.max', 'np.max', (['ucb'], {}), '(ucb)\n', (7867, 7872), True, 'import numpy as np\n'), ((9807, 9820), 'numpy.log', 'np.log', (['total'], {}), '(total)\n', (9813, 9820), True, 'import numpy as np\n'), ((9906, 9917), 'numpy.max', 'np.max', (['ucb'], {}), '(ucb)\n', (9912, 9917), True, 'import numpy as np\n'), ((5445, 5458), 'numpy.log', 'np.log', (['total'], {}), '(total)\n', (5451, 5458), True, 'import numpy as np\n'), ((5461, 5479), 'numpy.min', 'np.min', (['[1 / 4, v]'], {}), '([1 / 4, v])\n', (5467, 5479), True, 'import numpy as np\n'), ((9740, 9753), 'numpy.log', 'np.log', (['total'], {}), '(total)\n', (9746, 9753), True, 'import numpy as np\n'), ((1248, 1261), 'numpy.log', 'np.log', (['total'], {}), '(total)\n', (1254, 1261), True, 'import numpy as np\n'), ((5244, 5257), 'numpy.log', 'np.log', (['total'], {}), '(total)\n', (5250, 5257), True, 'import numpy as np\n'), ((3068, 3094), 'numpy.log', 'np.log', (['(np.e * total / tau)'], {}), '(np.e * total / tau)\n', 
(3074, 3094), True, 'import numpy as np\n'), ((5355, 5368), 'numpy.log', 'np.log', (['total'], {}), '(total)\n', (5361, 5368), True, 'import numpy as np\n')]
|
import Globals
import tkinter as tk
from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, \
    PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, \
    FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT
import os
from os.path import normpath, basename
from PIL import Image, ImageTk
import cv2
from cv2 import imread, IMREAD_ANYCOLOR, IMREAD_ANYDEPTH, imwrite
import pydicom
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import matplotlib as mpl
from matplotlib import cm
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
import numpy as np
def drawProfiles(even):
    # TODO: create the DVH plot here
return
def processDoseplan_usingReferencePoint(only_one):
################ RT Plan ######################
#Find each coordinate in mm to isocenter relative to first element in doseplan
iso_1 = abs(Globals.DVH_dataset_doseplan.ImagePositionPatient[0] - Globals.DVH_isocenter_mm[0])
iso_2 = abs(Globals.DVH_dataset_doseplan.ImagePositionPatient[1] - Globals.DVH_isocenter_mm[1])
iso_3 = abs(Globals.DVH_dataset_doseplan.ImagePositionPatient[2] - Globals.DVH_isocenter_mm[2])
#Given as [x,y,z] in patient coordinates
Globals.DVH_isocenter_mm = [iso_1, iso_2, iso_3]
try:
Globals.DVH_vertical = int(Globals.DVH_vertical)
except:
messagebox.showerror("Error", "Could not read the vertical displacements\n (Code: displacements to integer)")
return
try:
Globals.DVH_lateral = int(Globals.DVH_lateral)
except:
messagebox.showerror("Error", "Could not read the lateral displacements\n (Code: displacements to integer)")
return
try:
Globals.DVH_longitudinal = int(Globals.DVH_longitudinal)
except:
messagebox.showerror("Error", "Could not read the longitudinal displacements\n (Code: displacements to integer)")
return
lateral = Globals.DVH_lateral
    longit = Globals.DVH_longitudinal
vertical = Globals.DVH_vertical
isocenter_px = np.zeros(3)
distance_in_doseplan_ROI_reference_point_px = []
if(Globals.DVH_dataset_doseplan.PixelSpacing==[1, 1]):
#make isocenter coordinates into pixel values
isocenter_px[0] = np.round(iso_1)
isocenter_px[1] = np.round(iso_2)
isocenter_px[2] = np.round(iso_3)
#find the pixel distance from reference point to ROI corners
distance_in_doseplan_ROI_reference_point_px.append([np.round(Globals.DVH_distance_reference_point_ROI[0][0]),\
np.round(Globals.DVH_distance_reference_point_ROI[0][1])])
distance_in_doseplan_ROI_reference_point_px.append([np.round(Globals.DVH_distance_reference_point_ROI[1][0]),\
np.round(Globals.DVH_distance_reference_point_ROI[1][1])])
distance_in_doseplan_ROI_reference_point_px.append([np.round(Globals.DVH_distance_reference_point_ROI[2][0]),\
np.round(Globals.DVH_distance_reference_point_ROI[2][1])])
distance_in_doseplan_ROI_reference_point_px.append([np.round(Globals.DVH_distance_reference_point_ROI[3][0]),\
np.round(Globals.DVH_distance_reference_point_ROI[3][1])])
#Input to px
lateral_px = np.round(lateral)
vertical_px = np.round(vertical)
longit_px = np.round(longit)
        # displacement to px
doseplan_lateral_displacement_px = np.round(Globals.DVH_doseplan_lateral_displacement)
doseplan_vertical_displacement_px = np.round(Globals.DVH_doseplan_vertical_displacement)
doseplan_longitudinal_displacement_px = np.round(Globals.DVH_doseplan_longitudianl_displacement)
elif(Globals.DVH_dataset_doseplan.PixelSpacing==[2, 2]):
#make isocenter coordinates into pixel values
isocenter_px[0] = np.round(iso_1/2)
isocenter_px[1] = np.round(iso_2/2)
isocenter_px[2] = np.round(iso_3/2)
#find the pixel distance from reference point to ROI corners
distance_in_doseplan_ROI_reference_point_px.append([np.round((Globals.DVH_distance_reference_point_ROI[0][0])/2),\
np.round((Globals.DVH_distance_reference_point_ROI[0][1])/2)])
distance_in_doseplan_ROI_reference_point_px.append([np.round((Globals.DVH_distance_reference_point_ROI[1][0])/2),\
np.round((Globals.DVH_distance_reference_point_ROI[1][1])/2)])
distance_in_doseplan_ROI_reference_point_px.append([np.round((Globals.DVH_distance_reference_point_ROI[2][0])/2),\
np.round((Globals.DVH_distance_reference_point_ROI[2][1])/2)])
distance_in_doseplan_ROI_reference_point_px.append([np.round((Globals.DVH_distance_reference_point_ROI[3][0])/2),\
np.round((Globals.DVH_distance_reference_point_ROI[3][1])/2)])
#Input to px
lateral_px = np.round(lateral/2)
vertical_px = np.round(vertical/2)
longit_px = np.round(longit/2)
        # displacement to px
doseplan_lateral_displacement_px = np.round((Globals.DVH_doseplan_lateral_displacement)/2)
doseplan_vertical_displacement_px = np.round((Globals.DVH_doseplan_vertical_displacement)/2)
doseplan_longitudinal_displacement_px = np.round((Globals.DVH_doseplan_longitudianl_displacement)/2)
else:
#make isocenter coordinates into pixel values
isocenter_px[0] = np.round(iso_1/3)
isocenter_px[1] = np.round(iso_2/3)
isocenter_px[2] = np.round(iso_3/3)
#find the pixel distance from reference point to ROI corners
distance_in_doseplan_ROI_reference_point_px.append([np.round((Globals.DVH_distance_reference_point_ROI[0][0])/3),\
np.round((Globals.DVH_distance_reference_point_ROI[0][1])/3)])
distance_in_doseplan_ROI_reference_point_px.append([np.round((Globals.DVH_distance_reference_point_ROI[1][0])/3),\
np.round((Globals.DVH_distance_reference_point_ROI[1][1])/3)])
distance_in_doseplan_ROI_reference_point_px.append([np.round((Globals.DVH_distance_reference_point_ROI[2][0])/3),\
np.round((Globals.DVH_distance_reference_point_ROI[2][1])/3)])
distance_in_doseplan_ROI_reference_point_px.append([np.round((Globals.DVH_distance_reference_point_ROI[3][0])/3),\
np.round((Globals.DVH_distance_reference_point_ROI[3][1])/3)])
#Input to px
lateral_px = np.round(lateral/3)
vertical_px = np.round(vertical/3)
longit_px = np.round(longit/3)
        # displacement to px
doseplan_lateral_displacement_px = np.round((Globals.DVH_doseplan_lateral_displacement)/3)
doseplan_vertical_displacement_px = np.round((Globals.DVH_doseplan_vertical_displacement)/3)
doseplan_longitudinal_displacement_px = np.round((Globals.DVH_doseplan_longitudianl_displacement)/3)
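    # At this point the isocenter, the ROI corner distances and the couch
    # displacements have all been converted from mm to doseplan pixel units
    # (1, 2 or 3 mm per pixel depending on PixelSpacing).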
temp_ref_point_doseplan = np.zeros(3)
#Finding reference point in doseplan
if(Globals.DVH_doseplan_patient_position=='HFS'):
temp_ref_point_doseplan[0] = int(isocenter_px[0]+ doseplan_lateral_displacement_px - lateral_px)
temp_ref_point_doseplan[1] = int(isocenter_px[1]- doseplan_vertical_displacement_px + vertical_px)
temp_ref_point_doseplan[2] = int(isocenter_px[2]+ doseplan_longitudinal_displacement_px - longit_px)
elif(Globals.DVH_doseplan_patient_position=='HFP'):
temp_ref_point_doseplan[0] = isocenter_px[0]- doseplan_lateral_displacement_px+ lateral_px
temp_ref_point_doseplan[1] = isocenter_px[1]+ doseplan_vertical_displacement_px - vertical_px
temp_ref_point_doseplan[2] = isocenter_px[2]+ doseplan_longitudinal_displacement_px - longit_px
elif(Globals.DVH_doseplan_patient_position=='HFDR'):
temp_ref_point_doseplan[0] = isocenter_px[0]- doseplan_vertical_displacement_px + vertical_px
temp_ref_point_doseplan[1] = isocenter_px[1]+ doseplan_lateral_displacement_px - lateral_px
temp_ref_point_doseplan[2] = isocenter_px[2]+ doseplan_longitudinal_displacement_px - longit_px
elif(Globals.DVH_doseplan_patient_position=='HFDL'):
temp_ref_point_doseplan[0] = isocenter_px[0]+ doseplan_vertical_displacement_px - vertical_px
temp_ref_point_doseplan[1] = isocenter_px[1]- doseplan_lateral_displacement_px + lateral_px
temp_ref_point_doseplan[2] = isocenter_px[2]+ doseplan_longitudinal_displacement_px - longit_px
elif(Globals.DVH_doseplan_patient_position=='FFS'):
temp_ref_point_doseplan[0] = isocenter_px[0]- doseplan_lateral_displacement_px + lateral_px
temp_ref_point_doseplan[1] = isocenter_px[1]+ doseplan_vertical_displacement_px - vertical_px
temp_ref_point_doseplan[2] = isocenter_px[2]- doseplan_longitudinal_displacement_px + longit_px
elif(Globals.DVH_doseplan_patient_position=='FFP'):
temp_ref_point_doseplan[0] = isocenter_px[0]+ doseplan_lateral_displacement_px- lateral_px
temp_ref_point_doseplan[1] = isocenter_px[1]- doseplan_vertical_displacement_px + vertical_px
temp_ref_point_doseplan[2] = isocenter_px[2]- doseplan_longitudinal_displacement_px + longit_px
elif(Globals.DVH_doseplan_patient_position=='FFDR'):
temp_ref_point_doseplan[0] = isocenter_px[0]- doseplan_vertical_displacement_px + vertical_px
temp_ref_point_doseplan[1] = isocenter_px[1]- doseplan_lateral_displacement_px + lateral_px
temp_ref_point_doseplan[2] = isocenter_px[2]- doseplan_longitudinal_displacement_px + longit_px
else:
temp_ref_point_doseplan[0] = isocenter_px[0] + doseplan_vertical_displacement_px - vertical_px
temp_ref_point_doseplan[1] = isocenter_px[1] + doseplan_lateral_displacement_px - lateral_px
temp_ref_point_doseplan[2] = isocenter_px[2]- doseplan_longitudinal_displacement_px + longit_px
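    # The axis and sign applied to each displacement depend on the patient
    # position (HFS, HFP, HFDR, HFDL, FFS, FFP, FFDR; the final else branch
    # covers the remaining position).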
Globals.DVH_reference_point_in_doseplan = temp_ref_point_doseplan
reference_point = np.zeros(3)
######################## Doseplan ##################################
    #dataset_swapped below holds the dose matrix reordered the way the film is expected (slice, rows, columns)
    #isocenter_px and reference_point are reordered accordingly to match the doseplan and film orientation.
if(Globals.DVH_dataset_doseplan.ImageOrientationPatient==[1, 0, 0, 0, 1, 0]):
reference_point[0] = temp_ref_point_doseplan[2]
reference_point[1] = temp_ref_point_doseplan[1]
reference_point[2] = temp_ref_point_doseplan[0]
if(Globals.DVH_film_orientation.get()=='Coronal'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 0,1)
temp_ref = reference_point[0]
reference_point[0] = reference_point[1]
reference_point[1] = temp_ref
elif(Globals.DVH_film_orientation.get()=='Sagittal'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 0,2)
temp_ref = reference_point[0]
reference_point[0] = reference_point[2]
reference_point[2] = temp_ref
elif(Globals.DVH_film_orientation.get()=='Axial'):
dataset_swapped = Globals.DVH_dataset_doseplan.pixel_array
else:
messagebox.showerror("Error", "Something has gone wrong here.")
clearAll()
return
elif(Globals.DVH_dataset_doseplan.ImageOrientationPatient==[1, 0, 0, 0, 0, 1]):
reference_point[0] = temp_ref_point_doseplan[1]
reference_point[1] = temp_ref_point_doseplan[2]
reference_point[2] = temp_ref_point_doseplan[0]
if(Globals.DVH_film_orientation.get()=='Coronal'):
dataset_swapped = Globals.DVH_dataset_doseplan.pixel_array
        elif(Globals.DVH_film_orientation.get()=='Sagittal'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 0,2)
temp_ref = reference_point[0]
reference_point[0] = reference_point[2]
reference_point[2] = temp_ref
dataset_swapped = np.swapaxes(dataset_swapped, 1,2)
temp_ref = reference_point[1]
reference_point[1] = reference_point[2]
reference_point[2] = temp_ref
elif(Globals.DVH_film_orientation.get()=='Axial'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 0,1)
temp_ref = reference_point[0]
reference_point[0] = reference_point[1]
reference_point[1] = temp_ref
else:
messagebox.showerror("Error", "Something has gone wrong.")
clearAll()
return
elif(Globals.DVH_dataset_doseplan.ImageOrientationPatient==[0, 1, 0, 1, 0, 0]):
reference_point[0] = temp_ref_point_doseplan[2]
reference_point[1] = temp_ref_point_doseplan[0]
reference_point[2] = temp_ref_point_doseplan[1]
if(Globals.DVH_film_orientation.get()=='Coronal'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 0,2)
temp_ref = reference_point[0]
reference_point[0] = reference_point[2]
reference_point[2] = temp_ref
dataset_swapped = np.swapaxes(dataset_swapped, 1,2)
temp_ref = reference_point[1]
reference_point[1] = reference_point[2]
reference_point[2] = temp_ref
elif(Globals.DVH_film_orientation.get()=='Sagittal'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 0,1)
temp_ref = reference_point[0]
reference_point[0] = reference_point[1]
reference_point[1] = temp_ref
dataset_swapped = np.swapaxes(dataset_swapped, 1,2)
temp_ref = reference_point[1]
reference_point[1] = reference_point[2]
reference_point[2] = temp_ref
elif(Globals.DVH_film_orientation.get()=='Axial'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 1,2)
temp_ref = reference_point[1]
reference_point[1] = reference_point[2]
reference_point[2] = temp_ref
else:
messagebox.showerror("Error", "Something has gone wrong.")
clearAll()
return
elif(Globals.DVH_dataset_doseplan.ImageOrientationPatient==[0, 1, 0, 0, 0, 1]):
reference_point[0] = temp_ref_point_doseplan[0]
reference_point[1] = temp_ref_point_doseplan[2]
reference_point[2] = temp_ref_point_doseplan[1]
if(Globals.DVH_film_orientation.get()=='Coronal'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 0,2)
temp_ref = reference_point[0]
reference_point[0] = reference_point[2]
reference_point[2] = temp_ref
elif(Globals.DVH_film_orientation.get()=='Sagittal'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 1,2)
temp_ref = reference_point[1]
reference_point[1] = reference_point[2]
reference_point[2] = temp_ref
elif(Globals.DVH_film_orientation.get()=='Axial'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 0,1)
temp_ref = reference_point[0]
reference_point[0] = reference_point[1]
reference_point[1] = temp_ref
dataset_swapped = np.swapaxes(dataset_swapped, 1,2)
temp_ref = reference_point[1]
reference_point[1] = reference_point[2]
reference_point[2] = temp_ref
else:
messagebox.showerror("Error", "Something has gone wrong.")
clearAll()
return
elif(Globals.DVH_dataset_doseplan.ImageOrientationPatient==[0, 0, 1, 1, 0, 0]):
reference_point[0] = temp_ref_point_doseplan[1]
reference_point[1] = temp_ref_point_doseplan[0]
reference_point[2] = temp_ref_point_doseplan[2]
if(Globals.DVH_film_orientation.get()=='Coronal'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 1,2)
temp_ref = reference_point[1]
reference_point[1] = reference_point[2]
reference_point[2] = temp_ref
elif(Globals.DVH_film_orientation.get()=='Sagittal'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 0,1)
temp_ref = reference_point[0]
reference_point[0] = reference_point[1]
reference_point[1] = temp_ref
elif(Globals.DVH_film_orientation.get()=='Axial'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 0,1)
temp_ref = reference_point[0]
reference_point[0] = reference_point[1]
reference_point[1] = temp_ref
dataset_swapped = np.swapaxes(dataset_swapped, 0,2)
temp_ref = reference_point[0]
reference_point[0] = reference_point[2]
reference_point[2] = temp_ref
else:
messagebox.showerror("Error", "Something has gone wrong.")
clearAll()
return
elif(Globals.DVH_dataset_doseplan.ImageOrientationPatient==[0, 0, 1, 0, 1, 0]):
reference_point[0] = temp_ref_point_doseplan[0]
reference_point[1] = temp_ref_point_doseplan[1]
reference_point[2] = temp_ref_point_doseplan[2]
if(Globals.DVH_film_orientation.get()=='Coronal'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 0,2)
temp_ref = reference_point[0]
reference_point[0] = reference_point[2]
reference_point[2] = temp_ref
dataset_swapped = np.swapaxes(dataset_swapped, 0,1)
temp_ref = reference_point[0]
reference_point[0] = reference_point[1]
reference_point[1] = temp_ref
elif(Globals.DVH_film_orientation.get()=='Sagittal'):
dataset_swapped = Globals.DVH_dataset_doseplan.pixel_array
        elif(Globals.DVH_film_orientation.get()=='Axial'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 0,2)
temp_ref = reference_point[0]
reference_point[0] = reference_point[2]
reference_point[2] = temp_ref
else:
messagebox.showerror("Error", "Something has gone wrong.")
clearAll()
return
else:
messagebox.showerror("Error", "Something has gone wrong.")
clearAll()
return
if(reference_point[0]<0 or reference_point[0]>dataset_swapped.shape[0]):
messagebox.showerror("Error", "Reference point is outside of dosematrix\n\
(Code: first dimension, number of frames in dosematrix)")
return
if(reference_point[1]<0 or reference_point[1]>dataset_swapped.shape[1]):
messagebox.showerror("Error", "Reference point is outside of dosematrix\n\
(Code: second dimension, rows in dosematrix)")
return
if(reference_point[2]<0 or reference_point[2]>dataset_swapped.shape[2]):
messagebox.showerror("Error", "Reference point is outside of dosematrix\n\
(Code: third dimension, columns in dosematrix)")
return
dose_slice = dataset_swapped[int(reference_point[0]),:,:]
    #calculate the coordinates of the Region of Interest in the doseplan (marked on the film)
    #and check that it actually exists in the dose matrix
doseplan_ROI_coords = []
top_left_test_side = False; top_left_test_down = False
top_right_test_side = False; top_right_test_down = False
bottom_left_test_side = False; bottom_left_test_down = False
bottom_right_test_side = False; bottom_right_test_down = False
top_left_side_corr = 0; top_left_down_corr = 0
top_right_side_corr = 0; top_right_down_corr = 0
bottom_left_side_corr = 0; bottom_left_down_corr = 0
bottom_right_side_corr = 0; bottom_right_down_corr = 0
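    # The four ROI corners are handled below in the order top-left, top-right,
    # bottom-left, bottom-right (matching distance_in_doseplan_ROI_reference_point_px);
    # corners falling outside the dose slice are clipped to its edge and the
    # clipped amount is reported to the user further down.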
top_left_to_side = reference_point[2] - distance_in_doseplan_ROI_reference_point_px[0][0]
top_left_down = reference_point[1] - distance_in_doseplan_ROI_reference_point_px[0][1]
if(top_left_to_side < 0):
top_left_test_side = True
top_left_side_corr = abs(top_left_to_side)
top_left_to_side = 0
if(top_left_to_side > dose_slice.shape[1]):
messagebox.showerror("Fatal Error", "Fatal error: marked ROI is out of range in doseplan. Try again")
clearAll()
return
if(top_left_down < 0):
top_left_test_down = True
top_left_down_corr = abs(top_left_down)
top_left_down = 0
if(top_left_down > dose_slice.shape[0]):
messagebox.showerror("Fatal Error", "Fatal error: marked ROI is out of range in doseplan. Try again")
clearAll()
return
top_right_to_side = reference_point[2] - distance_in_doseplan_ROI_reference_point_px[1][0]
top_right_down = reference_point[1] - distance_in_doseplan_ROI_reference_point_px[1][1]
if(top_right_to_side < 0):
messagebox.showerror("Fatal Error", "Fatal error: marked ROI is out of range in doseplan. Try again")
clearAll()
return
if(top_right_to_side > dose_slice.shape[1]):
top_right_test_side = True
top_right_side_corr = top_right_to_side - dose_slice.shape[1]
top_right_to_side = dose_slice.shape[1]
if(top_right_down < 0):
top_right_test_down = True
top_right_down_corr = abs(top_right_down)
top_right_down = 0
if(top_right_down > dose_slice.shape[0]):
messagebox.showerror("Fatal Error", "Fatal error: marked ROI is out of range in doseplan. Try again")
clearAll()
return
bottom_left_to_side = reference_point[2] - distance_in_doseplan_ROI_reference_point_px[2][0]
bottom_left_down = reference_point[1] - distance_in_doseplan_ROI_reference_point_px[2][1]
if(bottom_left_to_side < 0):
bottom_left_test_side = True
bottom_left_side_corr = abs(bottom_left_to_side)
bottom_left_to_side = 0
if(bottom_left_to_side > dose_slice.shape[1]):
messagebox.showerror("Fatal Error", "Fatal error: marked ROI is out of range in doseplan. Try again")
clearAll()
return
if(bottom_left_down < 0):
messagebox.showerror("Fatal Error", "Fatal error: marked ROI is out of range in doseplan. Try again")
clearAll()
return
if(bottom_left_down > dose_slice.shape[0]):
bottom_left_down_corr = bottom_left_down - dose_slice.shape[0]
bottom_left_down = dose_slice.shape[0]
bottom_left_test_down = True
bottom_right_to_side = reference_point[2] - distance_in_doseplan_ROI_reference_point_px[3][0]
bottom_right_down = reference_point[1] - distance_in_doseplan_ROI_reference_point_px[3][1]
if(bottom_right_to_side < 0):
messagebox.showerror("Fatal Error", "Fatal error: marked ROI is out of range in doseplan. Try again")
clearAll()
return
if(bottom_right_to_side > dose_slice.shape[1]):
bottom_right_side_corr = bottom_right_to_side - dose_slice.shape[1]
bottom_right_to_side = dose_slice.shape[1]
bottom_right_test_side = True
if(bottom_right_down < 0):
messagebox.showerror("Fatal Error", "Fatal error: marked ROI is out of range in doseplan. Try again")
clearAll()
return
if(bottom_right_down > dose_slice.shape[0]):
bottom_right_down_corr = bottom_right_down - dose_slice.shape[0]
bottom_right_down = dose_slice.shape[0]
bottom_right_test_down = True
if(top_right_test_side or top_right_test_down or top_left_test_side or top_left_test_down \
or bottom_right_test_side or bottom_right_test_down or bottom_left_test_side or bottom_left_test_down):
ROI_info = "Left side: " + str(max(top_left_side_corr, bottom_left_side_corr)) + " pixels.\n"\
+ "Right side: " + str(max(top_right_side_corr, bottom_right_side_corr)) + " pixels.\n "\
+ "Top side: " + str(max(top_left_down_corr, top_right_down_corr)) + " pixels.\n"\
+ "Bottom side: " + str(max(bottom_left_down_corr, bottom_right_down_corr)) + " pixels."
messagebox.showinfo("ROI info", "The ROI marked on the film did not fit with the size of the doseplan and had to \
be cut.\n" + ROI_info )
doseplan_ROI_coords.append([top_left_to_side, top_left_down])
doseplan_ROI_coords.append([top_right_to_side, top_right_down])
doseplan_ROI_coords.append([bottom_left_to_side, bottom_left_down])
doseplan_ROI_coords.append([bottom_right_to_side, bottom_right_down])
if only_one:
Globals.DVH_doseplan_dataset_ROI = \
dose_slice[int(top_left_down):int(bottom_left_down), int(top_left_to_side):int(top_right_to_side)]
img=Globals.DVH_doseplan_dataset_ROI
if(Globals.DVH_dataset_doseplan.PixelSpacing==[1, 1]):
img = cv2.resize(img, dsize=(img.shape[1]*5,img.shape[0]*5))
elif(Globals.DVH_dataset_doseplan.PixelSpacing==[2, 2]):
img = cv2.resize(img, dsize=(img.shape[1]*10,img.shape[0]*10))
else:
img = cv2.resize(img, dsize=(img.shape[1]*15,img.shape[0]*15))
mx=np.max(img)
Globals.DVH_max_dose_doseplan = mx*Globals.DVH_dose_scaling_doseplan
img = img/mx
PIL_img_doseplan_ROI = Image.fromarray(np.uint8(cm.viridis(img)*255))
wid = PIL_img_doseplan_ROI.width;heig = PIL_img_doseplan_ROI.height
doseplan_canvas = tk.Canvas(Globals.DVH_film_panedwindow)
doseplan_canvas.grid(row=2, column=0, sticky=N+S+W+E)
Globals.DVH_film_panedwindow.add(doseplan_canvas, \
height=max(heig, Globals.profiles_doseplan_text_image.height()), \
width=wid + Globals.profiles_doseplan_text_image.width())
doseplan_canvas.config(bg='#ffffff', relief=FLAT, highlightthickness=0, \
height=max(heig, Globals.profiles_doseplan_text_image.height()), \
width=wid + Globals.profiles_doseplan_text_image.width())
Globals.DVH_doseplan_write_image = tk.Canvas(doseplan_canvas)
Globals.DVH_doseplan_write_image.grid(row=0,column=1,sticky=N+S+W+E)
Globals.DVH_doseplan_write_image.config(bg='#ffffff', relief=FLAT, highlightthickness=0, width=wid, height=heig)
doseplan_text_image_canvas = tk.Canvas(doseplan_canvas)
doseplan_text_image_canvas.grid(row=0,column=0,sticky=N+S+W+E)
doseplan_text_image_canvas.config(bg='#ffffff', relief=FLAT, highlightthickness=0, \
width=Globals.profiles_doseplan_text_image.width(), height=Globals.profiles_doseplan_text_image.height())
scaled_image_visual = PIL_img_doseplan_ROI
scaled_image_visual = ImageTk.PhotoImage(image=scaled_image_visual)
Globals.DVH_doseplan_write_image_width = scaled_image_visual.width()
Globals.DVH_doseplan_write_image_height = scaled_image_visual.height()
Globals.DVH_doseplan_write_image.create_image(0,0,image=scaled_image_visual, anchor="nw")
Globals.DVH_doseplan_write_image.image = scaled_image_visual
doseplan_text_image_canvas.create_image(0,0,image=Globals.profiles_doseplan_text_image, anchor="nw")
doseplan_text_image_canvas.image=Globals.profiles_doseplan_text_image
drawProfiles(False)
else:
img=dose_slice[int(top_left_down):int(bottom_left_down), int(top_left_to_side):int(top_right_to_side)]
Globals.DVH_doseplan_dataset_ROI_several.append(img)
Globals.DVH_number_of_doseplans+=1
if(Globals.DVH_dataset_doseplan.PixelSpacing==[1, 1]):
Globals.DVH_several_img.append(cv2.resize(img, dsize=(img.shape[1]*5,img.shape[0]*5)))
elif(Globals.DVH_dataset_doseplan.PixelSpacing==[2, 2]):
Globals.DVH_several_img.append(cv2.resize(img, dsize=(img.shape[1]*10,img.shape[0]*10)))
else:
Globals.DVH_several_img.append(cv2.resize(img, dsize=(img.shape[1]*15,img.shape[0]*15)))
def processDoseplan_usingIsocenter(only_one):
################ RT Plan ######################
#Find each coordinate in mm to isocenter relative to first element in doseplan
iso_1 = abs(Globals.DVH_dataset_doseplan.ImagePositionPatient[0] - Globals.DVH_isocenter_mm[0])
iso_2 = abs(Globals.DVH_dataset_doseplan.ImagePositionPatient[1] - Globals.DVH_isocenter_mm[1])
iso_3 = abs(Globals.DVH_dataset_doseplan.ImagePositionPatient[2] - Globals.DVH_isocenter_mm[2])
#Given as [x,y,z] in patient coordinates
Globals.DVH_isocenter_mm = [iso_1, iso_2, iso_3]
#Isocenter in pixel relative to the first element in the doseplan
isocenter_px = np.zeros(3)
distance_in_doseplan_ROI_reference_point_px = []
if(Globals.DVH_dataset_doseplan.PixelSpacing==[1, 1]):
isocenter_px[0] = np.round(iso_1)#np.round(Globals.profiles_isocenter_mm[0])
isocenter_px[1] = np.round(iso_2)#np.round(Globals.profiles_isocenter_mm[1])
isocenter_px[2] = np.round(iso_3)#np.round(Globals.profiles_isocenter_mm[2])
#Change distance in film to pixel in doseplan
distance_in_doseplan_ROI_reference_point_px.append([np.round(Globals.DVH_distance_isocenter_ROI[0][0]),\
np.round(Globals.DVH_distance_isocenter_ROI[0][1])])
distance_in_doseplan_ROI_reference_point_px.append([np.round(Globals.DVH_distance_isocenter_ROI[1][0]),\
np.round(Globals.DVH_distance_isocenter_ROI[1][1])])
distance_in_doseplan_ROI_reference_point_px.append([np.round(Globals.DVH_distance_isocenter_ROI[2][0]),\
np.round(Globals.DVH_distance_isocenter_ROI[2][1])])
distance_in_doseplan_ROI_reference_point_px.append([np.round(Globals.DVH_distance_isocenter_ROI[3][0]),\
np.round(Globals.DVH_distance_isocenter_ROI[3][1])])
elif(Globals.DVH_dataset_doseplan.PixelSpacing==[2, 2]):
isocenter_px[0] = np.round(iso_1/2)#np.round(Globals.profiles_isocenter_mm[0]/2)
isocenter_px[1] = np.round(iso_2/2)#np.round(Globals.profiles_isocenter_mm[1]/2)
isocenter_px[2] = np.round(iso_3/2)#np.round(Globals.profiles_isocenter_mm[2]/2)
#Change distance in film to pixel in doseplan
distance_in_doseplan_ROI_reference_point_px.append([np.round((Globals.DVH_distance_isocenter_ROI[0][0])/2),\
np.round((Globals.DVH_distance_isocenter_ROI[0][1])/2)])
distance_in_doseplan_ROI_reference_point_px.append([np.round((Globals.DVH_distance_isocenter_ROI[1][0])/2),\
np.round((Globals.DVH_distance_isocenter_ROI[1][1])/2)])
distance_in_doseplan_ROI_reference_point_px.append([np.round((Globals.DVH_distance_isocenter_ROI[2][0])/2),\
np.round((Globals.DVH_distance_isocenter_ROI[2][1])/2)])
distance_in_doseplan_ROI_reference_point_px.append([np.round((Globals.DVH_distance_isocenter_ROI[3][0])/2),\
np.round((Globals.DVH_distance_isocenter_ROI[3][1])/2)])
else:
isocenter_px[0] = np.round(iso_1/3)#np.round(Globals.profiles_isocenter_mm[0]/3)
isocenter_px[1] = np.round(iso_2/3)#np.round(Globals.profiles_isocenter_mm[1]/3)
isocenter_px[2] = np.round(iso_3/3)#np.round(Globals.profiles_isocenter_mm[2]/3)
#Change distance in film to pixel in doseplan
distance_in_doseplan_ROI_reference_point_px.append([np.round((Globals.DVH_distance_isocenter_ROI[0][0])/3),\
np.round((Globals.DVH_distance_isocenter_ROI[0][1])/3)])
distance_in_doseplan_ROI_reference_point_px.append([np.round((Globals.DVH_distance_isocenter_ROI[1][0])/3),\
np.round((Globals.DVH_distance_isocenter_ROI[1][1])/3)])
distance_in_doseplan_ROI_reference_point_px.append([np.round((Globals.DVH_distance_isocenter_ROI[2][0])/3),\
np.round((Globals.DVH_distance_isocenter_ROI[2][1])/3)])
distance_in_doseplan_ROI_reference_point_px.append([np.round((Globals.DVH_distance_isocenter_ROI[3][0])/3),\
np.round((Globals.DVH_distance_isocenter_ROI[3][1])/3)])
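    # Isocenter coordinates and ROI corner distances are now in doseplan pixel
    # units; the isocenter itself serves as the reference point below.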
reference_point = np.zeros(3)
######################## Doseplan ##################################
    #dataset_swapped below holds the dose matrix reordered the way the film is expected (slice, rows, columns)
    #isocenter_px and reference_point are reordered accordingly to match the doseplan and film orientation.
if(Globals.DVH_dataset_doseplan.ImageOrientationPatient==[1, 0, 0, 0, 1, 0]):
reference_point[0] = isocenter_px[2]
reference_point[1] = isocenter_px[1]
reference_point[2] = isocenter_px[0]
if(Globals.DVH_film_orientation.get()=='Coronal'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 0,1)
temp_ref = reference_point[0]
reference_point[0] = reference_point[1]
reference_point[1] = temp_ref
elif(Globals.DVH_film_orientation.get()=='Sagittal'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 0,2)
temp_ref = reference_point[0]
reference_point[0] = reference_point[2]
reference_point[2] = temp_ref
elif(Globals.DVH_film_orientation.get()=='Axial'):
dataset_swapped = Globals.DVH_dataset_doseplan.pixel_array
else:
messagebox.showerror("Error", "Something has gone wrong here.")
clearAll()
return
elif(Globals.DVH_dataset_doseplan.ImageOrientationPatient==[1, 0, 0, 0, 0, 1]):
reference_point[0] = isocenter_px[1]
reference_point[1] = isocenter_px[2]
reference_point[2] = isocenter_px[0]
if(Globals.DVH_film_orientation.get()=='Coronal'):
dataset_swapped = Globals.DVH_dataset_doseplan.pixel_array
elif(Globals.DVH_film_orientation.get()=='Sagittal'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 0,2)
temp_ref = reference_point[0]
reference_point[0] = reference_point[2]
reference_point[2] = temp_ref
dataset_swapped = np.swapaxes(dataset_swapped, 1,2)
temp_ref = reference_point[1]
reference_point[1] = reference_point[2]
reference_point[2] = temp_ref
elif(Globals.DVH_film_orientation.get()=='Axial'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 0,1)
temp_ref = reference_point[0]
reference_point[0] = reference_point[1]
reference_point[1] = temp_ref
else:
messagebox.showerror("Error", "Something has gone wrong.")
clearAll()
return
elif(Globals.DVH_dataset_doseplan.ImageOrientationPatient==[0, 1, 0, 1, 0, 0]):
reference_point[0] = isocenter_px[2]
reference_point[1] = isocenter_px[0]
reference_point[2] = isocenter_px[1]
if(Globals.DVH_film_orientation.get()=='Coronal'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 0,2)
temp_ref = reference_point[0]
reference_point[0] = reference_point[2]
reference_point[2] = temp_ref
dataset_swapped = np.swapaxes(dataset_swapped, 1,2)
temp_ref = reference_point[1]
reference_point[1] = reference_point[2]
reference_point[2] = temp_ref
elif(Globals.DVH_film_orientation.get()=='Sagittal'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 0,1)
temp_ref = reference_point[0]
reference_point[0] = reference_point[1]
reference_point[1] = temp_ref
dataset_swapped = np.swapaxes(dataset_swapped, 1,2)
temp_ref = reference_point[1]
reference_point[1] = reference_point[2]
reference_point[2] = temp_ref
elif(Globals.DVH_film_orientation.get()=='Axial'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 1,2)
temp_ref = reference_point[1]
reference_point[1] = reference_point[2]
reference_point[2] = temp_ref
else:
messagebox.showerror("Error", "Something has gone wrong.")
clearAll()
return
elif(Globals.DVH_dataset_doseplan.ImageOrientationPatient==[0, 1, 0, 0, 0, 1]):
reference_point[0] = isocenter_px[0]
reference_point[1] = isocenter_px[2]
reference_point[2] = isocenter_px[1]
if(Globals.DVH_film_orientation.get()=='Coronal'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 0,2)
temp_ref = reference_point[0]
reference_point[0] = reference_point[2]
reference_point[2] = temp_ref
elif(Globals.DVH_film_orientation.get()=='Sagittal'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 1,2)
temp_ref = reference_point[1]
reference_point[1] = reference_point[2]
reference_point[2] = temp_ref
elif(Globals.DVH_film_orientation.get()=='Axial'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 0,1)
temp_ref = reference_point[0]
reference_point[0] = reference_point[1]
reference_point[1] = temp_ref
dataset_swapped = np.swapaxes(dataset_swapped, 1,2)
temp_ref = reference_point[1]
reference_point[1] = reference_point[2]
reference_point[2] = temp_ref
else:
messagebox.showerror("Error", "Something has gone wrong.")
clearAll()
return
elif(Globals.DVH_dataset_doseplan.ImageOrientationPatient==[0, 0, 1, 1, 0, 0]):
reference_point[0] = isocenter_px[1]
reference_point[1] = isocenter_px[0]
reference_point[2] = isocenter_px[2]
if(Globals.DVH_film_orientation.get()=='Coronal'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 1,2)
temp_ref = reference_point[1]
reference_point[1] = reference_point[2]
reference_point[2] = temp_ref
elif(Globals.DVH_film_orientation.get()=='Sagittal'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 0,1)
temp_ref = reference_point[0]
reference_point[0] = reference_point[1]
reference_point[1] = temp_ref
elif(Globals.DVH_film_orientation.get()=='Axial'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 0,1)
temp_ref = reference_point[0]
reference_point[0] = reference_point[1]
reference_point[1] = temp_ref
dataset_swapped = np.swapaxes(dataset_swapped, 0,2)
temp_ref = reference_point[0]
reference_point[0] = reference_point[2]
reference_point[2] = temp_ref
else:
messagebox.showerror("Error", "Something has gone wrong.")
clearAll()
return
elif(Globals.DVH_dataset_doseplan.ImageOrientationPatient==[0, 0, 1, 0, 1, 0]):
reference_point[0] = isocenter_px[0]
reference_point[1] = isocenter_px[1]
reference_point[2] = isocenter_px[2]
if(Globals.DVH_film_orientation.get()=='Coronal'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 0,2)
temp_ref = reference_point[0]
reference_point[0] = reference_point[2]
reference_point[2] = temp_ref
dataset_swapped = np.swapaxes(dataset_swapped, 0,1)
temp_ref = reference_point[0]
reference_point[0] = reference_point[1]
reference_point[1] = temp_ref
elif(Globals.DVH_film_orientation.get()=='Sagittal'):
dataset_swapped = Globals.DVH_dataset_doseplan.pixel_array
elif(Globals.DVH_film_orientation.get()=='Axial'):
dataset_swapped = np.swapaxes(Globals.DVH_dataset_doseplan.pixel_array, 0,2)
temp_ref = reference_point[0]
reference_point[0] = reference_point[2]
reference_point[2] = temp_ref
else:
messagebox.showerror("Error", "Something has gone wrong.")
clearAll()
return
else:
messagebox.showerror("Error", "Something has gone wrong.")
clearAll()
return
####################### Match film and doseplan ###############################
#Pick the slice where the reference point is (this is the slice-position of the film)
if Globals.DVH_dataset_doseplan.PixelSpacing == [1, 1]:
offset = int(np.round(Globals.DVH_offset))
dose_slice = dataset_swapped[int(reference_point[0]) + offset]
elif Globals.DVH_dataset_doseplan.PixelSpacing == [2, 2]:
offset = int(np.round(Globals.DVH_offset/2))
dose_slice = dataset_swapped[int(reference_point[0] + offset)]
else:
offset = int(np.round(Globals.DVH_offset/3))
dose_slice = dataset_swapped[int(reference_point[0]) + offset]
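    # Globals.DVH_offset appears to be given in mm and is converted to a pixel
    # offset above by dividing by the pixel spacing (1, 2 or 3 mm).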
    #calculate the coordinates of the Region of Interest in the doseplan (marked on the film)
    #and check that it actually exists in the dose matrix
doseplan_ROI_coords = []
top_left_test_side = False; top_left_test_down = False
top_right_test_side = False; top_right_test_down = False
bottom_left_test_side = False; bottom_left_test_down = False
bottom_right_test_side = False; bottom_right_test_down = False
top_left_side_corr = 0; top_left_down_corr = 0
top_right_side_corr = 0; top_right_down_corr = 0
bottom_left_side_corr = 0; bottom_left_down_corr = 0
bottom_right_side_corr = 0; bottom_right_down_corr = 0
top_left_to_side = reference_point[2] - distance_in_doseplan_ROI_reference_point_px[0][0]
top_left_down = reference_point[1] - distance_in_doseplan_ROI_reference_point_px[0][1]
if(top_left_to_side < 0):
top_left_test_side = True
top_left_side_corr = abs(top_left_to_side)
top_left_to_side = 0
if(top_left_to_side > dose_slice.shape[1]):
messagebox.showerror("Fatal Error", "Fatal error: marked ROI is out of range in doseplan. Try again")
clearAll()
return
if(top_left_down < 0):
top_left_test_down = True
top_left_down_corr = abs(top_left_down)
top_left_down = 0
if(top_left_down > dose_slice.shape[0]):
messagebox.showerror("Fatal Error", "Fatal error: marked ROI is out of range in doseplan. Try again")
clearAll()
return
top_right_to_side = reference_point[2] - distance_in_doseplan_ROI_reference_point_px[1][0]
top_right_down = reference_point[1] - distance_in_doseplan_ROI_reference_point_px[1][1]
if(top_right_to_side < 0):
messagebox.showerror("Fatal Error", "Fatal error: marked ROI is out of range in doseplan. Try again")
clearAll()
return
if(top_right_to_side > dose_slice.shape[1]):
top_right_test_side = True
top_right_side_corr = top_right_to_side - dose_slice.shape[1]
top_right_to_side = dose_slice.shape[1]
if(top_right_down < 0):
top_right_test_down = True
top_right_down_corr = abs(top_right_down)
top_right_down = 0
if(top_right_down > dose_slice.shape[0]):
messagebox.showerror("Fatal Error", "Fatal error: marked ROI is out of range in doseplan. Try again")
clearAll()
return
bottom_left_to_side = reference_point[2] - distance_in_doseplan_ROI_reference_point_px[2][0]
bottom_left_down = reference_point[1] - distance_in_doseplan_ROI_reference_point_px[2][1]
if(bottom_left_to_side < 0):
bottom_left_test_side = True
bottom_left_side_corr = abs(bottom_left_to_side)
bottom_left_to_side = 0
if(bottom_left_to_side > dose_slice.shape[1]):
messagebox.showerror("Fatal Error", "Fatal error: marked ROI is out of range in doseplan. Try again")
clearAll()
return
if(bottom_left_down < 0):
messagebox.showerror("Fatal Error", "Fatal error: marked ROI is out of range in doseplan. Try again")
clearAll()
return
if(bottom_left_down > dose_slice.shape[0]):
bottom_left_down_corr = bottom_left_down - dose_slice.shape[0]
bottom_left_down = dose_slice.shape[0]
bottom_left_test_down = True
bottom_right_to_side = reference_point[2] - distance_in_doseplan_ROI_reference_point_px[3][0]
bottom_right_down = reference_point[1] - distance_in_doseplan_ROI_reference_point_px[3][1]
if(bottom_right_to_side < 0):
messagebox.showerror("Fatal Error", "Fatal error: marked ROI is out of range in doseplan. Try again")
clearAll()
return
if(bottom_right_to_side > dose_slice.shape[1]):
bottom_right_side_corr = bottom_right_to_side - dose_slice.shape[1]
bottom_right_to_side = dose_slice.shape[1]
bottom_right_test_side = True
if(bottom_right_down < 0):
messagebox.showerror("Fatal Error", "Fatal error: marked ROI is out of range in doseplan. Try again")
clearAll()
return
if(bottom_right_down > dose_slice.shape[0]):
bottom_right_down_corr = bottom_right_down - dose_slice.shape[0]
bottom_right_down = dose_slice.shape[0]
bottom_right_test_down = True
if(top_right_test_side or top_right_test_down or top_left_test_side or top_left_test_down \
or bottom_right_test_side or bottom_right_test_down or bottom_left_test_side or bottom_left_test_down):
ROI_info = "Left side: " + str(max(top_left_side_corr, bottom_left_side_corr)) + " pixels.\n"\
+ "Right side: " + str(max(top_right_side_corr, bottom_right_side_corr)) + " pixels.\n "\
+ "Top side: " + str(max(top_left_down_corr, top_right_down_corr)) + " pixels.\n"\
+ "Bottom side: " + str(max(bottom_left_down_corr, bottom_right_down_corr)) + " pixels."
messagebox.showinfo("ROI info", "The ROI marked on the film did not fit with the size of the doseplan and had to \
be cut.\n" + ROI_info )
doseplan_ROI_coords.append([top_left_to_side, top_left_down])
doseplan_ROI_coords.append([top_right_to_side, top_right_down])
doseplan_ROI_coords.append([bottom_left_to_side, bottom_left_down])
doseplan_ROI_coords.append([bottom_right_to_side, bottom_right_down])
#dose_slice = cv2.flip(dose_slice, 1)
if(only_one):
Globals.DVH_doseplan_dataset_ROI = \
dose_slice[int(top_left_down):int(bottom_left_down), int(top_left_to_side):int(top_right_to_side)]
img=Globals.DVH_doseplan_dataset_ROI
if(Globals.DVH_dataset_doseplan.PixelSpacing==[1, 1]):
img = cv2.resize(img, dsize=(img.shape[1]*5,img.shape[0]*5))
elif(Globals.DVH_dataset_doseplan.PixelSpacing==[2, 2]):
img = cv2.resize(img, dsize=(img.shape[1]*10,img.shape[0]*10))
else:
img = cv2.resize(img, dsize=(img.shape[1]*15,img.shape[0]*15))
mx=np.max(img)
Globals.DVH_max_dose_doseplan = mx*Globals.DVH_dose_scaling_doseplan
max_dose = mx*Globals.DVH_dose_scaling_doseplan
img = img/mx
PIL_img_doseplan_ROI = Image.fromarray(np.uint8(cm.viridis(img)*255))
wid = PIL_img_doseplan_ROI.width;heig = PIL_img_doseplan_ROI.height
doseplan_canvas = tk.Canvas(Globals.DVH_film_panedwindow)
doseplan_canvas.grid(row=2, column=0, sticky=N+S+W+E)
Globals.DVH_film_panedwindow.add(doseplan_canvas, \
height=max(heig, Globals.profiles_doseplan_text_image.height()), \
width=wid + Globals.profiles_doseplan_text_image.width())
doseplan_canvas.config(bg='#ffffff', relief=FLAT, highlightthickness=0, \
height=max(heig, Globals.profiles_doseplan_text_image.height()), \
width=wid + Globals.profiles_doseplan_text_image.width())
Globals.DVH_doseplan_write_image = tk.Canvas(doseplan_canvas)
Globals.DVH_doseplan_write_image.grid(row=0,column=1,sticky=N+S+W+E)
Globals.DVH_doseplan_write_image.config(bg='#ffffff', relief=FLAT, highlightthickness=0, width=wid, height=heig)
doseplan_text_image_canvas = tk.Canvas(doseplan_canvas)
doseplan_text_image_canvas.grid(row=0,column=0,sticky=N+S+W+E)
doseplan_text_image_canvas.config(bg='#ffffff', relief=FLAT, highlightthickness=0, \
width=Globals.profiles_doseplan_text_image.width(), height=Globals.profiles_doseplan_text_image.height())
scaled_image_visual = PIL_img_doseplan_ROI
scaled_image_visual = ImageTk.PhotoImage(image=scaled_image_visual)
Globals.DVH_doseplan_write_image_width = scaled_image_visual.width()
Globals.DVH_doseplan_write_image_height = scaled_image_visual.height()
Globals.DVH_doseplan_write_image.create_image(0,0,image=scaled_image_visual, anchor="nw")
Globals.DVH_doseplan_write_image.image = scaled_image_visual
doseplan_text_image_canvas.create_image(0,0,image=Globals.profiles_doseplan_text_image, anchor="nw")
doseplan_text_image_canvas.image=Globals.profiles_doseplan_text_image
drawProfiles(False)
else:
img=dose_slice[int(top_left_down):int(bottom_left_down), int(top_left_to_side):int(top_right_to_side)]
Globals.DVH_doseplan_dataset_ROI_several.append(img)
Globals.DVH_number_of_doseplans+=1
if(Globals.DVH_dataset_doseplan.PixelSpacing==[1, 1]):
Globals.DVH_several_img.append(cv2.resize(img, dsize=(img.shape[1]*5,img.shape[0]*5)))
elif(Globals.DVH_dataset_doseplan.PixelSpacing==[2, 2]):
Globals.DVH_several_img.append(cv2.resize(img, dsize=(img.shape[1]*10,img.shape[0]*10)))
else:
Globals.DVH_several_img.append(cv2.resize(img, dsize=(img.shape[1]*15,img.shape[0]*15)))
def UploadDoseplan(only_one):
file = filedialog.askopenfilename()
ext = os.path.splitext(file)[-1].lower()
if(not(ext == '.dcm')):
if(ext == ""):
return
else:
messagebox.showerror("Error", "The file must be a *.dcm file")
return
current_folder = os.getcwd()
parent = os.path.dirname(file)
os.chdir(parent)
dataset = pydicom.dcmread(file)
try:
dose_summation_type = dataset.DoseSummationType
except:
messagebox.showerror("Error", "Could not upload the doseplan correctly. Try again or another file.\n (Code: dose summation)")
return
if(not(dose_summation_type == "PLAN")):
ok = messagebox.askokcancel("Dose summation", "You did not upload the full doseplan. Do you want to continue?")
if not ok:
return
os.chdir(current_folder)
doseplan_dataset = dataset.pixel_array
#Check that the resolution is either 1x1x1, 2x2x2 or 3x3x3
if(not((dataset.PixelSpacing==[1, 1] and dataset.SliceThickness==1) \
or (dataset.PixelSpacing==[2, 2] and dataset.SliceThickness==2) \
or (dataset.PixelSpacing==[3, 3] and dataset.SliceThickness==3))):
messagebox.showerror("Error", "The resolution in doseplan must be 1x1x1, 2x2x2 or 3x3x3")
return
#Check that the datamatrix is in right angles to the coordinate system
if(not(dataset.ImageOrientationPatient==[1, 0, 0, 0, 1, 0] or \
dataset.ImageOrientationPatient==[1, 0, 0, 0, 0, 1] or \
dataset.ImageOrientationPatient==[0, 1, 0, 1, 0, 0] or \
dataset.ImageOrientationPatient==[0, 1, 0, 0, 0, 1] or \
dataset.ImageOrientationPatient==[0, 0, 1, 1, 0, 0] or \
dataset.ImageOrientationPatient==[0, 0, 1, 0, 1, 0])):
messagebox.showerror("Error", "The Image Orientation (Patient) must be parallel to one of the main axis and perpendicular to the two others.")
return
if not only_one and Globals.DVH_number_of_doseplans > 1:
if(not (Globals.DVH_dataset_doseplan.PixelSpacing==dataset.PixelSpacing)):
messagebox.showerror("Error", "Resolution of the doseplans must be equal. \n(Code: UploadDoseplan)")
return
if(not (Globals.DVH_dataset_doseplan.DoseGridScaling == dataset.DoseGridScaling)):
messagebox.showerror("Error", "Dose grid scaling of the doseplans must be equal. \n(Code: UploadDoseplan)")
return
Globals.DVH_dataset_doseplan = dataset
Globals.DVH_dose_scaling_doseplan = dataset.DoseGridScaling
Globals.DVH_test_if_added_doseplan = True
if(Globals.DVH_test_if_added_rtplan):
if(Globals.DVH_isocenter_or_reference_point == "Isocenter"):
processDoseplan_usingIsocenter(only_one)
elif(Globals.DVH_isocenter_or_reference_point == "Ref_point"):
processDoseplan_usingReferencePoint(only_one)
else:
messagebox.showerror("Error", "Something went wrong. Try again.\n (Code: processDoseplan)")
return
if only_one:
Globals.DVH_upload_button_doseplan.config(state=DISABLED)
if not only_one:
filename = basename(normpath(file))
textbox_filename = tk.Text(Globals.DVH_doseplans_scroll_frame, width = 30, height = 1)
textbox_filename.insert(INSERT, filename)
textbox_filename.config(bg='#ffffff', font=('calibri', '12'), state=DISABLED, relief=FLAT)
textbox_filename.grid(row = Globals.DVH_number_of_doseplans_row_count, column = 0, sticky=N+S+W+E, pady=(10,10), padx=(10,10))
Globals.DVH_doseplans_scroll_frame.grid_columnconfigure(Globals.DVH_doseplans_grid_config_count, weight=0)
Globals.DVH_doseplans_scroll_frame.grid_rowconfigure(Globals.DVH_doseplans_grid_config_count, weight=0)
Globals.DVH_doseplans_filenames.append(textbox_filename)
Globals.DVH_doseplans_grid_config_count+=1
textbox_factor = tk.Text(Globals.DVH_doseplans_scroll_frame, width = 6, height = 1)
textbox_factor.insert(INSERT, "Factor: ")
textbox_factor.config(bg='#ffffff', font=('calibri', '12'), state=DISABLED, relief=FLAT)
textbox_factor.grid(row = Globals.DVH_number_of_doseplans_row_count, column = 1, sticky=N+S+W+E, pady=(10,10), padx=(10,10))
Globals.DVH_doseplans_scroll_frame.grid_columnconfigure(Globals.DVH_doseplans_grid_config_count, weight=0)
Globals.DVH_doseplans_scroll_frame.grid_rowconfigure(Globals.DVH_doseplans_grid_config_count, weight=0)
Globals.DVH_doseplans_factor_text.append(textbox_factor)
Globals.DVH_doseplans_grid_config_count+=1
textbox_factor_input = tk.Text(Globals.DVH_doseplans_scroll_frame)
textbox_factor_input.insert(INSERT, " ")
textbox_factor_input.config(bg='#E5f9ff', font=('calibri', '12'), state=NORMAL, bd = 2)
textbox_factor_input.grid(row = Globals.DVH_number_of_doseplans_row_count, column = 1, sticky=N+S+W+E, pady=(10,10), padx=(30,10))
Globals.DVH_doseplans_scroll_frame.grid_columnconfigure(Globals.DVH_doseplans_grid_config_count, weight=0)
Globals.DVH_doseplans_scroll_frame.grid_rowconfigure(Globals.DVH_doseplans_grid_config_count, weight=0)
Globals.DVH_doseplans_factor_input.append(textbox_factor_input)
Globals.DVH_number_of_doseplans_row_count+=1
Globals.DVH_doseplans_grid_config_count+=1
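#Entry point for the doseplan upload button: either upload a single plan directly, or open a
#scrollable window where several plans can be uploaded and given individual factors.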
def UploadDoseplan_button_function():
yes = messagebox.askyesno("Question", "Are you going to upload several doseplans and/or use a factor on a plan?")
if not yes:
UploadDoseplan(True)
return
several_doseplans_window = tk.Toplevel(Globals.tab5_canvas)
several_doseplans_window.geometry("600x500+10+10")
several_doseplans_window.grab_set()
doseplans_over_all_frame = tk.Frame(several_doseplans_window, bd=0, relief=FLAT)
doseplans_over_all_canvas = Canvas(doseplans_over_all_frame)
doseplans_xscrollbar = Scrollbar(doseplans_over_all_frame, orient=HORIZONTAL, command=doseplans_over_all_canvas.xview)
doseplans_yscrollbar = Scrollbar(doseplans_over_all_frame, command=doseplans_over_all_canvas.yview)
Globals.DVH_doseplans_scroll_frame = ttk.Frame(doseplans_over_all_canvas)
Globals.DVH_doseplans_scroll_frame.bind("<Configure>", lambda e: doseplans_over_all_canvas.configure(scrollregion=doseplans_over_all_canvas.bbox('all')))
doseplans_over_all_canvas.create_window((0,0), window=Globals.DVH_doseplans_scroll_frame, anchor='nw')
doseplans_over_all_canvas.configure(xscrollcommand=doseplans_xscrollbar.set, yscrollcommand=doseplans_yscrollbar.set)
doseplans_over_all_frame.config(highlightthickness=0, bg='#ffffff')
doseplans_over_all_canvas.config(highlightthickness=0, bg='#ffffff')
doseplans_over_all_frame.pack(expand=True, fill=BOTH)
doseplans_over_all_canvas.grid(row=0, column=0, sticky=N+S+E+W)
doseplans_over_all_frame.grid_columnconfigure(0, weight=1)
doseplans_over_all_frame.grid_rowconfigure(0, weight=1)
doseplans_xscrollbar.grid(row=1, column=0, sticky=E+W)
doseplans_over_all_frame.grid_columnconfigure(1, weight=0)
doseplans_over_all_frame.grid_rowconfigure(1, weight=0)
doseplans_yscrollbar.grid(row=0, column=1, sticky=N+S)
doseplans_over_all_frame.grid_columnconfigure(2, weight=0)
doseplans_over_all_frame.grid_rowconfigure(2, weight=0)
upload_doseplan_frame = tk.Frame(Globals.DVH_doseplans_scroll_frame)
upload_doseplan_frame.grid(row=0, column = 0, padx = (30,30), pady=(30,0), sticky=N+S+E+W)
Globals.DVH_doseplans_scroll_frame.grid_columnconfigure(0, weight=0)
Globals.DVH_doseplans_scroll_frame.grid_rowconfigure(0, weight=0)
upload_doseplan_frame.config(bg = '#ffffff')
upload_button_doseplan = tk.Button(upload_doseplan_frame, text='Browse', image=Globals.profiles_add_doseplans_button_image,\
cursor='hand2', font=('calibri', '14'), relief=FLAT, state=ACTIVE, command=lambda: UploadDoseplan(False))
upload_button_doseplan.pack(expand=True, fill=BOTH)
upload_button_doseplan.configure(bg='#ffffff', activebackground='#ffffff', activeforeground='#ffffff', highlightthickness=0)
upload_button_doseplan.image = Globals.profiles_add_doseplans_button_image
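#Sum all uploaded doseplan ROIs weighted by the user-given factors (an empty field defaults to 1),
#draw the combined, normalized dose image on the DVH canvas and close the upload window.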
def closeUploadDoseplans():
if(len(Globals.DVH_doseplan_dataset_ROI_several) == 0):
messagebox.showinfo("INFO", "No doseplan has been uploaded")
return
for i in range(len(Globals.DVH_doseplan_dataset_ROI_several)):
if Globals.DVH_doseplans_factor_input[i].get("1.0", 'end-1c') == " ":
factor = 1
else:
try:
factor = float(Globals.DVH_doseplans_factor_input[i].get("1.0", 'end-1c'))
except:
messagebox.showerror("Error", "Invalid factor. Must be number.\n (Code: closeUploadDoseplans)")
return
if i == 0:
doseplan_ROI = Globals.DVH_doseplan_dataset_ROI_several[i]
doseplan_ROI= doseplan_ROI*factor
img_ROI = Globals.DVH_several_img[i]
img_ROI = img_ROI*factor
else:
doseplan_ROI+= factor*Globals.DVH_doseplan_dataset_ROI_several[i]
img_ROI+= factor*Globals.DVH_several_img[i]
mx=np.max(img_ROI)
#max_dose = mx*Globals.DVH_dose_scaling_doseplan
img_ROI = img_ROI/mx
PIL_img_doseplan_ROI = Image.fromarray(np.uint8(cm.viridis(img_ROI)*255))
wid = PIL_img_doseplan_ROI.width
heig = PIL_img_doseplan_ROI.height
doseplan_canvas = tk.Canvas(Globals.DVH_film_panedwindow)
doseplan_canvas.grid(row=2, column=0, sticky=N+S+W+E)
Globals.DVH_film_panedwindow.add(doseplan_canvas, \
height=max(heig, Globals.profiles_doseplan_text_image.height()), \
width=wid + Globals.profiles_doseplan_text_image.width())
doseplan_canvas.config(bg='#ffffff', relief=FLAT, highlightthickness=0, \
height=max(heig, Globals.profiles_doseplan_text_image.height()), \
width=wid + Globals.profiles_doseplan_text_image.width())
Globals.DVH_doseplan_write_image = tk.Canvas(doseplan_canvas)
Globals.DVH_doseplan_write_image.grid(row=0,column=1,sticky=N+S+W+E)
Globals.DVH_doseplan_write_image.config(bg='#ffffff', relief=FLAT, highlightthickness=0, width=wid, height=heig)
doseplan_text_image_canvas = tk.Canvas(doseplan_canvas)
doseplan_text_image_canvas.grid(row=0,column=0,sticky=N+S+W+E)
doseplan_text_image_canvas.config(bg='#ffffff', relief=FLAT, highlightthickness=0, \
width=Globals.profiles_doseplan_text_image.width(), height=Globals.profiles_doseplan_text_image.height())
scaled_image_visual = PIL_img_doseplan_ROI
scaled_image_visual = ImageTk.PhotoImage(image=scaled_image_visual)
Globals.DVH_doseplan_write_image_width = scaled_image_visual.width()
Globals.DVH_doseplan_write_image_height = scaled_image_visual.height()
Globals.DVH_doseplan_write_image.create_image(0,0,image=scaled_image_visual, anchor="nw")
Globals.DVH_doseplan_write_image.image = scaled_image_visual
doseplan_text_image_canvas.create_image(0,0,image=Globals.profiles_doseplan_text_image, anchor="nw")
doseplan_text_image_canvas.image=Globals.profiles_doseplan_text_image
Globals.DVH_doseplan_dataset_ROI = doseplan_ROI
Globals.DVH_upload_button_doseplan.config(state=DISABLED)
several_doseplans_window.after(500, lambda: several_doseplans_window.destroy())
drawProfiles(False)
doseplans_done_button_frame = tk.Frame(Globals.DVH_doseplans_scroll_frame)
doseplans_done_button_frame.grid(row=0, column = 1, padx=(0,40), pady=(30,0), sticky=N+S+W+E)
doseplans_done_button_frame.config(bg='#ffffff')
Globals.DVH_doseplans_scroll_frame.grid_rowconfigure(3, weight=0)
Globals.DVH_doseplans_scroll_frame.grid_columnconfigure(3, weight=0)
doseplans_done_button = tk.Button(doseplans_done_button_frame, text='Done', image=Globals.done_button_image,\
cursor='hand2', font=('calibri', '14'), relief=FLAT, state=ACTIVE, command=closeUploadDoseplans)
doseplans_done_button.pack(expand=True, fill=BOTH)
doseplans_done_button.configure(bg='#ffffff', activebackground='#ffffff', activeforeground='#ffffff', highlightthickness=0)
doseplans_done_button.image = Globals.done_button_image
filename_title = tk.Text(Globals.DVH_doseplans_scroll_frame, width = 15, height= 1)
filename_title.insert(INSERT, "Filename")
filename_title.grid(row=2, column=0, sticky=N+S+E+W, pady=(40,0), padx=(45,15))
filename_title.config(bg='#ffffff', relief=FLAT, state=DISABLED, font=('calibri', '15', 'bold'))
Globals.DVH_doseplans_scroll_frame.grid_rowconfigure(1, weight=0)
Globals.DVH_doseplans_scroll_frame.grid_columnconfigure(1, weight=0)
factor_title = tk.Text(Globals.DVH_doseplans_scroll_frame, width=30, height=2)
factor_title.insert(INSERT, "Here you can write a factor to use \non the doseplan. Defaults to 1.")
factor_title.grid(row=2, column=1, sticky=N+W+S+E, pady=(37,10), padx=(15,25))
factor_title.config(bg='#ffffff', relief=FLAT, state=DISABLED, font=('calibri', '15', 'bold'))
Globals.DVH_doseplans_scroll_frame.grid_columnconfigure(2,weight=0)
Globals.DVH_doseplans_scroll_frame.grid_rowconfigure(2, weight=0)
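#Ask the user for the RT plan (*.dcm) file and read the isocenter position, the table top
#displacements and the patient position into the global DVH state before enabling the
#doseplan upload button.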
def UploadRTplan():
file = filedialog.askopenfilename()
ext = os.path.splitext(file)[-1].lower()
if(not(ext == '.dcm')):
if(ext == ""):
return
else:
messagebox.showerror("Error", "The file must be a *.dcm file")
return
current_folder = os.getcwd()
parent = os.path.dirname(file)
os.chdir(parent)
dataset = pydicom.dcmread(file)
os.chdir(current_folder)
Globals.DVH_dataset_rtplan = dataset
#Isocenter given in mm from origo in patient coordinate system
try:
isocenter_mm = dataset.BeamSequence[0].ControlPointSequence[0].IsocenterPosition
Globals.DVH_isocenter_mm = isocenter_mm
except:
messagebox.showerror("Error", "Could not read the RT plan file. Try again or try another file.\n\
(Code: isocenter reading)")
return
try:
Globals.DVH_doseplan_vertical_displacement = dataset.PatientSetupSequence[0].TableTopVerticalSetupDisplacement
except:
messagebox.showerror("Error", "Could not read the RT plan file. Try again or try another file. \n\
(Code: vertical table displacement)")
try:
Globals.DVH_doseplan_lateral_displacement = dataset.PatientSetupSequence[0].TableTopLateralSetupDisplacement
except:
messagebox.showerror("Error", "Could not read the RT plan file. Try again or try another file-\n\
(Code: lateral table displacement)")
try:
Globals.DVH_doseplan_longitudianl_displacement = dataset.PatientSetupSequence[0].TableTopLongitudinalSetupDisplacement
except:
messagebox.showerror("Error", "Could not read the RT plan file. Try again or try another file\n\
(Code: longitudinal table displacement)")
try:
patient_position = dataset.PatientSetupSequence[0].PatientPosition
Globals.DVH_doseplan_patient_position = patient_position
except:
messagebox.showerror("Error", "Could not read the RT plan file. Try again or try another file\n\
(Code: Patient position)")
if(not(patient_position=='HFS' or patient_position=='HFP' or patient_position=='HFDR' or patient_position == 'HFDL'\
or patient_position=='FFDR' or patient_position=='FFDL' or patient_position=='FFP' or patient_position=='FFS')):
messagebox.showerror("Error", "Fidora does only support patient positions: \n\
HFS, HFP, HFDR, HFDL, FFP, FFS, FFDR, FFDL")
return
Globals.DVH_test_if_added_rtplan = True
Globals.DVH_upload_button_doseplan.config(state=ACTIVE)
Globals.DVH_upload_button_rtplan.config(state=DISABLED)
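#Convert a red-channel pixel value P to dose with the rational calibration curve
#dose = c + b/(P - a), where a, b and c are the coefficients of the chosen batch read from calibration.txt.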
def pixel_to_dose(P,a,b,c):
ret = c + b/(P-a)
return ret
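#Open a full-size copy of the scanned film and let the user click the two orientation marks;
#the isocenter is taken as the intersection of the vertical line through the first mark and the
#horizontal line through the second, and is drawn on the film preview.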
def markIsocenter(img, new_window_isocenter_tab, image_canvas, cv2Img):
if(len(Globals.DVH_mark_isocenter_oval)>0):
image_canvas.delete(Globals.DVH_mark_isocenter_up_down_line[0])
image_canvas.delete(Globals.DVH_mark_isocenter_right_left_line[0])
image_canvas.delete(Globals.DVH_mark_isocenter_oval[0])
Globals.DVH_mark_isocenter_oval=[]
Globals.DVH_mark_isocenter_right_left_line=[]
Globals.DVH_mark_isocenter_up_down_line=[]
Globals.DVH_iscoenter_coords = []
img_mark_isocenter = ImageTk.PhotoImage(image=img)
mark_isocenter_window = tk.Toplevel(new_window_isocenter_tab)
mark_isocenter_window.geometry("1035x620+10+10")
mark_isocenter_window.grab_set()
mark_isocenter_over_all_frame = tk.Frame(mark_isocenter_window, bd=0, relief=FLAT)
mark_isocenter_over_all_canvas = Canvas(mark_isocenter_over_all_frame)
mark_isocenter_xscrollbar = Scrollbar(mark_isocenter_over_all_frame, orient=HORIZONTAL, command=mark_isocenter_over_all_canvas.xview)
mark_isocenter_yscrollbar = Scrollbar(mark_isocenter_over_all_frame, command=mark_isocenter_over_all_canvas.yview)
mark_isocenter_scroll_frame = ttk.Frame(mark_isocenter_over_all_canvas)
mark_isocenter_scroll_frame.bind("<Configure>", lambda e: mark_isocenter_over_all_canvas.configure(scrollregion=mark_isocenter_over_all_canvas.bbox('all')))
mark_isocenter_over_all_canvas.create_window((0,0), window=mark_isocenter_scroll_frame, anchor='nw')
mark_isocenter_over_all_canvas.configure(xscrollcommand=mark_isocenter_xscrollbar.set, yscrollcommand=mark_isocenter_yscrollbar.set)
mark_isocenter_over_all_frame.config(highlightthickness=0, bg='#ffffff')
mark_isocenter_over_all_canvas.config(highlightthickness=0, bg='#ffffff')
mark_isocenter_over_all_frame.pack(expand=True, fill=BOTH)
mark_isocenter_over_all_canvas.grid(row=0, column=0, sticky=N+S+E+W)
mark_isocenter_over_all_frame.grid_columnconfigure(0, weight=1)
mark_isocenter_over_all_frame.grid_rowconfigure(0, weight=1)
mark_isocenter_xscrollbar.grid(row=1, column=0, sticky=E+W)
mark_isocenter_over_all_frame.grid_columnconfigure(1, weight=0)
mark_isocenter_over_all_frame.grid_rowconfigure(1, weight=0)
mark_isocenter_yscrollbar.grid(row=0, column=1, sticky=N+S)
mark_isocenter_over_all_frame.grid_columnconfigure(2, weight=0)
mark_isocenter_over_all_frame.grid_rowconfigure(2, weight=0)
mark_isocenter_image_canvas = tk.Canvas(mark_isocenter_scroll_frame)
mark_isocenter_image_canvas.grid(row=0,column=0, rowspan=10, columnspan=3, sticky=N+S+E+W, padx=(0,0), pady=(0,0))
mark_isocenter_scroll_frame.grid_columnconfigure(0, weight=0)
mark_isocenter_scroll_frame.grid_rowconfigure(0, weight=0)
mark_isocenter_image_canvas.create_image(0,0,image=img_mark_isocenter,anchor="nw")
mark_isocenter_image_canvas.image = img_mark_isocenter
mark_isocenter_image_canvas.config(cursor='hand2', bg='#ffffff', relief=FLAT, bd=0, \
scrollregion=mark_isocenter_image_canvas.bbox(ALL), height=img_mark_isocenter.height(), width=img_mark_isocenter.width())
mark_isocenter_image_canvas.grid_propagate(0)
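#First click defines the vertical line, second click the horizontal line; their intersection is
#stored as the film isocenter and drawn on the preview canvas.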
def findCoords(event):
mark_isocenter_image_canvas.create_oval(event.x-2, event.y-2, event.x+2, event.y+2, fill='red')
if(Globals.DVH_iscoenter_coords==[]):
Globals.DVH_iscoenter_coords.append([event.x, event.y])
mark_isocenter_image_canvas.config(cursor='hand2')
elif(len(Globals.DVH_iscoenter_coords)==1):
Globals.DVH_iscoenter_coords.append([event.x, event.y])
Globals.DVH_film_isocenter = [Globals.DVH_iscoenter_coords[0][0], Globals.DVH_iscoenter_coords[1][1]]
x1,y1 = Globals.DVH_iscoenter_coords[0]
x4,y4 = Globals.DVH_iscoenter_coords[1]
x2 = x1
y3 = y4
y2=2*Globals.DVH_film_isocenter[1]-y1
x3=2*Globals.DVH_film_isocenter[0]-x4
up_down_line = image_canvas.create_line(int(x1/2),int(y1/2),int(x2/2),int(y2/2),fill='purple', smooth=1, width=2)
right_left_line = image_canvas.create_line(int(x3/2),int(y3/2),int(x4/2),int(y4/2), fill='purple', smooth=1, width=2)
oval = image_canvas.create_oval(int(Globals.DVH_film_isocenter[0]/2)-3, int(Globals.DVH_film_isocenter[1]/2)-3,\
int(Globals.DVH_film_isocenter[0]/2)+3, int(Globals.DVH_film_isocenter[1]/2)+3, fill='red')
Globals.DVH_mark_isocenter_up_down_line.append(up_down_line)
Globals.DVH_mark_isocenter_right_left_line.append(right_left_line)
Globals.DVH_mark_isocenter_oval.append(oval)
mark_isocenter_window.after(500, lambda: mark_isocenter_window.destroy())
Globals.DVH_isocenter_check = True
if(Globals.DVH_ROI_check):
Globals.DVH_done_button.config(state=ACTIVE)
mark_isocenter_image_canvas.bind("<Button 1>",findCoords)
def markReferencePoint(img, new_window_reference_point_tab, image_canvas_reference_tab, cv2Img):
if(len(Globals.DVH_mark_reference_point_oval)>0):
image_canvas_reference_tab.delete(Globals.DVH_mark_reference_point_oval[0])
Globals.DVH_mark_reference_point_oval=[]
img_mark_reference_point = ImageTk.PhotoImage(image=img)
mark_reference_point_window = tk.Toplevel(new_window_reference_point_tab)
mark_reference_point_window.geometry("1035x620+10+10")
mark_reference_point_window.grab_set()
mark_reference_point_over_all_frame = tk.Frame(mark_reference_point_window, bd=0, relief=FLAT)
mark_reference_point_over_all_canvas = Canvas(mark_reference_point_over_all_frame)
mark_reference_point_xscrollbar = Scrollbar(mark_reference_point_over_all_frame, orient=HORIZONTAL, command=mark_reference_point_over_all_canvas.xview)
mark_reference_point_yscrollbar = Scrollbar(mark_reference_point_over_all_frame, command=mark_reference_point_over_all_canvas.yview)
mark_reference_point_scroll_frame = ttk.Frame(mark_reference_point_over_all_canvas)
mark_reference_point_scroll_frame.bind("<Configure>", lambda e: mark_reference_point_over_all_canvas.configure(scrollregion=mark_reference_point_over_all_canvas.bbox('all')))
mark_reference_point_over_all_canvas.create_window((0,0), window=mark_reference_point_scroll_frame, anchor='nw')
mark_reference_point_over_all_canvas.configure(xscrollcommand=mark_reference_point_xscrollbar.set, yscrollcommand=mark_reference_point_yscrollbar.set)
mark_reference_point_over_all_frame.config(highlightthickness=0, bg='#ffffff')
mark_reference_point_over_all_canvas.config(highlightthickness=0, bg='#ffffff')
mark_reference_point_over_all_frame.pack(expand=True, fill=BOTH)
mark_reference_point_over_all_canvas.grid(row=0, column=0, sticky=N+S+E+W)
mark_reference_point_over_all_frame.grid_columnconfigure(0, weight=1)
mark_reference_point_over_all_frame.grid_rowconfigure(0, weight=1)
mark_reference_point_xscrollbar.grid(row=1, column=0, sticky=E+W)
mark_reference_point_over_all_frame.grid_columnconfigure(1, weight=0)
mark_reference_point_over_all_frame.grid_rowconfigure(1, weight=0)
mark_reference_point_yscrollbar.grid(row=0, column=1, sticky=N+S)
mark_reference_point_over_all_frame.grid_columnconfigure(2, weight=0)
mark_reference_point_over_all_frame.grid_rowconfigure(2, weight=0)
mark_reference_point_image_canvas = tk.Canvas(mark_reference_point_scroll_frame)
mark_reference_point_image_canvas.grid(row=0,column=0, rowspan=10, columnspan=3, sticky=N+S+E+W, padx=(0,0), pady=(0,0))
mark_reference_point_scroll_frame.grid_columnconfigure(0, weight=0)
mark_reference_point_scroll_frame.grid_rowconfigure(0, weight=0)
mark_reference_point_image_canvas.create_image(0,0,image=img_mark_reference_point,anchor="nw")
mark_reference_point_image_canvas.image = img_mark_reference_point
mark_reference_point_image_canvas.config(cursor='hand2', bg='#ffffff', relief=FLAT, bd=0, \
scrollregion=mark_reference_point_image_canvas.bbox(ALL), height=img_mark_reference_point.height(), width=img_mark_reference_point.width())
mark_reference_point_image_canvas.grid_propagate(0)
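#A single click stores the reference point position and draws it on the preview canvas.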
def findCoords(event):
mark_reference_point_image_canvas.create_oval(event.x-2, event.y-2, event.x+2, event.y+2, fill='red')
Globals.DVH_film_reference_point = [event.x, event.y]
oval = image_canvas_reference_tab.create_oval(int(Globals.DVH_film_reference_point[0]/2)-3, \
int(Globals.DVH_film_reference_point[1]/2)-3, int(Globals.DVH_film_reference_point[0]/2)+3, \
int(Globals.DVH_film_reference_point[1]/2)+3, fill='red')
Globals.DVH_mark_reference_point_oval.append(oval)
mark_reference_point_window.after(500, lambda: mark_reference_point_window.destroy())
Globals.DVH_reference_point_check = True
if(Globals.DVH_ROI_reference_point_check):
Globals.DVH_done_button_reference_point.config(state=ACTIVE)
mark_reference_point_image_canvas.bind("<Button 1>",findCoords)
def markROI(img, tab, canvas, ref_point_test):
if(len(Globals.DVH_mark_ROI_rectangle)>0):
canvas.delete(Globals.DVH_mark_ROI_rectangle[0])
Globals.DVH_mark_ROI_rectangle = []
Globals.DVH_ROI_coords = []
img_mark_ROI = ImageTk.PhotoImage(image=img)
mark_ROI_window = tk.Toplevel(tab)
mark_ROI_window.geometry("1035x620+10+10")
mark_ROI_window.grab_set()
mark_ROI_over_all_frame = tk.Frame(mark_ROI_window, bd=0, relief=FLAT)
mark_ROI_over_all_canvas = Canvas(mark_ROI_over_all_frame)
mark_ROI_xscrollbar = Scrollbar(mark_ROI_over_all_frame, orient=HORIZONTAL, command=mark_ROI_over_all_canvas.xview)
mark_ROI_yscrollbar = Scrollbar(mark_ROI_over_all_frame, command=mark_ROI_over_all_canvas.yview)
mark_ROI_scroll_frame = ttk.Frame(mark_ROI_over_all_canvas)
mark_ROI_scroll_frame.bind("<Configure>", lambda e: mark_ROI_over_all_canvas.configure(scrollregion=mark_ROI_over_all_canvas.bbox('all')))
mark_ROI_over_all_canvas.create_window((0,0), window=mark_ROI_scroll_frame, anchor='nw')
mark_ROI_over_all_canvas.configure(xscrollcommand=mark_ROI_xscrollbar.set, yscrollcommand=mark_ROI_yscrollbar.set)
mark_ROI_over_all_frame.config(highlightthickness=0, bg='#ffffff')
mark_ROI_over_all_canvas.config(highlightthickness=0, bg='#ffffff')
mark_ROI_over_all_frame.pack(expand=True, fill=BOTH)
mark_ROI_over_all_canvas.grid(row=0, column=0, sticky=N+S+E+W)
mark_ROI_over_all_frame.grid_columnconfigure(0, weight=1)
mark_ROI_over_all_frame.grid_rowconfigure(0, weight=1)
mark_ROI_xscrollbar.grid(row=1, column=0, sticky=E+W)
mark_ROI_over_all_frame.grid_columnconfigure(1, weight=0)
mark_ROI_over_all_frame.grid_rowconfigure(1, weight=0)
mark_ROI_yscrollbar.grid(row=0, column=1, sticky=N+S)
mark_ROI_over_all_frame.grid_columnconfigure(2, weight=0)
mark_ROI_over_all_frame.grid_rowconfigure(2, weight=0)
mark_ROI_image_canvas = tk.Canvas(mark_ROI_scroll_frame)
mark_ROI_image_canvas.grid(row=0,column=0, rowspan=10, columnspan=3, sticky=N+S+E+W, padx=(0,0), pady=(0,0))
mark_ROI_scroll_frame.grid_columnconfigure(0, weight=0)
mark_ROI_scroll_frame.grid_rowconfigure(0, weight=0)
mark_ROI_image_canvas.create_image(0,0,image=img_mark_ROI,anchor="nw")
mark_ROI_image_canvas.image = img_mark_ROI
mark_ROI_image_canvas.config(bg='#E5f9ff', relief=FLAT, bd=0, \
scrollregion=mark_ROI_image_canvas.bbox(ALL), height=img_mark_ROI.height(), width=img_mark_ROI.width())
mark_ROI_image_canvas.grid_propagate(0)
rectangle = mark_ROI_image_canvas.create_rectangle(0,0,0,0,outline='green')
rectangle_top_corner = []
rectangle_bottom_corner = []
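#Mouse handlers for drawing the ROI rectangle: pressing stores the top corner, dragging updates
#the rectangle, and releasing stores the bottom corner and saves the four ROI coordinates.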
def buttonPushed(event):
rectangle_top_corner.append([event.x, event.y])
def buttonMoving(event):
mark_ROI_image_canvas.coords(rectangle, rectangle_top_corner[0][0], rectangle_top_corner[0][1], \
event.x, event.y)
def buttonReleased(event):
rectangle_bottom_corner.append([event.x, event.y])
mark_ROI_image_canvas.coords(rectangle, rectangle_top_corner[0][0], rectangle_top_corner[0][1],\
rectangle_bottom_corner[0][0], rectangle_bottom_corner[0][1])
mark_ROI_image_canvas.itemconfig(rectangle, outline='Blue')
### Remember that the coordinates run across first, then down! Top left - top right - bottom left - bottom right
Globals.DVH_ROI_coords.append([rectangle_top_corner[0][0], rectangle_top_corner[0][1]])
Globals.DVH_ROI_coords.append([rectangle_bottom_corner[0][0], rectangle_top_corner[0][1]])
Globals.DVH_ROI_coords.append([rectangle_top_corner[0][0], rectangle_bottom_corner[0][1]])
Globals.DVH_ROI_coords.append([rectangle_bottom_corner[0][0], rectangle_bottom_corner[0][1]])
rect = canvas.create_rectangle(int((rectangle_top_corner[0][0])/2), int((rectangle_top_corner[0][1])/2),\
int((rectangle_bottom_corner[0][0])/2), int((rectangle_bottom_corner[0][1])/2), outline='Blue', width=2)
Globals.DVH_mark_ROI_rectangle.append(rect)
if(ref_point_test):
Globals.DVH_ROI_reference_point_check = True
if(Globals.DVH_reference_point_check):
Globals.DVH_done_button_reference_point.config(state=ACTIVE)
else:
Globals.DVH_ROI_check = True
if(Globals.DVH_isocenter_check):
Globals.DVH_done_button.config(state=ACTIVE)
mark_ROI_window.after(500, lambda: mark_ROI_window.destroy())
mark_ROI_image_canvas.bind("<B1-Motion>", buttonMoving)
mark_ROI_image_canvas.bind("<Button-1>", buttonPushed)
mark_ROI_image_canvas.bind("<ButtonRelease-1>", buttonReleased)
def UploadFilm():
if(Globals.DVH_film_orientation.get() == '-'):
messagebox.showerror("Missing parameter", "Film orientation missing \n (Code: UploadFilm)")
return
if Globals.DVH_film_factor_input.get("1.0", 'end-1c') == " ":
Globals.DVH_film_factor = 1
else:
try:
Globals.DVH_film_factor = float(Globals.DVH_film_factor_input.get("1.0", 'end-1c'))
except:
messagebox.showerror("Missing parameter", "Film factor invalid format. \n (Code: UploadFilm)")
return
file = filedialog.askopenfilename()
ext = os.path.splitext(file)[-1].lower()
if(ext == '.tif'):
current_folder = os.getcwd()
parent = os.path.dirname(file)
os.chdir(parent)
img = Image.open(file)
img = img.transpose(Image.FLIP_LEFT_RIGHT)
cv2Img = cv2.imread(basename(normpath(file)), cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
cv2Img = cv2.medianBlur(cv2Img, 5)
if(cv2Img is None):
messagebox.showerror("Error", "Something has gone wrong. Check that the filename does not contain Æ,Ø,Å")
return
if(cv2Img.shape[2] == 3):
if(cv2Img.shape[0]==1270 and cv2Img.shape[1]==1016):
cv2Img = abs(cv2Img-Globals.correctionMatrix127)
cv2Img = np.clip(cv2Img, 0, 65535)
cv2Img = cv2.flip(cv2Img,1)
img_scaled = img.resize((508, 635), Image.ANTIALIAS)
img_scaled = ImageTk.PhotoImage(image=img_scaled)
Globals.DVH_film_dataset = cv2Img
Globals.DVH_film_dataset_red_channel = cv2Img[:,:,2]
else:
messagebox.showerror("Error","The resolution of the image is not consistent with dpi")
return
else:
messagebox.showerror("Error","The uploaded image need to be in RGB-format")
return
os.chdir(current_folder)
if(not (img.width == 1016)):
messagebox.showerror("Error", "Dpi in image has to be 127")
return
Globals.DVH_film_orientation_menu.configure(state=DISABLED)
Globals.DVH_film_factor_input.config(state=DISABLED)
h = 635 + 20
w = 508 + 625
new_window = tk.Toplevel(Globals.tab5)
new_window.geometry("%dx%d+0+0" % (w, h))
new_window.grab_set()
new_window_over_all_frame = tk.Frame(new_window, bd=0, relief=FLAT)
new_window_over_all_canvas = Canvas(new_window_over_all_frame)
new_window_xscrollbar = Scrollbar(new_window_over_all_frame, orient=HORIZONTAL, command=new_window_over_all_canvas.xview)
new_window_yscrollbar = Scrollbar(new_window_over_all_frame, command=new_window_over_all_canvas.yview)
new_window_scroll_frame = ttk.Frame(new_window_over_all_canvas)
new_window_scroll_frame.bind("<Configure>", lambda e: new_window_over_all_canvas.configure(scrollregion=new_window_over_all_canvas.bbox('all')))
new_window_over_all_canvas.create_window((0,0), window=new_window_scroll_frame, anchor='nw')
new_window_over_all_canvas.configure(xscrollcommand=new_window_xscrollbar.set, yscrollcommand=new_window_yscrollbar.set)
new_window_over_all_frame.config(highlightthickness=0, bg='#ffffff')
new_window_over_all_canvas.config(highlightthickness=0, bg='#ffffff')
new_window_over_all_frame.pack(expand=True, fill=BOTH)
new_window_over_all_canvas.grid(row=0, column=0, sticky=N+S+E+W)
new_window_over_all_frame.grid_columnconfigure(0, weight=1)
new_window_over_all_frame.grid_rowconfigure(0, weight=1)
new_window_xscrollbar.grid(row=1, column=0, sticky=E+W)
new_window_over_all_frame.grid_columnconfigure(1, weight=0)
new_window_over_all_frame.grid_rowconfigure(1, weight=0)
new_window_yscrollbar.grid(row=0, column=1, sticky=N+S)
new_window_over_all_frame.grid_columnconfigure(2, weight=0)
new_window_over_all_frame.grid_rowconfigure(2, weight=0)
new_window_explain_text = tk.Text(new_window_scroll_frame, height= 3, width=120)
new_window_explain_text.insert(INSERT, \
"To match the film with the doseplan you have to mark either the isocenter or a reference point \
on the film of your choice. In the case of the reference point you \nwill be asked to input the \
length in lateral, longitudinal and vertical direction to a reference point used in the linac. If the \
reference point on the film is the same as \nthe one in the phantom/linac you can input all zeros, \
in other cases your input is in mm. Later you will have the opportunity to make small \
adjustments \nto the placement of either the reference point or the isocenter.")
new_window_explain_text.config(state=DISABLED, font=('calibri', '13', 'bold'), bg = '#ffffff', relief=FLAT)
new_window_explain_text.grid(row=0, column=0, columnspan=5, sticky=N+S+W+E, pady=(15,5), padx=(10,10))
new_window_scroll_frame.grid_rowconfigure(0, weight=0)
new_window_scroll_frame.grid_columnconfigure(0, weight=0)
new_window_notebook = ttk.Notebook(new_window_scroll_frame)
new_window_notebook.borderWidth=0
new_window_notebook.grid(row=2, column=0, columnspan=5, sticky=E+W+N+S, pady=(0,0), padx =(0,0))
new_window_scroll_frame.grid_rowconfigure(4, weight=0)
new_window_scroll_frame.grid_columnconfigure(4, weight=0)
new_window_isocenter_tab = ttk.Frame(new_window_notebook)
new_window_notebook.add(new_window_isocenter_tab, text='Isocenter')
new_window_reference_point_tab = ttk.Frame(new_window_notebook)
new_window_notebook.add(new_window_reference_point_tab, text='Reference point')
new_window_manually_tab = ttk.Frame(new_window_notebook)
new_window_notebook.add(new_window_manually_tab, text='Manually')
image_canvas = tk.Canvas(new_window_isocenter_tab)
image_canvas.grid(row=0,column=0, rowspan=12, columnspan=3, sticky=N+S+E+W, padx=(0,0), pady=(0,0))
new_window_isocenter_tab.grid_rowconfigure(1, weight=0)
new_window_isocenter_tab.grid_columnconfigure(1, weight=0)
image_canvas.create_image(0,0,image=img_scaled,anchor="nw")
image_canvas.image = img_scaled
image_canvas.config(bg='#ffffff', relief=FLAT, bd=0, scrollregion=image_canvas.bbox(ALL), \
height=img_scaled.height(), width=img_scaled.width())
image_canvas.grid_propagate(0)
image_canvas_reference_tab = tk.Canvas(new_window_reference_point_tab)
image_canvas_reference_tab.grid(row=0,column=0, rowspan=10, columnspan=3, sticky=N+S+E+W, padx=(0,0), pady=(0,0))
new_window_reference_point_tab.grid_rowconfigure(1, weight=0)
new_window_reference_point_tab.grid_columnconfigure(1, weight=0)
image_canvas_reference_tab.create_image(0,0,image=img_scaled,anchor="nw")
image_canvas_reference_tab.image = img_scaled
image_canvas_reference_tab.config(bg='#ffffff', relief=FLAT, bd=0, scrollregion=image_canvas.bbox(ALL), \
height=img_scaled.height(), width=img_scaled.width())
image_canvas_reference_tab.grid_propagate(0)
film_window_mark_isocenter_text = tk.Text(new_window_isocenter_tab, width=55, height=7)
film_window_mark_isocenter_text.insert(INSERT, \
"When clicking the button \"Mark isocenter\" a window showing \n\
the image will appear and you are to click on the markers \n\
made on the film upon irradiation to find the isocenter. Start \n\
with the marker showing the direction of the film (see the \n\
specifications in main window). When both marks are made \n\
you will see the isocenter in the image. If you are not happy \n\
with the placement click the button again and repeat.")
film_window_mark_isocenter_text.config(bg='#ffffff', relief=FLAT, bd=0, state=DISABLED, font=('calibri', '11'))
film_window_mark_isocenter_text.grid(row=0, column=3, rowspan=3, sticky=N+S+E+W, padx=(10,10), pady=(10,0))
new_window_isocenter_tab.columnconfigure(2, weight=0)
new_window_isocenter_tab.rowconfigure(2, weight=0)
film_window_mark_reference_point_text = tk.Text(new_window_reference_point_tab, width=55, height=5)
film_window_mark_reference_point_text.insert(INSERT, \
"When clicking the button \"Mark point\" a window showing \n\
the image will appear and you are to click on the marker \n\
made on the film upon irradiation to find the point. When\n\
the mark is made you will see the point in the image.\n\
If you are not happy with the placement click the button \n\
again and repeat.")
film_window_mark_reference_point_text.config(bg='#ffffff', relief=FLAT, bd=0, state=DISABLED, font=('calibri', '11'))
film_window_mark_reference_point_text.grid(row=0, column=3, rowspan=3, sticky=N+S+E+W, padx=(10,10), pady=(5,0))
new_window_reference_point_tab.columnconfigure(2, weight=0)
new_window_reference_point_tab.rowconfigure(2, weight=0)
mark_isocenter_button_frame = tk.Frame(new_window_isocenter_tab)
mark_isocenter_button_frame.grid(row=3, column=3, padx=(10,10), pady=(0,10))
mark_isocenter_button_frame.configure(bg='#ffffff')
new_window_isocenter_tab.grid_columnconfigure(3, weight=0)
new_window_isocenter_tab.grid_rowconfigure(3, weight=0)
mark_isocenter_button = tk.Button(mark_isocenter_button_frame, text='Browse', image=Globals.profiles_mark_isocenter_button_image,\
cursor='hand2',font=('calibri', '14'), relief=FLAT, state=ACTIVE, command=lambda: markIsocenter(img, new_window_isocenter_tab, image_canvas, cv2Img))
mark_isocenter_button.pack(expand=True, fill=BOTH)
mark_isocenter_button.config(bg='#ffffff', activebackground='#ffffff', activeforeground='#ffffff', highlightthickness=0)
mark_isocenter_button.image=Globals.profiles_mark_isocenter_button_image
mark_point_button_frame = tk.Frame(new_window_reference_point_tab)
mark_point_button_frame.grid(row=3, column=3, padx=(10,10), pady=(30,0))
mark_point_button_frame.configure(bg='#ffffff')
new_window_reference_point_tab.grid_columnconfigure(3, weight=0)
new_window_reference_point_tab.grid_rowconfigure(3, weight=0)
mark_point_button = tk.Button(mark_point_button_frame, text='Browse', image=Globals.profiles_mark_point_button_image,\
cursor='hand2',font=('calibri', '14'), relief=FLAT, state=ACTIVE, command=lambda: \
markReferencePoint(img, new_window_reference_point_tab, image_canvas_reference_tab, cv2Img))
mark_point_button.pack(expand=True, fill=BOTH)
mark_point_button.config(bg='#ffffff', activebackground='#ffffff', activeforeground='#ffffff', highlightthickness=0)
mark_point_button.image=Globals.profiles_mark_point_button_image
write_displacement_relative_to_reference_point = tk.Text(new_window_reference_point_tab, width = 55, height=3)
write_displacement_relative_to_reference_point.insert(INSERT, "\
If the marked reference point on the film does not match\n\
the reference point in the phantom you can write the\n\
displacement here (in mm). Defaults to zero.")
write_displacement_relative_to_reference_point.grid(row=4, column=3, rowspan=2, sticky=N+S+E+W, padx=(10,10), pady=(0,10))
write_displacement_relative_to_reference_point.config(bg='#ffffff', relief=FLAT, bd=0, state=DISABLED, font=('calibri', '11'))
new_window_reference_point_tab.grid_rowconfigure(6, weight=0)
new_window_reference_point_tab.grid_columnconfigure(6, weight=0)
input_lateral_text = tk.Text(new_window_reference_point_tab, width=12, height=1)
input_lateral_text.insert(INSERT, "Lateral:")
input_lateral_text.config(bg='#ffffff', relief=FLAT, bd=0, state=DISABLED, font=('calibri', '10'))
input_lateral_text.grid(row=5, column=3, sticky=N+S, padx=(0,250), pady=(25,0))
new_window_reference_point_tab.grid_rowconfigure(10, weight=0)
new_window_reference_point_tab.grid_rowconfigure(10, weight=0)
Globals.DVH_input_lateral_displacement = tk.Text(new_window_reference_point_tab, width=5, height=1)
Globals.DVH_input_lateral_displacement.insert(INSERT, " ")
Globals.DVH_input_lateral_displacement.config(bg='#E5f9ff', relief=GROOVE, bd=2, state=NORMAL, font=('calibri', '11'))
Globals.DVH_input_lateral_displacement.grid(row=5, column=3, padx=(0,285), pady=(35,0))
new_window_reference_point_tab.grid_rowconfigure(7, weight=0)
new_window_reference_point_tab.grid_columnconfigure(7, weight=0)
input_vertical_text = tk.Text(new_window_reference_point_tab, width=12, height=1)
input_vertical_text.insert(INSERT, "Vertical:")
input_vertical_text.config(bg='#ffffff', relief=FLAT, bd=0, state=DISABLED, font=('calibri', '10'))
input_vertical_text.grid(row=5, column=3, sticky=N+S, padx=(0,0), pady=(25,0))
new_window_reference_point_tab.grid_rowconfigure(11, weight=0)
new_window_reference_point_tab.grid_rowconfigure(11, weight=0)
Globals.DVH_input_vertical_displacement = tk.Text(new_window_reference_point_tab, width=4, height=1)
Globals.DVH_input_vertical_displacement.insert(INSERT, " ")
Globals.DVH_input_vertical_displacement.config(bg='#E5f9ff', relief=GROOVE, bd=2, state=NORMAL, font=('calibri', '11'))
Globals.DVH_input_vertical_displacement.grid(row=5, column=3, padx=(0,25), pady=(35,0))
new_window_reference_point_tab.grid_rowconfigure(8, weight=0)
new_window_reference_point_tab.grid_columnconfigure(8, weight=0)
input_long_text = tk.Text(new_window_reference_point_tab, width=12, height=1)
input_long_text.insert(INSERT, "Longitudinal:")
input_long_text.config(bg='#ffffff', relief=FLAT, bd=0, state=DISABLED, font=('calibri', '10'))
input_long_text.grid(row=5, column=3, sticky=N+S, padx=(250,0), pady=(25,0))
new_window_reference_point_tab.grid_rowconfigure(12, weight=0)
new_window_reference_point_tab.grid_rowconfigure(12, weight=0)
Globals.DVH_input_longitudinal_displacement = tk.Text(new_window_reference_point_tab, width=5, height=1)
Globals.DVH_input_longitudinal_displacement.insert(INSERT, " ")
Globals.DVH_input_longitudinal_displacement.config(bg='#E5f9ff', relief=GROOVE, bd=2, state=NORMAL, font=('calibri', '11'))
Globals.DVH_input_longitudinal_displacement.grid(row=5, column=3, padx=(240,0), pady=(35,0))
new_window_reference_point_tab.grid_rowconfigure(9, weight=0)
new_window_reference_point_tab.grid_columnconfigure(9, weight=0)
film_window_mark_ROI_text = tk.Text(new_window_isocenter_tab, width=55, height=7)
film_window_mark_ROI_text.insert(INSERT, \
"When clicking the button \"Mark ROI\" a window showing the\n\
image will appear and you are to drag a rectangle marking \n\
the region of interest. Fidora will assume the film has been\n\
scanned in either portrait or landscape orientation. When\n\
the ROI has been marked it will appear on the image. If you\n\
are not happy with the placement click the button again.")
film_window_mark_ROI_text.config(bg='#ffffff', relief=FLAT, bd=0, state=DISABLED, font=('calibri', '11'))
film_window_mark_ROI_text.grid(row=5, column=3, rowspan=4, sticky=N+S+E+W, padx=(10,10), pady=(0,0))
new_window_isocenter_tab.grid_columnconfigure(4, weight=0)
new_window_isocenter_tab.grid_rowconfigure(4, weight=0)
film_window_mark_ROI_reference_point_text = tk.Text(new_window_reference_point_tab, width=55, height=5)
film_window_mark_ROI_reference_point_text.insert(INSERT, \
"When clicking the button \"Mark ROI\" a window showing the\n\
image will appear and you are to drag a rectangle marking \n\
the region of interest. Fidora will assume the film has been\n\
scanned in either portrait or landscape orientation. When\n\
the ROI has been marked it will appear on the image. If you\n\
are not happy with the placement click the button again.")
film_window_mark_ROI_reference_point_text.config(bg='#ffffff', relief=FLAT, bd=0, state=DISABLED, font=('calibri', '11'))
film_window_mark_ROI_reference_point_text.grid(row=6, column=3, rowspan=3, sticky=N+E+W, padx=(10,10), pady=(10,0))
new_window_reference_point_tab.grid_columnconfigure(4, weight=0)
new_window_reference_point_tab.grid_rowconfigure(4, weight=0)
mark_ROI_button_frame = tk.Frame(new_window_isocenter_tab)
mark_ROI_button_frame.grid(row=8, column=3, padx=(10,0), pady=(0,5))
mark_ROI_button_frame.configure(bg='#ffffff')
new_window_isocenter_tab.grid_columnconfigure(5, weight=0)
new_window_isocenter_tab.grid_rowconfigure(5, weight=0)
mark_ROI_button = tk.Button(mark_ROI_button_frame, text='Browse', image=Globals.profiles_mark_ROI_button_image,\
cursor='hand2',font=('calibri', '14'), relief=FLAT, state=ACTIVE, command=lambda: markROI(img, new_window_isocenter_tab, image_canvas, False))
mark_ROI_button.pack(expand=True, fill=BOTH)
mark_ROI_button.config(bg='#ffffff', activebackground='#ffffff', activeforeground='#ffffff', highlightthickness=0)
mark_ROI_button.image=Globals.profiles_mark_ROI_button_image
slice_offset_text = tk.Text(new_window_isocenter_tab, width=25, height=1)
slice_offset_text.insert(INSERT, "Slice offset, mm (default 0):")
slice_offset_text.config(state=DISABLED, font=('calibri', '10'), bd = 0, relief=FLAT)
slice_offset_text.grid(row=9, column=3, padx=(5,110), pady=(0,0))
new_window_isocenter_tab.grid_columnconfigure(6, weight=0)
new_window_isocenter_tab.grid_rowconfigure(6, weight=0)
Globals.DVH_slice_offset = tk.Text(new_window_isocenter_tab, width=8, height=1)
Globals.DVH_slice_offset.grid(row=9, column=3, padx=(110,10), pady=(0,0))
Globals.DVH_slice_offset.insert(INSERT, " ")
Globals.DVH_slice_offset.config(state=NORMAL, font=('calibri', '10'), bd = 2, bg='#ffffff')
new_window_isocenter_tab.grid_columnconfigure(7, weight=0)
new_window_isocenter_tab.grid_rowconfigure(7, weight=0)
mark_ROI_button_reference_point_frame = tk.Frame(new_window_reference_point_tab)
mark_ROI_button_reference_point_frame.grid(row=9, column=3, padx=(10,10), pady=(0,5))
mark_ROI_button_reference_point_frame.configure(bg='#ffffff')
new_window_reference_point_tab.grid_columnconfigure(5, weight=0)
new_window_reference_point_tab.grid_rowconfigure(5, weight=0)
mark_ROI_reference_point_button = tk.Button(mark_ROI_button_reference_point_frame, text='Browse', image=Globals.profiles_mark_ROI_button_image,\
cursor='hand2',font=('calibri', '14'), relief=FLAT, state=ACTIVE, command=lambda: markROI(img, new_window_reference_point_tab, image_canvas_reference_tab, True))
mark_ROI_reference_point_button.pack(expand=True, fill=BOTH)
mark_ROI_reference_point_button.config(bg='#ffffff', activebackground='#ffffff', activeforeground='#ffffff', highlightthickness=0)
mark_ROI_reference_point_button.image=Globals.profiles_mark_ROI_button_image
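#Called from the Done buttons: read the displacement or slice offset inputs, let the user pick a
#calibration batch, convert the film ROI to dose with pixel_to_dose, show the scanned image and the
#dose map, and store the distances from the ROI corners to the isocenter/reference point in mm.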
def finishFilmMarkers(ref_test):
Globals.DVH_slice_offset.config(state=DISABLED)
if(ref_test):
if(not(Globals.DVH_input_lateral_displacement.get("1.0",'end-1c')==" ")):
try:
test = float(Globals.DVH_input_lateral_displacement.get("1.0",'end-1c'))
Globals.DVH_lateral = test
except:
messagebox.showerror("Error", "The displacements must be numbers\n (Code: lateral displacement)")
return
else:
Globals.DVH_lateral = 0
if(not(Globals.DVH_input_longitudinal_displacement.get("1.0",'end-1c')==" ")):
try:
test = float(Globals.DVH_input_longitudinal_displacement.get("1.0", 'end-1c'))
Globals.DVH_longitudinal = test
except:
messagebox.showerror("Error", "The displacements must be numbers\n (Code: longitudinal displacement)")
return
else:
Globals.DVH_longitudinal = 0
if(not(Globals.DVH_input_vertical_displacement.get("1.0",'end-1c')==" ")):
try:
test = float(Globals.DVH_input_vertical_displacement.get("1.0", 'end-1c'))
Globals.DVH_vertical = test
except:
messagebox.showerror("Error", "The displacements must be numbers\n (Code: vertical displacement)")
return
else:
Globals.DVH_vertical = 0
Globals.DVH_input_vertical_displacement.config(state=DISABLED)
Globals.DVH_input_longitudinal_displacement.config(state=DISABLED)
Globals.DVH_input_lateral_displacement.config(state=DISABLED)
else:
if not Globals.DVH_slice_offset.get("1.0",'end-1c')==" ":
try:
offset = float(Globals.DVH_slice_offset.get("1.0",'end-1c'))
Globals.DVH_offset = offset
except:
messagebox.showerror("Error", "Slice offset must be a number \n(Code: finishFilmMarkers(false)")
return
else:
Globals.DVH_offset = 0
if(ref_test):
choose_batch_window = tk.Toplevel(new_window_reference_point_tab)
else:
choose_batch_window = tk.Toplevel(new_window_isocenter_tab)
choose_batch_window.geometry("670x380+50+50")
choose_batch_window.grab_set()
choose_batch_frame = tk.Frame(choose_batch_window)
choose_batch_frame.pack(expand=True, fill=BOTH)
choose_batch_frame.configure(bg='#ffffff')
batch_cnt = 0
weight_cnt = 0
read = open('calibration.txt', 'r')
lines = read.readlines()
read.close()
row_cnt=0
for l in lines:
words = l.split()
line = "Batch nr. : " + words[2] + ". Date: " + words[0] + " " + words[1] + "."
write_batch_nr = tk.Text(choose_batch_frame, width=10, height=1)
write_batch_nr.grid(row=row_cnt, column=0, sticky=N+S+W+E, padx=(10,5), pady=(10,10))
choose_batch_frame.grid_columnconfigure(weight_cnt, weight=0)
choose_batch_frame.grid_rowconfigure(weight_cnt, weight=0)
write_batch_nr.insert(INSERT, "Batch nr.: ")
write_batch_nr.config(state=DISABLED, bd = 0, font=('calibri', '12', 'bold'))
weight_cnt+=1
write_batch = tk.Text(choose_batch_frame, width=20, height=1)
write_batch.grid(row=row_cnt, column=1, sticky=N+S+W+E, padx=(10,5), pady=(10,10))
choose_batch_frame.grid_columnconfigure(weight_cnt, weight=0)
choose_batch_frame.grid_rowconfigure(weight_cnt, weight=0)
write_batch.insert(INSERT, words[2])
write_batch.config(state=DISABLED, bd = 0, font=('calibri', '12'))
weight_cnt+=1
write_batch_date = tk.Text(choose_batch_frame, width=8, height=1)
write_batch_date.grid(row=row_cnt, column=2, sticky=N+S+W+E, padx=(10,5), pady=(10,10))
choose_batch_frame.grid_columnconfigure(weight_cnt, weight=0)
choose_batch_frame.grid_rowconfigure(weight_cnt, weight=0)
write_batch_date.insert(INSERT, "Date: ")
write_batch_date.config(state=DISABLED, bd = 0, font=('calibri', '12', 'bold'))
weight_cnt+=1
write_date = tk.Text(choose_batch_frame, width=30, height=1)
write_date.grid(row=row_cnt, column=3, sticky=N+S+W+E, padx=(10,5), pady=(10,10))
choose_batch_frame.grid_columnconfigure(weight_cnt, weight=0)
choose_batch_frame.grid_rowconfigure(weight_cnt, weight=0)
write_date.insert(INSERT, words[0] + ", " + words[1] + "")
write_date.config(state=DISABLED, bd = 0, font=('calibri', '12'))
weight_cnt+=1
Radiobutton(choose_batch_frame, text='',bg='#ffffff', cursor='hand2',font=('calibri', '14'), \
variable=Globals.DVH_film_batch, value=batch_cnt).grid(row=row_cnt, \
column=4, sticky=N+S+W+E, padx=(5,5), pady=(10,10))
choose_batch_frame.grid_columnconfigure(weight_cnt, weight=0)
choose_batch_frame.grid_rowconfigure(weight_cnt, weight=0)
weight_cnt += 1
row_cnt += 1
batch_cnt += 1
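#Read the calibration coefficients of the selected batch from calibration.txt, convert the film
#(and its ROI) from red-channel pixel values to dose and draw the resulting images.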
def set_batch():
choose_batch_window.destroy()
f = open('calibration.txt', 'r')
lines = f.readlines()
words = lines[Globals.DVH_film_batch.get()].split()
Globals.DVH_popt_red[0] = float(words[3])
Globals.DVH_popt_red[1] = float(words[4])
Globals.DVH_popt_red[2] = float(words[5])
f.close()
Globals.DVH_film_dataset_ROI_red_channel_dose = np.zeros((Globals.DVH_film_dataset_ROI_red_channel.shape[0],\
Globals.DVH_film_dataset_ROI_red_channel.shape[1]))
for i in range(Globals.DVH_film_dataset_ROI_red_channel_dose.shape[0]):
for j in range(Globals.DVH_film_dataset_ROI_red_channel_dose.shape[1]):
Globals.DVH_film_dataset_ROI_red_channel_dose[i,j] = Globals.DVH_film_factor*\
pixel_to_dose(Globals.DVH_film_dataset_ROI_red_channel[i,j], \
Globals.DVH_popt_red[0], Globals.DVH_popt_red[1], Globals.DVH_popt_red[2])
Globals.DVH_film_dataset_red_channel_dose = np.zeros((Globals.DVH_film_dataset_red_channel.shape[0],\
Globals.DVH_film_dataset_red_channel.shape[1]))
for i in range(Globals.DVH_film_dataset_red_channel_dose.shape[0]):
for j in range(Globals.DVH_film_dataset_red_channel_dose.shape[1]):
Globals.DVH_film_dataset_red_channel_dose[i,j] = Globals.DVH_film_factor*\
pixel_to_dose(Globals.DVH_film_dataset_red_channel[i,j], \
Globals.DVH_popt_red[0], Globals.DVH_popt_red[1], Globals.DVH_popt_red[2])
Globals.DVH_film_write_image.create_image(0,0,image=scaled_image_visual, anchor="nw")
Globals.DVH_film_write_image.image = scaled_image_visual
mx_film=np.max(Globals.DVH_film_dataset_ROI_red_channel_dose)
Globals.DVH_max_dose_film = mx_film
img_film = Globals.DVH_film_dataset_ROI_red_channel_dose
img_film = img_film/mx_film
PIL_img_film = Image.fromarray(np.uint8(cm.viridis(img_film)*255))
scaled_image_visual_film = ImageTk.PhotoImage(image=PIL_img_film)
Globals.DVH_film_dose_write_image.create_image(0,0,image=scaled_image_visual_film, anchor="nw")
Globals.DVH_film_dose_write_image.image = scaled_image_visual_film
film_scanned_image_text_canvas.create_image(0,0,image=Globals.profiles_scanned_image_text_image, anchor="nw")
film_scanned_image_text_canvas.image = Globals.profiles_scanned_image_text_image
film_dose_map_image_text_canvas.create_image(0,0, image=Globals.profiles_film_dose_map_text_image, anchor="nw")
film_dose_map_image_text_canvas.image=Globals.profiles_film_dose_map_text_image
new_window.destroy()
set_batch_button_frame = tk.Frame(choose_batch_frame)
set_batch_button_frame.grid(row=row_cnt, column=1, columnspan=3, padx=(10,0), pady=(5,5))
set_batch_button_frame.configure(bg='#ffffff')
choose_batch_frame.grid_columnconfigure(weight_cnt, weight=0)
choose_batch_frame.grid_rowconfigure(weight_cnt, weight=0)
set_batch_button = tk.Button(set_batch_button_frame, text='OK', image=Globals.done_button_image, cursor='hand2',\
font=('calibri', '14'), relief=FLAT, state=ACTIVE, command=set_batch)
set_batch_button.pack(expand=True, fill=BOTH)
set_batch_button.image=Globals.done_button_image
img_ROI = Globals.DVH_film_dataset[Globals.DVH_ROI_coords[0][1]:Globals.DVH_ROI_coords[2][1],\
Globals.DVH_ROI_coords[0][0]:Globals.DVH_ROI_coords[1][0], :]
img_ROI_red_channel = img_ROI[:,:,2]
Globals.DVH_film_variable_ROI_coords = [Globals.DVH_ROI_coords[0][1], Globals.DVH_ROI_coords[2][1],\
Globals.DVH_ROI_coords[0][0], Globals.DVH_ROI_coords[1][0]]
Globals.DVH_film_dataset_ROI = img_ROI
Globals.DVH_film_dataset_ROI_red_channel = img_ROI_red_channel
R = img_ROI[:,:,2]
B = img_ROI[:,:,0]
G = img_ROI[:,:,1]
img_ROI_RGB = np.zeros(img_ROI.shape)
img_ROI_RGB[:,:,0] = R
img_ROI_RGB[:,:,1] = G
img_ROI_RGB[:,:,2] = B
PIL_img_ROI = (img_ROI_RGB/256).astype('uint8')
PIL_img_ROI = Image.fromarray(PIL_img_ROI, 'RGB')
#PIL_img_ROI = Image.fromarray((img_ROI_RGB * 255).astype(np.uint8), 'RGB')
wid = PIL_img_ROI.width
heig = PIL_img_ROI.height
#film_window_write_image = tk.Canvas(film_window_scroll_frame)
film_image_canvas = tk.Canvas(Globals.DVH_film_panedwindow)
film_image_canvas.grid(row=0,column=0, sticky=N+S+W+E)
Globals.DVH_film_panedwindow.add(film_image_canvas, \
height=max(heig,Globals.profiles_scanned_image_text_image.height()), \
width=wid + Globals.profiles_scanned_image_text_image.width())
film_image_canvas.config(bg='#ffffff', relief=FLAT, highlightthickness=0, \
height=max(heig,Globals.profiles_scanned_image_text_image.height()), \
width=wid + Globals.profiles_scanned_image_text_image.width())
film_dose_canvas = tk.Canvas(Globals.DVH_film_panedwindow)
film_dose_canvas.grid(row=1,column=0, sticky=N+S+W+E)
Globals.DVH_film_panedwindow.add(film_dose_canvas, \
height=max(heig,Globals.profiles_film_dose_map_text_image.height()), \
width=wid + Globals.profiles_film_dose_map_text_image.width())
film_dose_canvas.config(bg='#ffffff', relief=FLAT, highlightthickness=0, \
height=max(heig,Globals.profiles_film_dose_map_text_image.height()), \
width=wid + Globals.profiles_film_dose_map_text_image.width())
Globals.DVH_film_write_image = tk.Canvas(film_image_canvas)
Globals.DVH_film_write_image.grid(row=0,column=1,sticky=N+S+W+E)
Globals.DVH_film_write_image.config(bg='#ffffff', relief=FLAT, highlightthickness=0, width=wid, height=heig)
Globals.DVH_film_dose_write_image = tk.Canvas(film_dose_canvas)
Globals.DVH_film_dose_write_image.grid(row=0,column=1,sticky=N+S+W+E)
Globals.DVH_film_dose_write_image.config(bg='#ffffff', relief=FLAT, highlightthickness=0, width=wid, height=heig)
film_scanned_image_text_canvas=tk.Canvas(film_image_canvas)
film_scanned_image_text_canvas.grid(row=0,column=0,sticky=N+S+W+E)
film_scanned_image_text_canvas.config(bg='#ffffff', relief=FLAT, highlightthickness=0, \
height=Globals.profiles_scanned_image_text_image.height(), width=Globals.profiles_scanned_image_text_image.width())
film_dose_map_image_text_canvas=tk.Canvas(film_dose_canvas)
film_dose_map_image_text_canvas.grid(row=0,column=0,sticky=N+S+W+E)
film_dose_map_image_text_canvas.config(bg='#ffffff', relief=FLAT, highlightthickness=0, \
height=Globals.profiles_film_dose_map_text_image.height(), width=Globals.profiles_film_dose_map_text_image.width())
scaled_image_visual = PIL_img_ROI
scaled_image_visual = ImageTk.PhotoImage(image=scaled_image_visual)
Globals.DVH_upload_button_doseplan.config(state=DISABLED)
Globals.DVH_upload_button_rtplan.config(state=ACTIVE)
Globals.DVH_upload_button_film.config(state=DISABLED)
#Calculate the distance between the ROI corners and the isocenter, given in mm
# [top left[towards the left, upwards], top right[towards the left (right becomes negative), upwards], bottom left, bottom right]
if(ref_test):
Globals.DVH_distance_reference_point_ROI.append([(Globals.DVH_film_reference_point[0]-Globals.DVH_ROI_coords[0][0])*0.2, \
(Globals.DVH_film_reference_point[1] -Globals.DVH_ROI_coords[0][1])*0.2])
Globals.DVH_distance_reference_point_ROI.append([(Globals.DVH_film_reference_point[0] - Globals.DVH_ROI_coords[1][0])*0.2,\
(Globals.DVH_film_reference_point[1] - Globals.DVH_ROI_coords[1][1])*0.2])
Globals.DVH_distance_reference_point_ROI.append([(Globals.DVH_film_reference_point[0] - Globals.DVH_ROI_coords[2][0])*0.2,\
(Globals.DVH_film_reference_point[1] - Globals.DVH_ROI_coords[2][1])*0.2])
Globals.DVH_distance_reference_point_ROI.append([(Globals.DVH_film_reference_point[0] - Globals.DVH_ROI_coords[3][0])*0.2,\
(Globals.DVH_film_reference_point[1] - Globals.DVH_ROI_coords[3][1])*0.2])
Globals.DVH_isocenter_or_reference_point = "Ref_point"
else:
Globals.DVH_distance_isocenter_ROI.append([(Globals.DVH_film_isocenter[0]-Globals.DVH_ROI_coords[0][0])*0.2, \
(Globals.DVH_film_isocenter[1] -Globals.DVH_ROI_coords[0][1])*0.2])
Globals.DVH_distance_isocenter_ROI.append([(Globals.DVH_film_isocenter[0] - Globals.DVH_ROI_coords[1][0])*0.2,\
(Globals.DVH_film_isocenter[1] - Globals.DVH_ROI_coords[1][1])*0.2])
Globals.DVH_distance_isocenter_ROI.append([(Globals.DVH_film_isocenter[0] - Globals.DVH_ROI_coords[2][0])*0.2,\
(Globals.DVH_film_isocenter[1] - Globals.DVH_ROI_coords[2][1])*0.2])
Globals.DVH_distance_isocenter_ROI.append([(Globals.DVH_film_isocenter[0] - Globals.DVH_ROI_coords[3][0])*0.2,\
(Globals.DVH_film_isocenter[1] - Globals.DVH_ROI_coords[3][1])*0.2])
Globals.DVH_isocenter_or_reference_point = "Isocenter"
done_button_frame = tk.Frame(new_window_isocenter_tab)
done_button_frame.grid(row=10, column=3, padx=(10,10), pady=(5,5), sticky=N+S+W+E)
done_button_frame.configure(bg='#ffffff')
new_window_isocenter_tab.grid_columnconfigure(5, weight=0)
new_window_isocenter_tab.grid_rowconfigure(5, weight=0)
Globals.DVH_done_button = tk.Button(done_button_frame, text='Done', image=Globals.done_button_image,\
cursor='hand2', font=('calibri', '14'), relief=FLAT, state=DISABLED, command=lambda: finishFilmMarkers(False))
Globals.DVH_done_button.pack(expand=True, fill=BOTH)
Globals.DVH_done_button.config(bg='#ffffff', activebackground='#ffffff', activeforeground='#ffffff', highlightthickness=0)
Globals.DVH_done_button.image=Globals.done_button_image
done_button_reference_point_frame = tk.Frame(new_window_reference_point_tab)
done_button_reference_point_frame.grid(row=10, column=3, padx=(10,10), pady=(5,5), sticky=N+S+W+E)
done_button_reference_point_frame.configure(bg='#ffffff')
new_window_reference_point_tab.grid_columnconfigure(5, weight=0)
new_window_reference_point_tab.grid_rowconfigure(5, weight=0)
Globals.DVH_done_button_reference_point= tk.Button(done_button_reference_point_frame, text='Done', image=Globals.done_button_image,\
cursor='hand2', font=('calibri', '14'), relief=FLAT, state=DISABLED, command=lambda: finishFilmMarkers(True))
Globals.DVH_done_button_reference_point.pack(expand=True, fill=BOTH)
Globals.DVH_done_button_reference_point.config(bg='#ffffff', activebackground='#ffffff', activeforeground='#ffffff', highlightthickness=0)
Globals.DVH_done_button_reference_point.image=Globals.done_button_image
elif(ext==""):
return
else:
messagebox.showerror("Error", "The file must be a *.tif file")
def help_showPlanes():
new_window = tk.Toplevel(Globals.tab5)
w = Globals.profiles_showPlanes_image.width()
h = Globals.profiles_showPlanes_image.height()
new_window.geometry("%dx%d+0+0" % (w, h))
new_window.grab_set()
canvas = tk.Canvas(new_window)
canvas.config(relief=FLAT, bg='#ffffff', highlightthickness=0)
canvas.create_image(0, 0, image=Globals.profiles_showPlanes_image, anchor='nw')
canvas.pack(expand=True, fill=BOTH)
[((2165, 2176), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2173, 2176), True, 'import numpy as np\n'), ((6927, 6938), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (6935, 6938), True, 'import numpy as np\n'), ((9940, 9951), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (9948, 9951), True, 'import numpy as np\n'), ((28706, 28717), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (28714, 28717), True, 'import numpy as np\n'), ((32116, 32127), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (32124, 32127), True, 'import numpy as np\n'), ((50068, 50096), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {}), '()\n', (50094, 50096), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((50346, 50357), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (50355, 50357), False, 'import os\n'), ((50371, 50392), 'os.path.dirname', 'os.path.dirname', (['file'], {}), '(file)\n', (50386, 50392), False, 'import os\n'), ((50397, 50413), 'os.chdir', 'os.chdir', (['parent'], {}), '(parent)\n', (50405, 50413), False, 'import os\n'), ((50428, 50449), 'pydicom.dcmread', 'pydicom.dcmread', (['file'], {}), '(file)\n', (50443, 50449), False, 'import pydicom\n'), ((50887, 50911), 'os.chdir', 'os.chdir', (['current_folder'], {}), '(current_folder)\n', (50895, 50911), False, 'import os\n'), ((55491, 55602), 'tkinter.messagebox.askyesno', 'messagebox.askyesno', (['"""Question"""', '"""Are you going to upload several doseplans and/or use a factor on a plan?"""'], {}), "('Question',\n 'Are you going to upload several doseplans and/or use a factor on a plan?')\n", (55510, 55602), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((55695, 55727), 'tkinter.Toplevel', 'tk.Toplevel', (['Globals.tab5_canvas'], {}), '(Globals.tab5_canvas)\n', (55706, 55727), True, 'import tkinter as tk\n'), ((55859, 55912), 'tkinter.Frame', 'tk.Frame', (['several_doseplans_window'], {'bd': '(0)', 'relief': 'FLAT'}), '(several_doseplans_window, bd=0, relief=FLAT)\n', (55867, 55912), True, 'import tkinter as tk\n'), ((55945, 55977), 'tkinter.Canvas', 'Canvas', (['doseplans_over_all_frame'], {}), '(doseplans_over_all_frame)\n', (55951, 55977), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((56006, 56106), 'tkinter.Scrollbar', 'Scrollbar', (['doseplans_over_all_frame'], {'orient': 'HORIZONTAL', 'command': 'doseplans_over_all_canvas.xview'}), '(doseplans_over_all_frame, orient=HORIZONTAL, command=\n doseplans_over_all_canvas.xview)\n', (56015, 56106), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((56129, 56205), 'tkinter.Scrollbar', 'Scrollbar', (['doseplans_over_all_frame'], {'command': 'doseplans_over_all_canvas.yview'}), '(doseplans_over_all_frame, command=doseplans_over_all_canvas.yview)\n', 
(56138, 56205), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((56248, 56284), 'tkinter.ttk.Frame', 'ttk.Frame', (['doseplans_over_all_canvas'], {}), '(doseplans_over_all_canvas)\n', (56257, 56284), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((57461, 57505), 'tkinter.Frame', 'tk.Frame', (['Globals.DVH_doseplans_scroll_frame'], {}), '(Globals.DVH_doseplans_scroll_frame)\n', (57469, 57505), True, 'import tkinter as tk\n'), ((57605, 57673), 'Globals.DVH_doseplans_scroll_frame.grid_columnconfigure', 'Globals.DVH_doseplans_scroll_frame.grid_columnconfigure', (['(0)'], {'weight': '(0)'}), '(0, weight=0)\n', (57660, 57673), False, 'import Globals\n'), ((57678, 57743), 'Globals.DVH_doseplans_scroll_frame.grid_rowconfigure', 'Globals.DVH_doseplans_scroll_frame.grid_rowconfigure', (['(0)'], {'weight': '(0)'}), '(0, weight=0)\n', (57730, 57743), False, 'import Globals\n'), ((61764, 61808), 'tkinter.Frame', 'tk.Frame', (['Globals.DVH_doseplans_scroll_frame'], {}), '(Globals.DVH_doseplans_scroll_frame)\n', (61772, 61808), True, 'import tkinter as tk\n'), ((61964, 62029), 'Globals.DVH_doseplans_scroll_frame.grid_rowconfigure', 'Globals.DVH_doseplans_scroll_frame.grid_rowconfigure', (['(3)'], {'weight': '(0)'}), '(3, weight=0)\n', (62016, 62029), False, 'import Globals\n'), ((62034, 62102), 'Globals.DVH_doseplans_scroll_frame.grid_columnconfigure', 'Globals.DVH_doseplans_scroll_frame.grid_columnconfigure', (['(3)'], {'weight': '(0)'}), '(3, weight=0)\n', (62089, 62102), False, 'import Globals\n'), ((62132, 62322), 'tkinter.Button', 'tk.Button', (['doseplans_done_button_frame'], {'text': '"""Done"""', 'image': 'Globals.done_button_image', 'cursor': '"""hand2"""', 'font': "('calibri', '14')", 'relief': 'FLAT', 'state': 'ACTIVE', 'command': 'closeUploadDoseplans'}), "(doseplans_done_button_frame, text='Done', image=Globals.\n done_button_image, cursor='hand2', font=('calibri', '14'), relief=FLAT,\n state=ACTIVE, command=closeUploadDoseplans)\n", (62141, 62322), True, 'import tkinter as tk\n'), ((62593, 62656), 'tkinter.Text', 'tk.Text', (['Globals.DVH_doseplans_scroll_frame'], {'width': '(15)', 'height': '(1)'}), '(Globals.DVH_doseplans_scroll_frame, width=15, height=1)\n', (62600, 62656), True, 'import tkinter as tk\n'), ((62895, 62960), 'Globals.DVH_doseplans_scroll_frame.grid_rowconfigure', 'Globals.DVH_doseplans_scroll_frame.grid_rowconfigure', (['(1)'], {'weight': '(0)'}), '(1, weight=0)\n', (62947, 62960), False, 'import Globals\n'), ((62965, 63033), 'Globals.DVH_doseplans_scroll_frame.grid_columnconfigure', 'Globals.DVH_doseplans_scroll_frame.grid_columnconfigure', (['(1)'], {'weight': '(0)'}), '(1, weight=0)\n', (63020, 63033), False, 'import Globals\n'), ((63054, 63117), 'tkinter.Text', 'tk.Text', (['Globals.DVH_doseplans_scroll_frame'], {'width': '(30)', 'height': '(2)'}), '(Globals.DVH_doseplans_scroll_frame, width=30, height=2)\n', (63061, 63117), True, 'import tkinter as tk\n'), ((63408, 63476), 'Globals.DVH_doseplans_scroll_frame.grid_columnconfigure', 'Globals.DVH_doseplans_scroll_frame.grid_columnconfigure', (['(2)'], {'weight': '(0)'}), '(2, weight=0)\n', (63463, 63476), False, 
'import Globals\n'), ((63480, 63545), 'Globals.DVH_doseplans_scroll_frame.grid_rowconfigure', 'Globals.DVH_doseplans_scroll_frame.grid_rowconfigure', (['(2)'], {'weight': '(0)'}), '(2, weight=0)\n', (63532, 63545), False, 'import Globals\n'), ((63578, 63606), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {}), '()\n', (63604, 63606), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((63856, 63867), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (63865, 63867), False, 'import os\n'), ((63881, 63902), 'os.path.dirname', 'os.path.dirname', (['file'], {}), '(file)\n', (63896, 63902), False, 'import os\n'), ((63907, 63923), 'os.chdir', 'os.chdir', (['parent'], {}), '(parent)\n', (63915, 63923), False, 'import os\n'), ((63938, 63959), 'pydicom.dcmread', 'pydicom.dcmread', (['file'], {}), '(file)\n', (63953, 63959), False, 'import pydicom\n'), ((63964, 63988), 'os.chdir', 'os.chdir', (['current_folder'], {}), '(current_folder)\n', (63972, 63988), False, 'import os\n'), ((66091, 66146), 'Globals.DVH_upload_button_doseplan.config', 'Globals.DVH_upload_button_doseplan.config', ([], {'state': 'ACTIVE'}), '(state=ACTIVE)\n', (66132, 66146), False, 'import Globals\n'), ((66151, 66206), 'Globals.DVH_upload_button_rtplan.config', 'Globals.DVH_upload_button_rtplan.config', ([], {'state': 'DISABLED'}), '(state=DISABLED)\n', (66190, 66206), False, 'import Globals\n'), ((66818, 66847), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'image': 'img'}), '(image=img)\n', (66836, 66847), False, 'from PIL import Image, ImageTk\n'), ((66876, 66913), 'tkinter.Toplevel', 'tk.Toplevel', (['new_window_isocenter_tab'], {}), '(new_window_isocenter_tab)\n', (66887, 66913), True, 'import tkinter as tk\n'), ((67041, 67091), 'tkinter.Frame', 'tk.Frame', (['mark_isocenter_window'], {'bd': '(0)', 'relief': 'FLAT'}), '(mark_isocenter_window, bd=0, relief=FLAT)\n', (67049, 67091), True, 'import tkinter as tk\n'), ((67129, 67166), 'tkinter.Canvas', 'Canvas', (['mark_isocenter_over_all_frame'], {}), '(mark_isocenter_over_all_frame)\n', (67135, 67166), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((67200, 67310), 'tkinter.Scrollbar', 'Scrollbar', (['mark_isocenter_over_all_frame'], {'orient': 'HORIZONTAL', 'command': 'mark_isocenter_over_all_canvas.xview'}), '(mark_isocenter_over_all_frame, orient=HORIZONTAL, command=\n mark_isocenter_over_all_canvas.xview)\n', (67209, 67310), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((67338, 67429), 'tkinter.Scrollbar', 'Scrollbar', (['mark_isocenter_over_all_frame'], {'command': 'mark_isocenter_over_all_canvas.yview'}), '(mark_isocenter_over_all_frame, command=\n mark_isocenter_over_all_canvas.yview)\n', (67347, 67429), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, 
RIGHT, LEFT, ttk\n'), ((67460, 67501), 'tkinter.ttk.Frame', 'ttk.Frame', (['mark_isocenter_over_all_canvas'], {}), '(mark_isocenter_over_all_canvas)\n', (67469, 67501), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((68760, 68798), 'tkinter.Canvas', 'tk.Canvas', (['mark_isocenter_scroll_frame'], {}), '(mark_isocenter_scroll_frame)\n', (68769, 68798), True, 'import tkinter as tk\n'), ((71568, 71597), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'image': 'img'}), '(image=img)\n', (71586, 71597), False, 'from PIL import Image, ImageTk\n'), ((71632, 71675), 'tkinter.Toplevel', 'tk.Toplevel', (['new_window_reference_point_tab'], {}), '(new_window_reference_point_tab)\n', (71643, 71675), True, 'import tkinter as tk\n'), ((71821, 71877), 'tkinter.Frame', 'tk.Frame', (['mark_reference_point_window'], {'bd': '(0)', 'relief': 'FLAT'}), '(mark_reference_point_window, bd=0, relief=FLAT)\n', (71829, 71877), True, 'import tkinter as tk\n'), ((71921, 71964), 'tkinter.Canvas', 'Canvas', (['mark_reference_point_over_all_frame'], {}), '(mark_reference_point_over_all_frame)\n', (71927, 71964), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((72004, 72126), 'tkinter.Scrollbar', 'Scrollbar', (['mark_reference_point_over_all_frame'], {'orient': 'HORIZONTAL', 'command': 'mark_reference_point_over_all_canvas.xview'}), '(mark_reference_point_over_all_frame, orient=HORIZONTAL, command=\n mark_reference_point_over_all_canvas.xview)\n', (72013, 72126), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((72160, 72263), 'tkinter.Scrollbar', 'Scrollbar', (['mark_reference_point_over_all_frame'], {'command': 'mark_reference_point_over_all_canvas.yview'}), '(mark_reference_point_over_all_frame, command=\n mark_reference_point_over_all_canvas.yview)\n', (72169, 72263), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((72300, 72347), 'tkinter.ttk.Frame', 'ttk.Frame', (['mark_reference_point_over_all_canvas'], {}), '(mark_reference_point_over_all_canvas)\n', (72309, 72347), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((73732, 73776), 'tkinter.Canvas', 'tk.Canvas', (['mark_reference_point_scroll_frame'], {}), '(mark_reference_point_scroll_frame)\n', (73741, 73776), True, 'import tkinter as tk\n'), ((75638, 75667), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'image': 'img'}), '(image=img)\n', (75656, 75667), False, 'from PIL import Image, ImageTk\n'), ((75690, 75706), 'tkinter.Toplevel', 'tk.Toplevel', (['tab'], {}), '(tab)\n', (75701, 75706), True, 'import tkinter as tk\n'), ((75816, 
75860), 'tkinter.Frame', 'tk.Frame', (['mark_ROI_window'], {'bd': '(0)', 'relief': 'FLAT'}), '(mark_ROI_window, bd=0, relief=FLAT)\n', (75824, 75860), True, 'import tkinter as tk\n'), ((75892, 75923), 'tkinter.Canvas', 'Canvas', (['mark_ROI_over_all_frame'], {}), '(mark_ROI_over_all_frame)\n', (75898, 75923), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((75951, 76049), 'tkinter.Scrollbar', 'Scrollbar', (['mark_ROI_over_all_frame'], {'orient': 'HORIZONTAL', 'command': 'mark_ROI_over_all_canvas.xview'}), '(mark_ROI_over_all_frame, orient=HORIZONTAL, command=\n mark_ROI_over_all_canvas.xview)\n', (75960, 76049), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((76071, 76145), 'tkinter.Scrollbar', 'Scrollbar', (['mark_ROI_over_all_frame'], {'command': 'mark_ROI_over_all_canvas.yview'}), '(mark_ROI_over_all_frame, command=mark_ROI_over_all_canvas.yview)\n', (76080, 76145), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((76175, 76210), 'tkinter.ttk.Frame', 'ttk.Frame', (['mark_ROI_over_all_canvas'], {}), '(mark_ROI_over_all_canvas)\n', (76184, 76210), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((77343, 77375), 'tkinter.Canvas', 'tk.Canvas', (['mark_ROI_scroll_frame'], {}), '(mark_ROI_scroll_frame)\n', (77352, 77375), True, 'import tkinter as tk\n'), ((80666, 80694), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {}), '()\n', (80692, 80694), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((117283, 117308), 'tkinter.Toplevel', 'tk.Toplevel', (['Globals.tab5'], {}), '(Globals.tab5)\n', (117294, 117308), True, 'import tkinter as tk\n'), ((117317, 117358), 'Globals.profiles_showPlanes_image.width', 'Globals.profiles_showPlanes_image.width', ([], {}), '()\n', (117356, 117358), False, 'import Globals\n'), ((117367, 117409), 'Globals.profiles_showPlanes_image.height', 'Globals.profiles_showPlanes_image.height', ([], {}), '()\n', (117407, 117409), False, 'import Globals\n'), ((117496, 117517), 'tkinter.Canvas', 'tk.Canvas', (['new_window'], {}), '(new_window)\n', (117505, 117517), True, 'import tkinter as tk\n'), ((2369, 2384), 'numpy.round', 'np.round', (['iso_1'], {}), '(iso_1)\n', (2377, 2384), True, 'import numpy as np\n'), ((2411, 2426), 'numpy.round', 'np.round', (['iso_2'], {}), '(iso_2)\n', (2419, 2426), True, 'import numpy as np\n'), ((2453, 2468), 'numpy.round', 'np.round', (['iso_3'], {}), '(iso_3)\n', (2461, 2468), True, 'import numpy as np\n'), ((3342, 3359), 'numpy.round', 'np.round', (['lateral'], {}), '(lateral)\n', (3350, 3359), True, 
'import numpy as np\n'), ((3382, 3400), 'numpy.round', 'np.round', (['vertical'], {}), '(vertical)\n', (3390, 3400), True, 'import numpy as np\n'), ((3421, 3437), 'numpy.round', 'np.round', (['longit'], {}), '(longit)\n', (3429, 3437), True, 'import numpy as np\n'), ((3509, 3560), 'numpy.round', 'np.round', (['Globals.DVH_doseplan_lateral_displacement'], {}), '(Globals.DVH_doseplan_lateral_displacement)\n', (3517, 3560), True, 'import numpy as np\n'), ((3605, 3657), 'numpy.round', 'np.round', (['Globals.DVH_doseplan_vertical_displacement'], {}), '(Globals.DVH_doseplan_vertical_displacement)\n', (3613, 3657), True, 'import numpy as np\n'), ((3706, 3762), 'numpy.round', 'np.round', (['Globals.DVH_doseplan_longitudianl_displacement'], {}), '(Globals.DVH_doseplan_longitudianl_displacement)\n', (3714, 3762), True, 'import numpy as np\n'), ((18578, 18732), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Reference point is outside of dosematrix\n (Code: first dimension, number of frames in dosematrix)"""'], {}), '(\'Error\',\n """Reference point is outside of dosematrix\n (Code: first dimension, number of frames in dosematrix)"""\n )\n', (18598, 18732), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((18823, 18966), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Reference point is outside of dosematrix\n (Code: second dimension, rows in dosematrix)"""'], {}), '(\'Error\',\n """Reference point is outside of dosematrix\n (Code: second dimension, rows in dosematrix)"""\n )\n', (18843, 18966), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((19057, 19202), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Reference point is outside of dosematrix\n (Code: third dimension, columns in dosematrix)"""'], {}), '(\'Error\',\n """Reference point is outside of dosematrix\n (Code: third dimension, columns in dosematrix)"""\n )\n', (19077, 19202), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((20318, 20423), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Fatal Error"""', '"""Fatal error: marked ROI is out of range in doseplan. Try again"""'], {}), "('Fatal Error',\n 'Fatal error: marked ROI is out of range in doseplan. Try again')\n", (20338, 20423), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((20642, 20747), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Fatal Error"""', '"""Fatal error: marked ROI is out of range in doseplan. Try again"""'], {}), "('Fatal Error',\n 'Fatal error: marked ROI is out of range in doseplan. 
Try again')\n", (20662, 20747), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((21009, 21114), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Fatal Error"""', '"""Fatal error: marked ROI is out of range in doseplan. Try again"""'], {}), "('Fatal Error',\n 'Fatal error: marked ROI is out of range in doseplan. Try again')\n", (21029, 21114), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((21541, 21646), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Fatal Error"""', '"""Fatal error: marked ROI is out of range in doseplan. Try again"""'], {}), "('Fatal Error',\n 'Fatal error: marked ROI is out of range in doseplan. Try again')\n", (21561, 21646), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((22087, 22192), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Fatal Error"""', '"""Fatal error: marked ROI is out of range in doseplan. Try again"""'], {}), "('Fatal Error',\n 'Fatal error: marked ROI is out of range in doseplan. Try again')\n", (22107, 22192), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((22261, 22366), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Fatal Error"""', '"""Fatal error: marked ROI is out of range in doseplan. Try again"""'], {}), "('Fatal Error',\n 'Fatal error: marked ROI is out of range in doseplan. Try again')\n", (22281, 22366), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((22840, 22945), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Fatal Error"""', '"""Fatal error: marked ROI is out of range in doseplan. Try again"""'], {}), "('Fatal Error',\n 'Fatal error: marked ROI is out of range in doseplan. Try again')\n", (22860, 22945), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((23232, 23337), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Fatal Error"""', '"""Fatal error: marked ROI is out of range in doseplan. Try again"""'], {}), "('Fatal Error',\n 'Fatal error: marked ROI is out of range in doseplan. 
Try again')\n", (23252, 23337), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((24201, 24361), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""ROI info"""', '("""The ROI marked on the film did not fit with the size of the doseplan and had to be cut.\n"""\n + ROI_info)'], {}), '(\'ROI info\', \n """The ROI marked on the film did not fit with the size of the doseplan and had to be cut.\n"""\n + ROI_info)\n', (24220, 24361), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((25233, 25244), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (25239, 25244), True, 'import numpy as np\n'), ((25524, 25563), 'tkinter.Canvas', 'tk.Canvas', (['Globals.DVH_film_panedwindow'], {}), '(Globals.DVH_film_panedwindow)\n', (25533, 25563), True, 'import tkinter as tk\n'), ((26119, 26145), 'tkinter.Canvas', 'tk.Canvas', (['doseplan_canvas'], {}), '(doseplan_canvas)\n', (26128, 26145), True, 'import tkinter as tk\n'), ((26154, 26230), 'Globals.DVH_doseplan_write_image.grid', 'Globals.DVH_doseplan_write_image.grid', ([], {'row': '(0)', 'column': '(1)', 'sticky': '(N + S + W + E)'}), '(row=0, column=1, sticky=N + S + W + E)\n', (26191, 26230), False, 'import Globals\n'), ((26231, 26347), 'Globals.DVH_doseplan_write_image.config', 'Globals.DVH_doseplan_write_image.config', ([], {'bg': '"""#ffffff"""', 'relief': 'FLAT', 'highlightthickness': '(0)', 'width': 'wid', 'height': 'heig'}), "(bg='#ffffff', relief=FLAT,\n highlightthickness=0, width=wid, height=heig)\n", (26270, 26347), False, 'import Globals\n'), ((26382, 26408), 'tkinter.Canvas', 'tk.Canvas', (['doseplan_canvas'], {}), '(doseplan_canvas)\n', (26391, 26408), True, 'import tkinter as tk\n'), ((26773, 26818), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'image': 'scaled_image_visual'}), '(image=scaled_image_visual)\n', (26791, 26818), False, 'from PIL import Image, ImageTk\n'), ((26983, 27079), 'Globals.DVH_doseplan_write_image.create_image', 'Globals.DVH_doseplan_write_image.create_image', (['(0)', '(0)'], {'image': 'scaled_image_visual', 'anchor': '"""nw"""'}), "(0, 0, image=\n scaled_image_visual, anchor='nw')\n", (27028, 27079), False, 'import Globals\n'), ((27492, 27544), 'Globals.DVH_doseplan_dataset_ROI_several.append', 'Globals.DVH_doseplan_dataset_ROI_several.append', (['img'], {}), '(img)\n', (27539, 27544), False, 'import Globals\n'), ((28856, 28871), 'numpy.round', 'np.round', (['iso_1'], {}), '(iso_1)\n', (28864, 28871), True, 'import numpy as np\n'), ((28941, 28956), 'numpy.round', 'np.round', (['iso_2'], {}), '(iso_2)\n', (28949, 28956), True, 'import numpy as np\n'), ((29026, 29041), 'numpy.round', 'np.round', (['iso_3'], {}), '(iso_3)\n', (29034, 29041), True, 'import numpy as np\n'), ((42210, 42315), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Fatal Error"""', '"""Fatal error: marked ROI is out of range in doseplan. Try again"""'], {}), "('Fatal Error',\n 'Fatal error: marked ROI is out of range in doseplan. 
Try again')\n", (42230, 42315), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((42534, 42639), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Fatal Error"""', '"""Fatal error: marked ROI is out of range in doseplan. Try again"""'], {}), "('Fatal Error',\n 'Fatal error: marked ROI is out of range in doseplan. Try again')\n", (42554, 42639), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((42901, 43006), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Fatal Error"""', '"""Fatal error: marked ROI is out of range in doseplan. Try again"""'], {}), "('Fatal Error',\n 'Fatal error: marked ROI is out of range in doseplan. Try again')\n", (42921, 43006), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((43433, 43538), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Fatal Error"""', '"""Fatal error: marked ROI is out of range in doseplan. Try again"""'], {}), "('Fatal Error',\n 'Fatal error: marked ROI is out of range in doseplan. Try again')\n", (43453, 43538), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((43979, 44084), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Fatal Error"""', '"""Fatal error: marked ROI is out of range in doseplan. Try again"""'], {}), "('Fatal Error',\n 'Fatal error: marked ROI is out of range in doseplan. Try again')\n", (43999, 44084), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((44153, 44258), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Fatal Error"""', '"""Fatal error: marked ROI is out of range in doseplan. Try again"""'], {}), "('Fatal Error',\n 'Fatal error: marked ROI is out of range in doseplan. Try again')\n", (44173, 44258), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((44732, 44837), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Fatal Error"""', '"""Fatal error: marked ROI is out of range in doseplan. Try again"""'], {}), "('Fatal Error',\n 'Fatal error: marked ROI is out of range in doseplan. 
Try again')\n", (44752, 44837), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((45124, 45229), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Fatal Error"""', '"""Fatal error: marked ROI is out of range in doseplan. Try again"""'], {}), "('Fatal Error',\n 'Fatal error: marked ROI is out of range in doseplan. Try again')\n", (45144, 45229), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((46093, 46253), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""ROI info"""', '("""The ROI marked on the film did not fit with the size of the doseplan and had to be cut.\n"""\n + ROI_info)'], {}), '(\'ROI info\', \n """The ROI marked on the film did not fit with the size of the doseplan and had to be cut.\n"""\n + ROI_info)\n', (46112, 46253), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((47174, 47185), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (47180, 47185), True, 'import numpy as np\n'), ((47521, 47560), 'tkinter.Canvas', 'tk.Canvas', (['Globals.DVH_film_panedwindow'], {}), '(Globals.DVH_film_panedwindow)\n', (47530, 47560), True, 'import tkinter as tk\n'), ((48116, 48142), 'tkinter.Canvas', 'tk.Canvas', (['doseplan_canvas'], {}), '(doseplan_canvas)\n', (48125, 48142), True, 'import tkinter as tk\n'), ((48151, 48227), 'Globals.DVH_doseplan_write_image.grid', 'Globals.DVH_doseplan_write_image.grid', ([], {'row': '(0)', 'column': '(1)', 'sticky': '(N + S + W + E)'}), '(row=0, column=1, sticky=N + S + W + E)\n', (48188, 48227), False, 'import Globals\n'), ((48228, 48344), 'Globals.DVH_doseplan_write_image.config', 'Globals.DVH_doseplan_write_image.config', ([], {'bg': '"""#ffffff"""', 'relief': 'FLAT', 'highlightthickness': '(0)', 'width': 'wid', 'height': 'heig'}), "(bg='#ffffff', relief=FLAT,\n highlightthickness=0, width=wid, height=heig)\n", (48267, 48344), False, 'import Globals\n'), ((48379, 48405), 'tkinter.Canvas', 'tk.Canvas', (['doseplan_canvas'], {}), '(doseplan_canvas)\n', (48388, 48405), True, 'import tkinter as tk\n'), ((48770, 48815), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'image': 'scaled_image_visual'}), '(image=scaled_image_visual)\n', (48788, 48815), False, 'from PIL import Image, ImageTk\n'), ((48980, 49076), 'Globals.DVH_doseplan_write_image.create_image', 'Globals.DVH_doseplan_write_image.create_image', (['(0)', '(0)'], {'image': 'scaled_image_visual', 'anchor': '"""nw"""'}), "(0, 0, image=\n scaled_image_visual, anchor='nw')\n", (49025, 49076), False, 'import Globals\n'), ((49485, 49537), 'Globals.DVH_doseplan_dataset_ROI_several.append', 'Globals.DVH_doseplan_dataset_ROI_several.append', (['img'], {}), '(img)\n', (49532, 49537), False, 'import Globals\n'), ((50738, 50848), 'tkinter.messagebox.askokcancel', 'messagebox.askokcancel', (['"""Dose summation"""', '"""You did not upload the full doseplan. Do you want to continue?"""'], {}), "('Dose summation',\n 'You did not upload the full doseplan. 
Do you want to continue?')\n", (50760, 50848), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((51249, 51342), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""The resolution in doseplan must be 1x1x1, 2x2x2 or 3x3x3"""'], {}), "('Error',\n 'The resolution in doseplan must be 1x1x1, 2x2x2 or 3x3x3')\n", (51269, 51342), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((51828, 51979), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""The Image Orientation (Patient) must be parallel to one of the main axis and perpendicular to the two others."""'], {}), "('Error',\n 'The Image Orientation (Patient) must be parallel to one of the main axis and perpendicular to the two others.'\n )\n", (51848, 51979), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((53106, 53163), 'Globals.DVH_upload_button_doseplan.config', 'Globals.DVH_upload_button_doseplan.config', ([], {'state': 'DISABLED'}), '(state=DISABLED)\n', (53147, 53163), False, 'import Globals\n'), ((53257, 53320), 'tkinter.Text', 'tk.Text', (['Globals.DVH_doseplans_scroll_frame'], {'width': '(30)', 'height': '(1)'}), '(Globals.DVH_doseplans_scroll_frame, width=30, height=1)\n', (53264, 53320), True, 'import tkinter as tk\n'), ((53617, 53728), 'Globals.DVH_doseplans_scroll_frame.grid_columnconfigure', 'Globals.DVH_doseplans_scroll_frame.grid_columnconfigure', (['Globals.DVH_doseplans_grid_config_count'], {'weight': '(0)'}), '(Globals.\n DVH_doseplans_grid_config_count, weight=0)\n', (53672, 53728), False, 'import Globals\n'), ((53732, 53840), 'Globals.DVH_doseplans_scroll_frame.grid_rowconfigure', 'Globals.DVH_doseplans_scroll_frame.grid_rowconfigure', (['Globals.DVH_doseplans_grid_config_count'], {'weight': '(0)'}), '(Globals.\n DVH_doseplans_grid_config_count, weight=0)\n', (53784, 53840), False, 'import Globals\n'), ((53844, 53900), 'Globals.DVH_doseplans_filenames.append', 'Globals.DVH_doseplans_filenames.append', (['textbox_filename'], {}), '(textbox_filename)\n', (53882, 53900), False, 'import Globals\n'), ((53980, 54042), 'tkinter.Text', 'tk.Text', (['Globals.DVH_doseplans_scroll_frame'], {'width': '(6)', 'height': '(1)'}), '(Globals.DVH_doseplans_scroll_frame, width=6, height=1)\n', (53987, 54042), True, 'import tkinter as tk\n'), ((54340, 54451), 'Globals.DVH_doseplans_scroll_frame.grid_columnconfigure', 'Globals.DVH_doseplans_scroll_frame.grid_columnconfigure', (['Globals.DVH_doseplans_grid_config_count'], {'weight': '(0)'}), '(Globals.\n DVH_doseplans_grid_config_count, weight=0)\n', (54395, 54451), False, 'import Globals\n'), ((54455, 54563), 'Globals.DVH_doseplans_scroll_frame.grid_rowconfigure', 'Globals.DVH_doseplans_scroll_frame.grid_rowconfigure', (['Globals.DVH_doseplans_grid_config_count'], {'weight': '(0)'}), '(Globals.\n DVH_doseplans_grid_config_count, weight=0)\n', (54507, 54563), False, 'import Globals\n'), ((54567, 54623), 
'Globals.DVH_doseplans_factor_text.append', 'Globals.DVH_doseplans_factor_text.append', (['textbox_factor'], {}), '(textbox_factor)\n', (54607, 54623), False, 'import Globals\n'), ((54709, 54752), 'tkinter.Text', 'tk.Text', (['Globals.DVH_doseplans_scroll_frame'], {}), '(Globals.DVH_doseplans_scroll_frame)\n', (54716, 54752), True, 'import tkinter as tk\n'), ((55045, 55156), 'Globals.DVH_doseplans_scroll_frame.grid_columnconfigure', 'Globals.DVH_doseplans_scroll_frame.grid_columnconfigure', (['Globals.DVH_doseplans_grid_config_count'], {'weight': '(0)'}), '(Globals.\n DVH_doseplans_grid_config_count, weight=0)\n', (55100, 55156), False, 'import Globals\n'), ((55160, 55268), 'Globals.DVH_doseplans_scroll_frame.grid_rowconfigure', 'Globals.DVH_doseplans_scroll_frame.grid_rowconfigure', (['Globals.DVH_doseplans_grid_config_count'], {'weight': '(0)'}), '(Globals.\n DVH_doseplans_grid_config_count, weight=0)\n', (55212, 55268), False, 'import Globals\n'), ((55272, 55335), 'Globals.DVH_doseplans_factor_input.append', 'Globals.DVH_doseplans_factor_input.append', (['textbox_factor_input'], {}), '(textbox_factor_input)\n', (55313, 55335), False, 'import Globals\n'), ((59396, 59411), 'numpy.max', 'np.max', (['img_ROI'], {}), '(img_ROI)\n', (59402, 59411), True, 'import numpy as np\n'), ((59683, 59722), 'tkinter.Canvas', 'tk.Canvas', (['Globals.DVH_film_panedwindow'], {}), '(Globals.DVH_film_panedwindow)\n', (59692, 59722), True, 'import tkinter as tk\n'), ((60278, 60304), 'tkinter.Canvas', 'tk.Canvas', (['doseplan_canvas'], {}), '(doseplan_canvas)\n', (60287, 60304), True, 'import tkinter as tk\n'), ((60313, 60389), 'Globals.DVH_doseplan_write_image.grid', 'Globals.DVH_doseplan_write_image.grid', ([], {'row': '(0)', 'column': '(1)', 'sticky': '(N + S + W + E)'}), '(row=0, column=1, sticky=N + S + W + E)\n', (60350, 60389), False, 'import Globals\n'), ((60390, 60506), 'Globals.DVH_doseplan_write_image.config', 'Globals.DVH_doseplan_write_image.config', ([], {'bg': '"""#ffffff"""', 'relief': 'FLAT', 'highlightthickness': '(0)', 'width': 'wid', 'height': 'heig'}), "(bg='#ffffff', relief=FLAT,\n highlightthickness=0, width=wid, height=heig)\n", (60429, 60506), False, 'import Globals\n'), ((60541, 60567), 'tkinter.Canvas', 'tk.Canvas', (['doseplan_canvas'], {}), '(doseplan_canvas)\n', (60550, 60567), True, 'import tkinter as tk\n'), ((60932, 60977), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'image': 'scaled_image_visual'}), '(image=scaled_image_visual)\n', (60950, 60977), False, 'from PIL import Image, ImageTk\n'), ((61142, 61238), 'Globals.DVH_doseplan_write_image.create_image', 'Globals.DVH_doseplan_write_image.create_image', (['(0)', '(0)'], {'image': 'scaled_image_visual', 'anchor': '"""nw"""'}), "(0, 0, image=\n scaled_image_visual, anchor='nw')\n", (61187, 61238), False, 'import Globals\n'), ((61554, 61611), 'Globals.DVH_upload_button_doseplan.config', 'Globals.DVH_upload_button_doseplan.config', ([], {'state': 'DISABLED'}), '(state=DISABLED)\n', (61595, 61611), False, 'import Globals\n'), ((65887, 66032), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Fidora does only support patient positions: \n HFS, HFP, HFDR, HFDL, FFP, FFS, FFDR, FFDL"""'], {}), '(\'Error\',\n """Fidora does only support patient positions: \n HFS, HFP, HFDR, HFDL, FFP, FFS, FFDR, FFDL"""\n )\n', (65907, 66032), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, 
ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((75001, 75051), 'Globals.DVH_mark_reference_point_oval.append', 'Globals.DVH_mark_reference_point_oval.append', (['oval'], {}), '(oval)\n', (75045, 75051), False, 'import Globals\n'), ((78799, 78890), 'Globals.DVH_ROI_coords.append', 'Globals.DVH_ROI_coords.append', (['[rectangle_top_corner[0][0], rectangle_top_corner[0][1]]'], {}), '([rectangle_top_corner[0][0],\n rectangle_top_corner[0][1]])\n', (78828, 78890), False, 'import Globals\n'), ((78895, 78989), 'Globals.DVH_ROI_coords.append', 'Globals.DVH_ROI_coords.append', (['[rectangle_bottom_corner[0][0], rectangle_top_corner[0][1]]'], {}), '([rectangle_bottom_corner[0][0],\n rectangle_top_corner[0][1]])\n', (78924, 78989), False, 'import Globals\n'), ((78994, 79088), 'Globals.DVH_ROI_coords.append', 'Globals.DVH_ROI_coords.append', (['[rectangle_top_corner[0][0], rectangle_bottom_corner[0][1]]'], {}), '([rectangle_top_corner[0][0],\n rectangle_bottom_corner[0][1]])\n', (79023, 79088), False, 'import Globals\n'), ((79093, 79190), 'Globals.DVH_ROI_coords.append', 'Globals.DVH_ROI_coords.append', (['[rectangle_bottom_corner[0][0], rectangle_bottom_corner[0][1]]'], {}), '([rectangle_bottom_corner[0][0],\n rectangle_bottom_corner[0][1]])\n', (79122, 79190), False, 'import Globals\n'), ((79427, 79470), 'Globals.DVH_mark_ROI_rectangle.append', 'Globals.DVH_mark_ROI_rectangle.append', (['rect'], {}), '(rect)\n', (79464, 79470), False, 'import Globals\n'), ((80132, 80166), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (80164, 80166), False, 'import Globals\n'), ((80184, 80282), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Missing parameter"""', '"""Film orientation missing \n (Code: UploadFilm)"""'], {}), '(\'Missing parameter\',\n """Film orientation missing \n (Code: UploadFilm)""")\n', (80204, 80282), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((80298, 80348), 'Globals.DVH_film_factor_input.get', 'Globals.DVH_film_factor_input.get', (['"""1.0"""', '"""end-1c"""'], {}), "('1.0', 'end-1c')\n", (80331, 80348), False, 'import Globals\n'), ((80788, 80799), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (80797, 80799), False, 'import os\n'), ((80817, 80838), 'os.path.dirname', 'os.path.dirname', (['file'], {}), '(file)\n', (80832, 80838), False, 'import os\n'), ((80847, 80863), 'os.chdir', 'os.chdir', (['parent'], {}), '(parent)\n', (80855, 80863), False, 'import os\n'), ((80878, 80894), 'PIL.Image.open', 'Image.open', (['file'], {}), '(file)\n', (80888, 80894), False, 'from PIL import Image, ImageTk\n'), ((81060, 81085), 'cv2.medianBlur', 'cv2.medianBlur', (['cv2Img', '(5)'], {}), '(cv2Img, 5)\n', (81074, 81085), False, 'import cv2\n'), ((82056, 82080), 'os.chdir', 'os.chdir', (['current_folder'], {}), '(current_folder)\n', (82064, 82080), False, 'import os\n'), ((82219, 82278), 'Globals.DVH_film_orientation_menu.configure', 'Globals.DVH_film_orientation_menu.configure', ([], {'state': 'DISABLED'}), '(state=DISABLED)\n', (82262, 82278), False, 'import Globals\n'), ((82287, 82339), 'Globals.DVH_film_factor_input.config', 'Globals.DVH_film_factor_input.config', ([], {'state': 'DISABLED'}), '(state=DISABLED)\n', (82323, 82339), False, 'import Globals\n'), ((82405, 82430), 'tkinter.Toplevel', 
'tk.Toplevel', (['Globals.tab5'], {}), '(Globals.tab5)\n', (82416, 82430), True, 'import tkinter as tk\n'), ((82548, 82587), 'tkinter.Frame', 'tk.Frame', (['new_window'], {'bd': '(0)', 'relief': 'FLAT'}), '(new_window, bd=0, relief=FLAT)\n', (82556, 82587), True, 'import tkinter as tk\n'), ((82625, 82658), 'tkinter.Canvas', 'Canvas', (['new_window_over_all_frame'], {}), '(new_window_over_all_frame)\n', (82631, 82658), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((82692, 82794), 'tkinter.Scrollbar', 'Scrollbar', (['new_window_over_all_frame'], {'orient': 'HORIZONTAL', 'command': 'new_window_over_all_canvas.xview'}), '(new_window_over_all_frame, orient=HORIZONTAL, command=\n new_window_over_all_canvas.xview)\n', (82701, 82794), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((82822, 82900), 'tkinter.Scrollbar', 'Scrollbar', (['new_window_over_all_frame'], {'command': 'new_window_over_all_canvas.yview'}), '(new_window_over_all_frame, command=new_window_over_all_canvas.yview)\n', (82831, 82900), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((82936, 82973), 'tkinter.ttk.Frame', 'ttk.Frame', (['new_window_over_all_canvas'], {}), '(new_window_over_all_canvas)\n', (82945, 82973), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((84212, 84265), 'tkinter.Text', 'tk.Text', (['new_window_scroll_frame'], {'height': '(3)', 'width': '(120)'}), '(new_window_scroll_frame, height=3, width=120)\n', (84219, 84265), True, 'import tkinter as tk\n'), ((85251, 85288), 'tkinter.ttk.Notebook', 'ttk.Notebook', (['new_window_scroll_frame'], {}), '(new_window_scroll_frame)\n', (85263, 85288), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((85601, 85631), 'tkinter.ttk.Frame', 'ttk.Frame', (['new_window_notebook'], {}), '(new_window_notebook)\n', (85610, 85631), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((85749, 85779), 'tkinter.ttk.Frame', 'ttk.Frame', (['new_window_notebook'], {}), '(new_window_notebook)\n', (85758, 85779), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((85902, 85932), 'tkinter.ttk.Frame', 'ttk.Frame', (['new_window_notebook'], {}), '(new_window_notebook)\n', 
(85911, 85932), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((86031, 86066), 'tkinter.Canvas', 'tk.Canvas', (['new_window_isocenter_tab'], {}), '(new_window_isocenter_tab)\n', (86040, 86066), True, 'import tkinter as tk\n'), ((86657, 86698), 'tkinter.Canvas', 'tk.Canvas', (['new_window_reference_point_tab'], {}), '(new_window_reference_point_tab)\n', (86666, 86698), True, 'import tkinter as tk\n'), ((87376, 87429), 'tkinter.Text', 'tk.Text', (['new_window_isocenter_tab'], {'width': '(55)', 'height': '(7)'}), '(new_window_isocenter_tab, width=55, height=7)\n', (87383, 87429), True, 'import tkinter as tk\n'), ((88334, 88393), 'tkinter.Text', 'tk.Text', (['new_window_reference_point_tab'], {'width': '(55)', 'height': '(5)'}), '(new_window_reference_point_tab, width=55, height=5)\n', (88341, 88393), True, 'import tkinter as tk\n'), ((89215, 89249), 'tkinter.Frame', 'tk.Frame', (['new_window_isocenter_tab'], {}), '(new_window_isocenter_tab)\n', (89223, 89249), True, 'import tkinter as tk\n'), ((90132, 90172), 'tkinter.Frame', 'tk.Frame', (['new_window_reference_point_tab'], {}), '(new_window_reference_point_tab)\n', (90140, 90172), True, 'import tkinter as tk\n'), ((91097, 91156), 'tkinter.Text', 'tk.Text', (['new_window_reference_point_tab'], {'width': '(55)', 'height': '(3)'}), '(new_window_reference_point_tab, width=55, height=3)\n', (91104, 91156), True, 'import tkinter as tk\n'), ((91835, 91894), 'tkinter.Text', 'tk.Text', (['new_window_reference_point_tab'], {'width': '(12)', 'height': '(1)'}), '(new_window_reference_point_tab, width=12, height=1)\n', (91842, 91894), True, 'import tkinter as tk\n'), ((92336, 92394), 'tkinter.Text', 'tk.Text', (['new_window_reference_point_tab'], {'width': '(5)', 'height': '(1)'}), '(new_window_reference_point_tab, width=5, height=1)\n', (92343, 92394), True, 'import tkinter as tk\n'), ((92403, 92461), 'Globals.DVH_input_lateral_displacement.insert', 'Globals.DVH_input_lateral_displacement.insert', (['INSERT', '""" """'], {}), "(INSERT, ' ')\n", (92448, 92461), False, 'import Globals\n'), ((92470, 92592), 'Globals.DVH_input_lateral_displacement.config', 'Globals.DVH_input_lateral_displacement.config', ([], {'bg': '"""#E5f9ff"""', 'relief': 'GROOVE', 'bd': '(2)', 'state': 'NORMAL', 'font': "('calibri', '11')"}), "(bg='#E5f9ff', relief=GROOVE,\n bd=2, state=NORMAL, font=('calibri', '11'))\n", (92515, 92592), False, 'import Globals\n'), ((92597, 92690), 'Globals.DVH_input_lateral_displacement.grid', 'Globals.DVH_input_lateral_displacement.grid', ([], {'row': '(5)', 'column': '(3)', 'padx': '(0, 285)', 'pady': '(35, 0)'}), '(row=5, column=3, padx=(0, 285),\n pady=(35, 0))\n', (92640, 92690), False, 'import Globals\n'), ((92859, 92918), 'tkinter.Text', 'tk.Text', (['new_window_reference_point_tab'], {'width': '(12)', 'height': '(1)'}), '(new_window_reference_point_tab, width=12, height=1)\n', (92866, 92918), True, 'import tkinter as tk\n'), ((93363, 93421), 'tkinter.Text', 'tk.Text', (['new_window_reference_point_tab'], {'width': '(4)', 'height': '(1)'}), '(new_window_reference_point_tab, width=4, height=1)\n', (93370, 93421), True, 'import tkinter as tk\n'), ((93430, 93489), 'Globals.DVH_input_vertical_displacement.insert', 'Globals.DVH_input_vertical_displacement.insert', (['INSERT', '""" """'], {}), "(INSERT, ' ')\n", (93476, 93489), False, 'import 
Globals\n'), ((93498, 93621), 'Globals.DVH_input_vertical_displacement.config', 'Globals.DVH_input_vertical_displacement.config', ([], {'bg': '"""#E5f9ff"""', 'relief': 'GROOVE', 'bd': '(2)', 'state': 'NORMAL', 'font': "('calibri', '11')"}), "(bg='#E5f9ff', relief=GROOVE,\n bd=2, state=NORMAL, font=('calibri', '11'))\n", (93544, 93621), False, 'import Globals\n'), ((93626, 93719), 'Globals.DVH_input_vertical_displacement.grid', 'Globals.DVH_input_vertical_displacement.grid', ([], {'row': '(5)', 'column': '(3)', 'padx': '(0, 25)', 'pady': '(35, 0)'}), '(row=5, column=3, padx=(0, 25),\n pady=(35, 0))\n', (93670, 93719), False, 'import Globals\n'), ((93887, 93946), 'tkinter.Text', 'tk.Text', (['new_window_reference_point_tab'], {'width': '(12)', 'height': '(1)'}), '(new_window_reference_point_tab, width=12, height=1)\n', (93894, 93946), True, 'import tkinter as tk\n'), ((94389, 94447), 'tkinter.Text', 'tk.Text', (['new_window_reference_point_tab'], {'width': '(5)', 'height': '(1)'}), '(new_window_reference_point_tab, width=5, height=1)\n', (94396, 94447), True, 'import tkinter as tk\n'), ((94456, 94519), 'Globals.DVH_input_longitudinal_displacement.insert', 'Globals.DVH_input_longitudinal_displacement.insert', (['INSERT', '""" """'], {}), "(INSERT, ' ')\n", (94506, 94519), False, 'import Globals\n'), ((94528, 94656), 'Globals.DVH_input_longitudinal_displacement.config', 'Globals.DVH_input_longitudinal_displacement.config', ([], {'bg': '"""#E5f9ff"""', 'relief': 'GROOVE', 'bd': '(2)', 'state': 'NORMAL', 'font': "('calibri', '11')"}), "(bg='#E5f9ff', relief=\n GROOVE, bd=2, state=NORMAL, font=('calibri', '11'))\n", (94578, 94656), False, 'import Globals\n'), ((94660, 94758), 'Globals.DVH_input_longitudinal_displacement.grid', 'Globals.DVH_input_longitudinal_displacement.grid', ([], {'row': '(5)', 'column': '(3)', 'padx': '(240, 0)', 'pady': '(35, 0)'}), '(row=5, column=3, padx=(240,\n 0), pady=(35, 0))\n', (94708, 94758), False, 'import Globals\n'), ((94938, 94991), 'tkinter.Text', 'tk.Text', (['new_window_isocenter_tab'], {'width': '(55)', 'height': '(7)'}), '(new_window_isocenter_tab, width=55, height=7)\n', (94945, 94991), True, 'import tkinter as tk\n'), ((95830, 95889), 'tkinter.Text', 'tk.Text', (['new_window_reference_point_tab'], {'width': '(55)', 'height': '(5)'}), '(new_window_reference_point_tab, width=55, height=5)\n', (95837, 95889), True, 'import tkinter as tk\n'), ((96767, 96801), 'tkinter.Frame', 'tk.Frame', (['new_window_isocenter_tab'], {}), '(new_window_isocenter_tab)\n', (96775, 96801), True, 'import tkinter as tk\n'), ((97615, 97668), 'tkinter.Text', 'tk.Text', (['new_window_isocenter_tab'], {'width': '(25)', 'height': '(1)'}), '(new_window_isocenter_tab, width=25, height=1)\n', (97622, 97668), True, 'import tkinter as tk\n'), ((98081, 98133), 'tkinter.Text', 'tk.Text', (['new_window_isocenter_tab'], {'width': '(8)', 'height': '(1)'}), '(new_window_isocenter_tab, width=8, height=1)\n', (98088, 98133), True, 'import tkinter as tk\n'), ((98142, 98217), 'Globals.DVH_slice_offset.grid', 'Globals.DVH_slice_offset.grid', ([], {'row': '(9)', 'column': '(3)', 'padx': '(110, 10)', 'pady': '(0, 0)'}), '(row=9, column=3, padx=(110, 10), pady=(0, 0))\n', (98171, 98217), False, 'import Globals\n'), ((98224, 98268), 'Globals.DVH_slice_offset.insert', 'Globals.DVH_slice_offset.insert', (['INSERT', '""" """'], {}), "(INSERT, ' ')\n", (98255, 98268), False, 'import Globals\n'), ((98277, 98370), 'Globals.DVH_slice_offset.config', 'Globals.DVH_slice_offset.config', ([], {'state': 'NORMAL', 
'font': "('calibri', '10')", 'bd': '(2)', 'bg': '"""#ffffff"""'}), "(state=NORMAL, font=('calibri', '10'), bd=2,\n bg='#ffffff')\n", (98308, 98370), False, 'import Globals\n'), ((98549, 98589), 'tkinter.Frame', 'tk.Frame', (['new_window_reference_point_tab'], {}), '(new_window_reference_point_tab)\n', (98557, 98589), True, 'import tkinter as tk\n'), ((115356, 115390), 'tkinter.Frame', 'tk.Frame', (['new_window_isocenter_tab'], {}), '(new_window_isocenter_tab)\n', (115364, 115390), True, 'import tkinter as tk\n'), ((115905, 115957), 'Globals.DVH_done_button.pack', 'Globals.DVH_done_button.pack', ([], {'expand': '(True)', 'fill': 'BOTH'}), '(expand=True, fill=BOTH)\n', (115933, 115957), False, 'import Globals\n'), ((115966, 116092), 'Globals.DVH_done_button.config', 'Globals.DVH_done_button.config', ([], {'bg': '"""#ffffff"""', 'activebackground': '"""#ffffff"""', 'activeforeground': '"""#ffffff"""', 'highlightthickness': '(0)'}), "(bg='#ffffff', activebackground='#ffffff',\n activeforeground='#ffffff', highlightthickness=0)\n", (115996, 116092), False, 'import Globals\n'), ((116198, 116238), 'tkinter.Frame', 'tk.Frame', (['new_window_reference_point_tab'], {}), '(new_window_reference_point_tab)\n', (116206, 116238), True, 'import tkinter as tk\n'), ((116827, 116895), 'Globals.DVH_done_button_reference_point.pack', 'Globals.DVH_done_button_reference_point.pack', ([], {'expand': '(True)', 'fill': 'BOTH'}), '(expand=True, fill=BOTH)\n', (116871, 116895), False, 'import Globals\n'), ((116904, 117050), 'Globals.DVH_done_button_reference_point.config', 'Globals.DVH_done_button_reference_point.config', ([], {'bg': '"""#ffffff"""', 'activebackground': '"""#ffffff"""', 'activeforeground': '"""#ffffff"""', 'highlightthickness': '(0)'}), "(bg='#ffffff',\n activebackground='#ffffff', activeforeground='#ffffff',\n highlightthickness=0)\n", (116950, 117050), False, 'import Globals\n'), ((1482, 1603), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Could not read the vertical displacements\n (Code: displacements to integer)"""'], {}), '(\'Error\',\n """Could not read the vertical displacements\n (Code: displacements to integer)"""\n )\n', (1502, 1603), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((1691, 1811), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Could not read the lateral displacements\n (Code: displacements to integer)"""'], {}), '(\'Error\',\n """Could not read the lateral displacements\n (Code: displacements to integer)"""\n )\n', (1711, 1811), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((1909, 2034), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Could not read the longitudinal displacements\n (Code: displacements to integer)"""'], {}), '(\'Error\',\n """Could not read the longitudinal displacements\n (Code: displacements to integer)"""\n )\n', (1929, 2034), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, 
RIGHT, LEFT, ttk\n'), ((3905, 3924), 'numpy.round', 'np.round', (['(iso_1 / 2)'], {}), '(iso_1 / 2)\n', (3913, 3924), True, 'import numpy as np\n'), ((3949, 3968), 'numpy.round', 'np.round', (['(iso_2 / 2)'], {}), '(iso_2 / 2)\n', (3957, 3968), True, 'import numpy as np\n'), ((3993, 4012), 'numpy.round', 'np.round', (['(iso_3 / 2)'], {}), '(iso_3 / 2)\n', (4001, 4012), True, 'import numpy as np\n'), ((4916, 4937), 'numpy.round', 'np.round', (['(lateral / 2)'], {}), '(lateral / 2)\n', (4924, 4937), True, 'import numpy as np\n'), ((4958, 4980), 'numpy.round', 'np.round', (['(vertical / 2)'], {}), '(vertical / 2)\n', (4966, 4980), True, 'import numpy as np\n'), ((4999, 5019), 'numpy.round', 'np.round', (['(longit / 2)'], {}), '(longit / 2)\n', (5007, 5019), True, 'import numpy as np\n'), ((5089, 5144), 'numpy.round', 'np.round', (['(Globals.DVH_doseplan_lateral_displacement / 2)'], {}), '(Globals.DVH_doseplan_lateral_displacement / 2)\n', (5097, 5144), True, 'import numpy as np\n'), ((5189, 5245), 'numpy.round', 'np.round', (['(Globals.DVH_doseplan_vertical_displacement / 2)'], {}), '(Globals.DVH_doseplan_vertical_displacement / 2)\n', (5197, 5245), True, 'import numpy as np\n'), ((5294, 5354), 'numpy.round', 'np.round', (['(Globals.DVH_doseplan_longitudianl_displacement / 2)'], {}), '(Globals.DVH_doseplan_longitudianl_displacement / 2)\n', (5302, 5354), True, 'import numpy as np\n'), ((5446, 5465), 'numpy.round', 'np.round', (['(iso_1 / 3)'], {}), '(iso_1 / 3)\n', (5454, 5465), True, 'import numpy as np\n'), ((5490, 5509), 'numpy.round', 'np.round', (['(iso_2 / 3)'], {}), '(iso_2 / 3)\n', (5498, 5509), True, 'import numpy as np\n'), ((5534, 5553), 'numpy.round', 'np.round', (['(iso_3 / 3)'], {}), '(iso_3 / 3)\n', (5542, 5553), True, 'import numpy as np\n'), ((6457, 6478), 'numpy.round', 'np.round', (['(lateral / 3)'], {}), '(lateral / 3)\n', (6465, 6478), True, 'import numpy as np\n'), ((6499, 6521), 'numpy.round', 'np.round', (['(vertical / 3)'], {}), '(vertical / 3)\n', (6507, 6521), True, 'import numpy as np\n'), ((6540, 6560), 'numpy.round', 'np.round', (['(longit / 3)'], {}), '(longit / 3)\n', (6548, 6560), True, 'import numpy as np\n'), ((6630, 6685), 'numpy.round', 'np.round', (['(Globals.DVH_doseplan_lateral_displacement / 3)'], {}), '(Globals.DVH_doseplan_lateral_displacement / 3)\n', (6638, 6685), True, 'import numpy as np\n'), ((6730, 6786), 'numpy.round', 'np.round', (['(Globals.DVH_doseplan_vertical_displacement / 3)'], {}), '(Globals.DVH_doseplan_vertical_displacement / 3)\n', (6738, 6786), True, 'import numpy as np\n'), ((6835, 6895), 'numpy.round', 'np.round', (['(Globals.DVH_doseplan_longitudianl_displacement / 3)'], {}), '(Globals.DVH_doseplan_longitudianl_displacement / 3)\n', (6843, 6895), True, 'import numpy as np\n'), ((10493, 10527), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (10525, 10527), False, 'import Globals\n'), ((10571, 10630), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(0)', '(1)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 0, 1)\n', (10582, 10630), True, 'import numpy as np\n'), ((24937, 24996), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': '(img.shape[1] * 5, img.shape[0] * 5)'}), '(img, dsize=(img.shape[1] * 5, img.shape[0] * 5))\n', (24947, 24996), False, 'import cv2\n'), ((29952, 29971), 'numpy.round', 'np.round', (['(iso_1 / 2)'], {}), '(iso_1 / 2)\n', (29960, 29971), True, 'import numpy as np\n'), ((30041, 30060), 'numpy.round', 'np.round', (['(iso_2 / 2)'], 
{}), '(iso_2 / 2)\n', (30049, 30060), True, 'import numpy as np\n'), ((30130, 30149), 'numpy.round', 'np.round', (['(iso_3 / 2)'], {}), '(iso_3 / 2)\n', (30138, 30149), True, 'import numpy as np\n'), ((31045, 31064), 'numpy.round', 'np.round', (['(iso_1 / 3)'], {}), '(iso_1 / 3)\n', (31053, 31064), True, 'import numpy as np\n'), ((31134, 31153), 'numpy.round', 'np.round', (['(iso_2 / 3)'], {}), '(iso_2 / 3)\n', (31142, 31153), True, 'import numpy as np\n'), ((31223, 31242), 'numpy.round', 'np.round', (['(iso_3 / 3)'], {}), '(iso_3 / 3)\n', (31231, 31242), True, 'import numpy as np\n'), ((32645, 32679), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (32677, 32679), False, 'import Globals\n'), ((32723, 32782), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(0)', '(1)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 0, 1)\n', (32734, 32782), True, 'import numpy as np\n'), ((40739, 40767), 'numpy.round', 'np.round', (['Globals.DVH_offset'], {}), '(Globals.DVH_offset)\n', (40747, 40767), True, 'import numpy as np\n'), ((46878, 46937), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': '(img.shape[1] * 5, img.shape[0] * 5)'}), '(img, dsize=(img.shape[1] * 5, img.shape[0] * 5))\n', (46888, 46937), False, 'import cv2\n'), ((50238, 50300), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""The file must be a *.dcm file"""'], {}), "('Error', 'The file must be a *.dcm file')\n", (50258, 50300), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((50535, 50672), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Could not upload the doseplan correctly. Try again or another file.\n (Code: dose summation)"""'], {}), '(\'Error\',\n """Could not upload the doseplan correctly. Try again or another file.\n (Code: dose summation)"""\n )\n', (50555, 50672), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((52147, 52254), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Resolution of the doseplans must be equal. \n(Code: UploadDoseplan)"""'], {}), '(\'Error\',\n """Resolution of the doseplans must be equal. \n(Code: UploadDoseplan)""")\n', (52167, 52254), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((52370, 52489), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Dose grid scaling of the doseplans must be equal. \n(Code: UploadDoseplan)"""'], {}), '(\'Error\',\n """Dose grid scaling of the doseplans must be equal. 
\n(Code: UploadDoseplan)"""\n )\n', (52390, 52489), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((53214, 53228), 'os.path.normpath', 'normpath', (['file'], {}), '(file)\n', (53222, 53228), False, 'from os.path import normpath, basename\n'), ((58410, 58470), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""INFO"""', '"""No doseplan has been uploaded"""'], {}), "('INFO', 'No doseplan has been uploaded')\n", (58429, 58470), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((63748, 63810), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""The file must be a *.dcm file"""'], {}), "('Error', 'The file must be a *.dcm file')\n", (63768, 63810), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((64273, 64420), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Could not read the RT plan file. Try again or try another file.\n (Code: isocenter reading)"""'], {}), '(\'Error\',\n """Could not read the RT plan file. Try again or try another file.\n (Code: isocenter reading)"""\n )\n', (64293, 64420), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((64575, 64733), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Could not read the RT plan file. Try again or try another file. \n (Code: vertical table displacement)"""'], {}), '(\'Error\',\n """Could not read the RT plan file. Try again or try another file. \n (Code: vertical table displacement)"""\n )\n', (64595, 64733), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((64871, 65027), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Could not read the RT plan file. Try again or try another file-\n (Code: lateral table displacement)"""'], {}), '(\'Error\',\n """Could not read the RT plan file. Try again or try another file-\n (Code: lateral table displacement)"""\n )\n', (64891, 65027), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((65175, 65335), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Could not read the RT plan file. Try again or try another file\n (Code: longitudinal table displacement)"""'], {}), '(\'Error\',\n """Could not read the RT plan file. 
Try again or try another file\n (Code: longitudinal table displacement)"""\n )\n', (65195, 65335), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((65496, 65641), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Could not read the RT plan file. Try again or try another file\n (Code: Patient position)"""'], {}), '(\'Error\',\n """Could not read the RT plan file. Try again or try another file\n (Code: Patient position)"""\n )\n', (65516, 65641), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((69654, 69709), 'Globals.DVH_iscoenter_coords.append', 'Globals.DVH_iscoenter_coords.append', (['[event.x, event.y]'], {}), '([event.x, event.y])\n', (69689, 69709), False, 'import Globals\n'), ((75259, 75319), 'Globals.DVH_done_button_reference_point.config', 'Globals.DVH_done_button_reference_point.config', ([], {'state': 'ACTIVE'}), '(state=ACTIVE)\n', (75305, 75319), False, 'import Globals\n'), ((81126, 81235), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Something has gone wrong. Check that the filename does not contain Æ,Ø,Å"""'], {}), "('Error',\n 'Something has gone wrong. Check that the filename does not contain Æ,Ø,Å')\n", (81146, 81235), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((81952, 82028), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""The uploaded image need to be in RGB-format"""'], {}), "('Error', 'The uploaded image need to be in RGB-format')\n", (81972, 82028), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((82131, 82190), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Dpi in image has to be 127"""'], {}), "('Error', 'Dpi in image has to be 127')\n", (82151, 82190), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((99572, 99619), 'Globals.DVH_slice_offset.config', 'Globals.DVH_slice_offset.config', ([], {'state': 'DISABLED'}), '(state=DISABLED)\n', (99603, 99619), False, 'import Globals\n'), ((102285, 102314), 'tkinter.Frame', 'tk.Frame', (['choose_batch_window'], {}), '(choose_batch_window)\n', (102293, 102314), True, 'import tkinter as tk\n'), ((108381, 108409), 'tkinter.Frame', 'tk.Frame', (['choose_batch_frame'], {}), '(choose_batch_frame)\n', (108389, 108409), True, 'import tkinter as tk\n'), ((108748, 108920), 'tkinter.Button', 'tk.Button', (['set_batch_button_frame'], {'text': '"""OK"""', 'image': 'Globals.done_button_image', 'cursor': '"""hand2"""', 'font': "('calibri', '14')", 'relief': 'FLAT', 'state': 'ACTIVE', 'command': 
'set_batch'}), "(set_batch_button_frame, text='OK', image=Globals.\n done_button_image, cursor='hand2', font=('calibri', '14'), relief=FLAT,\n state=ACTIVE, command=set_batch)\n", (108757, 108920), True, 'import tkinter as tk\n'), ((109707, 109730), 'numpy.zeros', 'np.zeros', (['img_ROI.shape'], {}), '(img_ROI.shape)\n', (109715, 109730), True, 'import numpy as np\n'), ((109895, 109930), 'PIL.Image.fromarray', 'Image.fromarray', (['PIL_img_ROI', '"""RGB"""'], {}), "(PIL_img_ROI, 'RGB')\n", (109910, 109930), False, 'from PIL import Image, ImageTk\n'), ((110201, 110240), 'tkinter.Canvas', 'tk.Canvas', (['Globals.DVH_film_panedwindow'], {}), '(Globals.DVH_film_panedwindow)\n', (110210, 110240), True, 'import tkinter as tk\n'), ((110834, 110873), 'tkinter.Canvas', 'tk.Canvas', (['Globals.DVH_film_panedwindow'], {}), '(Globals.DVH_film_panedwindow)\n', (110843, 110873), True, 'import tkinter as tk\n'), ((111476, 111504), 'tkinter.Canvas', 'tk.Canvas', (['film_image_canvas'], {}), '(film_image_canvas)\n', (111485, 111504), True, 'import tkinter as tk\n'), ((111517, 111589), 'Globals.DVH_film_write_image.grid', 'Globals.DVH_film_write_image.grid', ([], {'row': '(0)', 'column': '(1)', 'sticky': '(N + S + W + E)'}), '(row=0, column=1, sticky=N + S + W + E)\n', (111550, 111589), False, 'import Globals\n'), ((111594, 111706), 'Globals.DVH_film_write_image.config', 'Globals.DVH_film_write_image.config', ([], {'bg': '"""#ffffff"""', 'relief': 'FLAT', 'highlightthickness': '(0)', 'width': 'wid', 'height': 'heig'}), "(bg='#ffffff', relief=FLAT,\n highlightthickness=0, width=wid, height=heig)\n", (111629, 111706), False, 'import Globals\n'), ((111764, 111791), 'tkinter.Canvas', 'tk.Canvas', (['film_dose_canvas'], {}), '(film_dose_canvas)\n', (111773, 111791), True, 'import tkinter as tk\n'), ((111804, 111881), 'Globals.DVH_film_dose_write_image.grid', 'Globals.DVH_film_dose_write_image.grid', ([], {'row': '(0)', 'column': '(1)', 'sticky': '(N + S + W + E)'}), '(row=0, column=1, sticky=N + S + W + E)\n', (111842, 111881), False, 'import Globals\n'), ((111886, 112003), 'Globals.DVH_film_dose_write_image.config', 'Globals.DVH_film_dose_write_image.config', ([], {'bg': '"""#ffffff"""', 'relief': 'FLAT', 'highlightthickness': '(0)', 'width': 'wid', 'height': 'heig'}), "(bg='#ffffff', relief=FLAT,\n highlightthickness=0, width=wid, height=heig)\n", (111926, 112003), False, 'import Globals\n'), ((112055, 112083), 'tkinter.Canvas', 'tk.Canvas', (['film_image_canvas'], {}), '(film_image_canvas)\n', (112064, 112083), True, 'import tkinter as tk\n'), ((112441, 112468), 'tkinter.Canvas', 'tk.Canvas', (['film_dose_canvas'], {}), '(film_dose_canvas)\n', (112450, 112468), True, 'import tkinter as tk\n'), ((112864, 112909), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'image': 'scaled_image_visual'}), '(image=scaled_image_visual)\n', (112882, 112909), False, 'from PIL import Image, ImageTk\n'), ((112923, 112980), 'Globals.DVH_upload_button_doseplan.config', 'Globals.DVH_upload_button_doseplan.config', ([], {'state': 'DISABLED'}), '(state=DISABLED)\n', (112964, 112980), False, 'import Globals\n'), ((112993, 113046), 'Globals.DVH_upload_button_rtplan.config', 'Globals.DVH_upload_button_rtplan.config', ([], {'state': 'ACTIVE'}), '(state=ACTIVE)\n', (113032, 113046), False, 'import Globals\n'), ((113059, 113112), 'Globals.DVH_upload_button_film.config', 'Globals.DVH_upload_button_film.config', ([], {'state': 'DISABLED'}), '(state=DISABLED)\n', (113096, 113112), False, 'import Globals\n'), ((117178, 117240), 
'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""The file must be a *.tif file"""'], {}), "('Error', 'The file must be a *.tif file')\n", (117198, 117240), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((2599, 2655), 'numpy.round', 'np.round', (['Globals.DVH_distance_reference_point_ROI[0][0]'], {}), '(Globals.DVH_distance_reference_point_ROI[0][0])\n', (2607, 2655), True, 'import numpy as np\n'), ((2670, 2726), 'numpy.round', 'np.round', (['Globals.DVH_distance_reference_point_ROI[0][1]'], {}), '(Globals.DVH_distance_reference_point_ROI[0][1])\n', (2678, 2726), True, 'import numpy as np\n'), ((2789, 2845), 'numpy.round', 'np.round', (['Globals.DVH_distance_reference_point_ROI[1][0]'], {}), '(Globals.DVH_distance_reference_point_ROI[1][0])\n', (2797, 2845), True, 'import numpy as np\n'), ((2860, 2916), 'numpy.round', 'np.round', (['Globals.DVH_distance_reference_point_ROI[1][1]'], {}), '(Globals.DVH_distance_reference_point_ROI[1][1])\n', (2868, 2916), True, 'import numpy as np\n'), ((2979, 3035), 'numpy.round', 'np.round', (['Globals.DVH_distance_reference_point_ROI[2][0]'], {}), '(Globals.DVH_distance_reference_point_ROI[2][0])\n', (2987, 3035), True, 'import numpy as np\n'), ((3050, 3106), 'numpy.round', 'np.round', (['Globals.DVH_distance_reference_point_ROI[2][1]'], {}), '(Globals.DVH_distance_reference_point_ROI[2][1])\n', (3058, 3106), True, 'import numpy as np\n'), ((3169, 3225), 'numpy.round', 'np.round', (['Globals.DVH_distance_reference_point_ROI[3][0]'], {}), '(Globals.DVH_distance_reference_point_ROI[3][0])\n', (3177, 3225), True, 'import numpy as np\n'), ((3240, 3296), 'numpy.round', 'np.round', (['Globals.DVH_distance_reference_point_ROI[3][1]'], {}), '(Globals.DVH_distance_reference_point_ROI[3][1])\n', (3248, 3296), True, 'import numpy as np\n'), ((10779, 10813), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (10811, 10813), False, 'import Globals\n'), ((10858, 10917), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(0)', '(2)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 0, 2)\n', (10869, 10917), True, 'import numpy as np\n'), ((11578, 11612), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (11610, 11612), False, 'import Globals\n'), ((25075, 25136), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': '(img.shape[1] * 10, img.shape[0] * 10)'}), '(img, dsize=(img.shape[1] * 10, img.shape[0] * 10))\n', (25085, 25136), False, 'import cv2\n'), ((25164, 25225), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': '(img.shape[1] * 15, img.shape[0] * 15)'}), '(img, dsize=(img.shape[1] * 15, img.shape[0] * 15))\n', (25174, 25225), False, 'import cv2\n'), ((26591, 26635), 'Globals.profiles_doseplan_text_image.width', 'Globals.profiles_doseplan_text_image.width', ([], {}), '()\n', (26633, 26635), False, 'import Globals\n'), ((26644, 26689), 'Globals.profiles_doseplan_text_image.height', 'Globals.profiles_doseplan_text_image.height', ([], {}), '()\n', (26687, 26689), False, 'import Globals\n'), ((27695, 27754), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': '(img.shape[1] * 5, img.shape[0] * 5)'}), '(img, dsize=(img.shape[1] * 5, img.shape[0] * 5))\n', (27705, 27754), False, 'import cv2\n'), ((29208, 29258), 'numpy.round', 'np.round', 
(['Globals.DVH_distance_isocenter_ROI[0][0]'], {}), '(Globals.DVH_distance_isocenter_ROI[0][0])\n', (29216, 29258), True, 'import numpy as np\n'), ((29273, 29323), 'numpy.round', 'np.round', (['Globals.DVH_distance_isocenter_ROI[0][1]'], {}), '(Globals.DVH_distance_isocenter_ROI[0][1])\n', (29281, 29323), True, 'import numpy as np\n'), ((29386, 29436), 'numpy.round', 'np.round', (['Globals.DVH_distance_isocenter_ROI[1][0]'], {}), '(Globals.DVH_distance_isocenter_ROI[1][0])\n', (29394, 29436), True, 'import numpy as np\n'), ((29451, 29501), 'numpy.round', 'np.round', (['Globals.DVH_distance_isocenter_ROI[1][1]'], {}), '(Globals.DVH_distance_isocenter_ROI[1][1])\n', (29459, 29501), True, 'import numpy as np\n'), ((29564, 29614), 'numpy.round', 'np.round', (['Globals.DVH_distance_isocenter_ROI[2][0]'], {}), '(Globals.DVH_distance_isocenter_ROI[2][0])\n', (29572, 29614), True, 'import numpy as np\n'), ((29629, 29679), 'numpy.round', 'np.round', (['Globals.DVH_distance_isocenter_ROI[2][1]'], {}), '(Globals.DVH_distance_isocenter_ROI[2][1])\n', (29637, 29679), True, 'import numpy as np\n'), ((29742, 29792), 'numpy.round', 'np.round', (['Globals.DVH_distance_isocenter_ROI[3][0]'], {}), '(Globals.DVH_distance_isocenter_ROI[3][0])\n', (29750, 29792), True, 'import numpy as np\n'), ((29807, 29857), 'numpy.round', 'np.round', (['Globals.DVH_distance_isocenter_ROI[3][1]'], {}), '(Globals.DVH_distance_isocenter_ROI[3][1])\n', (29815, 29857), True, 'import numpy as np\n'), ((32931, 32965), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (32963, 32965), False, 'import Globals\n'), ((33010, 33069), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(0)', '(2)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 0, 2)\n', (33021, 33069), True, 'import numpy as np\n'), ((33697, 33731), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (33729, 33731), False, 'import Globals\n'), ((40923, 40955), 'numpy.round', 'np.round', (['(Globals.DVH_offset / 2)'], {}), '(Globals.DVH_offset / 2)\n', (40931, 40955), True, 'import numpy as np\n'), ((41057, 41089), 'numpy.round', 'np.round', (['(Globals.DVH_offset / 3)'], {}), '(Globals.DVH_offset / 3)\n', (41065, 41089), True, 'import numpy as np\n'), ((47016, 47077), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': '(img.shape[1] * 10, img.shape[0] * 10)'}), '(img, dsize=(img.shape[1] * 10, img.shape[0] * 10))\n', (47026, 47077), False, 'import cv2\n'), ((47105, 47166), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': '(img.shape[1] * 15, img.shape[0] * 15)'}), '(img, dsize=(img.shape[1] * 15, img.shape[0] * 15))\n', (47115, 47166), False, 'import cv2\n'), ((48588, 48632), 'Globals.profiles_doseplan_text_image.width', 'Globals.profiles_doseplan_text_image.width', ([], {}), '()\n', (48630, 48632), False, 'import Globals\n'), ((48641, 48686), 'Globals.profiles_doseplan_text_image.height', 'Globals.profiles_doseplan_text_image.height', ([], {}), '()\n', (48684, 48686), False, 'import Globals\n'), ((49688, 49747), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': '(img.shape[1] * 5, img.shape[0] * 5)'}), '(img, dsize=(img.shape[1] * 5, img.shape[0] * 5))\n', (49698, 49747), False, 'import cv2\n'), ((50107, 50129), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (50123, 50129), False, 'import os\n'), ((52969, 53067), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Something went wrong. 
Try again.\n (Code: processDoseplan)"""'], {}), '(\'Error\',\n """Something went wrong. Try again.\n (Code: processDoseplan)""")\n', (52989, 53067), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((60750, 60794), 'Globals.profiles_doseplan_text_image.width', 'Globals.profiles_doseplan_text_image.width', ([], {}), '()\n', (60792, 60794), False, 'import Globals\n'), ((60803, 60848), 'Globals.profiles_doseplan_text_image.height', 'Globals.profiles_doseplan_text_image.height', ([], {}), '()\n', (60846, 60848), False, 'import Globals\n'), ((63617, 63639), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (63633, 63639), False, 'import os\n'), ((69838, 69893), 'Globals.DVH_iscoenter_coords.append', 'Globals.DVH_iscoenter_coords.append', (['[event.x, event.y]'], {}), '([event.x, event.y])\n', (69873, 69893), False, 'import Globals\n'), ((70740, 70800), 'Globals.DVH_mark_isocenter_up_down_line.append', 'Globals.DVH_mark_isocenter_up_down_line.append', (['up_down_line'], {}), '(up_down_line)\n', (70786, 70800), False, 'import Globals\n'), ((70813, 70879), 'Globals.DVH_mark_isocenter_right_left_line.append', 'Globals.DVH_mark_isocenter_right_left_line.append', (['right_left_line'], {}), '(right_left_line)\n', (70862, 70879), False, 'import Globals\n'), ((70892, 70936), 'Globals.DVH_mark_isocenter_oval.append', 'Globals.DVH_mark_isocenter_oval.append', (['oval'], {}), '(oval)\n', (70930, 70936), False, 'import Globals\n'), ((79624, 79684), 'Globals.DVH_done_button_reference_point.config', 'Globals.DVH_done_button_reference_point.config', ([], {'state': 'ACTIVE'}), '(state=ACTIVE)\n', (79670, 79684), False, 'import Globals\n'), ((79801, 79845), 'Globals.DVH_done_button.config', 'Globals.DVH_done_button.config', ([], {'state': 'ACTIVE'}), '(state=ACTIVE)\n', (79831, 79845), False, 'import Globals\n'), ((80460, 80510), 'Globals.DVH_film_factor_input.get', 'Globals.DVH_film_factor_input.get', (['"""1.0"""', '"""end-1c"""'], {}), "('1.0', 'end-1c')\n", (80493, 80510), False, 'import Globals\n'), ((80540, 80641), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Missing parameter"""', '"""Film factor invalid format. \n (Code: UploadFilm)"""'], {}), '(\'Missing parameter\',\n """Film factor invalid format. 
\n (Code: UploadFilm)""")\n', (80560, 80641), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((80705, 80727), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (80721, 80727), False, 'import os\n'), ((80983, 80997), 'os.path.normpath', 'normpath', (['file'], {}), '(file)\n', (80991, 80997), False, 'from os.path import normpath, basename\n'), ((81440, 81465), 'numpy.clip', 'np.clip', (['cv2Img', '(0)', '(65535)'], {}), '(cv2Img, 0, 65535)\n', (81447, 81465), True, 'import numpy as np\n'), ((81491, 81510), 'cv2.flip', 'cv2.flip', (['cv2Img', '(1)'], {}), '(cv2Img, 1)\n', (81499, 81510), False, 'import cv2\n'), ((81608, 81644), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'image': 'img_scaled'}), '(image=img_scaled)\n', (81626, 81644), False, 'from PIL import Image, ImageTk\n'), ((81816, 81907), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""The resolution of the image is not consistent with dpi"""'], {}), "('Error',\n 'The resolution of the image is not consistent with dpi')\n", (81836, 81907), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((101224, 101286), 'Globals.DVH_input_vertical_displacement.config', 'Globals.DVH_input_vertical_displacement.config', ([], {'state': 'DISABLED'}), '(state=DISABLED)\n', (101270, 101286), False, 'import Globals\n'), ((101303, 101369), 'Globals.DVH_input_longitudinal_displacement.config', 'Globals.DVH_input_longitudinal_displacement.config', ([], {'state': 'DISABLED'}), '(state=DISABLED)\n', (101353, 101369), False, 'import Globals\n'), ((101386, 101447), 'Globals.DVH_input_lateral_displacement.config', 'Globals.DVH_input_lateral_displacement.config', ([], {'state': 'DISABLED'}), '(state=DISABLED)\n', (101431, 101447), False, 'import Globals\n'), ((102011, 102054), 'tkinter.Toplevel', 'tk.Toplevel', (['new_window_reference_point_tab'], {}), '(new_window_reference_point_tab)\n', (102022, 102054), True, 'import tkinter as tk\n'), ((102111, 102148), 'tkinter.Toplevel', 'tk.Toplevel', (['new_window_isocenter_tab'], {}), '(new_window_isocenter_tab)\n', (102122, 102148), True, 'import tkinter as tk\n'), ((102814, 102861), 'tkinter.Text', 'tk.Text', (['choose_batch_frame'], {'width': '(10)', 'height': '(1)'}), '(choose_batch_frame, width=10, height=1)\n', (102821, 102861), True, 'import tkinter as tk\n'), ((103332, 103379), 'tkinter.Text', 'tk.Text', (['choose_batch_frame'], {'width': '(20)', 'height': '(1)'}), '(choose_batch_frame, width=20, height=1)\n', (103339, 103379), True, 'import tkinter as tk\n'), ((103833, 103879), 'tkinter.Text', 'tk.Text', (['choose_batch_frame'], {'width': '(8)', 'height': '(1)'}), '(choose_batch_frame, width=8, height=1)\n', (103840, 103879), True, 'import tkinter as tk\n'), ((104350, 104397), 'tkinter.Text', 'tk.Text', (['choose_batch_frame'], {'width': '(30)', 'height': '(1)'}), '(choose_batch_frame, width=30, height=1)\n', (104357, 104397), True, 'import tkinter as tk\n'), ((105813, 105930), 'numpy.zeros', 'np.zeros', (['(Globals.DVH_film_dataset_ROI_red_channel.shape[0], Globals.\n DVH_film_dataset_ROI_red_channel.shape[1])'], {}), 
'((Globals.DVH_film_dataset_ROI_red_channel.shape[0], Globals.\n DVH_film_dataset_ROI_red_channel.shape[1]))\n', (105821, 105930), True, 'import numpy as np\n'), ((106485, 106594), 'numpy.zeros', 'np.zeros', (['(Globals.DVH_film_dataset_red_channel.shape[0], Globals.\n DVH_film_dataset_red_channel.shape[1])'], {}), '((Globals.DVH_film_dataset_red_channel.shape[0], Globals.\n DVH_film_dataset_red_channel.shape[1]))\n', (106493, 106594), True, 'import numpy as np\n'), ((107089, 107180), 'Globals.DVH_film_write_image.create_image', 'Globals.DVH_film_write_image.create_image', (['(0)', '(0)'], {'image': 'scaled_image_visual', 'anchor': '"""nw"""'}), "(0, 0, image=scaled_image_visual,\n anchor='nw')\n", (107130, 107180), False, 'import Globals\n'), ((107273, 107326), 'numpy.max', 'np.max', (['Globals.DVH_film_dataset_ROI_red_channel_dose'], {}), '(Globals.DVH_film_dataset_ROI_red_channel_dose)\n', (107279, 107326), True, 'import numpy as np\n'), ((107623, 107661), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'image': 'PIL_img_film'}), '(image=PIL_img_film)\n', (107641, 107661), False, 'from PIL import Image, ImageTk\n'), ((107678, 107780), 'Globals.DVH_film_dose_write_image.create_image', 'Globals.DVH_film_dose_write_image.create_image', (['(0)', '(0)'], {'image': 'scaled_image_visual_film', 'anchor': '"""nw"""'}), "(0, 0, image=\n scaled_image_visual_film, anchor='nw')\n", (107724, 107780), False, 'import Globals\n'), ((113348, 113559), 'Globals.DVH_distance_reference_point_ROI.append', 'Globals.DVH_distance_reference_point_ROI.append', (['[(Globals.DVH_film_reference_point[0] - Globals.DVH_ROI_coords[0][0]) * 0.2,\n (Globals.DVH_film_reference_point[1] - Globals.DVH_ROI_coords[0][1]) * 0.2]'], {}), '([(Globals.\n DVH_film_reference_point[0] - Globals.DVH_ROI_coords[0][0]) * 0.2, (\n Globals.DVH_film_reference_point[1] - Globals.DVH_ROI_coords[0][1]) * 0.2])\n', (113395, 113559), False, 'import Globals\n'), ((113581, 113792), 'Globals.DVH_distance_reference_point_ROI.append', 'Globals.DVH_distance_reference_point_ROI.append', (['[(Globals.DVH_film_reference_point[0] - Globals.DVH_ROI_coords[1][0]) * 0.2,\n (Globals.DVH_film_reference_point[1] - Globals.DVH_ROI_coords[1][1]) * 0.2]'], {}), '([(Globals.\n DVH_film_reference_point[0] - Globals.DVH_ROI_coords[1][0]) * 0.2, (\n Globals.DVH_film_reference_point[1] - Globals.DVH_ROI_coords[1][1]) * 0.2])\n', (113628, 113792), False, 'import Globals\n'), ((113816, 114027), 'Globals.DVH_distance_reference_point_ROI.append', 'Globals.DVH_distance_reference_point_ROI.append', (['[(Globals.DVH_film_reference_point[0] - Globals.DVH_ROI_coords[2][0]) * 0.2,\n (Globals.DVH_film_reference_point[1] - Globals.DVH_ROI_coords[2][1]) * 0.2]'], {}), '([(Globals.\n DVH_film_reference_point[0] - Globals.DVH_ROI_coords[2][0]) * 0.2, (\n Globals.DVH_film_reference_point[1] - Globals.DVH_ROI_coords[2][1]) * 0.2])\n', (113863, 114027), False, 'import Globals\n'), ((114051, 114262), 'Globals.DVH_distance_reference_point_ROI.append', 'Globals.DVH_distance_reference_point_ROI.append', (['[(Globals.DVH_film_reference_point[0] - Globals.DVH_ROI_coords[3][0]) * 0.2,\n (Globals.DVH_film_reference_point[1] - Globals.DVH_ROI_coords[3][1]) * 0.2]'], {}), '([(Globals.\n DVH_film_reference_point[0] - Globals.DVH_ROI_coords[3][0]) * 0.2, (\n Globals.DVH_film_reference_point[1] - Globals.DVH_ROI_coords[3][1]) * 0.2])\n', (114098, 114262), False, 'import Globals\n'), ((114392, 114583), 'Globals.DVH_distance_isocenter_ROI.append', 'Globals.DVH_distance_isocenter_ROI.append', 
(['[(Globals.DVH_film_isocenter[0] - Globals.DVH_ROI_coords[0][0]) * 0.2, (\n Globals.DVH_film_isocenter[1] - Globals.DVH_ROI_coords[0][1]) * 0.2]'], {}), '([(Globals.DVH_film_isocenter[0] -\n Globals.DVH_ROI_coords[0][0]) * 0.2, (Globals.DVH_film_isocenter[1] -\n Globals.DVH_ROI_coords[0][1]) * 0.2])\n', (114433, 114583), False, 'import Globals\n'), ((114607, 114798), 'Globals.DVH_distance_isocenter_ROI.append', 'Globals.DVH_distance_isocenter_ROI.append', (['[(Globals.DVH_film_isocenter[0] - Globals.DVH_ROI_coords[1][0]) * 0.2, (\n Globals.DVH_film_isocenter[1] - Globals.DVH_ROI_coords[1][1]) * 0.2]'], {}), '([(Globals.DVH_film_isocenter[0] -\n Globals.DVH_ROI_coords[1][0]) * 0.2, (Globals.DVH_film_isocenter[1] -\n Globals.DVH_ROI_coords[1][1]) * 0.2])\n', (114648, 114798), False, 'import Globals\n'), ((114824, 115015), 'Globals.DVH_distance_isocenter_ROI.append', 'Globals.DVH_distance_isocenter_ROI.append', (['[(Globals.DVH_film_isocenter[0] - Globals.DVH_ROI_coords[2][0]) * 0.2, (\n Globals.DVH_film_isocenter[1] - Globals.DVH_ROI_coords[2][1]) * 0.2]'], {}), '([(Globals.DVH_film_isocenter[0] -\n Globals.DVH_ROI_coords[2][0]) * 0.2, (Globals.DVH_film_isocenter[1] -\n Globals.DVH_ROI_coords[2][1]) * 0.2])\n', (114865, 115015), False, 'import Globals\n'), ((115041, 115232), 'Globals.DVH_distance_isocenter_ROI.append', 'Globals.DVH_distance_isocenter_ROI.append', (['[(Globals.DVH_film_isocenter[0] - Globals.DVH_ROI_coords[3][0]) * 0.2, (\n Globals.DVH_film_isocenter[1] - Globals.DVH_ROI_coords[3][1]) * 0.2]'], {}), '([(Globals.DVH_film_isocenter[0] -\n Globals.DVH_ROI_coords[3][0]) * 0.2, (Globals.DVH_film_isocenter[1] -\n Globals.DVH_ROI_coords[3][1]) * 0.2])\n', (115082, 115232), False, 'import Globals\n'), ((4141, 4201), 'numpy.round', 'np.round', (['(Globals.DVH_distance_reference_point_ROI[0][0] / 2)'], {}), '(Globals.DVH_distance_reference_point_ROI[0][0] / 2)\n', (4149, 4201), True, 'import numpy as np\n'), ((4216, 4276), 'numpy.round', 'np.round', (['(Globals.DVH_distance_reference_point_ROI[0][1] / 2)'], {}), '(Globals.DVH_distance_reference_point_ROI[0][1] / 2)\n', (4224, 4276), True, 'import numpy as np\n'), ((4339, 4399), 'numpy.round', 'np.round', (['(Globals.DVH_distance_reference_point_ROI[1][0] / 2)'], {}), '(Globals.DVH_distance_reference_point_ROI[1][0] / 2)\n', (4347, 4399), True, 'import numpy as np\n'), ((4414, 4474), 'numpy.round', 'np.round', (['(Globals.DVH_distance_reference_point_ROI[1][1] / 2)'], {}), '(Globals.DVH_distance_reference_point_ROI[1][1] / 2)\n', (4422, 4474), True, 'import numpy as np\n'), ((4537, 4597), 'numpy.round', 'np.round', (['(Globals.DVH_distance_reference_point_ROI[2][0] / 2)'], {}), '(Globals.DVH_distance_reference_point_ROI[2][0] / 2)\n', (4545, 4597), True, 'import numpy as np\n'), ((4612, 4672), 'numpy.round', 'np.round', (['(Globals.DVH_distance_reference_point_ROI[2][1] / 2)'], {}), '(Globals.DVH_distance_reference_point_ROI[2][1] / 2)\n', (4620, 4672), True, 'import numpy as np\n'), ((4735, 4795), 'numpy.round', 'np.round', (['(Globals.DVH_distance_reference_point_ROI[3][0] / 2)'], {}), '(Globals.DVH_distance_reference_point_ROI[3][0] / 2)\n', (4743, 4795), True, 'import numpy as np\n'), ((4810, 4870), 'numpy.round', 'np.round', (['(Globals.DVH_distance_reference_point_ROI[3][1] / 2)'], {}), '(Globals.DVH_distance_reference_point_ROI[3][1] / 2)\n', (4818, 4870), True, 'import numpy as np\n'), ((5682, 5742), 'numpy.round', 'np.round', (['(Globals.DVH_distance_reference_point_ROI[0][0] / 3)'], {}), 
'(Globals.DVH_distance_reference_point_ROI[0][0] / 3)\n', (5690, 5742), True, 'import numpy as np\n'), ((5757, 5817), 'numpy.round', 'np.round', (['(Globals.DVH_distance_reference_point_ROI[0][1] / 3)'], {}), '(Globals.DVH_distance_reference_point_ROI[0][1] / 3)\n', (5765, 5817), True, 'import numpy as np\n'), ((5880, 5940), 'numpy.round', 'np.round', (['(Globals.DVH_distance_reference_point_ROI[1][0] / 3)'], {}), '(Globals.DVH_distance_reference_point_ROI[1][0] / 3)\n', (5888, 5940), True, 'import numpy as np\n'), ((5955, 6015), 'numpy.round', 'np.round', (['(Globals.DVH_distance_reference_point_ROI[1][1] / 3)'], {}), '(Globals.DVH_distance_reference_point_ROI[1][1] / 3)\n', (5963, 6015), True, 'import numpy as np\n'), ((6078, 6138), 'numpy.round', 'np.round', (['(Globals.DVH_distance_reference_point_ROI[2][0] / 3)'], {}), '(Globals.DVH_distance_reference_point_ROI[2][0] / 3)\n', (6086, 6138), True, 'import numpy as np\n'), ((6153, 6213), 'numpy.round', 'np.round', (['(Globals.DVH_distance_reference_point_ROI[2][1] / 3)'], {}), '(Globals.DVH_distance_reference_point_ROI[2][1] / 3)\n', (6161, 6213), True, 'import numpy as np\n'), ((6276, 6336), 'numpy.round', 'np.round', (['(Globals.DVH_distance_reference_point_ROI[3][0] / 3)'], {}), '(Globals.DVH_distance_reference_point_ROI[3][0] / 3)\n', (6284, 6336), True, 'import numpy as np\n'), ((6351, 6411), 'numpy.round', 'np.round', (['(Globals.DVH_distance_reference_point_ROI[3][1] / 3)'], {}), '(Globals.DVH_distance_reference_point_ROI[3][1] / 3)\n', (6359, 6411), True, 'import numpy as np\n'), ((11066, 11100), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (11098, 11100), False, 'import Globals\n'), ((11209, 11272), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Something has gone wrong here."""'], {}), "('Error', 'Something has gone wrong here.')\n", (11229, 11272), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((11710, 11744), 'Globals.DCH_film_orientation.get', 'Globals.DCH_film_orientation.get', ([], {}), '()\n', (11742, 11744), False, 'import Globals\n'), ((11789, 11848), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(0)', '(2)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 0, 2)\n', (11800, 11848), True, 'import numpy as np\n'), ((12014, 12048), 'numpy.swapaxes', 'np.swapaxes', (['dataset_swapped', '(1)', '(2)'], {}), '(dataset_swapped, 1, 2)\n', (12025, 12048), True, 'import numpy as np\n'), ((12858, 12892), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (12890, 12892), False, 'import Globals\n'), ((12936, 12995), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(0)', '(2)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 0, 2)\n', (12947, 12995), True, 'import numpy as np\n'), ((13161, 13195), 'numpy.swapaxes', 'np.swapaxes', (['dataset_swapped', '(1)', '(2)'], {}), '(dataset_swapped, 1, 2)\n', (13172, 13195), True, 'import numpy as np\n'), ((25399, 25414), 'matplotlib.cm.viridis', 'cm.viridis', (['img'], {}), '(img)\n', (25409, 25414), False, 'from matplotlib import cm\n'), ((25715, 25760), 'Globals.profiles_doseplan_text_image.height', 'Globals.profiles_doseplan_text_image.height', ([], {}), '()\n', (25758, 25760), False, 'import Globals\n'), 
((25793, 25837), 'Globals.profiles_doseplan_text_image.width', 'Globals.profiles_doseplan_text_image.width', ([], {}), '()\n', (25835, 25837), False, 'import Globals\n'), ((25950, 25995), 'Globals.profiles_doseplan_text_image.height', 'Globals.profiles_doseplan_text_image.height', ([], {}), '()\n', (25993, 25995), False, 'import Globals\n'), ((26028, 26072), 'Globals.profiles_doseplan_text_image.width', 'Globals.profiles_doseplan_text_image.width', ([], {}), '()\n', (26070, 26072), False, 'import Globals\n'), ((27859, 27920), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': '(img.shape[1] * 10, img.shape[0] * 10)'}), '(img, dsize=(img.shape[1] * 10, img.shape[0] * 10))\n', (27869, 27920), False, 'import cv2\n'), ((27974, 28035), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': '(img.shape[1] * 15, img.shape[0] * 15)'}), '(img, dsize=(img.shape[1] * 15, img.shape[0] * 15))\n', (27984, 28035), False, 'import cv2\n'), ((30324, 30378), 'numpy.round', 'np.round', (['(Globals.DVH_distance_isocenter_ROI[0][0] / 2)'], {}), '(Globals.DVH_distance_isocenter_ROI[0][0] / 2)\n', (30332, 30378), True, 'import numpy as np\n'), ((30393, 30447), 'numpy.round', 'np.round', (['(Globals.DVH_distance_isocenter_ROI[0][1] / 2)'], {}), '(Globals.DVH_distance_isocenter_ROI[0][1] / 2)\n', (30401, 30447), True, 'import numpy as np\n'), ((30510, 30564), 'numpy.round', 'np.round', (['(Globals.DVH_distance_isocenter_ROI[1][0] / 2)'], {}), '(Globals.DVH_distance_isocenter_ROI[1][0] / 2)\n', (30518, 30564), True, 'import numpy as np\n'), ((30579, 30633), 'numpy.round', 'np.round', (['(Globals.DVH_distance_isocenter_ROI[1][1] / 2)'], {}), '(Globals.DVH_distance_isocenter_ROI[1][1] / 2)\n', (30587, 30633), True, 'import numpy as np\n'), ((30696, 30750), 'numpy.round', 'np.round', (['(Globals.DVH_distance_isocenter_ROI[2][0] / 2)'], {}), '(Globals.DVH_distance_isocenter_ROI[2][0] / 2)\n', (30704, 30750), True, 'import numpy as np\n'), ((30765, 30819), 'numpy.round', 'np.round', (['(Globals.DVH_distance_isocenter_ROI[2][1] / 2)'], {}), '(Globals.DVH_distance_isocenter_ROI[2][1] / 2)\n', (30773, 30819), True, 'import numpy as np\n'), ((30882, 30936), 'numpy.round', 'np.round', (['(Globals.DVH_distance_isocenter_ROI[3][0] / 2)'], {}), '(Globals.DVH_distance_isocenter_ROI[3][0] / 2)\n', (30890, 30936), True, 'import numpy as np\n'), ((30951, 31005), 'numpy.round', 'np.round', (['(Globals.DVH_distance_isocenter_ROI[3][1] / 2)'], {}), '(Globals.DVH_distance_isocenter_ROI[3][1] / 2)\n', (30959, 31005), True, 'import numpy as np\n'), ((31409, 31463), 'numpy.round', 'np.round', (['(Globals.DVH_distance_isocenter_ROI[0][0] / 3)'], {}), '(Globals.DVH_distance_isocenter_ROI[0][0] / 3)\n', (31417, 31463), True, 'import numpy as np\n'), ((31478, 31532), 'numpy.round', 'np.round', (['(Globals.DVH_distance_isocenter_ROI[0][1] / 3)'], {}), '(Globals.DVH_distance_isocenter_ROI[0][1] / 3)\n', (31486, 31532), True, 'import numpy as np\n'), ((31595, 31649), 'numpy.round', 'np.round', (['(Globals.DVH_distance_isocenter_ROI[1][0] / 3)'], {}), '(Globals.DVH_distance_isocenter_ROI[1][0] / 3)\n', (31603, 31649), True, 'import numpy as np\n'), ((31664, 31718), 'numpy.round', 'np.round', (['(Globals.DVH_distance_isocenter_ROI[1][1] / 3)'], {}), '(Globals.DVH_distance_isocenter_ROI[1][1] / 3)\n', (31672, 31718), True, 'import numpy as np\n'), ((31781, 31835), 'numpy.round', 'np.round', (['(Globals.DVH_distance_isocenter_ROI[2][0] / 3)'], {}), '(Globals.DVH_distance_isocenter_ROI[2][0] / 3)\n', (31789, 31835), True, 'import numpy as np\n'), ((31850, 
31904), 'numpy.round', 'np.round', (['(Globals.DVH_distance_isocenter_ROI[2][1] / 3)'], {}), '(Globals.DVH_distance_isocenter_ROI[2][1] / 3)\n', (31858, 31904), True, 'import numpy as np\n'), ((31967, 32021), 'numpy.round', 'np.round', (['(Globals.DVH_distance_isocenter_ROI[3][0] / 3)'], {}), '(Globals.DVH_distance_isocenter_ROI[3][0] / 3)\n', (31975, 32021), True, 'import numpy as np\n'), ((32036, 32090), 'numpy.round', 'np.round', (['(Globals.DVH_distance_isocenter_ROI[3][1] / 3)'], {}), '(Globals.DVH_distance_isocenter_ROI[3][1] / 3)\n', (32044, 32090), True, 'import numpy as np\n'), ((33218, 33252), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (33250, 33252), False, 'import Globals\n'), ((33361, 33424), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Something has gone wrong here."""'], {}), "('Error', 'Something has gone wrong here.')\n", (33381, 33424), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((33829, 33863), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (33861, 33863), False, 'import Globals\n'), ((33908, 33967), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(0)', '(2)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 0, 2)\n', (33919, 33967), True, 'import numpy as np\n'), ((34133, 34167), 'numpy.swapaxes', 'np.swapaxes', (['dataset_swapped', '(1)', '(2)'], {}), '(dataset_swapped, 1, 2)\n', (34144, 34167), True, 'import numpy as np\n'), ((34944, 34978), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (34976, 34978), False, 'import Globals\n'), ((35022, 35081), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(0)', '(2)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 0, 2)\n', (35033, 35081), True, 'import numpy as np\n'), ((35247, 35281), 'numpy.swapaxes', 'np.swapaxes', (['dataset_swapped', '(1)', '(2)'], {}), '(dataset_swapped, 1, 2)\n', (35258, 35281), True, 'import numpy as np\n'), ((47396, 47411), 'matplotlib.cm.viridis', 'cm.viridis', (['img'], {}), '(img)\n', (47406, 47411), False, 'from matplotlib import cm\n'), ((47712, 47757), 'Globals.profiles_doseplan_text_image.height', 'Globals.profiles_doseplan_text_image.height', ([], {}), '()\n', (47755, 47757), False, 'import Globals\n'), ((47790, 47834), 'Globals.profiles_doseplan_text_image.width', 'Globals.profiles_doseplan_text_image.width', ([], {}), '()\n', (47832, 47834), False, 'import Globals\n'), ((47947, 47992), 'Globals.profiles_doseplan_text_image.height', 'Globals.profiles_doseplan_text_image.height', ([], {}), '()\n', (47990, 47992), False, 'import Globals\n'), ((48025, 48069), 'Globals.profiles_doseplan_text_image.width', 'Globals.profiles_doseplan_text_image.width', ([], {}), '()\n', (48067, 48069), False, 'import Globals\n'), ((49852, 49913), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': '(img.shape[1] * 10, img.shape[0] * 10)'}), '(img, dsize=(img.shape[1] * 10, img.shape[0] * 10))\n', (49862, 49913), False, 'import cv2\n'), ((49967, 50028), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': '(img.shape[1] * 15, img.shape[0] * 15)'}), '(img, dsize=(img.shape[1] * 15, img.shape[0] * 15))\n', (49977, 50028), False, 'import cv2\n'), ((59554, 59573), 'matplotlib.cm.viridis', 
'cm.viridis', (['img_ROI'], {}), '(img_ROI)\n', (59564, 59573), False, 'from matplotlib import cm\n'), ((59874, 59919), 'Globals.profiles_doseplan_text_image.height', 'Globals.profiles_doseplan_text_image.height', ([], {}), '()\n', (59917, 59919), False, 'import Globals\n'), ((59952, 59996), 'Globals.profiles_doseplan_text_image.width', 'Globals.profiles_doseplan_text_image.width', ([], {}), '()\n', (59994, 59996), False, 'import Globals\n'), ((60109, 60154), 'Globals.profiles_doseplan_text_image.height', 'Globals.profiles_doseplan_text_image.height', ([], {}), '()\n', (60152, 60154), False, 'import Globals\n'), ((60187, 60231), 'Globals.profiles_doseplan_text_image.width', 'Globals.profiles_doseplan_text_image.width', ([], {}), '()\n', (60229, 60231), False, 'import Globals\n'), ((71126, 71170), 'Globals.DVH_done_button.config', 'Globals.DVH_done_button.config', ([], {'state': 'ACTIVE'}), '(state=ACTIVE)\n', (71156, 71170), False, 'import Globals\n'), ((112287, 112337), 'Globals.profiles_scanned_image_text_image.height', 'Globals.profiles_scanned_image_text_image.height', ([], {}), '()\n', (112335, 112337), False, 'import Globals\n'), ((112345, 112394), 'Globals.profiles_scanned_image_text_image.width', 'Globals.profiles_scanned_image_text_image.width', ([], {}), '()\n', (112392, 112394), False, 'import Globals\n'), ((112674, 112724), 'Globals.profiles_film_dose_map_text_image.height', 'Globals.profiles_film_dose_map_text_image.height', ([], {}), '()\n', (112722, 112724), False, 'import Globals\n'), ((112732, 112781), 'Globals.profiles_film_dose_map_text_image.width', 'Globals.profiles_film_dose_map_text_image.width', ([], {}), '()\n', (112779, 112781), False, 'import Globals\n'), ((12197, 12231), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (12229, 12231), False, 'import Globals\n'), ((12273, 12332), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(0)', '(1)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 0, 1)\n', (12284, 12332), True, 'import numpy as np\n'), ((12494, 12552), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Something has gone wrong."""'], {}), "('Error', 'Something has gone wrong.')\n", (12514, 12552), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((13344, 13378), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (13376, 13378), False, 'import Globals\n'), ((13423, 13482), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(0)', '(1)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 0, 1)\n', (13434, 13482), True, 'import numpy as np\n'), ((13648, 13682), 'numpy.swapaxes', 'np.swapaxes', (['dataset_swapped', '(1)', '(2)'], {}), '(dataset_swapped, 1, 2)\n', (13659, 13682), True, 'import numpy as np\n'), ((14492, 14526), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (14524, 14526), False, 'import Globals\n'), ((14570, 14629), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(0)', '(2)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 0, 2)\n', (14581, 14629), True, 'import numpy as np\n'), ((34316, 34350), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (34348, 34350), 
False, 'import Globals\n'), ((34392, 34451), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(0)', '(1)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 0, 1)\n', (34403, 34451), True, 'import numpy as np\n'), ((34613, 34671), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Something has gone wrong."""'], {}), "('Error', 'Something has gone wrong.')\n", (34633, 34671), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((35430, 35464), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (35462, 35464), False, 'import Globals\n'), ((35509, 35568), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(0)', '(1)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 0, 1)\n', (35520, 35568), True, 'import numpy as np\n'), ((35734, 35768), 'numpy.swapaxes', 'np.swapaxes', (['dataset_swapped', '(1)', '(2)'], {}), '(dataset_swapped, 1, 2)\n', (35745, 35768), True, 'import numpy as np\n'), ((36545, 36579), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (36577, 36579), False, 'import Globals\n'), ((36623, 36682), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(0)', '(2)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 0, 2)\n', (36634, 36682), True, 'import numpy as np\n'), ((58848, 58950), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Invalid factor. Must be number.\n (Code: closeUploadDoseplans)"""'], {}), '(\'Error\',\n """Invalid factor. 
Must be number.\n (Code: closeUploadDoseplans)""")\n', (58868, 58950), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((99669, 99728), 'Globals.DVH_input_lateral_displacement.get', 'Globals.DVH_input_lateral_displacement.get', (['"""1.0"""', '"""end-1c"""'], {}), "('1.0', 'end-1c')\n", (99711, 99728), False, 'import Globals\n'), ((100179, 100243), 'Globals.DVH_input_longitudinal_displacement.get', 'Globals.DVH_input_longitudinal_displacement.get', (['"""1.0"""', '"""end-1c"""'], {}), "('1.0', 'end-1c')\n", (100226, 100243), False, 'import Globals\n'), ((100715, 100775), 'Globals.DVH_input_vertical_displacement.get', 'Globals.DVH_input_vertical_displacement.get', (['"""1.0"""', '"""end-1c"""'], {}), "('1.0', 'end-1c')\n", (100758, 100775), False, 'import Globals\n'), ((101489, 101534), 'Globals.DVH_slice_offset.get', 'Globals.DVH_slice_offset.get', (['"""1.0"""', '"""end-1c"""'], {}), "('1.0', 'end-1c')\n", (101517, 101534), False, 'import Globals\n'), ((104853, 105002), 'tkinter.Radiobutton', 'Radiobutton', (['choose_batch_frame'], {'text': '""""""', 'bg': '"""#ffffff"""', 'cursor': '"""hand2"""', 'font': "('calibri', '14')", 'variable': 'Globals.DVH_film_batch', 'value': 'batch_cnt'}), "(choose_batch_frame, text='', bg='#ffffff', cursor='hand2', font\n =('calibri', '14'), variable=Globals.DVH_film_batch, value=batch_cnt)\n", (104864, 105002), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((110406, 110456), 'Globals.profiles_scanned_image_text_image.height', 'Globals.profiles_scanned_image_text_image.height', ([], {}), '()\n', (110454, 110456), False, 'import Globals\n'), ((110493, 110542), 'Globals.profiles_scanned_image_text_image.width', 'Globals.profiles_scanned_image_text_image.width', ([], {}), '()\n', (110540, 110542), False, 'import Globals\n'), ((110664, 110714), 'Globals.profiles_scanned_image_text_image.height', 'Globals.profiles_scanned_image_text_image.height', ([], {}), '()\n', (110712, 110714), False, 'import Globals\n'), ((110751, 110800), 'Globals.profiles_scanned_image_text_image.width', 'Globals.profiles_scanned_image_text_image.width', ([], {}), '()\n', (110798, 110800), False, 'import Globals\n'), ((111037, 111087), 'Globals.profiles_film_dose_map_text_image.height', 'Globals.profiles_film_dose_map_text_image.height', ([], {}), '()\n', (111085, 111087), False, 'import Globals\n'), ((111124, 111173), 'Globals.profiles_film_dose_map_text_image.width', 'Globals.profiles_film_dose_map_text_image.width', ([], {}), '()\n', (111171, 111173), False, 'import Globals\n'), ((111294, 111344), 'Globals.profiles_film_dose_map_text_image.height', 'Globals.profiles_film_dose_map_text_image.height', ([], {}), '()\n', (111342, 111344), False, 'import Globals\n'), ((111381, 111430), 'Globals.profiles_film_dose_map_text_image.width', 'Globals.profiles_film_dose_map_text_image.width', ([], {}), '()\n', (111428, 111430), False, 'import Globals\n'), ((13831, 13865), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (13863, 13865), False, 'import Globals\n'), ((13907, 13966), 'numpy.swapaxes', 'np.swapaxes', 
(['Globals.DVH_dataset_doseplan.pixel_array', '(1)', '(2)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 1, 2)\n', (13918, 13966), True, 'import numpy as np\n'), ((14128, 14186), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Something has gone wrong."""'], {}), "('Error', 'Something has gone wrong.')\n", (14148, 14186), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((14778, 14812), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (14810, 14812), False, 'import Globals\n'), ((14857, 14916), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(1)', '(2)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 1, 2)\n', (14868, 14916), True, 'import numpy as np\n'), ((15926, 15960), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (15958, 15960), False, 'import Globals\n'), ((16004, 16063), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(1)', '(2)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 1, 2)\n', (16015, 16063), True, 'import numpy as np\n'), ((18395, 18453), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Something has gone wrong."""'], {}), "('Error', 'Something has gone wrong.')\n", (18415, 18453), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((35917, 35951), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (35949, 35951), False, 'import Globals\n'), ((35993, 36052), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(1)', '(2)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 1, 2)\n', (36004, 36052), True, 'import numpy as np\n'), ((36214, 36272), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Something has gone wrong."""'], {}), "('Error', 'Something has gone wrong.')\n", (36234, 36272), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((36831, 36865), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (36863, 36865), False, 'import Globals\n'), ((36910, 36969), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(1)', '(2)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 1, 2)\n', (36921, 36969), True, 'import numpy as np\n'), ((37946, 37980), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (37978, 37980), False, 'import Globals\n'), ((38024, 38083), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(1)', '(2)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 1, 2)\n', (38035, 38083), True, 'import numpy as np\n'), ((40382, 40440), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Something has gone wrong."""'], {}), "('Error', 'Something has gone wrong.')\n", (40402, 40440), False, 'from 
tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((99798, 99857), 'Globals.DVH_input_lateral_displacement.get', 'Globals.DVH_input_lateral_displacement.get', (['"""1.0"""', '"""end-1c"""'], {}), "('1.0', 'end-1c')\n", (99840, 99857), False, 'import Globals\n'), ((99961, 100065), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""The displacements must be numbers\n (Code: lateral displacement)"""'], {}), '(\'Error\',\n """The displacements must be numbers\n (Code: lateral displacement)""")\n', (99981, 100065), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((100313, 100377), 'Globals.DVH_input_longitudinal_displacement.get', 'Globals.DVH_input_longitudinal_displacement.get', (['"""1.0"""', '"""end-1c"""'], {}), "('1.0', 'end-1c')\n", (100360, 100377), False, 'import Globals\n'), ((100487, 100596), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""The displacements must be numbers\n (Code: longitudinal displacement)"""'], {}), '(\'Error\',\n """The displacements must be numbers\n (Code: longitudinal displacement)""")\n', (100507, 100596), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((100845, 100905), 'Globals.DVH_input_vertical_displacement.get', 'Globals.DVH_input_vertical_displacement.get', (['"""1.0"""', '"""end-1c"""'], {}), "('1.0', 'end-1c')\n", (100888, 100905), False, 'import Globals\n'), ((101011, 101116), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""The displacements must be numbers\n (Code: vertical displacement)"""'], {}), '(\'Error\',\n """The displacements must be numbers\n (Code: vertical displacement)""")\n', (101031, 101116), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((101604, 101649), 'Globals.DVH_slice_offset.get', 'Globals.DVH_slice_offset.get', (['"""1.0"""', '"""end-1c"""'], {}), "('1.0', 'end-1c')\n", (101632, 101649), False, 'import Globals\n'), ((101754, 101857), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Slice offset must be a number \n(Code: finishFilmMarkers(false)"""'], {}), '(\'Error\',\n """Slice offset must be a number \n(Code: finishFilmMarkers(false)""")\n', (101774, 101857), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((105510, 105538), 'Globals.DVH_film_batch.get', 'Globals.DVH_film_batch.get', ([], {}), '()\n', (105536, 105538), False, 'import Globals\n'), ((107552, 107572), 'matplotlib.cm.viridis', 'cm.viridis', (['img_film'], {}), '(img_film)\n', (107562, 107572), False, 'from matplotlib import cm\n'), ((15065, 15099), 
'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (15097, 15099), False, 'import Globals\n'), ((15141, 15200), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(0)', '(1)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 0, 1)\n', (15152, 15200), True, 'import numpy as np\n'), ((15366, 15400), 'numpy.swapaxes', 'np.swapaxes', (['dataset_swapped', '(1)', '(2)'], {}), '(dataset_swapped, 1, 2)\n', (15377, 15400), True, 'import numpy as np\n'), ((15562, 15620), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Something has gone wrong."""'], {}), "('Error', 'Something has gone wrong.')\n", (15582, 15620), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((16212, 16246), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (16244, 16246), False, 'import Globals\n'), ((16291, 16350), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(0)', '(1)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 0, 1)\n', (16302, 16350), True, 'import numpy as np\n'), ((17360, 17394), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (17392, 17394), False, 'import Globals\n'), ((17438, 17497), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(0)', '(2)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 0, 2)\n', (17449, 17497), True, 'import numpy as np\n'), ((17663, 17697), 'numpy.swapaxes', 'np.swapaxes', (['dataset_swapped', '(0)', '(1)'], {}), '(dataset_swapped, 0, 1)\n', (17674, 17697), True, 'import numpy as np\n'), ((37118, 37152), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (37150, 37152), False, 'import Globals\n'), ((37194, 37253), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(0)', '(1)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 0, 1)\n', (37205, 37253), True, 'import numpy as np\n'), ((37419, 37453), 'numpy.swapaxes', 'np.swapaxes', (['dataset_swapped', '(1)', '(2)'], {}), '(dataset_swapped, 1, 2)\n', (37430, 37453), True, 'import numpy as np\n'), ((37615, 37673), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Something has gone wrong."""'], {}), "('Error', 'Something has gone wrong.')\n", (37635, 37673), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((38232, 38266), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (38264, 38266), False, 'import Globals\n'), ((38311, 38370), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(0)', '(1)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 0, 1)\n', (38322, 38370), True, 'import numpy as np\n'), ((39347, 39381), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (39379, 39381), False, 'import Globals\n'), ((39425, 39484), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(0)', '(2)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 0, 2)\n', (39436, 39484), 
True, 'import numpy as np\n'), ((39650, 39684), 'numpy.swapaxes', 'np.swapaxes', (['dataset_swapped', '(0)', '(1)'], {}), '(dataset_swapped, 0, 1)\n', (39661, 39684), True, 'import numpy as np\n'), ((16499, 16533), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (16531, 16533), False, 'import Globals\n'), ((16575, 16634), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(0)', '(1)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 0, 1)\n', (16586, 16634), True, 'import numpy as np\n'), ((16800, 16834), 'numpy.swapaxes', 'np.swapaxes', (['dataset_swapped', '(0)', '(2)'], {}), '(dataset_swapped, 0, 2)\n', (16811, 16834), True, 'import numpy as np\n'), ((16996, 17054), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Something has gone wrong."""'], {}), "('Error', 'Something has gone wrong.')\n", (17016, 17054), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((17846, 17880), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (17878, 17880), False, 'import Globals\n'), ((38519, 38553), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (38551, 38553), False, 'import Globals\n'), ((38595, 38654), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(0)', '(1)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 0, 1)\n', (38606, 38654), True, 'import numpy as np\n'), ((38820, 38854), 'numpy.swapaxes', 'np.swapaxes', (['dataset_swapped', '(0)', '(2)'], {}), '(dataset_swapped, 0, 2)\n', (38831, 38854), True, 'import numpy as np\n'), ((39016, 39074), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Something has gone wrong."""'], {}), "('Error', 'Something has gone wrong.')\n", (39036, 39074), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((39833, 39867), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (39865, 39867), False, 'import Globals\n'), ((17979, 18013), 'Globals.DCH_film_orientation.get', 'Globals.DCH_film_orientation.get', ([], {}), '()\n', (18011, 18013), False, 'import Globals\n'), ((18055, 18114), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(0)', '(2)'], {}), '(Globals.DVH_dataset_doseplan.pixel_array, 0, 2)\n', (18066, 18114), True, 'import numpy as np\n'), ((18276, 18334), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Something has gone wrong."""'], {}), "('Error', 'Something has gone wrong.')\n", (18296, 18334), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n'), ((39966, 40000), 'Globals.DVH_film_orientation.get', 'Globals.DVH_film_orientation.get', ([], {}), '()\n', (39998, 40000), False, 'import Globals\n'), ((40042, 40101), 'numpy.swapaxes', 'np.swapaxes', (['Globals.DVH_dataset_doseplan.pixel_array', '(0)', '(2)'], {}), 
'(Globals.DVH_dataset_doseplan.pixel_array, 0, 2)\n', (40053, 40101), True, 'import numpy as np\n'), ((40263, 40321), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Something has gone wrong."""'], {}), "('Error', 'Something has gone wrong.')\n", (40283, 40321), False, 'from tkinter import filedialog, INSERT, DISABLED, messagebox, NORMAL, simpledialog, PhotoImage, BOTH, Canvas, N, S, W, E, ALL, Frame, SUNKEN, Radiobutton, GROOVE, ACTIVE, FLAT, END, Scrollbar, HORIZONTAL, VERTICAL, ttk, TOP, RIGHT, LEFT, ttk\n')]
|
import numpy as np
import sys
import pandas as pd
import matplotlib.pyplot as plt
# read in csv as data frame
data_val = pd.read_csv('val_event.csv',delimiter=' ')
data_mu = pd.read_csv('ylength_mu_michel.csv',delimiter=' ')
data_pi = pd.read_csv('ylength_pi.csv',delimiter=' ')
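# merge the validation events with the muon and pion tables on their shared keys; pandas' merge defaults to an inner join, so only events present in both files are kept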
mergedmu = data_val.merge(data_mu, on=['Subrun','Event','Type'])
mergedpi = data_val.merge(data_pi, on=['Subrun','Event','Type'])
# make histogram
plt.figure();
'''
Subtracting the probability from the prediction gives a probability of being a muon from 0 to 1
I tried to match the color scheme of the hist you sent me, but you can change to whatever you want :)
'''
# first plot pions
# everything in [] is the condition for how you select rows in pandas, and data.<whatever> is the column you select in data
plt.hist(np.abs(mergedpi.Pred[mergedpi.Type == 'Pion'] - mergedpi.Prob[mergedpi.Type == 'Pion']),bins=np.linspace(0,1,20),color='red',alpha=0.6,label='Pions');
# then plot muons
plt.hist(np.abs(mergedmu.Pred[mergedmu.Type == 'Muon'] - mergedmu.Prob[mergedmu.Type == 'Muon']),bins=np.linspace(0,1,20),color='blue',alpha=0.6,label='Muons');
plt.xlabel('Probability');
plt.ylabel('Event');
plt.legend(loc='upper center',frameon=False);
# save the figure (comment out the next line if you don't want to write the file)
plt.savefig("probhist_primary.png")
|
[
"numpy.abs",
"pandas.read_csv",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((123, 166), 'pandas.read_csv', 'pd.read_csv', (['"""val_event.csv"""'], {'delimiter': '""" """'}), "('val_event.csv', delimiter=' ')\n", (134, 166), True, 'import pandas as pd\n'), ((177, 228), 'pandas.read_csv', 'pd.read_csv', (['"""ylength_mu_michel.csv"""'], {'delimiter': '""" """'}), "('ylength_mu_michel.csv', delimiter=' ')\n", (188, 228), True, 'import pandas as pd\n'), ((239, 283), 'pandas.read_csv', 'pd.read_csv', (['"""ylength_pi.csv"""'], {'delimiter': '""" """'}), "('ylength_pi.csv', delimiter=' ')\n", (250, 283), True, 'import pandas as pd\n'), ((432, 444), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (442, 444), True, 'import matplotlib.pyplot as plt\n'), ((1140, 1165), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Probability"""'], {}), "('Probability')\n", (1150, 1165), True, 'import matplotlib.pyplot as plt\n'), ((1167, 1186), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Event"""'], {}), "('Event')\n", (1177, 1186), True, 'import matplotlib.pyplot as plt\n'), ((1188, 1233), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper center"""', 'frameon': '(False)'}), "(loc='upper center', frameon=False)\n", (1198, 1233), True, 'import matplotlib.pyplot as plt\n'), ((1255, 1290), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""probhist_primary.png"""'], {}), "('probhist_primary.png')\n", (1266, 1290), True, 'import matplotlib.pyplot as plt\n'), ((809, 900), 'numpy.abs', 'np.abs', (["(mergedpi.Pred[mergedpi.Type == 'Pion'] - mergedpi.Prob[mergedpi.Type ==\n 'Pion'])"], {}), "(mergedpi.Pred[mergedpi.Type == 'Pion'] - mergedpi.Prob[mergedpi.Type ==\n 'Pion'])\n", (815, 900), True, 'import numpy as np\n'), ((987, 1078), 'numpy.abs', 'np.abs', (["(mergedmu.Pred[mergedmu.Type == 'Muon'] - mergedmu.Prob[mergedmu.Type ==\n 'Muon'])"], {}), "(mergedmu.Pred[mergedmu.Type == 'Muon'] - mergedmu.Prob[mergedmu.Type ==\n 'Muon'])\n", (993, 1078), True, 'import numpy as np\n'), ((902, 923), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(20)'], {}), '(0, 1, 20)\n', (913, 923), True, 'import numpy as np\n'), ((1080, 1101), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(20)'], {}), '(0, 1, 20)\n', (1091, 1101), True, 'import numpy as np\n')]
|
#!/usr/bin/python
"""The primary script to execute the tensorflow models."""
from __future__ import print_function
from munch import munchify
from six.moves import cPickle
from config.arguments import parser
""" Default model """
from model.model import Model
from utils.processor import BatchLoader, DataLoader, eval_loader
from utils.strings import FILES
import utils.adaptive as adaptive
import numpy as np
import tensorflow as tf
import json
import logging
import os
import sys
import time
import yaml
tf.reset_default_graph()
def main():
"""The main method of script."""
args = parser.parse_args()
with open(args.config_file, 'r') as stream:
args.config = munchify(yaml.load(stream))
args.save_dir = os.path.join(args.save_dir, args.job_id)
args.best_dir = os.path.join(args.best_dir, args.job_id)
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
if not os.path.exists(args.best_dir):
os.makedirs(args.best_dir)
print(args)
if args.mode == 'train':
train(args)
elif args.mode == 'test' or args.mode == 'valid':
test(args)
elif args.mode == 'generate':
generate(args)
def generate(args):
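	# generation feeds one token at a time, so the model is unrolled for a single timestep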
args.config.timesteps = 1
data_loader = DataLoader(args)
with tf.Session() as sess:
initializer = tf.random_uniform_initializer(-0.05, 0.05)
with tf.variable_scope("model", reuse=None, initializer=initializer):
model = Model(args, args.config.batch_size, mode='train')
steps_done = initialize_weights(sess, model, args, mode='test')
with tf.variable_scope("model", reuse=True, initializer=initializer):
model = Model(args, batch_size=1, mode='eval')
print("Loaded %d completed steps", steps_done)
states = sess.run(model.initial_states)
# First feed in the prior letters
probs = None
for i in args.line.split():
feed = {
model.input_data: np.array([[data_loader.vocab[i]]]),
model.initial_states: states
}
[states, probs] = sess.run([model.final_states, model.probs], feed)
# Now, time to begin the sampling process
def weighted_pick(weights):
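			# sample an index with probability proportional to its weight: draw a uniform value in [0, sum(weights)) and locate it on the cumulative sum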
t = np.cumsum(weights)
s = np.sum(weights)
return(int(np.searchsorted(t, np.random.rand(1) * s)))
# Weird construct to optimize code
prior_length = len(args.line.split())
output = [' '] * (args.words + prior_length)
for i in range(prior_length):
output[i] = args.line.split()[i]
for i in range(args.words):
if i % 100 == 0:
print("%d out of %d generated" % (i, args.words))
token = weighted_pick(np.squeeze(probs))
if token == len(np.squeeze(probs)):
token = token - 1
output[i + prior_length] = data_loader.rev_vocab[token]
feed = {
model.input_data: np.array([[token]]),
model.initial_states: states
}
[states, probs] = sess.run([model.final_states, model.probs], feed)
output = ' '.join(output)
output = output.replace('</s>', '\n')
output = output + "\n"
# with open(os.path.join(args.save_dir, "generate_{0}.txt".format(args.job_id)), 'w') as f:
# f.write(output)
print(output)
def initialize_weights(sess, model, args, mode):
ckpt = tf.train.get_checkpoint_state(args.save_dir)
ckpt_best = tf.train.get_checkpoint_state(args.best_dir)
if mode == 'test' and ckpt_best:
print("Reading best model parameters from %s", ckpt_best.model_checkpoint_path)
model.saver.restore(sess, ckpt_best.model_checkpoint_path)
steps_done = int(ckpt_best.model_checkpoint_path.split('-')[-1])
# Since local variables are not saved
sess.run([
tf.local_variables_initializer()
])
elif mode == 'train' and ckpt:
print("Reading model parameters from %s", ckpt.model_checkpoint_path)
model.saver.restore(sess, ckpt.model_checkpoint_path)
steps_done = int(ckpt.model_checkpoint_path.split('-')[-1])
# Since local variables are not saved
sess.run([
tf.local_variables_initializer()
])
else:
steps_done = 0
sess.run([
tf.global_variables_initializer(),
tf.local_variables_initializer()
])
return steps_done
def evaluate(sess, model, eval_data, args, calculate_prob=False, rev_vocab=None):
"""Calculate perplexity after every epoch."""
states = sess.run(model.initial_states)
total_loss = 0.0
prob_output = ""
eval_x, eval_y, eval_len = eval_data['x'], eval_data['y'], eval_data['len']
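	# eval_x/eval_y are pre-split into [num_windows, timesteps] blocks; the RNN state is carried across blocks so the split is scored as one continuous sequence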
for i in range(eval_x.shape[0]):
# Need to pass L1 to get evaluation perplexity
feed = {
model.input_data: eval_x[i:i + 1, :],
model.targets: eval_y[i:i + 1, :],
model.initial_states: states
}
if calculate_prob is True:
[states, loss, probs] = sess.run([model.final_states, model.loss, model.probs], feed)
total_loss += loss.sum()
for j in range(len(probs)):
position = i * args.config.timesteps + j
if position >= eval_len - 1:
continue
token = eval_y[i][j]
prob_output += rev_vocab[token] + " " + str(probs[j, token]) + "\n"
else:
[states, loss] = sess.run([model.final_states, model.loss], feed)
total_loss += loss.sum()
# need to subtract off loss from padding tokens
extra_tokens = (args.config.timesteps - eval_len % args.config.timesteps) % args.config.timesteps + 1
total_loss -= loss[-extra_tokens:].sum()
avg_entropy = total_loss / eval_len
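	# perplexity is the exponential of the average per-token cross-entropy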
ppl = np.exp(avg_entropy)
if calculate_prob is True:
return ppl, prob_output
else:
return ppl
def test(args):
data_loader = DataLoader(args)
if args.device == "gpu":
cfg_proto = tf.ConfigProto(intra_op_parallelism_threads=2)
cfg_proto.gpu_options.allow_growth = True
else:
cfg_proto = None
with tf.Session(config=cfg_proto) as sess:
initializer = tf.random_uniform_initializer(-0.05, 0.05)
with tf.variable_scope("model", reuse=None, initializer=initializer):
model = Model(args, args.config.batch_size, mode='train')
steps_done = initialize_weights(sess, model, args, mode='test')
with tf.variable_scope("model", reuse=True, initializer=initializer):
model_eval = Model(args, batch_size=1, mode='eval')
print("loaded %d completed steps", steps_done)
test_data = {}
test_data['x'], test_data['y'], test_data['len'] = eval_loader(args, data_loader.vocab, split=args.mode)
ppl, prob_output = evaluate(
sess, model_eval, test_data, args, calculate_prob=True, rev_vocab=data_loader.rev_vocab
)
with open(os.path.join(args.save_dir, "probs_{0}_{1}.txt".format(args.mode,args.job_id)), 'w') as f:
f.write(prob_output)
print("Perplexity is %.4f", ppl)
def train(args):
"""Prepare the data and begins training."""
# Load the text and vocabulary
data_loader = DataLoader(args)
# Prepare batches for training
batch_loader = BatchLoader(args, data_loader)
if args.device == "gpu":
cfg_proto = tf.ConfigProto(intra_op_parallelism_threads=2)
cfg_proto.gpu_options.allow_growth = True
else:
cfg_proto = None
with tf.Session(config=cfg_proto) as sess:
# Build training model, load old weights
initializer = tf.random_uniform_initializer(-0.05, 0.05)
with tf.variable_scope("model", reuse=None, initializer=initializer):
model = Model(args, args.config.batch_size, mode='train')
steps_done = initialize_weights(sess, model, args, mode='train')
print("loaded %d completed steps", steps_done)
# Reusing weights for evaluation model
with tf.variable_scope("model", reuse=True, initializer=initializer):
model_eval = Model(args, batch_size=1, mode='eval')
valid_data = {}
valid_data['x'], valid_data['y'], valid_data['len'] = eval_loader(args, data_loader.vocab, split='valid')
batch_loader.eval_data = valid_data
train_writer = tf.summary.FileWriter(args.save_dir + '/logs/', tf.get_default_graph())
# Making the graph read-only to prevent memory leaks
# https://stackoverflow.com/documentation/tensorflow/3883/how-to-debug-a-memory-leak-in-tensorflow/13426/use-graph-finalize-to-catch-nodes-being-added-to-the-graph#t=201612280201558374055
sess.graph.finalize()
start_epoch = model.epoch.eval()
for epoch in range(start_epoch, args.config.num_epochs):
run_epoch(sess, model, model_eval, args, batch_loader, epoch)
def run_epoch(sess, model, model_eval, args, batch_loader, epoch):
"""Run one epoch of training."""
best_ppl = model.best_ppl.eval()
last_ppl_update = model.last_ppl_update.eval()
margin_ppl = model.margin_ppl.eval()
adaptive_loss = getattr(adaptive, args.loss_mode)
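	# args.loss_mode names a function in utils.adaptive (looked up via getattr); it is called every batch below to produce the l1 value that is assigned into the model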
# Reset batch pointer back to zero
batch_loader.reset_batch_pointer()
# Start from an empty RNN state
states = sess.run(model.initial_states)
start_batch = model.global_step.eval() % batch_loader.num_batches
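	# global_step is restored from the checkpoint, so a run that was interrupted mid-epoch resumes from the batch it stopped at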
if start_batch != 0:
print("Starting from batch %d / %d", start_batch, batch_loader.num_batches)
batch_loader.pointer += start_batch
for b in range(start_batch, batch_loader.num_batches):
start = time.time()
l1 = adaptive_loss(epoch, b, args=args)
sess.run(model.l1_assign, feed_dict={model.l1_new: l1})
time1 = time.time()
x, y, ngram = batch_loader.next_batch(l1)
time2 = time.time()
print("Time for loading ngram distribution - %.2f", time2 - time1)
# With probability 0.01 feed the initial state
if np.random.randint(1, 101) <= 1:
states = sess.run(model.initial_states)
feed = {model.input_data: x,
model.targets: y,
model.ngram: ngram,
model.initial_states: states}
time3 = time.time()
train_loss, l1, states, _ = sess.run([model.final_cost,
model.cost,
model.final_states,
model.train_op], feed)
end = time.time()
# print the result so far on terminal
batch_num = epoch * batch_loader.num_batches + b
total_num = args.config.num_epochs * batch_loader.num_batches
print("Time for TensorFlow calculations - %.2f", end - time3)
print("Epoch %d, %d / %d. Loss - %.4f, Time - %.2f", epoch, batch_num, total_num, train_loss, end - start)
# Save after `args.eval_freq` batches or at the very end
if batch_num != 0 and (batch_num % args.config.eval_freq == 0 or b == batch_loader.num_batches - 1):
ppl = evaluate(sess, model_eval, batch_loader.eval_data, args)
print("Perplexity after %d steps - %.4f", batch_num, ppl)
# Update rules for best_ppl / training schedule
print("Best ppl is %.4f, ppl < best_ppl is %s", model.best_ppl.eval(), str(ppl < best_ppl))
if ppl < best_ppl:
print("Saving Best Model")
# Storing perplexity and in TensorFlow variable and Python variable
best_ppl = ppl
sess.run(model.best_ppl_assign, feed_dict={model.best_ppl_new: ppl})
if margin_ppl - ppl > args.config.margin_ppl:
# In the case there has been a perplexity change of more than `margin_ppl`
# update the `last_ppl_update` and `margin_ppl` values
# This indicates a "significant" change in ppl
print("Updating margin_ppl, Change of %.4f", margin_ppl - ppl)
last_ppl_update = batch_num
margin_ppl = ppl
sess.run(model.last_ppl_update_assign, feed_dict={model.last_ppl_update_new: batch_num})
sess.run(model.margin_ppl_assign, feed_dict={model.margin_ppl_new: ppl})
# Saving the best model
checkpoint_path = os.path.join(args.best_dir, "lm.ckpt")
model.best_saver.save(sess, checkpoint_path, global_step=model.global_step, write_meta_graph=False)
else:
# Decay learning rate whenever ppl is greater than best_ppl so far
sess.run(model.lr_decay)
print("decaying lr after %d epochs to %.4f" % (model.epoch.eval(), model.lr.eval()))
checkpoint_path = os.path.join(args.save_dir, "lm.ckpt")
model.saver.save(sess, checkpoint_path, global_step=model.global_step, write_meta_graph=False)
sess.run(model.epoch_incr)
if __name__ == '__main__':
main()
|
[
"yaml.load",
"numpy.sum",
"tensorflow.reset_default_graph",
"tensorflow.local_variables_initializer",
"tensorflow.ConfigProto",
"numpy.random.randint",
"config.arguments.parser.parse_args",
"numpy.exp",
"tensorflow.get_default_graph",
"os.path.join",
"tensorflow.random_uniform_initializer",
"utils.processor.eval_loader",
"os.path.exists",
"tensorflow.variable_scope",
"model.model.Model",
"numpy.cumsum",
"utils.processor.DataLoader",
"tensorflow.train.get_checkpoint_state",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"utils.processor.BatchLoader",
"numpy.squeeze",
"os.makedirs",
"time.time",
"numpy.array",
"numpy.random.rand"
] |
[((513, 537), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (535, 537), True, 'import tensorflow as tf\n'), ((593, 612), 'config.arguments.parser.parse_args', 'parser.parse_args', ([], {}), '()\n', (610, 612), False, 'from config.arguments import parser\n'), ((719, 759), 'os.path.join', 'os.path.join', (['args.save_dir', 'args.job_id'], {}), '(args.save_dir, args.job_id)\n', (731, 759), False, 'import os\n'), ((777, 817), 'os.path.join', 'os.path.join', (['args.best_dir', 'args.job_id'], {}), '(args.best_dir, args.job_id)\n', (789, 817), False, 'import os\n'), ((1183, 1199), 'utils.processor.DataLoader', 'DataLoader', (['args'], {}), '(args)\n', (1193, 1199), False, 'from utils.processor import BatchLoader, DataLoader, eval_loader\n'), ((3054, 3098), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['args.save_dir'], {}), '(args.save_dir)\n', (3083, 3098), True, 'import tensorflow as tf\n'), ((3112, 3156), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['args.best_dir'], {}), '(args.best_dir)\n', (3141, 3156), True, 'import tensorflow as tf\n'), ((5150, 5169), 'numpy.exp', 'np.exp', (['avg_entropy'], {}), '(avg_entropy)\n', (5156, 5169), True, 'import numpy as np\n'), ((5278, 5294), 'utils.processor.DataLoader', 'DataLoader', (['args'], {}), '(args)\n', (5288, 5294), False, 'from utils.processor import BatchLoader, DataLoader, eval_loader\n'), ((6456, 6472), 'utils.processor.DataLoader', 'DataLoader', (['args'], {}), '(args)\n', (6466, 6472), False, 'from utils.processor import BatchLoader, DataLoader, eval_loader\n'), ((6521, 6551), 'utils.processor.BatchLoader', 'BatchLoader', (['args', 'data_loader'], {}), '(args, data_loader)\n', (6532, 6551), False, 'from utils.processor import BatchLoader, DataLoader, eval_loader\n'), ((826, 855), 'os.path.exists', 'os.path.exists', (['args.save_dir'], {}), '(args.save_dir)\n', (840, 855), False, 'import os\n'), ((859, 885), 'os.makedirs', 'os.makedirs', (['args.save_dir'], {}), '(args.save_dir)\n', (870, 885), False, 'import os\n'), ((894, 923), 'os.path.exists', 'os.path.exists', (['args.best_dir'], {}), '(args.best_dir)\n', (908, 923), False, 'import os\n'), ((927, 953), 'os.makedirs', 'os.makedirs', (['args.best_dir'], {}), '(args.best_dir)\n', (938, 953), False, 'import os\n'), ((1206, 1218), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1216, 1218), True, 'import tensorflow as tf\n'), ((1245, 1287), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(-0.05)', '(0.05)'], {}), '(-0.05, 0.05)\n', (1274, 1287), True, 'import tensorflow as tf\n'), ((5335, 5381), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'intra_op_parallelism_threads': '(2)'}), '(intra_op_parallelism_threads=2)\n', (5349, 5381), True, 'import tensorflow as tf\n'), ((5458, 5486), 'tensorflow.Session', 'tf.Session', ([], {'config': 'cfg_proto'}), '(config=cfg_proto)\n', (5468, 5486), True, 'import tensorflow as tf\n'), ((5513, 5555), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(-0.05)', '(0.05)'], {}), '(-0.05, 0.05)\n', (5542, 5555), True, 'import tensorflow as tf\n'), ((6003, 6056), 'utils.processor.eval_loader', 'eval_loader', (['args', 'data_loader.vocab'], {'split': 'args.mode'}), '(args, data_loader.vocab, split=args.mode)\n', (6014, 6056), False, 'from utils.processor import BatchLoader, DataLoader, eval_loader\n'), ((6593, 6639), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'intra_op_parallelism_threads': '(2)'}), 
'(intra_op_parallelism_threads=2)\n', (6607, 6639), True, 'import tensorflow as tf\n'), ((6716, 6744), 'tensorflow.Session', 'tf.Session', ([], {'config': 'cfg_proto'}), '(config=cfg_proto)\n', (6726, 6744), True, 'import tensorflow as tf\n'), ((6813, 6855), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(-0.05)', '(0.05)'], {}), '(-0.05, 0.05)\n', (6842, 6855), True, 'import tensorflow as tf\n'), ((7348, 7399), 'utils.processor.eval_loader', 'eval_loader', (['args', 'data_loader.vocab'], {'split': '"""valid"""'}), "(args, data_loader.vocab, split='valid')\n", (7359, 7399), False, 'from utils.processor import BatchLoader, DataLoader, eval_loader\n'), ((8647, 8658), 'time.time', 'time.time', ([], {}), '()\n', (8656, 8658), False, 'import time\n'), ((8769, 8780), 'time.time', 'time.time', ([], {}), '()\n', (8778, 8780), False, 'import time\n'), ((8835, 8846), 'time.time', 'time.time', ([], {}), '()\n', (8844, 8846), False, 'import time\n'), ((9167, 9178), 'time.time', 'time.time', ([], {}), '()\n', (9176, 9178), False, 'import time\n'), ((9336, 9347), 'time.time', 'time.time', ([], {}), '()\n', (9345, 9347), False, 'import time\n'), ((683, 700), 'yaml.load', 'yaml.load', (['stream'], {}), '(stream)\n', (692, 700), False, 'import yaml\n'), ((1295, 1358), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""model"""'], {'reuse': 'None', 'initializer': 'initializer'}), "('model', reuse=None, initializer=initializer)\n", (1312, 1358), True, 'import tensorflow as tf\n'), ((1371, 1420), 'model.model.Model', 'Model', (['args', 'args.config.batch_size'], {'mode': '"""train"""'}), "(args, args.config.batch_size, mode='train')\n", (1376, 1420), False, 'from model.model import Model\n'), ((1494, 1557), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""model"""'], {'reuse': '(True)', 'initializer': 'initializer'}), "('model', reuse=True, initializer=initializer)\n", (1511, 1557), True, 'import tensorflow as tf\n'), ((1570, 1608), 'model.model.Model', 'Model', (['args'], {'batch_size': '(1)', 'mode': '"""eval"""'}), "(args, batch_size=1, mode='eval')\n", (1575, 1608), False, 'from model.model import Model\n'), ((2043, 2061), 'numpy.cumsum', 'np.cumsum', (['weights'], {}), '(weights)\n', (2052, 2061), True, 'import numpy as np\n'), ((2069, 2084), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (2075, 2084), True, 'import numpy as np\n'), ((5563, 5626), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""model"""'], {'reuse': 'None', 'initializer': 'initializer'}), "('model', reuse=None, initializer=initializer)\n", (5580, 5626), True, 'import tensorflow as tf\n'), ((5639, 5688), 'model.model.Model', 'Model', (['args', 'args.config.batch_size'], {'mode': '"""train"""'}), "(args, args.config.batch_size, mode='train')\n", (5644, 5688), False, 'from model.model import Model\n'), ((5763, 5826), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""model"""'], {'reuse': '(True)', 'initializer': 'initializer'}), "('model', reuse=True, initializer=initializer)\n", (5780, 5826), True, 'import tensorflow as tf\n'), ((5844, 5882), 'model.model.Model', 'Model', (['args'], {'batch_size': '(1)', 'mode': '"""eval"""'}), "(args, batch_size=1, mode='eval')\n", (5849, 5882), False, 'from model.model import Model\n'), ((6863, 6926), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""model"""'], {'reuse': 'None', 'initializer': 'initializer'}), "('model', reuse=None, initializer=initializer)\n", (6880, 6926), True, 'import tensorflow as tf\n'), ((6939, 6988), 
'model.model.Model', 'Model', (['args', 'args.config.batch_size'], {'mode': '"""train"""'}), "(args, args.config.batch_size, mode='train')\n", (6944, 6988), False, 'from model.model import Model\n'), ((7154, 7217), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""model"""'], {'reuse': '(True)', 'initializer': 'initializer'}), "('model', reuse=True, initializer=initializer)\n", (7171, 7217), True, 'import tensorflow as tf\n'), ((7235, 7273), 'model.model.Model', 'Model', (['args'], {'batch_size': '(1)', 'mode': '"""eval"""'}), "(args, batch_size=1, mode='eval')\n", (7240, 7273), False, 'from model.model import Model\n'), ((7503, 7525), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (7523, 7525), True, 'import tensorflow as tf\n'), ((8970, 8995), 'numpy.random.randint', 'np.random.randint', (['(1)', '(101)'], {}), '(1, 101)\n', (8987, 8995), True, 'import numpy as np\n'), ((11280, 11318), 'os.path.join', 'os.path.join', (['args.save_dir', '"""lm.ckpt"""'], {}), "(args.save_dir, 'lm.ckpt')\n", (11292, 11318), False, 'import os\n'), ((1816, 1850), 'numpy.array', 'np.array', (['[[data_loader.vocab[i]]]'], {}), '([[data_loader.vocab[i]]])\n', (1824, 1850), True, 'import numpy as np\n'), ((2466, 2483), 'numpy.squeeze', 'np.squeeze', (['probs'], {}), '(probs)\n', (2476, 2483), True, 'import numpy as np\n'), ((2639, 2658), 'numpy.array', 'np.array', (['[[token]]'], {}), '([[token]])\n', (2647, 2658), True, 'import numpy as np\n'), ((3457, 3489), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (3487, 3489), True, 'import tensorflow as tf\n'), ((10917, 10955), 'os.path.join', 'os.path.join', (['args.best_dir', '"""lm.ckpt"""'], {}), "(args.best_dir, 'lm.ckpt')\n", (10929, 10955), False, 'import os\n'), ((2504, 2521), 'numpy.squeeze', 'np.squeeze', (['probs'], {}), '(probs)\n', (2514, 2521), True, 'import numpy as np\n'), ((3773, 3805), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (3803, 3805), True, 'import tensorflow as tf\n'), ((3851, 3884), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3882, 3884), True, 'import tensorflow as tf\n'), ((3889, 3921), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (3919, 3921), True, 'import tensorflow as tf\n'), ((2118, 2135), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (2132, 2135), True, 'import numpy as np\n')]
|
#######################################################
#This script is for evaluating for the task of SLR #
#######################################################
import argparse
import time
import collections
import os
import sys
import torch
import torch.nn
from torch.autograd import Variable
import torch.nn as nn
import numpy as np
import datetime as dt
import _pickle as pickle
from collections import OrderedDict
import cv2
from transformer_slr import make_model as TRANSFORMER
from dataloader_slr import loader #For SLR
from utils import path_data, Batch, greedy_decode
#Progress bar to visualize training progress
import progressbar
import matplotlib.pyplot as plt
#Evaluation metrics
from bleu import compute_bleu
from rouge import rouge
from beam_search import beam_decode
from nltk.translate.bleu_score import sentence_bleu, corpus_bleu
#https://pypi.org/project/py-rouge/
#import rouge
#Lavenshtein distance (WER)
from jiwer import wer
import tensorflow.compat.v1 as tf
tf.enable_eager_execution()
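# TensorFlow is used here only for its CTC beam-search decoder; eager mode lets its sparse output be converted straight to numpy during evaluation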
###
# Arg parsing
##############
parser = argparse.ArgumentParser(description='Evaluation')
parser.add_argument('--data', type=str, default=os.path.join('data','phoenix-2014.v3', 'phoenix2014-release','phoenix-2014-multisigner'),
help='location of the test data corpus')
parser.add_argument('--model_path', type=str, default=os.path.join("EXPERIMENTATIONS"),
help='location of the entire trained model')
parser.add_argument('--lookup_table', type=str, default=os.path.join('data','slr_lookup.txt'),
help='location of the words lookup table')
parser.add_argument('--rescale', type=int, default=224,
                    help='rescale data images. NOTE: use the same image size as during training or else you get worse results.')
#Put to 0 to avoid memory segementation fault
parser.add_argument('--num_workers', type=int, default=0,
help='NOTE: put num of workers to 0 to avoid memory saturation.')
parser.add_argument('--image_type', type=str, default='rgb',
help='Evaluate on rgb/grayscale images')
parser.add_argument('--show_sample', action='store_true',
help='Show a sample a preprocessed data.')
parser.add_argument('--batch_size', type=int, default=1,
help='size of one minibatch')
parser.add_argument('--save', action='store_true',
help='save the results of the evaluation')
parser.add_argument('--hand_query', action='store_true',
help='Set hand cropped image as a query for transformer network.')
parser.add_argument('--data_stats', type=str, default='data_stats.pt',
help='Normalize images using the dataset stats (mean/std).')
parser.add_argument('--hand_stats', type=str, default='hand_stats.pt',
help='Normalize images using the dataset stats (mean/std).')
parser.add_argument('--emb_type', type=str, default='2d',
help='Type of image embeddings 2d or 3d.')
parser.add_argument('--emb_network', type=str, default='mb2',
help='Image embeddings network: mb2/mb2-ssd/rcnn')
parser.add_argument('--decoding', type=str, default='greedy',
help='Decoding method (greedy/beam).')
parser.add_argument('--n_beam', type=int, default=4,
                    help='Beam width when using beam search for decoding.')
parser.add_argument('--rel_window', type=int, default=None)
parser.add_argument('--bleu', action='store_true',
help='Use bleu for evaluation.')
parser.add_argument('--rouge', action='store_true',
help='Use rouge for evaluation.')
parser.add_argument('--txt', type=str, default=None,
help='Run evaluation from txt files.')
parser.add_argument('--heatmap', action='store_true',
help='produce heatmap.')
#----------------------------------------------------------------------------------------
#Same seed for reproducibility)
parser.add_argument('--seed', type=int, default=1111, help='random seed')
#Save folder with the date
start_date = dt.datetime.now().strftime("%Y-%m-%d-%H.%M")
print ("Start Time: "+start_date)
args = parser.parse_args()
#Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
#experiment_path = PureWindowsPath('EXPERIMENTATIONS\\' + start_date)
save_path = os.path.join('EVALUATION', start_date)
# Creates an experimental directory and dumps all the args to a text file
if(args.save):
if(os.path.exists(save_path)):
print('Evaluation already exists..')
else:
os.makedirs(save_path)
print ("\nPutting log in EVALUATION/%s"%start_date)
#Dump all configurations/hyperparameters in txt
with open (os.path.join(save_path,'eval_config.txt'), 'w') as f:
f.write('Experimentation done at: '+ str(start_date)+' with current configurations:\n')
for arg in vars(args):
f.write(arg+' : '+str(getattr(args, arg))+'\n')
#-------------------------------------------------------------------------------
train_path, valid_path, test_path = path_data(data_path=args.data, task='SLR', hand_query=args.hand_query)
#Load stats
if(args.data_stats):
args.data_stats = torch.load(args.data_stats, map_location=torch.device('cpu'))
if(args.hand_stats):
args.hand_stats = torch.load(args.hand_stats, map_location=torch.device('cpu'))
if (args.image_type == 'rgb'):
channels = 3
elif(args.image_type == 'grayscale'):
channels = 1
else:
print('Image type is not supported!')
quit(0)
#No data augmentation for test data
test_dataloader, test_size = loader(csv_file=test_path[1],
root_dir=test_path[0],
lookup=args.lookup_table,
rescale = args.rescale,
augmentation = None,
batch_size = args.batch_size,
num_workers = args.num_workers,
show_sample = args.show_sample,
istrain=False,
hand_dir=test_path[2],
data_stats=args.data_stats,
hand_stats=args.hand_stats,
channels=channels
)
#No data augmentation for test data
valid_dataloader, valid_size = loader(csv_file=valid_path[1],
root_dir=valid_path[0],
lookup=args.lookup_table,
rescale = args.rescale,
augmentation = None,
batch_size = args.batch_size,
num_workers = args.num_workers,
show_sample = args.show_sample,
istrain=False,
hand_dir=valid_path[2],
data_stats=args.data_stats,
hand_stats=args.hand_stats,
channels=channels
)
print('Test dataset size: '+str(test_size))
print('Valid dataset size: '+str(valid_size))
#Retrieve size of target vocab
with open(args.lookup_table, 'rb') as pickle_file:
vocab = pickle.load(pickle_file)
vocab_size = len(vocab)
#Switch keys and values of vocab to easily look for words
vocab = {y:x for x,y in vocab.items()}
print('vocabulary size:' + str(vocab_size))
#Loop through test and val sets
dataloaders = [valid_dataloader, test_dataloader]
sizes = [valid_size, test_size]
dataset = ['valid', 'test']
#Blank token index
blank_index = 1232
#-------------------------------------------------------------------------------
#Run evaluation from txt files
if(args.txt):
for d in range(len(dataset)):
print(dataset[d])
#Hypotheses file
with open(os.path.join(args.txt,'simpl_translations_'+dataset[d]+'.txt')) as f:
hyp = f.read().splitlines()
#Reference file
with open(os.path.join(args.txt,'simpl_references_'+dataset[d]+'.txt')) as f:
ref = f.read().splitlines()
assert len(hyp) == len(ref)
total_wer_score = 0.0
count = 0
#Measuring WER
for i in range(len(ref)):
total_wer_score += wer(ref[i], hyp[i], standardize=True)
count += 1
print(total_wer_score/count)
quit(0)
#-------------------------------------------------------------------------------
#Run on GPU
if torch.cuda.is_available():
print("Using GPU")
device = torch.device("cuda:0")
else:
#Run on CPU
print("WARNING: You are about to run on cpu, and this will likely run out \
of memory. \n You can try setting batch_size=1 to reduce memory usage")
device = torch.device("cpu")
#-------------------------------------------------------------------------------
#Load the whole model with state dict
model = TRANSFORMER(tgt_vocab=vocab_size, n_stacks=2, n_units=1280,
n_heads=10, d_ff=2048, dropout=0.3, image_size=224,
emb_type='2d', emb_network='mb2')
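# NOTE: these hyper-parameters must match the ones used at training time, otherwise load_state_dict() below cannot map the saved weights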
model.load_state_dict(torch.load(args.model_path)['state_dict'])
#Load entire model w/ weights
#model = torch.load(args.model_path, map_location=device)
model = model.to(device)
print("Model successfully loaded")
model.eval() # Set model to evaluate mode
print ("Evaluating..")
start_time = time.time()
for d in range(len(sizes)):
dataloader = dataloaders[d]
size = sizes[d]
print(dataset[d])
#For progress bar
bar = progressbar.ProgressBar(maxval=size, widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
bar.start()
i = 0
count = 0
#Save translation and reference sentences
translation_corpus = []
reference_corpus = []
total_wer_score = 0.0
count = 0
#Loop over minibatches
for step, (x, x_lengths, y, y_lengths, hand_regions, hand_lengths, name) in enumerate(dataloader):
#Update progress bar with every iter
i += len(x)
bar.update(i)
if(args.hand_query):
hand_regions = hand_regions.to(device)
else:
hand_regions = None
y = torch.from_numpy(y).to(device)
x = x.to(device)
batch = Batch(x_lengths, y_lengths, hand_lengths, trg=None, DEVICE=device, emb_type=args.emb_type, fixed_padding=None, rel_window=args.rel_window)
#with torch.no_grad():
output, output_context, output_hand = model.forward(x, batch.src_mask, batch.rel_mask, hand_regions)
#CTC loss expects (Seq, batch, vocab)
if(args.hand_query):
output = output.transpose(0,1)
output_context = output_context.transpose(0,1)
output_hand = output_hand.transpose(0,1)
else:
output = output_context.transpose(0,1)
#Predicted words with highest prob
_, pred = torch.max(output, dim=-1)
#Remove <BLANK>
#pred = pred[pred != blank_index]
if(args.heatmap):
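            # Grad-CAM-style visualisation: back-propagate the score of the gloss predicted at (hard-coded) frame 17, then weight the backbone activations by their channel-pooled gradients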
output[17, 0, pred[17].item()].backward(retain_graph=True)
# pull the gradients out of the images feature map
#They should have same shape
gradients = model.get_activations_gradient()
activations = model.get_activations().detach()
# pool the gradients across the channels
pooled_gradients = torch.mean(gradients, dim=[2, 3])
# weight the channels by corresponding gradients
for i in range(57):
for j in range(1280):
activations[i, j, :, :] = activations[i, j, :, :] * pooled_gradients[i, j]
# average the channels of the activations
heatmap = torch.mean(activations, dim=1).squeeze()
maxi = torch.max(heatmap)
# relu on top of the heatmap
heatmap = np.maximum(heatmap.cpu().numpy(), 0)
# normalize the heatmap
heatmap /= maxi.cpu().numpy()
for i in range(heatmap.shape[0]):
#Get image
img = cv2.imread(os.path.join(args.data, 'keyfeatures/fullFrame-210x260px/dev/10January_2011_Monday_tagesschau_default-7', 'images'+'{:04d}'.format(i+1)+'.png'))
img = cv2.resize(img, (args.rescale, args.rescale))
h = heatmap[i]
h = cv2.resize(h, (args.rescale, args.rescale))
h = np.uint8(255 * h)
h = cv2.applyColorMap(h, cv2.COLORMAP_JET)
assert img.shape == h.shape
#h = h*0.4 + img
h = cv2.addWeighted(h, 0.5, img, 0.8, 0)
cv2.imwrite("samples/heatmap"+str(i)+".png", h)
x_lengths = torch.IntTensor(x_lengths)
y_lengths = torch.IntTensor(y_lengths)
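        # decode the per-frame output distribution with TensorFlow's CTC beam search (beam width 10, best path only)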
decodes, _ = tf.nn.ctc_beam_search_decoder(inputs=output.cpu().detach().numpy(),
sequence_length=x_lengths.cpu().detach().numpy(), merge_repeated=False, beam_width=10, top_paths=1)
pred = decodes[0]
pred = tf.sparse.to_dense(pred).numpy()
#Loop over translations and references
for j in range(len(y)):
ys = y[j, :y_lengths[j]]
p = pred[j]
#Remove <UNK> token
p = p[p != 0]
ys = ys[ys != 0]
hyp = (' '.join([vocab[x.item()] for x in p]))
gt = (' '.join([vocab[x.item()] for x in ys]))
total_wer_score += wer(gt, hyp, standardize=True)
count += 1
#Convert index tokens to words
translation_corpus.append(hyp)
reference_corpus.append(gt)
#Free some memory
#NOTE: this helps alot in avoiding cuda out of memory
del x, y, batch
assert len(translation_corpus) == len(reference_corpus)
print('WER score:'+str(total_wer_score/count))
if(args.save):
#Save results in txt files
with open(os.path.join(save_path, 'translations_'+dataset[d]+'.txt') ,'w') as trans_file:
trans_file.write("\n".join(translation_corpus))
with open(os.path.join(save_path, 'references_'+dataset[d]+'.txt'), 'w') as ref_file:
ref_file.write("\n".join(reference_corpus))
if(args.bleu):
#Default return
#NOTE: bleu score of camgoz results is slightly better than ntlk -> use it instead
#bleu_4 = corpus_bleu(reference_corpus, translation_corpus)
        bleu_4, _, _, _, _, _ = compute_bleu(reference_corpus, translation_corpus, max_order=4)
        #weights = (1.0/1.0, )
        bleu_1, _, _, _, _, _ = compute_bleu(reference_corpus, translation_corpus, max_order=1)
        #weights = (1.0/2.0, 1.0/2.0, )
        #bleu_2 = corpus_bleu(reference_corpus, translation_corpus, weights)
        bleu_2, _, _, _, _, _ = compute_bleu(reference_corpus, translation_corpus, max_order=2)
        #weights = (1.0/3.0, 1.0/3.0, 1.0/3.0,)
        #bleu_3 = corpus_bleu(reference_corpus, translation_corpus, weights)
        bleu_3, _, _, _, _, _ = compute_bleu(reference_corpus, translation_corpus, max_order=3)
log_str = 'Bleu Evaluation: ' + '\t' \
+ 'Bleu_1: ' + str(bleu_1) + '\t' \
+ 'Bleu_2: ' + str(bleu_2) + '\t' \
+ 'Bleu_3: ' + str(bleu_3) + '\t' \
+ 'Bleu_4: ' + str(bleu_4)
print(log_str)
if(args.save):
#Save evaluation results in a log file
            with open(os.path.join(save_path, 'log.txt'), 'a') as f:
f.write(log_str+'\n')
if(args.rouge):
reference_corpus = [" ".join(reference) for reference in reference_corpus]
translation_corpus = [" ".join(hypothesis) for hypothesis in translation_corpus]
score = rouge(translation_corpus, reference_corpus)
print(score["rouge_l/f_score"])
log_str = 'Rouge Evaluation: ' + '\t'
print(log_str)
if(args.save):
#Save evaluation results in a log file
            with open(os.path.join(save_path, 'log.txt'), 'a') as f:
f.write(log_str+'\n')
|
[
"argparse.ArgumentParser",
"progressbar.Percentage",
"torch.device",
"os.path.join",
"_pickle.load",
"torch.load",
"os.path.exists",
"progressbar.Bar",
"dataloader_slr.loader",
"tensorflow.compat.v1.sparse.to_dense",
"utils.Batch",
"datetime.datetime.now",
"cv2.resize",
"torch.mean",
"transformer_slr.make_model",
"numpy.uint8",
"tensorflow.compat.v1.enable_eager_execution",
"torch.manual_seed",
"cv2.addWeighted",
"bleu.compute_bleu",
"torch.cuda.is_available",
"torch.max",
"torch.IntTensor",
"cv2.applyColorMap",
"utils.path_data",
"torch.from_numpy",
"rouge.rouge",
"os.makedirs",
"time.time",
"jiwer.wer"
] |
[((997, 1024), 'tensorflow.compat.v1.enable_eager_execution', 'tf.enable_eager_execution', ([], {}), '()\n', (1022, 1024), True, 'import tensorflow.compat.v1 as tf\n'), ((1070, 1119), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluation"""'}), "(description='Evaluation')\n", (1093, 1119), False, 'import argparse\n'), ((4318, 4346), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (4335, 4346), False, 'import torch\n'), ((4430, 4468), 'os.path.join', 'os.path.join', (['"""EVALUATION"""', 'start_date'], {}), "('EVALUATION', start_date)\n", (4442, 4468), False, 'import os\n'), ((5166, 5236), 'utils.path_data', 'path_data', ([], {'data_path': 'args.data', 'task': '"""SLR"""', 'hand_query': 'args.hand_query'}), "(data_path=args.data, task='SLR', hand_query=args.hand_query)\n", (5175, 5236), False, 'from utils import path_data, Batch, greedy_decode\n'), ((5692, 6030), 'dataloader_slr.loader', 'loader', ([], {'csv_file': 'test_path[1]', 'root_dir': 'test_path[0]', 'lookup': 'args.lookup_table', 'rescale': 'args.rescale', 'augmentation': 'None', 'batch_size': 'args.batch_size', 'num_workers': 'args.num_workers', 'show_sample': 'args.show_sample', 'istrain': '(False)', 'hand_dir': 'test_path[2]', 'data_stats': 'args.data_stats', 'hand_stats': 'args.hand_stats', 'channels': 'channels'}), '(csv_file=test_path[1], root_dir=test_path[0], lookup=args.\n lookup_table, rescale=args.rescale, augmentation=None, batch_size=args.\n batch_size, num_workers=args.num_workers, show_sample=args.show_sample,\n istrain=False, hand_dir=test_path[2], data_stats=args.data_stats,\n hand_stats=args.hand_stats, channels=channels)\n', (5698, 6030), False, 'from dataloader_slr import loader\n'), ((6300, 6641), 'dataloader_slr.loader', 'loader', ([], {'csv_file': 'valid_path[1]', 'root_dir': 'valid_path[0]', 'lookup': 'args.lookup_table', 'rescale': 'args.rescale', 'augmentation': 'None', 'batch_size': 'args.batch_size', 'num_workers': 'args.num_workers', 'show_sample': 'args.show_sample', 'istrain': '(False)', 'hand_dir': 'valid_path[2]', 'data_stats': 'args.data_stats', 'hand_stats': 'args.hand_stats', 'channels': 'channels'}), '(csv_file=valid_path[1], root_dir=valid_path[0], lookup=args.\n lookup_table, rescale=args.rescale, augmentation=None, batch_size=args.\n batch_size, num_workers=args.num_workers, show_sample=args.show_sample,\n istrain=False, hand_dir=valid_path[2], data_stats=args.data_stats,\n hand_stats=args.hand_stats, channels=channels)\n', (6306, 6641), False, 'from dataloader_slr import loader\n'), ((8284, 8309), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8307, 8309), False, 'import torch\n'), ((8710, 8859), 'transformer_slr.make_model', 'TRANSFORMER', ([], {'tgt_vocab': 'vocab_size', 'n_stacks': '(2)', 'n_units': '(1280)', 'n_heads': '(10)', 'd_ff': '(2048)', 'dropout': '(0.3)', 'image_size': '(224)', 'emb_type': '"""2d"""', 'emb_network': '"""mb2"""'}), "(tgt_vocab=vocab_size, n_stacks=2, n_units=1280, n_heads=10,\n d_ff=2048, dropout=0.3, image_size=224, emb_type='2d', emb_network='mb2')\n", (8721, 8859), True, 'from transformer_slr import make_model as TRANSFORMER\n'), ((9236, 9247), 'time.time', 'time.time', ([], {}), '()\n', (9245, 9247), False, 'import time\n'), ((4566, 4591), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (4580, 4591), False, 'import os\n'), ((7028, 7052), '_pickle.load', 'pickle.load', (['pickle_file'], {}), '(pickle_file)\n', (7039, 7052), True, 'import _pickle as pickle\n'), ((8347, 8369), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (8359, 8369), False, 'import torch\n'), ((8559, 8578), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (8571, 8578), False, 'import torch\n'), ((1170, 1264), 'os.path.join', 'os.path.join', (['"""data"""', '"""phoenix-2014.v3"""', '"""phoenix2014-release"""', '"""phoenix-2014-multisigner"""'], {}), "('data', 'phoenix-2014.v3', 'phoenix2014-release',\n 'phoenix-2014-multisigner')\n", (1182, 1264), False, 'import os\n'), ((1376, 1408), 'os.path.join', 'os.path.join', (['"""EXPERIMENTATIONS"""'], {}), "('EXPERIMENTATIONS')\n", (1388, 1408), False, 'import os\n'), ((1533, 1571), 'os.path.join', 'os.path.join', (['"""data"""', '"""slr_lookup.txt"""'], {}), "('data', 'slr_lookup.txt')\n", (1545, 1571), False, 'import os\n'), ((4159, 4176), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (4174, 4176), True, 'import datetime as dt\n'), ((4657, 4679), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (4668, 4679), False, 'import os\n'), ((8961, 8988), 'torch.load', 'torch.load', (['args.model_path'], {}), '(args.model_path)\n', (8971, 8988), False, 'import torch\n'), ((10112, 10255), 'utils.Batch', 'Batch', (['x_lengths', 'y_lengths', 'hand_lengths'], {'trg': 'None', 'DEVICE': 'device', 'emb_type': 'args.emb_type', 'fixed_padding': 'None', 'rel_window': 'args.rel_window'}), '(x_lengths, y_lengths, hand_lengths, trg=None, DEVICE=device, emb_type\n =args.emb_type, fixed_padding=None, rel_window=args.rel_window)\n', (10117, 10255), False, 'from utils import path_data, Batch, greedy_decode\n'), ((10751, 10776), 'torch.max', 'torch.max', (['output'], {'dim': '(-1)'}), '(output, dim=-1)\n', (10760, 10776), False, 'import torch\n'), ((12578, 12604), 'torch.IntTensor', 'torch.IntTensor', (['x_lengths'], {}), '(x_lengths)\n', (12593, 12604), False, 'import torch\n'), ((12625, 12651), 'torch.IntTensor', 'torch.IntTensor', (['y_lengths'], {}), '(y_lengths)\n', (12640, 12651), False, 'import torch\n'), ((14334, 14398), 'bleu.compute_bleu', 'compute_bleu', (['references_corpus', 'translation_corpus'], {'max_order': '(4)'}), '(references_corpus, translation_corpus, max_order=4)\n', (14346, 14398), False, 'from bleu import compute_bleu\n'), ((14463, 14527), 'bleu.compute_bleu', 'compute_bleu', (['references_corpus', 'translation_corpus'], {'max_order': '(1)'}), '(references_corpus, translation_corpus, max_order=1)\n', (14475, 14527), False, 'from bleu import compute_bleu\n'), ((14678, 14742), 'bleu.compute_bleu', 'compute_bleu', (['references_corpus', 'translation_corpus'], {'max_order': '(2)'}), '(references_corpus, translation_corpus, max_order=2)\n', (14690, 14742), False, 'from bleu import compute_bleu\n'), ((14901, 14965), 'bleu.compute_bleu', 'compute_bleu', (['references_corpus', 'translation_corpus'], {'max_order': '(3)'}), '(references_corpus, translation_corpus, max_order=3)\n', (14913, 14965), False, 'from bleu import compute_bleu\n'), ((15603, 15646), 'rouge.rouge', 'rouge', (['translation_corpus', 'reference_corpus'], {}), '(translation_corpus, reference_corpus)\n', (15608, 15646), False, 'from rouge import rouge\n'), ((4805, 4847), 'os.path.join', 'os.path.join', (['save_path', '"""eval_config.txt"""'], {}), "(save_path, 'eval_config.txt')\n", (4817, 4847), False, 'import os\n'), ((5335, 5354), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (5347, 5354), False, 'import torch\n'), ((5441, 5460), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (5453, 5460), False, 'import torch\n'), ((8072, 8109), 'jiwer.wer', 'wer', (['ref[i]', 'hyp[i]'], {'standardize': '(True)'}), '(ref[i], hyp[i], standardize=True)\n', (8075, 8109), False, 'from jiwer import wer\n'), ((11248, 11281), 'torch.mean', 'torch.mean', (['gradients'], {'dim': '[2, 3]'}), '(gradients, dim=[2, 3])\n', (11258, 11281), False, 'import torch\n'), ((11645, 11663), 'torch.max', 'torch.max', (['heatmap'], {}), '(heatmap)\n', (11654, 11663), False, 'import torch\n'), ((13327, 13357), 'jiwer.wer', 'wer', (['gt', 'hyp'], {'standardize': '(True)'}), '(gt, hyp, standardize=True)\n', (13330, 13357), False, 'from jiwer import wer\n'), ((7637, 7704), 'os.path.join', 'os.path.join', (['args.txt', "('simpl_translations_' + dataset[d] + '.txt')"], {}), "(args.txt, 'simpl_translations_' + dataset[d] + '.txt')\n", (7649, 7704), False, 'import os\n'), ((7790, 7855), 'os.path.join', 'os.path.join', (['args.txt', "('simpl_references_' + dataset[d] + '.txt')"], {}), "(args.txt, 'simpl_references_' + dataset[d] + '.txt')\n", (7802, 7855), False, 'import os\n'), ((9432, 9462), 'progressbar.Bar', 'progressbar.Bar', (['"""="""', '"""["""', '"""]"""'], {}), "('=', '[', ']')\n", (9447, 9462), False, 'import progressbar\n'), ((9469, 9493), 'progressbar.Percentage', 'progressbar.Percentage', ([], {}), '()\n', (9491, 9493), False, 'import progressbar\n'), ((10039, 10058), 'torch.from_numpy', 'torch.from_numpy', (['y'], {}), '(y)\n', (10055, 10058), False, 'import torch\n'), ((12118, 12163), 'cv2.resize', 'cv2.resize', (['img', '(args.rescale, args.rescale)'], {}), '(img, (args.rescale, args.rescale))\n', (12128, 12163), False, 'import cv2\n'), ((12215, 12258), 'cv2.resize', 'cv2.resize', (['h', '(args.rescale, args.rescale)'], {}), '(h, (args.rescale, args.rescale))\n', (12225, 12258), False, 'import cv2\n'), ((12279, 12296), 'numpy.uint8', 'np.uint8', (['(255 * h)'], {}), '(255 * h)\n', (12287, 12296), True, 'import numpy as np\n'), ((12317, 12355), 'cv2.applyColorMap', 'cv2.applyColorMap', (['h', 'cv2.COLORMAP_JET'], {}), '(h, cv2.COLORMAP_JET)\n', (12334, 12355), False, 'import cv2\n'), ((12455, 12491), 'cv2.addWeighted', 'cv2.addWeighted', (['h', '(0.5)', 'img', '(0.8)', '(0)'], {}), '(h, 0.5, img, 0.8, 0)\n', (12470, 12491), False, 'import cv2\n'), ((12913, 12937), 'tensorflow.compat.v1.sparse.to_dense', 'tf.sparse.to_dense', (['pred'], {}), '(pred)\n', (12931, 12937), True, 'import tensorflow.compat.v1 as tf\n'), ((13807, 13869), 'os.path.join', 'os.path.join', (['save_path', "('translations_' + dataset[d] + '.txt')"], {}), "(save_path, 'translations_' + dataset[d] + '.txt')\n", (13819, 13869), False, 'import os\n'), ((13966, 14026), 'os.path.join', 'os.path.join', (['save_path', "('references_' + dataset[d] + '.txt')"], {}), "(save_path, 'references_' + dataset[d] + '.txt')\n", (13978, 14026), False, 'import os\n'), ((11585, 11615), 'torch.mean', 'torch.mean', (['activations'], {'dim': '(1)'}), '(activations, dim=1)\n', (11595, 11615), False, 'import torch\n'), ((15302, 15341), 'os.path.join', 'os.path.join', (['args.save_path', '"""log.txt"""'], {}), "(args.save_path, 'log.txt')\n", (15314, 15341), False, 'import os\n'), ((15854, 15893), 'os.path.join', 'os.path.join', (['args.save_path', '"""log.txt"""'], {}), "(args.save_path, 'log.txt')\n", (15866, 15893), False, 'import os\n')]
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
import paddle
import pandas as pd
import yaml
from paddle import nn
from paddle.io import DataLoader
from sklearn.metrics import classification_report
from sklearn.metrics import precision_recall_fscore_support
from yacs.config import CfgNode
from paddlespeech.text.models.ernie_linear import ErnieLinear
from paddlespeech.text.models.ernie_linear import PuncDataset
from paddlespeech.text.models.ernie_linear import PuncDatasetFromErnieTokenizer
DefinedClassifier = {
'ErnieLinear': ErnieLinear,
}
DefinedLoss = {
"ce": nn.CrossEntropyLoss,
}
DefinedDataset = {
'Punc': PuncDataset,
'Ernie': PuncDatasetFromErnieTokenizer,
}
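# Per-class (COMMA, PERIOD, QUESTION) and macro-averaged precision/recall/F1 for punctuation prediction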
def evaluation(y_pred, y_test):
precision, recall, f1, _ = precision_recall_fscore_support(
y_test, y_pred, average=None, labels=[1, 2, 3])
overall = precision_recall_fscore_support(
y_test, y_pred, average='macro', labels=[1, 2, 3])
result = pd.DataFrame(
np.array([precision, recall, f1]),
columns=list(['O', 'COMMA', 'PERIOD', 'QUESTION'])[1:],
index=['Precision', 'Recall', 'F1'])
result['OVERALL'] = overall[:3]
return result
def test(args):
with open(args.config) as f:
config = CfgNode(yaml.safe_load(f))
print("========Args========")
print(yaml.safe_dump(vars(args)))
print("========Config========")
print(config)
test_dataset = DefinedDataset[config["dataset_type"]](
train_path=config["test_path"], **config["data_params"])
test_loader = DataLoader(
test_dataset,
batch_size=config.batch_size,
shuffle=False,
drop_last=False)
model = DefinedClassifier[config["model_type"]](**config["model"])
state_dict = paddle.load(args.checkpoint)
model.set_state_dict(state_dict["main_params"])
model.eval()
punc_list = []
for i in range(len(test_loader.dataset.id2punc)):
punc_list.append(test_loader.dataset.id2punc[i])
test_total_label = []
test_total_predict = []
for i, batch in enumerate(test_loader):
input, label = batch
label = paddle.reshape(label, shape=[-1])
y, logit = model(input)
pred = paddle.argmax(logit, axis=1)
test_total_label.extend(label.numpy().tolist())
test_total_predict.extend(pred.numpy().tolist())
t = classification_report(
test_total_label, test_total_predict, target_names=punc_list)
print(t)
t2 = evaluation(test_total_label, test_total_predict)
print('=========================================================')
print(t2)
def main():
# parse args and config and redirect to test
parser = argparse.ArgumentParser(description="Test a ErnieLinear model.")
parser.add_argument("--config", type=str, help="ErnieLinear config file.")
parser.add_argument("--checkpoint", type=str, help="snapshot to load.")
parser.add_argument(
"--ngpu", type=int, default=1, help="if ngpu=0, use cpu.")
args = parser.parse_args()
if args.ngpu == 0:
paddle.set_device("cpu")
elif args.ngpu > 0:
paddle.set_device("gpu")
else:
print("ngpu should >= 0 !")
test(args)
if __name__ == "__main__":
main()
|
[
"argparse.ArgumentParser",
"paddle.load",
"paddle.reshape",
"paddle.argmax",
"sklearn.metrics.classification_report",
"numpy.array",
"yaml.safe_load",
"paddle.set_device",
"paddle.io.DataLoader",
"sklearn.metrics.precision_recall_fscore_support"
] |
[((1357, 1436), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['y_test', 'y_pred'], {'average': 'None', 'labels': '[1, 2, 3]'}), '(y_test, y_pred, average=None, labels=[1, 2, 3])\n', (1388, 1436), False, 'from sklearn.metrics import precision_recall_fscore_support\n'), ((1460, 1546), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['y_test', 'y_pred'], {'average': '"""macro"""', 'labels': '[1, 2, 3]'}), "(y_test, y_pred, average='macro', labels=[1,\n 2, 3])\n", (1491, 1546), False, 'from sklearn.metrics import precision_recall_fscore_support\n'), ((2149, 2239), 'paddle.io.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': 'config.batch_size', 'shuffle': '(False)', 'drop_last': '(False)'}), '(test_dataset, batch_size=config.batch_size, shuffle=False,\n drop_last=False)\n', (2159, 2239), False, 'from paddle.io import DataLoader\n'), ((2357, 2385), 'paddle.load', 'paddle.load', (['args.checkpoint'], {}), '(args.checkpoint)\n', (2368, 2385), False, 'import paddle\n'), ((2962, 3050), 'sklearn.metrics.classification_report', 'classification_report', (['test_total_label', 'test_total_predict'], {'target_names': 'punc_list'}), '(test_total_label, test_total_predict, target_names=\n punc_list)\n', (2983, 3050), False, 'from sklearn.metrics import classification_report\n'), ((3291, 3355), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Test a ErnieLinear model."""'}), "(description='Test a ErnieLinear model.')\n", (3314, 3355), False, 'import argparse\n'), ((1587, 1620), 'numpy.array', 'np.array', (['[precision, recall, f1]'], {}), '([precision, recall, f1])\n', (1595, 1620), True, 'import numpy as np\n'), ((2731, 2764), 'paddle.reshape', 'paddle.reshape', (['label'], {'shape': '[-1]'}), '(label, shape=[-1])\n', (2745, 2764), False, 'import paddle\n'), ((2812, 2840), 'paddle.argmax', 'paddle.argmax', (['logit'], {'axis': '(1)'}), '(logit, axis=1)\n', (2825, 2840), False, 'import paddle\n'), ((3667, 3691), 'paddle.set_device', 'paddle.set_device', (['"""cpu"""'], {}), "('cpu')\n", (3684, 3691), False, 'import paddle\n'), ((1861, 1878), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (1875, 1878), False, 'import yaml\n'), ((3724, 3748), 'paddle.set_device', 'paddle.set_device', (['"""gpu"""'], {}), "('gpu')\n", (3741, 3748), False, 'import paddle\n')]
|
import os
import glob
from segmentation import *
from utils.image_io import prepare_image
import numpy as np
from cv2.ximgproc import guidedFilter
from matplotlib import pyplot as plt
from _utils import *
image_dir = '../double_dip/images'
prior_hint_dir_fg = '../double_dip/saliency/output_fg'
prior_hint_dir_bg = '../double_dip/saliency/output_bg'
saliency_dir_fg = '../double_dip/saliency/output_fg'
saliency_dir_bg = '../double_dip/saliency/output_bg'
def _clear_output():
os.system('rm -rf output/*')
def _make_dir(image_name):
os.system('mkdir output/{}'.format(image_name))
def _copy_output_to_dir(image_name):
os.system('mv output/*{}* output/{}/'.format(image_name, image_name))
def _plot_learning_curve(image_name, s_obj, title):
fig, ax1 = plt.subplots()
color = 'tab:red'
ax1.set_xlabel('epochs')
ax1.set_ylabel('loss', color=color)
ax1.plot(s_obj.learning_curve, color=color)
ax1.tick_params(axis='y', labelcolor=color)
ax2 = ax1.twinx()
color = 'tab:blue'
ax2.set_ylabel('PSNR', color=color)
ax2.plot(s_obj.psnr_learning_curve, color=color)
ax2.tick_params(axis='y', labelcolor=color)
fig.tight_layout()
plt.title(title)
fig.savefig('output/{}_{}.jpg'.format(
image_name.split('.')[0], '_'.join(title.split(' '))))
def _compare_learning_curve(image_name, s_obj_1, s_obj_2, title, legend):
fig = plt.figure()
plt.plot(s_obj_1.psnr_learning_curve)
plt.plot(s_obj_2.psnr_learning_curve)
plt.legend(legend)
plt.title(title)
fig.savefig('output/{}_{}.jpg'.format(
image_name.split('.')[0], '_'.join(title.split(' '))))
_clear_output()
for input_path in glob.glob(image_dir + '/*'):
image_name = os.path.basename(input_path).split('.')[0]
print('----> started training on image << {} >>'.format(image_name))
if image_name == 'eagle':
continue ###!!!
_make_dir(image_name)
im = prepare_image(os.path.join(image_dir, image_name + '.jpg'))
orig_fg = prepare_image(os.path.join(saliency_dir_fg, image_name + '.jpg'))
orig_bg = prepare_image(os.path.join(saliency_dir_bg, image_name + '.jpg'))
prior_hint_name = image_name.split('.')[0] + '_cluster_hint' + '.jpg'
prior_fg = prepare_image(os.path.join(prior_hint_dir_fg, prior_hint_name))
prior_bg = prepare_image(os.path.join(prior_hint_dir_bg, prior_hint_name))
# Configs
stage_1_iter = 500
stage_2_iter = 500
show_every = 200
# Original training
s = Segmentation(
"{}_orig".format(image_name),
im,
bg_hint=orig_bg,
fg_hint=orig_fg,
plot_during_training=True,
show_every=show_every,
first_step_iter_num=stage_1_iter,
second_step_iter_num=stage_2_iter)
s.optimize()
s.finalize()
_plot_learning_curve(image_name, s, 'original learning curve')
# Prior-based hint training
s_prior = Segmentation(
"{}_prior".format(image_name),
im,
bg_hint=prior_bg,
fg_hint=prior_fg,
plot_during_training=True,
show_every=show_every,
first_step_iter_num=stage_1_iter,
second_step_iter_num=stage_2_iter)
s_prior.optimize()
s_prior.finalize()
_plot_learning_curve(image_name, s_prior, 'prior hint learning curve')
_compare_learning_curve(
image_name,
s,
s_prior,
'orig vs prior hint learning curve',
['orig', 'with_prior'])
# Debug mask
def _fix_mask(src_image, learned_mask):
"""
fixing the masks using soft matting
:return:
"""
new_mask = guidedFilter(
src_image.transpose(1, 2, 0).astype(np.float32),
learned_mask[0].astype(np.float32),
radius=7,
eps=1e-4)
def to_bin(x):
v = np.zeros_like(x)
v[x > 0.5] = 1
return v
return to_bin(np.array([new_mask]))
src_image = s_prior.images[0]
learned_mask_np = torch_to_np(s_prior.mask_net_outputs[0])
fixed_mask_np = s_prior.fixed_masks[0]
better_mask_np = _fix_mask(src_image, learned_mask_np)
better_mask = np_to_pil(better_mask_np)
better_mask.save('output/{}_better_mask_prior.jpg'.format(image_name))
src_image = s.images[0]
learned_mask_np = torch_to_np(s.mask_net_outputs[0])
fixed_mask_np = s.fixed_masks[0]
better_mask_np = _fix_mask(src_image, learned_mask_np)
better_mask = np_to_pil(better_mask_np)
better_mask.save('output/{}_better_mask_orig.jpg'.format(image_name))
_copy_output_to_dir(image_name)
|
[
"matplotlib.pyplot.title",
"numpy.zeros_like",
"os.path.join",
"matplotlib.pyplot.plot",
"os.path.basename",
"matplotlib.pyplot.legend",
"os.system",
"matplotlib.pyplot.figure",
"numpy.array",
"glob.glob",
"matplotlib.pyplot.subplots"
] |
[((1711, 1738), 'glob.glob', 'glob.glob', (["(image_dir + '/*')"], {}), "(image_dir + '/*')\n", (1720, 1738), False, 'import glob\n'), ((486, 514), 'os.system', 'os.system', (['"""rm -rf output/*"""'], {}), "('rm -rf output/*')\n", (495, 514), False, 'import os\n'), ((787, 801), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (799, 801), True, 'from matplotlib import pyplot as plt\n'), ((1208, 1224), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1217, 1224), True, 'from matplotlib import pyplot as plt\n'), ((1416, 1428), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1426, 1428), True, 'from matplotlib import pyplot as plt\n'), ((1433, 1470), 'matplotlib.pyplot.plot', 'plt.plot', (['s_obj_1.psnr_learning_curve'], {}), '(s_obj_1.psnr_learning_curve)\n', (1441, 1470), True, 'from matplotlib import pyplot as plt\n'), ((1475, 1512), 'matplotlib.pyplot.plot', 'plt.plot', (['s_obj_2.psnr_learning_curve'], {}), '(s_obj_2.psnr_learning_curve)\n', (1483, 1512), True, 'from matplotlib import pyplot as plt\n'), ((1517, 1535), 'matplotlib.pyplot.legend', 'plt.legend', (['legend'], {}), '(legend)\n', (1527, 1535), True, 'from matplotlib import pyplot as plt\n'), ((1540, 1556), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1549, 1556), True, 'from matplotlib import pyplot as plt\n'), ((1996, 2040), 'os.path.join', 'os.path.join', (['image_dir', "(image_name + '.jpg')"], {}), "(image_dir, image_name + '.jpg')\n", (2008, 2040), False, 'import os\n'), ((2071, 2121), 'os.path.join', 'os.path.join', (['saliency_dir_fg', "(image_name + '.jpg')"], {}), "(saliency_dir_fg, image_name + '.jpg')\n", (2083, 2121), False, 'import os\n'), ((2151, 2201), 'os.path.join', 'os.path.join', (['saliency_dir_bg', "(image_name + '.jpg')"], {}), "(saliency_dir_bg, image_name + '.jpg')\n", (2163, 2201), False, 'import os\n'), ((2309, 2357), 'os.path.join', 'os.path.join', (['prior_hint_dir_fg', 'prior_hint_name'], {}), '(prior_hint_dir_fg, prior_hint_name)\n', (2321, 2357), False, 'import os\n'), ((2388, 2436), 'os.path.join', 'os.path.join', (['prior_hint_dir_bg', 'prior_hint_name'], {}), '(prior_hint_dir_bg, prior_hint_name)\n', (2400, 2436), False, 'import os\n'), ((3917, 3933), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (3930, 3933), True, 'import numpy as np\n'), ((4005, 4025), 'numpy.array', 'np.array', (['[new_mask]'], {}), '([new_mask])\n', (4013, 4025), True, 'import numpy as np\n'), ((1758, 1786), 'os.path.basename', 'os.path.basename', (['input_path'], {}), '(input_path)\n', (1774, 1786), False, 'import os\n')]
|
import json
import pickle
import sys
from datetime import datetime
from json import JSONEncoder
import numpy as np
import pandas as pd
import watchdog.events
import watchdog.observers
import time
import tensorflow as tf
import configparser
import os
from kafka import KafkaProducer
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from tensorflow.python.keras.models import load_model
sys.path.append(sys.path[0] + '/..')
from mmt.readerMMT import eventsToFeatures
import warnings
warnings.filterwarnings('ignore')
conf_path = './config.config'
max_message_size = 104857600 #bytes
# ndarray json encoder
class NumpyArrayEncoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return JSONEncoder.default(self, obj)
# Kafka Producer
producer = KafkaProducer(bootstrap_servers=['localhost:9092'],max_request_size=max_message_size) # value_serializer=serializer
# Watchdog part for monitoring creation of csv files from mmt
class Handler(watchdog.events.PatternMatchingEventHandler):
def __init__(self):
# Watch for new csvs from mmt probe folder (./server/csv/)
watchdog.events.PatternMatchingEventHandler.__init__(self, patterns=['*_1__data.csv'],
## Monitoring csv report files (_1_) whose names contain _data
ignore_directories=True, case_sensitive=False)
def on_closed(self, event):
print("Closing action on csv - % s." % event.src_path)
start_time = time.time()
mmt_csv = event.src_path
ips, x_features = eventsToFeatures(mmt_csv)
# if there are more IPs than grouped samples from the features (i.e. there is an IP but no features for it) -> we delete the IP from the IP list
ips = pd.merge(ips, x_features, how='inner', on=['ip.session_id', 'meta.direction'])
ips = ips[['ip.session_id', 'meta.direction', 'ip']]
x_features.drop(columns=['ip.session_id', 'meta.direction'], inplace=True)
print("Prediction - test")
# rescaling with scaler used with trained model
x_test = np.asarray(x_features, np.float32)
x_test = scaler.transform(x_test)
# prediction
y_pred = model.predict(x_test)
y_pred = np.transpose(np.round(y_pred)).reshape(y_pred.shape[0], )
preds = np.array([y_pred]).T
# adding predictions to features as last column
res = np.append(x_features, preds, axis=1)
res = np.append(ips, res, axis=1)
# print(res.nbytes)
# results json encoding
j_res = json.dumps(res, cls=NumpyArrayEncoder).encode('utf-8')
print(f'Producing message @ {datetime.now()} | Message') # = {str(j_res)}')
psend = producer.send('predictions', j_res)
# print(psend)
producer.flush()
# pd.DataFrame(res).to_csv(f"{predictions_dir}predictions_{classification_id}.csv", index=False,
# header=prediction_names)
print("--- %s seconds ---" % (time.time() - start_time))
y_pred = None
res = None
features = None
if __name__ == "__main__":
config = configparser.ConfigParser()
config.read(conf_path)
mmt_csv_dir = config['DEFAULT']['mmt_probe_csv_dir']
model_path = config['DEFAULT']['model_path']
scaler_path = config['DEFAULT']['scaler_path']
print(f'{mmt_csv_dir},{model_path},{scaler_path}')
if not mmt_csv_dir or not model_path or not scaler_path:
exit('Config does not contain all needed paths')
print("Loading model...")
model = load_model(model_path)
print("Model loaded.\nLoading scaler...")
scaler = pickle.load(open(scaler_path, 'rb')) # "./saved_scalers/scaler_2022-03-02_10-37-27.pkl"
print("Scaler loaded.")
res=np.ndarray(shape=(2,2), dtype=float, order='F')
j_res = json.dumps(res, cls=NumpyArrayEncoder).encode('utf-8')
print(f'Producing message @ {datetime.now()} | Message') # = {str(j_res)}')
asd = producer.send('messages', j_res)
# asd = producer.send('messages', 'j_res')
print(asd)
producer.flush()
event_handler = Handler()
observer = watchdog.observers.Observer()
print("Starting watchdog.")
observer.schedule(event_handler, path=mmt_csv_dir, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
[
"sys.path.append",
"tensorflow.python.keras.models.load_model",
"warnings.filterwarnings",
"pandas.merge",
"numpy.asarray",
"kafka.KafkaProducer",
"json.dumps",
"time.time",
"time.sleep",
"numpy.append",
"datetime.datetime.now",
"tensorflow.compat.v1.logging.set_verbosity",
"numpy.array",
"numpy.round",
"mmt.readerMMT.eventsToFeatures",
"configparser.ConfigParser",
"numpy.ndarray",
"json.JSONEncoder.default"
] |
[((284, 346), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.ERROR'], {}), '(tf.compat.v1.logging.ERROR)\n', (318, 346), True, 'import tensorflow as tf\n'), ((402, 438), 'sys.path.append', 'sys.path.append', (["(sys.path[0] + '/..')"], {}), "(sys.path[0] + '/..')\n", (417, 438), False, 'import sys\n'), ((499, 532), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (522, 532), False, 'import warnings\n'), ((837, 928), 'kafka.KafkaProducer', 'KafkaProducer', ([], {'bootstrap_servers': "['localhost:9092']", 'max_request_size': 'max_message_size'}), "(bootstrap_servers=['localhost:9092'], max_request_size=\n max_message_size)\n", (850, 928), False, 'from kafka import KafkaProducer\n'), ((3257, 3284), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (3282, 3284), False, 'import configparser\n'), ((3686, 3708), 'tensorflow.python.keras.models.load_model', 'load_model', (['model_path'], {}), '(model_path)\n', (3696, 3708), False, 'from tensorflow.python.keras.models import load_model\n'), ((3894, 3942), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(2, 2)', 'dtype': 'float', 'order': '"""F"""'}), "(shape=(2, 2), dtype=float, order='F')\n", (3904, 3942), True, 'import numpy as np\n'), ((776, 806), 'json.JSONEncoder.default', 'JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (795, 806), False, 'from json import JSONEncoder\n'), ((1607, 1618), 'time.time', 'time.time', ([], {}), '()\n', (1616, 1618), False, 'import time\n'), ((1678, 1703), 'mmt.readerMMT.eventsToFeatures', 'eventsToFeatures', (['mmt_csv'], {}), '(mmt_csv)\n', (1694, 1703), False, 'from mmt.readerMMT import eventsToFeatures\n'), ((1868, 1946), 'pandas.merge', 'pd.merge', (['ips', 'x_features'], {'how': '"""inner"""', 'on': "['ip.session_id', 'meta.direction']"}), "(ips, x_features, how='inner', on=['ip.session_id', 'meta.direction'])\n", (1876, 1946), True, 'import pandas as pd\n'), ((2200, 2234), 'numpy.asarray', 'np.asarray', (['x_features', 'np.float32'], {}), '(x_features, np.float32)\n', (2210, 2234), True, 'import numpy as np\n'), ((2522, 2558), 'numpy.append', 'np.append', (['x_features', 'preds'], {'axis': '(1)'}), '(x_features, preds, axis=1)\n', (2531, 2558), True, 'import numpy as np\n'), ((2573, 2600), 'numpy.append', 'np.append', (['ips', 'res'], {'axis': '(1)'}), '(ips, res, axis=1)\n', (2582, 2600), True, 'import numpy as np\n'), ((2430, 2448), 'numpy.array', 'np.array', (['[y_pred]'], {}), '([y_pred])\n', (2438, 2448), True, 'import numpy as np\n'), ((3954, 3992), 'json.dumps', 'json.dumps', (['res'], {'cls': 'NumpyArrayEncoder'}), '(res, cls=NumpyArrayEncoder)\n', (3964, 3992), False, 'import json\n'), ((4458, 4471), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4468, 4471), False, 'import time\n'), ((2678, 2716), 'json.dumps', 'json.dumps', (['res'], {'cls': 'NumpyArrayEncoder'}), '(res, cls=NumpyArrayEncoder)\n', (2688, 2716), False, 'import json\n'), ((4043, 4057), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4055, 4057), False, 'from datetime import datetime\n'), ((2368, 2384), 'numpy.round', 'np.round', (['y_pred'], {}), '(y_pred)\n', (2376, 2384), True, 'import numpy as np\n'), ((2771, 2785), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2783, 2785), False, 'from datetime import datetime\n'), ((3123, 3134), 'time.time', 'time.time', ([], {}), '()\n', (3132, 3134), False, 'import time\n')]
|
import os
import sys
import numpy as np
import pandas as pd
import logging
import gc
import tqdm
import pickle
import time
gc.enable()
cwd = os.getcwd()
train_path = os.path.join(cwd, 'train_artifact')
test_path = os.path.join(cwd, 'test_artifact')
input_path = os.path.join(cwd, 'input_artifact')
embed_path = os.path.join(cwd, 'embed_artifact')
def initiate_logger(log_path):
"""
Initialize a logger with file handler and stream handler
"""
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)-s: %(message)s', datefmt='%H:%M:%S')
fh = logging.FileHandler(log_path)
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
logger.addHandler(fh)
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(logging.INFO)
sh.setFormatter(formatter)
logger.addHandler(sh)
logger.info('===================================')
logger.info('Begin executing at {}'.format(time.ctime()))
logger.info('===================================')
return logger
def rough_split(logger=None):
"""
Split the training data (900,000 records) into a training file and a validation file using a 9:1 ratio.
"""
for npy_path in ['train_idx_shuffle.npy', 'train_age.npy', 'train_gender.npy']:
with open(os.path.join(input_path, npy_path), 'rb') as f:
npy = np.load(f)
with open(os.path.join(input_path, '{}_tra.npy'.format(npy_path.split('.')[0])), 'wb') as f:
np.save(f, npy[:810000])
with open(os.path.join(input_path, '{}_val.npy'.format(npy_path.split('.')[0])), 'wb') as f:
np.save(f, npy[810000:])
if logger: logger.info('{} splitted'.format(npy_path))
for pkl_path in ['train_creative_id_seq.pkl', 'train_ad_id_seq.pkl', 'train_advertiser_id_seq.pkl', 'train_product_id_seq.pkl']:
with open(os.path.join(input_path, pkl_path), 'rb') as f:
pkl = pickle.load(f)
with open(os.path.join(input_path, '{}_tra.pkl'.format(pkl_path.split('.')[0])), 'wb') as f:
pickle.dump(pkl[:810000], f)
with open(os.path.join(input_path, '{}_val.pkl'.format(pkl_path.split('.')[0])), 'wb') as f:
pickle.dump(pkl[810000:], f)
if logger: logger.info('{} splitted'.format(pkl_path))
def fine_split(logger=None):
"""
Split train data (900,000 records) into 10 files
Split test data (1,000,000 records) into 10 files
"""
input_split_path = os.path.join(cwd, 'input_split_artifact')
if not os.path.isdir(input_split_path): os.mkdir(input_split_path)
for npy_path in ['train_idx_shuffle.npy', 'train_age.npy', 'train_gender.npy']:
with open(os.path.join(input_path, npy_path), 'rb') as f:
npy = np.load(f)
for i in range(10):
with open(os.path.join(input_split_path, '{}_{}.npy'.format(npy_path.split('.')[0], i+1)), 'wb') as f:
np.save(f, npy[i*90000:(i+1)*90000])
if logger: logger.info('{} splitted'.format(npy_path))
for pkl_path in ['train_creative_id_seq.pkl', 'train_ad_id_seq.pkl', 'train_advertiser_id_seq.pkl', 'train_product_id_seq.pkl',]:
with open(os.path.join(input_path, pkl_path), 'rb') as f:
pkl = pickle.load(f)
for i in range(10):
with open(os.path.join(input_split_path, '{}_{}.pkl'.format(pkl_path.split('.')[0], i+1)), 'wb') as f:
pickle.dump(pkl[i*90000:(i+1)*90000], f)
if logger: logger.info('{} splitted'.format(pkl_path))
for npy_path in ['test_idx_shuffle.npy']:
with open(os.path.join(input_path, npy_path), 'rb') as f:
npy = np.load(f)
for i in range(10):
with open(os.path.join(input_split_path, '{}_{}.npy'.format(npy_path.split('.')[0], i+1)), 'wb') as f:
np.save(f, npy[i*100000:(i+1)*100000])
if logger: logger.info('{} splitted'.format(npy_path))
for pkl_path in ['test_creative_id_seq.pkl', 'test_ad_id_seq.pkl', 'test_advertiser_id_seq.pkl', 'test_product_id_seq.pkl',]:
with open(os.path.join(input_path, pkl_path), 'rb') as f:
pkl = pickle.load(f)
for i in range(10):
with open(os.path.join(input_split_path, '{}_{}.pkl'.format(pkl_path.split('.')[0], i+1)), 'wb') as f:
pickle.dump(pkl[i*100000:(i+1)*100000], f)
if logger: logger.info('{} splitted'.format(pkl_path))
if __name__=='__main__':
logger = initiate_logger('input_split.log')
if len(sys.argv)==1:
rough_split(logger=logger)
else:
fine_split(logger=logger)
|
[
"os.mkdir",
"numpy.load",
"numpy.save",
"pickle.dump",
"os.path.join",
"logging.FileHandler",
"os.getcwd",
"os.path.isdir",
"logging.StreamHandler",
"time.ctime",
"logging.Formatter",
"pickle.load",
"gc.enable",
"logging.getLogger"
] |
[((125, 136), 'gc.enable', 'gc.enable', ([], {}), '()\n', (134, 136), False, 'import gc\n'), ((143, 154), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (152, 154), False, 'import os\n'), ((168, 203), 'os.path.join', 'os.path.join', (['cwd', '"""train_artifact"""'], {}), "(cwd, 'train_artifact')\n", (180, 203), False, 'import os\n'), ((216, 250), 'os.path.join', 'os.path.join', (['cwd', '"""test_artifact"""'], {}), "(cwd, 'test_artifact')\n", (228, 250), False, 'import os\n'), ((264, 299), 'os.path.join', 'os.path.join', (['cwd', '"""input_artifact"""'], {}), "(cwd, 'input_artifact')\n", (276, 299), False, 'import os\n'), ((313, 348), 'os.path.join', 'os.path.join', (['cwd', '"""embed_artifact"""'], {}), "(cwd, 'embed_artifact')\n", (325, 348), False, 'import os\n'), ((459, 486), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (476, 486), False, 'import logging\n'), ((531, 616), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(levelname)-s: %(message)s"""'], {'datefmt': '"""%H:%M:%S"""'}), "('%(asctime)s %(levelname)-s: %(message)s', datefmt='%H:%M:%S'\n )\n", (548, 616), False, 'import logging\n'), ((618, 647), 'logging.FileHandler', 'logging.FileHandler', (['log_path'], {}), '(log_path)\n', (637, 647), False, 'import logging\n'), ((732, 765), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (753, 765), False, 'import logging\n'), ((2308, 2349), 'os.path.join', 'os.path.join', (['cwd', '"""input_split_artifact"""'], {}), "(cwd, 'input_split_artifact')\n", (2320, 2349), False, 'import os\n'), ((2358, 2389), 'os.path.isdir', 'os.path.isdir', (['input_split_path'], {}), '(input_split_path)\n', (2371, 2389), False, 'import os\n'), ((2391, 2417), 'os.mkdir', 'os.mkdir', (['input_split_path'], {}), '(input_split_path)\n', (2399, 2417), False, 'import os\n'), ((940, 952), 'time.ctime', 'time.ctime', ([], {}), '()\n', (950, 952), False, 'import time\n'), ((1308, 1318), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (1315, 1318), True, 'import numpy as np\n'), ((1417, 1441), 'numpy.save', 'np.save', (['f', 'npy[:810000]'], {}), '(f, npy[:810000])\n', (1424, 1441), True, 'import numpy as np\n'), ((1540, 1564), 'numpy.save', 'np.save', (['f', 'npy[810000:]'], {}), '(f, npy[810000:])\n', (1547, 1564), True, 'import numpy as np\n'), ((1821, 1835), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1832, 1835), False, 'import pickle\n'), ((1934, 1962), 'pickle.dump', 'pickle.dump', (['pkl[:810000]', 'f'], {}), '(pkl[:810000], f)\n', (1945, 1962), False, 'import pickle\n'), ((2061, 2089), 'pickle.dump', 'pickle.dump', (['pkl[810000:]', 'f'], {}), '(pkl[810000:], f)\n', (2072, 2089), False, 'import pickle\n'), ((2570, 2580), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (2577, 2580), True, 'import numpy as np\n'), ((3007, 3021), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3018, 3021), False, 'import pickle\n'), ((3365, 3375), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (3372, 3375), True, 'import numpy as np\n'), ((3800, 3814), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3811, 3814), False, 'import pickle\n'), ((1251, 1285), 'os.path.join', 'os.path.join', (['input_path', 'npy_path'], {}), '(input_path, npy_path)\n', (1263, 1285), False, 'import os\n'), ((1764, 1798), 'os.path.join', 'os.path.join', (['input_path', 'pkl_path'], {}), '(input_path, pkl_path)\n', (1776, 1798), False, 'import os\n'), ((2513, 2547), 'os.path.join', 'os.path.join', (['input_path', 'npy_path'], {}), '(input_path, npy_path)\n', (2525, 2547), False, 'import os\n'), ((2713, 2755), 'numpy.save', 'np.save', (['f', 'npy[i * 90000:(i + 1) * 90000]'], {}), '(f, npy[i * 90000:(i + 1) * 90000])\n', (2720, 2755), True, 'import numpy as np\n'), ((2950, 2984), 'os.path.join', 'os.path.join', (['input_path', 'pkl_path'], {}), '(input_path, pkl_path)\n', (2962, 2984), False, 'import os\n'), ((3154, 3200), 'pickle.dump', 'pickle.dump', (['pkl[i * 90000:(i + 1) * 90000]', 'f'], {}), '(pkl[i * 90000:(i + 1) * 90000], f)\n', (3165, 3200), False, 'import pickle\n'), ((3308, 3342), 'os.path.join', 'os.path.join', (['input_path', 'npy_path'], {}), '(input_path, npy_path)\n', (3320, 3342), False, 'import os\n'), ((3508, 3552), 'numpy.save', 'np.save', (['f', 'npy[i * 100000:(i + 1) * 100000]'], {}), '(f, npy[i * 100000:(i + 1) * 100000])\n', (3515, 3552), True, 'import numpy as np\n'), ((3743, 3777), 'os.path.join', 'os.path.join', (['input_path', 'pkl_path'], {}), '(input_path, pkl_path)\n', (3755, 3777), False, 'import os\n'), ((3947, 3995), 'pickle.dump', 'pickle.dump', (['pkl[i * 100000:(i + 1) * 100000]', 'f'], {}), '(pkl[i * 100000:(i + 1) * 100000], f)\n', (3958, 3995), False, 'import pickle\n')]
|
from typing import Union, List, Dict, Optional, Callable, Set
from skdecide.builders.solver.policy import DeterministicPolicies, UncertainPolicies
from skdecide import Domain, Solver
from skdecide.builders.scheduling.modes import SingleMode
from skdecide.builders.scheduling.scheduling_domains_modelling import State, SchedulingAction, SchedulingActionEnum
from skdecide.builders.scheduling.scheduling_domains import SchedulingDomain, D, MultiModeRCPSP, SingleModeRCPSP
from skdecide import rollout_episode
from skdecide.hub.solver.sgs_policies.sgs_policies import PolicyMethodParams, BasePolicyMethod, PolicyRCPSP
from skdecide.hub.solver.do_solver.do_solver_scheduling import PolicyRCPSP, DOSolver, PolicyMethodParams, BasePolicyMethod, SolvingMethod
from skdecide.builders.discrete_optimization.rcpsp.solver.cpm import CPM
from skdecide.hub.solver.do_solver.sk_to_do_binding import build_do_domain
from skdecide.builders.discrete_optimization.rcpsp.rcpsp_model import RCPSPSolution
from enum import Enum
from deap.gp import PrimitiveSet, PrimitiveTree, genHalfAndHalf
from deap import gp
from deap import algorithms
from deap.base import Toolbox, Fitness
from deap import creator
from deap import tools
import operator
import itertools
import numpy as np
import random
from scipy import stats
from scipy.spatial import distance
def if_then_else(input, output1, output2):
if input: return output1
else: return output2
def protected_div(left, right):
if right != 0.:
return left/right
else:
return 1.
def max_operator(left, right):
return max(left, right)
def min_operator(left, right):
return min(left, right)
def feature_task_duration(domain: SchedulingDomain, cpm, cpm_esd, task_id: int, **kwargs):
return domain.sample_task_duration(task_id)
def feature_total_n_res(domain: SchedulingDomain, cpm, cpm_esd, task_id: int, **kwargs):
val = 0
mode_consumption = domain.get_task_modes(task_id)[1]
for res in mode_consumption.get_ressource_names():
val += mode_consumption.get_resource_need(res)
return val
def feature_n_successors(domain: SchedulingDomain, cpm, cpm_esd, task_id: int, **kwargs):
return len(domain.get_successors_task(task_id))/ len(domain.get_tasks_ids())
def feature_n_predecessors(domain: SchedulingDomain, cpm, cpm_esd, task_id: int, **kwargs):
return len(domain.get_predecessors_task(task_id))/ len(domain.get_tasks_ids())
def get_resource_requirements_across_duration(domain: SchedulingDomain, task_id: int, **kwargs):
values = []
mode_consumption = domain.get_task_modes(task_id)[1]
duration = domain.get_latest_sampled_duration(task_id, 1, 0.)
if duration > 0:
for res in mode_consumption.get_ressource_names():
tmp = 0
for t in range(duration):
need = domain.get_task_modes(task_id)[1].get_resource_need_at_time(res, t)
total = domain.sample_quantity_resource(res, t)
tmp += need / total
values.append(tmp/duration)
else:
values = [0.]
# print(task_id,':', values)
return values
def feature_average_resource_requirements(domain: SchedulingDomain, cpm, cpm_esd, task_id: int, **kwargs):
values = get_resource_requirements_across_duration(domain=domain, task_id=task_id)
val = np.mean(values)
return val
def feature_minimum_resource_requirements(domain: SchedulingDomain, cpm, cpm_esd, task_id: int, **kwargs):
values = get_resource_requirements_across_duration(domain=domain, task_id=task_id)
val = np.min(values)
return val
def feature_non_zero_minimum_resource_requirements(domain: SchedulingDomain, cpm, cpm_esd, task_id: int, **kwargs):
values = get_resource_requirements_across_duration(domain=domain, task_id=task_id)
if np.sum(values) > 0.:
val = np.min([x for x in values if x > 0.])
else:
val = np.min(values)
return val
def feature_maximum_resource_requirements(domain: SchedulingDomain, cpm, cpm_esd, task_id: int, **kwargs):
values = get_resource_requirements_across_duration(domain=domain, task_id=task_id)
val = np.max(values)
return val
def feature_resource_requirements(domain: SchedulingDomain, cpm, cpm_esd, task_id: int, **kwargs):
values = get_resource_requirements_across_duration(domain=domain, task_id=task_id)
val = len([x for x in values if x > 0.]) / len(values)
return val
def feature_all_descendants(domain: SchedulingDomain, cpm, cpm_esd, task_id: int, **kwargs):
return len(domain.full_successors[task_id]) / len(domain.get_tasks_ids())
def feature_precedence_done(domain: SchedulingDomain, cpm, cpm_esd, task_id: int, state: State, **kwargs):
return task_id in domain.task_possible_to_launch_precedence(state=state)
def compute_cpm(do_domain):
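# Run the classic critical-path method once; the ESD of the last node in the path is kept as the horizon used to normalise the date features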
cpm_solver = CPM(do_domain)
path = cpm_solver.run_classic_cpm()
cpm = cpm_solver.map_node
cpm_esd = cpm[path[-1]]._ESD # to normalize...
return cpm, cpm_esd
def feature_esd(domain: SchedulingDomain, cpm, cpm_esd, task_id: int, **kwargs):
""" Will only work if you store cpm results into the object. dirty trick"""
return cpm[task_id]._ESD/cpm_esd
def feature_lsd(domain: SchedulingDomain, cpm, cpm_esd, task_id: int, **kwargs):
""" Will only work if you store cpm results into the object. dirty trick"""
return cpm[task_id]._LSD/cpm_esd
def feature_efd(domain: SchedulingDomain, cpm, cpm_esd, task_id: int, **kwargs):
""" Will only work if you store cpm results into the object. dirty trick"""
return cpm[task_id]._EFD/cpm_esd
def feature_lfd(domain: SchedulingDomain, cpm, cpm_esd, task_id: int, **kwargs):
""" Will only work if you store cpm results into the object. dirty trick"""
return cpm[task_id]._LFD/cpm_esd
class D(SchedulingDomain, SingleMode):
pass
class FeatureEnum(Enum):
TASK_DURATION = "task_duration"
RESSOURCE_TOTAL = "total_nres"
N_SUCCESSORS = "n_successors"
N_PREDECESSORS = "n_predecessors"
RESSOURCE_REQUIRED = "res_requ"
RESSOURCE_AVG = "avg_res_requ"
RESSOURCE_MIN = "min_res_requ"
RESSOURCE_NZ_MIN = "nz_min_res_requ"
RESSOURCE_MAX = "max_res_requ"
ALL_DESCENDANTS = "all_descendants"
PRECEDENCE_DONE = "precedence_done"
EARLIEST_START_DATE = "ESD"
LATEST_START_DATE = "LSD"
EARLIEST_FINISH_DATE = "EFD"
LATEST_FINISH_DATE = "LFD"
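# Map each feature to the function computing it; feature_static_map below appears to flag features that do not depend on the current state (all but PRECEDENCE_DONE)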
feature_function_map = {FeatureEnum.TASK_DURATION: feature_task_duration,
FeatureEnum.RESSOURCE_TOTAL: feature_total_n_res,
FeatureEnum.N_SUCCESSORS: feature_n_successors,
FeatureEnum.N_PREDECESSORS: feature_n_predecessors, #
FeatureEnum.RESSOURCE_REQUIRED: feature_resource_requirements, #
FeatureEnum.RESSOURCE_AVG: feature_average_resource_requirements, #
FeatureEnum.RESSOURCE_MIN: feature_minimum_resource_requirements, #
FeatureEnum.RESSOURCE_NZ_MIN: feature_non_zero_minimum_resource_requirements, #
FeatureEnum.RESSOURCE_MAX: feature_maximum_resource_requirements, #
FeatureEnum.ALL_DESCENDANTS: feature_all_descendants, #
FeatureEnum.PRECEDENCE_DONE: feature_precedence_done,
FeatureEnum.EARLIEST_START_DATE: feature_esd, #
FeatureEnum.EARLIEST_FINISH_DATE: feature_efd, #
FeatureEnum.LATEST_START_DATE: feature_lsd, #
FeatureEnum.LATEST_FINISH_DATE: feature_lfd} #
feature_static_map = {FeatureEnum.TASK_DURATION: True,
FeatureEnum.RESSOURCE_TOTAL: True,
FeatureEnum.N_SUCCESSORS: True,
FeatureEnum.N_PREDECESSORS: True, #
FeatureEnum.RESSOURCE_REQUIRED: True, #
FeatureEnum.RESSOURCE_AVG: True, #
FeatureEnum.RESSOURCE_MIN: True, #
FeatureEnum.RESSOURCE_NZ_MIN: True, #
FeatureEnum.RESSOURCE_MAX: True, #
FeatureEnum.ALL_DESCENDANTS: True, #
FeatureEnum.PRECEDENCE_DONE: False,
FeatureEnum.EARLIEST_START_DATE: True, #
FeatureEnum.EARLIEST_FINISH_DATE: True, #
FeatureEnum.LATEST_START_DATE: True, #
FeatureEnum.LATEST_FINISH_DATE: True} #
class EvaluationGPHH(Enum):
SGS = 0
PERMUTATION_DISTANCE = 1
# SGS_DEVIATION = 2
class PermutationDistance(Enum):
KTD = 0
HAMMING = 1
KTD_HAMMING = 2
class ParametersGPHH:
set_feature: Set[FeatureEnum] = None
set_primitves: PrimitiveSet = None
tournament_ratio: float = None
pop_size: int = None
n_gen: int = None
min_tree_depth: int = None
max_tree_depth: int = None
crossover_rate: float = None
mutation_rate: float = None
base_policy_method = None
delta_index_freedom: int = None
delta_time_freedom: int = None
deap_verbose: bool = None
evaluation: EvaluationGPHH = None
permutation_distance = PermutationDistance.KTD
def __init__(self,
set_feature,
set_primitves,
tournament_ratio, pop_size, n_gen, min_tree_depth, max_tree_depth, crossover_rate, mutation_rate, base_policy_method, delta_index_freedom, delta_time_freedom, deap_verbose, evaluation, permutation_distance
):
self.set_feature = set_feature
self.set_primitves = set_primitves
self.tournament_ratio = tournament_ratio
self.pop_size = pop_size
self.n_gen = n_gen
self.min_tree_depth = min_tree_depth
self.max_tree_depth = max_tree_depth
self.crossover_rate = crossover_rate
self.mutation_rate = mutation_rate
self.base_policy_method = base_policy_method
self.delta_index_freedom = delta_index_freedom
self.delta_time_freedom = delta_time_freedom
self.deap_verbose = deap_verbose
self.evaluation = evaluation
self.permutation_distance = permutation_distance
@staticmethod
def default():
set_feature = {FeatureEnum.EARLIEST_FINISH_DATE,
FeatureEnum.EARLIEST_START_DATE,
FeatureEnum.LATEST_FINISH_DATE,
FeatureEnum.LATEST_START_DATE,
FeatureEnum.N_PREDECESSORS,
FeatureEnum.N_SUCCESSORS,
FeatureEnum.ALL_DESCENDANTS,
FeatureEnum.RESSOURCE_REQUIRED,
FeatureEnum.RESSOURCE_AVG,
FeatureEnum.RESSOURCE_MAX,
# FeatureEnum.RESSOURCE_MIN
FeatureEnum.RESSOURCE_NZ_MIN
}
pset = PrimitiveSet("main", len(set_feature))
pset.addPrimitive(operator.add, 2)
pset.addPrimitive(operator.sub, 2)
pset.addPrimitive(operator.mul, 2)
pset.addPrimitive(protected_div, 2)
pset.addPrimitive(max_operator, 2)
pset.addPrimitive(min_operator, 2)
pset.addPrimitive(operator.neg, 1)
return ParametersGPHH(
set_feature=set_feature,
set_primitves=pset,
tournament_ratio=0.1,
pop_size=40,
n_gen=100,
min_tree_depth=1,
max_tree_depth=4,
crossover_rate=0.7,
mutation_rate=0.3,
base_policy_method=BasePolicyMethod.FOLLOW_GANTT,
delta_index_freedom=0,
delta_time_freedom=0,
deap_verbose=True,
# evaluation=EvaluationGPHH.PERMUTATION_DISTANCE,
evaluation=EvaluationGPHH.SGS,
permutation_distance=PermutationDistance.KTD)
@staticmethod
def fast_test():
set_feature = {FeatureEnum.EARLIEST_FINISH_DATE,
FeatureEnum.EARLIEST_START_DATE,
FeatureEnum.LATEST_FINISH_DATE,
FeatureEnum.LATEST_START_DATE,
FeatureEnum.N_PREDECESSORS,
FeatureEnum.N_SUCCESSORS,
FeatureEnum.ALL_DESCENDANTS,
FeatureEnum.RESSOURCE_REQUIRED,
FeatureEnum.RESSOURCE_AVG,
FeatureEnum.RESSOURCE_MAX,
# FeatureEnum.RESSOURCE_MIN
FeatureEnum.RESSOURCE_NZ_MIN
}
pset = PrimitiveSet("main", len(set_feature))
pset.addPrimitive(operator.add, 2)
pset.addPrimitive(operator.sub, 2)
pset.addPrimitive(operator.mul, 2)
pset.addPrimitive(protected_div, 2)
pset.addPrimitive(max_operator, 2)
pset.addPrimitive(min_operator, 2)
pset.addPrimitive(operator.neg, 1)
return ParametersGPHH(
set_feature=set_feature,
set_primitves=pset,
tournament_ratio=0.1,
pop_size=10,
n_gen=2,
min_tree_depth=1,
max_tree_depth=4,
crossover_rate=0.7,
mutation_rate=0.3,
base_policy_method=BasePolicyMethod.FOLLOW_GANTT,
delta_index_freedom=0,
delta_time_freedom=0,
deap_verbose=True,
evaluation=EvaluationGPHH.SGS,
# evaluation=EvaluationGPHH.PERMUTATION_DISTANCE,
permutation_distance=PermutationDistance.KTD)
@staticmethod
def default_for_set_features(set_feature: Set[FeatureEnum]):
pset = PrimitiveSet("main", len(set_feature))
pset.addPrimitive(operator.add, 2)
pset.addPrimitive(operator.sub, 2)
pset.addPrimitive(operator.mul, 2)
# pset.addPrimitive(protected_div, 2)
pset.addPrimitive(max_operator, 2)
pset.addPrimitive(min_operator, 2)
pset.addPrimitive(operator.neg, 1)
return ParametersGPHH(
set_feature=set_feature,
set_primitves=pset,
tournament_ratio=0.25,
pop_size=20,
n_gen=20,
min_tree_depth=1,
max_tree_depth=4,
crossover_rate=0.7,
mutation_rate=0.1,
base_policy_method=BasePolicyMethod.SGS_READY,
delta_index_freedom=0,
delta_time_freedom=0,
deap_verbose=True,
evaluation=EvaluationGPHH.PERMUTATION_DISTANCE,
permutation_distance=PermutationDistance.KTD)
class GPHH(Solver, DeterministicPolicies):
T_domain = D
training_domains: List[T_domain]
verbose: bool
weight: int
pset: PrimitiveSet
toolbox: Toolbox
policy: DeterministicPolicies
params_gphh: ParametersGPHH
# policy: GPHHPolicy
evaluation_method: EvaluationGPHH
reference_permutations: Dict
permutation_distance: PermutationDistance
def __init__(self,
training_domains: List[T_domain],
domain_model: SchedulingDomain,
weight: int,
# set_feature: Set[FeatureEnum]=None,
params_gphh: ParametersGPHH=ParametersGPHH.default(),
reference_permutations=None,
# reference_makespans=None,
training_domains_names=None,
verbose: bool=False):
self.training_domains = training_domains
self.domain_model = domain_model
self.params_gphh = params_gphh
# self.set_feature = set_feature
self.set_feature = self.params_gphh.set_feature
print('self.set_feature: ', self.set_feature)
print('Evaluation: ', self.params_gphh.evaluation)
# if set_feature is None:
# self.set_feature = {FeatureEnum.RESSOURCE_TOTAL,
# FeatureEnum.TASK_DURATION,
# FeatureEnum.N_SUCCESSORS,
# FeatureEnum.N_SUCCESSORS,
# FeatureEnum.RESSOURCE_AVG}
self.list_feature = list(self.set_feature)
self.verbose = verbose
self.pset = self.init_primitives(self.params_gphh.set_primitves)
self.weight = weight
self.evaluation_method = self.params_gphh.evaluation
self.initialize_cpm_data_for_training()
if self.evaluation_method == EvaluationGPHH.PERMUTATION_DISTANCE:
self.init_reference_permutations(reference_permutations, training_domains_names)
self.permutation_distance = self.params_gphh.permutation_distance
# if self.evaluation_method == EvaluationGPHH.SGS_DEVIATION:
# self.init_reference_makespans(reference_makespans, training_domains_names)
def init_reference_permutations(self, reference_permutations={}, training_domains_names=[]) -> None:
self.reference_permutations = {}
for i in range(len(self.training_domains)):
td = self.training_domains[i]
td_name = training_domains_names[i]
if td_name not in reference_permutations.keys():
# Run CP
td.set_inplace_environment(False)
solver = DOSolver(policy_method_params=PolicyMethodParams(base_policy_method=BasePolicyMethod.SGS_PRECEDENCE,
delta_index_freedom=0,
delta_time_freedom=0),
method=SolvingMethod.CP)
solver.solve(domain_factory=lambda: td)
raw_permutation = solver.best_solution.rcpsp_permutation
full_permutation = [x+2 for x in raw_permutation]
full_permutation.insert(0, 1)
full_permutation.append(np.max(full_permutation)+1)
print('full_perm: ', full_permutation)
self.reference_permutations[td] = full_permutation
else:
self.reference_permutations[td] = reference_permutations[td_name]
# def init_reference_makespans(self, reference_makespans={}, training_domains_names=[]) -> None:
# self.reference_makespans = {}
# for i in range(len(self.training_domains)):
# td = self.training_domains[i]
# td_name = training_domains_names[i]
# # for td in self.training_domains:
# print('td:',td)
# if td_name not in reference_makespans.keys():
# # Run CP
# td.set_inplace_environment(False)
# solver = DOSolver(policy_method_params=PolicyMethodParams(base_policy_method=BasePolicyMethod.FOLLOW_GANTT,
# delta_index_freedom=0,
# delta_time_freedom=0),
# method=SolvingMethod.CP)
# solver.solve(domain_factory=lambda: td)
#
# state = td.get_initial_state()
# states, actions, values = rollout_episode(domain=td,
# max_steps=1000,
# solver=solver,
# from_memory=state,
# verbose=False,
# outcome_formatter=lambda
# o: f'{o.observation} - cost: {o.value.cost:.2f}')
#
# makespan = sum([v.cost for v in values])
# self.reference_makespans[td] = makespan
# else:
# self.reference_makespans[td] = reference_makespans[td_name]
def _solve_domain(self, domain_factory: Callable[[], D]) -> None:
self.domain = domain_factory()
tournament_ratio = self.params_gphh.tournament_ratio
pop_size = self.params_gphh.pop_size
n_gen = self.params_gphh.n_gen
min_tree_depth = self.params_gphh.min_tree_depth
max_tree_depth = self.params_gphh.max_tree_depth
crossover_rate = self.params_gphh.crossover_rate
mutation_rate = self.params_gphh.mutation_rate
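# DEAP setup: the fitness direction is given by self.weight, and individuals are GP primitive trees built over the selected features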
creator.create("FitnessMin", Fitness, weights=(self.weight,))
creator.create("Individual", PrimitiveTree, fitness=creator.FitnessMin)
self.toolbox = Toolbox()
self.toolbox.register("expr", genHalfAndHalf, pset=self.pset, min_=min_tree_depth, max_=max_tree_depth)
self.toolbox.register("individual", tools.initIterate, creator.Individual, self.toolbox.expr)
self.toolbox.register("population", tools.initRepeat, list, self.toolbox.individual)
self.toolbox.register("compile", gp.compile, pset=self.pset)
if self.evaluation_method == EvaluationGPHH.SGS:
self.toolbox.register("evaluate", self.evaluate_heuristic, domains=self.training_domains)
# if self.evaluation_method == EvaluationGPHH.SGS_DEVIATION:
# self.toolbox.register("evaluate", self.evaluate_heuristic_sgs_deviation, domains=self.training_domains)
elif self.evaluation_method == EvaluationGPHH.PERMUTATION_DISTANCE:
self.toolbox.register("evaluate", self.evaluate_heuristic_permutation, domains=self.training_domains)
# self.toolbox.register("evaluate", self.evaluate_heuristic, domains=[self.training_domains[1]])
self.toolbox.register("select", tools.selTournament, tournsize=int(tournament_ratio * pop_size))
self.toolbox.register("mate", gp.cxOnePoint)
self.toolbox.register("expr_mut", gp.genFull, min_=0, max_=max_tree_depth)
self.toolbox.register("mutate", gp.mutUniform, expr=self.toolbox.expr_mut, pset=self.pset)
self.toolbox.decorate("mate", gp.staticLimit(key=operator.attrgetter("height"), max_value=17))
self.toolbox.decorate("mutate", gp.staticLimit(key=operator.attrgetter("height"), max_value=17))
stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
stats_size = tools.Statistics(len)
mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
mstats.register("avg", np.mean)
mstats.register("std", np.std)
mstats.register("min", np.min)
mstats.register("max", np.max)
pop = self.toolbox.population(n=pop_size)
hof = tools.HallOfFame(1)
self.hof = hof
pop, log = algorithms.eaSimple(pop, self.toolbox, crossover_rate, mutation_rate, n_gen, stats=mstats,
halloffame=hof, verbose=True)
self.best_heuristic = hof[0]
print('best_heuristic: ', self.best_heuristic)
self.func_heuristic = self.toolbox.compile(expr=self.best_heuristic)
self.policy = GPHHPolicy(self.domain, self.domain_model,
self.func_heuristic,
features=self.list_feature,
params_gphh=self.params_gphh,
recompute_cpm=True)
def _get_next_action(self, observation: D.T_agent[D.T_observation]) -> D.T_agent[D.T_concurrency[D.T_event]]:
action = self.policy.sample_action(observation)
# print('action_1: ', action.action)
return action
def _is_policy_defined_for(self, observation: D.T_agent[D.T_observation]) -> bool:
return True
def init_primitives(self, pset) -> PrimitiveSet:
for i in range(len(self.list_feature)):
pset.renameArguments(**{"ARG"+str(i): self.list_feature[i].value})
return pset
def evaluate_heuristic(self, individual, domains) -> float:
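        """Fitness of a compiled heuristic: mean makespan of the schedules obtained by turning its task ranking into an RCPSP solution on each training domain."""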
vals = []
func_heuristic = self.toolbox.compile(expr=individual)
# print('individual', individual)
for domain in domains:
###
initial_state = domain.get_initial_state()
do_model = build_do_domain(domain)
modes = [initial_state.tasks_mode.get(j, 1) for j in sorted(domain.get_tasks_ids())]
modes = modes[1:-1]
cpm = self.cpm_data[domain]['cpm']
cpm_esd = self.cpm_data[domain]['cpm_esd']
raw_values = []
for task_id in domain.get_available_tasks(initial_state):
input_features = [feature_function_map[lf](domain=domain,
cpm=cpm,
cpm_esd=cpm_esd,
task_id=task_id,
state=initial_state)
for lf in self.list_feature]
output_value = func_heuristic(*input_features)
raw_values.append(output_value)
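            # Turn heuristic values into a 1-based rank permutation, then drop the extreme ranks (dummy source/sink) and shift to 0-based indices for the DO model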
normalized_values = [x + 1 for x in sorted(range(len(raw_values)), key=lambda k: raw_values[k],
reverse=False)]
normalized_values_for_do = [normalized_values[i] - 2 for i in range(len(normalized_values)) if
normalized_values[i] not in {1, len(normalized_values)}]
solution = RCPSPSolution(problem=do_model,
rcpsp_permutation=normalized_values_for_do,
rcpsp_modes=modes,
)
last_activity = max(list(solution.rcpsp_schedule.keys()))
do_makespan = solution.rcpsp_schedule[last_activity]['end_time']
vals.append(do_makespan)
fitness = [np.mean(vals)]
# fitness = [np.max(vals)]
return fitness
# def evaluate_heuristic_sgs_deviation(self, individual, domains) -> float:
# vals = []
# func_heuristic = self.toolbox.compile(expr=individual)
# # selected_domains = random.sample(domains, 3)
# selected_domains = domains
#
# for domain in selected_domains:
# policy = GPHHPolicy(domain, domain,
# func_heuristic,
# features=self.list_feature,
# params_gphh=self.params_gphh, recompute_cpm=False, cpm_data=self.cpm_data
# )
# state = domain.get_initial_state().copy()
# domain.set_inplace_environment(True) # we can use True because we don't use the value
#
# states, actions, values = rollout_episode(domain=domain,
# max_steps=10000,
# solver=policy,
# from_memory=state,
# verbose=False,
# outcome_formatter=lambda
# o: f'{o.observation} - cost: {o.value.cost:.2f}')
#
# makespan = states[-1].t
# ref_makespan = self.reference_makespans[domain]
# makespan_deviation = (makespan - ref_makespan) / ref_makespan
# # print('mk: ', makespan, ' - mk_dev: ', makespan_deviation, ' - ref: ', ref_makespan)
# vals.append(makespan_deviation)
#
# # fitness = [np.mean(vals)]
# fitness = [np.mean(vals)]
# return fitness
def initialize_cpm_data_for_training(self):
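        """Precompute critical-path (CPM) data for every training domain so that evaluations can reuse it."""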
self.cpm_data = {}
for domain in self.training_domains:
do_model = build_do_domain(domain)
cpm, cpm_esd = compute_cpm(do_model)
self.cpm_data[domain] = {'cpm': cpm,
'cpm_esd': cpm_esd}
def evaluate_heuristic_permutation(self, individual, domains) -> float:
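        """Fitness of a compiled heuristic: distance between its task ranking and the CP reference permutation (Kendall tau and/or Hamming), plus a penalty for ties in the heuristic values."""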
vals = []
func_heuristic = self.toolbox.compile(expr=individual)
# print('individual', individual)
for domain in domains:
raw_values = []
initial_state = domain.get_initial_state()
regenerate_cpm = False
if regenerate_cpm:
do_model = build_do_domain(domain)
cpm, cpm_esd = compute_cpm(do_model)
else:
cpm = self.cpm_data[domain]['cpm']
cpm_esd = self.cpm_data[domain]['cpm_esd']
for task_id in domain.get_available_tasks(state=initial_state):
input_features = [feature_function_map[lf](domain=domain,
cpm = cpm,
cpm_esd=cpm_esd,
task_id=task_id,
state=initial_state)
for lf in self.list_feature]
output_value = func_heuristic(*input_features)
raw_values.append(output_value)
most_common_raw_val = max(raw_values, key=raw_values.count)
most_common_count = raw_values.count(most_common_raw_val)
heuristic_permutation = [x + 1 for x in sorted(range(len(raw_values)), key=lambda k: raw_values[k],
reverse=False)]
if self.permutation_distance == PermutationDistance.KTD:
dist, p_value = stats.kendalltau(heuristic_permutation, self.reference_permutations[domain])
dist = -dist
if self.permutation_distance == PermutationDistance.HAMMING:
dist = distance.hamming(heuristic_permutation, self.reference_permutations[domain])
if self.permutation_distance == PermutationDistance.KTD_HAMMING:
ktd, _ = stats.kendalltau(heuristic_permutation, self.reference_permutations[domain])
dist = -ktd + distance.hamming(heuristic_permutation, self.reference_permutations[domain])
penalty = most_common_count / len(raw_values)
# penalty = 0.
penalized_distance = dist + penalty
vals.append(penalized_distance)
fitness = [np.mean(vals)]
# fitness = [np.max(vals)]
return fitness
def test_features(self, domain, task_id, observation):
for f in FeatureEnum:
print('feature: ', f)
calculated_feature = feature_function_map[f](domain=domain,
task_id=task_id,
state=observation)
            print('\tcalculated_feature: ', calculated_feature)
def set_domain(self, domain):
self.domain = domain
if self.policy is not None:
self.policy.domain = domain
class GPHHPolicy(DeterministicPolicies):
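    """Policy that scores the available tasks with an evolved heuristic, turns the scores into a permutation and delegates action selection to an SGS-based PolicyRCPSP."""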
def __init__(self, domain: SchedulingDomain, domain_model: SchedulingDomain, func_heuristic, features: List[FeatureEnum]=None, params_gphh=None, recompute_cpm=True, cpm_data=None):
self.domain = domain
self.domain_model = domain_model
self.func_heuristic = func_heuristic
self.list_feature = features
self.params_gphh = params_gphh
self.recompute_cpm = recompute_cpm
self.cpm_data = cpm_data
def reset(self):
pass
def _get_next_action(self, observation: D.T_agent[D.T_observation]) -> D.T_agent[D.T_concurrency[D.T_event]]:
run_sgs = True
cheat_mode = False
do_model = build_do_domain(self.domain_model)
modes = [observation.tasks_mode.get(j, 1) for j in sorted(self.domain.get_tasks_ids())]
modes = modes[1:-1]
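        # When SGS is used, pin the start times of tasks that have already started and freeze their sampled durations in the DO model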
if run_sgs:
scheduled_tasks_start_times = {}
for j in observation.tasks_details.keys():
if observation.tasks_details[j].start is not None:
scheduled_tasks_start_times[j] = observation.tasks_details[j].start
do_model.mode_details[j][1]['duration'] = observation.tasks_details[j].sampled_duration
# do_model = build_do_domain(self.domain)
# modes = [observation.tasks_mode.get(j, 1) for j in sorted(self.domain.get_tasks_ids())]
# modes = modes[1:-1]
#
# if run_sgs:
# scheduled_tasks_start_times = {}
# for j in observation.tasks_details.keys():
# if observation.tasks_details[j].start is not None:
# scheduled_tasks_start_times[j] = observation.tasks_details[j].start
# else:
# if not cheat_mode:
# do_model.mode_details[j][1]['duration'] = self.domain_model.sample_task_duration(j, 1, 0.)
if self.recompute_cpm:
cpm, cpm_esd = compute_cpm(do_model)
else:
cpm = self.cpm_data[self.domain]['cpm']
cpm_esd = self.cpm_data[self.domain]['cpm_esd']
t = observation.t
raw_values = []
for task_id in self.domain.get_available_tasks(observation):
input_features = [feature_function_map[lf](domain=self.domain,
cpm = cpm,
cpm_esd=cpm_esd,
task_id=task_id,
state=observation)
for lf in self.list_feature]
output_value = self.func_heuristic(*input_features)
raw_values.append(output_value)
normalized_values = [x+1 for x in sorted(range(len(raw_values)), key=lambda k: raw_values[k],
reverse=False)]
normalized_values_for_do = [normalized_values[i] - 2 for i in range(len(normalized_values)) if
normalized_values[i] not in {1, len(normalized_values)}]
# print(t, ': ', normalized_values)
# print('normalized_values_for_do: ', normalized_values_for_do)
modes_dictionnary = {}
for i in range(len(normalized_values)):
modes_dictionnary[i+1] = 1
if run_sgs:
solution = RCPSPSolution(problem=do_model,
rcpsp_permutation=normalized_values_for_do,
rcpsp_modes=modes,
)
solution.generate_schedule_from_permutation_serial_sgs_2(current_t=t,
completed_tasks=
{j: observation.tasks_details[j]
for j in observation.tasks_complete},
scheduled_tasks_start_times=scheduled_tasks_start_times)
schedule = solution.rcpsp_schedule
else:
schedule = None
sgs_policy = PolicyRCPSP(domain=self.domain,
schedule=schedule,
policy_method_params=PolicyMethodParams(
# base_policy_method=BasePolicyMethod.SGS_PRECEDENCE,
# base_policy_method=BasePolicyMethod.SGS_READY,
base_policy_method=self.params_gphh.base_policy_method,
delta_index_freedom=self.params_gphh.delta_index_freedom,
delta_time_freedom=self.params_gphh.delta_time_freedom),
permutation_task=normalized_values,
modes_dictionnary=modes_dictionnary)
action: SchedulingAction = sgs_policy.sample_action(observation)
# print('action_2: ', action.action)
return action
def _is_policy_defined_for(self, observation: D.T_agent[D.T_observation]) -> bool:
return True
class PoolAggregationMethod(Enum):
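    """How the priority values produced by a pool of heuristics are combined into a single value."""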
MEAN = "mean"
MEDIAN = "median"
RANDOM = "random"
class PooledGPHHPolicy(DeterministicPolicies):
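    """Policy that aggregates the outputs of several evolved heuristics per task (mean, median or random pick), optionally discarding extreme values, before applying SGS."""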
def __init__(self, domain: SchedulingDomain, domain_model: SchedulingDomain, func_heuristics, pool_aggregation_method: PoolAggregationMethod = PoolAggregationMethod.MEAN, remove_extremes_values:int = 0, features: List[FeatureEnum]=None, params_gphh=None):
self.domain = domain
self.domain_model = domain_model
self.func_heuristics = func_heuristics
self.list_feature = features
self.params_gphh = params_gphh
self.pool_aggregation_method = pool_aggregation_method
self.remove_extremes_values = remove_extremes_values
def reset(self):
pass
def _get_next_action(self, observation: D.T_agent[D.T_observation]) -> D.T_agent[D.T_concurrency[D.T_event]]:
run_sgs = True
cheat_mode = False
regenerate_cpm = True
do_model = build_do_domain(self.domain_model)
modes = [observation.tasks_mode.get(j, 1) for j in sorted(self.domain.get_tasks_ids())]
modes = modes[1:-1]
if run_sgs:
scheduled_tasks_start_times = {}
for j in observation.tasks_details.keys():
if observation.tasks_details[j].start is not None:
scheduled_tasks_start_times[j] = observation.tasks_details[j].start
do_model.mode_details[j][1]['duration'] = observation.tasks_details[j].sampled_duration
# do_model = build_do_domain(self.domain)
# modes = [observation.tasks_mode.get(j, 1) for j in sorted(self.domain.get_tasks_ids())]
# modes = modes[1:-1]
#
# if run_sgs:
# scheduled_tasks_start_times = {}
# for j in observation.tasks_details.keys():
# # schedule[j] = {}
# if observation.tasks_details[j].start is not None:
# # schedule[j]["start_time"] = observation.tasks_details[j].start
# scheduled_tasks_start_times[j] = observation.tasks_details[j].start
# # if observation.tasks_details[j].end is not None:
# # schedule[j]["end_time"] = observation.tasks_details[j].end
# else:
# if not cheat_mode:
# do_model.mode_details[j][1]['duration'] = self.domain_model.sample_task_duration(j, 1, 0.)
if regenerate_cpm:
cpm, cpm_esd = compute_cpm(do_model)
t = observation.t
raw_values = []
for task_id in self.domain.get_available_tasks(observation):
input_features = [feature_function_map[lf](
domain=self.domain,
cpm = cpm,
cpm_esd=cpm_esd,
task_id=task_id,
state=observation)
for lf in self.list_feature]
output_values = []
for f in self.func_heuristics:
output_value = f(*input_features)
output_values.append(output_value)
# print('output_values: ', output_values)
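            # Optionally discard the heuristic outputs furthest from the median before aggregating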
if self.remove_extremes_values > 0:
the_median = float(np.median(output_values))
tmp = {}
for i in range(len(output_values)):
tmp[i] = abs(output_values[i] - the_median)
tmp = sorted(tmp.items(), key=lambda x: x[1], reverse=True)
to_remove = [tmp[i][0] for i in range(self.remove_extremes_values)]
output_values = list(np.delete(output_values, to_remove))
# print('output_values filtered: ', output_values)
if self.pool_aggregation_method == PoolAggregationMethod.MEAN:
agg_value = np.mean(output_values)
elif self.pool_aggregation_method == PoolAggregationMethod.MEDIAN:
agg_value = np.median(output_values)
elif self.pool_aggregation_method == PoolAggregationMethod.RANDOM:
                index = random.randint(0, len(output_values) - 1)
agg_value = output_values[index]
# print('agg_value: ', agg_value)
raw_values.append(agg_value)
normalized_values = [x+1 for x in sorted(range(len(raw_values)), key=lambda k: raw_values[k],
reverse=False)]
normalized_values_for_do = [normalized_values[i] - 2 for i in range(len(normalized_values)) if
normalized_values[i] not in {1, len(normalized_values)}]
# print('normalized_values: ', normalized_values)
# print('normalized_values_for_do: ', normalized_values_for_do)
modes_dictionnary = {}
for i in range(len(normalized_values)):
modes_dictionnary[i+1] = 1
if run_sgs:
solution = RCPSPSolution(problem=do_model,
rcpsp_permutation=normalized_values_for_do,
rcpsp_modes=modes,
)
solution.generate_schedule_from_permutation_serial_sgs_2(current_t=t,
completed_tasks=
{j: observation.tasks_details[j]
for j in observation.tasks_complete},
scheduled_tasks_start_times=scheduled_tasks_start_times)
schedule = solution.rcpsp_schedule
else:
schedule = None
sgs_policy = PolicyRCPSP(domain=self.domain,
schedule=schedule,
policy_method_params=PolicyMethodParams(
# base_policy_method=BasePolicyMethod.SGS_PRECEDENCE,
# base_policy_method=BasePolicyMethod.SGS_READY,
base_policy_method=self.params_gphh.base_policy_method,
delta_index_freedom=self.params_gphh.delta_index_freedom,
delta_time_freedom=self.params_gphh.delta_time_freedom),
permutation_task=normalized_values,
modes_dictionnary=modes_dictionnary)
action: SchedulingAction = sgs_policy.sample_action(observation)
# print('action_2: ', action.action)
return action
def _is_policy_defined_for(self, observation: D.T_agent[D.T_observation]) -> bool:
return True
class FixedPermutationPolicy(DeterministicPolicies):
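    """Policy that replays a fixed task permutation through the serial SGS instead of computing one from features."""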
def __init__(self, domain: SchedulingDomain, domain_model: SchedulingDomain, fixed_perm):
self.domain = domain
self.domain_model = domain_model
self.fixed_perm = fixed_perm
def reset(self):
pass
def _get_next_action(self, observation: D.T_agent[D.T_observation]) -> D.T_agent[D.T_concurrency[D.T_event]]:
run_sgs = True
cheat_mode = False
do_model = build_do_domain(self.domain_model)
modes = [observation.tasks_mode.get(j, 1) for j in sorted(self.domain.get_tasks_ids())]
modes = modes[1:-1]
if run_sgs:
scheduled_tasks_start_times = {}
for j in observation.tasks_details.keys():
if observation.tasks_details[j].start is not None:
scheduled_tasks_start_times[j] = observation.tasks_details[j].start
do_model.mode_details[j][1]['duration'] = observation.tasks_details[j].sampled_duration
# do_model = build_do_domain(self.domain)
# modes = [observation.tasks_mode.get(j, 1) for j in sorted(self.domain.get_tasks_ids())]
# modes = modes[1:-1]
#
# if run_sgs:
# scheduled_tasks_start_times = {}
# for j in observation.tasks_details.keys():
# # schedule[j] = {}
# if observation.tasks_details[j].start is not None:
# # schedule[j]["start_time"] = observation.tasks_details[j].start
# scheduled_tasks_start_times[j] = observation.tasks_details[j].start
# # if observation.tasks_details[j].end is not None:
# # schedule[j]["end_time"] = observation.tasks_details[j].end
# else:
# if not cheat_mode:
# # print('do_model: ', do_model)
# do_model.mode_details[j][1]['duration'] = self.domain_model.sample_task_duration(j, 1, 0.)
normalized_values = self.fixed_perm
normalized_values_for_do = [normalized_values[i] - 2 for i in range(len(normalized_values)) if
normalized_values[i] not in {1, len(normalized_values)}]
# print('normalized_values: ', normalized_values)
# print('normalized_values_for_do: ', normalized_values_for_do)
t = observation.t
modes_dictionnary = {}
for i in range(len(normalized_values)):
modes_dictionnary[i+1] = 1
if run_sgs:
solution = RCPSPSolution(problem=do_model,
rcpsp_permutation=normalized_values_for_do,
rcpsp_modes=modes,
)
solution.generate_schedule_from_permutation_serial_sgs_2(current_t=t,
completed_tasks=
{j: observation.tasks_details[j]
for j in observation.tasks_complete},
scheduled_tasks_start_times=scheduled_tasks_start_times)
schedule = solution.rcpsp_schedule
else:
schedule = None
sgs_policy = PolicyRCPSP(domain=self.domain,
schedule=schedule,
policy_method_params=PolicyMethodParams(
# base_policy_method=BasePolicyMethod.SGS_PRECEDENCE,
# base_policy_method=BasePolicyMethod.SGS_READY,
base_policy_method=BasePolicyMethod.FOLLOW_GANTT,
# delta_index_freedom=self.params_gphh.delta_index_freedom,
# delta_time_freedom=self.params_gphh.delta_time_freedom
),
permutation_task=normalized_values,
modes_dictionnary=modes_dictionnary)
action: SchedulingAction = sgs_policy.sample_action(observation)
# print('action_2: ', action.action)
return action
def _is_policy_defined_for(self, observation: D.T_agent[D.T_observation]) -> bool:
return True
|
[
"numpy.sum",
"numpy.mean",
"scipy.stats.kendalltau",
"deap.tools.HallOfFame",
"deap.base.Toolbox",
"numpy.max",
"skdecide.builders.discrete_optimization.rcpsp.solver.cpm.CPM",
"skdecide.hub.solver.do_solver.sk_to_do_binding.build_do_domain",
"deap.algorithms.eaSimple",
"numpy.median",
"operator.attrgetter",
"numpy.min",
"deap.creator.create",
"skdecide.builders.discrete_optimization.rcpsp.rcpsp_model.RCPSPSolution",
"numpy.delete",
"skdecide.hub.solver.do_solver.do_solver_scheduling.PolicyMethodParams",
"deap.tools.MultiStatistics",
"deap.tools.Statistics",
"scipy.spatial.distance.hamming"
] |
[((3338, 3353), 'numpy.mean', 'np.mean', (['values'], {}), '(values)\n', (3345, 3353), True, 'import numpy as np\n'), ((3576, 3590), 'numpy.min', 'np.min', (['values'], {}), '(values)\n', (3582, 3590), True, 'import numpy as np\n'), ((4153, 4167), 'numpy.max', 'np.max', (['values'], {}), '(values)\n', (4159, 4167), True, 'import numpy as np\n'), ((4854, 4868), 'skdecide.builders.discrete_optimization.rcpsp.solver.cpm.CPM', 'CPM', (['do_domain'], {}), '(do_domain)\n', (4857, 4868), False, 'from skdecide.builders.discrete_optimization.rcpsp.solver.cpm import CPM\n'), ((3819, 3833), 'numpy.sum', 'np.sum', (['values'], {}), '(values)\n', (3825, 3833), True, 'import numpy as np\n'), ((3854, 3892), 'numpy.min', 'np.min', (['[x for x in values if x > 0.0]'], {}), '([x for x in values if x > 0.0])\n', (3860, 3892), True, 'import numpy as np\n'), ((3916, 3930), 'numpy.min', 'np.min', (['values'], {}), '(values)\n', (3922, 3930), True, 'import numpy as np\n'), ((20446, 20507), 'deap.creator.create', 'creator.create', (['"""FitnessMin"""', 'Fitness'], {'weights': '(self.weight,)'}), "('FitnessMin', Fitness, weights=(self.weight,))\n", (20460, 20507), False, 'from deap import creator\n'), ((20516, 20587), 'deap.creator.create', 'creator.create', (['"""Individual"""', 'PrimitiveTree'], {'fitness': 'creator.FitnessMin'}), "('Individual', PrimitiveTree, fitness=creator.FitnessMin)\n", (20530, 20587), False, 'from deap import creator\n'), ((20612, 20621), 'deap.base.Toolbox', 'Toolbox', ([], {}), '()\n', (20619, 20621), False, 'from deap.base import Toolbox, Fitness\n'), ((22211, 22259), 'deap.tools.Statistics', 'tools.Statistics', (['(lambda ind: ind.fitness.values)'], {}), '(lambda ind: ind.fitness.values)\n', (22227, 22259), False, 'from deap import tools\n'), ((22281, 22302), 'deap.tools.Statistics', 'tools.Statistics', (['len'], {}), '(len)\n', (22297, 22302), False, 'from deap import tools\n'), ((22320, 22377), 'deap.tools.MultiStatistics', 'tools.MultiStatistics', ([], {'fitness': 'stats_fit', 'size': 'stats_size'}), '(fitness=stats_fit, size=stats_size)\n', (22341, 22377), False, 'from deap import tools\n'), ((22600, 22619), 'deap.tools.HallOfFame', 'tools.HallOfFame', (['(1)'], {}), '(1)\n', (22616, 22619), False, 'from deap import tools\n'), ((22662, 22786), 'deap.algorithms.eaSimple', 'algorithms.eaSimple', (['pop', 'self.toolbox', 'crossover_rate', 'mutation_rate', 'n_gen'], {'stats': 'mstats', 'halloffame': 'hof', 'verbose': '(True)'}), '(pop, self.toolbox, crossover_rate, mutation_rate, n_gen,\n stats=mstats, halloffame=hof, verbose=True)\n', (22681, 22786), False, 'from deap import algorithms\n'), ((31789, 31823), 'skdecide.hub.solver.do_solver.sk_to_do_binding.build_do_domain', 'build_do_domain', (['self.domain_model'], {}), '(self.domain_model)\n', (31804, 31823), False, 'from skdecide.hub.solver.do_solver.sk_to_do_binding import build_do_domain\n'), ((37295, 37329), 'skdecide.hub.solver.do_solver.sk_to_do_binding.build_do_domain', 'build_do_domain', (['self.domain_model'], {}), '(self.domain_model)\n', (37310, 37329), False, 'from skdecide.hub.solver.do_solver.sk_to_do_binding import build_do_domain\n'), ((43716, 43750), 'skdecide.hub.solver.do_solver.sk_to_do_binding.build_do_domain', 'build_do_domain', (['self.domain_model'], {}), '(self.domain_model)\n', (43731, 43750), False, 'from skdecide.hub.solver.do_solver.sk_to_do_binding import build_do_domain\n'), ((24151, 24174), 'skdecide.hub.solver.do_solver.sk_to_do_binding.build_do_domain', 'build_do_domain', (['domain'], {}), 
'(domain)\n', (24166, 24174), False, 'from skdecide.hub.solver.do_solver.sk_to_do_binding import build_do_domain\n'), ((25462, 25560), 'skdecide.builders.discrete_optimization.rcpsp.rcpsp_model.RCPSPSolution', 'RCPSPSolution', ([], {'problem': 'do_model', 'rcpsp_permutation': 'normalized_values_for_do', 'rcpsp_modes': 'modes'}), '(problem=do_model, rcpsp_permutation=normalized_values_for_do,\n rcpsp_modes=modes)\n', (25475, 25560), False, 'from skdecide.builders.discrete_optimization.rcpsp.rcpsp_model import RCPSPSolution\n'), ((25874, 25887), 'numpy.mean', 'np.mean', (['vals'], {}), '(vals)\n', (25881, 25887), True, 'import numpy as np\n'), ((27854, 27877), 'skdecide.hub.solver.do_solver.sk_to_do_binding.build_do_domain', 'build_do_domain', (['domain'], {}), '(domain)\n', (27869, 27877), False, 'from skdecide.hub.solver.do_solver.sk_to_do_binding import build_do_domain\n'), ((30463, 30476), 'numpy.mean', 'np.mean', (['vals'], {}), '(vals)\n', (30470, 30476), True, 'import numpy as np\n'), ((34488, 34586), 'skdecide.builders.discrete_optimization.rcpsp.rcpsp_model.RCPSPSolution', 'RCPSPSolution', ([], {'problem': 'do_model', 'rcpsp_permutation': 'normalized_values_for_do', 'rcpsp_modes': 'modes'}), '(problem=do_model, rcpsp_permutation=normalized_values_for_do,\n rcpsp_modes=modes)\n', (34501, 34586), False, 'from skdecide.builders.discrete_optimization.rcpsp.rcpsp_model import RCPSPSolution\n'), ((41410, 41508), 'skdecide.builders.discrete_optimization.rcpsp.rcpsp_model.RCPSPSolution', 'RCPSPSolution', ([], {'problem': 'do_model', 'rcpsp_permutation': 'normalized_values_for_do', 'rcpsp_modes': 'modes'}), '(problem=do_model, rcpsp_permutation=normalized_values_for_do,\n rcpsp_modes=modes)\n', (41423, 41508), False, 'from skdecide.builders.discrete_optimization.rcpsp.rcpsp_model import RCPSPSolution\n'), ((45812, 45910), 'skdecide.builders.discrete_optimization.rcpsp.rcpsp_model.RCPSPSolution', 'RCPSPSolution', ([], {'problem': 'do_model', 'rcpsp_permutation': 'normalized_values_for_do', 'rcpsp_modes': 'modes'}), '(problem=do_model, rcpsp_permutation=normalized_values_for_do,\n rcpsp_modes=modes)\n', (45825, 45910), False, 'from skdecide.builders.discrete_optimization.rcpsp.rcpsp_model import RCPSPSolution\n'), ((28444, 28467), 'skdecide.hub.solver.do_solver.sk_to_do_binding.build_do_domain', 'build_do_domain', (['domain'], {}), '(domain)\n', (28459, 28467), False, 'from skdecide.hub.solver.do_solver.sk_to_do_binding import build_do_domain\n'), ((29699, 29775), 'scipy.stats.kendalltau', 'stats.kendalltau', (['heuristic_permutation', 'self.reference_permutations[domain]'], {}), '(heuristic_permutation, self.reference_permutations[domain])\n', (29715, 29775), False, 'from scipy import stats\n'), ((29902, 29978), 'scipy.spatial.distance.hamming', 'distance.hamming', (['heuristic_permutation', 'self.reference_permutations[domain]'], {}), '(heuristic_permutation, self.reference_permutations[domain])\n', (29918, 29978), False, 'from scipy.spatial import distance\n'), ((30082, 30158), 'scipy.stats.kendalltau', 'stats.kendalltau', (['heuristic_permutation', 'self.reference_permutations[domain]'], {}), '(heuristic_permutation, self.reference_permutations[domain])\n', (30098, 30158), False, 'from scipy import stats\n'), ((35451, 35647), 'skdecide.hub.solver.do_solver.do_solver_scheduling.PolicyMethodParams', 'PolicyMethodParams', ([], {'base_policy_method': 'self.params_gphh.base_policy_method', 'delta_index_freedom': 'self.params_gphh.delta_index_freedom', 'delta_time_freedom': 
'self.params_gphh.delta_time_freedom'}), '(base_policy_method=self.params_gphh.base_policy_method,\n delta_index_freedom=self.params_gphh.delta_index_freedom,\n delta_time_freedom=self.params_gphh.delta_time_freedom)\n', (35469, 35647), False, 'from skdecide.hub.solver.do_solver.do_solver_scheduling import PolicyRCPSP, DOSolver, PolicyMethodParams, BasePolicyMethod, SolvingMethod\n'), ((40320, 40342), 'numpy.mean', 'np.mean', (['output_values'], {}), '(output_values)\n', (40327, 40342), True, 'import numpy as np\n'), ((42373, 42569), 'skdecide.hub.solver.do_solver.do_solver_scheduling.PolicyMethodParams', 'PolicyMethodParams', ([], {'base_policy_method': 'self.params_gphh.base_policy_method', 'delta_index_freedom': 'self.params_gphh.delta_index_freedom', 'delta_time_freedom': 'self.params_gphh.delta_time_freedom'}), '(base_policy_method=self.params_gphh.base_policy_method,\n delta_index_freedom=self.params_gphh.delta_index_freedom,\n delta_time_freedom=self.params_gphh.delta_time_freedom)\n', (42391, 42569), False, 'from skdecide.hub.solver.do_solver.do_solver_scheduling import PolicyRCPSP, DOSolver, PolicyMethodParams, BasePolicyMethod, SolvingMethod\n'), ((46775, 46843), 'skdecide.hub.solver.do_solver.do_solver_scheduling.PolicyMethodParams', 'PolicyMethodParams', ([], {'base_policy_method': 'BasePolicyMethod.FOLLOW_GANTT'}), '(base_policy_method=BasePolicyMethod.FOLLOW_GANTT)\n', (46793, 46843), False, 'from skdecide.hub.solver.do_solver.do_solver_scheduling import PolicyRCPSP, DOSolver, PolicyMethodParams, BasePolicyMethod, SolvingMethod\n'), ((22039, 22068), 'operator.attrgetter', 'operator.attrgetter', (['"""height"""'], {}), "('height')\n", (22058, 22068), False, 'import operator\n'), ((22144, 22173), 'operator.attrgetter', 'operator.attrgetter', (['"""height"""'], {}), "('height')\n", (22163, 22173), False, 'import operator\n'), ((30189, 30265), 'scipy.spatial.distance.hamming', 'distance.hamming', (['heuristic_permutation', 'self.reference_permutations[domain]'], {}), '(heuristic_permutation, self.reference_permutations[domain])\n', (30205, 30265), False, 'from scipy.spatial import distance\n'), ((39752, 39776), 'numpy.median', 'np.median', (['output_values'], {}), '(output_values)\n', (39761, 39776), True, 'import numpy as np\n'), ((40116, 40151), 'numpy.delete', 'np.delete', (['output_values', 'to_remove'], {}), '(output_values, to_remove)\n', (40125, 40151), True, 'import numpy as np\n'), ((40450, 40474), 'numpy.median', 'np.median', (['output_values'], {}), '(output_values)\n', (40459, 40474), True, 'import numpy as np\n'), ((17350, 17469), 'skdecide.hub.solver.do_solver.do_solver_scheduling.PolicyMethodParams', 'PolicyMethodParams', ([], {'base_policy_method': 'BasePolicyMethod.SGS_PRECEDENCE', 'delta_index_freedom': '(0)', 'delta_time_freedom': '(0)'}), '(base_policy_method=BasePolicyMethod.SGS_PRECEDENCE,\n delta_index_freedom=0, delta_time_freedom=0)\n', (17368, 17469), False, 'from skdecide.hub.solver.do_solver.do_solver_scheduling import PolicyRCPSP, DOSolver, PolicyMethodParams, BasePolicyMethod, SolvingMethod\n'), ((17931, 17955), 'numpy.max', 'np.max', (['full_permutation'], {}), '(full_permutation)\n', (17937, 17955), True, 'import numpy as np\n')]
|
from machin.utils.media import (
show_image, create_video, create_video_subproc,
create_image, create_image_subproc
)
from os.path import join
import os
import pytest
import numpy as np
@pytest.fixture(scope="function")
def images():
images = [
np.random.randint(0, 255, size=[128, 128], dtype=np.uint8)
for _ in range(120)
]
return images
@pytest.fixture(scope="function")
def images_f():
images = [
np.random.rand(128, 128)
for _ in range(120)
]
return images
def test_show_image(images):
show_image(images[0], show_normalized=True)
show_image(images[0], show_normalized=False)
def test_create_video(images, tmpdir):
tmp_dir = str(tmpdir.make_numbered_dir())
create_video(images, tmp_dir, "vid", extension=".gif")
assert os.path.exists(join(tmp_dir, "vid.gif"))
create_video(images, tmp_dir, "vid", extension=".mp4")
assert os.path.exists(join(tmp_dir, "vid.mp4"))
def test_create_video_float(images_f, tmpdir):
tmp_dir = str(tmpdir.make_numbered_dir())
create_video(images_f, tmp_dir, "vid", extension=".gif")
assert os.path.exists(join(tmp_dir, "vid.gif"))
create_video(images_f, tmp_dir, "vid", extension=".mp4")
assert os.path.exists(join(tmp_dir, "vid.mp4"))
def test_create_video_subproc(images, tmpdir):
tmp_dir = str(tmpdir.make_numbered_dir())
create_video_subproc([], tmp_dir, "empty", extension=".gif")()
create_video_subproc(images, tmp_dir, "vid", extension=".gif")()
assert os.path.exists(join(tmp_dir, "vid.gif"))
def test_create_image(images, tmpdir):
tmp_dir = str(tmpdir.make_numbered_dir())
create_image(images[0], tmp_dir, "img", extension=".png")
assert os.path.exists(join(tmp_dir, "img.png"))
def test_create_image_float(images_f, tmpdir):
tmp_dir = str(tmpdir.make_numbered_dir())
create_image(images_f[0], tmp_dir, "img", extension=".png")
assert os.path.exists(join(tmp_dir, "img.png"))
def test_create_image_subproc(images, tmpdir):
tmp_dir = str(tmpdir.make_numbered_dir())
create_image_subproc(images[0], tmp_dir, "img", extension=".png")()
assert os.path.exists(join(tmp_dir, "img.png"))
|
[
"machin.utils.media.create_video_subproc",
"machin.utils.media.create_image",
"machin.utils.media.create_image_subproc",
"pytest.fixture",
"numpy.random.randint",
"numpy.random.rand",
"machin.utils.media.create_video",
"os.path.join",
"machin.utils.media.show_image"
] |
[((197, 229), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (211, 229), False, 'import pytest\n'), ((381, 413), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (395, 413), False, 'import pytest\n'), ((565, 608), 'machin.utils.media.show_image', 'show_image', (['images[0]'], {'show_normalized': '(True)'}), '(images[0], show_normalized=True)\n', (575, 608), False, 'from machin.utils.media import show_image, create_video, create_video_subproc, create_image, create_image_subproc\n'), ((613, 657), 'machin.utils.media.show_image', 'show_image', (['images[0]'], {'show_normalized': '(False)'}), '(images[0], show_normalized=False)\n', (623, 657), False, 'from machin.utils.media import show_image, create_video, create_video_subproc, create_image, create_image_subproc\n'), ((749, 803), 'machin.utils.media.create_video', 'create_video', (['images', 'tmp_dir', '"""vid"""'], {'extension': '""".gif"""'}), "(images, tmp_dir, 'vid', extension='.gif')\n", (761, 803), False, 'from machin.utils.media import show_image, create_video, create_video_subproc, create_image, create_image_subproc\n'), ((860, 914), 'machin.utils.media.create_video', 'create_video', (['images', 'tmp_dir', '"""vid"""'], {'extension': '""".mp4"""'}), "(images, tmp_dir, 'vid', extension='.mp4')\n", (872, 914), False, 'from machin.utils.media import show_image, create_video, create_video_subproc, create_image, create_image_subproc\n'), ((1066, 1122), 'machin.utils.media.create_video', 'create_video', (['images_f', 'tmp_dir', '"""vid"""'], {'extension': '""".gif"""'}), "(images_f, tmp_dir, 'vid', extension='.gif')\n", (1078, 1122), False, 'from machin.utils.media import show_image, create_video, create_video_subproc, create_image, create_image_subproc\n'), ((1179, 1235), 'machin.utils.media.create_video', 'create_video', (['images_f', 'tmp_dir', '"""vid"""'], {'extension': '""".mp4"""'}), "(images_f, tmp_dir, 'vid', extension='.mp4')\n", (1191, 1235), False, 'from machin.utils.media import show_image, create_video, create_video_subproc, create_image, create_image_subproc\n'), ((1662, 1719), 'machin.utils.media.create_image', 'create_image', (['images[0]', 'tmp_dir', '"""img"""'], {'extension': '""".png"""'}), "(images[0], tmp_dir, 'img', extension='.png')\n", (1674, 1719), False, 'from machin.utils.media import show_image, create_video, create_video_subproc, create_image, create_image_subproc\n'), ((1871, 1930), 'machin.utils.media.create_image', 'create_image', (['images_f[0]', 'tmp_dir', '"""img"""'], {'extension': '""".png"""'}), "(images_f[0], tmp_dir, 'img', extension='.png')\n", (1883, 1930), False, 'from machin.utils.media import show_image, create_video, create_video_subproc, create_image, create_image_subproc\n'), ((267, 325), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {'size': '[128, 128]', 'dtype': 'np.uint8'}), '(0, 255, size=[128, 128], dtype=np.uint8)\n', (284, 325), True, 'import numpy as np\n'), ((453, 477), 'numpy.random.rand', 'np.random.rand', (['(128)', '(128)'], {}), '(128, 128)\n', (467, 477), True, 'import numpy as np\n'), ((830, 854), 'os.path.join', 'join', (['tmp_dir', '"""vid.gif"""'], {}), "(tmp_dir, 'vid.gif')\n", (834, 854), False, 'from os.path import join\n'), ((941, 965), 'os.path.join', 'join', (['tmp_dir', '"""vid.mp4"""'], {}), "(tmp_dir, 'vid.mp4')\n", (945, 965), False, 'from os.path import join\n'), ((1149, 1173), 'os.path.join', 'join', (['tmp_dir', '"""vid.gif"""'], {}), "(tmp_dir, 
'vid.gif')\n", (1153, 1173), False, 'from os.path import join\n'), ((1262, 1286), 'os.path.join', 'join', (['tmp_dir', '"""vid.mp4"""'], {}), "(tmp_dir, 'vid.mp4')\n", (1266, 1286), False, 'from os.path import join\n'), ((1387, 1447), 'machin.utils.media.create_video_subproc', 'create_video_subproc', (['[]', 'tmp_dir', '"""empty"""'], {'extension': '""".gif"""'}), "([], tmp_dir, 'empty', extension='.gif')\n", (1407, 1447), False, 'from machin.utils.media import show_image, create_video, create_video_subproc, create_image, create_image_subproc\n'), ((1454, 1516), 'machin.utils.media.create_video_subproc', 'create_video_subproc', (['images', 'tmp_dir', '"""vid"""'], {'extension': '""".gif"""'}), "(images, tmp_dir, 'vid', extension='.gif')\n", (1474, 1516), False, 'from machin.utils.media import show_image, create_video, create_video_subproc, create_image, create_image_subproc\n'), ((1545, 1569), 'os.path.join', 'join', (['tmp_dir', '"""vid.gif"""'], {}), "(tmp_dir, 'vid.gif')\n", (1549, 1569), False, 'from os.path import join\n'), ((1746, 1770), 'os.path.join', 'join', (['tmp_dir', '"""img.png"""'], {}), "(tmp_dir, 'img.png')\n", (1750, 1770), False, 'from os.path import join\n'), ((1957, 1981), 'os.path.join', 'join', (['tmp_dir', '"""img.png"""'], {}), "(tmp_dir, 'img.png')\n", (1961, 1981), False, 'from os.path import join\n'), ((2082, 2147), 'machin.utils.media.create_image_subproc', 'create_image_subproc', (['images[0]', 'tmp_dir', '"""img"""'], {'extension': '""".png"""'}), "(images[0], tmp_dir, 'img', extension='.png')\n", (2102, 2147), False, 'from machin.utils.media import show_image, create_video, create_video_subproc, create_image, create_image_subproc\n'), ((2176, 2200), 'os.path.join', 'join', (['tmp_dir', '"""img.png"""'], {}), "(tmp_dir, 'img.png')\n", (2180, 2200), False, 'from os.path import join\n')]
|
# ###########################################################################
#
# CLOUDERA APPLIED MACHINE LEARNING PROTOTYPE (AMP)
# (C) Cloudera, Inc. 2020
# All rights reserved.
#
# Applicable Open Source License: Apache 2.0
#
# NOTE: Cloudera open source products are modular software products
# made up of hundreds of individual components, each of which was
# individually copyrighted. Each Cloudera open source product is a
# collective work under U.S. Copyright Law. Your license to use the
# collective work is as provided in your written agreement with
# Cloudera. Used apart from the collective work, this file is
# licensed for your use pursuant to the open source license
# identified above.
#
# This code is provided to you pursuant a written agreement with
# (i) Cloudera, Inc. or (ii) a third-party authorized to distribute
# this code. If you do not have a written agreement with Cloudera nor
# with an authorized and properly licensed third party, you do not
# have any rights to access nor to use this code.
#
# Absent a written agreement with Cloudera, Inc. (“Cloudera”) to the
# contrary, A) CLOUDERA PROVIDES THIS CODE TO YOU WITHOUT WARRANTIES OF ANY
# KIND; (B) CLOUDERA DISCLAIMS ANY AND ALL EXPRESS AND IMPLIED
# WARRANTIES WITH RESPECT TO THIS CODE, INCLUDING BUT NOT LIMITED TO
# IMPLIED WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE; (C) CLOUDERA IS NOT LIABLE TO YOU,
# AND WILL NOT DEFEND, INDEMNIFY, NOR HOLD YOU HARMLESS FOR ANY CLAIMS
# ARISING FROM OR RELATED TO THE CODE; AND (D)WITH RESPECT TO YOUR EXERCISE
# OF ANY RIGHTS GRANTED TO YOU FOR THE CODE, CLOUDERA IS NOT LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, PUNITIVE OR
# CONSEQUENTIAL DAMAGES INCLUDING, BUT NOT LIMITED TO, DAMAGES
# RELATED TO LOST REVENUE, LOST PROFITS, LOSS OF INCOME, LOSS OF
# BUSINESS ADVANTAGE OR UNAVAILABILITY, OR LOSS OR CORRUPTION OF
# DATA.
#
# ###########################################################################
import datetime
import numpy as np
import pandas as pd
from sts.models.prophet import (
add_season_weekday_indicators,
seasonal_daily_prophet_model
)
from sts.data.loader import load_california_electricity_demand
# Load all available data for training
df = load_california_electricity_demand()
# Take log transform for fully multiplicative model
df['y'] = df.y.apply(np.log)
# Fit best current model
model = seasonal_daily_prophet_model(df)
# Make predictions for one year ahead of most recent training data
future = add_season_weekday_indicators(
model.make_future_dataframe(periods=24*365, freq='H')
)
forecast = model.predict(future)
samples = model.predictive_samples(future)
# Reverse log transform
predictions = np.exp(samples['yhat'])
prediction_df = (
future
.merge(pd.DataFrame(predictions), left_index=True, right_index=True)
.drop(['winter_weekday', 'winter_weekend', 'summer_weekday', 'summer_weekend'],
axis='columns')
.tail(24*365) # keep one year of hourly predictions from most recent training date
)
# Save predictions
prediction_df.to_csv('data/forecast.csv', index=False)
|
[
"pandas.DataFrame",
"sts.data.loader.load_california_electricity_demand",
"numpy.exp",
"sts.models.prophet.seasonal_daily_prophet_model"
] |
[((2318, 2354), 'sts.data.loader.load_california_electricity_demand', 'load_california_electricity_demand', ([], {}), '()\n', (2352, 2354), False, 'from sts.data.loader import load_california_electricity_demand\n'), ((2473, 2505), 'sts.models.prophet.seasonal_daily_prophet_model', 'seasonal_daily_prophet_model', (['df'], {}), '(df)\n', (2501, 2505), False, 'from sts.models.prophet import add_season_weekday_indicators, seasonal_daily_prophet_model\n'), ((2793, 2816), 'numpy.exp', 'np.exp', (["samples['yhat']"], {}), "(samples['yhat'])\n", (2799, 2816), True, 'import numpy as np\n'), ((2858, 2883), 'pandas.DataFrame', 'pd.DataFrame', (['predictions'], {}), '(predictions)\n', (2870, 2883), True, 'import pandas as pd\n')]
|
from __future__ import print_function
import argparse
import gzip
import json
import logging
import os
import traceback
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Conv2D, Dense, Flatten
logging.basicConfig(level=logging.DEBUG)
# Define the model object
class SmallConv(Model):
def __init__(self):
super(SmallConv, self).__init__()
self.conv1 = Conv2D(32, 3, activation="relu")
self.flatten = Flatten()
self.d1 = Dense(128, activation="relu")
self.d2 = Dense(10)
def call(self, x):
x = self.conv1(x)
x = self.flatten(x)
x = self.d1(x)
return self.d2(x)
# Decode and preprocess data
def convert_to_numpy(data_dir, images_file, labels_file):
"""Byte string to numpy arrays"""
with gzip.open(os.path.join(data_dir, images_file), "rb") as f:
images = np.frombuffer(f.read(), np.uint8, offset=16).reshape(-1, 28, 28)
with gzip.open(os.path.join(data_dir, labels_file), "rb") as f:
labels = np.frombuffer(f.read(), np.uint8, offset=8)
return (images, labels)
def mnist_to_numpy(data_dir, train):
"""Load raw MNIST data into numpy array
Args:
data_dir (str): directory of MNIST raw data.
This argument can be accessed via SM_CHANNEL_TRAINING
train (bool): use training data
Returns:
tuple of images and labels as numpy array
"""
if train:
images_file = "train-images-idx3-ubyte.gz"
labels_file = "train-labels-idx1-ubyte.gz"
else:
images_file = "t10k-images-idx3-ubyte.gz"
labels_file = "t10k-labels-idx1-ubyte.gz"
return convert_to_numpy(data_dir, images_file, labels_file)
def normalize(x, axis):
eps = np.finfo(float).eps
mean = np.mean(x, axis=axis, keepdims=True)
# avoid division by zero
std = np.std(x, axis=axis, keepdims=True) + eps
return (x - mean) / std
# Training logic
def train(args):
# create data loader from the train / test channels
x_train, y_train = mnist_to_numpy(data_dir=args.train, train=True)
x_test, y_test = mnist_to_numpy(data_dir=args.test, train=False)
x_train, x_test = x_train.astype(np.float32), x_test.astype(np.float32)
# normalize the inputs to mean 0 and std 1
x_train, x_test = normalize(x_train, (1, 2)), normalize(x_test, (1, 2))
# expand channel axis
# tf uses depth minor convention
x_train, x_test = np.expand_dims(x_train, axis=3), np.expand_dims(x_test, axis=3)
# normalize the data to mean 0 and std 1
train_loader = (
tf.data.Dataset.from_tensor_slices((x_train, y_train))
.shuffle(len(x_train))
.batch(args.batch_size)
)
test_loader = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(args.batch_size)
model = SmallConv()
model.compile()
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.Adam(
learning_rate=args.learning_rate, beta_1=args.beta_1, beta_2=args.beta_2
)
train_loss = tf.keras.metrics.Mean(name="train_loss")
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name="train_accuracy")
test_loss = tf.keras.metrics.Mean(name="test_loss")
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name="test_accuracy")
@tf.function
def train_step(images, labels):
with tf.GradientTape() as tape:
predictions = model(images, training=True)
loss = loss_fn(labels, predictions)
grad = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grad, model.trainable_variables))
train_loss(loss)
train_accuracy(labels, predictions)
return
@tf.function
def test_step(images, labels):
predictions = model(images, training=False)
t_loss = loss_fn(labels, predictions)
test_loss(t_loss)
test_accuracy(labels, predictions)
return
print("Training starts ...")
for epoch in range(args.epochs):
train_loss.reset_states()
train_accuracy.reset_states()
test_loss.reset_states()
test_accuracy.reset_states()
for batch, (images, labels) in enumerate(train_loader):
train_step(images, labels)
for images, labels in test_loader:
test_step(images, labels)
print(
f"Epoch {epoch + 1}, "
f"Loss: {train_loss.result()}, "
f"Accuracy: {train_accuracy.result() * 100}, "
f"Test Loss: {test_loss.result()}, "
f"Test Accuracy: {test_accuracy.result() * 100}"
)
# Save the model
# A version number is needed for the serving container
# to load the model
version = "00000000"
ckpt_dir = os.path.join(args.model_dir, version)
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
model.save(ckpt_dir)
return
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--batch-size", type=int, default=32)
parser.add_argument("--epochs", type=int, default=1)
parser.add_argument("--learning-rate", type=float, default=1e-3)
parser.add_argument("--beta_1", type=float, default=0.9)
parser.add_argument("--beta_2", type=float, default=0.999)
# Environment variables given by the training image
parser.add_argument("--model-dir", type=str, default=os.environ["SM_MODEL_DIR"])
parser.add_argument("--train", type=str, default=os.environ["SM_CHANNEL_TRAINING"])
parser.add_argument("--test", type=str, default=os.environ["SM_CHANNEL_TESTING"])
parser.add_argument("--current-host", type=str, default=os.environ["SM_CURRENT_HOST"])
parser.add_argument("--hosts", type=list, default=json.loads(os.environ["SM_HOSTS"]))
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
train(args)
|
[
"argparse.ArgumentParser",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.metrics.Mean",
"numpy.mean",
"os.path.join",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"json.loads",
"numpy.std",
"os.path.exists",
"numpy.finfo",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Conv2D",
"os.makedirs",
"logging.basicConfig",
"numpy.expand_dims",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.keras.metrics.SparseCategoricalAccuracy",
"tensorflow.GradientTape"
] |
[((260, 300), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (279, 300), False, 'import logging\n'), ((1832, 1868), 'numpy.mean', 'np.mean', (['x'], {'axis': 'axis', 'keepdims': '(True)'}), '(x, axis=axis, keepdims=True)\n', (1839, 1868), True, 'import numpy as np\n'), ((2916, 2979), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (2961, 2979), True, 'import tensorflow as tf\n'), ((2996, 3099), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'args.learning_rate', 'beta_1': 'args.beta_1', 'beta_2': 'args.beta_2'}), '(learning_rate=args.learning_rate, beta_1=args.\n beta_1, beta_2=args.beta_2)\n', (3020, 3099), True, 'import tensorflow as tf\n'), ((3127, 3167), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {'name': '"""train_loss"""'}), "(name='train_loss')\n", (3148, 3167), True, 'import tensorflow as tf\n'), ((3189, 3254), 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', ([], {'name': '"""train_accuracy"""'}), "(name='train_accuracy')\n", (3231, 3254), True, 'import tensorflow as tf\n'), ((3272, 3311), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {'name': '"""test_loss"""'}), "(name='test_loss')\n", (3293, 3311), True, 'import tensorflow as tf\n'), ((3332, 3396), 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', ([], {'name': '"""test_accuracy"""'}), "(name='test_accuracy')\n", (3374, 3396), True, 'import tensorflow as tf\n'), ((4867, 4904), 'os.path.join', 'os.path.join', (['args.model_dir', 'version'], {}), '(args.model_dir, version)\n', (4879, 4904), False, 'import os\n'), ((5041, 5066), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5064, 5066), False, 'import argparse\n'), ((441, 473), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3)'], {'activation': '"""relu"""'}), "(32, 3, activation='relu')\n", (447, 473), False, 'from tensorflow.keras.layers import Conv2D, Dense, Flatten\n'), ((497, 506), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (504, 506), False, 'from tensorflow.keras.layers import Conv2D, Dense, Flatten\n'), ((525, 554), 'tensorflow.keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (530, 554), False, 'from tensorflow.keras.layers import Conv2D, Dense, Flatten\n'), ((573, 582), 'tensorflow.keras.layers.Dense', 'Dense', (['(10)'], {}), '(10)\n', (578, 582), False, 'from tensorflow.keras.layers import Conv2D, Dense, Flatten\n'), ((1800, 1815), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (1808, 1815), True, 'import numpy as np\n'), ((1908, 1943), 'numpy.std', 'np.std', (['x'], {'axis': 'axis', 'keepdims': '(True)'}), '(x, axis=axis, keepdims=True)\n', (1914, 1943), True, 'import numpy as np\n'), ((2499, 2530), 'numpy.expand_dims', 'np.expand_dims', (['x_train'], {'axis': '(3)'}), '(x_train, axis=3)\n', (2513, 2530), True, 'import numpy as np\n'), ((2532, 2562), 'numpy.expand_dims', 'np.expand_dims', (['x_test'], {'axis': '(3)'}), '(x_test, axis=3)\n', (2546, 2562), True, 'import numpy as np\n'), ((4916, 4940), 'os.path.exists', 'os.path.exists', (['ckpt_dir'], {}), '(ckpt_dir)\n', (4930, 4940), False, 'import os\n'), ((4950, 4971), 'os.makedirs', 'os.makedirs', (['ckpt_dir'], {}), '(ckpt_dir)\n', (4961, 4971), 
False, 'import os\n'), ((856, 891), 'os.path.join', 'os.path.join', (['data_dir', 'images_file'], {}), '(data_dir, images_file)\n', (868, 891), False, 'import os\n'), ((1007, 1042), 'os.path.join', 'os.path.join', (['data_dir', 'labels_file'], {}), '(data_dir, labels_file)\n', (1019, 1042), False, 'import os\n'), ((2781, 2833), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x_test, y_test)'], {}), '((x_test, y_test))\n', (2815, 2833), True, 'import tensorflow as tf\n'), ((3464, 3481), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (3479, 3481), True, 'import tensorflow as tf\n'), ((5842, 5876), 'json.loads', 'json.loads', (["os.environ['SM_HOSTS']"], {}), "(os.environ['SM_HOSTS'])\n", (5852, 5876), False, 'import json\n'), ((2638, 2692), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x_train, y_train)'], {}), '((x_train, y_train))\n', (2672, 2692), True, 'import tensorflow as tf\n')]
|
import pathlib
import numpy
import pytest
import helpers
import meshio
@pytest.mark.parametrize(
"mesh", [helpers.tri_mesh, helpers.quad_mesh, helpers.tri_quad_mesh]
)
def test_obj(mesh):
def writer(*args, **kwargs):
return meshio.obj.write(*args, **kwargs)
for k, c in enumerate(mesh.cells):
mesh.cells[k] = meshio.CellBlock(c.type, c.data.astype(numpy.int32))
helpers.write_read(writer, meshio.obj.read, mesh, 1.0e-12)
@pytest.mark.parametrize(
"filename, ref_sum, ref_num_cells", [("elephav.obj", 3.678372172450000e05, 1148)]
)
def test_reference_file(filename, ref_sum, ref_num_cells):
this_dir = pathlib.Path(__file__).resolve().parent
filename = this_dir / "meshes" / "obj" / filename
mesh = meshio.read(filename)
tol = 1.0e-5
s = numpy.sum(mesh.points)
assert abs(s - ref_sum) < tol * abs(ref_sum)
assert mesh.cells[0].type == "triangle"
assert len(mesh.cells[0].data) == ref_num_cells
|
[
"numpy.sum",
"meshio.read",
"pathlib.Path",
"meshio.obj.write",
"pytest.mark.parametrize",
"helpers.write_read"
] |
[((76, 173), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mesh"""', '[helpers.tri_mesh, helpers.quad_mesh, helpers.tri_quad_mesh]'], {}), "('mesh', [helpers.tri_mesh, helpers.quad_mesh,\n helpers.tri_quad_mesh])\n", (99, 173), False, 'import pytest\n'), ((462, 565), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""filename, ref_sum, ref_num_cells"""', "[('elephav.obj', 367837.217245, 1148)]"], {}), "('filename, ref_sum, ref_num_cells', [('elephav.obj',\n 367837.217245, 1148)])\n", (485, 565), False, 'import pytest\n'), ((400, 456), 'helpers.write_read', 'helpers.write_read', (['writer', 'meshio.obj.read', 'mesh', '(1e-12)'], {}), '(writer, meshio.obj.read, mesh, 1e-12)\n', (418, 456), False, 'import helpers\n'), ((755, 776), 'meshio.read', 'meshio.read', (['filename'], {}), '(filename)\n', (766, 776), False, 'import meshio\n'), ((802, 824), 'numpy.sum', 'numpy.sum', (['mesh.points'], {}), '(mesh.points)\n', (811, 824), False, 'import numpy\n'), ((244, 277), 'meshio.obj.write', 'meshio.obj.write', (['*args'], {}), '(*args, **kwargs)\n', (260, 277), False, 'import meshio\n'), ((649, 671), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (661, 671), False, 'import pathlib\n')]
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Directed graph object for representing coupling between physical qubits.
The nodes of the graph correspond to physical qubits (represented as integers) and the
directed edges indicate which physical qubits are coupled and the permitted direction of
CNOT gates. The object has a distance function that can be used to map quantum circuits
onto a device with this coupling.
"""
import io
import warnings
import numpy as np
import retworkx as rx
from qiskit.transpiler.exceptions import CouplingError
from qiskit.exceptions import MissingOptionalLibraryError
class CouplingMap:
"""
Directed graph specifying fixed coupling.
Nodes correspond to physical qubits (integers) and directed edges correspond
to permitted CNOT gates
"""
__slots__ = ("description", "graph", "_dist_matrix", "_qubit_list", "_size", "_is_symmetric")
def __init__(self, couplinglist=None, description=None):
"""
Create coupling graph. By default, the generated coupling has no nodes.
Args:
couplinglist (list or None): An initial coupling graph, specified as
an adjacency list containing couplings, e.g. [[0,1], [0,2], [1,2]].
It is required that nodes are contiguously indexed starting at 0.
Missed nodes will be added as isolated nodes in the coupling map.
description (str): A string to describe the coupling map.
"""
self.description = description
# the coupling map graph
self.graph = rx.PyDiGraph()
# a dict of dicts from node pairs to distances
self._dist_matrix = None
# a sorted list of physical qubits (integers) in this coupling map
self._qubit_list = None
# number of qubits in the graph
self._size = None
self._is_symmetric = None
if couplinglist is not None:
self.graph.extend_from_edge_list([tuple(x) for x in couplinglist])
def size(self):
"""Return the number of physical qubits in this graph."""
if self._size is None:
self._size = len(self.graph)
return self._size
def get_edges(self):
"""
Gets the list of edges in the coupling graph.
Returns:
Tuple(int,int): Each edge is a pair of physical qubits.
"""
return self.graph.edge_list()
def add_physical_qubit(self, physical_qubit):
"""Add a physical qubit to the coupling graph as a node.
physical_qubit (int): An integer representing a physical qubit.
Raises:
CouplingError: if trying to add duplicate qubit
"""
if not isinstance(physical_qubit, int):
raise CouplingError("Physical qubits should be integers.")
if physical_qubit in self.physical_qubits:
raise CouplingError(
"The physical qubit %s is already in the coupling graph" % physical_qubit
)
self.graph.add_node(physical_qubit)
self._dist_matrix = None # invalidate
self._qubit_list = None # invalidate
self._size = None # invalidate
def add_edge(self, src, dst):
"""
Add directed edge to coupling graph.
src (int): source physical qubit
dst (int): destination physical qubit
"""
if src not in self.physical_qubits:
self.add_physical_qubit(src)
if dst not in self.physical_qubits:
self.add_physical_qubit(dst)
self.graph.add_edge(src, dst, None)
self._dist_matrix = None # invalidate
self._is_symmetric = None # invalidate
def subgraph(self, nodelist):
"""Return a CouplingMap object for a subgraph of self.
        Args:
            nodelist (list): list of integer node labels
"""
warnings.warn(
"The .subgraph() method is deprecated and will be removed in a "
"future release. Instead the .reduce() method should be used "
"instead which does the same thing but preserves nodelist order.",
DeprecationWarning,
stacklevel=2,
)
subcoupling = CouplingMap()
subcoupling.graph = self.graph.subgraph(nodelist)
return subcoupling
@property
def physical_qubits(self):
"""Returns a sorted list of physical_qubits"""
if self._qubit_list is None:
self._qubit_list = self.graph.node_indexes()
return self._qubit_list
def is_connected(self):
"""
Test if the graph is connected.
Return True if connected, False otherwise
"""
try:
return rx.is_weakly_connected(self.graph)
except rx.NullGraph:
return False
def neighbors(self, physical_qubit):
"""Return the nearest neighbors of a physical qubit.
Directionality matters, i.e. a neighbor must be reachable
by going one hop in the direction of an edge.
"""
return self.graph.neighbors(physical_qubit)
@property
def distance_matrix(self):
"""Return the distance matrix for the coupling map."""
self.compute_distance_matrix()
return self._dist_matrix
def compute_distance_matrix(self):
"""Compute the full distance matrix on pairs of nodes.
The distance map self._dist_matrix is computed from the graph using
all_pairs_shortest_path_length. This is normally handled internally
by the :attr:`~qiskit.transpiler.CouplingMap.distance_matrix`
attribute or the :meth:`~qiskit.transpiler.CouplingMap.distance` method
but can be called if you're accessing the distance matrix outside of
those or want to pre-generate it.
"""
if self._dist_matrix is None:
if not self.is_connected():
raise CouplingError("coupling graph not connected")
self._dist_matrix = rx.digraph_distance_matrix(self.graph, as_undirected=True)
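    # Added illustrative note: for a coupling list [[0, 1], [1, 2]] the matrix
    # computed above (undirected shortest-path lengths) is
    #   [[0, 1, 2],
    #    [1, 0, 1],
    #    [2, 1, 0]]
    # so, e.g., the undirected distance between qubits 0 and 2 is 2 even though
    # only the directed edges 0->1 and 1->2 are present.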
def distance(self, physical_qubit1, physical_qubit2):
"""Returns the undirected distance between physical_qubit1 and physical_qubit2.
Args:
physical_qubit1 (int): A physical qubit
physical_qubit2 (int): Another physical qubit
Returns:
int: The undirected distance
Raises:
CouplingError: if the qubits do not exist in the CouplingMap
"""
if physical_qubit1 >= self.size():
raise CouplingError("%s not in coupling graph" % physical_qubit1)
if physical_qubit2 >= self.size():
raise CouplingError("%s not in coupling graph" % physical_qubit2)
self.compute_distance_matrix()
return int(self._dist_matrix[physical_qubit1, physical_qubit2])
def shortest_undirected_path(self, physical_qubit1, physical_qubit2):
"""Returns the shortest undirected path between physical_qubit1 and physical_qubit2.
Args:
physical_qubit1 (int): A physical qubit
physical_qubit2 (int): Another physical qubit
Returns:
List: The shortest undirected path
Raises:
CouplingError: When there is no path between physical_qubit1, physical_qubit2.
"""
paths = rx.digraph_dijkstra_shortest_paths(
self.graph, source=physical_qubit1, target=physical_qubit2, as_undirected=True
)
if not paths:
raise CouplingError(
f"Nodes {str(physical_qubit1)} and {str(physical_qubit2)} are not connected"
)
return paths[physical_qubit2]
@property
def is_symmetric(self):
"""
Test if the graph is symmetric.
Return True if symmetric, False otherwise
"""
if self._is_symmetric is None:
self._is_symmetric = self._check_symmetry()
return self._is_symmetric
def make_symmetric(self):
"""
Convert uni-directional edges into bi-directional.
"""
edges = self.get_edges()
for src, dest in edges:
if (dest, src) not in edges:
self.add_edge(dest, src)
self._dist_matrix = None # invalidate
self._is_symmetric = None # invalidate
def _check_symmetry(self):
"""
Calculates symmetry
Returns:
Bool: True if symmetric, False otherwise
"""
return self.graph.is_symmetric()
def reduce(self, mapping):
"""Returns a reduced coupling map that
corresponds to the subgraph of qubits
selected in the mapping.
Args:
mapping (list): A mapping of reduced qubits to device
qubits.
Returns:
CouplingMap: A reduced coupling_map for the selected qubits.
Raises:
CouplingError: Reduced coupling map must be connected.
"""
from scipy.sparse import coo_matrix, csgraph
reduced_qubits = len(mapping)
inv_map = [None] * (max(mapping) + 1)
for idx, val in enumerate(mapping):
inv_map[val] = idx
reduced_cmap = []
for edge in self.get_edges():
if edge[0] in mapping and edge[1] in mapping:
reduced_cmap.append([inv_map[edge[0]], inv_map[edge[1]]])
# Verify coupling_map is connected
rows = np.array([edge[0] for edge in reduced_cmap], dtype=int)
cols = np.array([edge[1] for edge in reduced_cmap], dtype=int)
data = np.ones_like(rows)
mat = coo_matrix((data, (rows, cols)), shape=(reduced_qubits, reduced_qubits)).tocsr()
if csgraph.connected_components(mat)[0] != 1:
raise CouplingError("coupling_map must be connected.")
return CouplingMap(reduced_cmap)
@classmethod
def from_full(cls, num_qubits, bidirectional=True) -> "CouplingMap":
"""Return a fully connected coupling map on n qubits."""
cmap = cls(description="full")
if bidirectional:
cmap.graph = rx.generators.directed_mesh_graph(num_qubits)
else:
edge_list = []
for i in range(num_qubits):
for j in range(i):
edge_list.append((j, i))
cmap.graph.extend_from_edge_list(edge_list)
return cmap
@classmethod
def from_line(cls, num_qubits, bidirectional=True) -> "CouplingMap":
"""Return a coupling map of n qubits connected in a line."""
cmap = cls(description="line")
cmap.graph = rx.generators.directed_path_graph(num_qubits, bidirectional=bidirectional)
return cmap
@classmethod
def from_ring(cls, num_qubits, bidirectional=True) -> "CouplingMap":
"""Return a coupling map of n qubits connected to each of their neighbors in a ring."""
cmap = cls(description="ring")
cmap.graph = rx.generators.directed_cycle_graph(num_qubits, bidirectional=bidirectional)
return cmap
@classmethod
def from_grid(cls, num_rows, num_columns, bidirectional=True) -> "CouplingMap":
"""Return a coupling map of qubits connected on a grid of num_rows x num_columns."""
cmap = cls(description="grid")
cmap.graph = rx.generators.directed_grid_graph(
num_rows, num_columns, bidirectional=bidirectional
)
return cmap
@classmethod
def from_heavy_hex(cls, distance, bidirectional=True) -> "CouplingMap":
"""Return a heavy hexagon graph coupling map.
A heavy hexagon graph is described in:
https://journals.aps.org/prx/abstract/10.1103/PhysRevX.10.011022
Args:
distance (int): The code distance for the generated heavy hex
graph. The value for distance can be any odd positive integer.
The distance relates to the number of qubits by:
:math:`n = \\frac{5d^2 - 2d - 1}{2}` where :math:`n` is the
number of qubits and :math:`d` is the ``distance`` parameter.
bidirectional (bool): Whether the edges in the output coupling
graph are bidirectional or not. By default this is set to
``True``
Returns:
CouplingMap: A heavy hex coupling graph
"""
cmap = cls(description="heavy-hex")
cmap.graph = rx.generators.directed_heavy_hex_graph(distance, bidirectional=bidirectional)
return cmap
@classmethod
def from_heavy_square(cls, distance, bidirectional=True) -> "CouplingMap":
"""Return a heavy square graph coupling map.
A heavy square graph is described in:
https://journals.aps.org/prx/abstract/10.1103/PhysRevX.10.011022
Args:
distance (int): The code distance for the generated heavy square
graph. The value for distance can be any odd positive integer.
The distance relates to the number of qubits by:
:math:`n = 3d^2 - 2d` where :math:`n` is the
number of qubits and :math:`d` is the ``distance`` parameter.
bidirectional (bool): Whether the edges in the output coupling
graph are bidirectional or not. By default this is set to
``True``
Returns:
CouplingMap: A heavy square coupling graph
"""
cmap = cls(description="heavy-square")
cmap.graph = rx.generators.directed_heavy_square_graph(
distance, bidirectional=bidirectional
)
return cmap
@classmethod
def from_hexagonal_lattice(cls, rows, cols, bidirectional=True) -> "CouplingMap":
"""Return a hexagonal lattice graph coupling map.
Args:
rows (int): The number of rows to generate the graph with.
cols (int): The number of columns to generate the graph with.
bidirectional (bool): Whether the edges in the output coupling
graph are bidirectional or not. By default this is set to
``True``
Returns:
CouplingMap: A hexagonal lattice coupling graph
"""
cmap = cls(description="hexagonal-lattice")
cmap.graph = rx.generators.directed_hexagonal_lattice_graph(
rows, cols, bidirectional=bidirectional
)
return cmap
def largest_connected_component(self):
"""Return a set of qubits in the largest connected component."""
return max(rx.weakly_connected_components(self.graph), key=len)
def __str__(self):
"""Return a string representation of the coupling graph."""
string = ""
if self.get_edges():
string += "["
string += ", ".join([f"[{src}, {dst}]" for (src, dst) in self.get_edges()])
string += "]"
return string
def draw(self):
"""Draws the coupling map.
This function needs `pydot <https://github.com/erocarrera/pydot>`_,
which in turn needs `Graphviz <https://www.graphviz.org/>`_ to be
installed. Additionally, `pillow <https://python-pillow.org/>`_ will
need to be installed.
Returns:
PIL.Image: Drawn coupling map.
Raises:
MissingOptionalLibraryError: when pydot or pillow are not installed.
"""
try:
import pydot
except ImportError as ex:
raise MissingOptionalLibraryError(
libname="pydot",
name="coupling map drawer",
pip_install="pip install pydot",
) from ex
try:
from PIL import Image
except ImportError as ex:
raise MissingOptionalLibraryError(
libname="pillow",
name="coupling map drawer",
pip_install="pip install pillow",
) from ex
dot_str = self.graph.to_dot()
dot = pydot.graph_from_dot_data(dot_str)[0]
png = dot.create_png(prog="neato")
return Image.open(io.BytesIO(png))
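# --- Added usage sketch (editor's example, not part of the original module) ---
# A minimal, illustrative exercise of the class above; the 4-qubit line layout
# used here is hypothetical.
if __name__ == "__main__":
    cmap = CouplingMap([[0, 1], [1, 2], [2, 3]], description="4-qubit line")
    print(cmap.size())                          # 4
    print(cmap.distance(0, 3))                  # undirected distance: 3
    print(cmap.shortest_undirected_path(0, 3))  # [0, 1, 2, 3]
    cmap.make_symmetric()
    print(cmap.is_symmetric)                    # True once reverse edges exist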
|
[
"retworkx.generators.directed_grid_graph",
"scipy.sparse.csgraph.connected_components",
"retworkx.is_weakly_connected",
"qiskit.transpiler.exceptions.CouplingError",
"retworkx.digraph_distance_matrix",
"qiskit.exceptions.MissingOptionalLibraryError",
"retworkx.PyDiGraph",
"scipy.sparse.coo_matrix",
"retworkx.generators.directed_path_graph",
"io.BytesIO",
"retworkx.digraph_dijkstra_shortest_paths",
"numpy.ones_like",
"retworkx.generators.directed_heavy_square_graph",
"retworkx.weakly_connected_components",
"retworkx.generators.directed_heavy_hex_graph",
"retworkx.generators.directed_mesh_graph",
"retworkx.generators.directed_cycle_graph",
"pydot.graph_from_dot_data",
"numpy.array",
"warnings.warn",
"retworkx.generators.directed_hexagonal_lattice_graph"
] |
[((2012, 2026), 'retworkx.PyDiGraph', 'rx.PyDiGraph', ([], {}), '()\n', (2024, 2026), True, 'import retworkx as rx\n'), ((4287, 4533), 'warnings.warn', 'warnings.warn', (['"""The .subgraph() method is deprecated and will be removed in a future release. Instead the .reduce() method should be used instead which does the same thing but preserves nodelist order."""', 'DeprecationWarning'], {'stacklevel': '(2)'}), "(\n 'The .subgraph() method is deprecated and will be removed in a future release. Instead the .reduce() method should be used instead which does the same thing but preserves nodelist order.'\n , DeprecationWarning, stacklevel=2)\n", (4300, 4533), False, 'import warnings\n'), ((7735, 7853), 'retworkx.digraph_dijkstra_shortest_paths', 'rx.digraph_dijkstra_shortest_paths', (['self.graph'], {'source': 'physical_qubit1', 'target': 'physical_qubit2', 'as_undirected': '(True)'}), '(self.graph, source=physical_qubit1,\n target=physical_qubit2, as_undirected=True)\n', (7769, 7853), True, 'import retworkx as rx\n'), ((9843, 9898), 'numpy.array', 'np.array', (['[edge[0] for edge in reduced_cmap]'], {'dtype': 'int'}), '([edge[0] for edge in reduced_cmap], dtype=int)\n', (9851, 9898), True, 'import numpy as np\n'), ((9914, 9969), 'numpy.array', 'np.array', (['[edge[1] for edge in reduced_cmap]'], {'dtype': 'int'}), '([edge[1] for edge in reduced_cmap], dtype=int)\n', (9922, 9969), True, 'import numpy as np\n'), ((9985, 10003), 'numpy.ones_like', 'np.ones_like', (['rows'], {}), '(rows)\n', (9997, 10003), True, 'import numpy as np\n'), ((11013, 11087), 'retworkx.generators.directed_path_graph', 'rx.generators.directed_path_graph', (['num_qubits'], {'bidirectional': 'bidirectional'}), '(num_qubits, bidirectional=bidirectional)\n', (11046, 11087), True, 'import retworkx as rx\n'), ((11355, 11430), 'retworkx.generators.directed_cycle_graph', 'rx.generators.directed_cycle_graph', (['num_qubits'], {'bidirectional': 'bidirectional'}), '(num_qubits, bidirectional=bidirectional)\n', (11389, 11430), True, 'import retworkx as rx\n'), ((11706, 11796), 'retworkx.generators.directed_grid_graph', 'rx.generators.directed_grid_graph', (['num_rows', 'num_columns'], {'bidirectional': 'bidirectional'}), '(num_rows, num_columns, bidirectional=\n bidirectional)\n', (11739, 11796), True, 'import retworkx as rx\n'), ((12811, 12888), 'retworkx.generators.directed_heavy_hex_graph', 'rx.generators.directed_heavy_hex_graph', (['distance'], {'bidirectional': 'bidirectional'}), '(distance, bidirectional=bidirectional)\n', (12849, 12888), True, 'import retworkx as rx\n'), ((13881, 13966), 'retworkx.generators.directed_heavy_square_graph', 'rx.generators.directed_heavy_square_graph', (['distance'], {'bidirectional': 'bidirectional'}), '(distance, bidirectional=bidirectional\n )\n', (13922, 13966), True, 'import retworkx as rx\n'), ((14662, 14754), 'retworkx.generators.directed_hexagonal_lattice_graph', 'rx.generators.directed_hexagonal_lattice_graph', (['rows', 'cols'], {'bidirectional': 'bidirectional'}), '(rows, cols, bidirectional=\n bidirectional)\n', (14708, 14754), True, 'import retworkx as rx\n'), ((3196, 3248), 'qiskit.transpiler.exceptions.CouplingError', 'CouplingError', (['"""Physical qubits should be integers."""'], {}), "('Physical qubits should be integers.')\n", (3209, 3248), False, 'from qiskit.transpiler.exceptions import CouplingError\n'), ((3318, 3410), 'qiskit.transpiler.exceptions.CouplingError', 'CouplingError', (["('The physical qubit %s is already in the coupling graph' % physical_qubit)"], {}), "('The 
physical qubit %s is already in the coupling graph' %\n physical_qubit)\n", (3331, 3410), False, 'from qiskit.transpiler.exceptions import CouplingError\n'), ((5125, 5159), 'retworkx.is_weakly_connected', 'rx.is_weakly_connected', (['self.graph'], {}), '(self.graph)\n', (5147, 5159), True, 'import retworkx as rx\n'), ((6398, 6456), 'retworkx.digraph_distance_matrix', 'rx.digraph_distance_matrix', (['self.graph'], {'as_undirected': '(True)'}), '(self.graph, as_undirected=True)\n', (6424, 6456), True, 'import retworkx as rx\n'), ((6951, 7010), 'qiskit.transpiler.exceptions.CouplingError', 'CouplingError', (["('%s not in coupling graph' % physical_qubit1)"], {}), "('%s not in coupling graph' % physical_qubit1)\n", (6964, 7010), False, 'from qiskit.transpiler.exceptions import CouplingError\n'), ((7072, 7131), 'qiskit.transpiler.exceptions.CouplingError', 'CouplingError', (["('%s not in coupling graph' % physical_qubit2)"], {}), "('%s not in coupling graph' % physical_qubit2)\n", (7085, 7131), False, 'from qiskit.transpiler.exceptions import CouplingError\n'), ((10173, 10221), 'qiskit.transpiler.exceptions.CouplingError', 'CouplingError', (['"""coupling_map must be connected."""'], {}), "('coupling_map must be connected.')\n", (10186, 10221), False, 'from qiskit.transpiler.exceptions import CouplingError\n'), ((10510, 10555), 'retworkx.generators.directed_mesh_graph', 'rx.generators.directed_mesh_graph', (['num_qubits'], {}), '(num_qubits)\n', (10543, 10555), True, 'import retworkx as rx\n'), ((14928, 14970), 'retworkx.weakly_connected_components', 'rx.weakly_connected_components', (['self.graph'], {}), '(self.graph)\n', (14958, 14970), True, 'import retworkx as rx\n'), ((16367, 16401), 'pydot.graph_from_dot_data', 'pydot.graph_from_dot_data', (['dot_str'], {}), '(dot_str)\n', (16392, 16401), False, 'import pydot\n'), ((16475, 16490), 'io.BytesIO', 'io.BytesIO', (['png'], {}), '(png)\n', (16485, 16490), False, 'import io\n'), ((6320, 6365), 'qiskit.transpiler.exceptions.CouplingError', 'CouplingError', (['"""coupling graph not connected"""'], {}), "('coupling graph not connected')\n", (6333, 6365), False, 'from qiskit.transpiler.exceptions import CouplingError\n'), ((10019, 10091), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(data, (rows, cols))'], {'shape': '(reduced_qubits, reduced_qubits)'}), '((data, (rows, cols)), shape=(reduced_qubits, reduced_qubits))\n', (10029, 10091), False, 'from scipy.sparse import coo_matrix, csgraph\n'), ((10112, 10145), 'scipy.sparse.csgraph.connected_components', 'csgraph.connected_components', (['mat'], {}), '(mat)\n', (10140, 10145), False, 'from scipy.sparse import coo_matrix, csgraph\n'), ((15859, 15968), 'qiskit.exceptions.MissingOptionalLibraryError', 'MissingOptionalLibraryError', ([], {'libname': '"""pydot"""', 'name': '"""coupling map drawer"""', 'pip_install': '"""pip install pydot"""'}), "(libname='pydot', name='coupling map drawer',\n pip_install='pip install pydot')\n", (15886, 15968), False, 'from qiskit.exceptions import MissingOptionalLibraryError\n'), ((16136, 16247), 'qiskit.exceptions.MissingOptionalLibraryError', 'MissingOptionalLibraryError', ([], {'libname': '"""pillow"""', 'name': '"""coupling map drawer"""', 'pip_install': '"""pip install pillow"""'}), "(libname='pillow', name='coupling map drawer',\n pip_install='pip install pillow')\n", (16163, 16247), False, 'from qiskit.exceptions import MissingOptionalLibraryError\n')]
|
import numpy as np
import pandas as pd
import pytest
from rayml.data_checks import (
DataCheckActionCode,
DataCheckActionOption,
DataCheckMessageCode,
DataCheckWarning,
IDColumnsDataCheck,
)
id_data_check_name = IDColumnsDataCheck.name
def test_id_cols_data_check_init():
id_cols_check = IDColumnsDataCheck()
assert id_cols_check.id_threshold == 1.0
id_cols_check = IDColumnsDataCheck(id_threshold=0.0)
assert id_cols_check.id_threshold == 0
id_cols_check = IDColumnsDataCheck(id_threshold=0.5)
assert id_cols_check.id_threshold == 0.5
id_cols_check = IDColumnsDataCheck(id_threshold=1.0)
assert id_cols_check.id_threshold == 1.0
with pytest.raises(
ValueError, match="id_threshold must be a float between 0 and 1, inclusive."
):
IDColumnsDataCheck(id_threshold=-0.1)
with pytest.raises(
ValueError, match="id_threshold must be a float between 0 and 1, inclusive."
):
IDColumnsDataCheck(id_threshold=1.1)
def test_id_columns_warning():
X_dict = {
"col_1_id": [0, 1, 2, 3],
"col_2": [2, 3, 4, 5],
"col_3_id": [1, 1, 2, 3],
"Id": [3, 1, 2, 0],
"col_5": [0, 0, 1, 2],
"col_6": [0.1, 0.2, 0.3, 0.4],
}
X = pd.DataFrame.from_dict(X_dict)
id_cols_check = IDColumnsDataCheck(id_threshold=0.95)
assert id_cols_check.validate(X) == [
DataCheckWarning(
message="Columns 'Id', 'col_1_id', 'col_2', 'col_3_id' are 95.0% or more likely to be an ID column",
data_check_name=id_data_check_name,
message_code=DataCheckMessageCode.HAS_ID_COLUMN,
details={"columns": ["Id", "col_1_id", "col_2", "col_3_id"]},
action_options=[
DataCheckActionOption(
DataCheckActionCode.DROP_COL,
data_check_name=id_data_check_name,
metadata={"columns": ["Id", "col_1_id", "col_2", "col_3_id"]},
)
],
).to_dict(),
]
X = pd.DataFrame.from_dict(X_dict)
id_cols_check = IDColumnsDataCheck(id_threshold=1.0)
assert id_cols_check.validate(X) == [
DataCheckWarning(
message="Columns 'Id', 'col_1_id' are 100.0% or more likely to be an ID column",
data_check_name=id_data_check_name,
message_code=DataCheckMessageCode.HAS_ID_COLUMN,
details={"columns": ["Id", "col_1_id"]},
action_options=[
DataCheckActionOption(
DataCheckActionCode.DROP_COL,
data_check_name=id_data_check_name,
metadata={"columns": ["Id", "col_1_id"]},
)
],
).to_dict(),
]
def test_id_columns_strings():
X_dict = {
"col_1_id": ["a", "b", "c", "d"],
"col_2": ["w", "x", "y", "z"],
"col_3_id": [
"123456789012345",
"234567890123456",
"3456789012345678",
"45678901234567",
],
"Id": ["z", "y", "x", "a"],
"col_5": ["0", "0", "1", "2"],
"col_6": [0.1, 0.2, 0.3, 0.4],
}
X = pd.DataFrame.from_dict(X_dict)
X.ww.init(
logical_types={
"col_1_id": "categorical",
"col_2": "categorical",
"Id": "categorical",
"col_5": "categorical",
}
)
id_cols_check = IDColumnsDataCheck(id_threshold=0.95)
assert id_cols_check.validate(X) == [
DataCheckWarning(
message="Columns 'Id', 'col_1_id', 'col_2', 'col_3_id' are 95.0% or more likely to be an ID column",
data_check_name=id_data_check_name,
message_code=DataCheckMessageCode.HAS_ID_COLUMN,
details={"columns": ["Id", "col_1_id", "col_2", "col_3_id"]},
action_options=[
DataCheckActionOption(
DataCheckActionCode.DROP_COL,
data_check_name=id_data_check_name,
metadata={"columns": ["Id", "col_1_id", "col_2", "col_3_id"]},
)
],
).to_dict(),
]
id_cols_check = IDColumnsDataCheck(id_threshold=1.0)
assert id_cols_check.validate(X) == [
DataCheckWarning(
message="Columns 'Id', 'col_1_id' are 100.0% or more likely to be an ID column",
data_check_name=id_data_check_name,
message_code=DataCheckMessageCode.HAS_ID_COLUMN,
details={"columns": ["Id", "col_1_id"]},
action_options=[
DataCheckActionOption(
DataCheckActionCode.DROP_COL,
data_check_name=id_data_check_name,
metadata={"columns": ["Id", "col_1_id"]},
)
],
).to_dict(),
]
def test_id_cols_data_check_input_formats():
id_cols_check = IDColumnsDataCheck(id_threshold=0.8)
# test empty pd.DataFrame
assert id_cols_check.validate(pd.DataFrame()) == []
# test Woodwork
ww_input = pd.DataFrame(np.array([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5]]))
ww_input.ww.init()
assert id_cols_check.validate(ww_input) == [
DataCheckWarning(
message="Columns '0', '1' are 80.0% or more likely to be an ID column",
data_check_name=id_data_check_name,
message_code=DataCheckMessageCode.HAS_ID_COLUMN,
details={"columns": [0, 1]},
action_options=[
DataCheckActionOption(
DataCheckActionCode.DROP_COL,
data_check_name=id_data_check_name,
metadata={"columns": [0, 1]},
)
],
).to_dict(),
]
# test 2D list
assert id_cols_check.validate([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5]]) == [
DataCheckWarning(
message="Columns '0', '1' are 80.0% or more likely to be an ID column",
data_check_name=id_data_check_name,
message_code=DataCheckMessageCode.HAS_ID_COLUMN,
details={"columns": [0, 1]},
action_options=[
DataCheckActionOption(
DataCheckActionCode.DROP_COL,
data_check_name=id_data_check_name,
metadata={"columns": [0, 1]},
)
],
).to_dict(),
]
# test np.array
assert id_cols_check.validate(
np.array([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5]])
) == [
DataCheckWarning(
message="Columns '0', '1' are 80.0% or more likely to be an ID column",
data_check_name=id_data_check_name,
message_code=DataCheckMessageCode.HAS_ID_COLUMN,
details={"columns": [0, 1]},
action_options=[
DataCheckActionOption(
DataCheckActionCode.DROP_COL,
data_check_name=id_data_check_name,
metadata={"columns": [0, 1]},
)
],
).to_dict(),
]
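# --- Added note (editor's sketch, not part of the original tests) ---
# The expectations above suggest the check flags columns both by ID-like names
# ("Id", "*_id") and by how ID-like their values are, with `id_threshold`
# controlling how strong that evidence must be. A direct use might look like:
#
#     check = IDColumnsDataCheck(id_threshold=0.9)
#     messages = check.validate(pd.DataFrame({"user_id": [1, 2, 3, 4]}))
#     # each message is a DataCheckWarning carrying a DROP_COL action option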
|
[
"pandas.DataFrame",
"rayml.data_checks.DataCheckActionOption",
"pandas.DataFrame.from_dict",
"pytest.raises",
"numpy.array",
"rayml.data_checks.IDColumnsDataCheck"
] |
[((316, 336), 'rayml.data_checks.IDColumnsDataCheck', 'IDColumnsDataCheck', ([], {}), '()\n', (334, 336), False, 'from rayml.data_checks import DataCheckActionCode, DataCheckActionOption, DataCheckMessageCode, DataCheckWarning, IDColumnsDataCheck\n'), ((403, 439), 'rayml.data_checks.IDColumnsDataCheck', 'IDColumnsDataCheck', ([], {'id_threshold': '(0.0)'}), '(id_threshold=0.0)\n', (421, 439), False, 'from rayml.data_checks import DataCheckActionCode, DataCheckActionOption, DataCheckMessageCode, DataCheckWarning, IDColumnsDataCheck\n'), ((504, 540), 'rayml.data_checks.IDColumnsDataCheck', 'IDColumnsDataCheck', ([], {'id_threshold': '(0.5)'}), '(id_threshold=0.5)\n', (522, 540), False, 'from rayml.data_checks import DataCheckActionCode, DataCheckActionOption, DataCheckMessageCode, DataCheckWarning, IDColumnsDataCheck\n'), ((607, 643), 'rayml.data_checks.IDColumnsDataCheck', 'IDColumnsDataCheck', ([], {'id_threshold': '(1.0)'}), '(id_threshold=1.0)\n', (625, 643), False, 'from rayml.data_checks import DataCheckActionCode, DataCheckActionOption, DataCheckMessageCode, DataCheckWarning, IDColumnsDataCheck\n'), ((1272, 1302), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['X_dict'], {}), '(X_dict)\n', (1294, 1302), True, 'import pandas as pd\n'), ((1323, 1360), 'rayml.data_checks.IDColumnsDataCheck', 'IDColumnsDataCheck', ([], {'id_threshold': '(0.95)'}), '(id_threshold=0.95)\n', (1341, 1360), False, 'from rayml.data_checks import DataCheckActionCode, DataCheckActionOption, DataCheckMessageCode, DataCheckWarning, IDColumnsDataCheck\n'), ((2051, 2081), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['X_dict'], {}), '(X_dict)\n', (2073, 2081), True, 'import pandas as pd\n'), ((2102, 2138), 'rayml.data_checks.IDColumnsDataCheck', 'IDColumnsDataCheck', ([], {'id_threshold': '(1.0)'}), '(id_threshold=1.0)\n', (2120, 2138), False, 'from rayml.data_checks import DataCheckActionCode, DataCheckActionOption, DataCheckMessageCode, DataCheckWarning, IDColumnsDataCheck\n'), ((3172, 3202), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['X_dict'], {}), '(X_dict)\n', (3194, 3202), True, 'import pandas as pd\n'), ((3422, 3459), 'rayml.data_checks.IDColumnsDataCheck', 'IDColumnsDataCheck', ([], {'id_threshold': '(0.95)'}), '(id_threshold=0.95)\n', (3440, 3459), False, 'from rayml.data_checks import DataCheckActionCode, DataCheckActionOption, DataCheckMessageCode, DataCheckWarning, IDColumnsDataCheck\n'), ((4162, 4198), 'rayml.data_checks.IDColumnsDataCheck', 'IDColumnsDataCheck', ([], {'id_threshold': '(1.0)'}), '(id_threshold=1.0)\n', (4180, 4198), False, 'from rayml.data_checks import DataCheckActionCode, DataCheckActionOption, DataCheckMessageCode, DataCheckWarning, IDColumnsDataCheck\n'), ((4885, 4921), 'rayml.data_checks.IDColumnsDataCheck', 'IDColumnsDataCheck', ([], {'id_threshold': '(0.8)'}), '(id_threshold=0.8)\n', (4903, 4921), False, 'from rayml.data_checks import DataCheckActionCode, DataCheckActionOption, DataCheckMessageCode, DataCheckWarning, IDColumnsDataCheck\n'), ((699, 795), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""id_threshold must be a float between 0 and 1, inclusive."""'}), "(ValueError, match=\n 'id_threshold must be a float between 0 and 1, inclusive.')\n", (712, 795), False, 'import pytest\n'), ((814, 851), 'rayml.data_checks.IDColumnsDataCheck', 'IDColumnsDataCheck', ([], {'id_threshold': '(-0.1)'}), '(id_threshold=-0.1)\n', (832, 851), False, 'from rayml.data_checks import DataCheckActionCode, DataCheckActionOption, 
DataCheckMessageCode, DataCheckWarning, IDColumnsDataCheck\n'), ((861, 957), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""id_threshold must be a float between 0 and 1, inclusive."""'}), "(ValueError, match=\n 'id_threshold must be a float between 0 and 1, inclusive.')\n", (874, 957), False, 'import pytest\n'), ((976, 1012), 'rayml.data_checks.IDColumnsDataCheck', 'IDColumnsDataCheck', ([], {'id_threshold': '(1.1)'}), '(id_threshold=1.1)\n', (994, 1012), False, 'from rayml.data_checks import DataCheckActionCode, DataCheckActionOption, DataCheckMessageCode, DataCheckWarning, IDColumnsDataCheck\n'), ((5059, 5109), 'numpy.array', 'np.array', (['[[0, 1], [1, 2], [2, 3], [3, 4], [4, 5]]'], {}), '([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5]])\n', (5067, 5109), True, 'import numpy as np\n'), ((4987, 5001), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4999, 5001), True, 'import pandas as pd\n'), ((6437, 6487), 'numpy.array', 'np.array', (['[[0, 1], [1, 2], [2, 3], [3, 4], [4, 5]]'], {}), '([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5]])\n', (6445, 6487), True, 'import numpy as np\n'), ((1770, 1929), 'rayml.data_checks.DataCheckActionOption', 'DataCheckActionOption', (['DataCheckActionCode.DROP_COL'], {'data_check_name': 'id_data_check_name', 'metadata': "{'columns': ['Id', 'col_1_id', 'col_2', 'col_3_id']}"}), "(DataCheckActionCode.DROP_COL, data_check_name=\n id_data_check_name, metadata={'columns': ['Id', 'col_1_id', 'col_2',\n 'col_3_id']})\n", (1791, 1929), False, 'from rayml.data_checks import DataCheckActionCode, DataCheckActionOption, DataCheckMessageCode, DataCheckWarning, IDColumnsDataCheck\n'), ((2507, 2641), 'rayml.data_checks.DataCheckActionOption', 'DataCheckActionOption', (['DataCheckActionCode.DROP_COL'], {'data_check_name': 'id_data_check_name', 'metadata': "{'columns': ['Id', 'col_1_id']}"}), "(DataCheckActionCode.DROP_COL, data_check_name=\n id_data_check_name, metadata={'columns': ['Id', 'col_1_id']})\n", (2528, 2641), False, 'from rayml.data_checks import DataCheckActionCode, DataCheckActionOption, DataCheckMessageCode, DataCheckWarning, IDColumnsDataCheck\n'), ((3869, 4028), 'rayml.data_checks.DataCheckActionOption', 'DataCheckActionOption', (['DataCheckActionCode.DROP_COL'], {'data_check_name': 'id_data_check_name', 'metadata': "{'columns': ['Id', 'col_1_id', 'col_2', 'col_3_id']}"}), "(DataCheckActionCode.DROP_COL, data_check_name=\n id_data_check_name, metadata={'columns': ['Id', 'col_1_id', 'col_2',\n 'col_3_id']})\n", (3890, 4028), False, 'from rayml.data_checks import DataCheckActionCode, DataCheckActionOption, DataCheckMessageCode, DataCheckWarning, IDColumnsDataCheck\n'), ((4567, 4701), 'rayml.data_checks.DataCheckActionOption', 'DataCheckActionOption', (['DataCheckActionCode.DROP_COL'], {'data_check_name': 'id_data_check_name', 'metadata': "{'columns': ['Id', 'col_1_id']}"}), "(DataCheckActionCode.DROP_COL, data_check_name=\n id_data_check_name, metadata={'columns': ['Id', 'col_1_id']})\n", (4588, 4701), False, 'from rayml.data_checks import DataCheckActionCode, DataCheckActionOption, DataCheckMessageCode, DataCheckWarning, IDColumnsDataCheck\n'), ((5488, 5610), 'rayml.data_checks.DataCheckActionOption', 'DataCheckActionOption', (['DataCheckActionCode.DROP_COL'], {'data_check_name': 'id_data_check_name', 'metadata': "{'columns': [0, 1]}"}), "(DataCheckActionCode.DROP_COL, data_check_name=\n id_data_check_name, metadata={'columns': [0, 1]})\n", (5509, 5610), False, 'from rayml.data_checks import DataCheckActionCode, DataCheckActionOption, 
DataCheckMessageCode, DataCheckWarning, IDColumnsDataCheck\n'), ((6134, 6256), 'rayml.data_checks.DataCheckActionOption', 'DataCheckActionOption', (['DataCheckActionCode.DROP_COL'], {'data_check_name': 'id_data_check_name', 'metadata': "{'columns': [0, 1]}"}), "(DataCheckActionCode.DROP_COL, data_check_name=\n id_data_check_name, metadata={'columns': [0, 1]})\n", (6155, 6256), False, 'from rayml.data_checks import DataCheckActionCode, DataCheckActionOption, DataCheckMessageCode, DataCheckWarning, IDColumnsDataCheck\n'), ((6804, 6926), 'rayml.data_checks.DataCheckActionOption', 'DataCheckActionOption', (['DataCheckActionCode.DROP_COL'], {'data_check_name': 'id_data_check_name', 'metadata': "{'columns': [0, 1]}"}), "(DataCheckActionCode.DROP_COL, data_check_name=\n id_data_check_name, metadata={'columns': [0, 1]})\n", (6825, 6926), False, 'from rayml.data_checks import DataCheckActionCode, DataCheckActionOption, DataCheckMessageCode, DataCheckWarning, IDColumnsDataCheck\n')]
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for non_semantic_speech_benchmark.eval_embedding.sklearn.sklearn_utils."""
import os
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from non_semantic_speech_benchmark.eval_embedding.sklearn import sklearn_utils
class SklearnUtilsTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
{'l2_normalization': True},
{'l2_normalization': False},
)
def test_tfexample_to_nps(self, l2_normalization):
path = os.path.join(absltest.get_default_test_tmpdir(), 'dummy_tfrecords')
embedding_name = 'fake_emb'
label_name = 'label/fake_lbl'
label_list = ['yes', 'no']
np.random.seed(10)
# Generate fake embeddings and labels.
fake_data = [
(np.random.rand(100), 1),
(np.random.rand(100), 0),
(np.random.rand(100), 1),
]
def _emb_lbl_i_to_tfexample(emb, label_index):
"""Package fake data as a tf.Example."""
ex = tf.train.Example()
ex.features.feature[
f'embedding/{embedding_name}'].float_list.value.extend(emb)
ex.features.feature[label_name].bytes_list.value.append(
label_list[label_index].encode('utf-8'))
return ex
# Write TFRecord of tf.Examples to disk.
with tf.python_io.TFRecordWriter(path) as writer:
for emb, label_index in fake_data:
ex = _emb_lbl_i_to_tfexample(emb, label_index)
writer.write(ex.SerializeToString())
# Convert them back.
npx, npy, _ = sklearn_utils.tfexamples_to_nps(path, embedding_name,
label_name, label_list,
l2_normalization)
# Check that output is correct.
expected_embs = np.array([d[0] for d in fake_data], np.float32)
if l2_normalization:
expected_embs /= np.linalg.norm(expected_embs, axis=1, ord=2,
keepdims=True)
self.assertAllEqual(npx, expected_embs)
self.assertAllEqual(npy, (1, 0, 1))
def test_speaker_normalization(self):
original_embeddings = np.array(
[
[0.5, 2.1],
[0.5, 0.6],
[-.5, 2.1],
],
np.float32)
speaker_ids = np.array(
[
'id1',
'id2',
'id1',
],
np.str)
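    # Added note: the expected values below are consistent with per-speaker
    # normalization (subtract each speaker's mean, divide by its std), with
    # zero-variance dimensions and single-utterance speakers coming out as 0.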
expected_normalized_embeddings = np.array(
[
[1.0, 0.0],
[0.0, 0.0],
[-1., 0.0],
],
np.float32)
normalized_embeddings = sklearn_utils._speaker_normalization(
original_embeddings, speaker_ids)
self.assertAllEqual(normalized_embeddings, expected_normalized_embeddings)
if __name__ == '__main__':
tf.test.main()
|
[
"non_semantic_speech_benchmark.eval_embedding.sklearn.sklearn_utils.tfexamples_to_nps",
"numpy.random.seed",
"tensorflow.compat.v1.train.Example",
"absl.testing.absltest.get_default_test_tmpdir",
"tensorflow.compat.v1.python_io.TFRecordWriter",
"absl.testing.parameterized.parameters",
"tensorflow.compat.v1.test.main",
"numpy.array",
"numpy.linalg.norm",
"numpy.random.rand",
"non_semantic_speech_benchmark.eval_embedding.sklearn.sklearn_utils._speaker_normalization"
] |
[((999, 1085), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (["{'l2_normalization': True}", "{'l2_normalization': False}"], {}), "({'l2_normalization': True}, {'l2_normalization': \n False})\n", (1023, 1085), False, 'from absl.testing import parameterized\n'), ((3382, 3396), 'tensorflow.compat.v1.test.main', 'tf.test.main', ([], {}), '()\n', (3394, 3396), True, 'import tensorflow.compat.v1 as tf\n'), ((1332, 1350), 'numpy.random.seed', 'np.random.seed', (['(10)'], {}), '(10)\n', (1346, 1350), True, 'import numpy as np\n'), ((2161, 2260), 'non_semantic_speech_benchmark.eval_embedding.sklearn.sklearn_utils.tfexamples_to_nps', 'sklearn_utils.tfexamples_to_nps', (['path', 'embedding_name', 'label_name', 'label_list', 'l2_normalization'], {}), '(path, embedding_name, label_name,\n label_list, l2_normalization)\n', (2192, 2260), False, 'from non_semantic_speech_benchmark.eval_embedding.sklearn import sklearn_utils\n'), ((2414, 2461), 'numpy.array', 'np.array', (['[d[0] for d in fake_data]', 'np.float32'], {}), '([d[0] for d in fake_data], np.float32)\n', (2422, 2461), True, 'import numpy as np\n'), ((2759, 2818), 'numpy.array', 'np.array', (['[[0.5, 2.1], [0.5, 0.6], [-0.5, 2.1]]', 'np.float32'], {}), '([[0.5, 2.1], [0.5, 0.6], [-0.5, 2.1]], np.float32)\n', (2767, 2818), True, 'import numpy as np\n'), ((2900, 2939), 'numpy.array', 'np.array', (["['id1', 'id2', 'id1']", 'np.str'], {}), "(['id1', 'id2', 'id1'], np.str)\n", (2908, 2939), True, 'import numpy as np\n'), ((3041, 3100), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, 0.0], [-1.0, 0.0]]', 'np.float32'], {}), '([[1.0, 0.0], [0.0, 0.0], [-1.0, 0.0]], np.float32)\n', (3049, 3100), True, 'import numpy as np\n'), ((3192, 3262), 'non_semantic_speech_benchmark.eval_embedding.sklearn.sklearn_utils._speaker_normalization', 'sklearn_utils._speaker_normalization', (['original_embeddings', 'speaker_ids'], {}), '(original_embeddings, speaker_ids)\n', (3228, 3262), False, 'from non_semantic_speech_benchmark.eval_embedding.sklearn import sklearn_utils\n'), ((1175, 1209), 'absl.testing.absltest.get_default_test_tmpdir', 'absltest.get_default_test_tmpdir', ([], {}), '()\n', (1207, 1209), False, 'from absl.testing import absltest\n'), ((1630, 1648), 'tensorflow.compat.v1.train.Example', 'tf.train.Example', ([], {}), '()\n', (1646, 1648), True, 'import tensorflow.compat.v1 as tf\n'), ((1931, 1964), 'tensorflow.compat.v1.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['path'], {}), '(path)\n', (1958, 1964), True, 'import tensorflow.compat.v1 as tf\n'), ((2510, 2569), 'numpy.linalg.norm', 'np.linalg.norm', (['expected_embs'], {'axis': '(1)', 'ord': '(2)', 'keepdims': '(True)'}), '(expected_embs, axis=1, ord=2, keepdims=True)\n', (2524, 2569), True, 'import numpy as np\n'), ((1422, 1441), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (1436, 1441), True, 'import numpy as np\n'), ((1456, 1475), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (1470, 1475), True, 'import numpy as np\n'), ((1490, 1509), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (1504, 1509), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import cv2
import sys
# read the image
image = cv2.imread(sys.argv[1])
# convert to grayscale
grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# perform edge detection
edges = cv2.Canny(grayscale, 30, 100)
# detect lines in the image using hough lines technique
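# Positional arguments to HoughLinesP below: rho=1 pixel, theta=1 degree
# (np.pi/180), accumulator threshold=60 votes, an empty placeholder for the
# output array, then minLineLength=50 and maxLineGap=5 (both in pixels).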
lines = cv2.HoughLinesP(edges, 1, np.pi/180, 60, np.array([]), 50, 5)
# iterate over the output lines and draw them
for line in lines:
for x1, y1, x2, y2 in line:
cv2.line(image, (x1, y1), (x2, y2), color=(20, 220, 20), thickness=3)
# show the image
plt.imshow(image)
plt.show()
|
[
"cv2.line",
"cv2.Canny",
"matplotlib.pyplot.show",
"cv2.cvtColor",
"matplotlib.pyplot.imshow",
"cv2.imread",
"numpy.array"
] |
[((105, 128), 'cv2.imread', 'cv2.imread', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (115, 128), False, 'import cv2\n'), ((168, 207), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (180, 207), False, 'import cv2\n'), ((245, 274), 'cv2.Canny', 'cv2.Canny', (['grayscale', '(30)', '(100)'], {}), '(grayscale, 30, 100)\n', (254, 274), False, 'import cv2\n'), ((605, 622), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (615, 622), True, 'import matplotlib.pyplot as plt\n'), ((624, 634), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (632, 634), True, 'import matplotlib.pyplot as plt\n'), ((384, 396), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (392, 396), True, 'import numpy as np\n'), ((514, 583), 'cv2.line', 'cv2.line', (['image', '(x1, y1)', '(x2, y2)'], {'color': '(20, 220, 20)', 'thickness': '(3)'}), '(image, (x1, y1), (x2, y2), color=(20, 220, 20), thickness=3)\n', (522, 583), False, 'import cv2\n')]
|
import tensorflow
from tensorflow.keras.datasets import cifar10
from tensorflow import keras
import numpy as np
num_classes = 10
class EvalDataset(object):
def __init__(self, batch_size=100):
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
# Convert class vectors to binary class matrices.
y_train = tensorflow.keras.utils.to_categorical(y_train, num_classes)
y_test = tensorflow.keras.utils.to_categorical(y_test, num_classes)
self.test_images = x_test
self.test_labels = y_test
def __len__(self):
return len(self.test_images)
def __getitem__(self, idx):
return self.test_images[idx], self.test_labels[idx]
from neural_compressor.experimental import Benchmark, common
evaluator = Benchmark('benchmark.yaml')
evaluator.model = common.Model('./baseline_model')
evaluator.b_dataloader = common.DataLoader(EvalDataset())
evaluator('performance')
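# --- Added note (editor's comment) ---
# EvalDataset yields (image, one-hot label) pairs from the CIFAR-10 test split,
# with the training-set pixel mean subtracted to match how the baseline model
# was presumably trained. 'benchmark.yaml' (benchmark settings) and
# './baseline_model' (a saved model) are assumed to exist alongside this
# script; the 'performance' mode measures throughput/latency rather than
# accuracy.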
|
[
"tensorflow.keras.utils.to_categorical",
"neural_compressor.experimental.Benchmark",
"tensorflow.keras.datasets.cifar10.load_data",
"neural_compressor.experimental.common.Model",
"numpy.mean"
] |
[((1031, 1058), 'neural_compressor.experimental.Benchmark', 'Benchmark', (['"""benchmark.yaml"""'], {}), "('benchmark.yaml')\n", (1040, 1058), False, 'from neural_compressor.experimental import Benchmark, common\n'), ((1077, 1109), 'neural_compressor.experimental.common.Model', 'common.Model', (['"""./baseline_model"""'], {}), "('./baseline_model')\n", (1089, 1109), False, 'from neural_compressor.experimental import Benchmark, common\n'), ((246, 265), 'tensorflow.keras.datasets.cifar10.load_data', 'cifar10.load_data', ([], {}), '()\n', (263, 265), False, 'from tensorflow.keras.datasets import cifar10\n'), ((433, 457), 'numpy.mean', 'np.mean', (['x_train'], {'axis': '(0)'}), '(x_train, axis=0)\n', (440, 457), True, 'import numpy as np\n'), ((598, 657), 'tensorflow.keras.utils.to_categorical', 'tensorflow.keras.utils.to_categorical', (['y_train', 'num_classes'], {}), '(y_train, num_classes)\n', (635, 657), False, 'import tensorflow\n'), ((675, 733), 'tensorflow.keras.utils.to_categorical', 'tensorflow.keras.utils.to_categorical', (['y_test', 'num_classes'], {}), '(y_test, num_classes)\n', (712, 733), False, 'import tensorflow\n')]
|
import ast, gast
import inspect
import numpy as np
import sys
import typing
from chainer_compiler.elichika.typing import types
from chainer_compiler.elichika.typing.type_inference import InferenceEngine
from chainer_compiler.elichika.typing.utils import node_description, is_expr
from chainer_compiler.elichika.parser import utils
class IDAssignor(gast.NodeVisitor):
def __init__(self):
self.counter = 0
self.node2id = {}
def visit(self, node):
self.node2id[node] = self.counter
self.counter += 1
return super().visit(node)
def run(self, node, subroutine_node):
self.visit(node)
for ns in subroutine_node.values():
for n in ns:
self.visit(n)
return self.node2id
def generate_node2id(tree, subroutine_node):
a = IDAssignor()
node2id = a.run(tree, subroutine_node)
return node2id
def generate_id2node(node2id):
id2node = {}
for n, i in node2id.items():
id2node[i] = n
return id2node
def generate_node2type(tree, args, is_debug=False, module=None, type_hints={}):
reset_state()
tc = InferenceEngine(is_debug=is_debug, module=module)
func_body = tree.body[0] # XXX: only checks first function
try:
node2type = tc.infer_function_value_args(func_body, args, type_hints=type_hints)
return node2type, tc.subroutine_node
except Exception as e:
tc.dump_tyenv()
raise e
def generate_id2type(node2type, node2id):
id2type = {}
for n, t in node2type.items():
if n not in node2id.keys(): continue # user-defined modules in nn.Sequential
id2type[node2id[n]] = t
return id2type
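# Added note: the typical flow through this module is
#   generate_node2type()  -- run type inference, mapping AST nodes to types
#   generate_node2id()    -- assign stable integer ids to the same nodes
#   generate_id2type()    -- re-key the inferred types by those integer ids
# so that generate_assertion() below can emit reproducible test assertions.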
def generate_assertion(type_table_name, id2type, id2node, ofile=None):
for i, t in sorted(id2type.items()):
node = id2node[i]
if not is_expr(node):
if not isinstance(node, gast.FunctionDef):
continue
output = " # === function {} ===".format(node.name)
else:
comment = "\t# " + node_description(node)
output = " self.assertEqual(str({}[{}]), \"{}\"){}".format( \
type_table_name, i, t, comment)
if ofile is None:
print(output)
else:
ofile.write(output + '\n')
# For testing
def generate_id2type_from_forward(model, args, is_debug=False):
code = utils.clip_head(inspect.getsource(model.forward))
tree = gast.ast_to_gast(ast.parse(code))
module = sys.modules[model.forward.__module__]
node2type, subroutine_node = generate_node2type(
tree, (model,) + args, is_debug=is_debug, module=module,
type_hints=typing.get_type_hints(model.forward))
node2id = generate_node2id(tree, subroutine_node)
id2type = generate_id2type(node2type, node2id)
return id2type
# For debug
def generate_type_inference_results(model, forward_args, is_debug=True):
code = utils.clip_head(inspect.getsource(model.forward))
node = gast.ast_to_gast(ast.parse(code))
# node = Canonicalizer().visit(node)
module = sys.modules[model.forward.__module__]
node2type, subroutine_node = generate_node2type(
node, (model,) + forward_args, is_debug=is_debug, module=module,
type_hints=typing.get_type_hints(model.forward))
node2id = generate_node2id(node, subroutine_node)
id2type = generate_id2type(node2type, node2id)
id2node = generate_id2node(node2id)
return id2type, id2node
def reset_state():
np.random.seed(42)
types.var_counter = 0
if __name__ == '__main__':
import argparse
import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L
from tests.elichika_typing.EspNet_test import *
from tests.elichika_typing.Models_test import *
parser = argparse.ArgumentParser()
parser.add_argument("-e", help="Execute the script", action="store_true")
parser.add_argument("-o",
help="Specify file name to output the assertions", type=str)
args = parser.parse_args()
class Test():
def forward(self):
x = np.zeros((1, 1)).astype('float32')
y = F.pad_sequence([x], length=5)
return y
# model, forward_args = gen_MLP_model()
model, forward_args = gen_GoogLeNet_model()
# model, forward_args = gen_AttDot_model()
# model, forward_args = gen_AttLoc_model()
# model, forward_args = gen_BLSTM_model()
# model, forward_args = gen_VGG2L_model()
# model, forward_args = gen_StatelessLSTM_model()
# model, forward_args = gen_Decoder_model()
# model, forward_args = gen_E2E_model()
# model, forward_args = Test(), ()
if args.e:
model.forward(*forward_args)
else:
id2type, id2node = generate_type_inference_results(model, forward_args)
        ofile = None
        if args.o:
            ofile = open(args.o, 'w')
        generate_assertion("id2type", id2type, id2node, ofile)
|
[
"chainer_compiler.elichika.typing.type_inference.InferenceEngine",
"numpy.random.seed",
"argparse.ArgumentParser",
"chainer_compiler.elichika.typing.utils.is_expr",
"typing.get_type_hints",
"numpy.zeros",
"inspect.getsource",
"chainer.functions.pad_sequence",
"ast.parse",
"chainer_compiler.elichika.typing.utils.node_description"
] |
[((1137, 1186), 'chainer_compiler.elichika.typing.type_inference.InferenceEngine', 'InferenceEngine', ([], {'is_debug': 'is_debug', 'module': 'module'}), '(is_debug=is_debug, module=module)\n', (1152, 1186), False, 'from chainer_compiler.elichika.typing.type_inference import InferenceEngine\n'), ((3532, 3550), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (3546, 3550), True, 'import numpy as np\n'), ((3852, 3877), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3875, 3877), False, 'import argparse\n'), ((2429, 2461), 'inspect.getsource', 'inspect.getsource', (['model.forward'], {}), '(model.forward)\n', (2446, 2461), False, 'import inspect\n'), ((2491, 2506), 'ast.parse', 'ast.parse', (['code'], {}), '(code)\n', (2500, 2506), False, 'import ast, gast\n'), ((2980, 3012), 'inspect.getsource', 'inspect.getsource', (['model.forward'], {}), '(model.forward)\n', (2997, 3012), False, 'import inspect\n'), ((3042, 3057), 'ast.parse', 'ast.parse', (['code'], {}), '(code)\n', (3051, 3057), False, 'import ast, gast\n'), ((1850, 1863), 'chainer_compiler.elichika.typing.utils.is_expr', 'is_expr', (['node'], {}), '(node)\n', (1857, 1863), False, 'from chainer_compiler.elichika.typing.utils import node_description, is_expr\n'), ((2704, 2740), 'typing.get_type_hints', 'typing.get_type_hints', (['model.forward'], {}), '(model.forward)\n', (2725, 2740), False, 'import typing\n'), ((3296, 3332), 'typing.get_type_hints', 'typing.get_type_hints', (['model.forward'], {}), '(model.forward)\n', (3317, 3332), False, 'import typing\n'), ((4204, 4233), 'chainer.functions.pad_sequence', 'F.pad_sequence', (['[x]'], {'length': '(5)'}), '([x], length=5)\n', (4218, 4233), True, 'import chainer.functions as F\n'), ((2061, 2083), 'chainer_compiler.elichika.typing.utils.node_description', 'node_description', (['node'], {}), '(node)\n', (2077, 2083), False, 'from chainer_compiler.elichika.typing.utils import node_description, is_expr\n'), ((4153, 4169), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (4161, 4169), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
#
# Copyright (c) 2015 10X Genomics, Inc. All rights reserved.
#
import cPickle
from collections import defaultdict
from itertools import izip
import json
import numpy as np
import cellranger.constants as cr_constants
import cellranger.library_constants as lib_constants
from cellranger.molecule_counter import MoleculeCounter
import cellranger.rna.library as rna_library
import cellranger.utils as cr_utils
import tenkit.safe_json as tk_safe_json
import tenkit.stats as tk_stats
__MRO__ = """
stage SUBSAMPLE_READS(
in h5 molecule_info,
in csv filtered_barcodes,
out json summary,
src py "stages/counter/subsample_reads",
) split using (
in int chunk_start,
in int chunk_len,
in map[] subsample_info,
out pickle metrics,
)
"""
def get_cell_associated_barcodes(genomes, filtered_barcodes_csv):
""" Get cell-associated barcodes by genome.
Args:
genomes (list of str): Genome names.
filtered_barcodes_csv (str): Path to CSV file.
Returns:
dict of (str, set): Map genome to list of cell-assoc barcodes. Empty-string key is for all genomes."""
cell_bcs = {}
for genome in genomes:
# Get all cell-assoc barcodes (ignoring genome) for the "" (blank) genome string
cell_bcs[genome] = cr_utils.get_cell_associated_barcode_set(filtered_barcodes_csv,
genome)
# All cell-associated barcodes
cell_bcs[''] = reduce(lambda x,y: x | y, cell_bcs.itervalues(), set())
return cell_bcs
def split(args):
# Get required info from the mol info
mc = MoleculeCounter.open(args.molecule_info, 'r')
genomes = sorted(set(f.tags.get('genome', '') for f in mc.feature_reference.feature_defs))
cell_bcs_by_genome = get_cell_associated_barcodes(genomes, args.filtered_barcodes)
# Get cell counts per gem group
n_cells_per_gg = defaultdict(int)
for bc in cell_bcs_by_genome['']:
_, gem_group = cr_utils.split_barcode_seq(bc)
n_cells_per_gg[gem_group] += 1
# Assign gem group cell counts to their constituent libraries
# TODO FIXME: Need to allow for per-library cell counts
# because some feature types might only have a subset of the GEX cell-assoc barcodes.
n_cells_per_lib = np.zeros(len(mc.library_info), dtype=int)
for lib_idx, lib in enumerate(mc.library_info):
n_cells_per_lib[lib_idx] = n_cells_per_gg[lib['gem_group']]
if n_cells_per_lib.sum() == 0:
return {'chunks': []}
library_info = mc.library_info
raw_count_per_lib = np.array(mc.get_raw_read_pairs_per_library())
raw_rppc_per_lib = raw_count_per_lib.astype(float) / n_cells_per_lib
usable_count_per_lib = np.array(mc.get_usable_read_pairs_per_library())
subsamplings = list() # track subsample info definitions
library_types = sorted(set(lib['library_type'] for lib in library_info))
for library_type in library_types:
# All libraries w/ this type
lib_indexes = np.array([i for i,lib in enumerate(library_info) if lib['library_type'] == library_type])
# For plotting, we want a series of target depths that exist for all
# libraries w/ the same library type. When there's a single library
# per type (the common case), this is trivial - split it into deciles.
# But if there are multiple libraries with different depths, (e.g.,
# because gem-group-aggregation was used to increase cell numbers),
# we need to find depths that are achievable for all libraries.
# For now, let the lowest-depth library for a given type dictate this.
min_raw_rppc = np.min(raw_rppc_per_lib[lib_indexes])
# Use deciles of the raw read pairs per cell.
deciles = np.arange(0.1, 1.1, 0.1)
plot_targets = map(round, min_raw_rppc * deciles)
# TODO: separate this work (internal + non)
raw_targets = cr_constants.SUBSAMPLE_READS_PER_CELL + \
plot_targets
# TODO: separate this work (internal + non)
usable_targets = cr_constants.SUBSAMPLE_READS_PER_CELL + \
plot_targets
for targets, depth_type in \
((raw_targets, cr_constants.RAW_SUBSAMPLE_TYPE), \
((usable_targets, cr_constants.MAPPED_SUBSAMPLE_TYPE)),):
targets = sorted(list(set(map(int, targets))))
for target_rppc in targets:
if depth_type == cr_constants.RAW_SUBSAMPLE_TYPE:
# Infer the usable depth required to achieve this raw depth
usable_read_fracs = usable_count_per_lib.astype(float) / raw_count_per_lib
target_usable_counts = target_rppc * n_cells_per_lib * usable_read_fracs
else:
target_usable_counts = target_rppc * n_cells_per_lib
# Zero out libraries of the other types
rates = np.zeros(len(library_info), dtype=float)
rates[lib_indexes] = target_usable_counts[lib_indexes].astype(float) \
/ usable_count_per_lib[lib_indexes]
# Clamp rates that are close to 1 to 1
rates[np.absolute(rates - 1) < 1e-3] = 1
# Zero out the libraries for which we have fewer reads than the target
rates[rates > 1] = 0.0
enough_data = np.any((rates > 0) & (rates <= 1))
if not enough_data:
rates = np.zeros(len(rates))
subsamplings.append({
'library_type': library_type,
'subsample_type': depth_type,
'target_read_pairs_per_cell': int(target_rppc),
'library_subsample_rates': list(map(float, rates)),
})
# Each chunk needs to store a piece of the mol info h5
tgt_chunk_len = cr_constants.NUM_MOLECULE_INFO_ENTRIES_PER_CHUNK
# Split the molecule info h5 into equi-RAM chunks
chunks = []
for chunk_start, chunk_len in mc.get_chunks(tgt_chunk_len, preserve_boundaries=True):
chunks.append({
'chunk_start': chunk_start,
'chunk_len': chunk_len,
'subsample_info': subsamplings,
            # The estimate_mem_gb only counts the memory used by the MoleculeCounter
            # object, which underestimates the actual memory usage.
            # Based on memory profiling with test case fuzzer_114, actual memory usage
            # is ~4x more than estimate_mem_gb (without cap), so scale is set to 6 here.
'__mem_gb': MoleculeCounter.estimate_mem_gb(chunk_len, scale=6),
})
join = {
'__mem_gb': 6,
}
mc.close()
# TODO: is this really necessary w/ martian 3
if len(chunks) == 0:
chunks.append({
'chunk_start': str(0),
'chunk_len': str(0),
'subsample_info': [],
})
return {'chunks': chunks, 'join': join}
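# Added summary comment: for each (library type, subsample type, target read
# pairs per cell) combination, split() above derives one per-library rate
# vector: rate = target usable read pairs / observed usable read pairs,
# clamped to 1 when within 1e-3 of it and zeroed for libraries that were not
# sequenced deeply enough to reach the target.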
def main(args, outs):
np.random.seed(0)
mc = MoleculeCounter.open(args.molecule_info, 'r')
# Get cell-associated barcodes
genomes = sorted(set(f.tags.get('genome', '') for f in mc.feature_reference.feature_defs))
cell_bcs_by_genome = get_cell_associated_barcodes(genomes, args.filtered_barcodes)
# Load chunk of relevant data from the mol_info
chunk = slice(int(args.chunk_start), int(args.chunk_start) + int(args.chunk_len))
mol_library_idx = mc.get_column_lazy('library_idx')[chunk]
mol_read_pairs = mc.get_column_lazy('count')[chunk]
mol_gem_group = mc.get_column_lazy('gem_group')[chunk]
mol_barcode_idx = mc.get_column_lazy('barcode_idx')[chunk]
mol_feature_idx = mc.get_column_lazy('feature_idx')[chunk]
barcodes = mc.get_ref_column('barcodes')
# Give each cell-associated barcode an integer index
cell_bcs = sorted(list(cell_bcs_by_genome['']))
cell_bc_to_int = {bc: i for i, bc in enumerate(cell_bcs)}
# Give each genome an integer index
genome_to_int = {g: i for i, g in enumerate(genomes)}
feature_int_to_genome_int = np.fromiter((genome_to_int[f.tags.get('genome', '')] for f in mc.feature_reference.feature_defs),
dtype=int)
mol_genome_idx = feature_int_to_genome_int[mol_feature_idx]
# determine which (library type, genome) pairs have any associated reads
lib_types = sorted(set(lib['library_type'] for lib in mc.library_info))
lib_type_to_int = {l: i for i, l in enumerate(lib_types)}
lib_idx_to_lib_type_idx = np.fromiter((lib_type_to_int[lib['library_type']] for lib in mc.library_info),
dtype=np.int)
lib_type_genome_any_reads = np.zeros((len(lib_types), len(genomes)), dtype=np.bool)
lib_genome_idx_pairs = set(izip(mol_library_idx[mol_read_pairs > 0],
mol_genome_idx[mol_read_pairs > 0]))
for (lib_idx, genome_idx) in lib_genome_idx_pairs:
lib_type_idx = lib_idx_to_lib_type_idx[lib_idx]
lib_type_genome_any_reads[lib_type_idx, genome_idx] = True
# Run each subsampling task on this chunk of data
n_tasks = len(args.subsample_info)
n_genomes = len(genomes)
n_cells = len(cell_bcs)
umis_per_bc = np.zeros((n_tasks, n_genomes, n_cells))
features_det_per_bc = np.zeros((n_tasks, n_genomes, n_cells))
read_pairs_per_task = np.zeros((n_tasks, n_genomes))
umis_per_task = np.zeros((n_tasks, n_genomes))
for task_idx, task in enumerate(args.subsample_info):
# Per-library subsampling rates
rates_per_library = np.array(task['library_subsample_rates'], dtype=float)
if np.count_nonzero(rates_per_library) == 0:
continue
mol_rate = rates_per_library[mol_library_idx]
# Subsampled read pairs per molecule
new_read_pairs = np.random.binomial(mol_read_pairs, mol_rate)
# Compute tallies for each barcode
group_keys = (mol_gem_group, mol_barcode_idx)
group_values = (mol_feature_idx, mol_genome_idx, new_read_pairs)
for (gg, bc_idx), (feature_idx, genome_idx, read_pairs) in \
cr_utils.numpy_groupby(group_values, group_keys):
barcode = cr_utils.format_barcode_seq(barcodes[bc_idx], gg)
cell_idx = cell_bc_to_int.get(barcode)
for this_genome_idx in xrange(len(genomes)):
umis = np.flatnonzero((read_pairs > 0) & (genome_idx == this_genome_idx))
this_genome_read_pairs = np.sum(read_pairs[genome_idx == this_genome_idx])
# Tally UMIs and median features detected
if barcode in cell_bcs_by_genome[genomes[this_genome_idx]]:
# This is a cell-associated barcode for this genome
umis_per_bc[task_idx, this_genome_idx, cell_idx] = len(umis)
features_det_per_bc[task_idx, this_genome_idx, cell_idx] = np.count_nonzero(np.bincount(feature_idx[umis]))
# Tally numbers for duplicate fraction
read_pairs_per_task[task_idx, this_genome_idx] += np.sum(this_genome_read_pairs)
umis_per_task[task_idx, this_genome_idx] += len(umis)
with open(outs.metrics, 'w') as f:
data = {
'umis_per_bc': umis_per_bc,
'features_det_per_bc': features_det_per_bc,
'read_pairs': read_pairs_per_task,
'umis': umis_per_task,
'lib_type_genome_any_reads': lib_type_genome_any_reads,
}
cPickle.dump(data, f, protocol = cPickle.HIGHEST_PROTOCOL)
def make_metric_name(name, library_type, genome, ss_type, ss_depth):
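    # Example with hypothetical inputs: make_metric_name('subsampled_duplication_frac',
    # 'Gene Expression', 'GRCh38', 'raw_rpc', 20000) yields
    # '<library-type prefix>GRCh38_raw_rpc_20000_subsampled_duplication_frac',
    # where the prefix is whatever get_library_type_metric_prefix returns.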
lt_prefix = rna_library.get_library_type_metric_prefix(library_type)
return '%s%s_%s_%s_%s' % (lt_prefix, genome, ss_type, ss_depth, name)
def compute_dup_frac(read_pairs, umis):
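    # e.g. 100 read pairs that collapse to 60 UMIs give a duplication fraction of
    # (100 - 60) / 100 = 0.4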
return tk_stats.robust_divide(read_pairs - umis, read_pairs) if read_pairs > 0 else 0.0
def join(args, outs, chunk_defs, chunk_outs):
# Merge tallies
data = None
for chunk in chunk_outs:
with open(chunk.metrics) as f:
chunk_data = cPickle.load(f)
if data is None:
data = chunk_data
else:
for k,v in data.iteritems():
data[k] += chunk_data[k]
# Compute metrics for each subsampling rate
summary = {}
with MoleculeCounter.open(args.molecule_info, 'r') as mc:
genomes = sorted(set(f.tags.get('genome', '') for f in mc.feature_reference.feature_defs))
lib_types = sorted(set(lib['library_type'] for lib in mc.library_info))
lib_type_map = dict((lt, idx) for (idx, lt) in enumerate(lib_types))
cell_bcs_by_genome = get_cell_associated_barcodes(genomes, args.filtered_barcodes)
# Give each cell-associated barcode an integer index
cell_bcs = sorted(list(cell_bcs_by_genome['']))
cell_bc_to_int = {bc: i for i, bc in enumerate(cell_bcs)}
subsample_info = chunk_defs[0].subsample_info if len(chunk_defs) > 0 else []
for i, task in enumerate(subsample_info):
lib_type = task['library_type']
lib_type_idx = lib_type_map[lib_type]
ss_type = task['subsample_type']
ss_depth = task['target_read_pairs_per_cell']
if rna_library.has_genomes(lib_type):
genome_ints = list(range(data['umis_per_bc'].shape[1]))
else:
genome_ints = [0]
# Per-genome metrics
for g in genome_ints:
if not data['lib_type_genome_any_reads'][lib_type_idx, g]:
continue
genome = genomes[g]
# Only compute on cell-associated barcodes for this genome.
# This only matters when there are multiple genomes present.
cell_inds = np.array(sorted(cell_bc_to_int[bc] for bc in cell_bcs_by_genome[genome]))
median_umis_per_cell = np.median(data['umis_per_bc'][i,g,cell_inds])
summary[make_metric_name('subsampled_filtered_bcs_median_counts',
lib_type, genome, ss_type, ss_depth)] = median_umis_per_cell
median_features_per_cell = np.median(data['features_det_per_bc'][i,g,cell_inds])
summary[make_metric_name('subsampled_filtered_bcs_median_unique_genes_detected',
lib_type, genome, ss_type, ss_depth)] = median_features_per_cell
dup_frac = compute_dup_frac(data['read_pairs'][i,g], data['umis'][i,g])
summary[make_metric_name('subsampled_duplication_frac',
lib_type, genome, ss_type, ss_depth)] = dup_frac
# Whole-dataset duplication frac
all_read_pairs = np.sum(data['read_pairs'][i,:])
all_umis = np.sum(data['umis'][i,:])
dup_frac = compute_dup_frac(all_read_pairs, all_umis)
summary[make_metric_name('subsampled_duplication_frac',
lib_type, lib_constants.MULTI_REFS_PREFIX, ss_type, ss_depth)] = dup_frac
with open(outs.summary, 'w') as f:
json.dump(tk_safe_json.json_sanitize(summary), f, indent=4, sort_keys=True)
|
[
"numpy.absolute",
"numpy.random.seed",
"numpy.sum",
"cPickle.load",
"collections.defaultdict",
"cellranger.molecule_counter.MoleculeCounter.estimate_mem_gb",
"numpy.arange",
"numpy.fromiter",
"cellranger.molecule_counter.MoleculeCounter.open",
"cellranger.rna.library.get_library_type_metric_prefix",
"cellranger.utils.numpy_groupby",
"numpy.bincount",
"tenkit.stats.robust_divide",
"cellranger.utils.format_barcode_seq",
"numpy.random.binomial",
"numpy.median",
"cellranger.utils.get_cell_associated_barcode_set",
"numpy.min",
"tenkit.safe_json.json_sanitize",
"numpy.count_nonzero",
"cellranger.utils.split_barcode_seq",
"numpy.flatnonzero",
"numpy.zeros",
"numpy.any",
"cPickle.dump",
"cellranger.rna.library.has_genomes",
"numpy.array",
"itertools.izip"
] |
[((1651, 1696), 'cellranger.molecule_counter.MoleculeCounter.open', 'MoleculeCounter.open', (['args.molecule_info', '"""r"""'], {}), "(args.molecule_info, 'r')\n", (1671, 1696), False, 'from cellranger.molecule_counter import MoleculeCounter\n'), ((1938, 1954), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1949, 1954), False, 'from collections import defaultdict\n'), ((7077, 7094), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (7091, 7094), True, 'import numpy as np\n'), ((7105, 7150), 'cellranger.molecule_counter.MoleculeCounter.open', 'MoleculeCounter.open', (['args.molecule_info', '"""r"""'], {}), "(args.molecule_info, 'r')\n", (7125, 7150), False, 'from cellranger.molecule_counter import MoleculeCounter\n'), ((8627, 8724), 'numpy.fromiter', 'np.fromiter', (["(lib_type_to_int[lib['library_type']] for lib in mc.library_info)"], {'dtype': 'np.int'}), "((lib_type_to_int[lib['library_type']] for lib in mc.\n library_info), dtype=np.int)\n", (8638, 8724), True, 'import numpy as np\n'), ((9346, 9385), 'numpy.zeros', 'np.zeros', (['(n_tasks, n_genomes, n_cells)'], {}), '((n_tasks, n_genomes, n_cells))\n', (9354, 9385), True, 'import numpy as np\n'), ((9412, 9451), 'numpy.zeros', 'np.zeros', (['(n_tasks, n_genomes, n_cells)'], {}), '((n_tasks, n_genomes, n_cells))\n', (9420, 9451), True, 'import numpy as np\n'), ((9478, 9508), 'numpy.zeros', 'np.zeros', (['(n_tasks, n_genomes)'], {}), '((n_tasks, n_genomes))\n', (9486, 9508), True, 'import numpy as np\n'), ((9529, 9559), 'numpy.zeros', 'np.zeros', (['(n_tasks, n_genomes)'], {}), '((n_tasks, n_genomes))\n', (9537, 9559), True, 'import numpy as np\n'), ((11760, 11816), 'cellranger.rna.library.get_library_type_metric_prefix', 'rna_library.get_library_type_metric_prefix', (['library_type'], {}), '(library_type)\n', (11802, 11816), True, 'import cellranger.rna.library as rna_library\n'), ((1312, 1383), 'cellranger.utils.get_cell_associated_barcode_set', 'cr_utils.get_cell_associated_barcode_set', (['filtered_barcodes_csv', 'genome'], {}), '(filtered_barcodes_csv, genome)\n', (1352, 1383), True, 'import cellranger.utils as cr_utils\n'), ((2016, 2046), 'cellranger.utils.split_barcode_seq', 'cr_utils.split_barcode_seq', (['bc'], {}), '(bc)\n', (2042, 2046), True, 'import cellranger.utils as cr_utils\n'), ((3710, 3747), 'numpy.min', 'np.min', (['raw_rppc_per_lib[lib_indexes]'], {}), '(raw_rppc_per_lib[lib_indexes])\n', (3716, 3747), True, 'import numpy as np\n'), ((3821, 3845), 'numpy.arange', 'np.arange', (['(0.1)', '(1.1)', '(0.1)'], {}), '(0.1, 1.1, 0.1)\n', (3830, 3845), True, 'import numpy as np\n'), ((8882, 8959), 'itertools.izip', 'izip', (['mol_library_idx[mol_read_pairs > 0]', 'mol_genome_idx[mol_read_pairs > 0]'], {}), '(mol_library_idx[mol_read_pairs > 0], mol_genome_idx[mol_read_pairs > 0])\n', (8886, 8959), False, 'from itertools import izip\n'), ((9687, 9741), 'numpy.array', 'np.array', (["task['library_subsample_rates']"], {'dtype': 'float'}), "(task['library_subsample_rates'], dtype=float)\n", (9695, 9741), True, 'import numpy as np\n'), ((9943, 9987), 'numpy.random.binomial', 'np.random.binomial', (['mol_read_pairs', 'mol_rate'], {}), '(mol_read_pairs, mol_rate)\n', (9961, 9987), True, 'import numpy as np\n'), ((10240, 10288), 'cellranger.utils.numpy_groupby', 'cr_utils.numpy_groupby', (['group_values', 'group_keys'], {}), '(group_values, group_keys)\n', (10262, 10288), True, 'import cellranger.utils as cr_utils\n'), ((11614, 11670), 'cPickle.dump', 'cPickle.dump', (['data', 'f'], {'protocol': 
'cPickle.HIGHEST_PROTOCOL'}), '(data, f, protocol=cPickle.HIGHEST_PROTOCOL)\n', (11626, 11670), False, 'import cPickle\n'), ((11943, 11996), 'tenkit.stats.robust_divide', 'tk_stats.robust_divide', (['(read_pairs - umis)', 'read_pairs'], {}), '(read_pairs - umis, read_pairs)\n', (11965, 11996), True, 'import tenkit.stats as tk_stats\n'), ((12443, 12488), 'cellranger.molecule_counter.MoleculeCounter.open', 'MoleculeCounter.open', (['args.molecule_info', '"""r"""'], {}), "(args.molecule_info, 'r')\n", (12463, 12488), False, 'from cellranger.molecule_counter import MoleculeCounter\n'), ((13333, 13366), 'cellranger.rna.library.has_genomes', 'rna_library.has_genomes', (['lib_type'], {}), '(lib_type)\n', (13356, 13366), True, 'import cellranger.rna.library as rna_library\n'), ((14766, 14798), 'numpy.sum', 'np.sum', (["data['read_pairs'][i, :]"], {}), "(data['read_pairs'][i, :])\n", (14772, 14798), True, 'import numpy as np\n'), ((14817, 14843), 'numpy.sum', 'np.sum', (["data['umis'][i, :]"], {}), "(data['umis'][i, :])\n", (14823, 14843), True, 'import numpy as np\n'), ((9754, 9789), 'numpy.count_nonzero', 'np.count_nonzero', (['rates_per_library'], {}), '(rates_per_library)\n', (9770, 9789), True, 'import numpy as np\n'), ((10313, 10362), 'cellranger.utils.format_barcode_seq', 'cr_utils.format_barcode_seq', (['barcodes[bc_idx]', 'gg'], {}), '(barcodes[bc_idx], gg)\n', (10340, 10362), True, 'import cellranger.utils as cr_utils\n'), ((12200, 12215), 'cPickle.load', 'cPickle.load', (['f'], {}), '(f)\n', (12212, 12215), False, 'import cPickle\n'), ((13948, 13995), 'numpy.median', 'np.median', (["data['umis_per_bc'][i, g, cell_inds]"], {}), "(data['umis_per_bc'][i, g, cell_inds])\n", (13957, 13995), True, 'import numpy as np\n'), ((14210, 14265), 'numpy.median', 'np.median', (["data['features_det_per_bc'][i, g, cell_inds]"], {}), "(data['features_det_per_bc'][i, g, cell_inds])\n", (14219, 14265), True, 'import numpy as np\n'), ((15135, 15170), 'tenkit.safe_json.json_sanitize', 'tk_safe_json.json_sanitize', (['summary'], {}), '(summary)\n', (15161, 15170), True, 'import tenkit.safe_json as tk_safe_json\n'), ((5467, 5501), 'numpy.any', 'np.any', (['((rates > 0) & (rates <= 1))'], {}), '((rates > 0) & (rates <= 1))\n', (5473, 5501), True, 'import numpy as np\n'), ((6669, 6720), 'cellranger.molecule_counter.MoleculeCounter.estimate_mem_gb', 'MoleculeCounter.estimate_mem_gb', (['chunk_len'], {'scale': '(6)'}), '(chunk_len, scale=6)\n', (6700, 6720), False, 'from cellranger.molecule_counter import MoleculeCounter\n'), ((10496, 10562), 'numpy.flatnonzero', 'np.flatnonzero', (['((read_pairs > 0) & (genome_idx == this_genome_idx))'], {}), '((read_pairs > 0) & (genome_idx == this_genome_idx))\n', (10510, 10562), True, 'import numpy as np\n'), ((10604, 10653), 'numpy.sum', 'np.sum', (['read_pairs[genome_idx == this_genome_idx]'], {}), '(read_pairs[genome_idx == this_genome_idx])\n', (10610, 10653), True, 'import numpy as np\n'), ((11192, 11222), 'numpy.sum', 'np.sum', (['this_genome_read_pairs'], {}), '(this_genome_read_pairs)\n', (11198, 11222), True, 'import numpy as np\n'), ((5274, 5296), 'numpy.absolute', 'np.absolute', (['(rates - 1)'], {}), '(rates - 1)\n', (5285, 5296), True, 'import numpy as np\n'), ((11038, 11068), 'numpy.bincount', 'np.bincount', (['feature_idx[umis]'], {}), '(feature_idx[umis])\n', (11049, 11068), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2018-09-18 13:25:04
# @Last Modified by: <NAME>
# @Last Modified time: 2018-09-18 13:35:04
"""
CUSTOM ESTIMATOR AS DECORATORS for Scikit-Learn Pipelines
"""
from sklearn.base import BaseEstimator, TransformerMixin, ClassifierMixin
import pandas as pd
class SKTransform(BaseEstimator, TransformerMixin):
"""Sklearn Custom Transformer Decorator"""
def __init__(self, f):
self.transform_func = f
def __call__(self, X):
return self.transform_func(X)
def __iter__(self):
return (i for i in [self.transform_func.__name__, self])
def __getitem__(self, i):
return [self.transform_func.__name__, self][i]
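    # __iter__ and __getitem__ above let a decorated function stand in for the
    # ('name', estimator) tuple that sklearn's Pipeline expects, so instances can be
    # passed directly in the steps list (see the __main__ block at the bottom).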
def fit(self, X, y=None):
return self
def transform(self, X):
if isinstance(X, pd.DataFrame):
return self.transform_func(X.values)
return self.transform_func(X)
class SKClassify(BaseEstimator, ClassifierMixin):
"""Sklearn Custom Classifier Decorator"""
def __init__(self, f):
self.predict_func = f
def __call__(self, X):
return self.predict_func(X)
def __iter__(self):
return (i for i in [self.predict_func.__name__, self])
def __getitem__(self, i):
return [self.predict_func.__name__, self][i]
def fit(self, X, y=None):
return self
def fit_predict(self, X, y=None):
return self.predict(X)
def predict(self, X, y=None):
if isinstance(X, pd.DataFrame):
return self.predict_func(X.values)
return self.predict_func(X)
if __name__ == '__main__':
from sklearn.pipeline import Pipeline
import numpy as np
@SKTransform
def power2(x):
return x**2
@SKClassify
def lessThan50(x):
return x < 50
ppl = Pipeline([
power2,
lessThan50,
])
print('Prediction:\n', ppl.predict(np.array([3, 6, 8, 10])))
|
[
"sklearn.pipeline.Pipeline",
"numpy.array"
] |
[((1813, 1843), 'sklearn.pipeline.Pipeline', 'Pipeline', (['[power2, lessThan50]'], {}), '([power2, lessThan50])\n', (1821, 1843), False, 'from sklearn.pipeline import Pipeline\n'), ((1906, 1929), 'numpy.array', 'np.array', (['[3, 6, 8, 10]'], {}), '([3, 6, 8, 10])\n', (1914, 1929), True, 'import numpy as np\n')]
|
# vim: expandtab:ts=4:sw=4
from __future__ import absolute_import
import numpy as np
import pdb
from . import kf_2d, kf_3d, double_measurement_kf, imm
from . import linear_assignment
from . import iou_matching
from .track import Track
from . import JPDA_matching
from . import tracking_utils
import math
from nn_matching import NearestNeighborDistanceMetric
import cv2
class Tracker:
"""
This is the multi-target tracker.
Parameters
----------
metric : nn_matching.NearestNeighborDistanceMetric
A distance metric for measurement-to-track association.
max_age : int
        Maximum number of consecutive misses before a track is deleted.
n_init : int
Number of consecutive detections before the track is confirmed. The
track state is set to `Deleted` if a miss occurs within the first
`n_init` frames.
Attributes
----------
metric : nn_matching.NearestNeighborDistanceMetric
The distance metric used for measurement to track association.
max_age : int
        Maximum number of consecutive misses before a track is deleted.
n_init : int
Number of frames that a track remains in initialization phase.
kf : EKF.KalmanFilter
A Kalman filter to filter target trajectories in image space.
tracks : List[Track]
The list of active tracks at the current time step.
"""
def __init__(self, max_age=5, n_init=3,
JPDA=False, m_best_sol=1, assn_thresh=0.0,
matching_strategy=None,
kf_appearance_feature=None,
gate_full_state=False, lstm = None, cuda = False, appearance_model = None,
calib = None, kf_vel_params=(1./20, 1./160, 1, 1, 2), dummy_node_cost_iou=0.4, dummy_node_cost_app=0.2, nn_budget = None, use_imm=False, kf_walk_params=(1./20, 1./160, 1, 1, 2),
markov=(0.9, 0.7), uncertainty_limit=1.8, optical_flow=False, gate_limit=400):
self.max_age = max_age
self.n_init = n_init
self.metric = NearestNeighborDistanceMetric("euclidean", nn_budget)
if not use_imm:
self.kf = kf_2d.KalmanFilter2D(*kf_vel_params, gate_limit)
self.use_imm = False
else:
self.kf = imm.IMMFilter2D(kf_vel_params, kf_walk_params, markov=markov)
self.use_imm = True
self.tracks = []
self._next_id = 1
self.JPDA = JPDA
self.m_best_sol = m_best_sol
self.assn_thresh = assn_thresh
self.matching_strategy = matching_strategy
self.kf_appearance_feature = kf_appearance_feature
self.gate_only_position = not gate_full_state
self.lstm = lstm
self.cuda = cuda
self.dummy_node_cost_app = dummy_node_cost_app
self.dummy_node_cost_iou = dummy_node_cost_iou
self.appearance_model = appearance_model
self.prev_frame = None
self.uncertainty_limit = uncertainty_limit
self.optical_flow = optical_flow
# @profile
def gated_metric(self, tracks, dets, track_indices, detection_indices, compare_2d = False):
targets = np.array([tracks[i].track_id for i in track_indices])
if not compare_2d and self.metric.check_samples(targets):
compare_2d = True
if compare_2d:
features = np.array([dets[i].appearance_feature for i in detection_indices])
else:
features = np.array([dets[i].feature for i in detection_indices])
#cost_matrix = self.metric.distance(features, targets, compare_2d)
cost_matrix_appearance = self.metric.distance_torch(features, targets, compare_2d)
cost_matrix_iou = iou_matching.iou_cost(tracks, dets, track_indices, detection_indices)
gate_mask = linear_assignment.gate_cost_matrix(
self.kf, tracks, dets, track_indices,
detection_indices, only_position=self.gate_only_position)
cost_matrix = np.dstack((cost_matrix_appearance, cost_matrix_iou))
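        # stacked cost has shape (n_tracks, n_detections, 2):
        # appearance distance in [..., 0], IoU cost in [..., 1]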
return cost_matrix, gate_mask
def predict(self):
"""Propagate track state distributions one time step forward.
This function should be called once every time step, before `update`.
"""
for track in self.tracks:
track.predict(self.kf)
# @profile
def update(self, cur_frame, detections, compare_2d = False):
"""Perform measurement update and track management.
Parameters
----------
detections : List[deep_sort.detection.Detection]
A list of detections at the current time step.
"""
self.cur_frame = cv2.cvtColor((255*cur_frame).permute(1,2,0).cpu().numpy(), cv2.COLOR_BGR2GRAY)
matches, unmatched_tracks, unmatched_detections = \
self._match(detections, compare_2d)
# update filter for each assigned track
# Only do this for non-JPDA because in JPDA the kf states are updated
# during the matching process
if not self.JPDA:
# Map matched tracks to detections
track_detection_map = {t:d for (t,d) in matches}
# Map unmatched tracks to -1 for no detection
for t in unmatched_tracks:
track_detection_map[t] = -1
for track_idx, detection_idx in matches:
self.tracks[track_idx].update(self.kf, detections,
detection_idx=detection_idx, JPDA=self.JPDA,
cur_frame = self.cur_frame, appearance_model = self.appearance_model,
lstm = self.lstm)
# update track state for unmatched tracks
for track_idx in unmatched_tracks:
self.tracks[track_idx].mark_missed()
# create new tracks
self.prune_tracks()
flow = None
if unmatched_detections:
if self.optical_flow and self.prev_frame is not None:
flow = cv2.calcOpticalFlowFarneback(self.prev_frame, self.cur_frame, None, 0.5, 3, 15, 3, 5, 1.2, 0)
for detection_idx in unmatched_detections:
self._initiate_track(detections[detection_idx], flow)
# Update distance metric.
active_targets = [t.track_id for t in self.tracks]
features, features_2d, targets, targets_2d = [], [], [], []
for track in self.tracks:
features += track.features
features_2d += track.features_2d
targets += [track.track_id for _ in track.features]
targets_2d += [track.track_id for _ in track.features_2d]
track.features = []
track.features_2d = []
self.metric.partial_fit(
np.asarray(features), np.asarray(features_2d), np.asarray(targets), np.asarray(targets_2d), active_targets)
self.prev_frame = self.cur_frame
# @profile
def _match(self, detections, compare_2d):
# Associate all tracks using combined cost matrices.
if self.JPDA:
# Run JPDA on all tracks
marginalizations = \
linear_assignment.JPDA(self.gated_metric, self.dummy_node_cost_app, self.dummy_node_cost_iou, self.tracks, \
detections, m=self.m_best_sol, compare_2d = compare_2d)
# for track in self.tracks: #TODO: REMOVE
# print(track.track_id)
# print(marginalizations)
jpda_matcher = JPDA_matching.Matcher(
detections, marginalizations, range(len(self.tracks)),
self.matching_strategy, assignment_threshold=self.assn_thresh)
matches_a, unmatched_tracks_a, unmatched_detections = jpda_matcher.match()
            # Map matched tracks to detections
track_detection_map = {t:d for (t,d) in matches_a}
# Map unmatched tracks to -1 for no detection
for t in unmatched_tracks_a:
track_detection_map[t] = -1
# update Kalman state
if marginalizations.shape[0] > 0:
for i in range(len(self.tracks)):
self.tracks[i].update(self.kf, detections,
marginalization=marginalizations[i,:], detection_idx=track_detection_map[i],
JPDA=self.JPDA, cur_frame = self.cur_frame, appearance_model = self.appearance_model, lstm = self.lstm)
else:
confirmed_tracks = [i for i, t in enumerate(self.tracks) if t.is_confirmed()]
matches_a, unmatched_tracks_a, unmatched_detections = \
linear_assignment.matching_cascade(
self.gated_metric, self.dummy_node_cost_iou, self.max_age,
self.tracks, detections, confirmed_tracks, compare_2d = compare_2d)
return matches_a, unmatched_tracks_a, unmatched_detections
def _initiate_track(self, detection, flow=None):
if self.use_imm:
mean, covariance, model_probabilities = self.kf.initiate(detection.to_xywh(), flow)
else:
mean, covariance = self.kf.initiate(detection.to_xywh(), flow)
model_probabilities = None
self.tracks.append(Track(
mean, covariance, model_probabilities, self._next_id, self.n_init, self.max_age,
kf_appearance_feature = self.kf_appearance_feature,
feature=detection.feature, appearance_feature = detection.appearance_feature,
cuda = self.cuda, lstm = self.lstm, last_det = detection))
self._next_id += 1
def prune_tracks(self):
h, w = self.cur_frame.shape
for track in self.tracks:
# Check if track is leaving
if self.use_imm:
predicted_mean, predicted_cov = self.kf.combine_states(track.mean, track.covariance, track.model_probabilities) #TODO: This doesn't predict. Mean should def predict
else:
predicted_mean = self.kf.predict_mean(track.mean)
predicted_cov = track.covariance
predicted_pos = predicted_mean[:2]
predicted_vel = predicted_mean[4:6]
predicted_pos[0] -= w/2
predicted_pos[1] -= h/2
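            # cosine of the angle between the centre-relative position and the velocity:
            # > 0 means the track is moving away from the image centre (i.e. leaving the frame)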
cos_theta = np.dot(predicted_pos, predicted_vel)/(np.linalg.norm(predicted_pos)*
np.linalg.norm(predicted_vel) + 1e-6)
predicted_pos[0] += w/2
predicted_pos[1] += h/2
# Thresholds for deciding whether track is outside image
BORDER_VALUE = 0
if (cos_theta > 0 and
(predicted_pos[0] - track.mean[2]/2<= BORDER_VALUE or
predicted_pos[0] + track.mean[2]/2 >= w - BORDER_VALUE)):
if track.is_exiting() and not track.matched:
track.delete_track()
else:
track.mark_exiting()
# Check if track is too uncertain
# cov_axis,_ = np.linalg.eigh(predicted_cov)
# if np.abs(np.sqrt(cov_axis[-1]))*6 > self.uncertainty_limit*np.linalg.norm(predicted_mean[2:4]):
# track.delete_track()
self.tracks = [t for t in self.tracks if not t.is_deleted()]
|
[
"numpy.dstack",
"nn_matching.NearestNeighborDistanceMetric",
"numpy.asarray",
"numpy.array",
"numpy.linalg.norm",
"cv2.calcOpticalFlowFarneback",
"numpy.dot"
] |
[((2036, 2089), 'nn_matching.NearestNeighborDistanceMetric', 'NearestNeighborDistanceMetric', (['"""euclidean"""', 'nn_budget'], {}), "('euclidean', nn_budget)\n", (2065, 2089), False, 'from nn_matching import NearestNeighborDistanceMetric\n'), ((3126, 3179), 'numpy.array', 'np.array', (['[tracks[i].track_id for i in track_indices]'], {}), '([tracks[i].track_id for i in track_indices])\n', (3134, 3179), True, 'import numpy as np\n'), ((3941, 3993), 'numpy.dstack', 'np.dstack', (['(cost_matrix_appearance, cost_matrix_iou)'], {}), '((cost_matrix_appearance, cost_matrix_iou))\n', (3950, 3993), True, 'import numpy as np\n'), ((3322, 3387), 'numpy.array', 'np.array', (['[dets[i].appearance_feature for i in detection_indices]'], {}), '([dets[i].appearance_feature for i in detection_indices])\n', (3330, 3387), True, 'import numpy as np\n'), ((3425, 3479), 'numpy.array', 'np.array', (['[dets[i].feature for i in detection_indices]'], {}), '([dets[i].feature for i in detection_indices])\n', (3433, 3479), True, 'import numpy as np\n'), ((6679, 6699), 'numpy.asarray', 'np.asarray', (['features'], {}), '(features)\n', (6689, 6699), True, 'import numpy as np\n'), ((6701, 6724), 'numpy.asarray', 'np.asarray', (['features_2d'], {}), '(features_2d)\n', (6711, 6724), True, 'import numpy as np\n'), ((6726, 6745), 'numpy.asarray', 'np.asarray', (['targets'], {}), '(targets)\n', (6736, 6745), True, 'import numpy as np\n'), ((6747, 6769), 'numpy.asarray', 'np.asarray', (['targets_2d'], {}), '(targets_2d)\n', (6757, 6769), True, 'import numpy as np\n'), ((5940, 6037), 'cv2.calcOpticalFlowFarneback', 'cv2.calcOpticalFlowFarneback', (['self.prev_frame', 'self.cur_frame', 'None', '(0.5)', '(3)', '(15)', '(3)', '(5)', '(1.2)', '(0)'], {}), '(self.prev_frame, self.cur_frame, None, 0.5, 3,\n 15, 3, 5, 1.2, 0)\n', (5968, 6037), False, 'import cv2\n'), ((10200, 10236), 'numpy.dot', 'np.dot', (['predicted_pos', 'predicted_vel'], {}), '(predicted_pos, predicted_vel)\n', (10206, 10236), True, 'import numpy as np\n'), ((10238, 10267), 'numpy.linalg.norm', 'np.linalg.norm', (['predicted_pos'], {}), '(predicted_pos)\n', (10252, 10267), True, 'import numpy as np\n'), ((10321, 10350), 'numpy.linalg.norm', 'np.linalg.norm', (['predicted_vel'], {}), '(predicted_vel)\n', (10335, 10350), True, 'import numpy as np\n')]
|
"""PhoSim Instance Catalog"""
from __future__ import absolute_import, division, print_function
import numpy as np
from lsst.sims.catUtils.exampleCatalogDefinitions import (PhoSimCatalogZPoint,
PhoSimCatalogPoint,
PhoSimCatalogSersic2D,
PhoSimCatalogSN)
from .twinklesVariabilityMixins import VariabilityTwinkles
from lsst.sims.catUtils.mixins import VariabilityAGN, PhotometryGalaxies
from lsst.sims.catUtils.exampleCatalogDefinitions.phoSimCatalogExamples import PhoSimSpecMap as psmp
from lsst.sims.catalogs.definitions import CompoundInstanceCatalog
from lsst.sims.catalogs.db import CompoundCatalogDBObject
#__all__ = ['TwinklesCatalogZPoint', 'TwinklesPhoSimCatalogSN']
__all__ = ["TwinklesCatalogPoint", "TwinklesCatalogSersic2D",
"TwinklesCatalogZPoint", "TwinklesCatalogSN", "TwinklesCompoundInstanceCatalog"]
twinkles_sn_sed_dir = 'spectra_files'
twinkles_spec_map = psmp
twinkles_spec_map.subdir_map['(^specFile_)'] = twinkles_sn_sed_dir
class TwinklesCatalogPoint(PhoSimCatalogPoint):
specFileMap = twinkles_spec_map
class TwinklesCatalogSersic2D(PhoSimCatalogSersic2D):
specFileMap = twinkles_spec_map
class TwinklesCatalogZPoint(PhoSimCatalogZPoint, VariabilityTwinkles, VariabilityAGN):
"""
PhoSim Instance Catalog Class for strongly lensed (and therefore time-delayed)
AGN
"""
specFileMap = twinkles_spec_map
catalog_type = 'twinkles_catalog_ZPOINT'
class TwinklesCatalogSN(PhoSimCatalogSN):
"""
Modification of the PhoSimCatalogSN mixin to provide shorter sedFileNames
by leaving out the parts of the directory name
"""
def get_shorterFileNames(self):
fnames = self.column_by_name('sedFilepath')
sep = 'spectra_files/specFile_'
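        # keep only the trailing 'spectra_files/specFile_<...>' portion of each path,
        # dropping any leading directories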
split_names = []
for fname in fnames:
if 'None' not in fname:
fname = sep + fname.split(sep)[-1]
else:
fname = 'None'
split_names.append(fname)
return np.array(split_names)
# column_outputs = PhoSimCatalogSN.column_outputs
# column_outputs[PhoSimCatalogSN.column_outputs.index('sedFilepath')] = \
# 'shorterFileNames'
column_outputs = ['prefix', 'uniqueId', 'raPhoSim', 'decPhoSim',
'phoSimMagNorm', 'shorterFileNames', 'redshift',
'gamma1', 'gamma2', 'kappa', 'raOffset', 'decOffset',
'spatialmodel', 'internalExtinctionModel',
'galacticExtinctionModel', 'galacticAv', 'galacticRv']
cannot_be_null = ['x0', 't0', 'z', 'shorterFileNames']
class TwinklesCompoundInstanceCatalog(CompoundInstanceCatalog):
use_spec_map = twinkles_spec_map
def write_catalog(self, filename, chunk_size=None, write_header=True, write_mode='w'):
"""
Write the stored list of InstanceCatalogs to a single ASCII output catalog.
@param [in] filename is the name of the file to be written
@param [in] chunk_size is an optional parameter telling the CompoundInstanceCatalog
to query the database in manageable chunks (in case returning the whole catalog
takes too much memory)
@param [in] write_header a boolean specifying whether or not to add a header
to the output catalog (Note: only one header will be written; there will not be
a header for each InstanceCatalog in the CompoundInstanceCatalog; default True)
@param [in] write_mode is 'w' if you want to overwrite the output file or
'a' if you want to append to an existing output file (default: 'w')
"""
instantiated_ic_list = [None]*len(self._ic_list)
# first, loop over all of the InstanceCatalog and CatalogDBObject classes, pre-processing
# them (i.e. verifying that they have access to all of the columns they need)
for ix, (icClass, dboClass) in enumerate(zip(self._ic_list, self._dbo_list)):
dbo = dboClass()
ic = icClass(dbo, obs_metadata=self._obs_metadata)
# assign all non-private member variables of the CompoundInstanceCatalog
# to the instantiated InstanceCatalogs
for kk in self.__dict__:
if kk[0] != '_' and not hasattr(self.__dict__[kk], '__call__'):
setattr(ic, kk, self.__dict__[kk])
for kk in self.__class__.__dict__:
if kk[0] != '_' and not hasattr(self.__class__.__dict__[kk], '__call__'):
setattr(ic, kk, self.__class__.__dict__[kk])
ic._write_pre_process()
instantiated_ic_list[ix] = ic
for row in self._dbObjectGroupList:
if len(row) == 1:
ic = instantiated_ic_list[row[0]]
ic._query_and_write(filename, chunk_size=chunk_size,
write_header=write_header, write_mode=write_mode,
obs_metadata=self._obs_metadata,
constraint=self._constraint)
write_mode = 'a'
write_header = False
default_compound_dbo = None
if self._compoundDBclass is not None:
if not hasattr(self._compoundDBclass, '__getitem__'):
default_compound_dbo = CompoundCatalogDBObject
else:
for dbo in self._compoundDBclass:
if dbo._table_restriction is None:
default_compound_dbo = dbo
break
if default_compound_dbo is None:
                    default_compound_dbo = CompoundCatalogDBObject
for row in self._dbObjectGroupList:
if len(row) > 1:
dbObjClassList = [self._dbo_list[ix] for ix in row]
catList = [instantiated_ic_list[ix] for ix in row]
for cat in catList:
cat._pre_screen = True
if self._compoundDBclass is None:
compound_dbo = CompoundCatalogDBObject(dbObjClassList)
elif not hasattr(self._compoundDBclass, '__getitem__'):
# if self._compoundDBclass is not a list
try:
compound_dbo = self._compoundDBclass(dbObjClassList)
except:
compound_dbo = default_compound_dbo(dbObjClassList)
else:
compound_dbo = None
for candidate in self._compoundDBclass:
use_it = True
if False in [candidate._table_restriction is not None and
dbo.tableid in candidate._table_restriction
for dbo in dbObjClassList]:
use_it = False
if use_it:
compound_dbo = candidate(dbObjClassList)
break
if compound_dbo is None:
compound_dbo = default_compound_dbo(dbObjClassList)
compound_dbo.mjd = self._obs_metadata.mjd.TAI
compound_dbo.specFileMap = self.use_spec_map
self._write_compound(catList, compound_dbo, filename,
chunk_size=chunk_size, write_header=write_header,
write_mode=write_mode)
write_mode = 'a'
write_header = False
|
[
"lsst.sims.catalogs.db.CompoundCatalogDBObject",
"numpy.array"
] |
[((2155, 2176), 'numpy.array', 'np.array', (['split_names'], {}), '(split_names)\n', (2163, 2176), True, 'import numpy as np\n'), ((6160, 6199), 'lsst.sims.catalogs.db.CompoundCatalogDBObject', 'CompoundCatalogDBObject', (['dbObjClassList'], {}), '(dbObjClassList)\n', (6183, 6199), False, 'from lsst.sims.catalogs.db import CompoundCatalogDBObject\n')]
|
# more or less the same simulation, but split up into chunks that fit into memory
# for large states (CA, IA, KS, OK, TX)
# chunks of 2000 locations are simulated one at a time, each writing a temporary file
location_chunk = 2000
import argparse
import datetime
import glob
import math
import numpy as np
import os
import rasterio
import statsmodels.api as sm
import pandas as pd
import time
import xarray as xr
import sys
sys.path.append('../')
from functools import reduce
from matplotlib import pyplot as plt
from scipy.interpolate import interp1d
from utils import windpower_simulation_era5_large
from dask.diagnostics import ProgressBar
ProgressBar().register()
from paths_usa import *
# get state and GWA version
parser = argparse.ArgumentParser(description='Insert state and optionally GWA')
parser.add_argument('-state')
parser.add_argument('-GWA')
args = parser.parse_args()
state = args.state
if args.GWA is None:
GWA = "3"
else:
GWA = args.GWA
if GWA == "2":
results_path = results_path + '/results_GWA2'
if not os.path.exists(results_path):
os.mkdir(results_path)
startGWA = '1987'
endGWA = '2016'
else:
startGWA = '2008'
endGWA = '2017'
# define start date for simulation
startyearmonth = '2000-12'
outfile = results_path + '/windpower_??_ERA5_GWA.nc'
if results_path + '/windpower_' + state + '_ERA5_GWA.nc' not in glob.glob(outfile):
wind = xr.open_mfdataset(era_path + "/eff_ws/era5_wind_USA_*.nc", chunks = {'time': 38})
alpha = xr.open_mfdataset(era_path + "/eff_ws/era5_alpha_USA_*.nc", chunks = {'time': 38})
# with GWA
turbine_data_era_gwa = pd.read_csv(usa_path + '/turbine_data_era_gwa' + GWA + '.csv', parse_dates=['commissioning'])
if GWA == "3":
if state == 'PR':
GWA = xr.open_rasterio(usa_path+'/GWA/GWA3_PR100m.tif')
else:
GWA = xr.open_rasterio(usa_path+'/GWA/GWA3_USA100m.tif')
else:
if state == 'AK':
GWA = xr.open_rasterio(usa_path+'/GWA/GWA_AK100m.tif')
elif state == 'HI':
GWA = xr.open_rasterio(usa_path+'/GWA/GWA_HI100m.tif')
elif state == 'PR':
GWA = xr.open_rasterio(usa_path+'/GWA/GWA_PR100m.tif')
else:
GWA = xr.open_rasterio(usa_path+'/GWA/GWA_USA100m.tif')
ind = turbine_data_era_gwa.state == state
print('calculating ERA5 ' + state + ' GWA')
t1 = time.time()
# number of locations in state
dat_len = sum(ind)
numit = round(dat_len/location_chunk+0.5) # number of necessary iterations
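    # e.g. 4500 locations with chunks of 2000 -> round(4500/2000 + 0.5) = 3 iterations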
i1 = 0
i2 = i1 + location_chunk
for it in range(numit):
outfile_temp = results_path + "/wp_"+state+"_ERA5_GWA_temp" + str(it+1) +".nc"
if i2 > dat_len:
i2 = dat_len
if outfile_temp not in glob.glob(results_path + "/wp_"+state+"_ERA5_GWA_temp*.nc"):
wps = windpower_simulation_era5_large(wind.wh100,
alpha.alpha,
turbine_data_era_gwa.height[ind].values[i1:i2],
turbine_data_era_gwa.capacity[ind].values[i1:i2],
turbine_data_era_gwa.sp[ind].values[i1:i2],
turbine_data_era_gwa.lon[ind].values[i1:i2],
turbine_data_era_gwa.lat[ind].values[i1:i2],
pd.to_datetime(turbine_data_era_gwa.commissioning[ind].values[i1:i2]).year.values,
startyearmonth,
GWA,
startGWA,
endGWA)
# adapt numbers of locations in dataset
wps = wps.assign_coords(location = np.arange(i1,i2))
# save temporary file
print('saving to '+results_path + "/wp_"+state+"_ERA5_GWA_temp" + str(it+1) +".nc")
wps.to_dataset(name='wp').to_netcdf(results_path + "/wp_"+state+"_ERA5_GWA_temp" + str(it+1) +".nc")
print('saved to '+results_path + "/wp_"+state+"_ERA5_GWA_temp" + str(it+1) +".nc")
wps.close()
del(wps)
i1 = i2
i2 = i2 + location_chunk
print(round(i1/dat_len,3)*100,'% done in ',state)
# merge and delete temporary files
wps = xr.open_mfdataset(results_path + "/wp_"+state+"_ERA5_GWA_temp*.nc", chunks = {'time': 100})
    print('saving to '+results_path + "/windpower_"+state+"_ERA5_GWA.nc")
wps.drop(['x','y']).to_netcdf(results_path + "/windpower_"+state+"_ERA5_GWA.nc")
print('saved to '+results_path + "/windpower_"+state+"_ERA5_GWA.nc")
t2 = time.time()
# remove temporary files
for file in glob.glob(results_path + "/wp_"+state+"_ERA5_GWA_temp*.nc"):
os.remove(file)
print(t2-t1)
|
[
"sys.path.append",
"os.mkdir",
"os.remove",
"argparse.ArgumentParser",
"pandas.read_csv",
"xarray.open_rasterio",
"os.path.exists",
"time.time",
"dask.diagnostics.ProgressBar",
"numpy.arange",
"pandas.to_datetime",
"glob.glob",
"xarray.open_mfdataset"
] |
[((457, 479), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (472, 479), False, 'import sys\n'), ((766, 836), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Insert state and optionally GWA"""'}), "(description='Insert state and optionally GWA')\n", (789, 836), False, 'import argparse\n'), ((1410, 1428), 'glob.glob', 'glob.glob', (['outfile'], {}), '(outfile)\n', (1419, 1428), False, 'import glob\n'), ((1442, 1521), 'xarray.open_mfdataset', 'xr.open_mfdataset', (["(era_path + '/eff_ws/era5_wind_USA_*.nc')"], {'chunks': "{'time': 38}"}), "(era_path + '/eff_ws/era5_wind_USA_*.nc', chunks={'time': 38})\n", (1459, 1521), True, 'import xarray as xr\n'), ((1536, 1621), 'xarray.open_mfdataset', 'xr.open_mfdataset', (["(era_path + '/eff_ws/era5_alpha_USA_*.nc')"], {'chunks': "{'time': 38}"}), "(era_path + '/eff_ws/era5_alpha_USA_*.nc', chunks={'time': 38}\n )\n", (1553, 1621), True, 'import xarray as xr\n'), ((1661, 1759), 'pandas.read_csv', 'pd.read_csv', (["(usa_path + '/turbine_data_era_gwa' + GWA + '.csv')"], {'parse_dates': "['commissioning']"}), "(usa_path + '/turbine_data_era_gwa' + GWA + '.csv', parse_dates=\n ['commissioning'])\n", (1672, 1759), True, 'import pandas as pd\n'), ((2431, 2442), 'time.time', 'time.time', ([], {}), '()\n', (2440, 2442), False, 'import time\n'), ((4574, 4671), 'xarray.open_mfdataset', 'xr.open_mfdataset', (["(results_path + '/wp_' + state + '_ERA5_GWA_temp*.nc')"], {'chunks': "{'time': 100}"}), "(results_path + '/wp_' + state + '_ERA5_GWA_temp*.nc',\n chunks={'time': 100})\n", (4591, 4671), True, 'import xarray as xr\n'), ((4926, 4937), 'time.time', 'time.time', ([], {}), '()\n', (4935, 4937), False, 'import time\n'), ((4988, 5051), 'glob.glob', 'glob.glob', (["(results_path + '/wp_' + state + '_ERA5_GWA_temp*.nc')"], {}), "(results_path + '/wp_' + state + '_ERA5_GWA_temp*.nc')\n", (4997, 5051), False, 'import glob\n'), ((678, 691), 'dask.diagnostics.ProgressBar', 'ProgressBar', ([], {}), '()\n', (689, 691), False, 'from dask.diagnostics import ProgressBar\n'), ((1079, 1107), 'os.path.exists', 'os.path.exists', (['results_path'], {}), '(results_path)\n', (1093, 1107), False, 'import os\n'), ((1117, 1139), 'os.mkdir', 'os.mkdir', (['results_path'], {}), '(results_path)\n', (1125, 1139), False, 'import os\n'), ((5057, 5072), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (5066, 5072), False, 'import os\n'), ((1818, 1869), 'xarray.open_rasterio', 'xr.open_rasterio', (["(usa_path + '/GWA/GWA3_PR100m.tif')"], {}), "(usa_path + '/GWA/GWA3_PR100m.tif')\n", (1834, 1869), True, 'import xarray as xr\n'), ((1900, 1952), 'xarray.open_rasterio', 'xr.open_rasterio', (["(usa_path + '/GWA/GWA3_USA100m.tif')"], {}), "(usa_path + '/GWA/GWA3_USA100m.tif')\n", (1916, 1952), True, 'import xarray as xr\n'), ((2005, 2055), 'xarray.open_rasterio', 'xr.open_rasterio', (["(usa_path + '/GWA/GWA_AK100m.tif')"], {}), "(usa_path + '/GWA/GWA_AK100m.tif')\n", (2021, 2055), True, 'import xarray as xr\n'), ((2816, 2879), 'glob.glob', 'glob.glob', (["(results_path + '/wp_' + state + '_ERA5_GWA_temp*.nc')"], {}), "(results_path + '/wp_' + state + '_ERA5_GWA_temp*.nc')\n", (2825, 2879), False, 'import glob\n'), ((2100, 2150), 'xarray.open_rasterio', 'xr.open_rasterio', (["(usa_path + '/GWA/GWA_HI100m.tif')"], {}), "(usa_path + '/GWA/GWA_HI100m.tif')\n", (2116, 2150), True, 'import xarray as xr\n'), ((2195, 2245), 'xarray.open_rasterio', 'xr.open_rasterio', (["(usa_path + '/GWA/GWA_PR100m.tif')"], {}), "(usa_path + 
'/GWA/GWA_PR100m.tif')\n", (2211, 2245), True, 'import xarray as xr\n'), ((2276, 2327), 'xarray.open_rasterio', 'xr.open_rasterio', (["(usa_path + '/GWA/GWA_USA100m.tif')"], {}), "(usa_path + '/GWA/GWA_USA100m.tif')\n", (2292, 2327), True, 'import xarray as xr\n'), ((3955, 3972), 'numpy.arange', 'np.arange', (['i1', 'i2'], {}), '(i1, i2)\n', (3964, 3972), True, 'import numpy as np\n'), ((3534, 3603), 'pandas.to_datetime', 'pd.to_datetime', (['turbine_data_era_gwa.commissioning[ind].values[i1:i2]'], {}), '(turbine_data_era_gwa.commissioning[ind].values[i1:i2])\n', (3548, 3603), True, 'import pandas as pd\n')]
|
"""OVK learning, unit tests.
The :mod:`sklearn.tests.test_semisuo` tests semisup module.
"""
import operalib as ovk
import numpy as np
def test_semisup_linop():
"""Test ovk.semisup.SemisupLinop."""
np.random.seed()
n = 100
p = 5
lbda2 = .1
# supervised indices
B = np.random.randint(2, size=(n)).astype(np.bool)
# Graph Laplacian
n_unsup = np.sum(~B)
L = np.random.randn(n_unsup, n_unsup)
L = np.dot(L, L.T)
U, J = ovk.ridge._SemisupLinop(lbda2, B, L, p).gen()
y = np.random.randn(n, p)
# lbda2 * np.dot(L, y[~B, :]).ravel()
# print(np.concatenate((y[B, :].ravel(),
# lbda2 * np.dot(L, y[~B, :]).ravel())))
res = np.empty((n, p))
res[B, :] = y[B, :]
res[~B] = 0
assert np.allclose(J * y.ravel(), res.ravel())
res = np.empty((n, p))
res[B, :] = y[B, :]
res[~B] = lbda2 * np.dot(L, y[~B, :])
assert np.allclose(U * y.ravel(), res.ravel())
|
[
"numpy.random.seed",
"numpy.sum",
"numpy.random.randn",
"numpy.empty",
"numpy.random.randint",
"numpy.dot",
"operalib.ridge._SemisupLinop"
] |
[((210, 226), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (224, 226), True, 'import numpy as np\n'), ((383, 393), 'numpy.sum', 'np.sum', (['(~B)'], {}), '(~B)\n', (389, 393), True, 'import numpy as np\n'), ((402, 435), 'numpy.random.randn', 'np.random.randn', (['n_unsup', 'n_unsup'], {}), '(n_unsup, n_unsup)\n', (417, 435), True, 'import numpy as np\n'), ((444, 458), 'numpy.dot', 'np.dot', (['L', 'L.T'], {}), '(L, L.T)\n', (450, 458), True, 'import numpy as np\n'), ((526, 547), 'numpy.random.randn', 'np.random.randn', (['n', 'p'], {}), '(n, p)\n', (541, 547), True, 'import numpy as np\n'), ((713, 729), 'numpy.empty', 'np.empty', (['(n, p)'], {}), '((n, p))\n', (721, 729), True, 'import numpy as np\n'), ((832, 848), 'numpy.empty', 'np.empty', (['(n, p)'], {}), '((n, p))\n', (840, 848), True, 'import numpy as np\n'), ((895, 914), 'numpy.dot', 'np.dot', (['L', 'y[~B, :]'], {}), '(L, y[~B, :])\n', (901, 914), True, 'import numpy as np\n'), ((299, 327), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': 'n'}), '(2, size=n)\n', (316, 327), True, 'import numpy as np\n'), ((471, 510), 'operalib.ridge._SemisupLinop', 'ovk.ridge._SemisupLinop', (['lbda2', 'B', 'L', 'p'], {}), '(lbda2, B, L, p)\n', (494, 510), True, 'import operalib as ovk\n')]
|
#!/usr/bin/env python3
import numpy as np
def operadores():
a=np.random.randint(3,10,size=10)
b=np.random.randint(4,78,size=10)
print(np.add(a,b))
print(np.subtract(b,a))
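    # note: np.negative is a unary ufunc; the second argument below is the optional
    # out array, so b is overwritten with -a here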
print(np.negative(a,b))
print(np.multiply(a,b))
print(np.divide(a,b))
print(np.floor_divide(b,a))
print(np.power(b,a))
print(np.mod(b,a))
    # abs function
    # abs(): built into Python
    # np.abs() or np.absolute(): provided by numpy
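    # quick illustration of the absolute-value helpers mentioned above
    print(np.absolute(np.subtract(a, b)))  # np.abs is an alias of np.absolute; abs() also works on arrays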
if __name__=="__main__":
operadores()
|
[
"numpy.divide",
"numpy.multiply",
"numpy.subtract",
"numpy.floor_divide",
"numpy.power",
"numpy.negative",
"numpy.mod",
"numpy.random.randint",
"numpy.add"
] |
[((69, 102), 'numpy.random.randint', 'np.random.randint', (['(3)', '(10)'], {'size': '(10)'}), '(3, 10, size=10)\n', (86, 102), True, 'import numpy as np\n'), ((108, 141), 'numpy.random.randint', 'np.random.randint', (['(4)', '(78)'], {'size': '(10)'}), '(4, 78, size=10)\n', (125, 141), True, 'import numpy as np\n'), ((151, 163), 'numpy.add', 'np.add', (['a', 'b'], {}), '(a, b)\n', (157, 163), True, 'import numpy as np\n'), ((175, 192), 'numpy.subtract', 'np.subtract', (['b', 'a'], {}), '(b, a)\n', (186, 192), True, 'import numpy as np\n'), ((204, 221), 'numpy.negative', 'np.negative', (['a', 'b'], {}), '(a, b)\n', (215, 221), True, 'import numpy as np\n'), ((233, 250), 'numpy.multiply', 'np.multiply', (['a', 'b'], {}), '(a, b)\n', (244, 250), True, 'import numpy as np\n'), ((262, 277), 'numpy.divide', 'np.divide', (['a', 'b'], {}), '(a, b)\n', (271, 277), True, 'import numpy as np\n'), ((289, 310), 'numpy.floor_divide', 'np.floor_divide', (['b', 'a'], {}), '(b, a)\n', (304, 310), True, 'import numpy as np\n'), ((322, 336), 'numpy.power', 'np.power', (['b', 'a'], {}), '(b, a)\n', (330, 336), True, 'import numpy as np\n'), ((348, 360), 'numpy.mod', 'np.mod', (['b', 'a'], {}), '(b, a)\n', (354, 360), True, 'import numpy as np\n')]
|
"""This module creates a new dataframe with a movie id and its corresponding
mean sentiment score. Mean sentiment score is computed by taking the average of
the sentiment scores for all the movie's comments
"""
from os import listdir
import os.path as op
import pandas as pd
import numpy as np
from .analyze_comments_tblob import analyze_comments
import movie_analysis as mv
data_path = op.join(mv.__path__[0], 'data/movie_comments')
def get_sentiment_score():
"""This function makes a new df with the movie_id and sentiment score
as columns
"""
final_df = pd.DataFrame(columns=['movie_id', 'sentiment_score'])
filenames2 = find_csv_filenames(data_path)
for name in filenames2:
new_name = data_path+"/"+name
df = pd.read_csv(new_name, encoding='latin1')
sentiment_df = pd.DataFrame(data=df)
sentiment_df.columns = ["comment"]
if not sentiment_df.empty:
sentiment_df = analyze_comments(sentiment_df)
if name.endswith('.csv'):
name = name[:-4]
sentiment_score = np.asarray(sentiment_df.iloc[:, 1], dtype=np.float).mean()
final_df = add_row(name, sentiment_score, final_df)
# final_df.to_csv("sentiment_scores.csv", encoding='latin1')
return final_df
def find_csv_filenames(path_to_dir, suffix=".csv"):
"""This function returns a list of all the filenames that end with '.csv'
"""
filenames = listdir(path_to_dir)
return [filename for filename in filenames if filename.endswith(suffix)]
def add_row(movie_id, mean_score, final_df):
"""This function adds a row to the data frame with the movie_id and corresponding
sentiment_score values
"""
df2 = pd.DataFrame([[movie_id, mean_score]], columns=['movie_id', 'sentiment_score'])
final_df = final_df.append(df2, ignore_index=True)
return final_df
|
[
"pandas.DataFrame",
"pandas.read_csv",
"numpy.asarray",
"os.path.join",
"os.listdir"
] |
[((389, 435), 'os.path.join', 'op.join', (['mv.__path__[0]', '"""data/movie_comments"""'], {}), "(mv.__path__[0], 'data/movie_comments')\n", (396, 435), True, 'import os.path as op\n'), ((577, 630), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['movie_id', 'sentiment_score']"}), "(columns=['movie_id', 'sentiment_score'])\n", (589, 630), True, 'import pandas as pd\n'), ((1444, 1464), 'os.listdir', 'listdir', (['path_to_dir'], {}), '(path_to_dir)\n', (1451, 1464), False, 'from os import listdir\n'), ((1720, 1799), 'pandas.DataFrame', 'pd.DataFrame', (['[[movie_id, mean_score]]'], {'columns': "['movie_id', 'sentiment_score']"}), "([[movie_id, mean_score]], columns=['movie_id', 'sentiment_score'])\n", (1732, 1799), True, 'import pandas as pd\n'), ((757, 797), 'pandas.read_csv', 'pd.read_csv', (['new_name'], {'encoding': '"""latin1"""'}), "(new_name, encoding='latin1')\n", (768, 797), True, 'import pandas as pd\n'), ((821, 842), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'df'}), '(data=df)\n', (833, 842), True, 'import pandas as pd\n'), ((1080, 1131), 'numpy.asarray', 'np.asarray', (['sentiment_df.iloc[:, 1]'], {'dtype': 'np.float'}), '(sentiment_df.iloc[:, 1], dtype=np.float)\n', (1090, 1131), True, 'import numpy as np\n')]
|
from datetime import datetime
import numpy as np
import warnings
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__created__ = datetime(2008, 8, 15)
__modified__ = datetime(2015, 7, 25)
__version__ = "1.5"
__status__ = "Development"
'''
Various vertical coordinates
Presently, only ocean s-coordinates are supported. Future plans will be to
include all of the vertical coordinate systems defined by the CF conventions.
vgrid.py function copied from https://github.com/kshedstrom/pyroms (Frederic Castruccio)
'''
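# Example usage (hypothetical parameter values) of the classes defined below:
#   vgrid = s_coordinate(h, theta_b=0.4, theta_s=5.0, tcline=50.0, N=30,
#                        vtransform=1, vstretching=1)
#   z_rho = vgrid.z_r[0, :]   # depths at rho points for the first time record
#   z_w   = vgrid.z_w[0, :]   # depths at w points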
def calculateVgrid(self):
print(("--->Setting up vertical coordinates using self.vtransform: %s self.vstretching: %s"%(self.vtransform,self.vstretching)))
if self.vtransform == 1:
vgrid = s_coordinate(self.h, self.theta_b, self.theta_s, self.tcline, self.nlevels, self.vtransform, self.vstretching, zeta=None)
elif self.vtransform == 2 and self.vstretching == 2:
vgrid = s_coordinate_2(self.h, self.theta_b, self.theta_s, self.tcline, self.nlevels, self.vtransform, self.vstretching, zeta=None)
elif self.vtransform == 2 and self.vstretching == 4:
vgrid = s_coordinate_4(self.h, self.theta_b, self.theta_s, self.tcline, self.nlevels, self.vtransform, self.vstretching, zeta=None)
else:
        raise Warning('Unknown vertical transformation Vtrans')
self.z_r = vgrid.z_r[0,:]
self.z_w = vgrid.z_w[0,:]
self.Cs_rho = vgrid.Cs_r
self.Cs_w = vgrid.Cs_w
self.s_rho = vgrid.s_rho
self.s_w = vgrid.s_w
class s_coordinate(object):
"""
Song and Haidvogel (1994) vertical coordinate transformation (Vtransform=1) and
stretching functions (Vstretching=1).
return an object that can be indexed to return depths
s = s_coordinate(h, theta_b, theta_s, Tcline, N)
"""
def __init__(self, h, theta_b, theta_s, tcline, N, vtransform, vstretching, zeta=None):
self.h = np.asarray(h)
self.hmin = h.min()
self.theta_b = theta_b
self.theta_s = theta_s
self.tcline = tcline
self.N = int(N)
self.Np = self.N+1
self.vtransform = vtransform
self.vstretching = vstretching
self.hc = min(self.hmin, self.tcline)
self.Vtrans = 1
if self.vtransform==1:
if (self.tcline > self.hmin):
                warnings.warn('Vertical transformation parameters are not defined correctly in either gridid.txt or in the history files: \n Tcline = %d and hmin = %d. \n You need to make sure that Tcline <= hmin when using transformation 1.' % (self.tcline, self.hmin))
self.c1 = 1.0
self.c2 = 2.0
self.p5 = 0.5
if zeta is None:
self.zeta = np.zeros(h.shape)
else:
self.zeta = zeta
self._get_s_rho()
self._get_s_w()
self._get_Cs_r()
self._get_Cs_w()
self.z_r = z_r(self.h, self.hc, self.N, self.s_rho, self.Cs_r, self.zeta, self.Vtrans)
self.z_w = z_w(self.h, self.hc, self.Np, self.s_w, self.Cs_w, self.zeta, self.Vtrans)
def _get_s_rho(self):
lev = np.arange(1,self.N+1,1)
ds = 1.0 / self.N
self.s_rho = -self.c1 + (lev - self.p5) * ds
def _get_s_w(self):
lev = np.arange(0,self.Np,1)
ds = 1.0 / (self.Np-1)
self.s_w = -self.c1 + lev * ds
def _get_Cs_r(self):
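        # Song and Haidvogel (1994) stretching:
        #   C(s) = (1 - theta_b) * sinh(theta_s * s) / sinh(theta_s)
        #          + theta_b * [tanh(theta_s * (s + 1/2)) / (2 * tanh(theta_s / 2)) - 1/2]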
if (self.theta_s >= 0):
Ptheta = np.sinh(self.theta_s * self.s_rho) / np.sinh(self.theta_s)
Rtheta = np.tanh(self.theta_s * (self.s_rho + self.p5)) / \
(self.c2 * np.tanh(self.p5 * self.theta_s)) - self.p5
self.Cs_r = (self.c1 - self.theta_b) * Ptheta + self.theta_b * Rtheta
else:
self.Cs_r = self.s_rho
def _get_Cs_w(self):
if (self.theta_s >= 0):
Ptheta = np.sinh(self.theta_s * self.s_w) / np.sinh(self.theta_s)
Rtheta = np.tanh(self.theta_s * (self.s_w + self.p5)) / \
(self.c2 * np.tanh(self.p5 * self.theta_s)) - self.p5
self.Cs_w = (self.c1 - self.theta_b) * Ptheta + self.theta_b * Rtheta
else:
self.Cs_w = self.s_w
class s_coordinate_2(s_coordinate):
"""
<NAME> (2005) UCLA-ROMS vertical coordinate transformation (Vtransform=2) and
stretching functions (Vstretching=2).
return an object that can be indexed to return depths
s = s_coordinate_2(h, theta_b, theta_s, Tcline, N)
"""
def __init__(self, h, theta_b, theta_s, tcline, N, vtransform, vstretching, zeta=None):
self.h = np.asarray(h)
self.hmin = h.min()
self.theta_b = theta_b
self.theta_s = theta_s
self.tcline = tcline
self.N = int(N)
self.Np = self.N+1
self.vtransform = vtransform
self.vstretching = vstretching
self.hc = self.tcline
self.Vtrans = 2
self.Aweight = 1.0
self.Bweight = 1.0
self.c1 = 1.0
self.c2 = 2.0
self.p5 = 0.5
if zeta is None:
self.zeta = np.zeros(h.shape)
else:
self.zeta = zeta
self._get_s_rho()
self._get_s_w()
self._get_Cs_r()
self._get_Cs_w()
self.z_r = z_r(self.h, self.hc, self.N, self.s_rho, self.Cs_r, self.zeta, self.Vtrans)
self.z_w = z_w(self.h, self.hc, self.Np, self.s_w, self.Cs_w, self.zeta, self.Vtrans)
def _get_s_rho(self):
super(s_coordinate_2, self)._get_s_rho()
def _get_s_w(self):
super(s_coordinate_2, self)._get_s_w()
def _get_Cs_r(self):
if (self.theta_s >= 0):
Csur = (self.c1 - np.cosh(self.theta_s * self.s_rho)) / \
(np.cosh(self.theta_s) - self.c1)
if (self.theta_b >= 0):
Cbot = np.sinh(self.theta_b * (self.s_rho + self.c1)) / \
np.sinh(self.theta_b) - self.c1
Cweight = (self.s_rho + self.c1)**self.Aweight * \
(self.c1 + (self.Aweight / self.Bweight) * \
(self.c1 - (self.s_rho + self.c1)**self.Bweight))
self.Cs_r = Cweight * Csur + (self.c1 - Cweight) * Cbot
else:
self.Cs_r = Csur
else:
self.Cs_r = self.s_rho
def _get_Cs_w(self):
if (self.theta_s >= 0):
Csur = (self.c1 - np.cosh(self.theta_s * self.s_w)) / \
(np.cosh(self.theta_s) - self.c1)
if (self.theta_b >= 0):
Cbot = np.sinh(self.theta_b * (self.s_w + self.c1)) / \
np.sinh(self.theta_b) - self.c1
Cweight = (self.s_w + self.c1)**self.Aweight * \
(self.c1 + (self.Aweight / self.Bweight) * \
(self.c1 - (self.s_w + self.c1)**self.Bweight))
self.Cs_w = Cweight * Csur + (self.c1 - Cweight) * Cbot
else:
self.Cs_w = Csur
else:
self.Cs_w = self.s_w
class s_coordinate_4(s_coordinate):
"""
<NAME> (2005) UCLA-ROMS vertical coordinate transformation (Vtransform=2) and
stretching functions (Vstretching=4).
return an object that can be indexed to return depths
s = s_coordinate_4(h, theta_b, theta_s, Tcline, N)
"""
def __init__(self, h, theta_b, theta_s, tcline, N, vtransform, vstretching, zeta=None):
self.h = np.asarray(h)
self.hmin = h.min()
self.theta_b = theta_b
self.theta_s = theta_s
self.tcline = tcline
self.N = int(N)
self.Np = self.N+1
self.vtransform = vtransform
self.vstretching = vstretching
self.hc = self.tcline
self.Vtrans = 4
self.c1 = 1.0
self.c2 = 2.0
self.p5 = 0.5
if zeta is None:
self.zeta = np.zeros(h.shape)
else:
self.zeta = zeta
self._get_s_rho()
self._get_s_w()
self._get_Cs_r()
self._get_Cs_w()
self.z_r = z_r(self.h, self.hc, self.N, self.s_rho, self.Cs_r, self.zeta, self.Vtrans)
self.z_w = z_w(self.h, self.hc, self.Np, self.s_w, self.Cs_w, self.zeta, self.Vtrans)
def _get_s_rho(self):
super(s_coordinate_4, self)._get_s_rho()
def _get_s_w(self):
super(s_coordinate_4, self)._get_s_w()
def _get_Cs_r(self):
if (self.theta_s > 0):
Csur = (self.c1 - np.cosh(self.theta_s * self.s_rho)) / \
(np.cosh(self.theta_s) - self.c1)
else:
Csur = -self.s_rho**2
if (self.theta_b > 0):
Cbot = (np.exp(self.theta_b * Csur) - self.c1 ) / \
(self.c1 - np.exp(-self.theta_b))
self.Cs_r = Cbot
else:
self.Cs_r = Csur
def _get_Cs_w(self):
if (self.theta_s > 0):
Csur = (self.c1 - np.cosh(self.theta_s * self.s_w)) / \
(np.cosh(self.theta_s) - self.c1)
else:
Csur = -self.s_w**2
if (self.theta_b > 0):
Cbot = (np.exp(self.theta_b * Csur) - self.c1 ) / \
( self.c1 - np.exp(-self.theta_b) )
self.Cs_w = Cbot
else:
self.Cs_w = Csur
class z_r(object):
"""
return an object that can be indexed to return depths of rho point
z_r = z_r(h, hc, N, s_rho, Cs_r, zeta, Vtrans)
"""
def __init__(self, h, hc, N, s_rho, Cs_r, zeta, Vtrans):
self.h = h
self.hc = hc
self.N = N
self.s_rho = s_rho
self.Cs_r = Cs_r
self.zeta = zeta
self.Vtrans = Vtrans
def __getitem__(self, key):
if isinstance(key, tuple) and len(self.zeta.shape) > len(self.h.shape):
zeta = self.zeta[key[0]]
res_index = (slice(None),) + key[1:]
elif len(self.zeta.shape) > len(self.h.shape):
zeta = self.zeta[key]
res_index = slice(None)
else:
zeta = self.zeta
res_index = key
if self.h.ndim == zeta.ndim: # Assure a time-dimension exists
zeta = zeta[np.newaxis, :]
ti = zeta.shape[0]
z_r = np.empty((ti, self.N) + self.h.shape, 'd')
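        # Vtransform=1:   z = z0 + zeta * (1 + z0 / h),       z0 = hc*s + (h - hc)*C(s)
        # Vtransform=2/4: z = zeta + (zeta + h) * z0,         z0 = (hc*s + h*C(s)) / (hc + h)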
if self.Vtrans == 1:
for n in range(ti):
for k in range(self.N):
z0 = self.hc * self.s_rho[k] + (self.h - self.hc) * self.Cs_r[k]
z_r[n,k,:] = z0 + zeta[n,:] * (1.0 + z0 / self.h)
elif self.Vtrans == 2 or self.Vtrans == 4:
for n in range(ti):
for k in range(self.N):
z0 = (self.hc * self.s_rho[k] + self.h * self.Cs_r[k]) / \
(self.hc + self.h)
z_r[n,k,:] = zeta[n,:] + (zeta[n,:] + self.h) * z0
return np.squeeze(z_r[res_index])
class z_w(object):
"""
return an object that can be indexed to return depths of w point
z_w = z_w(h, hc, Np, s_w, Cs_w, zeta, Vtrans)
"""
def __init__(self, h, hc, Np, s_w, Cs_w, zeta, Vtrans):
self.h = h
self.hc = hc
self.Np = Np
self.s_w = s_w
self.Cs_w = Cs_w
self.zeta = zeta
self.Vtrans = Vtrans
def __getitem__(self, key):
if isinstance(key, tuple) and len(self.zeta.shape) > len(self.h.shape):
zeta = self.zeta[key[0]]
res_index = (slice(None),) + key[1:]
elif len(self.zeta.shape) > len(self.h.shape):
zeta = self.zeta[key]
res_index = slice(None)
else:
zeta = self.zeta
res_index = key
if self.h.ndim == zeta.ndim: # Assure a time-dimension exists
zeta = zeta[np.newaxis, :]
ti = zeta.shape[0]
z_w = np.empty((ti, self.Np) + self.h.shape, 'd')
if self.Vtrans == 1:
for n in range(ti):
for k in range(self.Np):
z0 = self.hc * self.s_w[k] + (self.h - self.hc) * self.Cs_w[k]
z_w[n,k,:] = z0 + zeta[n,:] * (1.0 + z0 / self.h)
elif self.Vtrans == 2 or self.Vtrans == 4:
for n in range(ti):
for k in range(self.Np):
z0 = (self.hc * self.s_w[k] + self.h * self.Cs_w[k]) / \
(self.hc + self.h)
z_w[n,k,:] = zeta[n,:] + (zeta[n,:] + self.h) * z0
return np.squeeze(z_w[res_index])
    def get_z_levels(self):
        """
        Fall back to the (negated) bathymetry as the depth levels; warn if no depth matrix is found.
        """
        self.z_r = -self.h
        if len(self.z_r) == 0:
            print("No depth matrix found in file %s" % (self.filename))
|
[
"numpy.tanh",
"numpy.empty",
"numpy.asarray",
"numpy.zeros",
"datetime.datetime",
"numpy.arange",
"numpy.exp",
"numpy.squeeze",
"warnings.warn",
"numpy.cosh",
"numpy.sinh"
] |
[((131, 152), 'datetime.datetime', 'datetime', (['(2008)', '(8)', '(15)'], {}), '(2008, 8, 15)\n', (139, 152), False, 'from datetime import datetime\n'), ((168, 189), 'datetime.datetime', 'datetime', (['(2015)', '(7)', '(25)'], {}), '(2015, 7, 25)\n', (176, 189), False, 'from datetime import datetime\n'), ((1891, 1904), 'numpy.asarray', 'np.asarray', (['h'], {}), '(h)\n', (1901, 1904), True, 'import numpy as np\n'), ((3091, 3118), 'numpy.arange', 'np.arange', (['(1)', '(self.N + 1)', '(1)'], {}), '(1, self.N + 1, 1)\n', (3100, 3118), True, 'import numpy as np\n'), ((3233, 3257), 'numpy.arange', 'np.arange', (['(0)', 'self.Np', '(1)'], {}), '(0, self.Np, 1)\n', (3242, 3257), True, 'import numpy as np\n'), ((4563, 4576), 'numpy.asarray', 'np.asarray', (['h'], {}), '(h)\n', (4573, 4576), True, 'import numpy as np\n'), ((7425, 7438), 'numpy.asarray', 'np.asarray', (['h'], {}), '(h)\n', (7435, 7438), True, 'import numpy as np\n'), ((10228, 10270), 'numpy.empty', 'np.empty', (['((ti, self.N) + self.h.shape)', '"""d"""'], {}), "((ti, self.N) + self.h.shape, 'd')\n", (10236, 10270), True, 'import numpy as np\n'), ((10863, 10889), 'numpy.squeeze', 'np.squeeze', (['z_r[res_index]'], {}), '(z_r[res_index])\n', (10873, 10889), True, 'import numpy as np\n'), ((11836, 11879), 'numpy.empty', 'np.empty', (['((ti, self.Np) + self.h.shape)', '"""d"""'], {}), "((ti, self.Np) + self.h.shape, 'd')\n", (11844, 11879), True, 'import numpy as np\n'), ((12470, 12496), 'numpy.squeeze', 'np.squeeze', (['z_w[res_index]'], {}), '(z_w[res_index])\n', (12480, 12496), True, 'import numpy as np\n'), ((2689, 2706), 'numpy.zeros', 'np.zeros', (['h.shape'], {}), '(h.shape)\n', (2697, 2706), True, 'import numpy as np\n'), ((5051, 5068), 'numpy.zeros', 'np.zeros', (['h.shape'], {}), '(h.shape)\n', (5059, 5068), True, 'import numpy as np\n'), ((7858, 7875), 'numpy.zeros', 'np.zeros', (['h.shape'], {}), '(h.shape)\n', (7866, 7875), True, 'import numpy as np\n'), ((2312, 2578), 'warnings.warn', 'warnings.warn', (['("""Vertical transformation parameters are not defined correctly in either gridid.txt or in the history files: \n Tcline = %d and hmin = %d. \n You need to make sure that Tcline <= hmin when using transformation 1."""\n % (self.Tcline, self.hmin))'], {}), '(\n """Vertical transformation parameters are not defined correctly in either gridid.txt or in the history files: \n Tcline = %d and hmin = %d. 
\n You need to make sure that Tcline <= hmin when using transformation 1."""\n % (self.Tcline, self.hmin))\n', (2325, 2578), False, 'import warnings\n'), ((3405, 3439), 'numpy.sinh', 'np.sinh', (['(self.theta_s * self.s_rho)'], {}), '(self.theta_s * self.s_rho)\n', (3412, 3439), True, 'import numpy as np\n'), ((3442, 3463), 'numpy.sinh', 'np.sinh', (['self.theta_s'], {}), '(self.theta_s)\n', (3449, 3463), True, 'import numpy as np\n'), ((3822, 3854), 'numpy.sinh', 'np.sinh', (['(self.theta_s * self.s_w)'], {}), '(self.theta_s * self.s_w)\n', (3829, 3854), True, 'import numpy as np\n'), ((3857, 3878), 'numpy.sinh', 'np.sinh', (['self.theta_s'], {}), '(self.theta_s)\n', (3864, 3878), True, 'import numpy as np\n'), ((3485, 3531), 'numpy.tanh', 'np.tanh', (['(self.theta_s * (self.s_rho + self.p5))'], {}), '(self.theta_s * (self.s_rho + self.p5))\n', (3492, 3531), True, 'import numpy as np\n'), ((3900, 3944), 'numpy.tanh', 'np.tanh', (['(self.theta_s * (self.s_w + self.p5))'], {}), '(self.theta_s * (self.s_w + self.p5))\n', (3907, 3944), True, 'import numpy as np\n'), ((5640, 5674), 'numpy.cosh', 'np.cosh', (['(self.theta_s * self.s_rho)'], {}), '(self.theta_s * self.s_rho)\n', (5647, 5674), True, 'import numpy as np\n'), ((5702, 5723), 'numpy.cosh', 'np.cosh', (['self.theta_s'], {}), '(self.theta_s)\n', (5709, 5723), True, 'import numpy as np\n'), ((6381, 6413), 'numpy.cosh', 'np.cosh', (['(self.theta_s * self.s_w)'], {}), '(self.theta_s * self.s_w)\n', (6388, 6413), True, 'import numpy as np\n'), ((6441, 6462), 'numpy.cosh', 'np.cosh', (['self.theta_s'], {}), '(self.theta_s)\n', (6448, 6462), True, 'import numpy as np\n'), ((8454, 8488), 'numpy.cosh', 'np.cosh', (['(self.theta_s * self.s_rho)'], {}), '(self.theta_s * self.s_rho)\n', (8461, 8488), True, 'import numpy as np\n'), ((8516, 8537), 'numpy.cosh', 'np.cosh', (['self.theta_s'], {}), '(self.theta_s)\n', (8523, 8537), True, 'import numpy as np\n'), ((8648, 8675), 'numpy.exp', 'np.exp', (['(self.theta_b * Csur)'], {}), '(self.theta_b * Csur)\n', (8654, 8675), True, 'import numpy as np\n'), ((8722, 8743), 'numpy.exp', 'np.exp', (['(-self.theta_b)'], {}), '(-self.theta_b)\n', (8728, 8743), True, 'import numpy as np\n'), ((8913, 8945), 'numpy.cosh', 'np.cosh', (['(self.theta_s * self.s_w)'], {}), '(self.theta_s * self.s_w)\n', (8920, 8945), True, 'import numpy as np\n'), ((8973, 8994), 'numpy.cosh', 'np.cosh', (['self.theta_s'], {}), '(self.theta_s)\n', (8980, 8994), True, 'import numpy as np\n'), ((9103, 9130), 'numpy.exp', 'np.exp', (['(self.theta_b * Csur)'], {}), '(self.theta_b * Csur)\n', (9109, 9130), True, 'import numpy as np\n'), ((9178, 9199), 'numpy.exp', 'np.exp', (['(-self.theta_b)'], {}), '(-self.theta_b)\n', (9184, 9199), True, 'import numpy as np\n'), ((3569, 3600), 'numpy.tanh', 'np.tanh', (['(self.p5 * self.theta_s)'], {}), '(self.p5 * self.theta_s)\n', (3576, 3600), True, 'import numpy as np\n'), ((3982, 4013), 'numpy.tanh', 'np.tanh', (['(self.p5 * self.theta_s)'], {}), '(self.p5 * self.theta_s)\n', (3989, 4013), True, 'import numpy as np\n'), ((5794, 5840), 'numpy.sinh', 'np.sinh', (['(self.theta_b * (self.s_rho + self.c1))'], {}), '(self.theta_b * (self.s_rho + self.c1))\n', (5801, 5840), True, 'import numpy as np\n'), ((5868, 5889), 'numpy.sinh', 'np.sinh', (['self.theta_b'], {}), '(self.theta_b)\n', (5875, 5889), True, 'import numpy as np\n'), ((6533, 6577), 'numpy.sinh', 'np.sinh', (['(self.theta_b * (self.s_w + self.c1))'], {}), '(self.theta_b * (self.s_w + self.c1))\n', (6540, 6577), True, 'import numpy as np\n'), 
((6605, 6626), 'numpy.sinh', 'np.sinh', (['self.theta_b'], {}), '(self.theta_b)\n', (6612, 6626), True, 'import numpy as np\n')]
|
import sys
import pytest
import numpy as np
from numpy.testing import assert_array_equal, IS_PYPY
class TestDLPack:
@pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.")
def test_dunder_dlpack_refcount(self):
x = np.arange(5)
y = x.__dlpack__()
assert sys.getrefcount(x) == 3
del y
assert sys.getrefcount(x) == 2
def test_dunder_dlpack_stream(self):
x = np.arange(5)
x.__dlpack__(stream=None)
with pytest.raises(RuntimeError):
x.__dlpack__(stream=1)
def test_strides_not_multiple_of_itemsize(self):
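        # a structured-field view has strides that are not a multiple of its own itemsize, so DLPack export must fail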
dt = np.dtype([('int', np.int32), ('char', np.int8)])
y = np.zeros((5,), dtype=dt)
z = y['int']
with pytest.raises(RuntimeError):
np._from_dlpack(z)
@pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.")
def test_from_dlpack_refcount(self):
x = np.arange(5)
y = np._from_dlpack(x)
assert sys.getrefcount(x) == 3
del y
assert sys.getrefcount(x) == 2
@pytest.mark.parametrize("dtype", [
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64,
np.float16, np.float32, np.float64,
np.complex64, np.complex128
])
def test_dtype_passthrough(self, dtype):
x = np.arange(5, dtype=dtype)
y = np._from_dlpack(x)
assert y.dtype == x.dtype
assert_array_equal(x, y)
def test_invalid_dtype(self):
x = np.asarray(np.datetime64('2021-05-27'))
with pytest.raises(TypeError):
np._from_dlpack(x)
def test_invalid_byte_swapping(self):
dt = np.dtype('=i8').newbyteorder()
x = np.arange(5, dtype=dt)
with pytest.raises(TypeError):
np._from_dlpack(x)
def test_non_contiguous(self):
x = np.arange(25).reshape((5, 5))
y1 = x[0]
assert_array_equal(y1, np._from_dlpack(y1))
y2 = x[:, 0]
assert_array_equal(y2, np._from_dlpack(y2))
y3 = x[1, :]
assert_array_equal(y3, np._from_dlpack(y3))
y4 = x[1]
assert_array_equal(y4, np._from_dlpack(y4))
y5 = np.diagonal(x).copy()
assert_array_equal(y5, np._from_dlpack(y5))
@pytest.mark.parametrize("ndim", range(33))
def test_higher_dims(self, ndim):
shape = (1,) * ndim
x = np.zeros(shape, dtype=np.float64)
assert shape == np._from_dlpack(x).shape
def test_dlpack_device(self):
x = np.arange(5)
assert x.__dlpack_device__() == (1, 0)
y = np._from_dlpack(x)
assert y.__dlpack_device__() == (1, 0)
z = y[::2]
assert z.__dlpack_device__() == (1, 0)
def dlpack_deleter_exception(self):
x = np.arange(5)
_ = x.__dlpack__()
raise RuntimeError
def test_dlpack_destructor_exception(self):
with pytest.raises(RuntimeError):
self.dlpack_deleter_exception()
def test_readonly(self):
x = np.arange(5)
x.flags.writeable = False
with pytest.raises(TypeError):
x.__dlpack__()
def test_ndim0(self):
x = np.array(1.0)
y = np._from_dlpack(x)
assert_array_equal(x, y)
def test_size1dims_arrays(self):
x = np.ndarray(dtype='f8', shape=(10, 5, 1), strides=(8, 80, 4),
buffer=np.ones(1000, dtype=np.uint8), order='F')
y = np._from_dlpack(x)
assert_array_equal(x, y)
|
[
"numpy.datetime64",
"numpy.testing.assert_array_equal",
"numpy.dtype",
"numpy.zeros",
"numpy.ones",
"sys.getrefcount",
"pytest.raises",
"pytest.mark.skipif",
"numpy.arange",
"numpy._from_dlpack",
"numpy.array",
"pytest.mark.parametrize",
"numpy.diagonal"
] |
[((124, 187), 'pytest.mark.skipif', 'pytest.mark.skipif', (['IS_PYPY'], {'reason': '"""PyPy can\'t get refcounts."""'}), '(IS_PYPY, reason="PyPy can\'t get refcounts.")\n', (142, 187), False, 'import pytest\n'), ((808, 871), 'pytest.mark.skipif', 'pytest.mark.skipif', (['IS_PYPY'], {'reason': '"""PyPy can\'t get refcounts."""'}), '(IS_PYPY, reason="PyPy can\'t get refcounts.")\n', (826, 871), False, 'import pytest\n'), ((1067, 1258), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.\n uint64, np.float16, np.float32, np.float64, np.complex64, np.complex128]'], {}), "('dtype', [np.int8, np.int16, np.int32, np.int64, np\n .uint8, np.uint16, np.uint32, np.uint64, np.float16, np.float32, np.\n float64, np.complex64, np.complex128])\n", (1090, 1258), False, 'import pytest\n'), ((243, 255), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (252, 255), True, 'import numpy as np\n'), ((429, 441), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (438, 441), True, 'import numpy as np\n'), ((621, 669), 'numpy.dtype', 'np.dtype', (["[('int', np.int32), ('char', np.int8)]"], {}), "([('int', np.int32), ('char', np.int8)])\n", (629, 669), True, 'import numpy as np\n'), ((682, 706), 'numpy.zeros', 'np.zeros', (['(5,)'], {'dtype': 'dt'}), '((5,), dtype=dt)\n', (690, 706), True, 'import numpy as np\n'), ((925, 937), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (934, 937), True, 'import numpy as np\n'), ((950, 968), 'numpy._from_dlpack', 'np._from_dlpack', (['x'], {}), '(x)\n', (965, 968), True, 'import numpy as np\n'), ((1344, 1369), 'numpy.arange', 'np.arange', (['(5)'], {'dtype': 'dtype'}), '(5, dtype=dtype)\n', (1353, 1369), True, 'import numpy as np\n'), ((1382, 1400), 'numpy._from_dlpack', 'np._from_dlpack', (['x'], {}), '(x)\n', (1397, 1400), True, 'import numpy as np\n'), ((1444, 1468), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['x', 'y'], {}), '(x, y)\n', (1462, 1468), False, 'from numpy.testing import assert_array_equal, IS_PYPY\n'), ((1726, 1748), 'numpy.arange', 'np.arange', (['(5)'], {'dtype': 'dt'}), '(5, dtype=dt)\n', (1735, 1748), True, 'import numpy as np\n'), ((2403, 2436), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'np.float64'}), '(shape, dtype=np.float64)\n', (2411, 2436), True, 'import numpy as np\n'), ((2534, 2546), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (2543, 2546), True, 'import numpy as np\n'), ((2606, 2624), 'numpy._from_dlpack', 'np._from_dlpack', (['x'], {}), '(x)\n', (2621, 2624), True, 'import numpy as np\n'), ((2791, 2803), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (2800, 2803), True, 'import numpy as np\n'), ((3039, 3051), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (3048, 3051), True, 'import numpy as np\n'), ((3191, 3204), 'numpy.array', 'np.array', (['(1.0)'], {}), '(1.0)\n', (3199, 3204), True, 'import numpy as np\n'), ((3217, 3235), 'numpy._from_dlpack', 'np._from_dlpack', (['x'], {}), '(x)\n', (3232, 3235), True, 'import numpy as np\n'), ((3244, 3268), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['x', 'y'], {}), '(x, y)\n', (3262, 3268), False, 'from numpy.testing import assert_array_equal, IS_PYPY\n'), ((3464, 3482), 'numpy._from_dlpack', 'np._from_dlpack', (['x'], {}), '(x)\n', (3479, 3482), True, 'import numpy as np\n'), ((3491, 3515), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['x', 'y'], {}), '(x, y)\n', (3509, 3515), False, 'from numpy.testing import 
assert_array_equal, IS_PYPY\n'), ((298, 316), 'sys.getrefcount', 'sys.getrefcount', (['x'], {}), '(x)\n', (313, 316), False, 'import sys\n'), ((351, 369), 'sys.getrefcount', 'sys.getrefcount', (['x'], {}), '(x)\n', (366, 369), False, 'import sys\n'), ((490, 517), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (503, 517), False, 'import pytest\n'), ((742, 769), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (755, 769), False, 'import pytest\n'), ((783, 801), 'numpy._from_dlpack', 'np._from_dlpack', (['z'], {}), '(z)\n', (798, 801), True, 'import numpy as np\n'), ((984, 1002), 'sys.getrefcount', 'sys.getrefcount', (['x'], {}), '(x)\n', (999, 1002), False, 'import sys\n'), ((1037, 1055), 'sys.getrefcount', 'sys.getrefcount', (['x'], {}), '(x)\n', (1052, 1055), False, 'import sys\n'), ((1527, 1554), 'numpy.datetime64', 'np.datetime64', (['"""2021-05-27"""'], {}), "('2021-05-27')\n", (1540, 1554), True, 'import numpy as np\n'), ((1570, 1594), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1583, 1594), False, 'import pytest\n'), ((1608, 1626), 'numpy._from_dlpack', 'np._from_dlpack', (['x'], {}), '(x)\n', (1623, 1626), True, 'import numpy as np\n'), ((1763, 1787), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1776, 1787), False, 'import pytest\n'), ((1801, 1819), 'numpy._from_dlpack', 'np._from_dlpack', (['x'], {}), '(x)\n', (1816, 1819), True, 'import numpy as np\n'), ((1948, 1967), 'numpy._from_dlpack', 'np._from_dlpack', (['y1'], {}), '(y1)\n', (1963, 1967), True, 'import numpy as np\n'), ((2022, 2041), 'numpy._from_dlpack', 'np._from_dlpack', (['y2'], {}), '(y2)\n', (2037, 2041), True, 'import numpy as np\n'), ((2096, 2115), 'numpy._from_dlpack', 'np._from_dlpack', (['y3'], {}), '(y3)\n', (2111, 2115), True, 'import numpy as np\n'), ((2167, 2186), 'numpy._from_dlpack', 'np._from_dlpack', (['y4'], {}), '(y4)\n', (2182, 2186), True, 'import numpy as np\n'), ((2255, 2274), 'numpy._from_dlpack', 'np._from_dlpack', (['y5'], {}), '(y5)\n', (2270, 2274), True, 'import numpy as np\n'), ((2924, 2951), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (2937, 2951), False, 'import pytest\n'), ((3099, 3123), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (3112, 3123), False, 'import pytest\n'), ((1683, 1698), 'numpy.dtype', 'np.dtype', (['"""=i8"""'], {}), "('=i8')\n", (1691, 1698), True, 'import numpy as np\n'), ((1868, 1881), 'numpy.arange', 'np.arange', (['(25)'], {}), '(25)\n', (1877, 1881), True, 'import numpy as np\n'), ((2202, 2216), 'numpy.diagonal', 'np.diagonal', (['x'], {}), '(x)\n', (2213, 2216), True, 'import numpy as np\n'), ((2462, 2480), 'numpy._from_dlpack', 'np._from_dlpack', (['x'], {}), '(x)\n', (2477, 2480), True, 'import numpy as np\n'), ((3410, 3439), 'numpy.ones', 'np.ones', (['(1000)'], {'dtype': 'np.uint8'}), '(1000, dtype=np.uint8)\n', (3417, 3439), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 4 07:10:55 2020
@author: sj
"""
import numpy as np
# left corner as (0,0), US in West and Asia in East
# only validated in Asia, North and East
#x as latitude (180), y as longitude (360),
# x = 114288 # latitude, filepath
# y = 214078 # longitude, filename
# z = zoom level
# x,y,z = 114288,214078,18
def g2latlng(x,y,z=18,vbs=0):
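    # inverse Web-Mercator tile formula with n = 2**z: lng = y/n*360 - 180, lat = atan(sinh(pi*(1 - 2*x/n)))
    # (x is the latitude-direction index and y the longitude-direction index, per the convention noted above)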
x,y = x+0.5,y+0.5 # to center
n = np.power(2,z)
lng = y / n * 360.0 - 180.0
lat_rad = np.arctan(np.sinh(np.pi * (1 - 2 * x / n)))
lat = lat_rad * 180.0 / np.pi
if vbs:
print(x,lat,y,lng)
return lat,lng
def to60(lat,lng,vbs=0):
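    # convert decimal degrees to (degrees, minutes, seconds) tuples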
lat0 = np.floor(lat)
tmp = (lat - lat0) * 60
lat1 = np.floor(tmp)
lat2 = (tmp - lat1) * 60
lat = int(lat0),int(lat1),lat2
lng0 = np.floor(lng)
tmp = (lng - lng0) * 60
lng1 = np.floor(tmp)
lng2 = (tmp - lng1) * 60
lng = int(lng0),int(lng1),lng2
if vbs:
print(lat,lng)
return lat,lng
if __name__ == '__main__':
if True:
x,y,z = 114453,213769,18
lat,lng = g2latlng(x,y,z,vbs=1)
to60(lat,lng,vbs=1)
|
[
"numpy.power",
"numpy.floor",
"numpy.sinh"
] |
[((444, 458), 'numpy.power', 'np.power', (['(2)', 'z'], {}), '(2, z)\n', (452, 458), True, 'import numpy as np\n'), ((686, 699), 'numpy.floor', 'np.floor', (['lat'], {}), '(lat)\n', (694, 699), True, 'import numpy as np\n'), ((741, 754), 'numpy.floor', 'np.floor', (['tmp'], {}), '(tmp)\n', (749, 754), True, 'import numpy as np\n'), ((843, 856), 'numpy.floor', 'np.floor', (['lng'], {}), '(lng)\n', (851, 856), True, 'import numpy as np\n'), ((898, 911), 'numpy.floor', 'np.floor', (['tmp'], {}), '(tmp)\n', (906, 911), True, 'import numpy as np\n'), ((516, 548), 'numpy.sinh', 'np.sinh', (['(np.pi * (1 - 2 * x / n))'], {}), '(np.pi * (1 - 2 * x / n))\n', (523, 548), True, 'import numpy as np\n')]
|
from PIL import Image
import numpy as np
import sys, os
from progress_bar import ProgressBar
def get_bit(pos, img):
# avoids modifying thumbnail
size = img.shape[0]*img.shape[1] - 4096
rgb = pos//size
if rgb > 2:
raise IndexError("Position is too large")
pos = pos % size + 4096
x,y = pos // img.shape[1], pos % img.shape[1]
return img[x][y][rgb] & 1
with Image.open(sys.argv[1]) as img:
with open(sys.argv[2], "w+") as out:
arrimg = np.array(img)
pos = 0
cur_char = ''
size_str = ""
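        # the hidden message starts with its ASCII length terminated by '|', followed by the payload;
        # each byte is rebuilt LSB-first from the pixel least-significant bits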
while cur_char != "|":
ord_chr = 0
for i in range(8):
bit = get_bit(pos, arrimg)
pos += 1
ord_chr = ord_chr | bit << i
cur_char = chr(ord_chr)
size_str += cur_char
size = int(size_str[:-1])
pb = ProgressBar(size)
pb.begin()
for i in range(size):
ord_chr = 0
for i in range(8):
bit = get_bit(pos, arrimg)
pos += 1
ord_chr = ord_chr | bit << i
out.write(chr(ord_chr))
pb.add_progress()
|
[
"progress_bar.ProgressBar",
"numpy.array",
"PIL.Image.open"
] |
[((395, 418), 'PIL.Image.open', 'Image.open', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (405, 418), False, 'from PIL import Image\n'), ((485, 498), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (493, 498), True, 'import numpy as np\n'), ((874, 891), 'progress_bar.ProgressBar', 'ProgressBar', (['size'], {}), '(size)\n', (885, 891), False, 'from progress_bar import ProgressBar\n')]
|
# Iterative Conway's game of life in Python / CUDA C
# this version is meant to illustrate the use of shared kernel memory in CUDA.
# written by <NAME> for "Hands on GPU Programming with Python and CUDA"
import pycuda.autoinit
import pycuda.driver as drv
from pycuda import gpuarray
from pycuda.compiler import SourceModule
import numpy as np
import matplotlib.pyplot as plt
from time import time
shared_ker = SourceModule("""
#define _iters 1000000
#define _X ( threadIdx.x + blockIdx.x * blockDim.x )
#define _Y ( threadIdx.y + blockIdx.y * blockDim.y )
#define _WIDTH ( blockDim.x * gridDim.x )
#define _HEIGHT ( blockDim.y * gridDim.y )
#define _XM(x) ( (x + _WIDTH) % _WIDTH )
#define _YM(y) ( (y + _HEIGHT) % _HEIGHT )
#define _INDEX(x,y) ( _XM(x) + _YM(y) * _WIDTH )
// return the number of living neighbors for a given cell
__device__ int nbrs(int x, int y, int * in)
{
return ( in[ _INDEX(x -1, y+1) ] + in[ _INDEX(x-1, y) ] + in[ _INDEX(x-1, y-1) ] \
+ in[ _INDEX(x, y+1)] + in[_INDEX(x, y - 1)] \
+ in[ _INDEX(x+1, y+1) ] + in[ _INDEX(x+1, y) ] + in[ _INDEX(x+1, y-1) ] );
}
// p_lattice will now be the pointer to the global lattice, lattice to the shared
__global__ void conway_ker_shared(int * p_lattice, int iters)
{
// x, y are the appropriate values for the cell covered by this thread
int x = _X, y = _Y;
__shared__ int lattice[32*32];
lattice[_INDEX(x,y)] = p_lattice[_INDEX(x,y)];
    __syncthreads(); // each thread copies its own value into the shared lattice, we need to wait for them to finish
for (int i = 0; i < iters; i++)
{
// count the number of neighbors around the current cell
int n = nbrs(x, y, lattice);
int cell_value;
// if the current cell is alive, then determine if it lives or dies for the next generation.
if ( lattice[_INDEX(x,y)] == 1)
switch(n)
{
// if the cell is alive: it remains alive only if it has 2 or 3 neighbors.
case 2:
case 3: cell_value = 1;
break;
default: cell_value = 0;
}
else if( lattice[_INDEX(x,y)] == 0 )
switch(n)
{
// a dead cell comes to life only if it has 3 neighbors that are alive.
case 3: cell_value = 1;
break;
default: cell_value = 0;
}
__syncthreads();
lattice[_INDEX(x,y)] = cell_value;
__syncthreads();
}
__syncthreads();
p_lattice[_INDEX(x,y)] = lattice[_INDEX(x,y)];
__syncthreads();
}
""")
conway_ker_shared = shared_ker.get_function("conway_ker_shared")
if __name__ == '__main__':
# set lattice size
N = 32
lattice = np.int32( np.random.choice([1,0], N*N, p=[0.25, 0.75]).reshape(N, N) )
lattice_gpu = gpuarray.to_gpu(lattice)
time1 = time()
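    # a single 32x32 block: the shared-memory lattice and __syncthreads() only cover one block, so the grid must stay (1,1,1)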
conway_ker_shared(lattice_gpu, np.int32(1e6), grid=(1,1,1), block=(32,32,1))
print("calc needed", (time() - time1)*1000, "ms")
fig = plt.figure(1)
plt.imshow(lattice_gpu.get())
plt.show()
|
[
"numpy.random.choice",
"pycuda.compiler.SourceModule",
"matplotlib.pyplot.show",
"time.time",
"matplotlib.pyplot.figure",
"numpy.int32",
"pycuda.gpuarray.to_gpu"
] |
[((417, 2837), 'pycuda.compiler.SourceModule', 'SourceModule', (['""" \n#define _iters 1000000 \n\n#define _X ( threadIdx.x + blockIdx.x * blockDim.x )\n#define _Y ( threadIdx.y + blockIdx.y * blockDim.y )\n\n#define _WIDTH ( blockDim.x * gridDim.x )\n#define _HEIGHT ( blockDim.y * gridDim.y )\n\n#define _XM(x) ( (x + _WIDTH) % _WIDTH )\n#define _YM(y) ( (y + _HEIGHT) % _HEIGHT )\n\n#define _INDEX(x,y) ( _XM(x) + _YM(y) * _WIDTH )\n\n// return the number of living neighbors for a given cell \n__device__ int nbrs(int x, int y, int * in)\n{\n return ( in[ _INDEX(x -1, y+1) ] + in[ _INDEX(x-1, y) ] + in[ _INDEX(x-1, y-1) ] + in[ _INDEX(x, y+1)] + in[_INDEX(x, y - 1)] + in[ _INDEX(x+1, y+1) ] + in[ _INDEX(x+1, y) ] + in[ _INDEX(x+1, y-1) ] );\n}\n\n// p_lattice will now be the pointer to the global lattice, lattice to the shared\n__global__ void conway_ker_shared(int * p_lattice, int iters)\n{\n // x, y are the appropriate values for the cell covered by this thread\n int x = _X, y = _Y;\n __shared__ int lattice[32*32];\n \n \n lattice[_INDEX(x,y)] = p_lattice[_INDEX(x,y)];\n __syncthreads(); // each thread copies its own value into the shared lattice, we need to wait for them to finisch\n\n for (int i = 0; i < iters; i++)\n {\n \n // count the number of neighbors around the current cell\n int n = nbrs(x, y, lattice);\n \n int cell_value;\n \n \n // if the current cell is alive, then determine if it lives or dies for the next generation.\n if ( lattice[_INDEX(x,y)] == 1)\n switch(n)\n {\n // if the cell is alive: it remains alive only if it has 2 or 3 neighbors.\n case 2:\n case 3: cell_value = 1;\n break;\n default: cell_value = 0; \n }\n else if( lattice[_INDEX(x,y)] == 0 )\n switch(n)\n {\n // a dead cell comes to life only if it has 3 neighbors that are alive.\n case 3: cell_value = 1;\n break;\n default: cell_value = 0; \n }\n \n __syncthreads();\n lattice[_INDEX(x,y)] = cell_value;\n __syncthreads();\n \n }\n \n __syncthreads();\n p_lattice[_INDEX(x,y)] = lattice[_INDEX(x,y)];\n __syncthreads();\n \n}\n"""'], {}), '(\n """ \n#define _iters 1000000 \n\n#define _X ( threadIdx.x + blockIdx.x * blockDim.x )\n#define _Y ( threadIdx.y + blockIdx.y * blockDim.y )\n\n#define _WIDTH ( blockDim.x * gridDim.x )\n#define _HEIGHT ( blockDim.y * gridDim.y )\n\n#define _XM(x) ( (x + _WIDTH) % _WIDTH )\n#define _YM(y) ( (y + _HEIGHT) % _HEIGHT )\n\n#define _INDEX(x,y) ( _XM(x) + _YM(y) * _WIDTH )\n\n// return the number of living neighbors for a given cell \n__device__ int nbrs(int x, int y, int * in)\n{\n return ( in[ _INDEX(x -1, y+1) ] + in[ _INDEX(x-1, y) ] + in[ _INDEX(x-1, y-1) ] + in[ _INDEX(x, y+1)] + in[_INDEX(x, y - 1)] + in[ _INDEX(x+1, y+1) ] + in[ _INDEX(x+1, y) ] + in[ _INDEX(x+1, y-1) ] );\n}\n\n// p_lattice will now be the pointer to the global lattice, lattice to the shared\n__global__ void conway_ker_shared(int * p_lattice, int iters)\n{\n // x, y are the appropriate values for the cell covered by this thread\n int x = _X, y = _Y;\n __shared__ int lattice[32*32];\n \n \n lattice[_INDEX(x,y)] = p_lattice[_INDEX(x,y)];\n __syncthreads(); // each thread copies its own value into the shared lattice, we need to wait for them to finisch\n\n for (int i = 0; i < iters; i++)\n {\n \n // count the number of neighbors around the current cell\n int n = nbrs(x, y, lattice);\n \n int cell_value;\n \n \n // if the current cell is alive, then determine if it lives or dies for the next generation.\n if ( lattice[_INDEX(x,y)] == 1)\n switch(n)\n {\n // if the cell is alive: it remains alive only if it 
has 2 or 3 neighbors.\n case 2:\n case 3: cell_value = 1;\n break;\n default: cell_value = 0; \n }\n else if( lattice[_INDEX(x,y)] == 0 )\n switch(n)\n {\n // a dead cell comes to life only if it has 3 neighbors that are alive.\n case 3: cell_value = 1;\n break;\n default: cell_value = 0; \n }\n \n __syncthreads();\n lattice[_INDEX(x,y)] = cell_value;\n __syncthreads();\n \n }\n \n __syncthreads();\n p_lattice[_INDEX(x,y)] = lattice[_INDEX(x,y)];\n __syncthreads();\n \n}\n"""\n )\n', (429, 2837), False, 'from pycuda.compiler import SourceModule\n'), ((3074, 3098), 'pycuda.gpuarray.to_gpu', 'gpuarray.to_gpu', (['lattice'], {}), '(lattice)\n', (3089, 3098), False, 'from pycuda import gpuarray\n'), ((3120, 3126), 'time.time', 'time', ([], {}), '()\n', (3124, 3126), False, 'from time import time\n'), ((3281, 3294), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (3291, 3294), True, 'import matplotlib.pyplot as plt\n'), ((3333, 3343), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3341, 3343), True, 'import matplotlib.pyplot as plt\n'), ((3162, 3181), 'numpy.int32', 'np.int32', (['(1000000.0)'], {}), '(1000000.0)\n', (3170, 3181), True, 'import numpy as np\n'), ((2995, 3042), 'numpy.random.choice', 'np.random.choice', (['[1, 0]', '(N * N)'], {'p': '[0.25, 0.75]'}), '([1, 0], N * N, p=[0.25, 0.75])\n', (3011, 3042), True, 'import numpy as np\n'), ((3234, 3240), 'time.time', 'time', ([], {}), '()\n', (3238, 3240), False, 'from time import time\n')]
|
#!/usr/bin/env python
'''======================================================
Created by: <NAME> and <NAME>
Last updated: March 2015
File name: DF_Plots.py
Organization: RISC Lab, Utah State University
======================================================'''
import roslib; roslib.load_manifest('risc_msgs')
import rospy
import numpy as np
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import pylab as p
import matplotlib.pyplot as plt
import time
#=======================#
# Messages Needed #
#=======================#
from risc_msgs.msg import * # states,controls,trajectory
from sensor_msgs.msg import Joy
#========================#
# Globals #
#========================#
rate = 20 # Hz
states = Cortex()
traj = Trajectories()
ctrl = Controls()
start_time = 0
euler_max = 45*np.pi/180
Button_pushed = False
plot_button = 3
#===================================#
# Plotting Variables #
#===================================#
states_of_interest = 16
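# storage_mat columns: x/y/z actual, x/y/z desired, roll/pitch/yaw desired, roll/pitch/yaw actual, x/y/z velocity error, time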
storage_mat = np.asmatrix(np.zeros((1,states_of_interest)))
index = [0]
name = ['Initial']
#==================#
# Get States #
#==================#
def GetStates(I):
global states
states = I
#=========================#
# Get Joystick Data #
#=========================#
def GetJoy(I):
global Button_pushed
Button_pushed = I.buttons[plot_button]
#======================#
# Get Trajectory #
#======================#
def GetTrajectory(I):
global traj
traj = I
#====================#
# Get Controls #
#====================#
def GetControls(I):
global ctrl
ctrl = I
def Plots():
global storage_mat, index, name
if len(index) > 2:
for i in range(len(index)-1):
# assign data vectors
f = index[i+1]
if i+2 == len(index):
b = -1
else:
b = index[i+2]
x_act = storage_mat[f:b,0]
y_act = storage_mat[f:b,1]
z_act = storage_mat[f:b,2]
x_des = storage_mat[f:b,3]
y_des = storage_mat[f:b,4]
z_des = storage_mat[f:b,5]
phi_des = storage_mat[f:b,6]
theta_des = storage_mat[f:b,7]
psi_des = storage_mat[f:b,8]
phi_act = storage_mat[f:b,9]
theta_act = storage_mat[f:b,10]
psi_act = storage_mat[f:b,11]
xdot_err = storage_mat[f:b,12]
ydot_err = storage_mat[f:b,13]
zdot_err = storage_mat[f:b,14]
t = storage_mat[f:b,15]
# 3d plot
plot3d(name[i+1],x_act,y_act,z_act,x_des,y_des,z_des)
# Roll
plot2d(name[i+1] + ' Roll',phi_act,phi_des,t,'Time (s)','Angle (Deg)')
# Pitch
plot2d(name[i+1] + ' Pitch',theta_act,theta_des,t,'Time (s)','Angle (Deg)')
# Errors
plot3err(name[i+1] + ' Position Errors',x_des-x_act,y_des-y_act,z_des-z_act,t,'Time (s)', 'Error (m)', 'x', 'y', 'z')
plot3err(name[i+1] + ' Velocity Errors',xdot_err,ydot_err,zdot_err,t,'Time (s)', 'Error (m/s)', 'xdot', 'ydot', 'zdot')
plt.show(block=False)
else:
rospy.loginfo("insufficient data")
#==========================#
# Plotting Functions #
#==========================#
def plot3d(Traj_name,x_act,y_act,z_act,x_des,y_des,z_des):
x_act=list(np.array(x_act).reshape(-1))
y_act=list(np.array(y_act).reshape(-1))
z_act=list(np.array(z_act).reshape(-1))
x_des=list(np.array(x_des).reshape(-1))
y_des=list(np.array(y_des).reshape(-1))
z_des=list(np.array(z_des).reshape(-1))
fig = plt.figure(Traj_name)
ax = fig.gca(projection='3d')
ax.plot(x_act, y_act, z_act,'k-', label='Actual')
ax.plot(x_des, y_des, z_des,'r-', label='Desired')
ax.legend()
ax.set_title(Traj_name + ' Trajectory', fontsize=16)
ax.set_xlabel(r'X (m)', fontsize=14)
ax.set_ylabel(r'Y (m)', fontsize=14)
ax.set_zlabel(r'Z (m)', fontsize=14)
ax.set_xlim([-2, 2])
ax.set_ylim([-2, 2])
ax.set_zlim([0, 2])
def plot3err(plot_name,err1,err2,err3,time,xaxis_label, yaxis_label,label1, label2, label3):
Err1 = list(np.array(err1).reshape(-1))
Err2 = list(np.array(err2).reshape(-1))
Err3 = list(np.array(err3).reshape(-1))
time = list(np.array(time).reshape(-1))
fig = plt.figure(plot_name)
plt.plot(time, Err1,'b-', label=label1)
plt.plot(time, Err2,'k-', label=label2)
plt.plot(time, Err3,'r-', label=label3)
plt.legend()
plt.title(plot_name, fontsize=16)
plt.xlabel(xaxis_label, fontsize=14)
plt.ylabel(yaxis_label, fontsize=14)
plt.xlim((time[0],time[-1]))
y_min = min([min(Err1),min(Err2),min(Err3)])
y_min = y_min - .2*abs(y_min)
y_max = max([max(Err1),max(Err2),max(Err3)])
y_max = y_max + .2*abs(y_max)
plt.ylim((y_min,y_max))
def plot2d(plot_name,actual_data,commanded_data,time,xaxis_label, yaxis_label):
actual_data = list(np.array(actual_data).reshape(-1))
commanded_data = list(np.array(commanded_data).reshape(-1))
time = list(np.array(time).reshape(-1))
fig = plt.figure(plot_name)
plt.plot(time, actual_data, 'b-', label='actual')
plt.plot(time, commanded_data,'r:', label='Commanded')
plt.legend()
plt.title(plot_name, fontsize=16)
plt.xlabel(xaxis_label, fontsize=14)
plt.ylabel(yaxis_label, fontsize=14)
plt.xlim((time[0],time[-1]))
y_min = min([min(actual_data),min(commanded_data)])
y_min = y_min - .2*abs(y_min)
y_max = max([max(actual_data),max(commanded_data)])
y_max = y_max + .2*abs(y_max)
plt.ylim((y_min,y_max))
#==================#
# Datalogger #
#==================#
def Datalogger():
global start_time,states,ctrl, traj, euler_max
#======================================================#
# If all states of interest are present log data #
#======================================================#
if len(traj.Obj) > 0 and len(states.Obj) > 0 and len(ctrl.Obj) > 0:
global storage_mat
rospy.loginfo("logging data...")
x_act = states.Obj[0].x
y_act = states.Obj[0].y
z_act = states.Obj[0].z
x_des = traj.Obj[0].x
y_des = traj.Obj[0].y
z_des = traj.Obj[0].z
phi_traj = ctrl.Obj[0].phi*euler_max*180/np.pi
theta_traj = ctrl.Obj[0].theta*euler_max*180/np.pi
psi_traj = traj.Obj[0].psi*180/np.pi
phi_cort = states.Obj[0].phi
theta_cort = states.Obj[0].theta
psi_cort = states.Obj[0].psi
u_cort_err = traj.Obj[0].xdot - states.Obj[0].u
v_cort_err = traj.Obj[0].ydot - states.Obj[0].v
w_cort_err = traj.Obj[0].zdot - states.Obj[0].w
t = float(rospy.get_time() - start_time)
        new_stack = np.asmatrix(np.array([x_act, y_act, z_act, x_des, y_des, z_des,\
phi_traj, theta_traj, psi_traj, phi_cort, theta_cort, psi_cort,\
u_cort_err, v_cort_err, w_cort_err,t]))
storage_mat = np.append(storage_mat,new_stack,0)
#==========================================================================#
# If there is a new trajectory store the index and trajectory name #
#==========================================================================#
    global states_of_interest, name, index
if len(traj.Obj) > 0 and name[-1] != traj.Obj[0].name:
name.append(traj.Obj[0].name)
index.append(storage_mat.shape[0] -1)
start_time = rospy.get_time()
#===================#
# Main #
#===================#
if __name__=='__main__':
rospy.init_node('DF_Plotter')
start_time = rospy.get_time()
euler_max = float(rospy.get_param("euler_angle_max", ".78537")) #in radians
plot_button = int(rospy.get_param("plot_button", "3"))
#=====================================#
# Set up Publish/Subscribe Loop #
#=====================================#
r = rospy.Rate(rate)
while not rospy.is_shutdown():
sub_states = rospy.Subscriber('/cortex_raw' , Cortex, GetStates)
sub_traj = rospy.Subscriber('/trajectory',Trajectories, GetTrajectory)
sub_cntrl = rospy.Subscriber('/controls' , Controls, GetControls)
sub_joy = rospy.Subscriber('/joy' , Joy, GetJoy)
Datalogger()
if Button_pushed:
Plots()
answer = raw_input('Erase plots and reset datalogger?')
if answer == 'y' or answer == 'yes' or answer == 'I guess' or answer == 'sure':
rospy.loginfo("Resetting datalogger and erasing plots...")
plt.clf()
start_time = rospy.get_time()
storage_mat = np.asmatrix(np.zeros((1,states_of_interest)))
plt.close('all')
else:
plt.clf()
plt.close('all')
rospy.signal_shutdown(0)
r.sleep()
|
[
"matplotlib.pyplot.title",
"rospy.Subscriber",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.figure",
"roslib.load_manifest",
"matplotlib.pyplot.close",
"rospy.Rate",
"rospy.signal_shutdown",
"numpy.append",
"rospy.is_shutdown",
"rospy.init_node",
"rospy.get_time",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"rospy.loginfo",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.plot",
"numpy.zeros",
"rospy.get_param",
"numpy.array",
"matplotlib.pyplot.xlabel"
] |
[((301, 334), 'roslib.load_manifest', 'roslib.load_manifest', (['"""risc_msgs"""'], {}), "('risc_msgs')\n", (321, 334), False, 'import roslib\n'), ((1180, 1213), 'numpy.zeros', 'np.zeros', (['(1, states_of_interest)'], {}), '((1, states_of_interest))\n', (1188, 1213), True, 'import numpy as np\n'), ((3923, 3944), 'matplotlib.pyplot.figure', 'plt.figure', (['Traj_name'], {}), '(Traj_name)\n', (3933, 3944), True, 'import matplotlib.pyplot as plt\n'), ((4651, 4672), 'matplotlib.pyplot.figure', 'plt.figure', (['plot_name'], {}), '(plot_name)\n', (4661, 4672), True, 'import matplotlib.pyplot as plt\n'), ((4677, 4717), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'Err1', '"""b-"""'], {'label': 'label1'}), "(time, Err1, 'b-', label=label1)\n", (4685, 4717), True, 'import matplotlib.pyplot as plt\n'), ((4721, 4761), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'Err2', '"""k-"""'], {'label': 'label2'}), "(time, Err2, 'k-', label=label2)\n", (4729, 4761), True, 'import matplotlib.pyplot as plt\n'), ((4765, 4805), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'Err3', '"""r-"""'], {'label': 'label3'}), "(time, Err3, 'r-', label=label3)\n", (4773, 4805), True, 'import matplotlib.pyplot as plt\n'), ((4809, 4821), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4819, 4821), True, 'import matplotlib.pyplot as plt\n'), ((4826, 4859), 'matplotlib.pyplot.title', 'plt.title', (['plot_name'], {'fontsize': '(16)'}), '(plot_name, fontsize=16)\n', (4835, 4859), True, 'import matplotlib.pyplot as plt\n'), ((4864, 4900), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xaxis_label'], {'fontsize': '(14)'}), '(xaxis_label, fontsize=14)\n', (4874, 4900), True, 'import matplotlib.pyplot as plt\n'), ((4905, 4941), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['yaxis_label'], {'fontsize': '(14)'}), '(yaxis_label, fontsize=14)\n', (4915, 4941), True, 'import matplotlib.pyplot as plt\n'), ((4946, 4975), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(time[0], time[-1])'], {}), '((time[0], time[-1]))\n', (4954, 4975), True, 'import matplotlib.pyplot as plt\n'), ((5146, 5170), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(y_min, y_max)'], {}), '((y_min, y_max))\n', (5154, 5170), True, 'import matplotlib.pyplot as plt\n'), ((5441, 5462), 'matplotlib.pyplot.figure', 'plt.figure', (['plot_name'], {}), '(plot_name)\n', (5451, 5462), True, 'import matplotlib.pyplot as plt\n'), ((5467, 5516), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'actual_data', '"""b-"""'], {'label': '"""actual"""'}), "(time, actual_data, 'b-', label='actual')\n", (5475, 5516), True, 'import matplotlib.pyplot as plt\n'), ((5521, 5576), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'commanded_data', '"""r:"""'], {'label': '"""Commanded"""'}), "(time, commanded_data, 'r:', label='Commanded')\n", (5529, 5576), True, 'import matplotlib.pyplot as plt\n'), ((5580, 5592), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5590, 5592), True, 'import matplotlib.pyplot as plt\n'), ((5597, 5630), 'matplotlib.pyplot.title', 'plt.title', (['plot_name'], {'fontsize': '(16)'}), '(plot_name, fontsize=16)\n', (5606, 5630), True, 'import matplotlib.pyplot as plt\n'), ((5635, 5671), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xaxis_label'], {'fontsize': '(14)'}), '(xaxis_label, fontsize=14)\n', (5645, 5671), True, 'import matplotlib.pyplot as plt\n'), ((5676, 5712), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['yaxis_label'], {'fontsize': '(14)'}), '(yaxis_label, fontsize=14)\n', (5686, 5712), True, 'import matplotlib.pyplot as plt\n'), ((5717, 5746), 
'matplotlib.pyplot.xlim', 'plt.xlim', (['(time[0], time[-1])'], {}), '((time[0], time[-1]))\n', (5725, 5746), True, 'import matplotlib.pyplot as plt\n'), ((5931, 5955), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(y_min, y_max)'], {}), '((y_min, y_max))\n', (5939, 5955), True, 'import matplotlib.pyplot as plt\n'), ((8031, 8060), 'rospy.init_node', 'rospy.init_node', (['"""DF_Plotter"""'], {}), "('DF_Plotter')\n", (8046, 8060), False, 'import rospy\n'), ((8078, 8094), 'rospy.get_time', 'rospy.get_time', ([], {}), '()\n', (8092, 8094), False, 'import rospy\n'), ((8379, 8395), 'rospy.Rate', 'rospy.Rate', (['rate'], {}), '(rate)\n', (8389, 8395), False, 'import rospy\n'), ((3441, 3475), 'rospy.loginfo', 'rospy.loginfo', (['"""insufficient data"""'], {}), "('insufficient data')\n", (3454, 3475), False, 'import rospy\n'), ((6395, 6427), 'rospy.loginfo', 'rospy.loginfo', (['"""logging data..."""'], {}), "('logging data...')\n", (6408, 6427), False, 'import rospy\n'), ((7407, 7443), 'numpy.append', 'np.append', (['storage_mat', 'new_stack', '(0)'], {}), '(storage_mat, new_stack, 0)\n', (7416, 7443), True, 'import numpy as np\n'), ((7905, 7921), 'rospy.get_time', 'rospy.get_time', ([], {}), '()\n', (7919, 7921), False, 'import rospy\n'), ((8120, 8164), 'rospy.get_param', 'rospy.get_param', (['"""euler_angle_max"""', '""".78537"""'], {}), "('euler_angle_max', '.78537')\n", (8135, 8164), False, 'import rospy\n'), ((8201, 8236), 'rospy.get_param', 'rospy.get_param', (['"""plot_button"""', '"""3"""'], {}), "('plot_button', '3')\n", (8216, 8236), False, 'import rospy\n'), ((8410, 8429), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (8427, 8429), False, 'import rospy\n'), ((8453, 8503), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/cortex_raw"""', 'Cortex', 'GetStates'], {}), "('/cortex_raw', Cortex, GetStates)\n", (8469, 8503), False, 'import rospy\n'), ((8527, 8587), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/trajectory"""', 'Trajectories', 'GetTrajectory'], {}), "('/trajectory', Trajectories, GetTrajectory)\n", (8543, 8587), False, 'import rospy\n'), ((8609, 8661), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/controls"""', 'Controls', 'GetControls'], {}), "('/controls', Controls, GetControls)\n", (8625, 8661), False, 'import rospy\n'), ((8685, 8722), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/joy"""', 'Joy', 'GetJoy'], {}), "('/joy', Joy, GetJoy)\n", (8701, 8722), False, 'import rospy\n'), ((3401, 3422), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (3409, 3422), True, 'import matplotlib.pyplot as plt\n'), ((7184, 7347), 'numpy.array', 'np.array', (['[x_act, y_act, z_act, z_des, y_des, z_des, phi_traj, theta_traj, psi_traj,\n phi_cort, theta_cort, psi_cort, u_cort_err, v_cort_err, w_cort_err, t]'], {}), '([x_act, y_act, z_act, z_des, y_des, z_des, phi_traj, theta_traj,\n psi_traj, phi_cort, theta_cort, psi_cort, u_cort_err, v_cort_err,\n w_cort_err, t])\n', (7192, 7347), True, 'import numpy as np\n'), ((3663, 3678), 'numpy.array', 'np.array', (['x_act'], {}), '(x_act)\n', (3671, 3678), True, 'import numpy as np\n'), ((3707, 3722), 'numpy.array', 'np.array', (['y_act'], {}), '(y_act)\n', (3715, 3722), True, 'import numpy as np\n'), ((3751, 3766), 'numpy.array', 'np.array', (['z_act'], {}), '(z_act)\n', (3759, 3766), True, 'import numpy as np\n'), ((3795, 3810), 'numpy.array', 'np.array', (['x_des'], {}), '(x_des)\n', (3803, 3810), True, 'import numpy as np\n'), ((3839, 3854), 'numpy.array', 'np.array', (['y_des'], {}), '(y_des)\n', (3847, 
3854), True, 'import numpy as np\n'), ((3883, 3898), 'numpy.array', 'np.array', (['z_des'], {}), '(z_des)\n', (3891, 3898), True, 'import numpy as np\n'), ((4471, 4485), 'numpy.array', 'np.array', (['err1'], {}), '(err1)\n', (4479, 4485), True, 'import numpy as np\n'), ((4518, 4532), 'numpy.array', 'np.array', (['err2'], {}), '(err2)\n', (4526, 4532), True, 'import numpy as np\n'), ((4565, 4579), 'numpy.array', 'np.array', (['err3'], {}), '(err3)\n', (4573, 4579), True, 'import numpy as np\n'), ((4612, 4626), 'numpy.array', 'np.array', (['time'], {}), '(time)\n', (4620, 4626), True, 'import numpy as np\n'), ((5277, 5298), 'numpy.array', 'np.array', (['actual_data'], {}), '(actual_data)\n', (5285, 5298), True, 'import numpy as np\n'), ((5338, 5362), 'numpy.array', 'np.array', (['commanded_data'], {}), '(commanded_data)\n', (5346, 5362), True, 'import numpy as np\n'), ((5402, 5416), 'numpy.array', 'np.array', (['time'], {}), '(time)\n', (5410, 5416), True, 'import numpy as np\n'), ((7121, 7137), 'rospy.get_time', 'rospy.get_time', ([], {}), '()\n', (7135, 7137), False, 'import rospy\n'), ((8973, 9031), 'rospy.loginfo', 'rospy.loginfo', (['"""Resetting datalogger and erasing plots..."""'], {}), "('Resetting datalogger and erasing plots...')\n", (8986, 9031), False, 'import rospy\n'), ((9049, 9058), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9056, 9058), True, 'import matplotlib.pyplot as plt\n'), ((9089, 9105), 'rospy.get_time', 'rospy.get_time', ([], {}), '()\n', (9103, 9105), False, 'import rospy\n'), ((9200, 9216), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (9209, 9216), True, 'import matplotlib.pyplot as plt\n'), ((9253, 9262), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9260, 9262), True, 'import matplotlib.pyplot as plt\n'), ((9280, 9296), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (9289, 9296), True, 'import matplotlib.pyplot as plt\n'), ((9314, 9338), 'rospy.signal_shutdown', 'rospy.signal_shutdown', (['(0)'], {}), '(0)\n', (9335, 9338), False, 'import rospy\n'), ((9149, 9182), 'numpy.zeros', 'np.zeros', (['(1, states_of_interest)'], {}), '((1, states_of_interest))\n', (9157, 9182), True, 'import numpy as np\n')]
|
import numpy as np
import anndata as ad
# -------------------------------------------------------------------------------
# Some test data
# -------------------------------------------------------------------------------
X_list = [ # data matrix of shape n_obs x n_vars
[1, 2, 3], [4, 5, 6], [7, 8, 9]]
obs_dict = { # annotation of observations / rows
'row_names': ['name1', 'name2', 'name3'], # row annotation
'oanno1': ['cat1', 'cat2', 'cat2'], # categorical annotation
'oanno2': ['o1', 'o2', 'o3'], # string annotation
'oanno3': [2.1, 2.2, 2.3]} # float annotation
var_dict = { # annotation of variables / columns
'vanno1': [3.1, 3.2, 3.3]}
uns_dict = { # unstructured annotation
'oanno1_colors': ['#000000', '#FFFFFF'],
'uns2': ['some annotation']}
# -------------------------------------------------------------------------------
# The test functions
# -------------------------------------------------------------------------------
def test_views():
X = np.array(X_list)
adata = ad.AnnData(X, obs=obs_dict, var=var_dict, uns=uns_dict, dtype='int32')
assert adata[:, 0].isview
assert adata[:, 0].X.tolist() == [1, 4, 7]
adata[:2, 0].X = [0, 0]
assert adata[:, 0].X.tolist() == [0, 0, 7]
adata_subset = adata[:2, [0, 1]]
assert adata_subset.isview
# now transition to actual object
adata_subset.obs['foo'] = range(2)
assert not adata_subset.isview
assert adata_subset.obs['foo'].tolist() == list(range(2))
def test_slice_copy():
adata = ad.AnnData(np.empty((100, 100)))
adata.obsm['o'] = np.empty((100, 50))
adata = adata[:50]
adata.obsm['o'] = np.ones((50, 20))
|
[
"anndata.AnnData",
"numpy.empty",
"numpy.array",
"numpy.ones"
] |
[((1049, 1065), 'numpy.array', 'np.array', (['X_list'], {}), '(X_list)\n', (1057, 1065), True, 'import numpy as np\n'), ((1078, 1148), 'anndata.AnnData', 'ad.AnnData', (['X'], {'obs': 'obs_dict', 'var': 'var_dict', 'uns': 'uns_dict', 'dtype': '"""int32"""'}), "(X, obs=obs_dict, var=var_dict, uns=uns_dict, dtype='int32')\n", (1088, 1148), True, 'import anndata as ad\n'), ((1641, 1660), 'numpy.empty', 'np.empty', (['(100, 50)'], {}), '((100, 50))\n', (1649, 1660), True, 'import numpy as np\n'), ((1707, 1724), 'numpy.ones', 'np.ones', (['(50, 20)'], {}), '((50, 20))\n', (1714, 1724), True, 'import numpy as np\n'), ((1597, 1617), 'numpy.empty', 'np.empty', (['(100, 100)'], {}), '((100, 100))\n', (1605, 1617), True, 'import numpy as np\n')]
|
import cv2
import glob
import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D, Flatten, Dense, MaxPooling2D, Dropout
from keras.utils.np_utils import to_categorical
from keras import losses, optimizers, regularizers
X_train = []
x_label = []
for img_class, directory in enumerate(['Red', 'Yellow', 'Green', 'NoTrafficLight']):
for i, file_name in enumerate(glob.glob("simulator_lights/{}/*.png".format(directory))):
# for i, file_name in enumerate(glob.glob("/home/andcircle/FunDriving/Term3/Final_Proj/tl_classifier_exceptsmall/real/{}/*.png".format(directory))):
img = cv2.imread(file_name)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
resized = cv2.resize(img, (32,64))
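        # cv2.resize takes (width, height), so this yields a 64x32x3 image matching the model's input_shape below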
X_train.append(resized/255.)
x_label.append(img_class)
X_train = np.array(X_train)
x_label = np.array(x_label)
categorical_labels = to_categorical(x_label)
num_classes = 4
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(64, 32, 3), padding='same', activation='relu', kernel_initializer='random_uniform', kernel_regularizer=regularizers.l2(0.01)))
model.add(MaxPooling2D(2,2))
model.add(Dropout(0.5))
model.add(Conv2D(32, (3, 3), padding='same', activation='relu', kernel_initializer='random_uniform', kernel_regularizer=regularizers.l2(0.01)))
model.add(MaxPooling2D(2,2))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(8, activation='relu', kernel_initializer='random_uniform', kernel_regularizer=regularizers.l2(0.01)))
model.add(Dense(num_classes, activation='softmax'))
loss = losses.categorical_crossentropy
optimizer = optimizers.Adam()
model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
model.fit(X_train, categorical_labels, batch_size=32, epochs=10, verbose=True, validation_split=0.1, shuffle=True)
score = model.evaluate(X_train, categorical_labels, verbose=0)
print(score)
model.save('tl_classifier_simulator.h5')
# model.save('tl_classifier_real.h5')
#--------------------------------------------------------------- model.summary()
|
[
"keras.regularizers.l2",
"cv2.cvtColor",
"keras.layers.Dropout",
"keras.optimizers.Adam",
"keras.layers.Flatten",
"cv2.imread",
"keras.utils.np_utils.to_categorical",
"keras.layers.Dense",
"numpy.array",
"keras.models.Sequential",
"keras.layers.MaxPooling2D",
"cv2.resize"
] |
[((832, 849), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (840, 849), True, 'import numpy as np\n'), ((860, 877), 'numpy.array', 'np.array', (['x_label'], {}), '(x_label)\n', (868, 877), True, 'import numpy as np\n'), ((902, 925), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['x_label'], {}), '(x_label)\n', (916, 925), False, 'from keras.utils.np_utils import to_categorical\n'), ((956, 968), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (966, 968), False, 'from keras.models import Sequential\n'), ((1167, 1179), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1174, 1179), False, 'from keras.layers import Conv2D, Flatten, Dense, MaxPooling2D, Dropout\n'), ((1353, 1365), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1360, 1365), False, 'from keras.layers import Conv2D, Flatten, Dense, MaxPooling2D, Dropout\n'), ((1614, 1631), 'keras.optimizers.Adam', 'optimizers.Adam', ([], {}), '()\n', (1629, 1631), False, 'from keras import losses, optimizers, regularizers\n'), ((1148, 1166), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (1160, 1166), False, 'from keras.layers import Conv2D, Flatten, Dense, MaxPooling2D, Dropout\n'), ((1334, 1352), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (1346, 1352), False, 'from keras.layers import Conv2D, Flatten, Dense, MaxPooling2D, Dropout\n'), ((1376, 1385), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1383, 1385), False, 'from keras.layers import Conv2D, Flatten, Dense, MaxPooling2D, Dropout\n'), ((1518, 1558), 'keras.layers.Dense', 'Dense', (['num_classes'], {'activation': '"""softmax"""'}), "(num_classes, activation='softmax')\n", (1523, 1558), False, 'from keras.layers import Conv2D, Flatten, Dense, MaxPooling2D, Dropout\n'), ((622, 643), 'cv2.imread', 'cv2.imread', (['file_name'], {}), '(file_name)\n', (632, 643), False, 'import cv2\n'), ((661, 697), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (673, 697), False, 'import cv2\n'), ((717, 742), 'cv2.resize', 'cv2.resize', (['img', '(32, 64)'], {}), '(img, (32, 64))\n', (727, 742), False, 'import cv2\n'), ((1114, 1135), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (1129, 1135), False, 'from keras import losses, optimizers, regularizers\n'), ((1300, 1321), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (1315, 1321), False, 'from keras import losses, optimizers, regularizers\n'), ((1484, 1505), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (1499, 1505), False, 'from keras import losses, optimizers, regularizers\n')]
|
import torch
from sklearn.model_selection import train_test_split
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
import random
import numpy as np
import Read_Data
import Confusion_Matrix
def getdata():
features, target = Read_Data.read_data3('mushrooms_data.csv')
X_train, X_test, y_train, y_test = train_test_split(features, target, test_size=0.20)
return X_train.values, X_test.values, y_train.values, y_test.values
class NN(nn.Module):
def __init__(self,input_size):
super().__init__()
self.fc1 = nn.Linear(input_size, 50)
self.fc2 = nn.Linear(50, 40)
self.fc3 = nn.Linear(40, 20)
self.fc4 = nn.Linear(20,9)
self.dropout = nn.Dropout(0.15)
def forward(self,x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = self.fc4(x)
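        # return raw logits; F.cross_entropy applies log-softmax internally during training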
return x
def train(epoch, net, train_x, train_y, optimizer):
for e in range(epoch):
optimizer.zero_grad()
output = net(train_x)
loss = F.cross_entropy(output, train_y)
loss.backward()
optimizer.step()
return net
def predict(model, test_x):
pred = []
with torch.no_grad():
for data in test_x:
output = model(data)
predict = np.argmax(output)
pred.append(int(predict))
return pred
def random_state(seed_val):
np.random.seed(seed_val)
random.seed(seed_val)
torch.manual_seed(seed_val)
# if you are using GPU
torch.cuda.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)
def main(train_x,test_x,train_y,test_y,input_size):
random_state(100)
train_x = train_x
test_x = test_x
train_y = train_y
test_y = test_y
train_x_tensor = torch.from_numpy(train_x).float()
train_y_tensor = torch.from_numpy(train_y).long()
test_x_tensor = torch.from_numpy(test_x).float()
net = NN(input_size=input_size)
lr = 0.1
m = 0.9
optimizer = optim.SGD(net.parameters(), lr=lr, momentum=m)
my_net = train(100,net,train_x_tensor,train_y_tensor,optimizer)
predicted = predict(my_net, test_x_tensor)
actual = test_y
f1, recall, accuracy = Confusion_Matrix.main(actual, predicted,"Confusion Matrix: Neural Network")
return f1, recall, accuracy
|
[
"torch.nn.Dropout",
"numpy.random.seed",
"Read_Data.read_data3",
"numpy.argmax",
"torch.manual_seed",
"sklearn.model_selection.train_test_split",
"torch.cuda.manual_seed",
"torch.nn.functional.cross_entropy",
"torch.cuda.manual_seed_all",
"Confusion_Matrix.main",
"random.seed",
"torch.nn.Linear",
"torch.no_grad",
"torch.from_numpy"
] |
[((274, 316), 'Read_Data.read_data3', 'Read_Data.read_data3', (['"""mushrooms_data.csv"""'], {}), "('mushrooms_data.csv')\n", (294, 316), False, 'import Read_Data\n'), ((357, 406), 'sklearn.model_selection.train_test_split', 'train_test_split', (['features', 'target'], {'test_size': '(0.2)'}), '(features, target, test_size=0.2)\n', (373, 406), False, 'from sklearn.model_selection import train_test_split\n'), ((1483, 1507), 'numpy.random.seed', 'np.random.seed', (['seed_val'], {}), '(seed_val)\n', (1497, 1507), True, 'import numpy as np\n'), ((1513, 1534), 'random.seed', 'random.seed', (['seed_val'], {}), '(seed_val)\n', (1524, 1534), False, 'import random\n'), ((1540, 1567), 'torch.manual_seed', 'torch.manual_seed', (['seed_val'], {}), '(seed_val)\n', (1557, 1567), False, 'import torch\n'), ((1601, 1633), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed_val'], {}), '(seed_val)\n', (1623, 1633), False, 'import torch\n'), ((1639, 1675), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed_val'], {}), '(seed_val)\n', (1665, 1675), False, 'import torch\n'), ((2307, 2383), 'Confusion_Matrix.main', 'Confusion_Matrix.main', (['actual', 'predicted', '"""Confusion Matrix: Neural Network"""'], {}), "(actual, predicted, 'Confusion Matrix: Neural Network')\n", (2328, 2383), False, 'import Confusion_Matrix\n'), ((592, 617), 'torch.nn.Linear', 'nn.Linear', (['input_size', '(50)'], {}), '(input_size, 50)\n', (601, 617), True, 'import torch.nn as nn\n'), ((638, 655), 'torch.nn.Linear', 'nn.Linear', (['(50)', '(40)'], {}), '(50, 40)\n', (647, 655), True, 'import torch.nn as nn\n'), ((676, 693), 'torch.nn.Linear', 'nn.Linear', (['(40)', '(20)'], {}), '(40, 20)\n', (685, 693), True, 'import torch.nn as nn\n'), ((714, 730), 'torch.nn.Linear', 'nn.Linear', (['(20)', '(9)'], {}), '(20, 9)\n', (723, 730), True, 'import torch.nn as nn\n'), ((754, 770), 'torch.nn.Dropout', 'nn.Dropout', (['(0.15)'], {}), '(0.15)\n', (764, 770), True, 'import torch.nn as nn\n'), ((1106, 1138), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['output', 'train_y'], {}), '(output, train_y)\n', (1121, 1138), True, 'import torch.nn.functional as F\n'), ((1266, 1281), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1279, 1281), False, 'import torch\n'), ((1369, 1386), 'numpy.argmax', 'np.argmax', (['output'], {}), '(output)\n', (1378, 1386), True, 'import numpy as np\n'), ((1866, 1891), 'torch.from_numpy', 'torch.from_numpy', (['train_x'], {}), '(train_x)\n', (1882, 1891), False, 'import torch\n'), ((1922, 1947), 'torch.from_numpy', 'torch.from_numpy', (['train_y'], {}), '(train_y)\n', (1938, 1947), False, 'import torch\n'), ((1976, 2000), 'torch.from_numpy', 'torch.from_numpy', (['test_x'], {}), '(test_x)\n', (1992, 2000), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
import numpy as np
from scipy import stats
from scipy import interpolate as spin
import pandas as pd
import os
import warnings
from datetime import datetime, timedelta
import calendar
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as pltick
import matplotlib.dates as mdates
import matplotlib.cbook as mcbook
import matplotlib.colors as mcolors
import matplotlib.legend as mlegend
from matplotlib import gridspec
from matplotlib.patches import Polygon
if os.path.isdir('/Applications/anaconda/share/proj'): # needed for Basemap import on my machine, but probably not yours
os.environ['PROJ_LIB'] = '/Applications/anaconda/share/proj'
from mpl_toolkits.basemap import Basemap
from collections import OrderedDict
warnings.filterwarnings('ignore','.*is_string_like function.*') # MatplotlibDeprecationWarning upon cmocean import
import cmocean
import gsw
from Circles.circles import circle # from https://github.com/urschrei/Circles
import time_tools as tt
import geo_tools as gt
import load_product as ldp
def sea_ice_argo_spatial(data_dir,date,sic_grid,sic,float_data,plot_argo_locs_not_trajs,
title,save_as,results_dir,width,height,lat_center,lon_center,
open_sic=0,max_sic=100,which_ice_cmap=4,extend_cmap='neither',rasterized=True,
plot_floats=True,polynya_grid=None,label_traj_dates=True,
as_subplot=False,create_subplot=True,subplot_fig_size=(11,6),
first_subplot=False,last_subplot=False,which_subplot=[1,1,1],
subplot_add_colorbar=False,bathy_contours=np.arange(-3500,-100,500),
grid_lats=np.arange(-80,60,5),grid_lons=np.arange(-80,50,10),
subplot_lon_labels=[0,0,0,0],subplot_lat_labels=[0,0,0,0],subplot_labelsize=None,
cmap_bad_color='w',cmap_ocean_color='#5bcfff',grid_color='.2',continent_color='0.7',
boundary_width=2,coastline_width=1,pad=0.25,spacing=0.2,cbar_bottom=0.125,
return_basemap=False,return_pcolor=False,include_year_in_date=False,save_png=False):
""" Plots Argo profile locations on regional map with background of sea ice concentration and bathymetry.
"""
warnings.filterwarnings('ignore', category=mcbook.mplDeprecation)
if plot_argo_locs_not_trajs is True:
floats_linger = 14 # let floats linger on the map for N days after most recent profile (if no new profile)
if as_subplot: cross_lonx = 100000/2; cross_laty = 100000/2; cross_width = 1.25; text_offset = 0.03
else: cross_lonx = 100000/4; cross_laty = 100000/4; cross_width = 2.5; text_offset = 0.015
if which_ice_cmap == 1: ice_cmap = plt.cm.CMRmap
elif which_ice_cmap == 2: ice_cmap = plt.cm.inferno
elif which_ice_cmap == 3: ice_cmap = plt.cm.gist_ncar
elif which_ice_cmap == 4: # custom colormap similar to cm.CMRmap:
cmap_colors = ['#79CDFA','#79CDFA','#87C3EC','#628eac','#1E1952',
'#4630B8','#E85A33','#E1C047','#F2F1C4','#FBFBEE','#FFFFFF']
ice_cmap = mcolors.LinearSegmentedColormap.from_list(name=None,colors=cmap_colors,N=250,gamma=1.3)
elif which_ice_cmap == 5: # alternate version of cmap 4 above, with less vibrant ocean blue
cmap_colors = ['#bce6fc','#bce6fc','#87C3EC','#628eac','#1E1952',
'#4630B8','#E85A33','#E1C047','#F2F1C4','#FBFBEE','#FFFFFF']
ice_cmap = mcolors.LinearSegmentedColormap.from_list(name=None,colors=cmap_colors,N=250,gamma=1.3)
if not as_subplot:
fig, m = blank_inset_basemap(width,height,lat_center,lon_center,lon_labels=[0,0,0,1],lat_labels=[1,0,0,0],
grid_lats=grid_lats,grid_lons=grid_lons,labelsize=subplot_labelsize)
if as_subplot:
if create_subplot:
if first_subplot: master_fig = plt.figure(figsize=subplot_fig_size)
plt.gcf().add_subplot(*which_subplot)
master_fig, m = blank_inset_basemap(width,height,lat_center,lon_center,create_new_fig=False,
lon_labels=subplot_lon_labels,lat_labels=subplot_lat_labels,
grid_lats=grid_lats,grid_lons=grid_lons,
labelsize=subplot_labelsize,grid_color=grid_color,
fill_continent_color=continent_color,
boundary_width=boundary_width,coastline_width=coastline_width)
xlims = plt.gca().get_xlim()
ylims = plt.gca().get_ylim()
lonx, laty = m(sic_grid['lons'], sic_grid['lats'])
sic_nan_masked = np.ma.masked_where(np.isnan(sic), sic)
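    # pcolormesh interprets the x/y arrays as cell corners, so the two lines below estimate half the
    # mean grid spacing and shift the projected coordinates to center each cell on its SIC grid point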
sic_lon_edge_to_center = 0.5*np.mean([np.mean(np.diff(lonx[0,:])), np.mean(np.diff(lonx[-1,:]))])
sic_lat_edge_to_center = 0.5*np.mean([np.mean(np.diff(laty[0,:])),np.mean(np.diff(laty[-1,:]))])
pcm = plt.pcolormesh(lonx-sic_lon_edge_to_center, laty-sic_lat_edge_to_center, sic_nan_masked,
cmap=ice_cmap, edgecolors='None', rasterized=rasterized, zorder=1, alpha=1.0,
vmin=open_sic, vmax=max_sic) # norm=mcolors.PowerNorm(gamma=0.4,vmin=open_sic,vmax=100,clip=False)
pcm.cmap.set_over('w')
pcm.cmap.set_bad(cmap_bad_color)
pcm.cmap.set_under(cmap_ocean_color) # '#5bcfff' is sea blue (previously #f0ffff, very light blue)
if not as_subplot:
cbar = plt.colorbar(pad=0.05, shrink=0.65, format='%.0f%%', extend=extend_cmap)
cbar.ax.tick_params(labelsize=12)
cbar.set_label('Sea ice concentration',size=12)
if polynya_grid is not None:
plt.contour(lonx,laty,polynya_grid,levels=[0.999],colors='#00FF00',linewidths=0.7,alpha=0.8, zorder=2)
if len(bathy_contours) > 0:
etopo_lons, etopo_lats, etopo = ldp.load_bathy(data_dir)
retopolons, retopolats = m(*np.meshgrid(etopo_lons, etopo_lats))
olevels = bathy_contours # check etopo.ravel().min()
m.contour(retopolons, retopolats, etopo, olevels, linewidths=0.5, linestyles='solid', colors='#808080',
alpha=0.5, zorder=3)
if plot_floats:
for f in range(len(float_data)):
wmoid = float_data[f][0]
float_lons = float_data[f][1]
float_lats = float_data[f][2]
position_flags = float_data[f][3]
float_datetimes = float_data[f][4]
float_dates = (float_data[f][4]/1000000).astype(int)
prof_nums = float_data[f][5]
date_int = tt.convert_tuple_to_8_int(date)
if plot_argo_locs_not_trajs:
if sum((float_dates - date_int) == 0) >= 1:
this_day_index = np.where((float_dates - date_int) == 0)[0][0]
lonx, laty = m(float_lons[this_day_index], float_lats[this_day_index])
if not (xlims[0] <= lonx <= xlims[1]) or not (ylims[0] <= laty <= ylims[1]): continue
if position_flags[this_day_index] == 1:
c='m'
edgecolor='k'
else:
c='#15178F'
edgecolor='k'
plt.plot([lonx-cross_lonx,lonx+cross_lonx],[laty,laty],color=c,linestyle='solid',linewidth=cross_width,zorder=4)
plt.plot([lonx,lonx],[laty-cross_laty,laty+cross_laty],color=c,linestyle='solid',linewidth=cross_width,zorder=4)
plt.scatter(lonx,laty,s=14,c=c,edgecolors=edgecolor,alpha=0.9,zorder=5)
if subplot_labelsize is None: float_fontsize = 8
else: float_fontsize = subplot_labelsize
plt.text(lonx + text_offset*width,laty - 3*text_offset*height,str(wmoid) + ' ('
+ str(prof_nums[this_day_index]) + ')',fontsize=float_fontsize,color=c,clip_on=False,zorder=6)
elif date_int > float_dates[0]:
recent_day_index = np.where((float_dates - date_int) < 0)[0][-1]
days_since_last_profile = tt.days_between(tt.convert_8_int_to_tuple(float_dates[recent_day_index]),date)
if days_since_last_profile <= floats_linger:
lonx, laty = m(float_lons[recent_day_index], float_lats[recent_day_index])
if not (xlims[0] <= lonx <= xlims[1]) or not (ylims[0] <= laty <= ylims[1]): continue
# alpha = (0.75-0.25) + 0.25*(1 - days_since_last_profile/floats_linger)
if position_flags[recent_day_index] == 1: c = 'm'
else: c = '#15178F'
plt.plot([lonx-cross_lonx,lonx+cross_lonx],[laty,laty],color=c,linestyle='solid',linewidth=cross_width,zorder=4)
plt.plot([lonx,lonx],[laty-cross_laty,laty+cross_laty],color=c,linestyle='solid',linewidth=cross_width,zorder=4)
# plt.scatter(lonx,laty,s=18,c=c,edgecolors='none',alpha=0.7,zorder=5)
elif not plot_argo_locs_not_trajs:
flonx,flaty = m(float_lons,float_lats)
plt.plot(flonx[position_flags != 9],flaty[position_flags != 9],color='#15178F',linewidth=1.25,zorder=4)
plt.scatter(flonx[position_flags == 2],flaty[position_flags == 2],s=10,c='m',edgecolors='none',zorder=5)
plt.scatter(flonx[position_flags == 1],flaty[position_flags == 1],s=10,c='#15178F',edgecolors='none',
zorder=6)
if len(float_data) == 1 or label_traj_dates == True:
datetime_tuples = [tt.convert_14_to_tuple(float_datetimes[n]) for n in range(len(float_datetimes))]
mo_yr_strings = [str(datetime_tuples[n][1]) + '/' + '{0:02d}'.format(datetime_tuples[n][0] - 2000)
for n in range(len(datetime_tuples))]
unique_mo_yr_strings,unique_indices = np.unique(mo_yr_strings,return_index=True)
unique_indices = np.sort(unique_indices) # to undo undesired sort by 'unique'
mo_yr_strings_to_label = [mo_yr_strings[n] for n in unique_indices]
lonx_to_label = [flonx[n] for n in unique_indices]
laty_to_label = [flaty[n] for n in unique_indices]
for pt in np.arange(0,len(mo_yr_strings_to_label),6):
plt.text(lonx_to_label[pt] + 0.000625 * width,laty_to_label[pt] - 0.026 * height,
mo_yr_strings_to_label[pt],fontsize=7,color='#15178F')
for pt in np.arange(3,len(mo_yr_strings_to_label),6):
plt.text(lonx_to_label[pt] + 0.000625 * width,laty_to_label[pt] + 0.017 * height,
mo_yr_strings_to_label[pt],fontsize=7,color='#15178F')
if len(float_data) != 1:
plt.annotate(str(wmoid),fontsize=10,color='#15178F',xy=(flonx[len(flonx) - 1], flaty[len(flaty) - 1]),
xytext=(flonx[len(flonx) - 1] - 0.25 * width,flaty[len(flaty) - 1] + 0.2 * height),
arrowprops=dict(arrowstyle='->',color='#15178F',alpha=0.5))
if not as_subplot:
if title is not None: plt.title(title,fontsize=16)
plt.tight_layout()
if save_png: plt.savefig(results_dir + save_as + '.png',dpi=150)
else: plt.savefig(results_dir + save_as + '.pdf')
plt.close()
elif as_subplot:
if create_subplot:
if subplot_labelsize is None: baseline_date_fontsize = 7
else: baseline_date_fontsize = subplot_labelsize
if include_year_in_date:
day_string = '{0}-{1:02d}-{2:02d}'.format(*date)
date_fontsize = baseline_date_fontsize + 1
else:
day_string = '{1}-{2:02}'.format(*date)
date_fontsize = baseline_date_fontsize + 3
plt.text(0.05,0.95,day_string,fontsize=date_fontsize,fontweight='bold',
horizontalalignment='left',verticalalignment='top',transform=plt.gca().transAxes)
if not create_subplot and subplot_add_colorbar: # deprecated?
cbar = plt.gcf().colorbar(pcm,ticks=np.arange(open_sic,101,10),format='%.0f%%',extend=extend_cmap,
orientation='vertical',fraction=0.03,aspect=30)
cbar.ax.tick_params(labelsize=subplot_labelsize,left=True,right=False,labelleft=True,labelright=False)
cbar.outline.set_linewidth(boundary_width)
if last_subplot:
plt.tight_layout(h_pad=pad,w_pad=pad,rect=(0.02,0.02,0.98,0.98))
if subplot_add_colorbar:
if spacing is not None: hspace = spacing*width/height
else: hspace = None
plt.gcf().subplots_adjust(bottom=0.05,wspace=spacing,hspace=hspace)
cbar_ax = plt.gcf().add_axes([0.2,cbar_bottom,0.6,0.015])
cbar = plt.gcf().colorbar(pcm,format='%.0f%%',extend=extend_cmap,orientation='horizontal',cax=cbar_ax)
cbar.ax.tick_params(labelsize=subplot_labelsize+2)
cbar.outline.set_linewidth(boundary_width)
plt.savefig(results_dir + save_as + '.pdf')
plt.close()
if return_basemap and not return_pcolor: return m
elif return_basemap and return_pcolor: return m, pcm
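# Illustrative usage sketch: a hypothetical call to sea_ice_argo_spatial(). Every value below is a
# placeholder; sic_grid, sic_field, and float_data are assumed to come from the sea-ice and float
# loading routines (e.g. in load_product) elsewhere in this package.
# sea_ice_argo_spatial(data_dir='Data/', date=(2016, 7, 15), sic_grid=sic_grid, sic=sic_field,
#                      float_data=float_data, plot_argo_locs_not_trajs=True,
#                      title='Example SIC map with float locations', save_as='example_sic_map',
#                      results_dir='Results/', width=1.5e6, height=1.5e6,
#                      lat_center=-65.0, lon_center=0.0)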
def section(wmoid,results_dir,save_as,float_data,params='all',depth_lim=(0,1700),fixed_ylim=True,vert_res=10,toi=None,
mld=True,mld_ref_depth=10,mld_sigma_theta_crit=0.03,show_ice_bars=True,sea_ice_grids=None,
sea_ice_data_avail=None,show_prof_bars=False,show_prof_ticks=True,add_date_bars=None,cmap_level_mods=None,
cmap_color_mods=None,cmap_gamma_mods=None,cmap_freq_mods=None,trim_xlim=False,
create_new_figs=True,new_figsize=(8,6),add_title=True,facecolor='k',grid=True,
years_only=False,plot_ylabel=True,plot_xticklabels=True,plot_cbar=True,condensed_cbar_label=None,
smaller_text=False,force_label_size=None,density_coor=False,density_lim=(27.75,27.85),density_power_scale=15,
density_depth_contours=None,density_depth_labels=True,explicit_yticks=None,
drift_temps=None,drift_temp_baseline=None,drift_temp_depth=None):
""" Hydrographic depth section plots.
Args:
wmoid: int
params: 'all' to plot standard set of parameters listed below, or list of specific param_abbrev (if available)
depth_lim: tuple/list of bounding depths (ylim[1] will default to shallowest observation ≤ depth_lim[1])
fixed_ylim: True or False (force depth range to <<depth_lim>> [True], or set to deepest observation [False])
vert_res: vertical resolution of section (in meters); note that plot size scales inversely with this
toi: None or tuple/list of bounding times of interest (in 14-digit integer format)
mld: plot mixed-layer depth
mld_ref_depth: see gt.mld() (applies only if 'mld' is True)
mld_sigma_theta_crit: see gt.mld() (applies only if 'mld' is True)
show_ice_bars: plot bars estimating when float was under sea ice
note: calculates average SIC within a box 2° longitude x 1° latitude around the given or
interpolated float location (uses AMSR if available, then GSFC)
sea_ice_grids: created by ldp.sea_ice_data_prep(), only needed if show_ice_bars is True
sea_ice_data_avail: created by ldp.sea_ice_data_prep(), only needed if show_ice_bars is True
show_prof_bars: plot each profile as thin gray line on section
show_prof_ticks: plot each profile as small tick on top x-axis
add_date_bars: None or list of Datetimes to add vertical black bars, e.g. denoting start and end of some event
cmap_level_mods: None or dict with param_abbrevs as keys to lists of colormap levels to replace defaults
cmap_color_mods: None or dict with param_abbrevs as keys to lists of color sequences to replace defaults
cmap_gamma_mods: None or dict with param_abbrevs as keys to colormap shift parameter to replace defaults
note: gamma=1.0 is even spacing; gamma>1.0 stretches colors upwards; gamma<1.0 downwards
        cmap_freq_mods: None or dict with param_abbrevs as keys to a multiplier for adding interpolated color levels
                        between those specified in 'cmap_levels' (e.g. 3 subdivides each interval into 3)
trim_xlim: trim xlim (time axis) to match range of data for each parameter (otherwise trim to time range of
GDAC temperature data)
        create_new_figs: True to save each section as an individual new figure (with dimensions of new_figsize)
False to plot each section to the currently active plot axes
(for this, pass a single param_abbrev)
new_figsize: figure dimensions in inches: (width,height), e.g. (8,6)
note: only used if create_new_figs is True
facecolor: 'k' or other color for plot background (i.e. where data is missing or invalid)
grid: True or False to add faint x- and y-grid at locations of major time and depth/density ticks
years_only: True or False (only label years on x-axis, instead of automatic month labeling)
plot_ylabel: True or False
plot_xticklabels: True or False
plot_cbar: True or False
condensed_cbar_label: None or string to replace default colorbar parameter label
smaller_text: True or False (use smaller font sizes, e.g. for subplot)
force_label_size: None or fontsize for labels (smaller_text should be True)
density_coor: True or False (if True, use sigma_theta as y-coordinate; if False, use depth as y-coordinate)
density_lim: tuple/list of bounding sigma_theta values (only used if density_coor is True)
density_power_scale: power exponent to stretch deeper density levels / condense near-surface levels
density_depth_contours: None or list of depths to contour and label when plotting with density y-coordinate
density_depth_labels: False or True (label the depth contours described above)
NOTE: this requires manual input (click and Return) to position contour labels
explicit_yticks: None or list/array of ytick locations (depths or sigma_theta values)
drift_temps: None or dict containing float drift-depth temperature time series with keys 'datetime' and 'temp'
drift_temp_baseline: None or potential temperature value to use as baseline from which to plot drift_temps
drift_temp_depth: None or depth to use as baseline from which to plot drift_temps
"""
if params == 'all':
param_abbrevs = np.array(['ptmp','psal','Nsquared','PV','destab','Oxygen','OxygenSat','pHinsitu','Nitrate',
'Chl_a'])
# full list of parameters below; implement custom colormaps as needed:
# param_abbrevs = np.array(['ptmp', 'psal', 'Nsquared', 'PV', 'destab', 'Oxygen', 'OxygenSat', 'Nitrate',
# 'Chl_a', 'pHinsitu', 'pH25C', 'TALK_LIAR', 'DIC_LIAR', 'pCO2_LIAR'])
else:
param_abbrevs = params
cmap_levels = {}
cmap_levels['ptmp'] = [0.0,0.2,0.4,0.6,0.8,1.0,1.2]
cmap_levels['psal'] = [34.66,34.67,34.68,34.69,34.70]
cmap_levels['Oxygen'] = [190,195,200,205,210,215,220]
cmap_levels['OxygenSat'] = [54,55,56,57,58,59,60,61,62,63,64,65,70,80,90,100,110]
cmap_levels['Nsquared'] = [0,5,10,15,20,25,50,100,500,1000]
cmap_levels['PV'] = [0,5,10,25,50,100,250,500,1000,5000]
cmap_levels['sigma_theta'] = [27.0,27.78,27.79,27.80,27.81,27.82,27.83,27.84,27.85]
cmap_levels['destab'] = [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7] # same range but even spacing
cmap_levels['pHinsitu'] = np.arange(7.84,8.16,0.04)
cmap_levels['Nitrate'] = np.arange(20.0,34.01,1.0)
cmap_levels['Chl_a_corr'] = np.arange(0.0,2.0,0.25)
cmap_extend = {}
cmap_extend['ptmp'] = 'both'
cmap_extend['psal'] = 'both'
cmap_extend['Oxygen'] = 'both'
cmap_extend['OxygenSat'] = 'both'
cmap_extend['Nsquared'] = 'both'
cmap_extend['PV'] = 'both'
cmap_extend['sigma_theta'] = 'both'
cmap_extend['destab'] = 'max'
cmap_extend['pHinsitu'] = 'both'
cmap_extend['Nitrate'] = 'both'
cmap_extend['Chl_a_corr'] = 'both'
cmap_under_over = {}
cmap_under_over['ptmp'] = ['#252766','#62001d'] # darker purple/blue, darker red
cmap_under_over['psal'] = ['#271E6A','#fcf6d1'] # darker purple/blue, lighter cream
cmap_under_over['Oxygen'] = ['0.9','#660000'] # light grey-white, darker version of 'maroon'
cmap_under_over['Nsquared'] = ['#000099','0.3'] # darker version of 'blue', dark grey
cmap_under_over['PV'] = ['#000099','0.3'] # same as above
cmap_under_over['destab'] = [None,'#2d004e'] # darker version of 'indigo'
cmap_colors = {}
# useful resources for color picking: https://matplotlib.org/examples/color/named_colors.html
# http://www.color-hex.com/
cmap_colors['ptmp'] = ['#353992','#7294C2','#A5C4DD','#F9FCCF','#F2CF85','#CB533B','#8D002A']
cmap_colors['psal'] = ['#252A83','#22369C','#215091','#306489','#3F7687','#569487',
'#6EB380','#87C574','#B4D56D','#DCE184','#FAEDA3']
cmap_colors['Oxygen'] = ['white','maroon'] # previously ended with 'teal', started with '0.7'
cmap_colors['OxygenSat'] = ['0.7','white','maroon','teal']
cmap_colors['Nsquared'] = ['blue','#ffe34c','firebrick'] # ffe34c is lighter version of 'gold'
cmap_colors['PV'] = ['blue','gold','firebrick']
cmap_colors['sigma_theta'] = ['seagreen','white','coral','0.2']
cmap_colors['destab'] = ['yellow','#9366b4','indigo'] #9366b4 is lighter indigo
cmap_colors['pHinsitu'] = ['green','white','red','blue','orange']
cmap_colors['Nitrate'] = ['orange','blue','red','white','green']
cmap_colors['Chl_a_corr'] = ['#11114e','white','palegoldenrod','#005000']
cmap_gamma = {}
cmap_gamma['ptmp'] = 0.9
cmap_gamma['psal'] = 0.7
cmap_gamma['Oxygen'] = 1.75
cmap_gamma['OxygenSat'] = 0.5
cmap_gamma['Nsquared'] = 1.1
cmap_gamma['PV'] = 0.7
cmap_gamma['sigma_theta'] = 0.6 # colorbar is reversed below
cmap_gamma['destab'] = 0.7 # colorbar is reversed below
cmap_gamma['pHinsitu'] = 1.0
cmap_gamma['Nitrate'] = 1.0
cmap_gamma['Chl_a_corr'] = 1.0
cmap_freq = {}
cmap_freq['ptmp'] = 2
cmap_freq['psal'] = 3
cmap_freq['Oxygen'] = 2
cmap_freq['OxygenSat'] = 2
cmap_freq['Nsquared'] = 1
cmap_freq['PV'] = 1
cmap_freq['sigma_theta'] = 4
cmap_freq['destab'] = 2
cmap_freq['pHinsitu'] = 8
cmap_freq['Nitrate'] = 8
cmap_freq['Chl_a_corr'] = 5
if cmap_level_mods is not None:
for param in cmap_level_mods.keys():
cmap_levels[param] = cmap_level_mods[param]
if cmap_color_mods is not None:
for param in cmap_color_mods.keys():
cmap_colors[param] = cmap_color_mods[param]
if cmap_gamma_mods is not None:
for param in cmap_gamma_mods.keys():
cmap_gamma[param] = cmap_gamma_mods[param]
if cmap_freq_mods is not None:
for param in cmap_freq_mods.keys():
cmap_freq[param] = cmap_freq_mods[param]
prof_match = np.zeros(len(float_data['profiles'])).astype(bool)
for p in np.arange(len(prof_match)):
if toi is not None:
if toi[0] <= float_data['profiles'][p]['datetime'] <= toi[1]:
prof_match[p] = True
else:
prof_match[p] = True
prof_indices_to_plot = np.where(prof_match)[0]
if mld or show_ice_bars:
datetime_coord_profs = []
datetime_coord_as_tuples = []
mld_data = []
prof_lats = []
prof_lons = []
for pi in prof_indices_to_plot:
datetime_tuple_format = tt.convert_14_to_tuple(float_data['profiles'][pi]['datetime'])
datetime_coord_as_tuples.append(datetime_tuple_format)
datetime_coord_profs.append(tt.convert_tuple_to_datetime(datetime_tuple_format))
this_mld = gt.mld(float_data['profiles'][pi],ref_depth=mld_ref_depth,
sigma_theta_crit=mld_sigma_theta_crit,verbose_warn=False)
if density_coor:
# actually a density value:
this_mld = gt.vert_prof_eval(float_data['profiles'][pi],'sigma_theta',this_mld,extrap='nearest')
# convert to power-scaled density; ignore MLDs outside plotting range
if this_mld < density_lim[0]: this_mld = np.NaN
this_mld = (this_mld - density_lim[0])**density_power_scale
mld_data.append(this_mld)
prof_lats.append(float_data['profiles'][pi]['lat'])
prof_lons.append(float_data['profiles'][pi]['lon'])
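        # helper: calendar.timegm() treats the profile timetuple as UTC, yielding POSIX timestamps so
        # that float positions can be linearly interpolated onto the daily time axis built below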
def DatetimeToTimestampForInterp(dt):
return calendar.timegm(dt.timetuple())
if show_ice_bars:
date_coord_daily = tt.dates_in_range(datetime_coord_as_tuples[0][0:3],datetime_coord_as_tuples[-1][0:3])
datetime_coord_daily = [tt.convert_tuple_to_datetime(date_tuple) for date_tuple in date_coord_daily]
timestamp_coord_daily = [DatetimeToTimestampForInterp(dt) for dt in datetime_coord_daily]
timestamp_coord_profs = [DatetimeToTimestampForInterp(dt) for dt in datetime_coord_profs]
specific_lat_coord_for_ice = np.interp(timestamp_coord_daily,timestamp_coord_profs,prof_lats)
specific_lon_coord_for_ice = np.interp(timestamp_coord_daily,timestamp_coord_profs,prof_lons)
lat_coord_for_ice = []
lon_coord_for_ice = []
for pos_idx in range(len(specific_lat_coord_for_ice)):
lat_coord_for_ice.append([specific_lat_coord_for_ice[pos_idx] - 0.5,
specific_lat_coord_for_ice[pos_idx] + 0.5])
lon_coord_for_ice.append([specific_lon_coord_for_ice[pos_idx] - 1.0,
specific_lon_coord_for_ice[pos_idx] + 1.0])
sic_coord = ldp.sea_ice_concentration_along_track(date_coord_daily,lat_coord_for_ice,lon_coord_for_ice,
sea_ice_grids,sea_ice_data_avail)
for param_index, param_abbrev in enumerate(param_abbrevs):
param_skip = True
for pi in prof_indices_to_plot:
if param_abbrev in float_data['profiles'][pi].keys(): param_skip = False
if param_skip: continue
datetime_coord = []
section_data = []
if density_coor: depth_data = []
obs_range = []
for pi in prof_indices_to_plot:
if param_abbrev in float_data['profiles'][pi].keys():
if float_data['profiles'][pi][param_abbrev]['data'].size == 0: continue
z_vec, data_vec = gt.vert_prof_even_spacing(float_data['profiles'][pi],param_abbrev,z_coor='depth',
spacing=vert_res,interp_method='linear',extrap='NaN',
top=depth_lim[0],bottom=depth_lim[1],verbose_error=True)
if density_coor:
obs_param = data_vec
obs_depth, obs_sigma_theta \
= gt.vert_prof_even_spacing(float_data['profiles'][pi],'sigma_theta',z_coor='depth',
spacing=vert_res,interp_method='linear',extrap='NaN',
top=depth_lim[0],bottom=depth_lim[1],verbose_error=True)
obs_good_mask = ~np.logical_or(np.isnan(obs_param),np.isnan(obs_sigma_theta))
obs_sort_order = obs_sigma_theta[obs_good_mask].argsort()
sorted_sigma_theta = obs_sigma_theta[obs_good_mask][obs_sort_order]
sorted_param = obs_param[obs_good_mask][obs_sort_order]
sorted_depth = obs_depth[obs_good_mask][obs_sort_order]
z_vec = density_lim[0] \
+ (np.arange(0, (density_lim[1]-density_lim[0])**density_power_scale,
((density_lim[1]-density_lim[0])**density_power_scale)/200)) \
** (1.0/density_power_scale)
data_vec = gt.profile_interp(sorted_param,sorted_sigma_theta,z_vec,
method='linear',out_of_bounds='NaN')
depth_vec = gt.profile_interp(sorted_depth,sorted_sigma_theta,z_vec,
method='linear',out_of_bounds='NaN')
depth_data.append(depth_vec)
z_vec[z_vec < density_lim[0]] = np.NaN
z_vec = (z_vec - density_lim[0])**density_power_scale
section_data.append(data_vec)
datetime_coord.append(tt.convert_tuple_to_datetime(tt.convert_14_to_tuple(float_data['profiles']
[pi]['datetime'])))
obs_range.append([np.min(z_vec[np.isfinite(data_vec)]),
np.max(z_vec[np.isfinite(data_vec)])])
param_name_for_cbar = float_data['profiles'][pi][param_abbrev]['name']
param_units_for_cbar = float_data['profiles'][pi][param_abbrev]['units']
section_data = np.ma.masked_invalid(np.array(section_data).T)
if density_coor: depth_data = np.ma.masked_invalid(np.array(depth_data).T)
if create_new_figs: plt.figure(figsize=new_figsize)
specified_levels = np.array(cmap_levels[param_abbrev])
more_levels = np.interp(np.arange(len(specified_levels),step=1.0/cmap_freq[param_abbrev]),
np.arange(len(specified_levels)),specified_levels,right=np.NaN)
more_levels = more_levels[~np.isnan(more_levels)]
N_colors = len(more_levels) - 1
contourf_cmap = mcolors.LinearSegmentedColormap.from_list(name=None,colors=cmap_colors[param_abbrev],
N=N_colors,gamma=cmap_gamma[param_abbrev])
if param_abbrev in cmap_under_over:
if cmap_under_over[param_abbrev][0] is not None: contourf_cmap.set_under(cmap_under_over[param_abbrev][0])
if cmap_under_over[param_abbrev][1] is not None: contourf_cmap.set_over(cmap_under_over[param_abbrev][1])
normalization = mcolors.BoundaryNorm(more_levels,ncolors=N_colors,clip=False)
# set facecolor as black (or other given color)
if density_coor: plt.gca().axhspan((density_lim[1]-density_lim[0])**density_power_scale,0,
facecolor=facecolor,zorder=1)
else: plt.gca().axhspan(depth_lim[0],depth_lim[1],facecolor=facecolor,zorder=1)
contour_handle = plt.contourf(datetime_coord,z_vec,section_data,
vmin=np.min(more_levels),vmax=np.max(more_levels),
levels=more_levels,norm=normalization,cmap=contourf_cmap,
extend=cmap_extend[param_abbrev],zorder=2)
if plot_cbar:
if show_ice_bars: shrink_cbar = 1650/(1650+175)
else: shrink_cbar = 1.0
if np.max(np.abs(specified_levels)) >= 1000: formatter = pltick.FuncFormatter(lambda x, p: format(x, ','))
else: formatter = None
cbar = plt.colorbar(ticks=specified_levels,spacing='uniform',shrink=shrink_cbar,format=formatter)
if condensed_cbar_label is not None: cbar_label = condensed_cbar_label
else: cbar_label = '{0}\n({1})'.format(param_name_for_cbar,
param_units_for_cbar)
if smaller_text:
if force_label_size is not None:
cbar_labelsize = force_label_size - 1
cbar_titlesize = force_label_size
else:
cbar_labelsize = 6
cbar_titlesize = 8
cbar.ax.tick_params(labelsize=cbar_labelsize)
cbar.set_label(label=cbar_label,rotation=90,labelpad=9,size=cbar_titlesize)
else:
cbar.set_label(label=cbar_label,rotation=90,labelpad=11) # subtracted 9
cbar.ax.set_title(param_units_for_cbar,fontsize=8)
if param_abbrev == 'destab' or param_abbrev == 'sigma_theta': cbar.ax.invert_yaxis()
if show_prof_bars:
for obs_idx, obs_dt in enumerate(datetime_coord):
plt.plot([obs_dt,obs_dt],obs_range[obs_idx],color='0.5',linewidth=0.5,zorder=3)
if add_date_bars is not None:
for dt in add_date_bars:
if not density_coor: plt.plot([dt,dt],[*depth_lim],color='0.2',linewidth=0.8,zorder=3)
else: plt.plot([dt,dt],[*(np.array(density_lim)-density_lim[0])**density_power_scale],
color='0.2',linewidth=0.8,zorder=3)
if (drift_temps is not None) and (not density_coor): # sorry, can't plot drift temps in density space
plt.plot(drift_temps['datetime'],
drift_temp_depth + (depth_lim[1]-depth_lim[0])*(drift_temp_baseline-drift_temps['temp']),
'k-',linewidth=0.01,alpha=0.5,zorder=4)
if trim_xlim:
plt.xlim([datetime_coord[0],datetime_coord[-1]])
else:
start_date = tt.convert_tuple_to_datetime(tt.convert_14_to_tuple(float_data['profiles'][0]['datetime']))
end_date = tt.convert_tuple_to_datetime(tt.convert_14_to_tuple(float_data['profiles'][-1]['datetime']))
plt.xlim([start_date,end_date])
if mld or show_ice_bars:
mld_xlim_mask = np.logical_and(np.array(datetime_coord_profs) >= datetime_coord[0],
np.array(datetime_coord_profs) <= datetime_coord[-1])
if mld:
plt.plot(np.array(datetime_coord_profs)[mld_xlim_mask],np.array(mld_data)[mld_xlim_mask],
'w-',linewidth=1.0,zorder=4)
if density_coor and density_depth_contours is not None:
depth_contours = plt.contour(datetime_coord,z_vec,depth_data,levels=density_depth_contours,
linewidths=0.5,alpha=0.75,colors='k',zorder=5)
if force_label_size: depth_contour_fontsize = force_label_size-1
else: depth_contour_fontsize = 8
if density_depth_labels:
print('>>> Waiting for manual input.\n'
'>>> Click to position contours, hit Return when done.\n'
'>>> Note: do not change figure size.')
clabels = plt.clabel(depth_contours,fmt='%d m',fontsize=depth_contour_fontsize,manual=True,
inline=True,inline_spacing=25) # removed zorder=5
for label in clabels: label.set_rotation(0)
if fixed_ylim and not density_coor: max_ylim = depth_lim[1]
else: max_ylim = np.max(np.array(obs_range))
if show_ice_bars:
sic_xlim_mask = np.logical_and(np.array(datetime_coord_daily) >= datetime_coord[0],
np.array(datetime_coord_daily) <= datetime_coord[-1])
if not smaller_text: sic_norm = -35 # meters equivalent
elif depth_lim[1] <= 300: sic_norm = -35 # hackish temp solution for upper-ocean-only sections
else: sic_norm = -175
if not density_coor:
sic_baseline = depth_lim[0] # top of section (probably 0 m, but not necessarily)
else:
sic_norm = (sic_norm/1700)*((density_lim[1]-density_lim[0])**density_power_scale)
sic_baseline = 0
plt.gca().fill_between(np.array(datetime_coord_daily)[sic_xlim_mask],
sic_baseline + (sic_norm * np.array(sic_coord)[sic_xlim_mask]),sic_baseline,
color='k',linewidth=0,zorder=5) # or #8aacb8 for blue
plt.plot([datetime_coord_daily[0],datetime_coord_daily[-1]],[sic_baseline,sic_baseline],'k-',linewidth=0.5)
if not density_coor: plt.ylim([sic_baseline + 1.2*sic_norm,max_ylim])
else: plt.ylim(sic_baseline + 1.2*sic_norm,
(density_lim[1]-density_lim[0])**density_power_scale)
else:
if not density_coor: plt.ylim([depth_lim[0],max_ylim])
else: plt.ylim(*(np.array(density_lim)-density_lim[0])**density_power_scale)
plt.gca().invert_yaxis()
if not density_coor:
if explicit_yticks is not None: plt.yticks(explicit_yticks)
plt.gca().get_yaxis().set_major_formatter(pltick.FuncFormatter(lambda x, loc: "{:,}".format(x)))
else:
plt.yticks((np.array(explicit_yticks)-density_lim[0])**density_power_scale)
plt.gca().set_yticklabels(explicit_yticks)
if show_ice_bars and not density_coor: # NOTE: weird numbers display when using this in density coordinates
current_yticks = plt.yticks()[0]
plt.yticks([sic_baseline+sic_norm,sic_baseline,*current_yticks[1:]],
['100%','0%',*["{:,}".format(yt) for yt in current_yticks[1:]]])
if smaller_text:
if force_label_size is not None: ysize = force_label_size
else: ysize = 8
else: ysize = None
if not plot_ylabel: plt.gca().yaxis.set_ticklabels([])
else: plt.gca().tick_params(axis='y',which='major',labelsize=ysize)
if plot_ylabel:
if not density_coor: plt.ylabel('Depth (m)',size=ysize)
else: plt.ylabel(r'$\sigma_\theta$ (kg/m$^3$)',size=ysize)
if show_ice_bars:
plt.ylabel('Depth (m) ',size=ysize)
plt.text(-0.14,0.93,'SIC',fontsize=ysize,rotation=0,transform=plt.gca().transAxes,
horizontalalignment='right',verticalalignment='center')
years = mdates.YearLocator()
months = mdates.MonthLocator()
if not years_only:
xaxis_formatter = mdates.DateFormatter("%b")
plt.gca().xaxis.set_major_locator(months)
plt.gca().xaxis.set_major_formatter(xaxis_formatter)
else:
xaxis_formatter = mdates.DateFormatter("%Y")
plt.gca().xaxis.set_major_locator(years)
plt.gca().xaxis.set_major_formatter(xaxis_formatter)
plt.gca().xaxis.set_minor_locator(months)
plt.xticks(rotation=45)
if not plot_xticklabels:
plt.gca().xaxis.set_ticklabels([])
elif force_label_size is not None:
plt.gca().tick_params(axis='x',which='major',labelsize=force_label_size)
if grid:
plt.grid(which='major',axis='both',color='0.6',linewidth=0.25,alpha=0.6)
plt.gca().set_axisbelow(False) # True (grid below all), 'line' (below lines), False (grid above all)
if show_prof_ticks:
top_xaxis = plt.gca().twiny()
top_xaxis.set_xlim([datetime_coord[0],datetime_coord[-1]])
top_xaxis.xaxis.set_ticks_position('top')
top_xaxis.xaxis.set_tick_params(width=0.5)
top_xaxis.set_xticks(datetime_coord)
top_xaxis.xaxis.set_ticklabels([])
if create_new_figs:
if add_title: plt.title('Float {0}'.format(wmoid))
plt.tight_layout()
plt.savefig(results_dir + save_as + param_abbrev + '.pdf')
plt.close()
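# Small illustrative helpers (hypothetical names; defaults simply mirror section()'s own defaults)
# restating the power-scaled density coordinate used by section() when density_coor=True:
#     y = (sigma_theta - density_lim[0]) ** density_power_scale
# which stretches the closely spaced deep isopycnals and compresses the near-surface range.
def _density_to_plot_coor(sigma_theta, density_lim=(27.75, 27.85), power=15):
    """Map sigma_theta (kg/m^3) onto the stretched y-coordinate used by section()."""
    return (np.asarray(sigma_theta) - density_lim[0]) ** power
def _plot_coor_to_density(y, density_lim=(27.75, 27.85), power=15):
    """Inverse of _density_to_plot_coor(): map the stretched y-coordinate back to sigma_theta."""
    return density_lim[0] + np.asarray(y) ** (1.0 / power)
# e.g. _plot_coor_to_density(_density_to_plot_coor(27.80)) returns ~27.80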
def section_compiler(wmoids,data_dir,results_dir,save_as,float_data,params,figsize=(8.5,11),depth_lim=(0,1000),
fixed_ylim=True,mld=True,sea_ice_grids=None,sea_ice_data_avail=None,add_date_bars=None,
condensed_cbar_labels=None,width_ratios=None,height_ratios=None,all_trajs=None,
traj_plot_params=None,show_ice_bars=True,density_coor=False,density_lim=None,
density_power_scale=None,density_depth_contours=None,plot_title=True,force_label_size=None,
explicit_yticks=None,w_pad=0.0,drift_temps=None,drift_temp_baseline=None,drift_temp_depth=None,
years_only=None):
""" Arrange multiple hydrographic sections and float trajectories on a single plot. Wrapper method for pt.section().
"""
plt.figure(figsize=figsize)
if all_trajs is not None:
params = ['__trajectories__',*params]
if condensed_cbar_labels is not None: condensed_cbar_labels = ['__trajectories__',*condensed_cbar_labels]
first_param_idx = 1
else:
first_param_idx = 0
subplot_grid = gridspec.GridSpec(len(params),len(wmoids),width_ratios=width_ratios,height_ratios=height_ratios)
for float_idx, wmoid in enumerate(wmoids):
for param_idx, param in enumerate(params):
plt.subplot(subplot_grid[len(wmoids)*param_idx + float_idx])
if param_idx == 0 and plot_title: plt.title('Float {0}'.format(wmoid),size=8,fontweight='bold')
if param == '__trajectories__':
argo_traj(data_dir,None,all_trajs[float_idx],*traj_plot_params[float_idx],label_dates=True,
save_as=None,label_placement=(0.04,-0.25),boundary_width=1,labelsize=4,
label_dates_12mo_only=True)
if float_idx+1 == len(wmoids):
plt.gca().set_anchor('W')
continue
if param_idx == first_param_idx: ice_bars_yes_no = show_ice_bars; show_prof_ticks = True
else: ice_bars_yes_no = False; show_prof_ticks = False
if float_idx == 0: plot_ylabel = True; density_depth_labels = True
else: plot_ylabel = False; density_depth_labels = False
if float_idx == len(wmoids)-1: plot_cbar = True
else: plot_cbar = False
if param_idx == len(params)-1: plot_xticklabels = True; dd_contours = density_depth_contours
else: plot_xticklabels = False; dd_contours = None
if param == 'ptmp' and drift_temps is not None:
dt = drift_temps[wmoid]; dtb = drift_temp_baseline; dtd = drift_temp_depth
else:
dt = None; dtb = None; dtd = None
section(wmoid,None,None,float_data[float_idx],params=[param],depth_lim=depth_lim,fixed_ylim=fixed_ylim,
mld=mld,show_ice_bars=ice_bars_yes_no,sea_ice_grids=sea_ice_grids,sea_ice_data_avail=sea_ice_data_avail,
show_prof_ticks=show_prof_ticks,add_date_bars=add_date_bars,trim_xlim=False,create_new_figs=False,
plot_ylabel=plot_ylabel,plot_xticklabels=plot_xticklabels,years_only=years_only[float_idx],plot_cbar=plot_cbar,
condensed_cbar_label=condensed_cbar_labels[param_idx],smaller_text=True,density_coor=density_coor,
density_lim=density_lim,density_power_scale=density_power_scale,force_label_size=force_label_size,
density_depth_contours=dd_contours,density_depth_labels=density_depth_labels,
explicit_yticks=explicit_yticks,drift_temps=dt,drift_temp_baseline=dtb,drift_temp_depth=dtd)
plt.tight_layout(h_pad=0.2,w_pad=w_pad) # can go negative if necessary
plt.savefig(results_dir + save_as + '.pdf')
plt.close()
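# Illustrative usage sketch: section_compiler() lays out a GridSpec of len(params) rows by
# len(wmoids) columns (plus a leading trajectory row when all_trajs is given). A hypothetical call,
# with all values below as placeholders:
# section_compiler(wmoids=[5900001, 5900002], data_dir='Data/', results_dir='Results/',
#                  save_as='compiled_sections', float_data=[float_data_1, float_data_2],
#                  params=['ptmp', 'psal', 'Oxygen'], depth_lim=(0, 1000),
#                  sea_ice_grids=sea_ice_grids, sea_ice_data_avail=sea_ice_data_avail,
#                  condensed_cbar_labels=['Temp.', 'Sal.', 'Oxygen'],
#                  years_only=[True, True])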
def prof_locations_map(results_dir,data_dir,compiled_obs,map_dimensions,
toi_range=[datetime(1900,1,1),datetime.today()],bathy_cmap='Greys_r',
seasons=[[1,3],[4,6],[7,9],[10,12]],season_colors=['orange','cyan','orchid','lime'],
season_labels=['Jan-Mar','Apr-Jun','Jul-Sep','Oct-Dec'],
manual_list_of_types=None,manual_labels_for_types=None,
manual_markers_for_types=None,manual_marker_open_for_types=None,
grid_lats=np.arange(-80,60,5),grid_lons=np.arange(-80,50,10),
lon_labels=[0,0,1,0],lat_labels=[1,0,0,0],label_contours=False,
add_epoch_title=None,fontsize=5,fontsize_extra_for_epoch=2,
add_rect_patch=None,add_circ_patch=None,add_legend=False,legend_pos='outside_bottom',
create_new_fig=False,use_existing_basemap=None,return_basemap=False,save_as=None):
""" Plot locations of hydrographic observations, as compiled by ldp.compile_hydrographic_obs(),
by season and type (source) for a given epoch.
Args:
add_rect_patch: None or [lon_W,lon_E,lat_S,lat_N]
add_circ_patch: None or [[lon_cent,lat_cent,radius_in_km], etc.], i.e. a list of params for multiple circles
legend_pos: 'outside_bottom' or 'outside_right'
"""
if use_existing_basemap is None:
fig,m = bathy_basemap(data_dir,*map_dimensions,create_new_fig=create_new_fig,figsize=(9,9),
boundary_width=1,labelsize=fontsize,grid_color='.2',
grid_lats=grid_lats,grid_lons=grid_lons,force_lon_labels=lon_labels,force_lat_labels=lat_labels,
label_contours=label_contours,cmap=bathy_cmap)
else:
m = use_existing_basemap
if add_rect_patch is not None:
ap = add_rect_patch
patch_lons = np.concatenate((np.linspace(ap[0],ap[1],100),np.linspace(ap[1],ap[1],100),
np.linspace(ap[1],ap[0],100),np.linspace(ap[0],ap[0],100)))
patch_lats = np.concatenate((np.linspace(ap[3],ap[3],100),np.linspace(ap[3],ap[2],100),
np.linspace(ap[2],ap[2],100),np.linspace(ap[2],ap[3],100)))
plonx,platy = m(patch_lons,patch_lats)
patchxy = list(zip(plonx,platy))
poly = Polygon(patchxy,facecolor='white',alpha=0.1)
plt.gca().add_patch(poly)
if add_circ_patch is not None:
for circ in add_circ_patch:
circle_tuples = circle(m,*circ)
poly = Polygon(list(circle_tuples),facecolor='white',alpha=0.1)
plt.gca().add_patch(poly)
toi_mask_base = np.logical_and(np.array(compiled_obs['datetimes']) >= toi_range[0],
np.array(compiled_obs['datetimes']) <= toi_range[1])
dt_months = np.array([dt.month for dt in compiled_obs['datetimes']])
if manual_list_of_types is None: obs_types = np.unique(compiled_obs['types'][toi_mask_base])
else: obs_types = manual_list_of_types
if manual_labels_for_types is None: obs_type_labels = obs_types
else: obs_type_labels = manual_labels_for_types
if manual_markers_for_types is None: obs_type_markers = ['o','s','^','v','<','>','p','*','+','d'] # etc.
else: obs_type_markers = manual_markers_for_types
if manual_marker_open_for_types is None: obs_type_markers_open = np.tile(False,len(obs_type_markers))
else: obs_type_markers_open = manual_marker_open_for_types
for s_idx, season_months in enumerate(seasons):
toi_mask = np.logical_and(toi_mask_base,np.logical_and(dt_months >= season_months[0],
dt_months <= season_months[1]))
for t_idx, obs_type in enumerate(obs_types):
final_mask = np.logical_and(toi_mask,np.array(compiled_obs['types']) == obs_type)
if sum(final_mask) > 0:
lonx,laty = m(np.array(compiled_obs['lons'])[final_mask],np.array(compiled_obs['lats'])[final_mask])
if obs_type_markers_open[t_idx]: plt.scatter(lonx,laty,s=4.0,marker=obs_type_markers[t_idx],
facecolor='none',edgecolors=season_colors[s_idx],
linewidths=0.5)
else: plt.scatter(lonx,laty,s=4.0,marker=obs_type_markers[t_idx],
facecolor=season_colors[s_idx],edgecolors='none')
if add_epoch_title is not None:
plt.text(0.05,0.95,add_epoch_title,color='w',fontsize=fontsize+fontsize_extra_for_epoch,fontweight='bold',
horizontalalignment='left',verticalalignment='top',transform=plt.gca().transAxes)
if add_legend:
for s_idx, season_months in enumerate(seasons):
plt.plot([0,0],[np.nan,np.nan],lw=0,c=season_colors[s_idx],marker='o',ms=4,label=season_labels[s_idx])
for t_idx, obs_type in enumerate(obs_types):
if obs_type_markers_open[t_idx]:
plt.plot([0,0],[np.nan,np.nan],lw=0,marker=obs_type_markers[t_idx],ms=4,
markerfacecolor='none',markeredgecolor='k',markeredgewidth=0.5,label=obs_type_labels[t_idx])
else:
plt.plot([0,0],[np.nan,np.nan],lw=0,marker=obs_type_markers[t_idx],ms=4,
markerfacecolor='k',markeredgecolor='none',label=obs_type_labels[t_idx])
if legend_pos == 'outside_bottom':
ncol = len(seasons)+len(obs_types)
loc = 'upper right'
bbox_to_anchor = [0.5,-0.05]
handletextpad = 0.05
columnspacing = 1.5
labelspacing = None
elif legend_pos == 'outside_right':
ncol = 2
loc = 'center left'
bbox_to_anchor = [1.15,0.5]
handletextpad = 0.25
columnspacing = 1.5
labelspacing = 1.5
plt.legend(ncol=ncol,fontsize=fontsize,loc=loc,bbox_to_anchor=bbox_to_anchor,frameon=False,
handletextpad=handletextpad,columnspacing=columnspacing,labelspacing=labelspacing)
if save_as is not None:
plt.tight_layout()
plt.savefig(results_dir + save_as + '.pdf')
plt.close()
elif return_basemap:
return m
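# Note: from the indexing above, compiled_obs (as produced by ldp.compile_hydrographic_obs()) must be
# a dict of equal-length sequences with at least the keys 'datetimes' (datetime objects), 'types',
# 'lons', and 'lats'.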
def era_field(data_dir,results_dir,save_as,data,datetime_range,width,height,lat_center,lon_center,bathy_contours=[],
contour=True,contour_lims=None,n_contours=21,use_cmap=None,add_cbar=True,
existing_canvas=None,return_pcm=False,
add_wind_vectors=None,wind_vector_downsample=[5,2],wind_vector_scale=50,
add_wind_vector_key=True,wind_vector_key=20,wind_vector_key_loc=[0.8,-0.25],wind_vector_key_fontsize=8,
add_sic_contours=None,sic_contours=[50],
add_date=None,date_string_loc=[0.05,0.95],date_string_size=8,
date_string_valign='top',date_string_halign='left',average_daily=True,add_patch_lons_lats=None):
""" Plotting routine for daily ECMWF fields.
Args:
data_dir: data directory (for bathymetry files)
results_dir: None to plot on existing canvas, or directory to save plot
save_as: None (to not save figure) or filename, without extension
data: xarray DataArray containing reanalysis parameter, e.g. erai_daily['u10']
datetime_range: single datetime to plot, or range of datetimes ([start,end]) to average for plot
note: if <<average_daily>> is True, averages over all hours during a given day, from hour 0 to 23
width: Basemap plot width
height: Basemap plot height
lat_center: Basemap plot latitude center location
lon_center: Basemap plot longitude center location
bathy_contours: list of depths to add bathymetric contours
contour: True or False to draw filled contour plot of <<data>>
contour_lims: None or [min,max] to specify contour color limits
n_contours: number of contour levels to plot, if contour_lims is specified (default = 21)
add_cbar: plot colorbar? True or False
existing_canvas: None or handle of Basemap instance (m) to plot onto
return_pcm: return handle ('pcm') to pcolormesh of field
add_wind_vectors: None or xarray DataArrays for [u,v] to plot wind vectors from fields
note: assumes lats and lons are same as for main 'data' DataArray above
wind_vector_downsample: [i,j] to plot every ith u-wind and jth v-wind vector
wind_vector_scale: length of wind vectors (larger numbers are smaller vectors)
add_wind_vector_key: add quiver key next to plot, representing size of [N] m/s wind vector
wind_vector_key: depict a [N] m/s vector key
wind_vector_key_loc: location of wind vector in axes coordinates from bottom left (x, y)
wind_vector_key_fontsize: fontsize of 'N m/s' key text
add_sic_contours: None or list of [sic_grid['lons'],sic_grid['lats'],sic_field]
sic_contours: [50] or list of other SIC % levels to contour
add_date: None or formatted date string to add as text
date_string_loc: location in axes coordinates (x,y) from bottom left for date string
date_string_size: fontsize for date string
date_string_valign: vertical alignment of date string location ('top' or 'bottom')
date_string_halign: horizontal alignment of date string location ('left' or 'right')
average_daily: if True, ignores hour value(s) of <<datetime_range>> and averages over day
if False, keeps hour value(s) of <<datetime_range>>
add_patch_lons_lats: None or box coordinates to plot as shaded patch: [lon_W,lon_E,lat_S,lat_N]
"""
if contour_lims is not None: contour_levs = np.linspace(contour_lims[0],contour_lims[1],n_contours)
else: contour_levs = None
if not isinstance(datetime_range,list) and not isinstance(datetime_range,tuple):
dtr = [datetime_range,datetime_range] # i.e. if only single datetime specified
else:
dtr = datetime_range
if average_daily: # if not average_daily, interpret datetime_range exactly and slice accordingly
dtr[0] = datetime(dtr[0].year,dtr[0].month,dtr[0].day,0)
dtr[1] = datetime(dtr[1].year,dtr[1].month,dtr[1].day,23,59,59)
data = data.sel(time=slice(*dtr)).mean(dim='time',keep_attrs=True)
if add_wind_vectors is not None:
u_data = add_wind_vectors[0].sel(time=slice(*dtr)).mean(dim='time',keep_attrs=True)
v_data = add_wind_vectors[1].sel(time=slice(*dtr)).mean(dim='time',keep_attrs=True)
if existing_canvas is None:
fig, m = lambert_basemap(width,height,lat_center,lon_center,
boundary_width=1,lon_labels_on_top=True,resolution='i')
else:
fig = plt.gcf()
m = existing_canvas
lon_grid, lat_grid = np.meshgrid(data['lons'],data['lats'])
rlons, rlats = m(lon_grid, lat_grid)
if contour:
if use_cmap is None:
if contour_lims is not None:
if contour_lims[1] == abs(contour_lims[0]): cmap = 'PRGn'
else: cmap = 'viridis'
else:
cmap = 'viridis'
else:
cmap = use_cmap
with warnings.catch_warnings(): # ignore Dask true_divide warning upon evaluating data
warnings.simplefilter('ignore')
pcm = m.contourf(rlons,rlats,data,levels=contour_levs,cmap=cmap,extend='both')
if add_cbar:
cbar = plt.colorbar()
cbar.set_label('{0} ({1})'.format(data.attrs['long_name'],data.attrs['units']),rotation=-90,labelpad=15)
if add_wind_vectors:
[i,j] = wind_vector_downsample
Q = plt.quiver(rlons[::j,::i],rlats[::j,::i],u_data[::j,::i],v_data[::j,::i],
units='width',scale=wind_vector_scale,width=0.01,zorder=10)
if add_wind_vector_key: plt.quiverkey(Q,*wind_vector_key_loc,wind_vector_key,
r'{0} '.format(wind_vector_key) + r'm s$^{-1}$',
fontproperties={'size':wind_vector_key_fontsize})
if add_sic_contours is not None:
sic_lonx,sic_laty = m(add_sic_contours[0],add_sic_contours[1])
plt.contour(sic_lonx,sic_laty,add_sic_contours[2],levels=sic_contours,
colors='k',linewidths=0.5,alpha=0.8,zorder=5)
if len(bathy_contours) > 0:
etopo_lons,etopo_lats,etopo = ldp.load_bathy(data_dir)
retopolons,retopolats = m(*np.meshgrid(etopo_lons,etopo_lats))
olevels = bathy_contours # check etopo.ravel().min()
m.contour(retopolons,retopolats,etopo,olevels,linewidths=0.5,linestyles='solid',colors='#808080',
alpha=0.5,zorder=4)
if add_patch_lons_lats is not None:
pll = add_patch_lons_lats
patch_lons = np.concatenate((np.linspace(pll[0],pll[1],100),np.linspace(pll[1],pll[1],100),
np.linspace(pll[1],pll[0],100),np.linspace(pll[0],pll[0],100)))
patch_lats = np.concatenate((np.linspace(pll[3],pll[3],100),np.linspace(pll[3],pll[2],100),
np.linspace(pll[2],pll[2],100),np.linspace(pll[2],pll[3],100)))
plonx,platy = m(patch_lons,patch_lats)
patchxy = list(zip(plonx,platy))
poly = Polygon(patchxy,facecolor='white',alpha=0.25,zorder=3)
plt.gca().add_patch(poly)
if add_date is not None:
plt.text(*date_string_loc,add_date,fontsize=date_string_size,fontweight='bold',
horizontalalignment=date_string_halign,verticalalignment=date_string_valign,
transform=plt.gca().transAxes)
if save_as is not None:
plt.savefig(results_dir + save_as + '.pdf')
plt.close()
if return_pcm and contour:
return pcm
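# Illustrative usage sketch: era_field() expects an xarray DataArray with a 'time' dimension and
# 'lons'/'lats' coordinates (e.g. erai_daily['u10'], as noted in the docstring). A hypothetical call,
# with all values below as placeholders:
# era_field(data_dir='Data/', results_dir='Results/', save_as='u10_example',
#           data=erai_daily['u10'], datetime_range=datetime(2016, 7, 15),
#           width=1.5e6, height=1.5e6, lat_center=-65.0, lon_center=0.0,
#           contour_lims=[-20, 20], add_cbar=True,
#           add_wind_vectors=[erai_daily['u10'], erai_daily['v10']],  # v10 assumed to exist alongside u10
#           add_date='2016-07-15')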
############# AUXILIARY (INTERNAL) FUNCTIONS ################
def lambert_basemap(width,height,lat_center,lon_center,boundary_width=2,create_new_fig=True,figsize=None,resolution='i',
draw_grid=True,lon_labels_on_top=False,grid_color='0.2',meridians=np.arange(-80,50,20)):
""" Creates basic figure on a Lambert azimuthal equal-area projection.
"""
warnings.filterwarnings('ignore', category=mcbook.mplDeprecation)
if create_new_fig: fig = plt.figure(figsize=figsize)
else: fig = plt.gcf()
m = Basemap(width=width, height=height, resolution=resolution, projection='laea', lat_ts=lat_center,
lat_0=lat_center, lon_0=lon_center)
m.drawcoastlines(color='k')
m.drawmapboundary(linewidth=boundary_width)
m.fillcontinents()
if draw_grid:
if lon_labels_on_top: lon_labels = [0,0,1,0]
else: lon_labels = [0,0,0,1]
m.drawmeridians(meridians, linewidth=0.5, color=grid_color, labels=lon_labels)
m.drawmeridians(np.arange(-80, 50, 10), linewidth=0.5, color=grid_color)
m.drawparallels(np.arange(-80, 60, 5), linewidth=0.5, color=grid_color, labels=[1, 0, 0, 0])
return fig, m
def bathy_basemap(data_dir,width,height,lat_center,lon_center,create_new_fig=True,figsize=None,
labelsize=None,lon_labels_on_top=False,force_lon_labels=None,force_lat_labels=[1,0,0,0],
boundary_width=2,grid_color='0.2',grid_lats=np.arange(-80,60,5),grid_lons=np.arange(-80,50,10),
label_contours=False,cmap=cmocean.cm.deep_r,bathy_alpha=1.0):
""" Draws bathymetry on LAEA (Lambert) basemap.
Currently the figure parameters are not entirely defined through arguments above. This could be remedied.
"""
fig, m = lambert_basemap(width,height,lat_center,lon_center,create_new_fig=create_new_fig,figsize=figsize,
draw_grid=False,boundary_width=boundary_width)
lons, lats, etopo = ldp.load_bathy(data_dir)
rlons, rlats = m(*np.meshgrid(lons, lats))
olevels = np.arange(-7000, 760, 750) # check etopo.ravel().min()
cf = m.contourf(rlons, rlats, etopo, olevels, cmap=cmap, alpha=bathy_alpha, zorder=1)
if label_contours:
if cmap == 'Greys_r': contour_line_cmap = 'Greys_r'
else: contour_line_cmap = None
co = m.contour(rlons,rlats,-1*etopo,[2500,3250,4000,4750],linewidths=0.0,alpha=0.5,cmap=contour_line_cmap,zorder=1)
print('>>> Waiting for manual input.\n'
'>>> Click to position contours, hit Return when done.\n'
'>>> Note: do not change figure size.')
if cmap == 'Greys_r': clabel_single_color = 'k' # or change to None to use reversed grayscale cmap
else: clabel_single_color = 'w'
plt.clabel(co,colors=clabel_single_color,fmt='%d',fontsize=labelsize-1,manual=True,inline=True)
if lon_labels_on_top: lon_labels = [0, 0, 1, 0]
else: lon_labels = [0, 0, 0, 1]
lat_labels = [1,0,0,0]
if force_lon_labels is not None: lon_labels = force_lon_labels
if force_lat_labels is not None: lat_labels = force_lat_labels
m.drawmeridians(grid_lons,color=grid_color,linewidth=0.5,labels=lon_labels,fontsize=labelsize,zorder=2)
m.drawparallels(grid_lats,color=grid_color,linewidth=0.5,labels=lat_labels,fontsize=labelsize,zorder=2)
return fig, m
def blank_inset_basemap(width,height,lat_center,lon_center,create_new_fig=True,
boundary_width=2,coastline_width=1,lon_labels=[0,0,0,0],lat_labels=[0,0,0,0],
grid_lats=np.arange(-80,60,5),grid_lons=np.arange(-80,50,10),labelsize=None,grid_color='0.2',
fill_continent_zorder=None,lat_lon_line_zorder=None,fill_continent_color='0.8',
resolution='i'):
""" Creates figure with regional Lambert basemap, to be filled elsewhere with a plot.
"""
if create_new_fig: fig = plt.figure(figsize=(9, 7))
else: fig = plt.gcf()
m = Basemap(width=width, height=height, resolution=resolution, projection='laea', lat_ts=lat_center,
lat_0=lat_center, lon_0=lon_center)
m.drawcoastlines(linewidth=coastline_width,color='k',zorder=fill_continent_zorder)
m.drawmapboundary(linewidth=boundary_width, fill_color='#f0ffff')
m.fillcontinents(color=fill_continent_color,zorder=fill_continent_zorder)
if create_new_fig:
print('Temporary warning from pt.blank_inset_basemap(): '
'explicit setting of lat/lon labels is turned off; make sure this is okay')
# lon_labels = [0, 0, 0, 1]
# lat_labels = [1, 0, 0, 0]
m.drawmeridians(grid_lons, color=grid_color, linewidth=0.5, labels=lon_labels, fontsize=labelsize,
zorder=lat_lon_line_zorder)
m.drawparallels(grid_lats, color=grid_color, linewidth=0.5, labels=lat_labels, fontsize=labelsize,
zorder=lat_lon_line_zorder)
return fig, m
|
[
"matplotlib.pyplot.title",
"matplotlib.dates.MonthLocator",
"numpy.abs",
"time_tools.convert_tuple_to_8_int",
"matplotlib.pyplot.quiver",
"load_product.sea_ice_concentration_along_track",
"numpy.isnan",
"matplotlib.patches.Polygon",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.contour",
"geo_tools.mld",
"matplotlib.pyplot.gca",
"numpy.interp",
"matplotlib.pyplot.tight_layout",
"numpy.unique",
"time_tools.convert_14_to_tuple",
"load_product.load_bathy",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"numpy.meshgrid",
"time_tools.convert_8_int_to_tuple",
"warnings.simplefilter",
"time_tools.dates_in_range",
"matplotlib.pyplot.close",
"matplotlib.pyplot.yticks",
"numpy.isfinite",
"matplotlib.pyplot.colorbar",
"geo_tools.profile_interp",
"matplotlib.dates.DateFormatter",
"numpy.max",
"warnings.catch_warnings",
"numpy.linspace",
"matplotlib.pyplot.xticks",
"time_tools.convert_tuple_to_datetime",
"Circles.circles.circle",
"datetime.datetime.today",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"datetime.datetime",
"matplotlib.pyplot.text",
"numpy.sort",
"numpy.min",
"matplotlib.pyplot.pcolormesh",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.clabel",
"geo_tools.vert_prof_eval",
"matplotlib.pyplot.plot",
"warnings.filterwarnings",
"os.path.isdir",
"numpy.logical_and",
"matplotlib.colors.BoundaryNorm",
"matplotlib.pyplot.scatter",
"geo_tools.vert_prof_even_spacing",
"numpy.where",
"numpy.array",
"numpy.diff",
"matplotlib.dates.YearLocator",
"mpl_toolkits.basemap.Basemap",
"matplotlib.pyplot.savefig"
] |
[((515, 565), 'os.path.isdir', 'os.path.isdir', (['"""/Applications/anaconda/share/proj"""'], {}), "('/Applications/anaconda/share/proj')\n", (528, 565), False, 'import os\n'), ((775, 839), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""', '""".*is_string_like function.*"""'], {}), "('ignore', '.*is_string_like function.*')\n", (798, 839), False, 'import warnings\n'), ((1679, 1706), 'numpy.arange', 'np.arange', (['(-3500)', '(-100)', '(500)'], {}), '(-3500, -100, 500)\n', (1688, 1706), True, 'import numpy as np\n'), ((1741, 1762), 'numpy.arange', 'np.arange', (['(-80)', '(60)', '(5)'], {}), '(-80, 60, 5)\n', (1750, 1762), True, 'import numpy as np\n'), ((1771, 1793), 'numpy.arange', 'np.arange', (['(-80)', '(50)', '(10)'], {}), '(-80, 50, 10)\n', (1780, 1793), True, 'import numpy as np\n'), ((2343, 2408), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'mcbook.mplDeprecation'}), "('ignore', category=mcbook.mplDeprecation)\n", (2366, 2408), False, 'import warnings\n'), ((5041, 5248), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['(lonx - sic_lon_edge_to_center)', '(laty - sic_lat_edge_to_center)', 'sic_nan_masked'], {'cmap': 'ice_cmap', 'edgecolors': '"""None"""', 'rasterized': 'rasterized', 'zorder': '(1)', 'alpha': '(1.0)', 'vmin': 'open_sic', 'vmax': 'max_sic'}), "(lonx - sic_lon_edge_to_center, laty - sic_lat_edge_to_center,\n sic_nan_masked, cmap=ice_cmap, edgecolors='None', rasterized=rasterized,\n zorder=1, alpha=1.0, vmin=open_sic, vmax=max_sic)\n", (5055, 5248), True, 'import matplotlib.pyplot as plt\n'), ((20185, 20212), 'numpy.arange', 'np.arange', (['(7.84)', '(8.16)', '(0.04)'], {}), '(7.84, 8.16, 0.04)\n', (20194, 20212), True, 'import numpy as np\n'), ((20240, 20267), 'numpy.arange', 'np.arange', (['(20.0)', '(34.01)', '(1.0)'], {}), '(20.0, 34.01, 1.0)\n', (20249, 20267), True, 'import numpy as np\n'), ((20298, 20323), 'numpy.arange', 'np.arange', (['(0.0)', '(2.0)', '(0.25)'], {}), '(0.0, 2.0, 0.25)\n', (20307, 20323), True, 'import numpy as np\n'), ((41350, 41377), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (41360, 41377), True, 'import matplotlib.pyplot as plt\n'), ((44324, 44364), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'h_pad': '(0.2)', 'w_pad': 'w_pad'}), '(h_pad=0.2, w_pad=w_pad)\n', (44340, 44364), True, 'import matplotlib.pyplot as plt\n'), ((44399, 44442), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(results_dir + save_as + '.pdf')"], {}), "(results_dir + save_as + '.pdf')\n", (44410, 44442), True, 'import matplotlib.pyplot as plt\n'), ((44447, 44458), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (44456, 44458), True, 'import matplotlib.pyplot as plt\n'), ((45015, 45036), 'numpy.arange', 'np.arange', (['(-80)', '(60)', '(5)'], {}), '(-80, 60, 5)\n', (45024, 45036), True, 'import numpy as np\n'), ((45045, 45067), 'numpy.arange', 'np.arange', (['(-80)', '(50)', '(10)'], {}), '(-80, 50, 10)\n', (45054, 45067), True, 'import numpy as np\n'), ((47380, 47436), 'numpy.array', 'np.array', (["[dt.month for dt in compiled_obs['datetimes']]"], {}), "([dt.month for dt in compiled_obs['datetimes']])\n", (47388, 47436), True, 'import numpy as np\n'), ((55688, 55727), 'numpy.meshgrid', 'np.meshgrid', (["data['lons']", "data['lats']"], {}), "(data['lons'], data['lats'])\n", (55699, 55727), True, 'import numpy as np\n'), ((58995, 59017), 'numpy.arange', 'np.arange', (['(-80)', '(50)', '(20)'], {}), '(-80, 50, 20)\n', (59004, 
59017), True, 'import numpy as np\n'), ((59106, 59171), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'mcbook.mplDeprecation'}), "('ignore', category=mcbook.mplDeprecation)\n", (59129, 59171), False, 'import warnings\n'), ((59278, 59415), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'width': 'width', 'height': 'height', 'resolution': 'resolution', 'projection': '"""laea"""', 'lat_ts': 'lat_center', 'lat_0': 'lat_center', 'lon_0': 'lon_center'}), "(width=width, height=height, resolution=resolution, projection=\n 'laea', lat_ts=lat_center, lat_0=lat_center, lon_0=lon_center)\n", (59285, 59415), False, 'from mpl_toolkits.basemap import Basemap\n'), ((60210, 60231), 'numpy.arange', 'np.arange', (['(-80)', '(60)', '(5)'], {}), '(-80, 60, 5)\n', (60219, 60231), True, 'import numpy as np\n'), ((60240, 60262), 'numpy.arange', 'np.arange', (['(-80)', '(50)', '(10)'], {}), '(-80, 50, 10)\n', (60249, 60262), True, 'import numpy as np\n'), ((60731, 60755), 'load_product.load_bathy', 'ldp.load_bathy', (['data_dir'], {}), '(data_dir)\n', (60745, 60755), True, 'import load_product as ldp\n'), ((60817, 60843), 'numpy.arange', 'np.arange', (['(-7000)', '(760)', '(750)'], {}), '(-7000, 760, 750)\n', (60826, 60843), True, 'import numpy as np\n'), ((62385, 62406), 'numpy.arange', 'np.arange', (['(-80)', '(60)', '(5)'], {}), '(-80, 60, 5)\n', (62394, 62406), True, 'import numpy as np\n'), ((62415, 62437), 'numpy.arange', 'np.arange', (['(-80)', '(50)', '(10)'], {}), '(-80, 50, 10)\n', (62424, 62437), True, 'import numpy as np\n'), ((62821, 62958), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'width': 'width', 'height': 'height', 'resolution': 'resolution', 'projection': '"""laea"""', 'lat_ts': 'lat_center', 'lat_0': 'lat_center', 'lon_0': 'lon_center'}), "(width=width, height=height, resolution=resolution, projection=\n 'laea', lat_ts=lat_center, lat_0=lat_center, lon_0=lon_center)\n", (62828, 62958), False, 'from mpl_toolkits.basemap import Basemap\n'), ((4808, 4821), 'numpy.isnan', 'np.isnan', (['sic'], {}), '(sic)\n', (4816, 4821), True, 'import numpy as np\n'), ((5562, 5634), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'pad': '(0.05)', 'shrink': '(0.65)', 'format': '"""%.0f%%"""', 'extend': 'extend_cmap'}), "(pad=0.05, shrink=0.65, format='%.0f%%', extend=extend_cmap)\n", (5574, 5634), True, 'import matplotlib.pyplot as plt\n'), ((5774, 5886), 'matplotlib.pyplot.contour', 'plt.contour', (['lonx', 'laty', 'polynya_grid'], {'levels': '[0.999]', 'colors': '"""#00FF00"""', 'linewidths': '(0.7)', 'alpha': '(0.8)', 'zorder': '(2)'}), "(lonx, laty, polynya_grid, levels=[0.999], colors='#00FF00',\n linewidths=0.7, alpha=0.8, zorder=2)\n", (5785, 5886), True, 'import matplotlib.pyplot as plt\n'), ((5949, 5973), 'load_product.load_bathy', 'ldp.load_bathy', (['data_dir'], {}), '(data_dir)\n', (5963, 5973), True, 'import load_product as ldp\n'), ((11471, 11489), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11487, 11489), True, 'import matplotlib.pyplot as plt\n'), ((11636, 11647), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11645, 11647), True, 'import matplotlib.pyplot as plt\n'), ((19095, 19208), 'numpy.array', 'np.array', (["['ptmp', 'psal', 'Nsquared', 'PV', 'destab', 'Oxygen', 'OxygenSat',\n 'pHinsitu', 'Nitrate', 'Chl_a']"], {}), "(['ptmp', 'psal', 'Nsquared', 'PV', 'destab', 'Oxygen', 'OxygenSat',\n 'pHinsitu', 'Nitrate', 'Chl_a'])\n", (19103, 19208), True, 'import numpy as np\n'), ((24064, 24084), 'numpy.where', 
'np.where', (['prof_match'], {}), '(prof_match)\n', (24072, 24084), True, 'import numpy as np\n'), ((25445, 25535), 'time_tools.dates_in_range', 'tt.dates_in_range', (['datetime_coord_as_tuples[0][0:3]', 'datetime_coord_as_tuples[-1][0:3]'], {}), '(datetime_coord_as_tuples[0][0:3],\n datetime_coord_as_tuples[-1][0:3])\n', (25462, 25535), True, 'import time_tools as tt\n'), ((25873, 25939), 'numpy.interp', 'np.interp', (['timestamp_coord_daily', 'timestamp_coord_profs', 'prof_lats'], {}), '(timestamp_coord_daily, timestamp_coord_profs, prof_lats)\n', (25882, 25939), True, 'import numpy as np\n'), ((25975, 26041), 'numpy.interp', 'np.interp', (['timestamp_coord_daily', 'timestamp_coord_profs', 'prof_lons'], {}), '(timestamp_coord_daily, timestamp_coord_profs, prof_lons)\n', (25984, 26041), True, 'import numpy as np\n'), ((26511, 26643), 'load_product.sea_ice_concentration_along_track', 'ldp.sea_ice_concentration_along_track', (['date_coord_daily', 'lat_coord_for_ice', 'lon_coord_for_ice', 'sea_ice_grids', 'sea_ice_data_avail'], {}), '(date_coord_daily, lat_coord_for_ice,\n lon_coord_for_ice, sea_ice_grids, sea_ice_data_avail)\n', (26548, 26643), True, 'import load_product as ldp\n'), ((30138, 30173), 'numpy.array', 'np.array', (['cmap_levels[param_abbrev]'], {}), '(cmap_levels[param_abbrev])\n', (30146, 30173), True, 'import numpy as np\n'), ((30492, 30627), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'mcolors.LinearSegmentedColormap.from_list', ([], {'name': 'None', 'colors': 'cmap_colors[param_abbrev]', 'N': 'N_colors', 'gamma': 'cmap_gamma[param_abbrev]'}), '(name=None, colors=cmap_colors[\n param_abbrev], N=N_colors, gamma=cmap_gamma[param_abbrev])\n', (30533, 30627), True, 'import matplotlib.colors as mcolors\n'), ((30993, 31056), 'matplotlib.colors.BoundaryNorm', 'mcolors.BoundaryNorm', (['more_levels'], {'ncolors': 'N_colors', 'clip': '(False)'}), '(more_levels, ncolors=N_colors, clip=False)\n', (31013, 31056), True, 'import matplotlib.colors as mcolors\n'), ((38993, 39013), 'matplotlib.dates.YearLocator', 'mdates.YearLocator', ([], {}), '()\n', (39011, 39013), True, 'import matplotlib.dates as mdates\n'), ((39031, 39052), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {}), '()\n', (39050, 39052), True, 'import matplotlib.dates as mdates\n'), ((39507, 39530), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (39517, 39530), True, 'import matplotlib.pyplot as plt\n'), ((44568, 44588), 'datetime.datetime', 'datetime', (['(1900)', '(1)', '(1)'], {}), '(1900, 1, 1)\n', (44576, 44588), False, 'from datetime import datetime, timedelta\n'), ((44587, 44603), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (44601, 44603), False, 'from datetime import datetime, timedelta\n'), ((46878, 46924), 'matplotlib.patches.Polygon', 'Polygon', (['patchxy'], {'facecolor': '"""white"""', 'alpha': '(0.1)'}), "(patchxy, facecolor='white', alpha=0.1)\n", (46885, 46924), False, 'from matplotlib.patches import Polygon\n'), ((47487, 47534), 'numpy.unique', 'np.unique', (["compiled_obs['types'][toi_mask_base]"], {}), "(compiled_obs['types'][toi_mask_base])\n", (47496, 47534), True, 'import numpy as np\n'), ((50667, 50856), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'ncol': 'ncol', 'fontsize': 'fontsize', 'loc': 'loc', 'bbox_to_anchor': 'bbox_to_anchor', 'frameon': '(False)', 'handletextpad': 'handletextpad', 'columnspacing': 'columnspacing', 'labelspacing': 'labelspacing'}), '(ncol=ncol, fontsize=fontsize, loc=loc, 
bbox_to_anchor=\n bbox_to_anchor, frameon=False, handletextpad=handletextpad,\n columnspacing=columnspacing, labelspacing=labelspacing)\n', (50677, 50856), True, 'import matplotlib.pyplot as plt\n'), ((50898, 50916), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (50914, 50916), True, 'import matplotlib.pyplot as plt\n'), ((50925, 50968), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(results_dir + save_as + '.pdf')"], {}), "(results_dir + save_as + '.pdf')\n", (50936, 50968), True, 'import matplotlib.pyplot as plt\n'), ((50977, 50988), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (50986, 50988), True, 'import matplotlib.pyplot as plt\n'), ((54553, 54610), 'numpy.linspace', 'np.linspace', (['contour_lims[0]', 'contour_lims[1]', 'n_contours'], {}), '(contour_lims[0], contour_lims[1], n_contours)\n', (54564, 54610), True, 'import numpy as np\n'), ((54996, 55046), 'datetime.datetime', 'datetime', (['dtr[0].year', 'dtr[0].month', 'dtr[0].day', '(0)'], {}), '(dtr[0].year, dtr[0].month, dtr[0].day, 0)\n', (55004, 55046), False, 'from datetime import datetime, timedelta\n'), ((55061, 55120), 'datetime.datetime', 'datetime', (['dtr[1].year', 'dtr[1].month', 'dtr[1].day', '(23)', '(59)', '(59)'], {}), '(dtr[1].year, dtr[1].month, dtr[1].day, 23, 59, 59)\n', (55069, 55120), False, 'from datetime import datetime, timedelta\n'), ((55624, 55633), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (55631, 55633), True, 'import matplotlib.pyplot as plt\n'), ((56580, 56727), 'matplotlib.pyplot.quiver', 'plt.quiver', (['rlons[::j, ::i]', 'rlats[::j, ::i]', 'u_data[::j, ::i]', 'v_data[::j, ::i]'], {'units': '"""width"""', 'scale': 'wind_vector_scale', 'width': '(0.01)', 'zorder': '(10)'}), "(rlons[::j, ::i], rlats[::j, ::i], u_data[::j, ::i], v_data[::j,\n ::i], units='width', scale=wind_vector_scale, width=0.01, zorder=10)\n", (56590, 56727), True, 'import matplotlib.pyplot as plt\n'), ((57131, 57257), 'matplotlib.pyplot.contour', 'plt.contour', (['sic_lonx', 'sic_laty', 'add_sic_contours[2]'], {'levels': 'sic_contours', 'colors': '"""k"""', 'linewidths': '(0.5)', 'alpha': '(0.8)', 'zorder': '(5)'}), "(sic_lonx, sic_laty, add_sic_contours[2], levels=sic_contours,\n colors='k', linewidths=0.5, alpha=0.8, zorder=5)\n", (57142, 57257), True, 'import matplotlib.pyplot as plt\n'), ((57339, 57363), 'load_product.load_bathy', 'ldp.load_bathy', (['data_dir'], {}), '(data_dir)\n', (57353, 57363), True, 'import load_product as ldp\n'), ((58221, 58278), 'matplotlib.patches.Polygon', 'Polygon', (['patchxy'], {'facecolor': '"""white"""', 'alpha': '(0.25)', 'zorder': '(3)'}), "(patchxy, facecolor='white', alpha=0.25, zorder=3)\n", (58228, 58278), False, 'from matplotlib.patches import Polygon\n'), ((58607, 58650), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(results_dir + save_as + '.pdf')"], {}), "(results_dir + save_as + '.pdf')\n", (58618, 58650), True, 'import matplotlib.pyplot as plt\n'), ((58659, 58670), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (58668, 58670), True, 'import matplotlib.pyplot as plt\n'), ((59202, 59229), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (59212, 59229), True, 'import matplotlib.pyplot as plt\n'), ((59259, 59268), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (59266, 59268), True, 'import matplotlib.pyplot as plt\n'), ((61571, 61677), 'matplotlib.pyplot.clabel', 'plt.clabel', (['co'], {'colors': 'clabel_single_color', 'fmt': '"""%d"""', 'fontsize': '(labelsize - 1)', 
'manual': '(True)', 'inline': '(True)'}), "(co, colors=clabel_single_color, fmt='%d', fontsize=labelsize - 1,\n manual=True, inline=True)\n", (61581, 61677), True, 'import matplotlib.pyplot as plt\n'), ((62746, 62772), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 7)'}), '(figsize=(9, 7))\n', (62756, 62772), True, 'import matplotlib.pyplot as plt\n'), ((62802, 62811), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (62809, 62811), True, 'import matplotlib.pyplot as plt\n'), ((4659, 4668), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4666, 4668), True, 'import matplotlib.pyplot as plt\n'), ((4692, 4701), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4699, 4701), True, 'import matplotlib.pyplot as plt\n'), ((6665, 6696), 'time_tools.convert_tuple_to_8_int', 'tt.convert_tuple_to_8_int', (['date'], {}), '(date)\n', (6690, 6696), True, 'import time_tools as tt\n'), ((11434, 11463), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(16)'}), '(title, fontsize=16)\n', (11443, 11463), True, 'import matplotlib.pyplot as plt\n'), ((11511, 11563), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(results_dir + save_as + '.png')"], {'dpi': '(150)'}), "(results_dir + save_as + '.png', dpi=150)\n", (11522, 11563), True, 'import matplotlib.pyplot as plt\n'), ((11584, 11627), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(results_dir + save_as + '.pdf')"], {}), "(results_dir + save_as + '.pdf')\n", (11595, 11627), True, 'import matplotlib.pyplot as plt\n'), ((24334, 24396), 'time_tools.convert_14_to_tuple', 'tt.convert_14_to_tuple', (["float_data['profiles'][pi]['datetime']"], {}), "(float_data['profiles'][pi]['datetime'])\n", (24356, 24396), True, 'import time_tools as tt\n'), ((24580, 24702), 'geo_tools.mld', 'gt.mld', (["float_data['profiles'][pi]"], {'ref_depth': 'mld_ref_depth', 'sigma_theta_crit': 'mld_sigma_theta_crit', 'verbose_warn': '(False)'}), "(float_data['profiles'][pi], ref_depth=mld_ref_depth,\n sigma_theta_crit=mld_sigma_theta_crit, verbose_warn=False)\n", (24586, 24702), True, 'import geo_tools as gt\n'), ((25563, 25603), 'time_tools.convert_tuple_to_datetime', 'tt.convert_tuple_to_datetime', (['date_tuple'], {}), '(date_tuple)\n', (25591, 25603), True, 'import time_tools as tt\n'), ((30078, 30109), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'new_figsize'}), '(figsize=new_figsize)\n', (30088, 30109), True, 'import matplotlib.pyplot as plt\n'), ((32065, 32162), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'ticks': 'specified_levels', 'spacing': '"""uniform"""', 'shrink': 'shrink_cbar', 'format': 'formatter'}), "(ticks=specified_levels, spacing='uniform', shrink=shrink_cbar,\n format=formatter)\n", (32077, 32162), True, 'import matplotlib.pyplot as plt\n'), ((33849, 34029), 'matplotlib.pyplot.plot', 'plt.plot', (["drift_temps['datetime']", "(drift_temp_depth + (depth_lim[1] - depth_lim[0]) * (drift_temp_baseline -\n drift_temps['temp']))", '"""k-"""'], {'linewidth': '(0.01)', 'alpha': '(0.5)', 'zorder': '(4)'}), "(drift_temps['datetime'], drift_temp_depth + (depth_lim[1] -\n depth_lim[0]) * (drift_temp_baseline - drift_temps['temp']), 'k-',\n linewidth=0.01, alpha=0.5, zorder=4)\n", (33857, 34029), True, 'import matplotlib.pyplot as plt\n'), ((34090, 34139), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[datetime_coord[0], datetime_coord[-1]]'], {}), '([datetime_coord[0], datetime_coord[-1]])\n', (34098, 34139), True, 'import matplotlib.pyplot as plt\n'), ((34398, 34430), 'matplotlib.pyplot.xlim', 'plt.xlim', 
(['[start_date, end_date]'], {}), '([start_date, end_date])\n', (34406, 34430), True, 'import matplotlib.pyplot as plt\n'), ((34919, 35051), 'matplotlib.pyplot.contour', 'plt.contour', (['datetime_coord', 'z_vec', 'depth_data'], {'levels': 'density_depth_contours', 'linewidths': '(0.5)', 'alpha': '(0.75)', 'colors': '"""k"""', 'zorder': '(5)'}), "(datetime_coord, z_vec, depth_data, levels=\n density_depth_contours, linewidths=0.5, alpha=0.75, colors='k', zorder=5)\n", (34930, 35051), True, 'import matplotlib.pyplot as plt\n'), ((36897, 37013), 'matplotlib.pyplot.plot', 'plt.plot', (['[datetime_coord_daily[0], datetime_coord_daily[-1]]', '[sic_baseline, sic_baseline]', '"""k-"""'], {'linewidth': '(0.5)'}), "([datetime_coord_daily[0], datetime_coord_daily[-1]], [sic_baseline,\n sic_baseline], 'k-', linewidth=0.5)\n", (36905, 37013), True, 'import matplotlib.pyplot as plt\n'), ((39110, 39136), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%b"""'], {}), "('%b')\n", (39130, 39136), True, 'import matplotlib.dates as mdates\n'), ((39300, 39326), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%Y"""'], {}), "('%Y')\n", (39320, 39326), True, 'import matplotlib.dates as mdates\n'), ((39769, 39845), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""major"""', 'axis': '"""both"""', 'color': '"""0.6"""', 'linewidth': '(0.25)', 'alpha': '(0.6)'}), "(which='major', axis='both', color='0.6', linewidth=0.25, alpha=0.6)\n", (39777, 39845), True, 'import matplotlib.pyplot as plt\n'), ((40406, 40424), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (40422, 40424), True, 'import matplotlib.pyplot as plt\n'), ((40437, 40495), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(results_dir + save_as + param_abbrev + '.pdf')"], {}), "(results_dir + save_as + param_abbrev + '.pdf')\n", (40448, 40495), True, 'import matplotlib.pyplot as plt\n'), ((40508, 40519), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (40517, 40519), True, 'import matplotlib.pyplot as plt\n'), ((47057, 47073), 'Circles.circles.circle', 'circle', (['m', '*circ'], {}), '(m, *circ)\n', (47063, 47073), False, 'from Circles.circles import circle\n'), ((47223, 47258), 'numpy.array', 'np.array', (["compiled_obs['datetimes']"], {}), "(compiled_obs['datetimes'])\n", (47231, 47258), True, 'import numpy as np\n'), ((47311, 47346), 'numpy.array', 'np.array', (["compiled_obs['datetimes']"], {}), "(compiled_obs['datetimes'])\n", (47319, 47346), True, 'import numpy as np\n'), ((48254, 48330), 'numpy.logical_and', 'np.logical_and', (['(dt_months >= season_months[0])', '(dt_months <= season_months[1])'], {}), '(dt_months >= season_months[0], dt_months <= season_months[1])\n', (48268, 48330), True, 'import numpy as np\n'), ((49551, 49665), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 0]', '[np.nan, np.nan]'], {'lw': '(0)', 'c': 'season_colors[s_idx]', 'marker': '"""o"""', 'ms': '(4)', 'label': 'season_labels[s_idx]'}), "([0, 0], [np.nan, np.nan], lw=0, c=season_colors[s_idx], marker='o',\n ms=4, label=season_labels[s_idx])\n", (49559, 49665), True, 'import matplotlib.pyplot as plt\n'), ((56112, 56137), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (56135, 56137), False, 'import warnings\n'), ((56207, 56238), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (56228, 56238), False, 'import warnings\n'), ((56371, 56385), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (56383, 56385), True, 'import 
matplotlib.pyplot as plt\n'), ((59765, 59787), 'numpy.arange', 'np.arange', (['(-80)', '(50)', '(10)'], {}), '(-80, 50, 10)\n', (59774, 59787), True, 'import numpy as np\n'), ((59846, 59867), 'numpy.arange', 'np.arange', (['(-80)', '(60)', '(5)'], {}), '(-80, 60, 5)\n', (59855, 59867), True, 'import numpy as np\n'), ((60778, 60801), 'numpy.meshgrid', 'np.meshgrid', (['lons', 'lats'], {}), '(lons, lats)\n', (60789, 60801), True, 'import numpy as np\n'), ((3985, 4021), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'subplot_fig_size'}), '(figsize=subplot_fig_size)\n', (3995, 4021), True, 'import matplotlib.pyplot as plt\n'), ((6010, 6045), 'numpy.meshgrid', 'np.meshgrid', (['etopo_lons', 'etopo_lats'], {}), '(etopo_lons, etopo_lats)\n', (6021, 6045), True, 'import numpy as np\n'), ((12806, 12875), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'h_pad': 'pad', 'w_pad': 'pad', 'rect': '(0.02, 0.02, 0.98, 0.98)'}), '(h_pad=pad, w_pad=pad, rect=(0.02, 0.02, 0.98, 0.98))\n', (12822, 12875), True, 'import matplotlib.pyplot as plt\n'), ((13447, 13490), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(results_dir + save_as + '.pdf')"], {}), "(results_dir + save_as + '.pdf')\n", (13458, 13490), True, 'import matplotlib.pyplot as plt\n'), ((13503, 13514), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13512, 13514), True, 'import matplotlib.pyplot as plt\n'), ((24504, 24555), 'time_tools.convert_tuple_to_datetime', 'tt.convert_tuple_to_datetime', (['datetime_tuple_format'], {}), '(datetime_tuple_format)\n', (24532, 24555), True, 'import time_tools as tt\n'), ((24827, 24919), 'geo_tools.vert_prof_eval', 'gt.vert_prof_eval', (["float_data['profiles'][pi]", '"""sigma_theta"""', 'this_mld'], {'extrap': '"""nearest"""'}), "(float_data['profiles'][pi], 'sigma_theta', this_mld,\n extrap='nearest')\n", (24844, 24919), True, 'import geo_tools as gt\n'), ((27289, 27497), 'geo_tools.vert_prof_even_spacing', 'gt.vert_prof_even_spacing', (["float_data['profiles'][pi]", 'param_abbrev'], {'z_coor': '"""depth"""', 'spacing': 'vert_res', 'interp_method': '"""linear"""', 'extrap': '"""NaN"""', 'top': 'depth_lim[0]', 'bottom': 'depth_lim[1]', 'verbose_error': '(True)'}), "(float_data['profiles'][pi], param_abbrev, z_coor=\n 'depth', spacing=vert_res, interp_method='linear', extrap='NaN', top=\n depth_lim[0], bottom=depth_lim[1], verbose_error=True)\n", (27314, 27497), True, 'import geo_tools as gt\n'), ((29940, 29962), 'numpy.array', 'np.array', (['section_data'], {}), '(section_data)\n', (29948, 29962), True, 'import numpy as np\n'), ((30404, 30425), 'numpy.isnan', 'np.isnan', (['more_levels'], {}), '(more_levels)\n', (30412, 30425), True, 'import numpy as np\n'), ((31499, 31518), 'numpy.min', 'np.min', (['more_levels'], {}), '(more_levels)\n', (31505, 31518), True, 'import numpy as np\n'), ((31524, 31543), 'numpy.max', 'np.max', (['more_levels'], {}), '(more_levels)\n', (31530, 31543), True, 'import numpy as np\n'), ((33266, 33354), 'matplotlib.pyplot.plot', 'plt.plot', (['[obs_dt, obs_dt]', 'obs_range[obs_idx]'], {'color': '"""0.5"""', 'linewidth': '(0.5)', 'zorder': '(3)'}), "([obs_dt, obs_dt], obs_range[obs_idx], color='0.5', linewidth=0.5,\n zorder=3)\n", (33274, 33354), True, 'import matplotlib.pyplot as plt\n'), ((34207, 34268), 'time_tools.convert_14_to_tuple', 'tt.convert_14_to_tuple', (["float_data['profiles'][0]['datetime']"], {}), "(float_data['profiles'][0]['datetime'])\n", (34229, 34268), True, 'import time_tools as tt\n'), ((34322, 34384), 
'time_tools.convert_14_to_tuple', 'tt.convert_14_to_tuple', (["float_data['profiles'][-1]['datetime']"], {}), "(float_data['profiles'][-1]['datetime'])\n", (34344, 34384), True, 'import time_tools as tt\n'), ((35480, 35600), 'matplotlib.pyplot.clabel', 'plt.clabel', (['depth_contours'], {'fmt': '"""%d m"""', 'fontsize': 'depth_contour_fontsize', 'manual': '(True)', 'inline': '(True)', 'inline_spacing': '(25)'}), "(depth_contours, fmt='%d m', fontsize=depth_contour_fontsize,\n manual=True, inline=True, inline_spacing=25)\n", (35490, 35600), True, 'import matplotlib.pyplot as plt\n'), ((35842, 35861), 'numpy.array', 'np.array', (['obs_range'], {}), '(obs_range)\n', (35850, 35861), True, 'import numpy as np\n'), ((37038, 37089), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[sic_baseline + 1.2 * sic_norm, max_ylim]'], {}), '([sic_baseline + 1.2 * sic_norm, max_ylim])\n', (37046, 37089), True, 'import matplotlib.pyplot as plt\n'), ((37120, 37221), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(sic_baseline + 1.2 * sic_norm)', '((density_lim[1] - density_lim[0]) ** density_power_scale)'], {}), '(sic_baseline + 1.2 * sic_norm, (density_lim[1] - density_lim[0]) **\n density_power_scale)\n', (37128, 37221), True, 'import matplotlib.pyplot as plt\n'), ((37301, 37335), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[depth_lim[0], max_ylim]'], {}), '([depth_lim[0], max_ylim])\n', (37309, 37335), True, 'import matplotlib.pyplot as plt\n'), ((37447, 37456), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (37454, 37456), True, 'import matplotlib.pyplot as plt\n'), ((37545, 37572), 'matplotlib.pyplot.yticks', 'plt.yticks', (['explicit_yticks'], {}), '(explicit_yticks)\n', (37555, 37572), True, 'import matplotlib.pyplot as plt\n'), ((37985, 37997), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {}), '()\n', (37995, 37997), True, 'import matplotlib.pyplot as plt\n'), ((38588, 38623), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Depth (m)"""'], {'size': 'ysize'}), "('Depth (m)', size=ysize)\n", (38598, 38623), True, 'import matplotlib.pyplot as plt\n'), ((38656, 38710), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\sigma_\\\\theta$ (kg/m$^3$)"""'], {'size': 'ysize'}), "('$\\\\sigma_\\\\theta$ (kg/m$^3$)', size=ysize)\n", (38666, 38710), True, 'import matplotlib.pyplot as plt\n'), ((38755, 38796), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Depth (m) """'], {'size': 'ysize'}), "('Depth (m) ', size=ysize)\n", (38765, 38796), True, 'import matplotlib.pyplot as plt\n'), ((46426, 46456), 'numpy.linspace', 'np.linspace', (['ap[0]', 'ap[1]', '(100)'], {}), '(ap[0], ap[1], 100)\n', (46437, 46456), True, 'import numpy as np\n'), ((46455, 46485), 'numpy.linspace', 'np.linspace', (['ap[1]', 'ap[1]', '(100)'], {}), '(ap[1], ap[1], 100)\n', (46466, 46485), True, 'import numpy as np\n'), ((46522, 46552), 'numpy.linspace', 'np.linspace', (['ap[1]', 'ap[0]', '(100)'], {}), '(ap[1], ap[0], 100)\n', (46533, 46552), True, 'import numpy as np\n'), ((46551, 46581), 'numpy.linspace', 'np.linspace', (['ap[0]', 'ap[0]', '(100)'], {}), '(ap[0], ap[0], 100)\n', (46562, 46581), True, 'import numpy as np\n'), ((46619, 46649), 'numpy.linspace', 'np.linspace', (['ap[3]', 'ap[3]', '(100)'], {}), '(ap[3], ap[3], 100)\n', (46630, 46649), True, 'import numpy as np\n'), ((46648, 46678), 'numpy.linspace', 'np.linspace', (['ap[3]', 'ap[2]', '(100)'], {}), '(ap[3], ap[2], 100)\n', (46659, 46678), True, 'import numpy as np\n'), ((46715, 46745), 'numpy.linspace', 'np.linspace', (['ap[2]', 'ap[2]', '(100)'], {}), '(ap[2], ap[2], 100)\n', 
(46726, 46745), True, 'import numpy as np\n'), ((46744, 46774), 'numpy.linspace', 'np.linspace', (['ap[2]', 'ap[3]', '(100)'], {}), '(ap[2], ap[3], 100)\n', (46755, 46774), True, 'import numpy as np\n'), ((46931, 46940), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (46938, 46940), True, 'import matplotlib.pyplot as plt\n'), ((49768, 49951), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 0]', '[np.nan, np.nan]'], {'lw': '(0)', 'marker': 'obs_type_markers[t_idx]', 'ms': '(4)', 'markerfacecolor': '"""none"""', 'markeredgecolor': '"""k"""', 'markeredgewidth': '(0.5)', 'label': 'obs_type_labels[t_idx]'}), "([0, 0], [np.nan, np.nan], lw=0, marker=obs_type_markers[t_idx], ms\n =4, markerfacecolor='none', markeredgecolor='k', markeredgewidth=0.5,\n label=obs_type_labels[t_idx])\n", (49776, 49951), True, 'import matplotlib.pyplot as plt\n'), ((49993, 50156), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 0]', '[np.nan, np.nan]'], {'lw': '(0)', 'marker': 'obs_type_markers[t_idx]', 'ms': '(4)', 'markerfacecolor': '"""k"""', 'markeredgecolor': '"""none"""', 'label': 'obs_type_labels[t_idx]'}), "([0, 0], [np.nan, np.nan], lw=0, marker=obs_type_markers[t_idx], ms\n =4, markerfacecolor='k', markeredgecolor='none', label=obs_type_labels[\n t_idx])\n", (50001, 50156), True, 'import matplotlib.pyplot as plt\n'), ((57399, 57434), 'numpy.meshgrid', 'np.meshgrid', (['etopo_lons', 'etopo_lats'], {}), '(etopo_lons, etopo_lats)\n', (57410, 57434), True, 'import numpy as np\n'), ((57753, 57785), 'numpy.linspace', 'np.linspace', (['pll[0]', 'pll[1]', '(100)'], {}), '(pll[0], pll[1], 100)\n', (57764, 57785), True, 'import numpy as np\n'), ((57784, 57816), 'numpy.linspace', 'np.linspace', (['pll[1]', 'pll[1]', '(100)'], {}), '(pll[1], pll[1], 100)\n', (57795, 57816), True, 'import numpy as np\n'), ((57853, 57885), 'numpy.linspace', 'np.linspace', (['pll[1]', 'pll[0]', '(100)'], {}), '(pll[1], pll[0], 100)\n', (57864, 57885), True, 'import numpy as np\n'), ((57884, 57916), 'numpy.linspace', 'np.linspace', (['pll[0]', 'pll[0]', '(100)'], {}), '(pll[0], pll[0], 100)\n', (57895, 57916), True, 'import numpy as np\n'), ((57954, 57986), 'numpy.linspace', 'np.linspace', (['pll[3]', 'pll[3]', '(100)'], {}), '(pll[3], pll[3], 100)\n', (57965, 57986), True, 'import numpy as np\n'), ((57985, 58017), 'numpy.linspace', 'np.linspace', (['pll[3]', 'pll[2]', '(100)'], {}), '(pll[3], pll[2], 100)\n', (57996, 58017), True, 'import numpy as np\n'), ((58054, 58086), 'numpy.linspace', 'np.linspace', (['pll[2]', 'pll[2]', '(100)'], {}), '(pll[2], pll[2], 100)\n', (58065, 58086), True, 'import numpy as np\n'), ((58085, 58117), 'numpy.linspace', 'np.linspace', (['pll[2]', 'pll[3]', '(100)'], {}), '(pll[2], pll[3], 100)\n', (58096, 58117), True, 'import numpy as np\n'), ((58284, 58293), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (58291, 58293), True, 'import matplotlib.pyplot as plt\n'), ((3202, 3297), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'mcolors.LinearSegmentedColormap.from_list', ([], {'name': 'None', 'colors': 'cmap_colors', 'N': '(250)', 'gamma': '(1.3)'}), '(name=None, colors=cmap_colors, N=\n 250, gamma=1.3)\n', (3243, 3297), True, 'import matplotlib.colors as mcolors\n'), ((4034, 4043), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4041, 4043), True, 'import matplotlib.pyplot as plt\n'), ((4878, 4897), 'numpy.diff', 'np.diff', (['lonx[0, :]'], {}), '(lonx[0, :])\n', (4885, 4897), True, 'import numpy as np\n'), ((4907, 4927), 'numpy.diff', 'np.diff', (['lonx[-1, :]'], {}), '(lonx[-1, :])\n', 
(4914, 4927), True, 'import numpy as np\n'), ((4980, 4999), 'numpy.diff', 'np.diff', (['laty[0, :]'], {}), '(laty[0, :])\n', (4987, 4999), True, 'import numpy as np\n'), ((5008, 5028), 'numpy.diff', 'np.diff', (['laty[-1, :]'], {}), '(laty[-1, :])\n', (5015, 5028), True, 'import numpy as np\n'), ((7327, 7454), 'matplotlib.pyplot.plot', 'plt.plot', (['[lonx - cross_lonx, lonx + cross_lonx]', '[laty, laty]'], {'color': 'c', 'linestyle': '"""solid"""', 'linewidth': 'cross_width', 'zorder': '(4)'}), "([lonx - cross_lonx, lonx + cross_lonx], [laty, laty], color=c,\n linestyle='solid', linewidth=cross_width, zorder=4)\n", (7335, 7454), True, 'import matplotlib.pyplot as plt\n'), ((7460, 7587), 'matplotlib.pyplot.plot', 'plt.plot', (['[lonx, lonx]', '[laty - cross_laty, laty + cross_laty]'], {'color': 'c', 'linestyle': '"""solid"""', 'linewidth': 'cross_width', 'zorder': '(4)'}), "([lonx, lonx], [laty - cross_laty, laty + cross_laty], color=c,\n linestyle='solid', linewidth=cross_width, zorder=4)\n", (7468, 7587), True, 'import matplotlib.pyplot as plt\n'), ((7593, 7670), 'matplotlib.pyplot.scatter', 'plt.scatter', (['lonx', 'laty'], {'s': '(14)', 'c': 'c', 'edgecolors': 'edgecolor', 'alpha': '(0.9)', 'zorder': '(5)'}), '(lonx, laty, s=14, c=c, edgecolors=edgecolor, alpha=0.9, zorder=5)\n', (7604, 7670), True, 'import matplotlib.pyplot as plt\n'), ((9277, 9389), 'matplotlib.pyplot.plot', 'plt.plot', (['flonx[position_flags != 9]', 'flaty[position_flags != 9]'], {'color': '"""#15178F"""', 'linewidth': '(1.25)', 'zorder': '(4)'}), "(flonx[position_flags != 9], flaty[position_flags != 9], color=\n '#15178F', linewidth=1.25, zorder=4)\n", (9285, 9389), True, 'import matplotlib.pyplot as plt\n'), ((9397, 9511), 'matplotlib.pyplot.scatter', 'plt.scatter', (['flonx[position_flags == 2]', 'flaty[position_flags == 2]'], {'s': '(10)', 'c': '"""m"""', 'edgecolors': '"""none"""', 'zorder': '(5)'}), "(flonx[position_flags == 2], flaty[position_flags == 2], s=10, c\n ='m', edgecolors='none', zorder=5)\n", (9408, 9511), True, 'import matplotlib.pyplot as plt\n'), ((9518, 9638), 'matplotlib.pyplot.scatter', 'plt.scatter', (['flonx[position_flags == 1]', 'flaty[position_flags == 1]'], {'s': '(10)', 'c': '"""#15178F"""', 'edgecolors': '"""none"""', 'zorder': '(6)'}), "(flonx[position_flags == 1], flaty[position_flags == 1], s=10, c\n ='#15178F', edgecolors='none', zorder=6)\n", (9529, 9638), True, 'import matplotlib.pyplot as plt\n'), ((27751, 27960), 'geo_tools.vert_prof_even_spacing', 'gt.vert_prof_even_spacing', (["float_data['profiles'][pi]", '"""sigma_theta"""'], {'z_coor': '"""depth"""', 'spacing': 'vert_res', 'interp_method': '"""linear"""', 'extrap': '"""NaN"""', 'top': 'depth_lim[0]', 'bottom': 'depth_lim[1]', 'verbose_error': '(True)'}), "(float_data['profiles'][pi], 'sigma_theta', z_coor\n ='depth', spacing=vert_res, interp_method='linear', extrap='NaN', top=\n depth_lim[0], bottom=depth_lim[1], verbose_error=True)\n", (27776, 27960), True, 'import geo_tools as gt\n'), ((28802, 28902), 'geo_tools.profile_interp', 'gt.profile_interp', (['sorted_param', 'sorted_sigma_theta', 'z_vec'], {'method': '"""linear"""', 'out_of_bounds': '"""NaN"""'}), "(sorted_param, sorted_sigma_theta, z_vec, method='linear',\n out_of_bounds='NaN')\n", (28819, 28902), True, 'import geo_tools as gt\n'), ((28977, 29077), 'geo_tools.profile_interp', 'gt.profile_interp', (['sorted_depth', 'sorted_sigma_theta', 'z_vec'], {'method': '"""linear"""', 'out_of_bounds': '"""NaN"""'}), "(sorted_depth, sorted_sigma_theta, z_vec, 
method='linear',\n out_of_bounds='NaN')\n", (28994, 29077), True, 'import geo_tools as gt\n'), ((30025, 30045), 'numpy.array', 'np.array', (['depth_data'], {}), '(depth_data)\n', (30033, 30045), True, 'import numpy as np\n'), ((31137, 31146), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (31144, 31146), True, 'import matplotlib.pyplot as plt\n'), ((31309, 31318), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (31316, 31318), True, 'import matplotlib.pyplot as plt\n'), ((31875, 31899), 'numpy.abs', 'np.abs', (['specified_levels'], {}), '(specified_levels)\n', (31881, 31899), True, 'import numpy as np\n'), ((33459, 33529), 'matplotlib.pyplot.plot', 'plt.plot', (['[dt, dt]', '[*depth_lim]'], {'color': '"""0.2"""', 'linewidth': '(0.8)', 'zorder': '(3)'}), "([dt, dt], [*depth_lim], color='0.2', linewidth=0.8, zorder=3)\n", (33467, 33529), True, 'import matplotlib.pyplot as plt\n'), ((34507, 34537), 'numpy.array', 'np.array', (['datetime_coord_profs'], {}), '(datetime_coord_profs)\n', (34515, 34537), True, 'import numpy as np\n'), ((34603, 34633), 'numpy.array', 'np.array', (['datetime_coord_profs'], {}), '(datetime_coord_profs)\n', (34611, 34633), True, 'import numpy as np\n'), ((34694, 34724), 'numpy.array', 'np.array', (['datetime_coord_profs'], {}), '(datetime_coord_profs)\n', (34702, 34724), True, 'import numpy as np\n'), ((34740, 34758), 'numpy.array', 'np.array', (['mld_data'], {}), '(mld_data)\n', (34748, 34758), True, 'import numpy as np\n'), ((35932, 35962), 'numpy.array', 'np.array', (['datetime_coord_daily'], {}), '(datetime_coord_daily)\n', (35940, 35962), True, 'import numpy as np\n'), ((36028, 36058), 'numpy.array', 'np.array', (['datetime_coord_daily'], {}), '(datetime_coord_daily)\n', (36036, 36058), True, 'import numpy as np\n'), ((36614, 36623), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (36621, 36623), True, 'import matplotlib.pyplot as plt\n'), ((36637, 36667), 'numpy.array', 'np.array', (['datetime_coord_daily'], {}), '(datetime_coord_daily)\n', (36645, 36667), True, 'import numpy as np\n'), ((37796, 37805), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (37803, 37805), True, 'import matplotlib.pyplot as plt\n'), ((38469, 38478), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (38476, 38478), True, 'import matplotlib.pyplot as plt\n'), ((39854, 39863), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (39861, 39863), True, 'import matplotlib.pyplot as plt\n'), ((40008, 40017), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (40015, 40017), True, 'import matplotlib.pyplot as plt\n'), ((47161, 47170), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (47168, 47170), True, 'import matplotlib.pyplot as plt\n'), ((48497, 48528), 'numpy.array', 'np.array', (["compiled_obs['types']"], {}), "(compiled_obs['types'])\n", (48505, 48528), True, 'import numpy as np\n'), ((48744, 48878), 'matplotlib.pyplot.scatter', 'plt.scatter', (['lonx', 'laty'], {'s': '(4.0)', 'marker': 'obs_type_markers[t_idx]', 'facecolor': '"""none"""', 'edgecolors': 'season_colors[s_idx]', 'linewidths': '(0.5)'}), "(lonx, laty, s=4.0, marker=obs_type_markers[t_idx], facecolor=\n 'none', edgecolors=season_colors[s_idx], linewidths=0.5)\n", (48755, 48878), True, 'import matplotlib.pyplot as plt\n'), ((49041, 49159), 'matplotlib.pyplot.scatter', 'plt.scatter', (['lonx', 'laty'], {'s': '(4.0)', 'marker': 'obs_type_markers[t_idx]', 'facecolor': 'season_colors[s_idx]', 'edgecolors': '"""none"""'}), "(lonx, laty, s=4.0, marker=obs_type_markers[t_idx], facecolor=\n 
season_colors[s_idx], edgecolors='none')\n", (49052, 49159), True, 'import matplotlib.pyplot as plt\n'), ((49442, 49451), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (49449, 49451), True, 'import matplotlib.pyplot as plt\n'), ((58549, 58558), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (58556, 58558), True, 'import matplotlib.pyplot as plt\n'), ((3563, 3658), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'mcolors.LinearSegmentedColormap.from_list', ([], {'name': 'None', 'colors': 'cmap_colors', 'N': '(250)', 'gamma': '(1.3)'}), '(name=None, colors=cmap_colors, N=\n 250, gamma=1.3)\n', (3604, 3658), True, 'import matplotlib.colors as mcolors\n'), ((10100, 10143), 'numpy.unique', 'np.unique', (['mo_yr_strings'], {'return_index': '(True)'}), '(mo_yr_strings, return_index=True)\n', (10109, 10143), True, 'import numpy as np\n'), ((10180, 10203), 'numpy.sort', 'np.sort', (['unique_indices'], {}), '(unique_indices)\n', (10187, 10203), True, 'import numpy as np\n'), ((12421, 12430), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (12428, 12430), True, 'import matplotlib.pyplot as plt\n'), ((12450, 12478), 'numpy.arange', 'np.arange', (['open_sic', '(101)', '(10)'], {}), '(open_sic, 101, 10)\n', (12459, 12478), True, 'import numpy as np\n'), ((29418, 29480), 'time_tools.convert_14_to_tuple', 'tt.convert_14_to_tuple', (["float_data['profiles'][pi]['datetime']"], {}), "(float_data['profiles'][pi]['datetime'])\n", (29440, 29480), True, 'import time_tools as tt\n'), ((37720, 37745), 'numpy.array', 'np.array', (['explicit_yticks'], {}), '(explicit_yticks)\n', (37728, 37745), True, 'import numpy as np\n'), ((38406, 38415), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (38413, 38415), True, 'import matplotlib.pyplot as plt\n'), ((39149, 39158), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (39156, 39158), True, 'import matplotlib.pyplot as plt\n'), ((39203, 39212), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (39210, 39212), True, 'import matplotlib.pyplot as plt\n'), ((39339, 39348), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (39346, 39348), True, 'import matplotlib.pyplot as plt\n'), ((39392, 39401), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (39399, 39401), True, 'import matplotlib.pyplot as plt\n'), ((39457, 39466), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (39464, 39466), True, 'import matplotlib.pyplot as plt\n'), ((39576, 39585), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (39583, 39585), True, 'import matplotlib.pyplot as plt\n'), ((39666, 39675), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (39673, 39675), True, 'import matplotlib.pyplot as plt\n'), ((48608, 48638), 'numpy.array', 'np.array', (["compiled_obs['lons']"], {}), "(compiled_obs['lons'])\n", (48616, 48638), True, 'import numpy as np\n'), ((48651, 48681), 'numpy.array', 'np.array', (["compiled_obs['lats']"], {}), "(compiled_obs['lats'])\n", (48659, 48681), True, 'import numpy as np\n'), ((6836, 6873), 'numpy.where', 'np.where', (['(float_dates - date_int == 0)'], {}), '(float_dates - date_int == 0)\n', (6844, 6873), True, 'import numpy as np\n'), ((8238, 8294), 'time_tools.convert_8_int_to_tuple', 'tt.convert_8_int_to_tuple', (['float_dates[recent_day_index]'], {}), '(float_dates[recent_day_index])\n', (8263, 8294), True, 'import time_tools as tt\n'), ((8814, 8941), 'matplotlib.pyplot.plot', 'plt.plot', (['[lonx - cross_lonx, lonx + cross_lonx]', '[laty, laty]'], {'color': 'c', 'linestyle': '"""solid"""', 'linewidth': 
'cross_width', 'zorder': '(4)'}), "([lonx - cross_lonx, lonx + cross_lonx], [laty, laty], color=c,\n linestyle='solid', linewidth=cross_width, zorder=4)\n", (8822, 8941), True, 'import matplotlib.pyplot as plt\n'), ((8951, 9078), 'matplotlib.pyplot.plot', 'plt.plot', (['[lonx, lonx]', '[laty - cross_laty, laty + cross_laty]'], {'color': 'c', 'linestyle': '"""solid"""', 'linewidth': 'cross_width', 'zorder': '(4)'}), "([lonx, lonx], [laty - cross_laty, laty + cross_laty], color=c,\n linestyle='solid', linewidth=cross_width, zorder=4)\n", (8959, 9078), True, 'import matplotlib.pyplot as plt\n'), ((9767, 9809), 'time_tools.convert_14_to_tuple', 'tt.convert_14_to_tuple', (['float_datetimes[n]'], {}), '(float_datetimes[n])\n', (9789, 9809), True, 'import time_tools as tt\n'), ((10569, 10712), 'matplotlib.pyplot.text', 'plt.text', (['(lonx_to_label[pt] + 0.000625 * width)', '(laty_to_label[pt] - 0.026 * height)', 'mo_yr_strings_to_label[pt]'], {'fontsize': '(7)', 'color': '"""#15178F"""'}), "(lonx_to_label[pt] + 0.000625 * width, laty_to_label[pt] - 0.026 *\n height, mo_yr_strings_to_label[pt], fontsize=7, color='#15178F')\n", (10577, 10712), True, 'import matplotlib.pyplot as plt\n'), ((10837, 10980), 'matplotlib.pyplot.text', 'plt.text', (['(lonx_to_label[pt] + 0.000625 * width)', '(laty_to_label[pt] + 0.017 * height)', 'mo_yr_strings_to_label[pt]'], {'fontsize': '(7)', 'color': '"""#15178F"""'}), "(lonx_to_label[pt] + 0.000625 * width, laty_to_label[pt] + 0.017 *\n height, mo_yr_strings_to_label[pt], fontsize=7, color='#15178F')\n", (10845, 10980), True, 'import matplotlib.pyplot as plt\n'), ((12310, 12319), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (12317, 12319), True, 'import matplotlib.pyplot as plt\n'), ((13048, 13057), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (13055, 13057), True, 'import matplotlib.pyplot as plt\n'), ((13142, 13151), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (13149, 13151), True, 'import matplotlib.pyplot as plt\n'), ((13213, 13222), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (13220, 13222), True, 'import matplotlib.pyplot as plt\n'), ((28100, 28119), 'numpy.isnan', 'np.isnan', (['obs_param'], {}), '(obs_param)\n', (28108, 28119), True, 'import numpy as np\n'), ((28120, 28145), 'numpy.isnan', 'np.isnan', (['obs_sigma_theta'], {}), '(obs_sigma_theta)\n', (28128, 28145), True, 'import numpy as np\n'), ((28541, 28680), 'numpy.arange', 'np.arange', (['(0)', '((density_lim[1] - density_lim[0]) ** density_power_scale)', '((density_lim[1] - density_lim[0]) ** density_power_scale / 200)'], {}), '(0, (density_lim[1] - density_lim[0]) ** density_power_scale, (\n density_lim[1] - density_lim[0]) ** density_power_scale / 200)\n', (28550, 28680), True, 'import numpy as np\n'), ((36746, 36765), 'numpy.array', 'np.array', (['sic_coord'], {}), '(sic_coord)\n', (36754, 36765), True, 'import numpy as np\n'), ((37585, 37594), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (37592, 37594), True, 'import matplotlib.pyplot as plt\n'), ((38874, 38883), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (38881, 38883), True, 'import matplotlib.pyplot as plt\n'), ((42400, 42409), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (42407, 42409), True, 'import matplotlib.pyplot as plt\n'), ((8130, 8166), 'numpy.where', 'np.where', (['(float_dates - date_int < 0)'], {}), '(float_dates - date_int < 0)\n', (8138, 8166), True, 'import numpy as np\n'), ((29621, 29642), 'numpy.isfinite', 'np.isfinite', (['data_vec'], {}), '(data_vec)\n', 
(29632, 29642), True, 'import numpy as np\n'), ((29693, 29714), 'numpy.isfinite', 'np.isfinite', (['data_vec'], {}), '(data_vec)\n', (29704, 29714), True, 'import numpy as np\n'), ((37379, 37400), 'numpy.array', 'np.array', (['density_lim'], {}), '(density_lim)\n', (37387, 37400), True, 'import numpy as np\n'), ((33582, 33603), 'numpy.array', 'np.array', (['density_lim'], {}), '(density_lim)\n', (33590, 33603), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 27 14:24:54 2017
@author: IACJ
"""
import os
from os import path
from os.path import expanduser
from numpy import genfromtxt
import numpy as np
from numpy import array, zeros, argmin, inf
from numpy import *
from US_DTW import US_DTW
# classify using kNN
def kNNClassify_ED(newInput, dataSet, labels, k):
numSamples = dataSet.shape[0] # shape[0] stands for the num of row
## step 1: calculate Euclidean distance
diff = tile(newInput, (numSamples, 1)) - dataSet # Subtract element-wise
squaredDiff = diff ** 2 # squared for the subtract
squaredDist = sum(squaredDiff, axis = 1) # sum is performed by row
distance = squaredDist ** 0.5
## step 2: sort the distance
# argsort() returns the indices that would sort an array in ascending order
sortedDistIndices = argsort(distance)
classCount = {} # dictionary mapping each label to its vote count
for i in range(k):
## step 3: choose the min k distance
voteLabel = labels[sortedDistIndices[i]]
## step 4: count the times labels occur
classCount[voteLabel] = classCount.get(voteLabel, 0) + 1
## step 5: return the class with the most votes
maxCount = 0
for key, value in classCount.items():
if value > maxCount:
maxCount = value
maxIndex = key
return maxIndex
def kNNClassify_DTW(newInput, dataSet, labels, k):
numSamples = dataSet.shape[0] # shape[0] stands for the num of row
## step 1: calculate the DTW distance to every training sample
distance = np.zeros(numSamples)
for i in range(numSamples):
distance[i] = dtw(newInput,dataSet[i])
## step 2: sort the distance
# argsort() returns the indices that would sort an array in ascending order
sortedDistIndices = argsort(distance)
classCount = {} # dictionary mapping each label to its vote count
for i in range(k):
## step 3: choose the min k distance
voteLabel = labels[sortedDistIndices[i]]
## step 4: count the times labels occur
classCount[voteLabel] = classCount.get(voteLabel, 0) + 1
## step 5: return the class with the most votes
maxCount = 0
for key, value in classCount.items():
if value > maxCount:
maxCount = value
maxIndex = key
return maxIndex
def kNNClassify_US_DTW(newInput, dataSet, labels, k):
numSamples = dataSet.shape[0] # shape[0] stands for the num of row
## step 1: calculate the US-DTW distance to every training sample
distance = np.zeros(numSamples)
for i in range(numSamples):
us_dtw = US_DTW(newInput,dataSet[i])
print(i,len(us_dtw.paths))
distance[i] = us_dtw.resultDistance
## step 2: sort the distance
# argsort() returns the indices that would sort an array in ascending order
sortedDistIndices = argsort(distance)
classCount = {} # dictionary mapping each label to its vote count
for i in range(k):
## step 3: choose the min k distance
voteLabel = labels[sortedDistIndices[i]]
## step 4: count the times labels occur
classCount[voteLabel] = classCount.get(voteLabel, 0) + 1
## step 5: return the class with the most votes
maxCount = 0
for key, value in classCount.items():
if value > maxCount:
maxCount = value
maxIndex = key
return maxIndex
def dtw(x, y ):
"""
Computes Dynamic Time Warping (DTW) of two sequences.
:param array x: N1*M array
:param array y: N2*M array
The local cost between two elements is their absolute difference.
Returns the DTW distance between the two sequences (a single float).
"""
assert len(x)
assert len(y)
r, c = len(x), len(y)
D0 = zeros((r + 1, c + 1))
D0[0, 1:] = inf
D0[1:, 0] = inf
D1 = D0[1:, 1:] # view
for i in range(r):
for j in range(c):
D1[i, j] = abs(x[i]-y[j])
C = D1.copy()
for i in range(r):
for j in range(c):
D1[i, j] += min(D0[i, j], D0[i, j+1], D0[i+1, j])
return D1[-1, -1]
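# Worked example of the recurrence above: each cell adds the local cost
# |x[i]-y[j]| to the cheapest of its three predecessor cells, so
# dtw([1, 2, 3], [1, 2, 2, 3]) == 0.0 -- the repeated 2 in y is warped onto the
# single 2 in x at no extra cost, whereas a point-wise Euclidean distance is
# not even defined for sequences of different lengths.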
# Function to load a UCR dataset (training and test splits)
def load_dataset(dataset_name, dataset_folder):
dataset_path = path.join(dataset_folder, dataset_name)
train_file_path = path.join(dataset_path, '{}_TRAIN'.format(dataset_name))
test_file_path = path.join(dataset_path, '{}_TEST'.format(dataset_name))
# training data
train_raw_arr = genfromtxt(train_file_path, delimiter=',')
train_data = train_raw_arr[:, 1:]
train_labels = train_raw_arr[:, 0] - 1
# one is subtracted so that class labels start at 0 instead of 1
# test_data
test_raw_arr = genfromtxt(test_file_path, delimiter=',')
test_data = test_raw_arr[:, 1:]
test_labels = test_raw_arr[:, 0] - 1
return train_data, train_labels, test_data, test_labels
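# Each UCR file is a comma-separated matrix: column 0 holds the class label and
# the remaining columns hold the time-series values of one example per row.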
if __name__ == '__main__':
print("Program Begin")
########## Use the UCR datasets ###############
ucr_dataset_base_folder = expanduser('~/UCR_TS_Archive_2015')
dirs = os.listdir(ucr_dataset_base_folder)
for dir in dirs:
print (dir,end=" : \t")
ucr_dataset_name = dir
train_data, train_labels, test_data, test_labels = load_dataset(ucr_dataset_name,ucr_dataset_base_folder)
print(train_data.shape,train_labels.shape,test_data.shape,test_labels.shape)
########## 1NN with Euclidean distance (1NN_ED) ###################
Trues = 0
Falses = 0
for i in range (test_data.shape[0]):
x = test_data[i]
y = test_labels[i]
outputLabel = kNNClassify_ED(x, train_data, train_labels, 1)
# print (i,":\tpredict : ", outputLabel,"\tGroundTruth : ",y,"\t",outputLabel==y)
if (outputLabel==y):
Trues += 1
else :
Falses += 1
print ("1NN_ED :",Trues/(Trues+Falses))
#########################################
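# Repeat each training series along the time axis (np.tile doubles its length);
# presumably this is meant to exercise US-DTW's handling of uniform scaling,
# since the test series keep their original length.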
train_data = np.tile(train_data,2)
########## 1NN with uniform-scaling DTW (1NN_US_DTW) ###################
Trues = 0
Falses = 0
for i in range (test_data.shape[0]):
x = test_data[i]
y = test_labels[i]
outputLabel = kNNClassify_US_DTW(x, train_data, train_labels, 1)
print (i,":\tpredict : ", outputLabel,"\tGroundTruth : ",y,"\t",outputLabel==y)
if (outputLabel==y):
Trues += 1
else :
Falses += 1
print ("1NN_DTW :",Trues/(Trues+Falses))
################
########## 1NN with DTW (1NN_DTW) ###################
Trues = 0
Falses = 0
for i in range (test_data.shape[0]):
x = test_data[i]
y = test_labels[i]
outputLabel = kNNClassify_DTW(x, train_data, train_labels, 1)
# print (i,":\tpredict : ", outputLabel,"\tGroundTruth : ",y,"\t",outputLabel==y)
if (outputLabel==y):
Trues += 1
else :
Falses += 1
print ("1NN_DTW :",Trues/(Trues+Falses))
#########################################
print()
|
[
"os.path.join",
"numpy.zeros",
"numpy.genfromtxt",
"numpy.tile",
"US_DTW.US_DTW",
"os.path.expanduser",
"os.listdir"
] |
[((1638, 1658), 'numpy.zeros', 'np.zeros', (['numSamples'], {}), '(numSamples)\n', (1646, 1658), True, 'import numpy as np\n'), ((2650, 2670), 'numpy.zeros', 'np.zeros', (['numSamples'], {}), '(numSamples)\n', (2658, 2670), True, 'import numpy as np\n'), ((3916, 3937), 'numpy.zeros', 'zeros', (['(r + 1, c + 1)'], {}), '((r + 1, c + 1))\n', (3921, 3937), False, 'from numpy import array, zeros, argmin, inf\n'), ((4330, 4369), 'os.path.join', 'path.join', (['dataset_folder', 'dataset_name'], {}), '(dataset_folder, dataset_name)\n', (4339, 4369), False, 'from os import path\n'), ((4567, 4609), 'numpy.genfromtxt', 'genfromtxt', (['train_file_path'], {'delimiter': '""","""'}), "(train_file_path, delimiter=',')\n", (4577, 4609), False, 'from numpy import genfromtxt\n'), ((4803, 4844), 'numpy.genfromtxt', 'genfromtxt', (['test_file_path'], {'delimiter': '""","""'}), "(test_file_path, delimiter=',')\n", (4813, 4844), False, 'from numpy import genfromtxt\n'), ((5126, 5161), 'os.path.expanduser', 'expanduser', (['"""~/UCR_TS_Archive_2015"""'], {}), "('~/UCR_TS_Archive_2015')\n", (5136, 5161), False, 'from os.path import expanduser\n'), ((5179, 5214), 'os.listdir', 'os.listdir', (['ucr_dataset_base_folder'], {}), '(ucr_dataset_base_folder)\n', (5189, 5214), False, 'import os\n'), ((2720, 2748), 'US_DTW.US_DTW', 'US_DTW', (['newInput', 'dataSet[i]'], {}), '(newInput, dataSet[i])\n', (2726, 2748), False, 'from US_DTW import US_DTW\n'), ((6111, 6133), 'numpy.tile', 'np.tile', (['train_data', '(2)'], {}), '(train_data, 2)\n', (6118, 6133), True, 'import numpy as np\n')]
|
import html
from unittest.mock import Mock
import numpy as np
import pytest
from napari.utils import nbscreenshot
def test_nbscreenshot(make_napari_viewer):
"""Test taking a screenshot."""
viewer = make_napari_viewer()
np.random.seed(0)
data = np.random.random((10, 15))
viewer.add_image(data)
rich_display_object = nbscreenshot(viewer)
assert hasattr(rich_display_object, '_repr_png_')
# Trigger method that would run in jupyter notebook cell automatically
rich_display_object._repr_png_()
assert rich_display_object.image is not None
@pytest.mark.parametrize(
"alt_text_input, expected_alt_text",
[
(None, None),
("Good alt text", "Good alt text"),
# Naughty strings https://github.com/minimaxir/big-list-of-naughty-strings
# ASCII punctuation
(r",./;'[]\-=", ',./;'[]\\-='), # noqa: W605
('>?:"{}|_+', '>?:"{}|_+'), # ASCII punctuation 2
("!@#$%^&*()`~", '!@#$%^&*()`~'), # ASCII punctuation 3
# # Emoji
("😍", "😍"), # emoji 1
("👨🦰 👨🏿🦰 👨🦱 👨🏿🦱 🦹🏿♂️", "👨🦰 👨🏿🦰 👨🦱 👨🏿🦱 🦹🏿♂️"), # emoji 2
(r"¯\_(ツ)_/¯", '¯\\_(ツ)_/¯'), # Japanese emoticon # noqa: W605
# # Special characters
("田中さんにあげて下さい", "田中さんにあげて下さい"), # two-byte characters
("表ポあA鷗ŒéB逍Üߪąñ丂㐀𠀀", "表ポあA鷗ŒéB逍Üߪąñ丂㐀𠀀"), # special unicode chars
("گچپژ", "گچپژ"), # Persian special characters
# # Script injection
("<script>alert(0)</script>", None), # script injection 1
("<script>alert('1');</script>", None),
("<svg><script>123<1>alert(3)</script>", None),
],
)
def test_safe_alt_text(alt_text_input, expected_alt_text):
display_obj = nbscreenshot(Mock(), alt_text=alt_text_input)
if not expected_alt_text:
assert not display_obj.alt_text
else:
assert html.escape(display_obj.alt_text) == expected_alt_text
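# In the parametrized rows above, an expected value of None marks alt text that
# nbscreenshot should refuse to store, while for the remaining rows the stored
# alt text must match the expectation once passed through html.escape().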
|
[
"numpy.random.seed",
"napari.utils.nbscreenshot",
"unittest.mock.Mock",
"numpy.random.random",
"pytest.mark.parametrize",
"html.escape"
] |
[((585, 1229), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""alt_text_input, expected_alt_text"""', '[(None, None), (\'Good alt text\', \'Good alt text\'), (",./;\'[]\\\\-=",\n \',./;'[]\\\\-=\'), (\'>?:"{}|_+\', \'>?:"{}|_+\'), (\n \'!@#$%^&*()`~\', \'!@#$%^&*()`~\'), (\'😍\', \'😍\'), (\n \'👨\\u200d🦰 👨🏿\\u200d🦰 👨\\u200d🦱 👨🏿\\u200d🦱 🦹🏿\\u200d♂️\',\n \'👨\\u200d🦰 👨🏿\\u200d🦰 👨\\u200d🦱 👨🏿\\u200d🦱 🦹🏿\\u200d♂️\'), (\'¯\\\\_(ツ)_/¯\',\n \'¯\\\\_(ツ)_/¯\'), (\'田中さんにあげて下さい\', \'田中さんにあげて下さい\'), (\'表ポあA鷗ŒéB逍Üߪąñ丂㐀𠀀\',\n \'表ポあA鷗ŒéB逍Üߪąñ丂㐀𠀀\'), (\'گچپژ\', \'گچپژ\'), (\'<script>alert(0)</script>\',\n None), (\'<script>alert('1');</script>\', None), (\n \'<svg><script>123<1>alert(3)</script>\', None)]'], {}), '(\'alt_text_input, expected_alt_text\', [(None, None),\n (\'Good alt text\', \'Good alt text\'), (",./;\'[]\\\\-=", \',./;'[]\\\\-=\'),\n (\'>?:"{}|_+\', \'>?:"{}|_+\'), (\'!@#$%^&*()`~\', \'!@#$%^&*()`~\'\n ), (\'😍\', \'😍\'), (\'👨\\u200d🦰 👨🏿\\u200d🦰 👨\\u200d🦱 👨🏿\\u200d🦱 🦹🏿\\u200d♂️\',\n \'👨\\u200d🦰 👨🏿\\u200d🦰 👨\\u200d🦱 👨🏿\\u200d🦱 🦹🏿\\u200d♂️\'), (\'¯\\\\_(ツ)_/¯\',\n \'¯\\\\_(ツ)_/¯\'), (\'田中さんにあげて下さい\', \'田中さんにあげて下さい\'), (\'表ポあA鷗ŒéB逍Üߪąñ丂㐀𠀀\',\n \'表ポあA鷗ŒéB逍Üߪąñ丂㐀𠀀\'), (\'گچپژ\', \'گچپژ\'), (\'<script>alert(0)</script>\',\n None), (\'<script>alert('1');</script>\', None), (\n \'<svg><script>123<1>alert(3)</script>\', None)])\n', (608, 1229), False, 'import pytest\n'), ((236, 253), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (250, 253), True, 'import numpy as np\n'), ((265, 291), 'numpy.random.random', 'np.random.random', (['(10, 15)'], {}), '((10, 15))\n', (281, 291), True, 'import numpy as np\n'), ((346, 366), 'napari.utils.nbscreenshot', 'nbscreenshot', (['viewer'], {}), '(viewer)\n', (358, 366), False, 'from napari.utils import nbscreenshot\n'), ((1772, 1778), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1776, 1778), False, 'from unittest.mock import Mock\n'), ((1900, 1933), 'html.escape', 'html.escape', (['display_obj.alt_text'], {}), '(display_obj.alt_text)\n', (1911, 1933), False, 'import html\n')]
|
import numpy as np
def random_sum(*dimensions):
return np.random.rand(*dimensions).sum()
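# Example: random_sum(3, 4) draws a 3x4 array of uniform [0, 1) samples and
# returns their sum as a float (about 6 on average for 12 samples).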
|
[
"numpy.random.rand"
] |
[((61, 88), 'numpy.random.rand', 'np.random.rand', (['*dimensions'], {}), '(*dimensions)\n', (75, 88), True, 'import numpy as np\n')]
|
import numpy as np
import time
import sys
from ServoMotor import *
from fns import *
# Initialize motor control library & USB Port
filename = "/dev/ttyUSB0"
motor = ServoMotor(filename)
IO = motor.IO_Init()
if IO < 0:
print('IO exit')
sys.exit()
# Call corresponding function to convert sim2real/real2sim
def convFns(pos, convType):
conv = [left_armpit, left_elbow, left_shoulder, right_armpit, right_elbow, right_shoulder,
left_armpit, left_elbow, left_shoulder, right_armpit, right_elbow, right_shoulder]
targ = np.zeros(12)
for i in range(len(pos)):
if i==0:
targ[i] = conv[i](pos[i], convType, "front")
elif i==6:
targ[i] = conv[i](pos[i], convType, "back")
else:
targ[i] = conv[i](pos[i], convType)
return targ
'''
# Return target position
def act_shoulders&armpits(t, a, b, c, d, e):
# Calculate desired position
desired_p = np.zeros(12)
# Positive
pos_v_shoulder = a * np.sin(t * e) + b
pos_v_elbow = c * np.sin(t * e) + d
pos_shoulder = [2, 11]
pos_elbow = [1, 10]
# Negative
neg_v_shoulder = -a * np.sin(t * e) + b
neg_v_elbow = -c * np.sin(t * e) + d
neg_shoulder = [5, 8]
neg_elbow = [4, 7]
# Zero
zero = [0, 3, 6, 9]
# Assign
desired_p[pos_shoulder] = pos_v_shoulder
desired_p[pos_elbow] = pos_v_elbow
desired_p[neg_shoulder] = neg_v_shoulder
desired_p[neg_elbow] = neg_v_elbow
desired_p[zero] = 0
# Return desired new position
return convFns(desired_p, "sim2real")
'''
# Front and back legs use separate sine amplitudes and offsets, with a phase shift between them
# Return target position
def act(t, a, b, c, d, e, f):
# Calculate desired position
f_pos = a * np.sin(t * e) + b
f_neg = -a * np.sin(t * e) + b
b_pos = c * np.sin(t * e + f) + d
b_neg = -c * np.sin(t * e + f) + d
# Assign
desired_p = [0, f_pos, f_pos, 0, f_neg, f_neg, 0, b_pos, b_pos, 0, b_neg, b_neg]
# Return desired new position
return convFns(desired_p, "sim2real")
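# Gait summary: armpit joints (indices 0, 3, 6, 9) are held at zero; on the
# front legs the left elbow/shoulder follow a*sin(t*e)+b while the right side
# follows the mirrored (negated) sine, and the back legs do the same with
# amplitude/offset (c, d) plus an extra phase shift f.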
# Return position to take
def get_action(steps):
params = np.array(np.load('params/ROB/best_overall-2.npy'))
params[4]-=22
#params = np.array([0.24495851730947005, 0.18187873796178136, 0.2020333429029758, -0.3852743697870839, -0.2094960812992037]) # Trained sin_gait 7, Oct 11 19:01
#params = np.array([0.2980418533307479, 0.01878523690431866, 0.022546654023646796, -0.2685025304630598, -0.2080157428428239]) # Trained sin_gait 5, Oct 12 13:21
#params = np.array([0.15, 0.0, 0.2, 0.15, 0.2]) # Smooth Criminal
#params = np.array([0.15, 0.0, 0.19, 0.2, 0.23, 2.05])
return act(steps, *params)
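# The loaded vector is unpacked positionally into act(t, a, b, c, d, e, f), so
# the params[4] -= 22 adjustment above shifts the sine frequency e before use.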
# MOVE MOTOR TO GIVEN POSITION
def walk(pos):
h = 0
real_pos = []
for j in range(1,5):
u = 10*j
r = range(u, u+3)
for i in r:
real_pos.append(motor.readPosition(i))
motor.move(i, int(pos[h]), 0)
h+=1
time.sleep(0.005)
return real_pos
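# walk() reads back each servo's current position, commands it to the new
# target with a move time of 0 (presumably "as fast as possible"), and returns
# the pre-move readings so the caller can inspect the actual trajectory.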
# Initialize motors as servos and set offset
offsets = [30, 0, 64, 0, 70, 50, 26, 100, 55, 80, 90, 35]
h = 0
# Set servo mode to all servos with their offset
for j in range(1,5):
u = 10*j
r = range(u, u+3)
for i in r:
motor.setServoMode(i)
if offsets[h]!=0:
motor.setPositionOffset(i,offsets[h])
h+=1
# RESET position and stand down & up before walking
pos = [500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500]
h = 0
for j in range(1,5):
u = 10*j
r = range(u, u+3)
for i in r:
motor.move(i, int(pos[h]), 1500)
h+=1
time.sleep(3)
pos = [500, 750, 583, 500, 250, 417, 500, 750, 583, 500, 250, 417]
#pos = get_action(0)
h = 0
for j in range(1,5):
u = 10*j
r = range(u, u+3)
for i in r:
if h>5:
motor.move(i, int(pos[h]), 1000)
else:
motor.move(i, int(pos[h]), 1500)
h+=1
time.sleep(3)
'''
# Determine need to smoothen transition to first position
pos_prev = [500, 750, 583, 500, 250, 417, 500, 750, 583, 500, 250, 417]
pos = get_action(0)
delta_pos = abs(pos-pos_prev)
steps = int(max(delta_pos)/15)
m = []
for i in range(len(pos)):
m.append(np.linspace(pos_prev[i], pos[i], steps))
m_t = np.array(m).T.tolist()
for i in range(len(m_t)):
for j in range(len(m_t[0])):
m_t[i][j] = int(round(m_t[i][j]))
# If smoothing is needed, perform actions
for i in m_t:
real_pos = walk(i)
# WALK
j = 1
while j < 100:
# Get target position
pos = get_action(j)
# Move robot to target position
real_pos = walk(pos)
j += 1
'''
# RESET position back to the neutral standing pose at the end
pos = [500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500]
h = 0
for j in range(1,5):
u = 10*j
r = range(u, u+3)
for i in r:
motor.move(i, int(pos[h]), 1500)
h+=1
|
[
"numpy.load",
"numpy.zeros",
"time.sleep",
"numpy.sin",
"sys.exit"
] |
[((3246, 3259), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (3256, 3259), False, 'import time\n'), ((3514, 3527), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (3524, 3527), False, 'import time\n'), ((238, 248), 'sys.exit', 'sys.exit', ([], {}), '()\n', (246, 248), False, 'import sys\n'), ((524, 536), 'numpy.zeros', 'np.zeros', (['(12)'], {}), '(12)\n', (532, 536), True, 'import numpy as np\n'), ((1914, 1954), 'numpy.load', 'np.load', (['"""params/ROB/best_overall-2.npy"""'], {}), "('params/ROB/best_overall-2.npy')\n", (1921, 1954), True, 'import numpy as np\n'), ((2666, 2683), 'time.sleep', 'time.sleep', (['(0.005)'], {}), '(0.005)\n', (2676, 2683), False, 'import time\n'), ((1559, 1572), 'numpy.sin', 'np.sin', (['(t * e)'], {}), '(t * e)\n', (1565, 1572), True, 'import numpy as np\n'), ((1591, 1604), 'numpy.sin', 'np.sin', (['(t * e)'], {}), '(t * e)\n', (1597, 1604), True, 'import numpy as np\n'), ((1622, 1639), 'numpy.sin', 'np.sin', (['(t * e + f)'], {}), '(t * e + f)\n', (1628, 1639), True, 'import numpy as np\n'), ((1658, 1675), 'numpy.sin', 'np.sin', (['(t * e + f)'], {}), '(t * e + f)\n', (1664, 1675), True, 'import numpy as np\n')]
|
import json
import jsonschema
from jsonschema import validate
import abc
import error as error
from Estimator import Estimator
import sys
import numpy as np # vectors and matrices
import pandas as pd # tables and data manipulations
from dateutil.relativedelta import relativedelta # working with dates with style
from scipy.optimize import minimize # for function minimization
import statsmodels.formula.api as smf # statistics and econometrics
import statsmodels.tsa.api as smt
import statsmodels.api as sm
import scipy.stats as scs
from itertools import product # some useful functions
from tqdm import tqdm_notebook
import warnings # `do not disturb` mode
warnings.filterwarnings('ignore')
# Describe what kind of json you expect.
tripleESSchema = {
"type": "object",
"properties": {
"estimator": {
"type": "string"
},
"season_length": {
"type": "number"
},
"scaling_factor": {
"type": "number"
}
},
"required": ["estimator", "season_length"],
"additionalProperties": False
}
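# A minimal configuration that validates against the schema above
# (illustrative values only; "scaling_factor" is optional):
#   {"estimator": "TripleES", "season_length": 24, "scaling_factor": 1.96}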
class TripleES(Estimator):
def __init__(self, jsonData):
super().__init__()
self.nick = 'TripleES'
try:
validate(instance=jsonData, schema=tripleESSchema)
except jsonschema.exceptions.ValidationError as err:
template = "An exception of type {0} occurred. Arguments: {1!r}"
message = template.format(type(err).__name__, err.args)
print(message)
raise ValueError(error.errors['tripleES_config'])
self.parse(jsonData)
self.estimator = self
def parse(self, jsonData):
self.is_regr = True
if 'scaling_factor' in jsonData:
self.scaling_factor = jsonData['scaling_factor']
else:
self.scaling_factor = 1.96
self.season_length = jsonData['season_length']
sys.path.insert(1, 'output')
import TripleES_OM
self.output_manager = TripleES_OM.TripleES_OM(self)
def process(self, prep, cv, X_train, y_train):
# initializing model parameters alpha, beta and gamma
x = [0, 0, 0]
# Minimizing the loss function
from scipy.optimize import minimize
from sklearn.metrics import mean_squared_error, mean_squared_log_error
        # read cv.metrics; depending on its string value, pass the corresponding metric object
if 'mean_squared_log_error' in cv.metrics:
abg = minimize(timeseriesCVscore, x0=x,
args=(X_train, cv.cv, mean_squared_log_error, self.season_length),
method="TNC", bounds = ((0, 1), (0, 1), (0, 1))
)
elif 'mean_squared_error' in cv.metrics: #usable also for rootMSE
abg = minimize(timeseriesCVscore, x0=x,
args=(X_train, cv.cv, mean_squared_error, self.season_length),
method="TNC", bounds = ((0, 1), (0, 1), (0, 1))
)
else:
template = "An exception of type {0} occurred. Arguments: {1!r}"
message = template.format('Wrong metrics configuration parameter', cv.metrics)
raise ValueError(message)
# Take optimal values...
#best_estimator = abg.x
self.alpha = abg.x[0]
self.beta = abg.x[1]
self.gamma = abg.x[2]
self.X_train = X_train
        #return best_estimator # the estimator must return alpha, beta and gamma
return self
def predict(self, X_test):
self.model = HoltWinters(self.X_train, self.season_length, self.alpha, self.beta, self.gamma, len(X_test), self.scaling_factor)
self.model.triple_exponential_smoothing()
predictions = self.model.result[-len(X_test):]
return predictions
def predict_from_series(self, series, n_preds):
res = []
for i in range(len(series)):
if len(series[i])-2*self.season_length<0:
                print(f'Skipped series nr. {i}, as too short. A series must be at least twice as long as the season length')
continue
self.model = HoltWinters(series[i], self.season_length, self.alpha, self.beta, self.gamma, n_preds, self.scaling_factor)
self.model.triple_exponential_smoothing()
predictions = self.model.result[-n_preds:]
res.append(predictions)
return res
class HoltWinters:
"""
    Holt-Winters model with anomaly detection using the Brutlag method
    # series - initial time series
    # slen - length of a season <- parameter to set in the est_tscv json
    # alpha, beta, gamma - Holt-Winters model coefficients <- output params
    # n_preds - predictions horizon <- parameter defined a priori
    # scaling_factor - sets the width of the confidence interval by Brutlag (usually takes values from 2 to 3) <- could be a
    hyperparameter for CV
"""
def __init__(self, series, slen, alpha, beta, gamma, n_preds, scaling_factor=1.96):
self.series = series
self.slen = slen
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self.n_preds = n_preds
self.scaling_factor = scaling_factor
def initial_trend(self):
sum = 0.0
for i in range(self.slen):
sum += float(self.series[i+self.slen] - self.series[i]) / self.slen
return sum / self.slen
def initial_seasonal_components(self):
seasonals = {}
season_averages = []
n_seasons = int(len(self.series)/self.slen)
# let's calculate season averages
for j in range(n_seasons):
season_averages.append(sum(self.series[self.slen*j:self.slen*j+self.slen])/float(self.slen))
# let's calculate initial values
for i in range(self.slen):
sum_of_vals_over_avg = 0.0
for j in range(n_seasons):
sum_of_vals_over_avg += self.series[self.slen*j+i]-season_averages[j]
seasonals[i] = sum_of_vals_over_avg/n_seasons
return seasonals
def triple_exponential_smoothing(self):
self.result = []
self.Smooth = []
self.Season = []
self.Trend = []
self.PredictedDeviation = []
seasonals = self.initial_seasonal_components()
for i in range(len(self.series)+self.n_preds):
if i == 0: # components initialization
smooth = self.series[0]
trend = self.initial_trend()
self.result.append(self.series[0])
self.Smooth.append(smooth)
self.Trend.append(trend)
self.Season.append(seasonals[i%self.slen])
self.PredictedDeviation.append(0)
continue
if i >= len(self.series): # predicting
m = i - len(self.series) + 1
self.result.append((smooth + m*trend) + seasonals[i%self.slen])
# when predicting we increase uncertainty on each step
self.PredictedDeviation.append(self.PredictedDeviation[-1]*1.01)
else:
val = self.series[i]
last_smooth, smooth = smooth, self.alpha*(val-seasonals[i%self.slen]) + (1-self.alpha)*(smooth+trend)
trend = self.beta * (smooth-last_smooth) + (1-self.beta)*trend
seasonals[i%self.slen] = self.gamma*(val-smooth) + (1-self.gamma)*seasonals[i%self.slen]
self.result.append(smooth+trend+seasonals[i%self.slen])
# Deviation is calculated according to Brutlag algorithm.
self.PredictedDeviation.append(self.gamma * np.abs(self.series[i] - self.result[i])
+ (1-self.gamma)*self.PredictedDeviation[-1])
self.Smooth.append(smooth)
self.Trend.append(trend)
self.Season.append(seasonals[i%self.slen])
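# Hedged usage sketch for HoltWinters (toy values, not from the original
# project): the series must be at least 2 * slen long; the forecast is the
# tail of model.result.
#
#   series = list(range(24)) * 2          # 48 points, season length 24
#   hw = HoltWinters(series, slen=24, alpha=0.5, beta=0.1, gamma=0.1,
#                    n_preds=12, scaling_factor=1.96)
#   hw.triple_exponential_smoothing()
#   forecast = hw.result[-12:]            # the 12 predicted points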
from sklearn.model_selection import TimeSeriesSplit
from sklearn.metrics import mean_squared_error
def timeseriesCVscore(params, series, cv, loss_function, slen):
"""
Returns error on CV
params - vector of parameters for optimization
series - dataset with timeseries
slen - season length for Holt-Winters model
"""
# errors array
errors_arr = []
values = series.values
alpha, beta, gamma = params
# set the number of folds for cross-validation
tscv = TimeSeriesSplit(n_splits=cv)
# iterating over folds, train model on each, forecast and calculate error
for train, test in tscv.split(values):
try:
n=len(train)-2*slen
assert n > 0
except AssertionError as err:
template = "An exception of type {0} occurred"
message = template.format(type(err).__name__)
print(message)
raise ValueError(error.errors['tripleES_wrong_nsplits'])
model = HoltWinters(series=values[train], slen=slen,
alpha=alpha, beta=beta, gamma=gamma, n_preds=len(test))
model.triple_exponential_smoothing()
predictions = model.result[-len(test):]
actual = values[test]
error_arr = loss_function(predictions, actual)
errors_arr.append(error_arr)
return np.mean(np.array(errors_arr))
|
[
"jsonschema.validate",
"scipy.optimize.minimize",
"numpy.abs",
"warnings.filterwarnings",
"sys.path.insert",
"sklearn.model_selection.TimeSeriesSplit",
"numpy.array",
"TripleES_OM.TripleES_OM"
] |
[((802, 835), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (825, 835), False, 'import warnings\n'), ((8825, 8853), 'sklearn.model_selection.TimeSeriesSplit', 'TimeSeriesSplit', ([], {'n_splits': 'cv'}), '(n_splits=cv)\n', (8840, 8853), False, 'from sklearn.model_selection import TimeSeriesSplit\n'), ((2094, 2122), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""output"""'], {}), "(1, 'output')\n", (2109, 2122), False, 'import sys\n'), ((2180, 2209), 'TripleES_OM.TripleES_OM', 'TripleES_OM.TripleES_OM', (['self'], {}), '(self)\n', (2203, 2209), False, 'import TripleES_OM\n'), ((9712, 9732), 'numpy.array', 'np.array', (['errors_arr'], {}), '(errors_arr)\n', (9720, 9732), True, 'import numpy as np\n'), ((1384, 1434), 'jsonschema.validate', 'validate', ([], {'instance': 'jsonData', 'schema': 'tripleESSchema'}), '(instance=jsonData, schema=tripleESSchema)\n', (1392, 1434), False, 'from jsonschema import validate\n'), ((2680, 2836), 'scipy.optimize.minimize', 'minimize', (['timeseriesCVscore'], {'x0': 'x', 'args': '(X_train, cv.cv, mean_squared_log_error, self.season_length)', 'method': '"""TNC"""', 'bounds': '((0, 1), (0, 1), (0, 1))'}), "(timeseriesCVscore, x0=x, args=(X_train, cv.cv,\n mean_squared_log_error, self.season_length), method='TNC', bounds=((0, \n 1), (0, 1), (0, 1)))\n", (2688, 2836), False, 'from scipy.optimize import minimize\n'), ((2972, 3119), 'scipy.optimize.minimize', 'minimize', (['timeseriesCVscore'], {'x0': 'x', 'args': '(X_train, cv.cv, mean_squared_error, self.season_length)', 'method': '"""TNC"""', 'bounds': '((0, 1), (0, 1), (0, 1))'}), "(timeseriesCVscore, x0=x, args=(X_train, cv.cv, mean_squared_error,\n self.season_length), method='TNC', bounds=((0, 1), (0, 1), (0, 1)))\n", (2980, 3119), False, 'from scipy.optimize import minimize\n'), ((7999, 8038), 'numpy.abs', 'np.abs', (['(self.series[i] - self.result[i])'], {}), '(self.series[i] - self.result[i])\n', (8005, 8038), True, 'import numpy as np\n')]
|
'''
Future Update Notes:
1) Non-supervisor mode is not coded.
2) Controller functions should be updated.
3) The robot_creator function takes too many arguments, and more arguments
are necessary for options like robot tags, data transfer between the real
robot and the player, etc. A new structure might be necessary.
4) The code is modified for admin permissions on Windows; it should still
be arranged for Linux.
5) Robot tag creation not included.
6) Collaboration between the game manager and the world creator is still
not considered.
7) Controller paths should be arranged according to the Linux system and
the Webots configuration.
'''
import numpy as np
from decimal import Decimal
import math
from pathlib import Path
import ctypes, sys #for Windows only
def is_admin():# Windows Admin Permissions
try:
return ctypes.windll.shell32.IsUserAnAdmin()
except:
return False
def grid_material(no):
'''
    A new texture can be added easily by giving the location of its texture
    file (which should be in jpg format); the corresponding number is then
    assigned to the texture in this function.
'''
if no == 0:
return 0
if no == 1:
return 'textures/earth_texture.jpg'
if no == 2:
return 'textures/water_texture.jpg'
if no == 3:
return 'textures/desert_texture.jpg'
if no == 4:
return 'textures/cell_texture.jpg'
def floor_text(x,z,i,grid_length,material,world_text):
'''
    Creates the world-file script for one floor element of the floor matrix.
'''
world_text.append('Floor {\n')
world_text.append(' translation {0} 0 {1}\n'.format(x,z))
world_text.append(' name "floor({0})"\n'.format(i))
world_text.append(' size {0} {0}\n'.format(grid_length))
world_text.append(' tileSize {0} {0}\n'.format(grid_length))
world_text.append(' appearance Appearance {\n')
world_text.append(' texture ImageTexture {\n')
world_text.append(' url [\n')
world_text.append(' "{}"\n'.format(material))
world_text.append(' ]\n')
world_text.append(' filtering 0\n')
world_text.append(' }}}\n')
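# For example (values substituted from the arguments above, no new behaviour),
# floor_text(0.075, 0.075, 0, 0.15, 'textures/earth_texture.jpg', w) appends
# a node of the form:
#   Floor {
#     translation 0.075 0 0.075
#     name "floor(0)"
#     size 0.15 0.15
#     tileSize 0.15 0.15
#     appearance Appearance { texture ImageTexture { url [ "textures/earth_texture.jpg" ] filtering 0 } }
#   }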
def arena_creator(floor_matrix, grid_length,world_text):
'''
    floor_matrix is a matrix that decides the shape of the arena, the number
    of grids and the grid colors. Each matrix element selects a grid texture;
    the corresponding element numbers are defined in grid_material. The value
    of grid_length is in meters.
For example:
A = [1 3] Element value: 0 = box obstacle, 1 = earth, 2 = water,
[3 4] 3 = sand, 4 = cell
'''
i = 0
for ix,iz in np.ndindex(floor_matrix.shape):
x = (grid_length / 2) + (iz * grid_length)
z = (grid_length / 2) + (ix * grid_length)
material = grid_material(floor_matrix[ix,iz])
if material != 0:
floor_text(x,z,i,grid_length,material,world_text)
if material == 0:
obstacle(x,z,i,grid_length,world_text)
i += 1
def distance_sensor(r_no,s_no,x,z,r,cov,res,s,body,world_text,main,loop):
'''
    x, z and r are the x-coordinate, z-coordinate and direction (rotation
    around the y axis), respectively. Values of x, z and r should be
    calculated w.r.t. the robot body. cov (coverage) is the range of the
    sensor and res (resolution) is the smallest change in distance the
    sensor can detect. x, z and coverage are in meters; r is in degrees.
    body is the imaginary body of the sensor device and can be True or
    False; the imaginary body is black by default. r_no and s_no are the
    id of the robot that carries the sensor and the id of the sensor
    (there might be multiple sensors on a robot, so they must be
    identified distinctly), respectively. s is the supervisor mode of the
    robot (True or False). main and loop are the main and loop parts of
    the controller.
'''
pi_5f = float("{:.5f}".format(math.pi))
r = r / 180 * pi_5f
world_text.append(' DistanceSensor {\n')
world_text.append(' translation {0} 0 {1}\n'.format(x,z))
world_text.append(' rotation 0 1 0 {}\n'.format(r))
if body == True:
world_text.append(' children [\n')
world_text.append(' Shape {\n')
world_text.append(' appearance PBRAppearance {\n')
world_text.append(' baseColor 0 0 0\n')
world_text.append(' roughness 1\n')
world_text.append(' metalness 0\n')
world_text.append(' }\n')
world_text.append(' geometry Box {\n')
world_text.append(' size 0.001 0.001 0.001\n')
world_text.append(' }}]\n')
world_text.append(' name "ds_{0}_{1}"\n'.format(r_no,s_no))
world_text.append(' lookupTable [\n')
world_text.append(' 0 0 0\n')
world_text.append(' {0} {1} 0\n'.format(cov,res))
world_text.append(' ]}\n')
#Controller Part
if s == True:
main.append('ds_1 = supervisor.getDistanceSensor("ds_{0}_{1}")\n'.format(r_no,s_no))
main.append('ds_1.enable(timeStep)\n')
main.append('ds.append(ds_1)\n')
#if s == False:
def robot_controller(r_no,supervisor,main,loop):
'''
    r_no is the robot number. main and loop are the parts of the controller
    scripts which are necessary for devices and sensors.
'''
robot_controller_main = [] # main function of robot controller
robot_controller_loop = [] # loop function of robot controller
robot_controller_main.append('import json\n')
robot_controller_main.append('from pathlib import Path\n')
robot_controller_main.append('from controller import *\n')
robot_controller_main.append('\n')
robot_controller_main.append('timeStep = 32\n')#Default
robot_controller_main.append('ds = []\n')
robot_controller_main.append('file_to_open = Path("C:/Users/Korcan/Desktop/ME462/l_r_of_Robot_1.txt")\n') #EDIT THISSS
if supervisor == True:
robot_controller_main.append('supervisor = Supervisor()\n')
robot_controller_main.append('robot_node = supervisor.getFromDef("Robot_{}")\n'.format(r_no))
robot_controller_main.append('trans_field = robot_node.getField("translation")\n')
robot_controller_main.append('rot_field = robot_node.getField("rotation")\n')
robot_controller_main.append('\n')
robot_controller_loop.append('while supervisor.step(timeStep) != -1:\n')
robot_controller_loop.append(' val_translation = trans_field.getSFVec3f()\n')
robot_controller_loop.append(' val_rotation = rot_field.getSFRotation()\n')
robot_controller_loop.append(" f = open(file_to_open, mode='r')\n")
robot_controller_loop.append(' data_as_string = f.readlines()\n')
robot_controller_loop.append(' f.close()\n')
robot_controller_loop.append(' try:\n')
robot_controller_loop.append(' if len(data_as_string[{}]) != 0:\n'.format(r_no-1))
robot_controller_loop.append(' data = json.loads(data_as_string[{}])\n'.format(r_no-1))
robot_controller_loop.append(' trans_field.setSFVec3f(data[0])\n')
robot_controller_loop.append(' rot_field.setSFRotation(data[1])\n')
robot_controller_loop.append(' except IndexError:\n')
robot_controller_loop.append(' pass\n')
robot_controller_loop.append('\n')
loop.append(' for e in ds:\n')
loop.append(' print(e.getValue())\n')
#if super == False:
robot_controller_main = robot_controller_main + main
robot_controller_loop = robot_controller_loop + loop
final_controller = robot_controller_main + robot_controller_loop
location = "C:/Program Files/Webots/projects/default/controllers/Robot_{}".format(r_no)# EDIT THISSS
if is_admin():#Windows admin permissions inside of this if part of the program
Path(location).mkdir(parents=False, exist_ok=True)
[f.unlink() for f in Path(location).glob("*") if f.is_file()]
f = open(location + "/Robot_{}.py".format(r_no), "w")
f.writelines(final_controller)
f.close()
else:
# Re-run the program with admin rights
ctypes.windll.shell32.ShellExecuteW(None, "runas", sys.executable, " ".join(sys.argv), None, 1)
def robot_creator(x,z,r_no,supervisor,world_text):
'''
    Value of supervisor can be True or False. If the value is False then
    motors are enabled. r_no is the id of the robot. x and z are the start
    coordinates of the robot. r_no should not be 0, since 0 is used for the
    Arena Top Camera.
'''
main = []
loop = []
world_text.append('DEF Robot_{} Robot '.format(r_no))
world_text.append('{\n')
world_text.append(' translation {0} 0.03 {1}\n'.format(x,z))
world_text.append(' children [\n')
#Below lines for robot body
world_text.append(' DEF robot_{}_body Shape '.format(r_no))
world_text.append('{\n')
world_text.append(' appearance PBRAppearance {\n')
world_text.append(' baseColor 0.917647 0.145098 0.145098\n')
world_text.append(' roughness 1\n')
world_text.append(' metalness 0\n')
world_text.append(' }\n')
world_text.append(' geometry Box {\n')
world_text.append(' size 0.09 0.06 0.07\n')
world_text.append(' }}\n')
#Below lines for sensor
distance_sensor(r_no,1,0.045,0,0,0.1,100,supervisor,False,world_text,main,loop)
distance_sensor(r_no,2,-0.045,0,180,0.1,100,supervisor,False,world_text,main,loop)
distance_sensor(r_no,3,0,0.035,-90,0.1,100,supervisor,False,world_text,main,loop)
distance_sensor(r_no,4,0,-0.035,90,0.1,100,supervisor,False,world_text,main,loop)
#Below lines for motor when no real robots exist
if supervisor == False:
motor()
world_text.append(' ]\n')
#end of the children of robot
world_text.append(' name "robot_{}"\n'.format(r_no))
world_text.append(' boundingObject USE robot_{}_body\n'.format(r_no))
world_text.append(' controller "Robot_{}"\n'.format(r_no))
if supervisor == True:
world_text.append(' supervisor TRUE\n')
world_text.append('}\n')
#controller of robot
robot_controller(r_no,supervisor,main,loop)
def motor(x,z):
'''
    x and z are coordinates w.r.t. the robot body and their values are
    in meters.
'''
def obstacle(x,z,i,a,world_text):
'''
    Cubic obstacle with side length a in meters. x and z are the coordinates
    of the obstacle w.r.t. the general coordinate axes. Base color is the
    color of the obstacle; it is black by default.
'''
world_text.append('Solid {\n')
world_text.append(' translation {0} {1} {2}\n'.format(x,a/2,z))
world_text.append(' children [\n')
world_text.append(' DEF obstacle_{0} Shape '.format(i))
world_text.append('{\n')
world_text.append(' appearance PBRAppearance {\n')
world_text.append(' baseColor 0 0 0\n')
world_text.append(' roughness 1\n')
world_text.append(' metalness 0\n')
world_text.append(' }\n')
world_text.append(' geometry Box {\n')
world_text.append(' size {0} {0} {0}\n'.format(a))
world_text.append(' }}]\n')
world_text.append(' name "obstacle_{}"\n'.format(i))
world_text.append(' boundingObject USE obstacle_{}\n'.format(i))
world_text.append('}\n')
def arena_top_cam(a,grid_length,y,width,height,world_text):
'''
    The x, z coordinates of the middle point of the arena are computed from
    the arena matrix (a) and grid_length. The value of y should be a proper
    perpendicular distance from the floor, in meters. Values of width and
    height are the resolution of the camera, in pixels.
'''
x = a.shape[0]
x = x / 2 * grid_length
z = a.shape[1]
z = z / 2 * grid_length
y = y * (width / height) #Assumed width > height
world_text.append('DEF Arena_Cam Robot {\n')
world_text.append(' translation {0} {1} {2}\n'.format(x,y,z))
world_text.append(' rotation -1 0 0 1.5708\n')
world_text.append(' children [\n')
world_text.append(' Camera {\n')
world_text.append(' name "Arena_Top_Cam"\n')
world_text.append(' width {}\n'.format(width))
world_text.append(' height {}\n'.format(height))
world_text.append(' }]\n')
world_text.append(' name "robot_0"\n')
world_text.append(' controller "arena_top_cam"\n')
world_text.append(' supervisor TRUE\n')
world_text.append('}\n')
def world_creator(floor_matrix,grid_length,basic_time_step):
'''
floor_matrix is a matrix and it decides shape of the arena, number
of grids, grid colors. Each matrix element is a grid texture.
Corresponding element number will be defined. Value of grid_length
    in meters. basic_time_step is the time step increment used by
Webots and expressed in milliseconds. Default value of basic time
step in Webots is 32ms.
'''
contents = []
a = floor_matrix.shape
m = a[0]
n = a[1]
x = m / 2 * grid_length
z = n / 2 * grid_length
max_length = max(m,n) * grid_length
y = max_length / 0.748 #Field of view calculations
#Main contents of world
contents.append('#VRML_SIM R2020a utf8\n')
contents.append('WorldInfo {\n')
contents.append(' basicTimeStep {}\n'.format(basic_time_step))
contents.append('}\n')
contents.append('Viewpoint {\n')
contents.append(' orientation -1 0 0 1.5708\n')
contents.append(' position {0} {1} {2}\n'.format(x,2*y,z))
contents.append('}\n')
contents.append('TexturedBackground {\n')
contents.append('}\n')
contents.append('TexturedBackgroundLight {\n')
contents.append('}\n')
#Element of world: Arena, Robots, Top Camera
arena_creator(floor_matrix, grid_length,contents)
robot_creator(-grid_length,grid_length,1,True,contents)
robot_creator(-grid_length,3*grid_length,2,True,contents)
arena_top_cam(floor_matrix,grid_length,y,1280,720,contents)
f = open("sample_world.wbt", "w")
f.writelines(contents)
f.close()
a = np.random.randint(0,5,size=(10,10))
print(a)
print(a.shape)
world_creator(a, 0.15, 32)
|
[
"numpy.ndindex",
"numpy.random.randint",
"ctypes.windll.shell32.IsUserAnAdmin",
"pathlib.Path"
] |
[((13388, 13426), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5)'], {'size': '(10, 10)'}), '(0, 5, size=(10, 10))\n', (13405, 13426), True, 'import numpy as np\n'), ((2466, 2496), 'numpy.ndindex', 'np.ndindex', (['floor_matrix.shape'], {}), '(floor_matrix.shape)\n', (2476, 2496), True, 'import numpy as np\n'), ((820, 857), 'ctypes.windll.shell32.IsUserAnAdmin', 'ctypes.windll.shell32.IsUserAnAdmin', ([], {}), '()\n', (855, 857), False, 'import ctypes, sys\n'), ((7421, 7435), 'pathlib.Path', 'Path', (['location'], {}), '(location)\n', (7425, 7435), False, 'from pathlib import Path\n'), ((7496, 7510), 'pathlib.Path', 'Path', (['location'], {}), '(location)\n', (7500, 7510), False, 'from pathlib import Path\n')]
|
"""
This package includes my constraints/utilities/etc for cpmpy.
This cpmpy model was written by <NAME> (<EMAIL>)
See also my cpmpy page: http://hakank.org/cpmpy/
"""
import sys, math, re
import itertools
import numpy as np
from functools import reduce
from cpmpy import *
from cpmpy.expressions.globalconstraints import GlobalConstraint
from cpmpy.solvers import *
from ortools.sat.python import cp_model as ort
from cpmpy.transformations.flatten_model import flatten_constraint, flatten_model
from cpmpy.transformations.get_variables import print_variables
def AllDifferent_except_0(args):
"""
Ensure that all arguments that are != 0 must have distinct values.
"""
# Note: The parenthesis around (var1 != 0) are needed!
return [ ((var1!= 0) & (var2 != 0)).implies(var1 != var2) for var1, var2 in all_pairs(args)]
def all_different_except_0(args):
"""
Alias for AllDifferent_except_0(args).
"""
return AllDifferent_except_0(args)
def to_num(a,n,base):
"""
to_num(a, n, base)
Ensure that the digits in array `a` corresponds to the number `n` in base `base`.
"""
tlen = len(a)
return n == sum([(base ** (tlen - i - 1)) * a[i] for i in range(tlen)])
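# Example (illustrative): with base 10 and a = [a0,a1,a2] (intvars),
# to_num(a, n, 10) posts n == 100*a0 + 10*a1 + a2, so a = [1,2,3]
# forces n == 123.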
def increasing(args):
"""
Ensure that the values in args are increasing.
"""
return [args[i-1] <= args[i] for i in range(1,len(args))]
def increasing_strict(args):
"""
    Ensure that the values in args are strictly increasing.
"""
return [args[i-1] < args[i] for i in range(1,len(args))]
def decreasing(args):
"""
Ensure that the values in args are decreasing.
"""
return [args[i-1] >= args[i] for i in range(1,len(args))]
def decreasing_strict(args):
"""
    Ensure that the values in args are strictly decreasing.
    """
    return [args[i-1] > args[i] for i in range(1,len(args))]
def all_pairs(args):
"""
Generate all pairs from the list of lists args.
(stolen from cmppy/globalconstraints.py)
"""
return list(itertools.combinations(args, 2))
def get_different_solution(m,x):
"""
Add the current solution (x) in the model to generate
other solutions.
Usage:
# ...
ss = CPM_ortools(model)
if ss.solve():
print(x.value())
get_different_solution(ss, x)
Note: The array in x must be a flattened array. If there are
many decision variables, use flatten_lists(a) to
flatten out the array. E.g.
# ...
ss = CPM_ortools(model)
while ss.solve():
print(x.value()) # an array
print(y.value()) # a variable
print(z.value()) # another variable
        get_different_solution(ss,flatten_lists([x,[y,z]]))
Note that this might be slow for larger models or models with
many solutions. If so, try to use
- ortools_wrapper()
or the simple solution printers such as
- ORT_simple_printer
- ORT_arrays_printer
- ORT_simple_printer_matrix
- ORT_simple_function_printer
or define a similiar solution printer.
"""
# n = len(x)
# m += [any([x[i].value() != x[i] for i in range(n)])]
m += [any([t.value() != t for t in x])]
def flatten_lists(a):
"""
Flatten a list of lists.
Note: a must be an array of arrays (list of lists).
See get_different_solution for examples.
"""
return [item for sublist in a for item in sublist]
class ORT_simple_printer(ort.CpSolverSolutionCallback):
"""
A simple printer callback for single array printing.
"""
def __init__(self, varmap, a, num_solutions=0):
super().__init__()
self.solcount = 0
self.varmap = varmap
self.vars = (a)
self.num_solutions=num_solutions
def on_solution_callback(self):
self.solcount += 1 # I always start at 1. :-)
# populate values before printing
# For array of arrays (Tias' original)
# for wm in self.vars:
# for cpm_var in wm:
# cpm_var._value = self.Value(self.varmap[cpm_var])
# For single arrays:
for cpm_var in self.vars:
cpm_var._value = self.Value(self.varmap[cpm_var])
(a) = self.vars
print(f"#{self.solcount}: {a.value()}")
if self.num_solutions > 0 and self.solcount >= self.num_solutions:
self.StopSearch()
class ORT_arrays_printer(ort.CpSolverSolutionCallback):
"""
A simple printer callback for array of arrays.
"""
def __init__(self, varmap, a, num_solutions=0):
super().__init__()
self.solcount = 0
self.varmap = varmap
self.vars = (a)
self.num_solutions=num_solutions
def on_solution_callback(self):
self.solcount += 1 # I always start at 1. :-)
# populate values before printing
# For array of arrays (Tias' original)
for wm in self.vars:
for cpm_var in wm:
cpm_var._value = self.Value(self.varmap[cpm_var])
# For single arrays:
for cpm_var in self.vars:
cpm_var._value = self.Value(self.varmap[cpm_var])
(a) = self.vars
print(f"#{self.solcount}: {a.value()}")
if self.num_solutions > 0 and self.solcount >= self.num_solutions:
self.StopSearch()
class ORT_simple_printer_matrix(ort.CpSolverSolutionCallback):
"""
A simple printer callback for printing a matrix.
"""
def __init__(self, varmap, a, rows,cols, num_solutions=0):
super().__init__()
self.solcount = 0
self.varmap = varmap
self.vars = (a)
self.rows = rows
self.cols = cols
self.num_solutions=num_solutions
def on_solution_callback(self):
self.solcount += 1
for cpm_var in self.vars:
cpm_var._value = self.Value(self.varmap[cpm_var])
(a) = self.vars
print(f"#{self.solcount}:")
for i in range(self.rows):
for j in range(self.cols):
print("%3d" % a[i*self.cols+j].value(), end=" ")
print()
print()
if self.num_solutions > 0 and self.solcount >= self.num_solutions:
self.StopSearch()
class ORT_simple_function_printer(ort.CpSolverSolutionCallback):
"""
A printer callback with a callback (cb_fun) for printing
the array a, which should be structured by the user and
including .value() for the variables.
Note that the data array a must be a flattening array
to be used with this printer callback.
Example of a printer function:
def f(a):
print(a[0].value(),"+",a[1].value(),"=",a[2].value())
which will print a solution such as
2 + 3 = 5
"""
def __init__(self, varmap, a, cb_fun,num_solutions=0):
super().__init__()
self.solcount = 0
self.varmap = varmap
self.vars = (a)
self.cb_fun = cb_fun
self.num_solutions=num_solutions
def on_solution_callback(self):
self.solcount += 1
# For single arrays:
for cpm_var in self.vars:
cpm_var._value = self.Value(self.varmap[cpm_var])
(a) = self.vars
print(f"\n#{self.solcount}:")
self.cb_fun(a)
if self.num_solutions > 0 and self.solcount >= self.num_solutions:
self.StopSearch()
class ORT_simple_solution_counter(ort.CpSolverSolutionCallback):
"""
    This is a solution 'printer' that just counts the solutions.
"""
def __init__(self, varmap, a):
super().__init__()
self.solcount = 0
self.varmap = varmap
self.vars = (a)
def on_solution_callback(self):
self.solcount += 1
for wm in self.vars:
for cpm_var in wm:
cpm_var._value = self.Value(self.varmap[cpm_var])
(a) = self.vars
class ORT_function_printer_arrays(ort.CpSolverSolutionCallback):
"""
A printer callback with a callback (cb_fun) for printing
the array of arrays a, which should be structured by the user and
including .value() for the variables.
    This version prints the solution number.
Example of a printer function:
def print_solution(a):
print('x:', a[0].value())
print('y:', a[1].value())
"""
def __init__(self, varmap, a, cb_fun,num_solutions=0):
super().__init__()
self.solcount = 0
self.varmap = varmap
self.vars = (a)
self.cb_fun = cb_fun
self.num_solutions=num_solutions
def on_solution_callback(self):
self.solcount += 1
for wm in self.vars:
for cpm_var in wm:
cpm_var._value = self.Value(self.varmap[cpm_var])
(a) = self.vars
print(f"sol #{self.solcount}")
self.cb_fun(a)
print()
if self.num_solutions > 0 and self.solcount >= self.num_solutions:
self.StopSearch()
class ORT_function_printer_arrays2(ort.CpSolverSolutionCallback):
"""
A printer callback with a callback (cb_fun) for printing
the array of arrays a, which should be structured by the user and
including .value() for the variables.
    This version doesn't print the solution number.
Example of a printer function:
def print_solution(a):
print('x:', a[0].value())
print('y:', a[1].value())
"""
def __init__(self, varmap, a, cb_fun,num_solutions=0):
super().__init__()
self.solcount = 0
self.varmap = varmap
self.vars = (a)
self.cb_fun = cb_fun
self.num_solutions=num_solutions
def on_solution_callback(self):
self.solcount += 1
for wm in self.vars:
for cpm_var in wm:
cpm_var._value = self.Value(self.varmap[cpm_var])
(a) = self.vars
self.cb_fun(a)
if self.num_solutions > 0 and self.solcount >= self.num_solutions:
self.StopSearch()
def print_solution(a):
"""
print_solution(a)
Default callback method for printing the solution in a printer callback.
Note: a must be an array of arrays to be used with ortools_wrapper
(defined below).
"""
for x in a:
print(x.value())
def ortools_wrapper(model,var_array,print_solution=print_solution,num_sols=0):
"""
    ortools_wrapper(model,var_array,print_solution=print_solution,num_sols=0)
This is a simple wrapper for printing the solutions of a model and tends
to be (significantly) faster than using
ss = CPM_ortools(model)
while ss.solve():
# ...
get_different_solution(ss,flatten_lists(var_array))
Parameters:
- model : the model
- var_array: the array of arrays of the decision variables to be printed
with print_solution(var_array)
- print_solution: the method used to do the actual printing of the solution.
Default is print_solution(a) defined above. The function
can be overwritten / defined in the current constraint model.
- num_sols : number of solutions. Default 0, all solutions.
Note: For optimality problems, use ortools_wrapper_opt(.) instead.
"""
ss = CPM_ortools(model)
cb = ORT_function_printer_arrays(ss.varmap,var_array,print_solution,num_sols)
# Flags to experiment with
# ss.ort_solver.parameters.num_search_workers = 8 # Don't work together with SearchForAllSolutions
# ss.ort_solver.parameters.search_branching = ort.PORTFOLIO_SEARCH
# ss.ort_solver.parameters.cp_model_presolve = False
ss.ort_solver.parameters.linearization_level = 0
ss.ort_solver.parameters.cp_model_probing_level = 0
ort_status = ss.ort_solver.SearchForAllSolutions(ss.ort_model, cb)
ss._after_solve(ort_status)
print(ss.status())
print("Nr solutions:", cb.solcount)
print("Num conflicts:", ss.ort_solver.NumConflicts())
print("NumBranches:", ss.ort_solver.NumBranches())
print("WallTime:", ss.ort_solver.WallTime())
print()
def ortools_wrapper2(model,var_array,print_solution=print_solution,num_sols=0):
"""
    ortools_wrapper2(model,var_array,print_solution=print_solution,num_sols=0)
This is a simple wrapper for printing the solutions of a model and tends
to be (significantly) faster than using
ss = CPM_ortools(model)
while ss.solve():
# ...
get_different_solution(ss,flatten_lists(var_array))
    This version doesn't print the solution number.
Parameters:
- model : the model
- var_array: the array of arrays of the decision variables to be printed
with print_solution(var_array)
- print_solution: the method used to do the actual printing of the solution.
Default is print_solution(a) defined above. The function
can be overwritten / defined in the current constraint model.
- num_sols : number of solutions. Default 0, all solutions.
Note: For optimality problems, use ortools_wrapper_opt(.) instead.
"""
ss = CPM_ortools(model)
cb = ORT_function_printer_arrays2(ss.varmap,var_array,print_solution,num_sols)
# Flags to experiment with
# ss.ort_solver.parameters.num_search_workers = 8 # Don't work together with SearchForAllSolutions
# ss.ort_solver.parameters.search_branching = ort.PORTFOLIO_SEARCH
# ss.ort_solver.parameters.cp_model_presolve = False
ss.ort_solver.parameters.linearization_level = 0
ss.ort_solver.parameters.cp_model_probing_level = 0
ort_status = ss.ort_solver.SearchForAllSolutions(ss.ort_model, cb)
print()
ss._after_solve(ort_status) # post-process after solve() call...
print(ss.status())
print("Nr solutions:", cb.solcount)
print("Num conflicts:", ss.ort_solver.NumConflicts())
print("NumBranches:", ss.ort_solver.NumBranches())
print("WallTime:", ss.ort_solver.WallTime())
print()
def ortools_wrapper_opt(model,var_array,print_solution=print_solution,num_sols=1,num_procs=1):
"""
    ortools_wrapper_opt(model,var_array,print_solution=print_solution,num_sols=1,num_procs=1)
This is a simple wrapper for printing the _optimal_ solution of a model.
This tends to be (significantly) faster than using
if model.solve():
# ...
Parameters:
- model : the model
- var_array: the array of arrays of the decision variables to be printed
with print_solution(var_array)
- print_solution: the method used to do the actual printing of the solution.
Default is print_solution(a) defined above. The function
can be overwritten / defined in the current constraint model.
- num_sols : number of solutions. Default 0, all solutions.
"""
ss = CPM_ortools(model)
cb = ORT_function_printer_arrays(ss.varmap,var_array,print_solution,1)
# Flags to experiment with
if num_procs > 1:
ss.ort_solver.parameters.num_search_workers = num_procs
# ss.ort_solver.parameters.search_branching = ort.PORTFOLIO_SEARCH
# ss.ort_solver.parameters.cp_model_presolve = False
ss.ort_solver.parameters.linearization_level = 0
ss.ort_solver.parameters.cp_model_probing_level = 0
# Note: This is the real difference between this method and ortool_wrapper.
# For optimal problems one cannot use SearchForAllSolutions. Instead
# one must use ss.ort_solver.Solve(,)
# ort_status = ss.ort_solver.SearchForAllSolutions(ss.ort_model, cb)
ort_status = ss.ort_solver.Solve(ss.ort_model, cb)
ss._after_solve(ort_status) # post-process after solve() call...
print(ss.status())
print("Nr solutions:", cb.solcount)
print("Num conflicts:", ss.ort_solver.NumConflicts())
print("NumBranches:", ss.ort_solver.NumBranches())
print("WallTime:", ss.ort_solver.WallTime())
print()
def ortools_wrapper_count_solutions(model,var_array):
"""
    ortools_wrapper_count_solutions(model,var_array)
This is a simple wrapper for just counting the solutions of a model.
Parameters:
- model : the model
- var_array: the array of arrays of the decision variables to be printed
with print_solution(var_array)
"""
ss = CPM_ortools(model)
cb = ORT_simple_solution_counter(ss.varmap,var_array)
# Flags to experiment with
# ss.ort_solver.parameters.num_search_workers = 8 # Don't work together with SearchForAllSolutions
# ss.ort_solver.parameters.search_branching = ort.PORTFOLIO_SEARCH
# ss.ort_solver.parameters.cp_model_presolve = False
ss.ort_solver.parameters.linearization_level = 0
ss.ort_solver.parameters.cp_model_probing_level = 0
ort_status = ss.ort_solver.SearchForAllSolutions(ss.ort_model, cb)
ss._after_solve(ort_status)
return cb.solcount
def base_array(n):
"""
Returns an array of length `n` with base coefficients.
Example: `base_array(4)` returns the array [1000,100,10,1]
"""
return np.array([10**i for i in range(n-1,-1,-1)])
def scalar_product(a,b):
"""
`scalar_product(a,b)`
Returns the scalar product of the arrays `a` and `b`.
Assumption: `len(a) == len(b)`
"""
    assert len(a) == len(b), "len(a) must equal len(b)"
# return np.dot(a,b)
return sum(a*b)
def scalar_product1(a):
"""
`scalar_product1(a)`
Returns the scalar product of the array `a` and a base_array of appropriate length.
    """
# return np.dot(a,base_array(len(a)))
return sum(a*base_array(len(a)))
def my_circuit(x):
"""
circuit(x)
    Ensures that x is a circuit.
Note: This assumes that x is has the domain 0..len(x)-1,
i.e. 0-based.
"""
assert x[0].lb == 0, f"circuit: lb is {x[0].lb}, but must be 0"
n = len(x)
z = intvar(0, n-1,shape=n,name='z')
constraints = [
AllDifferent(x),
AllDifferent(z),
# put the orbit of x[0] in in z[1..n]
z[0] == x[0],
[ z[i] == x[z[i-1]] for i in range(1, n-1)],
# may not be 0 for i < n-1
[ z[i] != 0 for i in range(1, n-1)],
# when i = n-1 it must be 0
z[n-1] == 0
]
return constraints
def my_circuit_path(x,z):
"""
circuit(x,z)
    Ensures that x is a circuit and z is the path.
Note: This assumes that x is has the domain 0..len(x)-1,
i.e. 0-based.
"""
assert x[0].lb == 0, f"circuit: x[0].lb is {x[0].lb}, but must be 0"
n = len(x)
constraints = [
AllDifferent(x),
AllDifferent(z),
# put the orbit of x[0] in in z[1..n]
z[0] == x[0],
[ z[i] == x[z[i-1]] for i in range(1, n-1)],
# may not be 0 for i < n-1
[ z[i] != 0 for i in range(1, n-1)],
# when i = n-1 it must be 0
z[n-1] == 0
]
return constraints
def count(a,val,c):
"""
count(a,val,c)
c is the number of occurrences of val in array a.
"""
return [c == sum([a[i] == val for i in range(len(a))])
]
def atmost(a,val,c):
"""
atmost(a,val,c)
Ensure that the number of occurrences of val in a is atmost c.
"""
return [sum([a[i] == val for i in range(len(a))]) <= c]
def atleast(a,val,c):
"""
atleast(a,val,c)
Ensure that the number of occurrences of val in a is atmost c.
"""
return [sum([a[i] == val for i in range(len(a))]) >= c]
def exactly(a,val,c):
"""
exactly(a,val,c)
Ensure that the number of occurrences of val in a is exactly c.
"""
return [sum([a[i] == val for i in range(len(a))]) == c]
def global_cardinality_count(a,gcc):
"""
global_cardinality_count(a,gcc)
Global cardinality count: Collect the number of occurrences of each value 0..a.ub
in gcc. The array gcc must be of length 0..ub.
"""
n = len(a)
ub = max([a[i].ub for i in range(n)])
constraints = []
for i in range(ub+1):
constraints += [count(a,i,gcc[i])]
return constraints
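# Example (illustrative): with x = intvar(0,2,shape=4) and gcc = intvar(0,4,shape=3),
# global_cardinality_count(x,gcc) together with x == [2,0,2,1] forces
# gcc == [1,1,2] (one 0, one 1 and two 2s).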
def inverse(x,y):
"""
inverse(x,y)
Ensures that:
x[i] == j #<=> y[j] == i
Note: inverse(x,y) is sometimes called assignment(x,y).
There is an alternative version: inverse(x) which can
be simulated by inverse(x,x)
"""
n = len(x)
assert n == len(y), "x and y must be of equal length"
constraints = []
for i in range(n):
for j in range(n):
constraints += [(x[i] == j) == (y[j] == i)]
return constraints
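# Example (illustrative): inverse(x,y) makes y the inverse permutation of x,
# e.g. x = [2,0,1] pairs with y = [1,2,0], since x[0]==2 <-> y[2]==0, etc.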
def my_cumulative(s, d, r, b):
"""
Decompositon of cumulative.
Inspired by the MiniZinc implementation.
The MiniZinc decomposition is discussed in the paper:
<NAME>, <NAME>, <NAME>, and <NAME>.
'Why cumulative decomposition is not as bad as it sounds.'
Parameters:
s: start_times assumption: array of varint
d: durations assumption: array of int
r: resources assumption: array of int
b: resource limit assumption: varint or int
"""
constraints = []
max_d = max(d)
tasks = [i for i in range(len(s)) if r[i] > 0 and d[i] > 0]
times_min = min([s[i].lb for i in tasks])
times_max = max([s[i].ub + max_d for i in tasks])
for t in range(times_min, times_max + 1):
constraints += [ b >= sum([((s[i] <= t) & (t < s[i] + d[i])) * r[i] for i in tasks])]
# Somewhat experimental:
    # This constraint is needed to constrain the upper limit of b.
if not isinstance(b, int):
constraints += [b <= sum(r)]
return constraints
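# Hedged usage sketch (made-up task data): three tasks with variable start
# times, fixed durations d = [2,3,4] and demands r = [1,2,2], sharing a
# resource limit of 3:
#   s = intvar(0, 10, shape=3)
#   model = Model(my_cumulative(s, [2, 3, 4], [1, 2, 2], 3))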
def member_of(x, val):
"""
member_of(x, val)
Ensures that the value `val` is in the array `x`.
"""
n = len(x)
# cc = intvar(0,n)
# constraints = [count(x, val, cc), cc > 0]
constraints = [sum([x[i] == val for i in range(n)]) > 0]
return constraints
def regular(x, Q, S, d, q0, F):
"""
Global constraint regular
This is a translation of MiniZinc's regular constraint (defined in
lib/zinc/globals.mzn), via the Comet code refered above.
All comments are from the MiniZinc code.
'''
The sequence of values in array 'x' (which must all be in the range 1..S)
is accepted by the DFA of 'Q' states with input 1..S and transition
function 'd' (which maps (1..Q, 1..S) -> 0..Q)) and initial state 'q0'
(which must be in 1..Q) and accepting states 'F' (which all must be in
1..Q). We reserve state 0 to be an always failing state.
'''
x : IntVar array
Q : number of states
S : input_max
d : transition matrix
q0: initial state
F : accepting states
Note: As mentioned above the states must start at 1 since 0 is
represents a failed state.
Note: Compare with regular_table which use the Table constraints
instead of Element constraint in the main loop.
"""
assert Q > 0, 'regular: "Q" must be greater than zero'
assert S > 0, 'regular: "S" must be greater than zero'
# d2 is the same as d, except we add one extra transition for
# each possible input; each extra transition is from state zero
# to state zero. This allows us to continue even if we hit a
# non-accepted input.
d2 = []
for i in range(Q + 1):
row = []
for j in range(S):
if i == 0:
row.append(0)
else:
row.append(d[i - 1][j])
d2.append(row)
d2_flatten = [d2[i][j] for i in range(Q + 1) for j in range(S)]
# If x has index set m..n, then a[m-1] holds the initial state
# (q0), and a[i+1] holds the state we're in after processing
# x[i]. If a[n] is in F, then we succeed (ie. accept the
# string).
x_range = list(range(0, len(x)))
m = 0
n = len(x)
a = [intvar(0, Q + 1) for i in range(m, n + 1)]
constraints = []
# Check that the final state is in F
constraints += [member_of(F,a[-1])]
# First state is q0
constraints += [a[m] == q0]
for i in x_range:
constraints += [x[i] >= 1]
constraints += [x[i] <= S]
# Determine a[i+1]: a[i+1] == d2[a[i], x[i]]
constraints += [
a[i + 1] == Element(d2_flatten,(a[i]) * S + (x[i] - 1))
]
return constraints
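# Example (illustrative): a 2-state DFA over inputs 1..2 that forbids two
# consecutive 2s: d = [[1,2],[1,0]], q0 = 1, F = [1,2]. Then
# regular(x, 2, 2, d, 1, F) accepts x = [1,2,1,2] but rejects x = [1,2,2,1].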
def regular_table(x, Q, S, d, q0, F):
"""
Global constraint regular_table
This is a translation of MiniZinc's regular constraint (defined in
lib/zinc/globals.mzn), via the Comet code refered above.
All comments are from the MiniZinc code.
'''
The sequence of values in array 'x' (which must all be in the range 1..S)
is accepted by the DFA of 'Q' states with input 1..S and transition
function 'd' (which maps (1..Q, 1..S) -> 0..Q)) and initial state 'q0'
(which must be in 1..Q) and accepting states 'F' (which all must be in
1..Q). We reserve state 0 to be an always failing state.
'''
x : IntVar array
Q : number of states
S : input_max
d : transition matrix
q0: initial state
F : accepting states
Note: As mentioned above the states must start at 1 since 0 is
represents a failed state.
The difference between this version (regular_table) and
regular is that this version use Table constraint instead
of Element constraint.
"""
assert Q > 0, 'regular: "Q" must be greater than zero'
assert S > 0, 'regular: "S" must be greater than zero'
# d2 is the same as d, except we add one extra transition for
# each possible input; each extra transition is from state zero
# to state zero. This allows us to continue even if we hit a
# non-accepted input.
d2 = []
for i in range(Q + 1):
row = []
for j in range(S):
if i == 0:
# This is different from regular(.)
row.append((0,j,0))
else:
# This is different from regular(.)
row.append((i,j, d[i - 1][j]))
d2.append(row)
d2_flatten = [d2[i][j] for i in range(Q + 1) for j in range(S)]
# If x has index set m..n, then a[m-1] holds the initial state
# (q0), and a[i+1] holds the state we're in after processing
# x[i]. If a[n] is in F, then we succeed (ie. accept the
# string).
x_range = list(range(0, len(x)))
m = 0
n = len(x)
a = [intvar(0, Q + 1) for i in range(m, n + 1)]
constraints = []
# Check that the final state is in F
constraints += [member_of(F,a[-1])]
# First state is q0
constraints += [a[m] == q0]
x_lb, x_ub = get_min_max_domain(x)
for i in x_range:
constraints += [x[i] >= 1]
constraints += [x[i] <= S]
# Determine a[i+1]: a[i+1] == d2[a[i], x[i]]
xi1 = intvar(0,x_ub)
constraints += [
# These two constraints are different
# from regular(.)
xi1 == x[i]-1,
Table((a[i], xi1, a[i + 1]), d2_flatten)
]
return constraints
def lex_less(x,y):
"""
lex_less(x,y)
Ensures that the array 'x' is strictly lexicographically less than array 'y'.
Compares them from first to last element, regardless of indices
This is a port of MiniZinc's definition lex_less_int
https://github.com/MiniZinc/libminizinc/blob/master/share/minizinc/std/fzn_lex_less_int.mzn
Note that we simplify the calculation of lx and ly since cpmpy has start index 0 (in MiniZinc
the start index can be user defined).
"""
xlen = len(x)
ylen = len(y)
ux = xlen
uy = ylen
size = min([ux,uy])
# Do not name variables in global constraints
# since then the variables are not unique.
# b = boolvar(shape=size+1,name="b")
b = boolvar(shape=size+1)
constraints = []
constraints += [b[0] == 1 ]
for i in range(size):
constraints += [b[i] == ((x[i] <= y[i]) &
((x[i] < y[i]) | (b[i+1] == 1)) )]
constraints += [b[size] == (ux < uy)]
return constraints
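# Example (illustrative): lex_less(x,y) accepts x = [1,2,3] together with
# y = [1,3,0] (they first differ at index 1 and 2 < 3), but rejects x == y.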
def lex_greater(x,y):
"""
lex_greater(x,y)
Ensures that the array 'x' is strictly lexicographically greater than array 'y'.
Compares them from first to last element, regardless of indices.
This constraint is defined by lex_less(y,x) defined above .
"""
return lex_less(y,x)
def lex2(x):
"""
lex2(x)
Ensures that the rows and columns in the matrix `x` are increasing,
using lex_less.
"""
x_t = x.transpose()
return [[lex_less(x[i],x[i+1]) for i in range(len(x)-1)],
[lex_less(x_t[i],x_t[i+1]) for i in range(len(x_t)-1)]]
#
# Somewhat general definition of knapsack.
#
def knapsack(values, weights, n):
"""
knapsack(values, weights, n)
Creates a model for the knapsack problem with the values, weights and limit n.
See knapsack.py for usage of this.
"""
z = intvar(0, 10000,name="z")
x = intvar(0,1,shape=len(values),name="x")
model = Model(
[
z >= 0,
z == sum(x*values),
sum(x*weights) <= n,
],
maximize=z
)
return [model, x, z]
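# Hedged usage sketch (made-up values, mirroring the intended use in knapsack.py):
#   values  = np.array([15, 10, 9, 5])
#   weights = np.array([1, 5, 3, 4])
#   model, x, z = knapsack(values, weights, 8)
#   if model.solve():
#       print("picked:", x.value(), "total value:", z.value())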
def my_abs(x,y,d):
"""
A decomposition of abs() for experimentation.
"""
constraints = []
b = boolvar()
constraints += [b == (x >= y)]
constraints += [(b).implies(d == x - y)]
constraints += [(~b).implies(d == y - x)]
return constraints
def my_abs2(x,y):
"""
A decomposition of abs() for experimentation.
"""
constraints = []
b = boolvar()
d = intvar(0,1000000)
constraints += [b == (x >= y)]
constraints += [(b).implies(d == x - y)]
constraints += [(~b).implies(d == y - x)]
return d
def prod(x,res):
"""
prod(x,res)
res is the product of the values in x.
"""
return [reduce(lambda a, b: a * b, x) == res]
def prod1(x):
"""
prod1(x)
return the product of the values in x.
"""
return reduce(lambda a, b: a * b, x)
def among(m,x,v):
"""
among(m,x,v)
Requires exactly m variables in x to take one of the values in v.
"""
return [m == sum([x[i] == j for i in range(len(x)) for j in v])]
#
# Symmetry breaking
#
# From
# http://en.wikipedia.org/wiki/Fr%C3%A9nicle_standard_form
# """
# A magic square is in Frénicle standard form, named for
# <NAME>, if the following two conditions apply:
# - the element at position [1,1] (top left corner) is the smallest
# of the four corner elements; and
# - the element at position [1,2] (top edge, second from left) is
# smaller than the element in [2,1].
# """
#
def frenicle(x,n):
constraints = [x[(0,0)] == min([x[0,0], x[0,n-1], x[n-1,0], x[n-1,n-1]])]
constraints += [x[0,1] < x[1,0]]
return constraints
def distribute(card, value, base):
"""
distribute(card, value, base)
    Requires that 'card[i]' is the number of occurrences of 'value[i]' in 'base'.
Note: card, value, and base are assumed to be intvar arrays.
"""
card_len = len(card)
value_len = len(value)
assert card_len == value_len, "`card` and `value` must have the same length"
base_len = len(base)
constraints = []
constraints += [AllDifferent(value)]
for i in range(card_len):
constraints += [
card[i] == sum([value[i] == base[j] for j in range(base_len)])
]
return constraints
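# Example (illustrative): if value takes the values [1,2] and
# base == [1,2,2,1,1], then distribute(card,value,base) forces
# card == [3,2] (three 1s and two 2s).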
def fill_array(x,x_val):
"""
fill_array(x,x_val)
If x_val[i] != None then x[i] == x_val[i].
"""
constraints = []
for i in range(len(x)):
if x_val[i] != None:
constraints += [x[i] == x_val[i]]
return constraints
def all_different_pairs(a, s):
"""
all_different_pairs(a, s)
all pairs must be different
"""
return [AllDifferent([p for p in pairs(a,s)])]
def increasing_pairs(a, s):
"""
increasing_pairs(a, s)
Ensure that the pairs are in increasing order.
"""
return [increasing(pairs(a,s))]
def decreasing_pairs(a, s):
"""
decreasing_pairs(a, s)
Ensure that the pairs are in decreasing order.
"""
return [decreasing(pairs(a,s))]
def pairs(a, s):
"""
return the pairs of a in the 'integer representation': a[k,0]*(n-1) + a[k,1]
s is the size of max value of n
"""
n = len(a)
return [ a[(k,0)]*(s-1) + a[(k,1)] for k in range(n)]
def all_min_dist(min_dist, x, n):
"""
all_min_dist(min_dist, x, n)
Ensures that the differences of all pairs (i !=j) are >= min_dist.
"""
constraints = []
for i in range(n):
for j in range(i):
constraints += [abs(x[i]-x[j]) >= min_dist] # Nope!
return constraints
def all_different_on_intersection(x, y):
"""
all_different_on_intersection(x, y)
Ensure that the values that are common in x and y are distinct (in each array).
"""
return [count_a_in_b(x,y), count_a_in_b(y,x)]
def count_a_in_b(ass,bss):
"""
count_a_in_b(ass,bss)
helper for all_different_on_intersection
"""
constraints = []
for a in ass:
constraints += [sum([a == b for b in bss]) <= 1]
return constraints
def all_different_modulo(x, m):
"""
all_different_modulo(x, m)
Ensure that all elements in x (modulo m) are distinct
"""
print("x2:",x)
n = len(x)
constraints = []
mods = intvar(0,m-1,shape=n)
for i in range(n):
constraints += [mods[i] == x[i] % m]
constraints += [AllDifferent(mods)]
return constraints
def all_different_cst(xs, cst):
"""
all_different_cst(xs, cst)
Ensure that all elements in xs + cst are distinct
"""
return [AllDifferent([(x + c) for (x,c) in zip(xs,cst)])]
def arith(x, relop, val):
"""
arith(x, relop, val)
Ensure that all elements in x are <relop> val.
"""
constraints = []
for i in range(len(x)):
constraints += [arith_relop(x[i],relop, val)]
return constraints
def arith_relop(a, t, b):
"""
arith_relop(a, t, b)
This is (arguably) a hack.
Represents each function as an integer 0..5.
"""
return [(t == 0).implies(a < b),
(t == 1).implies(a <= b),
(t == 2).implies(a == b),
(t == 3).implies(a >= b),
(t == 4).implies(a > b),
(t == 5).implies(a != b)
]
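# Mapping used above (illustrative): t = 0 -> <, 1 -> <=, 2 -> ==, 3 -> >=,
# 4 -> >, 5 -> !=, so e.g. arith(x, 3, 10) is intended to require every
# x[i] >= 10.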
#
# diffn ported from MiniZinc's fzn_diffn:
#
def diffn(x,y,dx,dy):
"""
diffn(x,y,dx,dy)
Constrains rectangles i, given by their origins x[i], y[i])
and sizes (dx[i], dy[i]), to be non-overlapping. Zero-width
rectangles can still not overlap with any other rectangle.
"""
n = len(x)
constraints = []
for i in range(n):
for j in range(i+1,n):
constraints += [(x[i] + dx[i] <= x[j]) |
(y[i] + dy[i] <= y[j]) |
(x[j] + dx[j] <= x[i]) |
(y[j] + dy[j] <= y[i])
]
return constraints
def nvalue(m, x):
"""
nvalue(m, x)
    Requires that there are exactly m distinct values in x
(min_val and max_val are the minimum and maximum value
in x, respectively)
"""
n = len(x)
min_val = min([x[i].lb for i in range(n)])
max_val = max([x[i].ub for i in range(n)])
return (m == sum([ sum([ x[j] == i for j in range(n)]) > 0 for i in range(min_val, max_val+1)]))
#
# nvalues(x,op,n)
#
# Requires that the number of distinct values in the array x is
# op n
# where
# op is either one of
#   <, <=, =, >=, >, !=  (encoded as 0..5, see arith_relop)
#
def nvalues(x, op, n):
xlen = len(x)
m = intvar(1,xlen)
return [nvalue(m,x),
arith_relop(m,op,n)
]
def clique(g, clique, card):
"""
clique(g, clique, card)
Ensure that the boolean array 'clique' (of Integer Array type)
represents a clique in the graph g with the cardinality card.
    Note: This is kind of backward, but it is the whole thing:
    if there is no edge between nodes I and J (I != J) in g, then
    c1 and c2 cannot both be in the clique.
"""
n = len(g)
constraints = []
constraints += [card == sum([clique[i] for i in range(n)])]
for (c1,i) in zip(clique, range(n)):
for (c2,j) in zip(clique, range(n)):
if i != j and g[i][j] == 0:
constraints += [(c1 == 0) | (c2 == 0)]
return constraints
def assignment_model(cost, tasks=None,people=None,print_solution=None,opt="min"):
"""
    assignment_model(cost, tasks=None, people=None, print_solution=None, opt='min'):
    Fairly general implementation of the assignment problem:
    Minimize the total cost of assigning all tasks to persons, given
    the cost of assigning a person to each task.
    For problems where 'tasks' and 'people' do not apply, a user-defined
    method 'print_solution' can be used.
For maximization problems, use opt='max'.
"""
rows = len(cost)
cols = len(cost[0])
max_cost = np.sum(np.array(cost))
total_cost = intvar(0,max_cost,name='cost')
x = boolvar(shape=(rows,cols),name="x")
model = Model(
total_cost >= 0,
total_cost == np.sum([ x_row*cost_row for (x_row, cost_row) in zip(x, cost)]),
# exacly one assignment per row, all rows (tasks) must be assigned.
[sum(row) == 1 for row in x],
# zero or one assignments per column (people)
[sum(col) <= 1 for col in x.transpose()],
)
if opt == "max":
model.maximize(total_cost)
else:
model.minimize(total_cost)
ss = CPM_ortools(model)
if ss.solve():
print("total_cost: ", total_cost.value())
print("x:")
print(x.value())
print()
if tasks == None and people == None:
for i in range(rows):
print("Task", i, end="")
for j in range(cols):
if x[i][j].value() == 1:
print(" is done by ", j)
print()
else:
if print_solution != None:
print_solution(x.value(),tasks,people)
else:
for i in range(rows):
print("Task", tasks[i], end="")
for j in range(cols):
if x[i][j].value() == 1:
print(" is done by", people[j])
print()
def latin_square(x):
"""
latin_square(x)
The matrix x is a Latin square.
"""
return [[AllDifferent(row) for row in x],
[AllDifferent(col) for col in x.transpose()]]
#
# reverses an array from -> to
#
def reverse(xfrom, xto):
"""
reverse(xfrom, xto)
xto is reverse of xfrom.
"""
n = len(xfrom)
return [xto[i] == xfrom[n-i-1] for i in range(n)]
def print_model_and_variables(model):
"""
print_model_and_variables(model)
Prints the following:
- the unflattened model (via print(model))
- the flattened model
- the variables and the domains in the flattened model
(From <NAME> when he debugged one of my models. Thanks, Tias!)
"""
print("Model:")
print(model)
print("\nFlattened model and variables:")
mf = flatten_model(model)
print_variables(mf)
print(mf)
print()
def argmax(x,p):
"""
argmax(x,p)
Ensure that p is the argmax, i.e. the position of the maximum value
in x.
Note: If there are many maximum values then argmax(x,p) will find
all these values.
"""
n = len(x)
constraints = []
for i in range(n):
constraints += [(p != i).implies(x[p] > x[i]) ]
return constraints
def argmin(x,p):
"""
argmin(x,p)
Ensure that p is the argmin, i.e. the position of the minimum value
in x.
Note: If there are many minimum values then argmin(x,p) will find
all these values.
"""
n = len(x)
constraints = []
for i in range(n):
constraints += [(p != i).implies(x[p] < x[i]) ]
return constraints
def argmin_except_c(x,p,c):
"""
argmin_except_c(x,p,c)
Ensure that p is the argmin, i.e. the position of the minimum value
in x, but ignores any value of c.
Note:
- If there are many minimum values then argmin_except_c(x,p,c) will find
all these values.
- We assume that there are at least one value != c.
"""
n = len(x)
constraints = [x[p] != c]
for i in range(n):
constraints += [(p != i).implies((x[i] == c) | (x[p] < x[i])) ]
return constraints
def argmin_except_0(x,p):
"""
argmin_except_0(x,p)
Ensure that p is the argmin, i.e. the position of the minimum value
in x, but ignores any value of 0.
Note:
- If there are many minimum values then argmin_except_0(x,p) will find
all these values.
    - We assume that there is at least one value > 0.
"""
return argmin_except_c(x,p,0)
def argmax_except_c(x,p,c):
"""
argmax_except_c(x,p,c)
    Ensure that p is the argmax, i.e. the position of the maximum value
in x, but ignores any value of c.
Note:
- If there are many maximum values then argmax_except_c(x,p,c) will find
all these values.
    - We assume that there is at least one value != c.
"""
n = len(x)
constraints = [x[p] != c]
for i in range(n):
constraints += [(p != i).implies((x[i] == c) | (x[p] > x[i])) ]
return constraints
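# Added usage sketch (hypothetical): position of the smallest non-zero value,
# assuming cpmpy's intvar/Model are imported as above.
def _demo_argmin_except_0():
    x = intvar(0, 9, shape=5, name="x")
    p = intvar(0, 4, name="p")
    model = Model([x[0] == 0, argmin_except_0(x, p)])
    if model.solve():
        print("x:", x.value(), " position of the smallest non-zero value:", p.value())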
def permutation3(x,p,y):
"""
permutation(x,p,y)
Ensure that the array y is a permutation of array x with the permutation
operations in array p.
Example:
x = [2,0,1,3]
p = [2,1,3,0]
What is y?
y[0] = x[p[0]] = x[2] = 1
y[1] = x[p[1]] = x[1] = 0
y[2] = x[p[2]] = x[3] = 3
y[3] = x[p[3]] = x[0] = 2
Thus:
y = [1,0,3,2]
Assumptions:
    - We assume that x, p, and y have distinct values, i.e. constrained by
AllDifferent.
We check that:
- p has the domain of 0..len(p)-1
"""
n = len(x)
    assert n == len(p) and n == len(y), "Length of x, p, and y must be the same"
p_lb, p_ub = get_min_max_domain(p)
assert p_lb == 0 and p_ub == n-1, "Domain value of p must be 0..n-1"
constraints = []
for i in range(n):
constraints += [y[i] == x[p[i]] ]
return constraints
def permutation(x,y):
"""
permutation(x,y)
Ensure that the array y is a permutation of array x,
connected with some unknown permutation.
permutation3(x,p,y) is used (which see).
"""
n = len(x)
p = intvar(0,n-1,shape=n)
return permutation3(x,p,y)
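# Added usage sketch (hypothetical), reusing the example from the permutation3
# docstring above; assumes cpmpy's intvar/Model are imported in this module.
def _demo_permutation3():
    x = intvar(0, 3, shape=4, name="x")
    p = intvar(0, 3, shape=4, name="p")
    y = intvar(0, 3, shape=4, name="y")
    model = Model([[x[i] == v for i, v in enumerate([2, 0, 1, 3])],
                   [p[i] == v for i, v in enumerate([2, 1, 3, 0])],
                   permutation3(x, p, y)])
    if model.solve():
        print("y:", y.value())  # expected [1, 0, 3, 2]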
def get_min_max_domain(x):
"""
get_min_max_domain(x)
Return the minimum and maximum domain of an array x.
"""
n = len(x)
x_lb = min([x[i].lb for i in range(n)])
x_ub = max([x[i].ub for i in range(n)])
return [x_lb,x_ub]
def chain(op,x):
"""
chain(op,x)
Ensure that all elements pairwise satisfies the binary operator op.
Note: In order for this to work the operator must be from the
operator library, e.g. operator.lt, operator.ne, e.g:
chain(operator.lt,x)
Note: Many of the binary operator.* has a definition already, e.g.
(from cpmpy_hakank.py):
increasing, increasing_strict, decreasing, descreasing_strict
and
AllDifferent, AllEqual
"""
n = len(x)
constraints = []
for i in range(1,n):
constraints += [ op(x[i-1], x[i]) ]
return constraints
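# Added usage sketch (hypothetical): a strictly increasing sequence via chain(),
# assuming cpmpy's intvar/Model are imported; the operator module is imported
# locally here to keep the sketch self-contained.
def _demo_chain():
    import operator
    x = intvar(1, 9, shape=4, name="x")
    model = Model(chain(operator.lt, x))
    if model.solve():
        print("strictly increasing x:", x.value())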
def minimum_except_c(x,min_val,c,allow_all_c=False):
"""
minimum_except_c(x,min_val,c,allow_all_c)
Ensures that min_val is the minimum value in array x, ignoring the value of c.
The flag allow_all_c:
- If True: allow an array with only c values: min_val is thus c.
- If False: assume that there is at least one non c value. min_val must be != c.
"""
n = len(x)
ix = intvar(0,n-1)
# Ensure that min_val is in x
constraints = [min_val == x[ix]]
for j in range(n):
        constraints += [(min_val <= x[j]) | (x[j] == c)]
if allow_all_c:
        max_val = max(x) # To be able to handle the case when there are only c values
constraints += [(max_val == c)==(min_val == c)]
else:
constraints += [min_val != c]
return constraints
def minimum_except_0(x,min_val,allow_all_0s=False):
"""
minimum_except_0(x,min_val,allow_all_0s)
Ensures that min_val is the minimum value in array x, ignoring 0s.
The flag allow_all_0s:
- If True: allow an array with only 0 values: min_val is thus 0.
- If False: assume that there is at least one non 0 value. min_val must be != 0.
"""
    return minimum_except_c(x,min_val,0,allow_all_0s)
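# Added usage sketch (hypothetical): smallest non-zero value of a fixed array,
# assuming cpmpy's intvar/Model are imported as above.
def _demo_minimum_except_0():
    x = intvar(0, 9, shape=5, name="x")
    min_val = intvar(1, 9, name="min_val")
    model = Model([x[0] == 0, x[1] == 7, x[2] == 3, x[3] == 0, x[4] == 5,
                   minimum_except_0(x, min_val)])
    if model.solve():
        print("minimum non-zero value:", min_val.value())  # expected 3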
def value_precede(s,t,x):
"""
value_precede(s,t, x)
Ensures that the (first occurrence) of the value s precedes
the (first occurrence) of the value t in array x if both
s and t are in x.
    This means that if t occurs in x, then s must occur before it.
This definition is inspired by MiniZinc's definition
value_precede.mzn
"""
n = len(x)
bs = boolvar(shape=n+1)
constraints = []
for i in range(n):
xis = boolvar()
constraints += [(xis ==1)==(x[i] == s),
(xis ==1).implies(bs[i+1]==1),
(xis == 0).implies(bs[i]==bs[i+1]),
(bs[i] == 0).implies(x[i] != t)
]
constraints += [bs[0] == 0]
return constraints
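# Added usage sketch (hypothetical): the first 1 must occur before the first 2,
# assuming cpmpy's intvar/boolvar/Model are imported as above. Forcing x[0] == 2
# makes the model unsatisfiable.
def _demo_value_precede():
    x = intvar(0, 2, shape=4, name="x")
    model = Model([x[0] == 2, value_precede(1, 2, x)])
    print("satisfiable:", model.solve())  # expected False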
def value_precede_chain(c,x):
"""
value_precede_chain(c, x)
    Ensures that the value c[i-1] precedes the value c[i] in the array x
if both c[i-1] and c[i] are in x.
See value_precede().
"""
n=len(c)
constraints = []
for i in range(1,n):
constraints += [value_precede(c[i-1],c[i],x)]
return constraints
def sliding_sum(low, up, seq, x):
"""
sliding_sum(low, up, seq, x)
    Ensure that every sequence of length seq in x sums to a value between low and up.
"""
vlen = len(x)
constraints = []
for i in range(vlen-seq+1):
s = intvar(low,up)
constraints += [s == sum([x[j] for j in range(i,i+seq)])]
return constraints
def no_overlap(s1, d1, s2, d2):
"""
no_overlap(s1, d1, s2, d2)
Ensures that task 1 (start time s1 with duration d1) does not overlap with
task2 (start time s2 with duration d2)
"""
return [(s1 + d1 <= s2) | (s2 + d2 <= s1)]
def is_prime(n):
"""
is_prime(n)
Returns True if the number n is a prime number, otherwise return False.
"""
if n < 2: return False
if n == 2: return True
if not n & 1:
return False
for i in range(3, 1+int(math.sqrt(n)), 2):
if n % i == 0:
return False
return True
def primes(limit):
"""
primes(limit)
Returns the prime numbers below limit.
"""
primes = [2]
for i in range(3, limit, 2):
if is_prime(i):
primes.append(i)
return primes
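# Added usage sketch (hypothetical): plain Python helpers, no solver involved;
# assumes math is imported at the top of this file (used by is_prime).
def _demo_primes():
    print(is_prime(13))  # True
    print(is_prime(21))  # False
    print(primes(20))    # [2, 3, 5, 7, 11, 13, 17, 19]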
def all_different_reif(x,b):
"""
all_different_reif(x,b)
b == 1 if all values in x are different, else 0.
"""
n = len(x)
m = intvar(1,n)
return [nvalue(m,x),
(m==n)==(b==1)
]
def all_different_reif_m(model,x):
"""
    all_different_reif_m(model,x)
b == 1 if all values in x are different, else 0.
This version returns b.
Note that the model is a parameter so it must be
created first:
x = intvar(...)
b = boolvar()
model = Model(...)
model += [b == all_different_reif_m(model,x)]
"""
n = len(x)
m = intvar(1,n)
b = boolvar()
model += [nvalue(m,x),
(m==n)==(b==1)]
return b
def lex_chain_less(x):
"""
lex_chain_less(x)
Require that all the rows are lexicographically sorted
(but not the columns as in lex2).
See: http://www.emn.fr/z-info/sdemasse/gccat/Clex_chain_less.html
"""
n = len(x)
m = len(x[0])
constraints = []
for i in range(1,n):
constraints += [lex_less([x[i-1,j] for j in range(m)], [x[i,j] for j in range(m)])]
return constraints
def soft_alldifferent(x,p):
"""
soft_alldifferent(x,p)
p is the number of pairs that have the same value.
See http://www.emn.fr/z-info/sdemasse/gccat/Csoft_alldifferent_ctr.html
"""
n = len(x)
return [p == sum([x[i] == x[j] for i in range(n) for j in range(i+1,n)])]
def among_seq(low,high,seqlen,x,v):
"""
among_seq(low, high, seqlen, x, v)
    Ensures that every sequence of length seqlen in the list x
    contains at least low and at most high occurrences of v.
"""
n = len(x)
size = n-seqlen+1
constraints = []
for i in range(size):
seq = [x[j] for j in range(i,i+seqlen)]
constraints += [among_range(low, high, seq, v)]
return constraints
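# Added usage sketch (hypothetical): every window of 3 consecutive elements
# must contain one or two values from {7, 8}; assumes cpmpy's intvar/Model are
# imported as above (among_seq relies on among_range below).
def _demo_among_seq():
    x = intvar(1, 9, shape=6, name="x")
    model = Model(among_seq(1, 2, 3, x, [7, 8]))
    if model.solve():
        print("x:", x.value())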
def among_range(low, high,x,v):
"""
among_range(low, high, x, v)
    Ensures that the list x contains at least low and at most high
occurrences of v.
Used by among_seq.
"""
xs = intvar(0,len(x))
vlen = len(v)
return [
xs == sum([sum([el == v[i] for i in range(vlen)])>0 for el in x]),
xs >= low,
xs <= high]
def sequence(x,seq_length, lbound,ubound):
"""
    sequence(x,seq_length,lbound,ubound)
    Ensures that the sum of every subsequence of length seq_length
    in array x is between lbound and ubound.
"""
n = len(x)
    constraints = []
    for i in range(n-seq_length+1):
        # a separate sum variable per window (cf. sliding_sum above)
        xs = intvar(lbound.lb,ubound.ub)
        constraints += [xs == sum([x[j] for j in range(i,i+seq_length)]),
                        xs >= lbound,
                        xs <= ubound
                        ]
return constraints
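# Added usage sketch (hypothetical): every window of length 3 must sum to a
# value between lbound and ubound. Note that sequence() reads lbound.lb and
# ubound.ub, so both bounds are passed as cpmpy integer variables; assumes
# intvar/Model are imported as above.
def _demo_sequence():
    x = intvar(1, 4, shape=6, name="x")
    lbound = intvar(5, 6, name="lbound")
    ubound = intvar(7, 9, name="ubound")
    model = Model(sequence(x, 3, lbound, ubound))
    if model.solve():
        print("x:", x.value(), " lbound:", lbound.value(), " ubound:", ubound.value())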
|
[
"math.sqrt",
"itertools.combinations",
"numpy.array",
"functools.reduce",
"cpmpy.transformations.flatten_model.flatten_model",
"cpmpy.transformations.get_variables.print_variables"
] |
[((29382, 29411), 'functools.reduce', 'reduce', (['(lambda a, b: a * b)', 'x'], {}), '(lambda a, b: a * b, x)\n', (29388, 29411), False, 'from functools import reduce\n'), ((38373, 38393), 'cpmpy.transformations.flatten_model.flatten_model', 'flatten_model', (['model'], {}), '(model)\n', (38386, 38393), False, 'from cpmpy.transformations.flatten_model import flatten_constraint, flatten_model\n'), ((38396, 38415), 'cpmpy.transformations.get_variables.print_variables', 'print_variables', (['mf'], {}), '(mf)\n', (38411, 38415), False, 'from cpmpy.transformations.get_variables import print_variables\n'), ((1950, 1981), 'itertools.combinations', 'itertools.combinations', (['args', '(2)'], {}), '(args, 2)\n', (1972, 1981), False, 'import itertools\n'), ((36209, 36223), 'numpy.array', 'np.array', (['cost'], {}), '(cost)\n', (36217, 36223), True, 'import numpy as np\n'), ((29255, 29284), 'functools.reduce', 'reduce', (['(lambda a, b: a * b)', 'x'], {}), '(lambda a, b: a * b, x)\n', (29261, 29284), False, 'from functools import reduce\n'), ((45503, 45515), 'math.sqrt', 'math.sqrt', (['n'], {}), '(n)\n', (45512, 45515), False, 'import sys, math, re\n')]
|
# --------------
# Importing header files
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Reading file
data = np.genfromtxt(path, delimiter=",", skip_header=1)
print(data)
#Code starts here
census = np.concatenate((new_record,data),axis=0)
print(census)
age = np.array(census[0:,0])
print(age)
max_age = np.max(age)
min_age = np.min(age)
age_mean = age.mean()
age_std = np.std(age)
print(max_age,min_age,age_mean,age_std, sep='\n')
race = np.array(census[0:,2])
race_0=census[census[:,2]==0]
race_1=census[census[:,2]==1]
race_2=census[census[:,2]==2]
race_3=census[census[:,2]==3]
race_4=census[census[:,2]==4]
len_0=len(race_0)
len_1=len(race_1)
len_2=len(race_2)
len_3=len(race_3)
len_4=len(race_4)
print(len_0,len_1,len_2,len_3,len_4)
minority_race = min(len_0,len_1,len_2,len_3,len_4)
print(minority_race)
senior_citizens =census[census[:,0]>60]
#working_hours_sum = 0
working_hours = np.array(senior_citizens[0:,6])
working_hours_sum= np.sum(working_hours)
#for i in range(0,len(senior_citizens)):
# working_hours_sum = working_hours_sum + senior_citizens[i][6]
print(working_hours_sum)
print(len(senior_citizens))
avg_working_hours = np.mean(working_hours)
print(avg_working_hours)
high=census[census[:,1]>10]
low=census[census[:,1]<=10]
avg_pay_high = np.mean(np.array(high[0:,7]))
avg_pay_low = np.mean(np.array(low[0:,7]))
if(avg_pay_high>avg_pay_low):
    print("Higher education corresponds to higher average pay")
else:
    print("Higher education does not correspond to higher average pay")
|
[
"numpy.sum",
"warnings.filterwarnings",
"numpy.std",
"numpy.genfromtxt",
"numpy.max",
"numpy.min",
"numpy.array",
"numpy.mean",
"numpy.concatenate"
] |
[((82, 115), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (105, 115), False, 'import warnings\n'), ((203, 252), 'numpy.genfromtxt', 'np.genfromtxt', (['path'], {'delimiter': '""","""', 'skip_header': '(1)'}), "(path, delimiter=',', skip_header=1)\n", (216, 252), True, 'import numpy as np\n'), ((295, 337), 'numpy.concatenate', 'np.concatenate', (['(new_record, data)'], {'axis': '(0)'}), '((new_record, data), axis=0)\n', (309, 337), True, 'import numpy as np\n'), ((358, 381), 'numpy.array', 'np.array', (['census[0:, 0]'], {}), '(census[0:, 0])\n', (366, 381), True, 'import numpy as np\n'), ((404, 415), 'numpy.max', 'np.max', (['age'], {}), '(age)\n', (410, 415), True, 'import numpy as np\n'), ((427, 438), 'numpy.min', 'np.min', (['age'], {}), '(age)\n', (433, 438), True, 'import numpy as np\n'), ((473, 484), 'numpy.std', 'np.std', (['age'], {}), '(age)\n', (479, 484), True, 'import numpy as np\n'), ((544, 567), 'numpy.array', 'np.array', (['census[0:, 2]'], {}), '(census[0:, 2])\n', (552, 567), True, 'import numpy as np\n'), ((1023, 1055), 'numpy.array', 'np.array', (['senior_citizens[0:, 6]'], {}), '(senior_citizens[0:, 6])\n', (1031, 1055), True, 'import numpy as np\n'), ((1075, 1096), 'numpy.sum', 'np.sum', (['working_hours'], {}), '(working_hours)\n', (1081, 1096), True, 'import numpy as np\n'), ((1287, 1309), 'numpy.mean', 'np.mean', (['working_hours'], {}), '(working_hours)\n', (1294, 1309), True, 'import numpy as np\n'), ((1422, 1443), 'numpy.array', 'np.array', (['high[0:, 7]'], {}), '(high[0:, 7])\n', (1430, 1443), True, 'import numpy as np\n'), ((1467, 1487), 'numpy.array', 'np.array', (['low[0:, 7]'], {}), '(low[0:, 7])\n', (1475, 1487), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import sys
try:
from Cython.Distutils import build_ext
except ImportError:
def build_ext(*args, **kwargs):
from Cython.Distutils import build_ext
return build_ext(*args, **kwargs)
class lazy_extlist(list):
def __init__(self, callback):
self._list, self.callback = None, callback
def c_list(self):
if self._list is None:
self._list = self.callback()
return self._list
def __iter__(self):
for e in self.c_list():
yield e
def __getitem__(self, ii):
return self.c_list()[ii]
def __len__(self):
return len(self.c_list())
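# Note (added): lazy_extlist defers calling `callback` until setuptools actually
# iterates or indexes ext_modules, i.e. after setup_requires (numpy, cython)
# have been installed. A minimal illustration with hypothetical values:
#   exts = lazy_extlist(lambda: ["ext_a", "ext_b"])
#   len(exts)  # the callback runs here, lazily, and returns 2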
def extensions():
__builtins__.__NUMPY_SETUP__ = False
from Cython.Distutils import Extension
import numpy as np
extra_compile_args = ["-O3"]
extra_link_args = []
if sys.platform == "darwin":
extra_compile_args.append("-mmacosx-version-min=10.9")
extra_compile_args.append('-stdlib=libc++')
extra_link_args.append('-stdlib=libc++')
return [Extension(
'pydtw.dtw',
["pydtw/dtw.pyx"],
cython_directives={'language_level': sys.version_info[0]},
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
include_dirs=[np.get_include()],
language="c++")
]
setup(
name="pydtw",
description='Fast Implementation of Dynamic Time Warping',
version="2.0.3",
long_description=open('README.rst').read(),
packages=find_packages(),
setup_requires=["numpy", 'cython'],
ext_modules=lazy_extlist(extensions),
cmdclass={'build_ext': build_ext},
author='<NAME>',
author_email="<EMAIL>",
url='https://github.com/shunsukeaihara/pydtw',
license="MIT License",
include_package_data=True,
test_suite='nose.collector',
tests_require=['nose', 'numpy', 'cython'],
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 2",
]
)
|
[
"Cython.Distutils.build_ext",
"numpy.get_include",
"setuptools.find_packages"
] |
[((1559, 1574), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1572, 1574), False, 'from setuptools import setup, find_packages\n'), ((247, 273), 'Cython.Distutils.build_ext', 'build_ext', (['*args'], {}), '(*args, **kwargs)\n', (256, 273), False, 'from Cython.Distutils import build_ext\n'), ((1338, 1354), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (1352, 1354), True, 'import numpy as np\n')]
|
import openpnm as op
import numpy as np
import matplotlib.pyplot as plt
pn = op.network.Cubic(shape=[10, 10, 10], spacing=1e-4)
geo = op.geometry.SpheresAndCylinders(network=pn, pores=pn.Ps, throats=pn.Ts)
air = op.phases.Air(network=pn, name='air')
water = op.phases.Water(network=pn, name='h2o')
phys_air = op.physics.Standard(network=pn, phase=air, geometry=geo)
phys_water = op.physics.Standard(network=pn, phase=water, geometry=geo)
ip = op.algorithms.InvasionPercolation(network=pn, phase=water)
ip.set_inlets(pores=pn.pores('left'))
ip.run()
Krel = []
for s in np.linspace(0, pn.Nt, 10):
inv = ip['throat.invasion_sequence'] < s
phys_air['throat.hydraulic_conductance'][inv] *= 1e-5
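    # Added note: `inv` includes every throat invaded at or before sequence s,
    # so throats invaded early are re-scaled by 1e-5 on each later iteration
    # (the conductance reduction accumulates across the loop).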
perm_a = op.algorithms.StokesFlow(network=pn, phase=air)
perm_a.set_value_BC(pores=pn.pores('top'), values=1)
perm_a.set_value_BC(pores=pn.pores('bottom'), values=0)
perm_a.run()
Krel.append(perm_a.rate(pores=pn.pores('top')))
plt.plot(np.linspace(0, pn.Nt, 10)/pn.Nt, Krel)
# Export to Statoil format.
# Add reservoir pores on each end
op.io.Statoil.add_reservoir_pore(network=pn,
pores=pn.pores('left'),
offset=0.25)
op.io.Statoil.add_reservoir_pore(network=pn,
pores=pn.pores('right'),
offset=0.25)
op.io.Statoil.export_data(network=pn, shape=[10, 10, 10])
|
[
"openpnm.algorithms.InvasionPercolation",
"openpnm.phases.Air",
"openpnm.network.Cubic",
"openpnm.algorithms.StokesFlow",
"openpnm.geometry.SpheresAndCylinders",
"openpnm.physics.Standard",
"openpnm.phases.Water",
"numpy.linspace",
"openpnm.io.Statoil.export_data"
] |
[((78, 130), 'openpnm.network.Cubic', 'op.network.Cubic', ([], {'shape': '[10, 10, 10]', 'spacing': '(0.0001)'}), '(shape=[10, 10, 10], spacing=0.0001)\n', (94, 130), True, 'import openpnm as op\n'), ((135, 206), 'openpnm.geometry.SpheresAndCylinders', 'op.geometry.SpheresAndCylinders', ([], {'network': 'pn', 'pores': 'pn.Ps', 'throats': 'pn.Ts'}), '(network=pn, pores=pn.Ps, throats=pn.Ts)\n', (166, 206), True, 'import openpnm as op\n'), ((213, 250), 'openpnm.phases.Air', 'op.phases.Air', ([], {'network': 'pn', 'name': '"""air"""'}), "(network=pn, name='air')\n", (226, 250), True, 'import openpnm as op\n'), ((259, 298), 'openpnm.phases.Water', 'op.phases.Water', ([], {'network': 'pn', 'name': '"""h2o"""'}), "(network=pn, name='h2o')\n", (274, 298), True, 'import openpnm as op\n'), ((310, 366), 'openpnm.physics.Standard', 'op.physics.Standard', ([], {'network': 'pn', 'phase': 'air', 'geometry': 'geo'}), '(network=pn, phase=air, geometry=geo)\n', (329, 366), True, 'import openpnm as op\n'), ((380, 438), 'openpnm.physics.Standard', 'op.physics.Standard', ([], {'network': 'pn', 'phase': 'water', 'geometry': 'geo'}), '(network=pn, phase=water, geometry=geo)\n', (399, 438), True, 'import openpnm as op\n'), ((446, 504), 'openpnm.algorithms.InvasionPercolation', 'op.algorithms.InvasionPercolation', ([], {'network': 'pn', 'phase': 'water'}), '(network=pn, phase=water)\n', (479, 504), True, 'import openpnm as op\n'), ((573, 598), 'numpy.linspace', 'np.linspace', (['(0)', 'pn.Nt', '(10)'], {}), '(0, pn.Nt, 10)\n', (584, 598), True, 'import numpy as np\n'), ((1358, 1415), 'openpnm.io.Statoil.export_data', 'op.io.Statoil.export_data', ([], {'network': 'pn', 'shape': '[10, 10, 10]'}), '(network=pn, shape=[10, 10, 10])\n', (1383, 1415), True, 'import openpnm as op\n'), ((716, 763), 'openpnm.algorithms.StokesFlow', 'op.algorithms.StokesFlow', ([], {'network': 'pn', 'phase': 'air'}), '(network=pn, phase=air)\n', (740, 763), True, 'import openpnm as op\n'), ((959, 984), 'numpy.linspace', 'np.linspace', (['(0)', 'pn.Nt', '(10)'], {}), '(0, pn.Nt, 10)\n', (970, 984), True, 'import numpy as np\n')]
|
import torch
import numpy as np
from torchvision import models
from utils.misc import *
from utils.process_fp import process_inputs_fp
def compute_features(tg_model, free_model, tg_feature_model, is_start_iteration, evalloader, num_samples, num_features, device=None):
if device is None:
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
tg_feature_model.eval()
tg_model.eval()
if free_model is not None:
free_model.eval()
features = np.zeros([num_samples, num_features])
start_idx = 0
with torch.no_grad():
for inputs, targets in evalloader:
inputs = inputs.to(device)
if is_start_iteration:
the_feature = tg_feature_model(inputs)
else:
the_feature = process_inputs_fp(tg_model, free_model, inputs, feature_mode=True)
features[start_idx:start_idx+inputs.shape[0], :] = np.squeeze(the_feature.cpu().numpy())
start_idx = start_idx+inputs.shape[0]
assert(start_idx==num_samples)
return features
|
[
"utils.process_fp.process_inputs_fp",
"torch.no_grad",
"numpy.zeros",
"torch.cuda.is_available"
] |
[((493, 530), 'numpy.zeros', 'np.zeros', (['[num_samples, num_features]'], {}), '([num_samples, num_features])\n', (501, 530), True, 'import numpy as np\n'), ((558, 573), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (571, 573), False, 'import torch\n'), ((335, 360), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (358, 360), False, 'import torch\n'), ((795, 861), 'utils.process_fp.process_inputs_fp', 'process_inputs_fp', (['tg_model', 'free_model', 'inputs'], {'feature_mode': '(True)'}), '(tg_model, free_model, inputs, feature_mode=True)\n', (812, 861), False, 'from utils.process_fp import process_inputs_fp\n')]
|
"Unit tests for Constraint, MonomialEquality and SignomialInequality"
import unittest
import numpy as np
from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable
from gpkit.nomials import SignomialInequality, PosynomialInequality
from gpkit.nomials import MonomialEquality
from gpkit import Model, ConstraintSet
from gpkit.constraints.costed import CostedConstraintSet
from gpkit.constraints.tight import Tight
from gpkit.constraints.loose import Loose
from gpkit.tests.helpers import run_tests
from gpkit.exceptions import (InvalidGPConstraint, PrimalInfeasible,
DimensionalityError)
from gpkit.constraints.relax import (ConstraintsRelaxed, ConstantsRelaxed,
ConstraintsRelaxedEqually)
from gpkit.constraints.bounded import Bounded
from gpkit.globals import NamedVariables
import gpkit
class TestConstraint(unittest.TestCase):
"Tests for Constraint class"
def test_uninited_element(self):
x = Variable("x")
class SelfPass(Model):
"A model which contains itself!"
def setup(self):
ConstraintSet([self, x <= 1])
self.assertRaises(ValueError, SelfPass)
def test_bad_elements(self):
x = Variable("x")
with self.assertRaises(ValueError):
_ = Model(x, [x == "A"])
with self.assertRaises(ValueError):
_ = Model(x, [x >= 1, x == "A"])
with self.assertRaises(ValueError):
_ = Model(x, [x >= 1, x == "A", x >= 1, ])
with self.assertRaises(ValueError):
_ = Model(x, [x == "A", x >= 1])
v = VectorVariable(2, "v")
with self.assertRaises(ValueError):
_ = Model(x, [v == "A"])
with self.assertRaises(TypeError):
_ = Model(x, [v <= ["A", "B"]])
with self.assertRaises(TypeError):
_ = Model(x, [v >= ["A", "B"]])
def test_evalfn(self):
x = Variable("x")
x2 = Variable("x^2", evalfn=lambda solv: solv[x]**2)
m = Model(x, [x >= 2])
m.unique_varkeys = set([x2.key])
sol = m.solve(verbosity=0)
self.assertAlmostEqual(sol(x2), sol(x)**2)
def test_relax_list(self):
x = Variable("x")
x_max = Variable("x_max", 1)
x_min = Variable("x_min", 2)
constraints = [x_min <= x, x <= x_max]
ConstraintsRelaxed(constraints)
ConstantsRelaxed(constraints)
ConstraintsRelaxedEqually(constraints)
def test_relax_linked(self):
x = Variable("x")
x_max = Variable("x_max", 1)
x_min = Variable("x_min", lambda c: 2*c[x_max])
zero = Variable("zero", lambda c: 0*c[x_max])
constraints = ConstraintSet([x_min + zero <= x, x + zero <= x_max])
_ = ConstantsRelaxed(constraints)
NamedVariables.reset_modelnumbers()
include_min = ConstantsRelaxed(constraints, include_only=["x_min"])
NamedVariables.reset_modelnumbers()
exclude_max = ConstantsRelaxed(constraints, exclude=["x_max"])
self.assertEqual(str(include_min), str(exclude_max))
def test_equality_relaxation(self):
x = Variable("x")
m = Model(x, [x == 3, x == 4])
rc = ConstraintsRelaxed(m)
m2 = Model(rc.relaxvars.prod() * x**0.01, rc)
self.assertAlmostEqual(m2.solve(verbosity=0)(x), 3, places=3)
def test_constraintget(self):
x = Variable("x")
x_ = Variable("x", lineage=[("_", 0)])
xv = VectorVariable(2, "x")
xv_ = VectorVariable(2, "x", lineage=[("_", 0)])
self.assertEqual(Model(x, [x >= 1])["x"], x)
with self.assertRaises(ValueError):
_ = Model(x, [x >= 1, x_ >= 1])["x"]
with self.assertRaises(ValueError):
_ = Model(x, [x >= 1, xv >= 1])["x"]
self.assertTrue(all(Model(xv.prod(), [xv >= 1])["x"] == xv))
with self.assertRaises(ValueError):
_ = Model(xv.prod(), [xv >= 1, xv_ >= 1])["x"]
with self.assertRaises(ValueError):
_ = Model(xv.prod(), [xv >= 1, x_ >= 1])["x"]
def test_additive_scalar(self):
"Make sure additive scalars simplify properly"
x = Variable('x')
c1 = 1 >= 10*x
c2 = 1 >= 5*x + 0.5
self.assertEqual(type(c1), PosynomialInequality)
self.assertEqual(type(c2), PosynomialInequality)
c1hmap, = c1.as_hmapslt1({})
c2hmap, = c2.as_hmapslt1({})
self.assertEqual(c1hmap, c2hmap)
def test_additive_scalar_gt1(self):
"1 can't be greater than (1 + something positive)"
x = Variable('x')
def constr():
"method that should raise a ValueError"
return 1 >= 5*x + 1.1
self.assertRaises(PrimalInfeasible, constr)
def test_init(self):
"Test Constraint __init__"
x = Variable('x')
y = Variable('y')
c = PosynomialInequality(x, ">=", y**2)
self.assertEqual(c.as_hmapslt1({}), [(y**2/x).hmap])
self.assertEqual(c.left, x)
self.assertEqual(c.right, y**2)
c = PosynomialInequality(x, "<=", y**2)
self.assertEqual(c.as_hmapslt1({}), [(x/y**2).hmap])
self.assertEqual(c.left, x)
self.assertEqual(c.right, y**2)
self.assertEqual(type((1 >= x).latex()), str)
def test_oper_overload(self):
"Test Constraint initialization by operator overloading"
x = Variable('x')
y = Variable('y')
c = (y >= 1 + x**2)
self.assertEqual(c.as_hmapslt1({}), [(1/y + x**2/y).hmap])
self.assertEqual(c.left, y)
self.assertEqual(c.right, 1 + x**2)
# same constraint, switched operator direction
c2 = (1 + x**2 <= y) # same as c
self.assertEqual(c2.as_hmapslt1({}), c.as_hmapslt1({}))
def test_sub_tol(self):
" Test PosyIneq feasibility tolerance under substitutions"
x = Variable('x')
y = Variable('y')
z = Variable('z')
PosynomialInequality.feastol = 1e-5
m = Model(z, [x == z, x >= y], {x: 1, y: 1.0001})
self.assertRaises(PrimalInfeasible, m.solve, verbosity=0)
PosynomialInequality.feastol = 1e-3
self.assertEqual(m.substitutions('x'), m.solve(verbosity=0)('x'))
class TestCostedConstraint(unittest.TestCase):
"Tests for Costed Constraint class"
def test_vector_cost(self):
x = VectorVariable(2, "x")
self.assertRaises(ValueError, CostedConstraintSet, x, [])
_ = CostedConstraintSet(np.array(x[0]), [])
class TestMonomialEquality(unittest.TestCase):
"Test monomial equality constraint class"
def test_init(self):
"Test initialization via both operator overloading and __init__"
x = Variable('x')
y = Variable('y')
mono = y**2/x
# operator overloading
mec = (x == y**2)
# __init__
mec2 = MonomialEquality(x, y**2)
self.assertTrue(mono.hmap in mec.as_hmapslt1({}))
self.assertTrue(mono.hmap in mec2.as_hmapslt1({}))
x = Variable("x", "ft")
y = Variable("y")
if gpkit.units:
self.assertRaises(DimensionalityError, MonomialEquality, x, y)
self.assertRaises(DimensionalityError, MonomialEquality, y, x)
def test_vector(self):
"Monomial Equalities with VectorVariables"
x = VectorVariable(3, "x")
self.assertFalse(x == 3)
self.assertTrue(x == x) # pylint: disable=comparison-with-itself
def test_inheritance(self):
"Make sure MonomialEquality inherits from the right things"
F = Variable('F')
m = Variable('m')
a = Variable('a')
mec = (F == m*a)
self.assertTrue(isinstance(mec, MonomialEquality))
def test_non_monomial(self):
"Try to initialize a MonomialEquality with non-monomial args"
x = Variable('x')
y = Variable('y')
def constr():
"method that should raise a TypeError"
MonomialEquality(x*y, x+y)
self.assertRaises(TypeError, constr)
def test_str(self):
"Test that MonomialEquality.__str__ returns a string"
x = Variable('x')
y = Variable('y')
mec = (x == y)
self.assertEqual(type(mec.str_without()), str)
def test_united_dimensionless(self):
"Check dimensionless unit-ed variables work"
x = Variable('x')
y = Variable('y', 'hr/day')
c = MonomialEquality(x, y)
self.assertTrue(isinstance(c, MonomialEquality))
class TestSignomialInequality(unittest.TestCase):
"Test Signomial constraints"
def test_becomes_posy_sensitivities(self):
# pylint: disable=invalid-name
# model from #1165
ujet = Variable("ujet")
PK = Variable("PK")
Dp = Variable("Dp", 0.662)
fBLI = Variable("fBLI", 0.4)
fsurf = Variable("fsurf", 0.836)
mdot = Variable("mdot", 1/0.7376)
with SignomialsEnabled():
m = Model(PK, [mdot*ujet + fBLI*Dp >= 1,
PK >= 0.5*mdot*ujet*(2 + ujet) + fBLI*fsurf*Dp])
var_senss = m.solve(verbosity=0)["sensitivities"]["variables"]
self.assertAlmostEqual(var_senss[Dp], -0.16, 2)
self.assertAlmostEqual(var_senss[fBLI], -0.16, 2)
self.assertAlmostEqual(var_senss[fsurf], 0.19, 2)
self.assertAlmostEqual(var_senss[mdot], -0.17, 2)
# Linked variable
Dp = Variable("Dp", 0.662)
mDp = Variable("-Dp", lambda c: -c[Dp])
fBLI = Variable("fBLI", 0.4)
fsurf = Variable("fsurf", 0.836)
mdot = Variable("mdot", 1/0.7376)
m = Model(PK, [mdot*ujet >= 1 + fBLI*mDp,
PK >= 0.5*mdot*ujet*(2 + ujet) + fBLI*fsurf*Dp])
var_senss = m.solve(verbosity=0)["sensitivities"]["variables"]
self.assertAlmostEqual(var_senss[Dp], -0.16, 2)
self.assertAlmostEqual(var_senss[fBLI], -0.16, 2)
self.assertAlmostEqual(var_senss[fsurf], 0.19, 2)
self.assertAlmostEqual(var_senss[mdot], -0.17, 2)
# fixed negative variable
Dp = Variable("Dp", 0.662)
mDp = Variable("-Dp", -0.662)
fBLI = Variable("fBLI", 0.4)
fsurf = Variable("fsurf", 0.836)
mdot = Variable("mdot", 1/0.7376)
m = Model(PK, [mdot*ujet >= 1 + fBLI*mDp,
PK >= 0.5*mdot*ujet*(2 + ujet) + fBLI*fsurf*Dp])
var_senss = m.solve(verbosity=0)["sensitivities"]["variables"]
self.assertAlmostEqual(var_senss[Dp] + var_senss[mDp], -0.16, 2)
self.assertAlmostEqual(var_senss[fBLI], -0.16, 2)
self.assertAlmostEqual(var_senss[fsurf], 0.19, 2)
self.assertAlmostEqual(var_senss[mdot], -0.17, 2)
def test_init(self):
"Test initialization and types"
D = Variable('D', units="N")
x1, x2, x3 = (Variable("x_%s" % i, units="N") for i in range(3))
with self.assertRaises(TypeError):
sc = (D >= x1 + x2 - x3)
with SignomialsEnabled():
sc = (D >= x1 + x2 - x3)
self.assertTrue(isinstance(sc, SignomialInequality))
self.assertFalse(isinstance(sc, Posynomial))
def test_posyslt1(self):
x = Variable("x")
y = Variable("y")
with SignomialsEnabled():
sc = (x + y >= x*y)
# make sure that the error type doesn't change on our users
with self.assertRaises(InvalidGPConstraint):
_ = sc.as_hmapslt1({})
class TestLoose(unittest.TestCase):
"Test loose constraint set"
def test_raiseerror(self):
x = Variable('x')
x_min = Variable('x_{min}', 2)
m = Model(x, [Loose([x >= x_min]),
x >= 1])
Loose.raiseerror = True
self.assertRaises(RuntimeWarning, m.solve, verbosity=0)
Loose.raiseerror = False
def test_posyconstr_in_gp(self):
"Tests loose constraint set with solve()"
x = Variable('x')
x_min = Variable('x_{min}', 2)
m = Model(x, [Loose([x >= x_min]),
x >= 1])
sol = m.solve(verbosity=0)
warndata = sol["warnings"]["Unexpectedly Tight Constraints"][0][1]
self.assertIs(warndata[-1], m[0][0])
self.assertAlmostEqual(warndata[0], +1, 3)
m.substitutions[x_min] = 0.5
self.assertAlmostEqual(m.solve(verbosity=0)["cost"], 1)
def test_posyconstr_in_sp(self):
x = Variable('x')
y = Variable('y')
x_min = Variable('x_min', 1)
y_min = Variable('y_min', 2)
with SignomialsEnabled():
sig_constraint = (x + y >= 3.5)
m = Model(x*y, [Loose([x >= y]),
x >= x_min, y >= y_min, sig_constraint])
sol = m.localsolve(verbosity=0)
warndata = sol["warnings"]["Unexpectedly Tight Constraints"][0][1]
self.assertIs(warndata[-1], m[0][0])
self.assertAlmostEqual(warndata[0], +1, 3)
m.substitutions[x_min] = 2
m.substitutions[y_min] = 1
self.assertAlmostEqual(m.localsolve(verbosity=0)["cost"], 2.5, 5)
class TestTight(unittest.TestCase):
"Test tight constraint set"
def test_posyconstr_in_gp(self):
"Tests tight constraint set with solve()"
x = Variable('x')
x_min = Variable('x_{min}', 2)
m = Model(x, [Tight([x >= 1]),
x >= x_min])
sol = m.solve(verbosity=0)
warndata = sol["warnings"]["Unexpectedly Loose Constraints"][0][1]
self.assertIs(warndata[-1], m[0][0])
self.assertAlmostEqual(warndata[0], 1, 3)
m.substitutions[x_min] = 0.5
self.assertAlmostEqual(m.solve(verbosity=0)["cost"], 1)
def test_posyconstr_in_sp(self):
x = Variable('x')
y = Variable('y')
with SignomialsEnabled():
sig_constraint = (x + y >= 0.1)
m = Model(x*y, [Tight([x >= y]),
x >= 2, y >= 1, sig_constraint])
sol = m.localsolve(verbosity=0)
warndata = sol["warnings"]["Unexpectedly Loose Constraints"][0][1]
self.assertIs(warndata[-1], m[0][0])
self.assertAlmostEqual(warndata[0], 1, 3)
m.pop(1)
self.assertAlmostEqual(m.localsolve(verbosity=0)["cost"], 1, 5)
def test_sigconstr_in_sp(self):
"Tests tight constraint set with localsolve()"
x = Variable('x')
y = Variable('y')
x_min = Variable('x_{min}', 2)
y_max = Variable('y_{max}', 0.5)
with SignomialsEnabled():
m = Model(x, [Tight([x + y >= 1]),
x >= x_min,
y <= y_max])
sol = m.localsolve(verbosity=0)
warndata = sol["warnings"]["Unexpectedly Loose Constraints"][0][1]
self.assertIs(warndata[-1], m[0][0])
self.assertGreater(warndata[0], 0.5)
m.substitutions[x_min] = 0.5
self.assertAlmostEqual(m.localsolve(verbosity=0)["cost"], 0.5, 5)
class TestBounded(unittest.TestCase):
"Test bounded constraint set"
def test_substitution_issue905(self):
x = Variable("x")
y = Variable("y")
m = Model(x, [x >= y], {"y": 1})
bm = Model(m.cost, Bounded(m))
sol = bm.solve(verbosity=0)
self.assertAlmostEqual(sol["cost"], 1.0)
bm = Model(m.cost, Bounded(m, lower=1e-10))
sol = bm.solve(verbosity=0)
self.assertAlmostEqual(sol["cost"], 1.0)
bm = Model(m.cost, Bounded(m, upper=1e10))
sol = bm.solve(verbosity=0)
self.assertAlmostEqual(sol["cost"], 1.0)
TESTS = [TestConstraint, TestMonomialEquality, TestSignomialInequality,
TestTight, TestLoose, TestBounded, TestCostedConstraint]
if __name__ == "__main__": # pragma: no cover
run_tests(TESTS)
|
[
"gpkit.VectorVariable",
"gpkit.constraints.loose.Loose",
"gpkit.ConstraintSet",
"gpkit.constraints.relax.ConstraintsRelaxedEqually",
"gpkit.globals.NamedVariables.reset_modelnumbers",
"gpkit.nomials.PosynomialInequality",
"gpkit.tests.helpers.run_tests",
"gpkit.constraints.relax.ConstantsRelaxed",
"gpkit.Model",
"gpkit.constraints.bounded.Bounded",
"gpkit.constraints.tight.Tight",
"numpy.array",
"gpkit.constraints.relax.ConstraintsRelaxed",
"gpkit.SignomialsEnabled",
"gpkit.Variable",
"gpkit.nomials.MonomialEquality"
] |
[((15753, 15769), 'gpkit.tests.helpers.run_tests', 'run_tests', (['TESTS'], {}), '(TESTS)\n', (15762, 15769), False, 'from gpkit.tests.helpers import run_tests\n'), ((997, 1010), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (1005, 1010), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((1258, 1271), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (1266, 1271), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((1642, 1664), 'gpkit.VectorVariable', 'VectorVariable', (['(2)', '"""v"""'], {}), "(2, 'v')\n", (1656, 1664), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((1960, 1973), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (1968, 1973), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((1987, 2036), 'gpkit.Variable', 'Variable', (['"""x^2"""'], {'evalfn': '(lambda solv: solv[x] ** 2)'}), "('x^2', evalfn=lambda solv: solv[x] ** 2)\n", (1995, 2036), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((2047, 2065), 'gpkit.Model', 'Model', (['x', '[x >= 2]'], {}), '(x, [x >= 2])\n', (2052, 2065), False, 'from gpkit import Model, ConstraintSet\n'), ((2237, 2250), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (2245, 2250), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((2267, 2287), 'gpkit.Variable', 'Variable', (['"""x_max"""', '(1)'], {}), "('x_max', 1)\n", (2275, 2287), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((2304, 2324), 'gpkit.Variable', 'Variable', (['"""x_min"""', '(2)'], {}), "('x_min', 2)\n", (2312, 2324), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((2380, 2411), 'gpkit.constraints.relax.ConstraintsRelaxed', 'ConstraintsRelaxed', (['constraints'], {}), '(constraints)\n', (2398, 2411), False, 'from gpkit.constraints.relax import ConstraintsRelaxed, ConstantsRelaxed, ConstraintsRelaxedEqually\n'), ((2420, 2449), 'gpkit.constraints.relax.ConstantsRelaxed', 'ConstantsRelaxed', (['constraints'], {}), '(constraints)\n', (2436, 2449), False, 'from gpkit.constraints.relax import ConstraintsRelaxed, ConstantsRelaxed, ConstraintsRelaxedEqually\n'), ((2458, 2496), 'gpkit.constraints.relax.ConstraintsRelaxedEqually', 'ConstraintsRelaxedEqually', (['constraints'], {}), '(constraints)\n', (2483, 2496), False, 'from gpkit.constraints.relax import ConstraintsRelaxed, ConstantsRelaxed, ConstraintsRelaxedEqually\n'), ((2543, 2556), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (2551, 2556), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((2573, 2593), 'gpkit.Variable', 'Variable', (['"""x_max"""', '(1)'], {}), "('x_max', 1)\n", (2581, 2593), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((2610, 2651), 'gpkit.Variable', 'Variable', (['"""x_min"""', '(lambda c: 2 * c[x_max])'], {}), "('x_min', lambda c: 2 * c[x_max])\n", (2618, 2651), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((2665, 2705), 'gpkit.Variable', 'Variable', (['"""zero"""', '(lambda c: 0 * c[x_max])'], {}), "('zero', lambda c: 0 * c[x_max])\n", (2673, 2705), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((2726, 2779), 'gpkit.ConstraintSet', 'ConstraintSet', (['[x_min + zero <= x, x + zero <= 
x_max]'], {}), '([x_min + zero <= x, x + zero <= x_max])\n', (2739, 2779), False, 'from gpkit import Model, ConstraintSet\n'), ((2792, 2821), 'gpkit.constraints.relax.ConstantsRelaxed', 'ConstantsRelaxed', (['constraints'], {}), '(constraints)\n', (2808, 2821), False, 'from gpkit.constraints.relax import ConstraintsRelaxed, ConstantsRelaxed, ConstraintsRelaxedEqually\n'), ((2830, 2865), 'gpkit.globals.NamedVariables.reset_modelnumbers', 'NamedVariables.reset_modelnumbers', ([], {}), '()\n', (2863, 2865), False, 'from gpkit.globals import NamedVariables\n'), ((2888, 2941), 'gpkit.constraints.relax.ConstantsRelaxed', 'ConstantsRelaxed', (['constraints'], {'include_only': "['x_min']"}), "(constraints, include_only=['x_min'])\n", (2904, 2941), False, 'from gpkit.constraints.relax import ConstraintsRelaxed, ConstantsRelaxed, ConstraintsRelaxedEqually\n'), ((2950, 2985), 'gpkit.globals.NamedVariables.reset_modelnumbers', 'NamedVariables.reset_modelnumbers', ([], {}), '()\n', (2983, 2985), False, 'from gpkit.globals import NamedVariables\n'), ((3008, 3056), 'gpkit.constraints.relax.ConstantsRelaxed', 'ConstantsRelaxed', (['constraints'], {'exclude': "['x_max']"}), "(constraints, exclude=['x_max'])\n", (3024, 3056), False, 'from gpkit.constraints.relax import ConstraintsRelaxed, ConstantsRelaxed, ConstraintsRelaxedEqually\n'), ((3171, 3184), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (3179, 3184), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((3197, 3223), 'gpkit.Model', 'Model', (['x', '[x == 3, x == 4]'], {}), '(x, [x == 3, x == 4])\n', (3202, 3223), False, 'from gpkit import Model, ConstraintSet\n'), ((3237, 3258), 'gpkit.constraints.relax.ConstraintsRelaxed', 'ConstraintsRelaxed', (['m'], {}), '(m)\n', (3255, 3258), False, 'from gpkit.constraints.relax import ConstraintsRelaxed, ConstantsRelaxed, ConstraintsRelaxedEqually\n'), ((3430, 3443), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (3438, 3443), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((3457, 3490), 'gpkit.Variable', 'Variable', (['"""x"""'], {'lineage': "[('_', 0)]"}), "('x', lineage=[('_', 0)])\n", (3465, 3490), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((3504, 3526), 'gpkit.VectorVariable', 'VectorVariable', (['(2)', '"""x"""'], {}), "(2, 'x')\n", (3518, 3526), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((3541, 3583), 'gpkit.VectorVariable', 'VectorVariable', (['(2)', '"""x"""'], {'lineage': "[('_', 0)]"}), "(2, 'x', lineage=[('_', 0)])\n", (3555, 3583), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((4201, 4214), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (4209, 4214), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((4607, 4620), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (4615, 4620), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((4855, 4868), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (4863, 4868), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((4881, 4894), 'gpkit.Variable', 'Variable', (['"""y"""'], {}), "('y')\n", (4889, 4894), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((4907, 4944), 'gpkit.nomials.PosynomialInequality', 'PosynomialInequality', (['x', '""">="""', '(y ** 
2)'], {}), "(x, '>=', y ** 2)\n", (4927, 4944), False, 'from gpkit.nomials import SignomialInequality, PosynomialInequality\n'), ((5092, 5129), 'gpkit.nomials.PosynomialInequality', 'PosynomialInequality', (['x', '"""<="""', '(y ** 2)'], {}), "(x, '<=', y ** 2)\n", (5112, 5129), False, 'from gpkit.nomials import SignomialInequality, PosynomialInequality\n'), ((5431, 5444), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (5439, 5444), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((5457, 5470), 'gpkit.Variable', 'Variable', (['"""y"""'], {}), "('y')\n", (5465, 5470), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((5915, 5928), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (5923, 5928), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((5941, 5954), 'gpkit.Variable', 'Variable', (['"""y"""'], {}), "('y')\n", (5949, 5954), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((5967, 5980), 'gpkit.Variable', 'Variable', (['"""z"""'], {}), "('z')\n", (5975, 5980), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((6037, 6082), 'gpkit.Model', 'Model', (['z', '[x == z, x >= y]', '{x: 1, y: 1.0001}'], {}), '(z, [x == z, x >= y], {x: 1, y: 1.0001})\n', (6042, 6082), False, 'from gpkit import Model, ConstraintSet\n'), ((6400, 6422), 'gpkit.VectorVariable', 'VectorVariable', (['(2)', '"""x"""'], {}), "(2, 'x')\n", (6414, 6422), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((6746, 6759), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (6754, 6759), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((6772, 6785), 'gpkit.Variable', 'Variable', (['"""y"""'], {}), "('y')\n", (6780, 6785), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((6899, 6926), 'gpkit.nomials.MonomialEquality', 'MonomialEquality', (['x', '(y ** 2)'], {}), '(x, y ** 2)\n', (6915, 6926), False, 'from gpkit.nomials import MonomialEquality\n'), ((7054, 7073), 'gpkit.Variable', 'Variable', (['"""x"""', '"""ft"""'], {}), "('x', 'ft')\n", (7062, 7073), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((7086, 7099), 'gpkit.Variable', 'Variable', (['"""y"""'], {}), "('y')\n", (7094, 7099), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((7365, 7387), 'gpkit.VectorVariable', 'VectorVariable', (['(3)', '"""x"""'], {}), "(3, 'x')\n", (7379, 7387), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((7608, 7621), 'gpkit.Variable', 'Variable', (['"""F"""'], {}), "('F')\n", (7616, 7621), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((7634, 7647), 'gpkit.Variable', 'Variable', (['"""m"""'], {}), "('m')\n", (7642, 7647), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((7660, 7673), 'gpkit.Variable', 'Variable', (['"""a"""'], {}), "('a')\n", (7668, 7673), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((7874, 7887), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (7882, 7887), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((7900, 7913), 'gpkit.Variable', 'Variable', (['"""y"""'], {}), "('y')\n", (7908, 7913), False, 'from 
gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((8171, 8184), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (8179, 8184), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((8197, 8210), 'gpkit.Variable', 'Variable', (['"""y"""'], {}), "('y')\n", (8205, 8210), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((8396, 8409), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (8404, 8409), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((8422, 8445), 'gpkit.Variable', 'Variable', (['"""y"""', '"""hr/day"""'], {}), "('y', 'hr/day')\n", (8430, 8445), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((8458, 8480), 'gpkit.nomials.MonomialEquality', 'MonomialEquality', (['x', 'y'], {}), '(x, y)\n', (8474, 8480), False, 'from gpkit.nomials import MonomialEquality\n'), ((8752, 8768), 'gpkit.Variable', 'Variable', (['"""ujet"""'], {}), "('ujet')\n", (8760, 8768), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((8782, 8796), 'gpkit.Variable', 'Variable', (['"""PK"""'], {}), "('PK')\n", (8790, 8796), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((8810, 8831), 'gpkit.Variable', 'Variable', (['"""Dp"""', '(0.662)'], {}), "('Dp', 0.662)\n", (8818, 8831), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((8847, 8868), 'gpkit.Variable', 'Variable', (['"""fBLI"""', '(0.4)'], {}), "('fBLI', 0.4)\n", (8855, 8868), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((8885, 8909), 'gpkit.Variable', 'Variable', (['"""fsurf"""', '(0.836)'], {}), "('fsurf', 0.836)\n", (8893, 8909), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((8925, 8953), 'gpkit.Variable', 'Variable', (['"""mdot"""', '(1 / 0.7376)'], {}), "('mdot', 1 / 0.7376)\n", (8933, 8953), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((9456, 9477), 'gpkit.Variable', 'Variable', (['"""Dp"""', '(0.662)'], {}), "('Dp', 0.662)\n", (9464, 9477), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((9492, 9525), 'gpkit.Variable', 'Variable', (['"""-Dp"""', '(lambda c: -c[Dp])'], {}), "('-Dp', lambda c: -c[Dp])\n", (9500, 9525), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((9541, 9562), 'gpkit.Variable', 'Variable', (['"""fBLI"""', '(0.4)'], {}), "('fBLI', 0.4)\n", (9549, 9562), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((9579, 9603), 'gpkit.Variable', 'Variable', (['"""fsurf"""', '(0.836)'], {}), "('fsurf', 0.836)\n", (9587, 9603), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((9619, 9647), 'gpkit.Variable', 'Variable', (['"""mdot"""', '(1 / 0.7376)'], {}), "('mdot', 1 / 0.7376)\n", (9627, 9647), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((9658, 9762), 'gpkit.Model', 'Model', (['PK', '[mdot * ujet >= 1 + fBLI * mDp, PK >= 0.5 * mdot * ujet * (2 + ujet) + fBLI *\n fsurf * Dp]'], {}), '(PK, [mdot * ujet >= 1 + fBLI * mDp, PK >= 0.5 * mdot * ujet * (2 +\n ujet) + fBLI * fsurf * Dp])\n', (9663, 9762), False, 'from gpkit import Model, ConstraintSet\n'), ((10117, 10138), 'gpkit.Variable', 'Variable', (['"""Dp"""', '(0.662)'], 
{}), "('Dp', 0.662)\n", (10125, 10138), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((10153, 10176), 'gpkit.Variable', 'Variable', (['"""-Dp"""', '(-0.662)'], {}), "('-Dp', -0.662)\n", (10161, 10176), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((10192, 10213), 'gpkit.Variable', 'Variable', (['"""fBLI"""', '(0.4)'], {}), "('fBLI', 0.4)\n", (10200, 10213), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((10230, 10254), 'gpkit.Variable', 'Variable', (['"""fsurf"""', '(0.836)'], {}), "('fsurf', 0.836)\n", (10238, 10254), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((10270, 10298), 'gpkit.Variable', 'Variable', (['"""mdot"""', '(1 / 0.7376)'], {}), "('mdot', 1 / 0.7376)\n", (10278, 10298), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((10309, 10413), 'gpkit.Model', 'Model', (['PK', '[mdot * ujet >= 1 + fBLI * mDp, PK >= 0.5 * mdot * ujet * (2 + ujet) + fBLI *\n fsurf * Dp]'], {}), '(PK, [mdot * ujet >= 1 + fBLI * mDp, PK >= 0.5 * mdot * ujet * (2 +\n ujet) + fBLI * fsurf * Dp])\n', (10314, 10413), False, 'from gpkit import Model, ConstraintSet\n'), ((10815, 10839), 'gpkit.Variable', 'Variable', (['"""D"""'], {'units': '"""N"""'}), "('D', units='N')\n", (10823, 10839), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((11220, 11233), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (11228, 11233), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((11246, 11259), 'gpkit.Variable', 'Variable', (['"""y"""'], {}), "('y')\n", (11254, 11259), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((11596, 11609), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (11604, 11609), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((11626, 11648), 'gpkit.Variable', 'Variable', (['"""x_{min}"""', '(2)'], {}), "('x_{min}', 2)\n", (11634, 11648), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((11952, 11965), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (11960, 11965), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((11982, 12004), 'gpkit.Variable', 'Variable', (['"""x_{min}"""', '(2)'], {}), "('x_{min}', 2)\n", (11990, 12004), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((12436, 12449), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (12444, 12449), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((12462, 12475), 'gpkit.Variable', 'Variable', (['"""y"""'], {}), "('y')\n", (12470, 12475), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((12492, 12512), 'gpkit.Variable', 'Variable', (['"""x_min"""', '(1)'], {}), "('x_min', 1)\n", (12500, 12512), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((12529, 12549), 'gpkit.Variable', 'Variable', (['"""y_min"""', '(2)'], {}), "('y_min', 2)\n", (12537, 12549), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((13259, 13272), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (13267, 13272), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((13289, 
13311), 'gpkit.Variable', 'Variable', (['"""x_{min}"""', '(2)'], {}), "('x_{min}', 2)\n", (13297, 13311), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((13742, 13755), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (13750, 13755), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((13768, 13781), 'gpkit.Variable', 'Variable', (['"""y"""'], {}), "('y')\n", (13776, 13781), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((14361, 14374), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (14369, 14374), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((14387, 14400), 'gpkit.Variable', 'Variable', (['"""y"""'], {}), "('y')\n", (14395, 14400), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((14417, 14439), 'gpkit.Variable', 'Variable', (['"""x_{min}"""', '(2)'], {}), "('x_{min}', 2)\n", (14425, 14439), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((14456, 14480), 'gpkit.Variable', 'Variable', (['"""y_{max}"""', '(0.5)'], {}), "('y_{max}', 0.5)\n", (14464, 14480), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((15084, 15097), 'gpkit.Variable', 'Variable', (['"""x"""'], {}), "('x')\n", (15092, 15097), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((15110, 15123), 'gpkit.Variable', 'Variable', (['"""y"""'], {}), "('y')\n", (15118, 15123), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((15136, 15164), 'gpkit.Model', 'Model', (['x', '[x >= y]', "{'y': 1}"], {}), "(x, [x >= y], {'y': 1})\n", (15141, 15164), False, 'from gpkit import Model, ConstraintSet\n'), ((1332, 1352), 'gpkit.Model', 'Model', (['x', "[x == 'A']"], {}), "(x, [x == 'A'])\n", (1337, 1352), False, 'from gpkit import Model, ConstraintSet\n'), ((1413, 1441), 'gpkit.Model', 'Model', (['x', "[x >= 1, x == 'A']"], {}), "(x, [x >= 1, x == 'A'])\n", (1418, 1441), False, 'from gpkit import Model, ConstraintSet\n'), ((1502, 1538), 'gpkit.Model', 'Model', (['x', "[x >= 1, x == 'A', x >= 1]"], {}), "(x, [x >= 1, x == 'A', x >= 1])\n", (1507, 1538), False, 'from gpkit import Model, ConstraintSet\n'), ((1601, 1629), 'gpkit.Model', 'Model', (['x', "[x == 'A', x >= 1]"], {}), "(x, [x == 'A', x >= 1])\n", (1606, 1629), False, 'from gpkit import Model, ConstraintSet\n'), ((1725, 1745), 'gpkit.Model', 'Model', (['x', "[v == 'A']"], {}), "(x, [v == 'A'])\n", (1730, 1745), False, 'from gpkit import Model, ConstraintSet\n'), ((1805, 1832), 'gpkit.Model', 'Model', (['x', "[v <= ['A', 'B']]"], {}), "(x, [v <= ['A', 'B']])\n", (1810, 1832), False, 'from gpkit import Model, ConstraintSet\n'), ((1892, 1919), 'gpkit.Model', 'Model', (['x', "[v >= ['A', 'B']]"], {}), "(x, [v >= ['A', 'B']])\n", (1897, 1919), False, 'from gpkit import Model, ConstraintSet\n'), ((6521, 6535), 'numpy.array', 'np.array', (['x[0]'], {}), '(x[0])\n', (6529, 6535), True, 'import numpy as np\n'), ((8000, 8030), 'gpkit.nomials.MonomialEquality', 'MonomialEquality', (['(x * y)', '(x + y)'], {}), '(x * y, x + y)\n', (8016, 8030), False, 'from gpkit.nomials import MonomialEquality\n'), ((8965, 8984), 'gpkit.SignomialsEnabled', 'SignomialsEnabled', ([], {}), '()\n', (8982, 8984), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((9002, 9105), 'gpkit.Model', 'Model', (['PK', 
'[mdot * ujet + fBLI * Dp >= 1, PK >= 0.5 * mdot * ujet * (2 + ujet) + fBLI *\n fsurf * Dp]'], {}), '(PK, [mdot * ujet + fBLI * Dp >= 1, PK >= 0.5 * mdot * ujet * (2 +\n ujet) + fBLI * fsurf * Dp])\n', (9007, 9105), False, 'from gpkit import Model, ConstraintSet\n'), ((10862, 10893), 'gpkit.Variable', 'Variable', (["('x_%s' % i)"], {'units': '"""N"""'}), "('x_%s' % i, units='N')\n", (10870, 10893), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((11006, 11025), 'gpkit.SignomialsEnabled', 'SignomialsEnabled', ([], {}), '()\n', (11023, 11025), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((11273, 11292), 'gpkit.SignomialsEnabled', 'SignomialsEnabled', ([], {}), '()\n', (11290, 11292), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((12563, 12582), 'gpkit.SignomialsEnabled', 'SignomialsEnabled', ([], {}), '()\n', (12580, 12582), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((13795, 13814), 'gpkit.SignomialsEnabled', 'SignomialsEnabled', ([], {}), '()\n', (13812, 13814), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((14494, 14513), 'gpkit.SignomialsEnabled', 'SignomialsEnabled', ([], {}), '()\n', (14511, 14513), False, 'from gpkit import Variable, SignomialsEnabled, Posynomial, VectorVariable\n'), ((15192, 15202), 'gpkit.constraints.bounded.Bounded', 'Bounded', (['m'], {}), '(m)\n', (15199, 15202), False, 'from gpkit.constraints.bounded import Bounded\n'), ((15316, 15339), 'gpkit.constraints.bounded.Bounded', 'Bounded', (['m'], {'lower': '(1e-10)'}), '(m, lower=1e-10)\n', (15323, 15339), False, 'from gpkit.constraints.bounded import Bounded\n'), ((15453, 15484), 'gpkit.constraints.bounded.Bounded', 'Bounded', (['m'], {'upper': '(10000000000.0)'}), '(m, upper=10000000000.0)\n', (15460, 15484), False, 'from gpkit.constraints.bounded import Bounded\n'), ((1133, 1162), 'gpkit.ConstraintSet', 'ConstraintSet', (['[self, x <= 1]'], {}), '([self, x <= 1])\n', (1146, 1162), False, 'from gpkit import Model, ConstraintSet\n'), ((3609, 3627), 'gpkit.Model', 'Model', (['x', '[x >= 1]'], {}), '(x, [x >= 1])\n', (3614, 3627), False, 'from gpkit import Model, ConstraintSet\n'), ((3697, 3724), 'gpkit.Model', 'Model', (['x', '[x >= 1, x_ >= 1]'], {}), '(x, [x >= 1, x_ >= 1])\n', (3702, 3724), False, 'from gpkit import Model, ConstraintSet\n'), ((3790, 3817), 'gpkit.Model', 'Model', (['x', '[x >= 1, xv >= 1]'], {}), '(x, [x >= 1, xv >= 1])\n', (3795, 3817), False, 'from gpkit import Model, ConstraintSet\n'), ((11671, 11690), 'gpkit.constraints.loose.Loose', 'Loose', (['[x >= x_min]'], {}), '([x >= x_min])\n', (11676, 11690), False, 'from gpkit.constraints.loose import Loose\n'), ((12027, 12046), 'gpkit.constraints.loose.Loose', 'Loose', (['[x >= x_min]'], {}), '([x >= x_min])\n', (12032, 12046), False, 'from gpkit.constraints.loose import Loose\n'), ((12652, 12667), 'gpkit.constraints.loose.Loose', 'Loose', (['[x >= y]'], {}), '([x >= y])\n', (12657, 12667), False, 'from gpkit.constraints.loose import Loose\n'), ((13334, 13349), 'gpkit.constraints.tight.Tight', 'Tight', (['[x >= 1]'], {}), '([x >= 1])\n', (13339, 13349), False, 'from gpkit.constraints.tight import Tight\n'), ((13884, 13899), 'gpkit.constraints.tight.Tight', 'Tight', (['[x >= y]'], {}), '([x >= y])\n', (13889, 13899), False, 'from gpkit.constraints.tight import Tight\n'), ((14541, 14560), 'gpkit.constraints.tight.Tight', 'Tight', (['[x + y >= 
1]'], {}), '([x + y >= 1])\n', (14546, 14560), False, 'from gpkit.constraints.tight import Tight\n')]
|
from bokeh.models import ColumnDataSource, HoverTool, Range1d, Plot, LinearAxis, Grid, Paragraph,TapTool,Div
from bokeh.plotting import figure, show, output_file
from bokeh.io import curdoc
from bokeh.layouts import widgetbox, layout
from bokeh.models.widgets import Select, Slider, Button
import dim_reduction
import numpy as np
import clustering
import random
import sys
import os
import pickle
from bokeh.models.glyphs import ImageURL
import re
from bokeh.models.callbacks import CustomJS
from sklearn.preprocessing import MinMaxScaler
src_path = os.path.abspath("./src/")
if src_path not in sys.path:
sys.path.insert(0, src_path)
import data_format
from wcloud_standalone import get_wcloud
import heatmap as hmap
from lrp import get_lrp_timedata
def button_callback():
text_review,words,word_embeddings = get_rawText_data(rawInput_selections.value,keys_raw,data_raw,testX,embed_mat)
text_banner.text = text_review
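# get_wc_colourGroups groups the review's words by the cluster colour assigned
# to their embeddings (data_format.list_duplicates collects the indices that
# share a colour); get_wcloud later receives this mapping so the word-cloud
# tokens are coloured consistently with the embedding clustering.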
def get_wc_colourGroups(rawInput_source):
words = np.array(rawInput_source.data['w'])
colors = rawInput_source.data['z']
color_dict = dict()
for color in sorted(data_format.list_duplicates(colors)):
color_dict[color[0]] = list(words[color[1]])
return color_dict
def get_selections():
gates = ["IN - what to add on","NOT IMPORTANT - what to drop off","IMPORTANT - where to focus on"]
select_gate = Select(title="Gate", value="IN - what to add on", options=gates)
if select_gate.value == "IN - what to add on":
select_gate.value = "input_gate"
elif select_gate.value == "NOT IMPORTANT - what to drop off":
select_gate.value = "forget_gate"
elif select_gate.value == "IMPORTANT - where to focus on":
select_gate.value = "output_gate"
return select_gate
def get_clustering_selections(algorithms_neurons):
algorithm_select_neuron = Select(value="KMeans - selected gate",title="Select clustering option for neurons:",width=250, options=algorithms_neurons)
cluster_slider = Slider(title="Number of clusters (use in kmeans,hierarchical clustering)",value=2.0,start=2.0,end=4.0,step=1,width=400)
return (algorithm_select_neuron,cluster_slider)
def get_rawInput_selections(keys_raw):
review = [str(r) for r in list(keys_raw)]
select_rawInput = Select(title="Input review", value=review[0], options=review)
return select_rawInput
def get_projection_selections(algorithms):
algorithm_select = Select(value="PCA",title="Select projection algorithm:",width=250, options=algorithms)
return algorithm_select
def get_rawText_data(rawInput_selections,keys_raw,data_raw,feed,embed_mat):
text_review = np.array_str(data_raw[int(rawInput_selections)])
txt_rev = text_review.replace('<UNK>','UNK')
words = text_review.split()
word_embeddings = [embed_mat[i,:] for i in list(feed[int(rawInput_selections),:].astype(int))]
return txt_rev,words,word_embeddings
"""
-------------------------------------------------------------------------------------------------------
UPDATE SOURCE
-------------------------------------------------------------------------------------------------------
"""
def update_source(attrname, old, new):
gate_value = gate_selections.value
if gate_value == "IN - what to add on":
gate_value = "input_gate"
elif gate_value == "NOT IMPORTANT - what to drop off":
gate_value = "forget_gate"
elif gate_value == "IMPORTANT - where to focus on":
gate_value = "output_gate"
x = data[lstm_layer_name][gate_value]
text_review,words,word_embeddings = get_rawText_data(rawInput_selections.value,keys_raw,data_raw,testX,embed_mat)
#update raw input
text_banner.text = text_review
text_banner2.text = text_review
label_banner.text = "Network decision : POSITIVE" if predicted_tgs[int(rawInput_selections.value)][0] == 0 else "Network decision : NEGATIVE"
#update dimension reduction source
algorithm = projection_selections.value
knn = 5
x_pr,performance_metric = dim_reduction.project(x, algorithm, knn, labels)
#update clustering
algorithm_cl_neurons = clustering_selections[0].value
n_clusters = int(clustering_selections[1].value)
if algorithm_cl_neurons=="Internal state clustering (LSTM's outputs)":
text_set.text = "Internal state clustering - selected review: Clusters representation of input review at every timestep as learned by the LSTM layer."
lstm_hidVal = np.array(lstm_hidden[int(rawInput_selections.value)])
x_pr,performance_metric = dim_reduction.project(np.transpose(lstm_hidVal), algorithm, knn, labels)
cluster_labels, colors, _ = clustering.apply_cluster(data=np.transpose(lstm_hidVal),algorithm=algorithm_cl_neurons,n_clusters=n_clusters,review=None,neuronData=None,mode="nn")
elif algorithm_cl_neurons=="DBSCAN - all reviews" or algorithm_cl_neurons== "AgglomerativeClustering - all reviews":
if algorithm_cl_neurons=="DBSCAN - all reviews":
text_set.text = "DBSCAN - all reviews: Clusters neurons based on how related their most activating words are. List of activating words generated from all reviews."
elif algorithm_cl_neurons== "AgglomerativeClustering - all reviews":
text_set.text = "AgglomerativeClustering - all reviews: Hierarchical clustering of neurons based on how related their most activating words are. List of activating words generated from all reviews."
neuronData = similarityMatrix_AllReviews
cluster_labels, colors, _ = clustering.apply_cluster(x,algorithm_cl_neurons,n_clusters,review=rawInput_selections.value,neuronData=neuronData,mode="nn")
elif algorithm_cl_neurons=="Positive-Negative neuron clustering (LSTM's predictions)":
text_set.text = "Positive-Negative neuron clustering: Clusters neurons based on how much they contributed to classifying the review as positive or negative."
neuronData = neuron_types
cluster_labels, colors, spectr = clustering.apply_cluster(x,algorithm_cl_neurons,n_clusters,review=rawInput_selections.value,neuronData=neuronData,mode="nn")
neutral = tuple(int((spectr[0].lstrip('#'))[i:i+2], 16) for i in (0, 2 ,4))
positive = tuple(int((spectr[1].lstrip('#'))[i:i+2], 16) for i in (0, 2 ,4))
negative = tuple(int((spectr[2].lstrip('#'))[i:i+2], 16) for i in (0, 2 ,4))
neu = "<span style='background-color: rgb("+str(neutral[0])+","+str(neutral[1])+","+str(neutral[2])+")'>Neutral</span>"
pos = "<span style='background-color: rgb("+str(positive[0])+","+str(positive[1])+","+str(positive[2])+")'>Positive</span>"
neg = "<span style='background-color: rgb("+str(negative[0])+","+str(negative[1])+","+str(negative[2])+")'>Negative</span>"
text_set.text = "Positive-Negative neuron clustering: Clusters neurons based on how much they contributed to classifying the review as positive or negative:"+neu+" "+pos+" "+neg
else:
if algorithm_cl_neurons=="KMeans - selected gate":
text_set.text = "KMeans: Clusters neurons based on their gate values after training."
elif algorithm_cl_neurons=="DBSCAN - selected review":
text_set.text = "DBSCAN - selected review: Clusters neurons based on how related their most activating words are. List of activating words generated from selected review."
neuronData = similarityMatrix_PerReview
cluster_labels, colors, _ = clustering.apply_cluster(x,algorithm_cl_neurons,n_clusters,review=int(rawInput_selections.value),neuronData=neuronData,mode="nn")
proj_source.data = dict(x=x_pr[:, 0], y=x_pr[:, 1], z=colors)
w2v_labels, w2v_colors, _ = clustering.apply_cluster(np.array(word_embeddings),"KMeans - selected gate",n_clusters,mode="wc")
rawInput_source.data = dict(z=w2v_colors, w=words)
color_dict = get_wc_colourGroups(rawInput_source)
if gate_value=="input_gate":
wc_filename,wc_img,wc_words = get_wcloud(LRP,int(rawInput_selections.value),load_dir,color_dict=color_dict,gate="in",text=text_banner.text)
elif gate_value=="forget_gate":
wc_filename,wc_img,wc_words = get_wcloud(LRP,int(rawInput_selections.value),load_dir,color_dict=color_dict,gate="forget")
elif gate_value=="output_gate":
wc_filename,wc_img,wc_words = get_wcloud(LRP,int(rawInput_selections.value),load_dir,color_dict=color_dict,gate="out")
words_to_be_highlighted = list(set(wc_words).intersection(totalLRP[int(rawInput_selections.value)]['words']))
lrp_source.data['lrp'] = scaler.fit_transform(np.array(totalLRP[int(rawInput_selections.value)]['lrp'].tolist()).reshape(-1,1))
tap_source.data['wc_words'] = words_to_be_highlighted
wc_plot.add_glyph(img_source, ImageURL(url=dict(value=load_dir+wc_filename), x=0, y=0, anchor="bottom_left"))
"""
------------------------------------------------------------------------------------------------------------------------
MAIN APP CODE
------------------------------------------------------------------------------------------------------------------------
"""
# Provide data paths and files
load_dir = "./bokeh_vis/static/"
lstm_layer_name = "lstm"
#Get trained model parameters: weights and gate values
keys,data = data_format.get_data(load_dir+"model.json")
#Get raw input
keys_raw,data_raw = data_format.get_data(load_dir+"test_data_text.pickle")
#Load auxiliary data
with open(load_dir+"lstm_predictions.pickle","rb") as handle:
predicted_tgs = pickle.load(handle)
with open(load_dir+"exploratoryDataFull.pickle", 'rb') as f:
(testX,embed_mat,excitingWords_fullSet,similarityMatrix_AllReviews,similarityMatrix_PerReview,neuron_types,totalLRP,LRP) = pickle.load(f)
_,lstm_hidden = data_format.get_data(load_dir+"test_model_internals_lstm_hidden.pickle")
#Get preset buttons' selections
#LSTM gates
gate_selections = get_selections()
#Dimensionality reduction
projection_selections = get_projection_selections(dim_reduction.get_dimReduction_algorithms())
#Clustering
algorithm_neurons = clustering.get_cluster_algorithms()
clustering_selections = get_clustering_selections(algorithm_neurons)
#Raw input clustering
rawInput_selections = get_rawInput_selections(keys_raw)
tools = "pan,wheel_zoom,box_zoom,reset"
#Dimensionality reduction
labels = None
data_pr = data[lstm_layer_name][gate_selections.value]
X, performance_metric = dim_reduction.project(data_pr, "PCA", n_neighbors=5, labels=labels)
X_cluster_labels, X_colors, _ = clustering.apply_cluster(data_pr,algorithm=clustering_selections[0].value,n_clusters=int(clustering_selections[1].value),mode="nn")
proj_source = ColumnDataSource(dict(x=X[:,0],y=X[:,1],z=X_colors))
project_plot = figure(title=projection_selections.value,tools=tools,plot_width=300, plot_height=300)
scatter_tap = project_plot.scatter('x', 'y', marker='circle', size=10, fill_color='z', alpha=0.5, source=proj_source, legend=None)
project_plot.xaxis.axis_label = 'Dim 1'
project_plot.yaxis.axis_label = 'Dim 2'
taptool = TapTool()
project_plot.add_tools(taptool)
#Input text
text_review,words,word_embeddings = get_rawText_data(rawInput_selections.value,keys_raw,data_raw,testX,embed_mat)
w2v_labels, w2v_colors, _ = clustering.apply_cluster(np.array(word_embeddings),algorithm="KMeans - selected gate",n_clusters=int(clustering_selections[1].value),mode="wc")
rawInput_source = ColumnDataSource(dict(z=w2v_colors,w=words))
text_banner = Div(text=text_review, width=1300, height=100)
text_banner2 = Div(text=text_review, width=1300, height=100)
label_banner = Paragraph(text="Network decision : POSITIVE" if predicted_tgs[int(rawInput_selections.value)][0] == 0 else "Network decision : NEGATIVE", width=200, height=30)
button = Button(label="Reset text")
button.on_click(button_callback)
#WordCloud
color_dict = get_wc_colourGroups(rawInput_source) #Colors based on similarity in embedding space
wc_filename,wc_img,wc_words = get_wcloud(LRP,int(rawInput_selections.value),load_dir,color_dict=color_dict,gate="in",text=text_banner.text)
words_to_be_highlighted = list(set(wc_words).intersection(totalLRP[int(rawInput_selections.value)]['words']))
highlight_source = ColumnDataSource(dict(scores=[]))
tap_source = ColumnDataSource(dict(wc_words=words_to_be_highlighted))
scaler = MinMaxScaler(copy=True, feature_range=(-1, 1))
lrp_source = ColumnDataSource(dict(lrp=scaler.fit_transform(np.array(totalLRP[int(rawInput_selections.value)]['lrp'].tolist()).reshape(-1,1))))
#totalLRP : how relevant is each LSTM neuron
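# The TapTool callback below runs in the browser: tapping a neuron in the
# projection plot takes that neuron's scaled LRP relevance, assigns it to the
# word-cloud words, and rewrites the review text with red (positive) or green
# (negative) background spans whose opacity equals the magnitude of the score.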
taptool.callback = CustomJS(args=dict(source=tap_source,lrp=lrp_source,high=highlight_source,div=text_banner,div_orig=text_banner2),
code="""
cell = cb_obj.selected['1d']['indices'][0]
var d = high.data;
d['scores'] = []
for(var i=0; i<source.data['wc_words'].length; i++){
d['scores'].push(lrp.data['lrp'][cell])
}
high.change.emit();
ws = div_orig.text.split(" ");
ws_out = [];
for(var j=0; j<ws.length; j++){
w_idx = source.data['wc_words'].indexOf(ws[j])
if (w_idx>=0){
if (d['scores'][w_idx]>0){
ws_out.push("<span style='background-color: rgba(255,0,0,"+d['scores'][w_idx]+")'>"+ws[j]+"</span>")
}
else if (d['scores'][w_idx]<0){
ws_out.push("<span style='background-color: rgba(0,255,0,"+Math.abs(d['scores'][w_idx])+")'>"+ws[j]+"</span>")
}
}
else {
ws_out.push(ws[j])
}
}
div.text = ws_out.join(" ")
console.log(ws_out)
""")
img_source = ColumnDataSource(dict(url = [load_dir+wc_filename]))
xdr = Range1d(start=0, end=600)
ydr = Range1d(start=0, end=600)
wc_plot = Plot(title=None, x_range=xdr, y_range=ydr, plot_width=500, plot_height=550, min_border=0)
image = ImageURL(url=dict(value=load_dir+wc_filename), x=0, y=0, anchor="bottom_left", retry_attempts=3, retry_timeout=1000)
wc_plot.add_glyph(img_source, image)
text_0 = Paragraph(text="Clustering option:", width=200, height=20)
text_set = Div(text="KMeans: Clusters neurons based on their gate values after training.", width=250, height=100)
lrp_timedata = get_lrp_timedata(LRP)
time = [i for i in range(len(lrp_timedata))]
lrptime_source = ColumnDataSource(dict(lrptime = lrp_timedata,time=time))
lrp_plot = figure(title="Network focus per timestep",plot_width=300, plot_height=50)
lrp_plot.scatter('time','lrptime', marker='circle', size=5, alpha=0.5, source=lrptime_source)
lrp_plot.xaxis.axis_label = 'Time'
lrp_plot.yaxis.axis_label = 'Normalized relevance score'
#Layout
gate_selections.on_change('value', update_source)
projection_selections.on_change('value', update_source)
for attr in clustering_selections:
attr.on_change('value', update_source)
rawInput_selections.on_change('value', update_source)
gp = layout([project_plot, wc_plot, widgetbox(rawInput_selections,gate_selections,projection_selections,clustering_selections[0],clustering_selections[1],text_0,text_set,label_banner,button)],
[lrp_plot],
[text_banner],
responsive=True)
curdoc().add_root(gp)
curdoc().title = "tRustNN"
|
[
"bokeh.models.Plot",
"bokeh.layouts.widgetbox",
"sklearn.preprocessing.MinMaxScaler",
"dim_reduction.project",
"pickle.load",
"data_format.get_data",
"bokeh.models.widgets.Slider",
"bokeh.models.widgets.Select",
"os.path.abspath",
"numpy.transpose",
"bokeh.io.curdoc",
"bokeh.models.Div",
"clustering.get_cluster_algorithms",
"dim_reduction.get_dimReduction_algorithms",
"bokeh.models.Range1d",
"data_format.list_duplicates",
"bokeh.models.widgets.Button",
"bokeh.models.Paragraph",
"bokeh.models.TapTool",
"bokeh.plotting.figure",
"lrp.get_lrp_timedata",
"sys.path.insert",
"clustering.apply_cluster",
"numpy.array"
] |
[((552, 577), 'os.path.abspath', 'os.path.abspath', (['"""./src/"""'], {}), "('./src/')\n", (567, 577), False, 'import os\n'), ((9359, 9404), 'data_format.get_data', 'data_format.get_data', (["(load_dir + 'model.json')"], {}), "(load_dir + 'model.json')\n", (9379, 9404), False, 'import data_format\n'), ((9438, 9494), 'data_format.get_data', 'data_format.get_data', (["(load_dir + 'test_data_text.pickle')"], {}), "(load_dir + 'test_data_text.pickle')\n", (9458, 9494), False, 'import data_format\n'), ((9837, 9911), 'data_format.get_data', 'data_format.get_data', (["(load_dir + 'test_model_internals_lstm_hidden.pickle')"], {}), "(load_dir + 'test_model_internals_lstm_hidden.pickle')\n", (9857, 9911), False, 'import data_format\n'), ((10152, 10187), 'clustering.get_cluster_algorithms', 'clustering.get_cluster_algorithms', ([], {}), '()\n', (10185, 10187), False, 'import clustering\n'), ((10496, 10563), 'dim_reduction.project', 'dim_reduction.project', (['data_pr', '"""PCA"""'], {'n_neighbors': '(5)', 'labels': 'labels'}), "(data_pr, 'PCA', n_neighbors=5, labels=labels)\n", (10517, 10563), False, 'import dim_reduction\n'), ((10810, 10901), 'bokeh.plotting.figure', 'figure', ([], {'title': 'projection_selections.value', 'tools': 'tools', 'plot_width': '(300)', 'plot_height': '(300)'}), '(title=projection_selections.value, tools=tools, plot_width=300,\n plot_height=300)\n', (10816, 10901), False, 'from bokeh.plotting import figure, show, output_file\n'), ((11117, 11126), 'bokeh.models.TapTool', 'TapTool', ([], {}), '()\n', (11124, 11126), False, 'from bokeh.models import ColumnDataSource, HoverTool, Range1d, Plot, LinearAxis, Grid, Paragraph, TapTool, Div\n'), ((11536, 11581), 'bokeh.models.Div', 'Div', ([], {'text': 'text_review', 'width': '(1300)', 'height': '(100)'}), '(text=text_review, width=1300, height=100)\n', (11539, 11581), False, 'from bokeh.models import ColumnDataSource, HoverTool, Range1d, Plot, LinearAxis, Grid, Paragraph, TapTool, Div\n'), ((11597, 11642), 'bokeh.models.Div', 'Div', ([], {'text': 'text_review', 'width': '(1300)', 'height': '(100)'}), '(text=text_review, width=1300, height=100)\n', (11600, 11642), False, 'from bokeh.models import ColumnDataSource, HoverTool, Range1d, Plot, LinearAxis, Grid, Paragraph, TapTool, Div\n'), ((11828, 11854), 'bokeh.models.widgets.Button', 'Button', ([], {'label': '"""Reset text"""'}), "(label='Reset text')\n", (11834, 11854), False, 'from bokeh.models.widgets import Select, Slider, Button\n'), ((12380, 12426), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'copy': '(True)', 'feature_range': '(-1, 1)'}), '(copy=True, feature_range=(-1, 1))\n', (12392, 12426), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((13737, 13762), 'bokeh.models.Range1d', 'Range1d', ([], {'start': '(0)', 'end': '(600)'}), '(start=0, end=600)\n', (13744, 13762), False, 'from bokeh.models import ColumnDataSource, HoverTool, Range1d, Plot, LinearAxis, Grid, Paragraph, TapTool, Div\n'), ((13769, 13794), 'bokeh.models.Range1d', 'Range1d', ([], {'start': '(0)', 'end': '(600)'}), '(start=0, end=600)\n', (13776, 13794), False, 'from bokeh.models import ColumnDataSource, HoverTool, Range1d, Plot, LinearAxis, Grid, Paragraph, TapTool, Div\n'), ((13805, 13898), 'bokeh.models.Plot', 'Plot', ([], {'title': 'None', 'x_range': 'xdr', 'y_range': 'ydr', 'plot_width': '(500)', 'plot_height': '(550)', 'min_border': '(0)'}), '(title=None, x_range=xdr, y_range=ydr, plot_width=500, plot_height=550,\n min_border=0)\n', (13809, 13898), False, 'from bokeh.models 
import ColumnDataSource, HoverTool, Range1d, Plot, LinearAxis, Grid, Paragraph, TapTool, Div\n'), ((14068, 14126), 'bokeh.models.Paragraph', 'Paragraph', ([], {'text': '"""Clustering option:"""', 'width': '(200)', 'height': '(20)'}), "(text='Clustering option:', width=200, height=20)\n", (14077, 14126), False, 'from bokeh.models import ColumnDataSource, HoverTool, Range1d, Plot, LinearAxis, Grid, Paragraph, TapTool, Div\n'), ((14138, 14249), 'bokeh.models.Div', 'Div', ([], {'text': '"""KMeans: Clusters neurons based on their gate values after training."""', 'width': '(250)', 'height': '(100)'}), "(text=\n 'KMeans: Clusters neurons based on their gate values after training.',\n width=250, height=100)\n", (14141, 14249), False, 'from bokeh.models import ColumnDataSource, HoverTool, Range1d, Plot, LinearAxis, Grid, Paragraph, TapTool, Div\n'), ((14258, 14279), 'lrp.get_lrp_timedata', 'get_lrp_timedata', (['LRP'], {}), '(LRP)\n', (14274, 14279), False, 'from lrp import get_lrp_timedata\n'), ((14410, 14484), 'bokeh.plotting.figure', 'figure', ([], {'title': '"""Network focus per timestep"""', 'plot_width': '(300)', 'plot_height': '(50)'}), "(title='Network focus per timestep', plot_width=300, plot_height=50)\n", (14416, 14484), False, 'from bokeh.plotting import figure, show, output_file\n'), ((611, 639), 'sys.path.insert', 'sys.path.insert', (['(0)', 'src_path'], {}), '(0, src_path)\n', (626, 639), False, 'import sys\n'), ((992, 1027), 'numpy.array', 'np.array', (["rawInput_source.data['w']"], {}), "(rawInput_source.data['w'])\n", (1000, 1027), True, 'import numpy as np\n'), ((1379, 1443), 'bokeh.models.widgets.Select', 'Select', ([], {'title': '"""Gate"""', 'value': '"""IN - what to add on"""', 'options': 'gates'}), "(title='Gate', value='IN - what to add on', options=gates)\n", (1385, 1443), False, 'from bokeh.models.widgets import Select, Slider, Button\n'), ((1858, 1992), 'bokeh.models.widgets.Select', 'Select', ([], {'value': '"""KMeans - selected gate"""', 'title': '"""Select clustering option for neurons:"""', 'width': '(250)', 'options': 'algorithms_neurons'}), "(value='KMeans - selected gate', title=\n 'Select clustering option for neurons:', width=250, options=\n algorithms_neurons)\n", (1864, 1992), False, 'from bokeh.models.widgets import Select, Slider, Button\n'), ((2002, 2130), 'bokeh.models.widgets.Slider', 'Slider', ([], {'title': '"""Number of clusters (use in kmeans,hierarchical clustering)"""', 'value': '(2.0)', 'start': '(2.0)', 'end': '(4.0)', 'step': '(1)', 'width': '(400)'}), "(title='Number of clusters (use in kmeans,hierarchical clustering)',\n value=2.0, start=2.0, end=4.0, step=1, width=400)\n", (2008, 2130), False, 'from bokeh.models.widgets import Select, Slider, Button\n'), ((2285, 2346), 'bokeh.models.widgets.Select', 'Select', ([], {'title': '"""Input review"""', 'value': 'review[0]', 'options': 'review'}), "(title='Input review', value=review[0], options=review)\n", (2291, 2346), False, 'from bokeh.models.widgets import Select, Slider, Button\n'), ((2444, 2536), 'bokeh.models.widgets.Select', 'Select', ([], {'value': '"""PCA"""', 'title': '"""Select projection algorithm:"""', 'width': '(250)', 'options': 'algorithms'}), "(value='PCA', title='Select projection algorithm:', width=250,\n options=algorithms)\n", (2450, 2536), False, 'from bokeh.models.widgets import Select, Slider, Button\n'), ((4087, 4135), 'dim_reduction.project', 'dim_reduction.project', (['x', 'algorithm', 'knn', 'labels'], {}), '(x, algorithm, knn, labels)\n', (4108, 4135), False, 'import 
dim_reduction\n'), ((9597, 9616), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (9608, 9616), False, 'import pickle\n'), ((9805, 9819), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (9816, 9819), False, 'import pickle\n'), ((10075, 10118), 'dim_reduction.get_dimReduction_algorithms', 'dim_reduction.get_dimReduction_algorithms', ([], {}), '()\n', (10116, 10118), False, 'import dim_reduction\n'), ((11339, 11364), 'numpy.array', 'np.array', (['word_embeddings'], {}), '(word_embeddings)\n', (11347, 11364), True, 'import numpy as np\n'), ((15214, 15222), 'bokeh.io.curdoc', 'curdoc', ([], {}), '()\n', (15220, 15222), False, 'from bokeh.io import curdoc\n'), ((1116, 1151), 'data_format.list_duplicates', 'data_format.list_duplicates', (['colors'], {}), '(colors)\n', (1143, 1151), False, 'import data_format\n'), ((7793, 7818), 'numpy.array', 'np.array', (['word_embeddings'], {}), '(word_embeddings)\n', (7801, 7818), True, 'import numpy as np\n'), ((14955, 15125), 'bokeh.layouts.widgetbox', 'widgetbox', (['rawInput_selections', 'gate_selections', 'projection_selections', 'clustering_selections[0]', 'clustering_selections[1]', 'text_0', 'text_set', 'label_banner', 'button'], {}), '(rawInput_selections, gate_selections, projection_selections,\n clustering_selections[0], clustering_selections[1], text_0, text_set,\n label_banner, button)\n', (14964, 15125), False, 'from bokeh.layouts import widgetbox, layout\n'), ((15192, 15200), 'bokeh.io.curdoc', 'curdoc', ([], {}), '()\n', (15198, 15200), False, 'from bokeh.io import curdoc\n'), ((4640, 4665), 'numpy.transpose', 'np.transpose', (['lstm_hidVal'], {}), '(lstm_hidVal)\n', (4652, 4665), True, 'import numpy as np\n'), ((5607, 5741), 'clustering.apply_cluster', 'clustering.apply_cluster', (['x', 'algorithm_cl_neurons', 'n_clusters'], {'review': 'rawInput_selections.value', 'neuronData': 'neuronData', 'mode': '"""nn"""'}), "(x, algorithm_cl_neurons, n_clusters, review=\n rawInput_selections.value, neuronData=neuronData, mode='nn')\n", (5631, 5741), False, 'import clustering\n'), ((4757, 4782), 'numpy.transpose', 'np.transpose', (['lstm_hidVal'], {}), '(lstm_hidVal)\n', (4769, 4782), True, 'import numpy as np\n'), ((6069, 6203), 'clustering.apply_cluster', 'clustering.apply_cluster', (['x', 'algorithm_cl_neurons', 'n_clusters'], {'review': 'rawInput_selections.value', 'neuronData': 'neuronData', 'mode': '"""nn"""'}), "(x, algorithm_cl_neurons, n_clusters, review=\n rawInput_selections.value, neuronData=neuronData, mode='nn')\n", (6093, 6203), False, 'import clustering\n')]
|
#!/usr/bin/env -S conda run -n tf python
import numpy as np
import cv2
import onnxruntime
import json
import requests
from PIL import Image
from tqdm import tqdm
from pathlib import Path
from fire import Fire
from typing import List, Union, Tuple
def preprocess_image(
image_path,
min_side = 800,
max_side = 1333,
):
image = read_image_bgr(image_path)
image = _preprocess_image(image)
image, scale = resize_image(
image, min_side = min_side, max_side = max_side
)
return image, scale
def read_image_bgr(path):
""" Read an image in BGR format.
Args
path: Path to the image.
"""
if isinstance(path, (str, Path)):
image = np.array(Image.open(path).convert("RGB"))
else:
path = cv2.cvtColor(path, cv2.COLOR_BGR2RGB)
image = np.array(Image.fromarray(path))
return image[:, :, ::-1]
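# _preprocess_image follows the usual Keras "caffe"/"tf" convention:
# "caffe" subtracts the ImageNet per-channel means (BGR order), while "tf"
# rescales pixel values to the range [-1, 1].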
def _preprocess_image(x, mode = "caffe"):
x = x.astype(np.float32)
if mode == "tf":
x /= 127.5
x -= 1.0
elif mode == "caffe":
x -= np.array([103.939, 116.779, 123.68])
return x
def compute_resize_scale(image_shape, min_side = 800, max_side = 1333):
(rows, cols, _) = image_shape
smallest_side = min(rows, cols)
scale = min_side / smallest_side
largest_side = max(rows, cols)
if largest_side * scale > max_side:
scale = max_side / largest_side
return scale
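# compute_resize_scale preserves the aspect ratio: scale so that the smaller
# side becomes min_side, but clamp the factor so the larger side never
# exceeds max_side.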
def resize_image(img, min_side = 800, max_side = 1333):
scale = compute_resize_scale(
img.shape, min_side = min_side, max_side = max_side
)
img = cv2.resize(img, None, fx = scale, fy = scale)
return img, scale
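# detect preprocesses the image, runs the ONNX session, divides the boxes
# element-wise by image.shape[:2] * 2 -- i.e. the tuple (height, width,
# height, width) -- so they are expressed relative to the resized image, and
# keeps only detections whose score exceeds min_prob (0.6).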
def detect(model, classes, image_path, fast: bool = False) -> List[dict]:
image, scale = preprocess_image(image_path)
args_o = [s.name for s in model.get_outputs()]
args_i = {model.get_inputs()[0].name: image[None, :, :, :]}
boxes, labels, scores = model.run(args_o, args_i)
boxes, labels, scores = boxes[0], labels[0], scores[0]
min_prob = 0.6
boxes /= (image.shape[:2] * 2)
results = [
dict(
box = box.tolist(),
score = score.item(),
label = classes[label.item()],
path = str(image_path),
) for box, score, label in zip(boxes, scores, labels)
if score > min_prob
]
return results
def download_file(url: str, to: Union[str, Path])->None:
to = Path(to)
if to.exists():
print('using cached file', to)
return
tmp = to.with_suffix('.tmp')
resp = requests.get(url, stream = True)
block_size = 1024*1024
total_length = int(resp.headers.get('content-length', 0))
progress_bar = tqdm(total = total_length, unit = 'iB', unit_scale = True)
with tmp.open('wb') as file:
for data in resp.iter_content(block_size):
progress_bar.update(len(data))
file.write(data)
progress_bar.close()
if progress_bar.n == total_length:
tmp.rename(to)
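# download_file streams to a ".tmp" sibling and only renames it to the final
# path once the received size matches the Content-Length header, so an
# interrupted download is not mistaken for a cached file on the next run.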
def prepare_weight(model_name: str) -> Tuple[str, List[str]]:
root = Path.home() / '.NudeNet'
url = 'https://github.com/notAI-tech/NudeNet/releases/download/v0'
w = root / f'{model_name}_checkpoint.onnx'
wurl = f'{url}/{w.name}'
download_file(wurl, w)
c = root / f'{model_name}_classes'
curl = f'{url}/{c.name}'
download_file(curl, c)
return str(w), c.read_text().splitlines()
def prepare_weights() -> Tuple[str, List[str]]:
# prepare_weight('detector_v2_base')
return prepare_weight('detector_v2_default')
def main(file_or_dir: str = None, out: str = None):
ckpt, classes = prepare_weights()
model = onnxruntime.InferenceSession(
ckpt,
providers = [
'CUDAExecutionProvider',
# 'TensorrtExecutionProvider',
'CPUExecutionProvider',
]
)
if file_or_dir is None:
file_or_dir = 'whats.train/psed.a/3C49E51BF3B55D51B9582CE4735DDE3CDA0523C7-t.jpg'
file_or_dir = Path(file_or_dir)
if file_or_dir.is_file():
images = [file_or_dir]
else:
images = [
image for image in file_or_dir.rglob('*.*')
if image.suffix.lower() in {'.jpg', '.jpeg', '.png'}
]
if not out:
out = file_or_dir.name
out = Path(out).with_suffix('.json')
out.parent.mkdir(parents = True, exist_ok = True)
with out.open('w', encoding = 'utf-8') as out:
for image_path in tqdm(images):
for r in detect(model, classes, image_path):
json.dump(r, out, ensure_ascii = False)
out.write('\n')
if __name__ == '__main__':
# run as standalone script:
# nu-detect.py --file_or_dir=... --out=...
Fire(main)
|
[
"json.dump",
"tqdm.tqdm",
"fire.Fire",
"pathlib.Path.home",
"cv2.cvtColor",
"PIL.Image.open",
"onnxruntime.InferenceSession",
"pathlib.Path",
"numpy.array",
"requests.get",
"PIL.Image.fromarray",
"cv2.resize"
] |
[((1586, 1627), 'cv2.resize', 'cv2.resize', (['img', 'None'], {'fx': 'scale', 'fy': 'scale'}), '(img, None, fx=scale, fy=scale)\n', (1596, 1627), False, 'import cv2\n'), ((2423, 2431), 'pathlib.Path', 'Path', (['to'], {}), '(to)\n', (2427, 2431), False, 'from pathlib import Path\n'), ((2551, 2581), 'requests.get', 'requests.get', (['url'], {'stream': '(True)'}), '(url, stream=True)\n', (2563, 2581), False, 'import requests\n'), ((2693, 2745), 'tqdm.tqdm', 'tqdm', ([], {'total': 'total_length', 'unit': '"""iB"""', 'unit_scale': '(True)'}), "(total=total_length, unit='iB', unit_scale=True)\n", (2697, 2745), False, 'from tqdm import tqdm\n'), ((3657, 3756), 'onnxruntime.InferenceSession', 'onnxruntime.InferenceSession', (['ckpt'], {'providers': "['CUDAExecutionProvider', 'CPUExecutionProvider']"}), "(ckpt, providers=['CUDAExecutionProvider',\n 'CPUExecutionProvider'])\n", (3685, 3756), False, 'import onnxruntime\n'), ((3992, 4009), 'pathlib.Path', 'Path', (['file_or_dir'], {}), '(file_or_dir)\n', (3996, 4009), False, 'from pathlib import Path\n'), ((4725, 4735), 'fire.Fire', 'Fire', (['main'], {}), '(main)\n', (4729, 4735), False, 'from fire import Fire\n'), ((763, 800), 'cv2.cvtColor', 'cv2.cvtColor', (['path', 'cv2.COLOR_BGR2RGB'], {}), '(path, cv2.COLOR_BGR2RGB)\n', (775, 800), False, 'import cv2\n'), ((3071, 3082), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (3080, 3082), False, 'from pathlib import Path\n'), ((4454, 4466), 'tqdm.tqdm', 'tqdm', (['images'], {}), '(images)\n', (4458, 4466), False, 'from tqdm import tqdm\n'), ((826, 847), 'PIL.Image.fromarray', 'Image.fromarray', (['path'], {}), '(path)\n', (841, 847), False, 'from PIL import Image\n'), ((1049, 1085), 'numpy.array', 'np.array', (['[103.939, 116.779, 123.68]'], {}), '([103.939, 116.779, 123.68])\n', (1057, 1085), True, 'import numpy as np\n'), ((4291, 4300), 'pathlib.Path', 'Path', (['out'], {}), '(out)\n', (4295, 4300), False, 'from pathlib import Path\n'), ((4541, 4578), 'json.dump', 'json.dump', (['r', 'out'], {'ensure_ascii': '(False)'}), '(r, out, ensure_ascii=False)\n', (4550, 4578), False, 'import json\n'), ((705, 721), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (715, 721), False, 'from PIL import Image\n')]
|
"""Visualize the Fibonacci sequence in binary.
This script plots the Fibonacci sequence in binary form; the idea comes from
https://mathworld.wolfram.com/FibonacciNumber.html
and
https://www.maa.org/editorial/mathgames
This script depends on the dataset `fibonacci.dat`, which is hosted on Zenodo:
https://zenodo.org/record/5187276/files/fibonacci.dat
The instructions for downloading this file are specified in the `Snakefile`
at the top level of the repository.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from pathlib import Path
# Path to the "data" directory
DATA = Path(__file__).parents[1].absolute() / "data"
# Read the Fibonacci numbers
with open(DATA / "fibonacci.dat", "r") as f:
n = [int(l) for l in f.readlines()]
# The dimensions of the image we'll plot
N = len(n)
B = len("{:b}".format(n[-1]))
# Cast each number to binary and then to an array of bits
b = np.zeros((N, B), dtype=int)
b[0] = np.zeros(B)
b[1] = np.zeros(B)
b[1, -1] = 1
for i in range(2, N):
bi = list("{:b}".format(n[i]))
b[i, -len(bi) :] = bi
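# Row i of b now holds the binary expansion of the i-th Fibonacci number,
# right-aligned and zero-padded to B bits, giving an (N x B) bitmap to plot.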
# Plot the Fibonacci sequence in binary; idea from
# https://mathworld.wolfram.com/FibonacciNumber.html and
# https://www.maa.org/editorial/mathgames
fig, ax = plt.subplots(figsize=(6, 6))
cmap = matplotlib.colors.ListedColormap(["white", "C0"])
ax.imshow(b, interpolation="nearest", cmap=cmap, aspect="auto")
ax.axis("off")
fig.savefig("fibonacci.pdf", bbox_inches="tight")
|
[
"pathlib.Path",
"numpy.zeros",
"matplotlib.colors.ListedColormap",
"matplotlib.pyplot.subplots"
] |
[((916, 943), 'numpy.zeros', 'np.zeros', (['(N, B)'], {'dtype': 'int'}), '((N, B), dtype=int)\n', (924, 943), True, 'import numpy as np\n'), ((951, 962), 'numpy.zeros', 'np.zeros', (['B'], {}), '(B)\n', (959, 962), True, 'import numpy as np\n'), ((970, 981), 'numpy.zeros', 'np.zeros', (['B'], {}), '(B)\n', (978, 981), True, 'import numpy as np\n'), ((1239, 1267), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (1251, 1267), True, 'import matplotlib.pyplot as plt\n'), ((1275, 1324), 'matplotlib.colors.ListedColormap', 'matplotlib.colors.ListedColormap', (["['white', 'C0']"], {}), "(['white', 'C0'])\n", (1307, 1324), False, 'import matplotlib\n'), ((609, 623), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (613, 623), False, 'from pathlib import Path\n')]
|
""" obtain the static field from a set of charges and
dipoles at polarizable points.
"""
import numpy
from .tensor import T
from .mulmom import MulMom as M
def get_static_field_from_file(potential, filename):
f = open(filename, 'r')
field = []
for i, line in enumerate(f):
d = line.split()
if i == 0:
continue
else:
field.append(list(map(float, d)))
f.close()
return numpy.ravel(field)
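# get_static_field_from_file skips the first line of the file (presumably a
# header or site count) and flattens the remaining per-site field components
# into a single 1-D array.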
def get_static_field(potential, **kwargs):
"""
"""
verbose = kwargs.get('verbose', False)
filename = kwargs.pop('filename', None)
F_static = numpy.zeros(3 * potential.npols)
if filename is not None:
if verbose:
print("Loading static field from file '{0}'".format(filename))
F_static = get_static_field_from_file(potential, filename)
else:
try:
from .field import static_field
ex = numpy.array([potential.exclusion_list[k] for k in range(len(potential.exclusion_list))])
q = numpy.zeros(potential.nsites)
d = numpy.zeros((potential.nsites,3))
multipoles = potential.multipoles
if 0 in multipoles.keys():
q = numpy.array([q[0] for q in multipoles[0]])
if 1 in multipoles.keys():
d = numpy.array([d for d in multipoles[1]])
F_static = static_field(potential.npols, potential.coordinates, potential.has_alpha, ex, q, d)
except ModuleNotFoundError:
if verbose:
print("INFO: static field calculated using (slow) python version.")
offset = 0
for isite in range(potential.nsites):
itensor = potential.has_alpha[isite]
is_polarizable_point = (itensor > -1)
Ri = potential.coordinates[isite]
if is_polarizable_point:
iexclusion_list = potential.exclusion_list[isite]
for jsite in range(potential.nsites):
if jsite == isite:
continue
if jsite in iexclusion_list:
continue
jtensor = potential.has_alpha[jsite]
js_polarizable_point = (jtensor > -1)
Rj = potential.coordinates[jsite]
dRij = Rj - Ri
T1 = T(1, dRij)
try:
M0 = M(*potential.multipoles[0][jsite])
except KeyError:
F0 = numpy.zeros(3)
else:
F0 = numpy.array(M0 * T1).ravel()
finally:
F_static[offset:offset + 3] -= F0
T2 = T(2, dRij)
try:
M1 = M(*potential.multipoles[1][jsite])
except KeyError:
F1 = numpy.zeros(3)
else:
F1 = numpy.array(M1 * T2)
finally:
F_static[offset:offset + 3] += F1
T3 = T(3, dRij)
try:
M2 = M(*potential.multipoles[2][jsite])
except KeyError:
F2 = numpy.zeros(3)
else:
F2 = numpy.array(M2 * T3)
finally:
F_static[offset:offset + 3] += F2
offset += 3
return F_static
|
[
"numpy.array",
"numpy.zeros",
"numpy.ravel"
] |
[((441, 459), 'numpy.ravel', 'numpy.ravel', (['field'], {}), '(field)\n', (452, 459), False, 'import numpy\n'), ((625, 657), 'numpy.zeros', 'numpy.zeros', (['(3 * potential.npols)'], {}), '(3 * potential.npols)\n', (636, 657), False, 'import numpy\n'), ((1041, 1070), 'numpy.zeros', 'numpy.zeros', (['potential.nsites'], {}), '(potential.nsites)\n', (1052, 1070), False, 'import numpy\n'), ((1087, 1121), 'numpy.zeros', 'numpy.zeros', (['(potential.nsites, 3)'], {}), '((potential.nsites, 3))\n', (1098, 1121), False, 'import numpy\n'), ((1227, 1269), 'numpy.array', 'numpy.array', (['[q[0] for q in multipoles[0]]'], {}), '([q[0] for q in multipoles[0]])\n', (1238, 1269), False, 'import numpy\n'), ((1329, 1368), 'numpy.array', 'numpy.array', (['[d for d in multipoles[1]]'], {}), '([d for d in multipoles[1]])\n', (1340, 1368), False, 'import numpy\n'), ((3118, 3138), 'numpy.array', 'numpy.array', (['(M1 * T2)'], {}), '(M1 * T2)\n', (3129, 3138), False, 'import numpy\n'), ((3524, 3544), 'numpy.array', 'numpy.array', (['(M2 * T3)'], {}), '(M2 * T3)\n', (3535, 3544), False, 'import numpy\n'), ((2626, 2640), 'numpy.zeros', 'numpy.zeros', (['(3)'], {}), '(3)\n', (2637, 2640), False, 'import numpy\n'), ((3040, 3054), 'numpy.zeros', 'numpy.zeros', (['(3)'], {}), '(3)\n', (3051, 3054), False, 'import numpy\n'), ((3446, 3460), 'numpy.zeros', 'numpy.zeros', (['(3)'], {}), '(3)\n', (3457, 3460), False, 'import numpy\n'), ((2704, 2724), 'numpy.array', 'numpy.array', (['(M0 * T1)'], {}), '(M0 * T1)\n', (2715, 2724), False, 'import numpy\n')]
|
import sys
import os
import json
import numpy as np
import glob
import argparse
import pdb
import f0dl_bernox
def compute_f0_shift_curve(expt_dict, filter_key, filter_value, f0_min=80.0, f0_max=1e3):
    '''
    Compute the predicted-f0-shift curve for trials where
    `expt_dict[filter_key] == filter_value` and f0 lies in [f0_min, f0_max]:
    for each unique imposed f0 shift, return the mean, median and standard
    deviation of the model's relative f0 shift (all values in percent).
    '''
# Identify trials where filter_key = filter_value and stimulus is in f0 range
indexes = expt_dict[filter_key] == filter_value
indexes = np.logical_and(indexes, np.logical_and(expt_dict['f0'] >= f0_min, expt_dict['f0'] <= f0_max))
# Compute f0 shifts
f0_shift = expt_dict['f0_shift'][indexes]
f0_pred_shift = (expt_dict['f0_pred'][indexes] - expt_dict['f0'][indexes]) / expt_dict['f0'][indexes]
# For each unique f0 shift, compute the mean, median, stddev predicted f0 shift
f0_shift_unique = np.unique(f0_shift)
f0_pred_shift_mean = np.zeros_like(f0_shift_unique)
f0_pred_shift_median = np.zeros_like(f0_shift_unique)
f0_pred_shift_stddev = np.zeros_like(f0_shift_unique)
for idx, f0_shift_value in enumerate(f0_shift_unique):
current_value_indexes = f0_shift == f0_shift_value
f0_pred_shift_mean[idx] = np.mean(f0_pred_shift[current_value_indexes])
f0_pred_shift_median[idx] = np.median(f0_pred_shift[current_value_indexes])
f0_pred_shift_stddev[idx] = np.std(f0_pred_shift[current_value_indexes])
# Return results in dictionary (units converted to percent)
sub_results_dict = {
'f0_shift': 100.0 * f0_shift_unique,
'f0_pred_shift_mean': 100.0 * f0_pred_shift_mean,
'f0_pred_shift_median': 100.0 * f0_pred_shift_median,
'f0_pred_shift_stddev': 100.0 * f0_pred_shift_stddev,
}
return sub_results_dict
def run_f0experiment_freq_shifted(json_fn, filter_key='spectral_envelope_centered_harmonic',
f0_label_pred_key='f0_label:labels_pred',
f0_label_true_key='f0_label:labels_true',
f0_label_prob_key='f0_label:probs_out',
kwargs_f0_prior={},
f0_min=None,
f0_max=None):
    '''
    Run the Moore and Moore (2003) frequency-shifted complexes analysis on a
    JSON file of model predictions.  Returns a dictionary keyed by the unique
    values of `filter_key`, each entry holding the shift curve computed by
    `compute_f0_shift_curve`.
    '''
# Load JSON file of model predictions into `expt_dict`
metadata_key_list = [
'f0',
'f0_shift',
'spectral_envelope_centered_harmonic',
'spectral_envelope_bandwidth_in_harmonics',
]
expt_dict = f0dl_bernox.load_f0_expt_dict_from_json(json_fn,
f0_label_true_key=f0_label_true_key,
f0_label_pred_key=f0_label_pred_key,
f0_label_prob_key=f0_label_prob_key,
metadata_key_list=metadata_key_list)
expt_dict = f0dl_bernox.add_f0_estimates_to_expt_dict(expt_dict,
f0_label_true_key=f0_label_true_key,
f0_label_pred_key=f0_label_pred_key,
kwargs_f0_prior=kwargs_f0_prior)
# Initialize dictionary to hold psychophysical results
if f0_min is None: f0_min = np.min(expt_dict['f0'])
if f0_max is None: f0_max = np.max(expt_dict['f0'])
results_dict = {filter_key:{}, 'f0_min':f0_min, 'f0_max':f0_max}
for filter_value in np.unique(expt_dict[filter_key]):
results_dict[filter_key][int(filter_value)] = compute_f0_shift_curve(expt_dict,
filter_key,
filter_value,
f0_min=f0_min,
f0_max=f0_max)
# Return dictionary of psychophysical experiment results
return results_dict
def main(json_eval_fn, json_results_dict_fn=None, save_results_to_file=False,
filter_key='spectral_envelope_centered_harmonic',
f0_label_pred_key='f0_label:labels_pred',
f0_label_true_key='f0_label:labels_true',
f0_label_prob_key='f0_label:probs_out',
kwargs_f0_prior={},
f0_min=None,
f0_max=None):
    '''
    Run the frequency-shifted complexes experiment for a single evaluation
    file and, if `save_results_to_file` is True, write the results dictionary
    to `json_results_dict_fn` (defaults to the eval filename with a
    `_results_dict.json` suffix).
    '''
# Run the Moore and Moore (2003) freq-shifted complexes experiment; results stored in results_dict
results_dict = run_f0experiment_freq_shifted(json_eval_fn,
filter_key=filter_key,
f0_label_pred_key=f0_label_pred_key,
f0_label_true_key=f0_label_true_key,
f0_label_prob_key=f0_label_prob_key,
kwargs_f0_prior=kwargs_f0_prior,
f0_min=f0_min,
f0_max=f0_max)
results_dict['json_eval_fn'] = json_eval_fn
results_dict['kwargs_f0_prior'] = kwargs_f0_prior
# If specified, save results_dict to file
if save_results_to_file:
# Check filename for results_dict
if json_results_dict_fn is None:
json_results_dict_fn = json_eval_fn.replace('.json', '_results_dict.json')
assert not json_results_dict_fn == json_eval_fn, "json_results_dict_fn must not overwrite json_eval_fn"
# Define helper class to JSON serialize the results_dict
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray): return obj.tolist()
if isinstance(obj, np.int64): return int(obj)
return json.JSONEncoder.default(self, obj)
# Write results_dict to json_results_dict_fn
with open(json_results_dict_fn, 'w') as f: json.dump(results_dict, f, cls=NumpyEncoder)
print('[END] wrote results_dict to {}'.format(json_results_dict_fn))
return results_dict
if __name__ == "__main__":
    '''
    Command-line entry point: pick one evaluation file from the glob by
    `job_idx` and run the analysis, optionally constraining the f0 prior to
    +/- `prior_range_in_octaves` octaves around the reference f0.
    '''
parser = argparse.ArgumentParser(description="run Moore and Moore (2003) freq-shifted complexes experiment")
parser.add_argument('-r', '--regex_json_eval_fn', type=str, default=None,
help='regex that globs list of json_eval_fn to process')
parser.add_argument('-j', '--job_idx', type=int, default=None,
help='job index used to select json_eval_fn from list')
parser.add_argument('-p', '--prior_range_in_octaves', type=float, default=0,
help='sets octave_range in `kwargs_f0_prior`: [#, #]')
parsed_args_dict = vars(parser.parse_args())
assert parsed_args_dict['regex_json_eval_fn'] is not None, "regex_json_eval_fn is a required argument"
assert parsed_args_dict['job_idx'] is not None, "job_idx is a required argument"
list_json_eval_fn = sorted(glob.glob(parsed_args_dict['regex_json_eval_fn']))
json_eval_fn = list_json_eval_fn[parsed_args_dict['job_idx']]
print('Processing file {} of {}'.format(parsed_args_dict['job_idx'], len(list_json_eval_fn)))
print('Processing file: {}'.format(json_eval_fn))
if parsed_args_dict['prior_range_in_octaves'] > 0:
kwargs_f0_prior = {
'f0_label_prob_key': 'f0_label:probs_out',
'f0_prior_ref_key': 'f0',
'octave_range': [
-parsed_args_dict['prior_range_in_octaves'],
parsed_args_dict['prior_range_in_octaves']
],
}
else:
kwargs_f0_prior = {}
main(json_eval_fn, save_results_to_file=True, kwargs_f0_prior=kwargs_f0_prior)
|
[
"json.dump",
"numpy.zeros_like",
"f0dl_bernox.add_f0_estimates_to_expt_dict",
"argparse.ArgumentParser",
"numpy.logical_and",
"numpy.median",
"numpy.std",
"numpy.min",
"numpy.mean",
"f0dl_bernox.load_f0_expt_dict_from_json",
"numpy.max",
"glob.glob",
"json.JSONEncoder.default",
"numpy.unique"
] |
[((744, 763), 'numpy.unique', 'np.unique', (['f0_shift'], {}), '(f0_shift)\n', (753, 763), True, 'import numpy as np\n'), ((789, 819), 'numpy.zeros_like', 'np.zeros_like', (['f0_shift_unique'], {}), '(f0_shift_unique)\n', (802, 819), True, 'import numpy as np\n'), ((847, 877), 'numpy.zeros_like', 'np.zeros_like', (['f0_shift_unique'], {}), '(f0_shift_unique)\n', (860, 877), True, 'import numpy as np\n'), ((905, 935), 'numpy.zeros_like', 'np.zeros_like', (['f0_shift_unique'], {}), '(f0_shift_unique)\n', (918, 935), True, 'import numpy as np\n'), ((2375, 2580), 'f0dl_bernox.load_f0_expt_dict_from_json', 'f0dl_bernox.load_f0_expt_dict_from_json', (['json_fn'], {'f0_label_true_key': 'f0_label_true_key', 'f0_label_pred_key': 'f0_label_pred_key', 'f0_label_prob_key': 'f0_label_prob_key', 'metadata_key_list': 'metadata_key_list'}), '(json_fn, f0_label_true_key=\n f0_label_true_key, f0_label_pred_key=f0_label_pred_key,\n f0_label_prob_key=f0_label_prob_key, metadata_key_list=metadata_key_list)\n', (2414, 2580), False, 'import f0dl_bernox\n'), ((2812, 2981), 'f0dl_bernox.add_f0_estimates_to_expt_dict', 'f0dl_bernox.add_f0_estimates_to_expt_dict', (['expt_dict'], {'f0_label_true_key': 'f0_label_true_key', 'f0_label_pred_key': 'f0_label_pred_key', 'kwargs_f0_prior': 'kwargs_f0_prior'}), '(expt_dict, f0_label_true_key=\n f0_label_true_key, f0_label_pred_key=f0_label_pred_key, kwargs_f0_prior\n =kwargs_f0_prior)\n', (2853, 2981), False, 'import f0dl_bernox\n'), ((3410, 3442), 'numpy.unique', 'np.unique', (['expt_dict[filter_key]'], {}), '(expt_dict[filter_key])\n', (3419, 3442), True, 'import numpy as np\n'), ((6172, 6276), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""run Moore and Moore (2003) freq-shifted complexes experiment"""'}), "(description=\n 'run Moore and Moore (2003) freq-shifted complexes experiment')\n", (6195, 6276), False, 'import argparse\n'), ((391, 459), 'numpy.logical_and', 'np.logical_and', (["(expt_dict['f0'] >= f0_min)", "(expt_dict['f0'] <= f0_max)"], {}), "(expt_dict['f0'] >= f0_min, expt_dict['f0'] <= f0_max)\n", (405, 459), True, 'import numpy as np\n'), ((1088, 1133), 'numpy.mean', 'np.mean', (['f0_pred_shift[current_value_indexes]'], {}), '(f0_pred_shift[current_value_indexes])\n', (1095, 1133), True, 'import numpy as np\n'), ((1170, 1217), 'numpy.median', 'np.median', (['f0_pred_shift[current_value_indexes]'], {}), '(f0_pred_shift[current_value_indexes])\n', (1179, 1217), True, 'import numpy as np\n'), ((1254, 1298), 'numpy.std', 'np.std', (['f0_pred_shift[current_value_indexes]'], {}), '(f0_pred_shift[current_value_indexes])\n', (1260, 1298), True, 'import numpy as np\n'), ((3237, 3260), 'numpy.min', 'np.min', (["expt_dict['f0']"], {}), "(expt_dict['f0'])\n", (3243, 3260), True, 'import numpy as np\n'), ((3293, 3316), 'numpy.max', 'np.max', (["expt_dict['f0']"], {}), "(expt_dict['f0'])\n", (3299, 3316), True, 'import numpy as np\n'), ((7010, 7059), 'glob.glob', 'glob.glob', (["parsed_args_dict['regex_json_eval_fn']"], {}), "(parsed_args_dict['regex_json_eval_fn'])\n", (7019, 7059), False, 'import glob\n'), ((5968, 6012), 'json.dump', 'json.dump', (['results_dict', 'f'], {'cls': 'NumpyEncoder'}), '(results_dict, f, cls=NumpyEncoder)\n', (5977, 6012), False, 'import json\n'), ((5828, 5863), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (5852, 5863), False, 'import json\n')]
|
"""
Base Class for Optimization Algorithms
Implements problem instance loading, heuristic initialization function, and data plotting functions.
"""
# ----------------------------------------------------------
import tsplib95 as tsp
from numpy.random import default_rng
# plotting
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
from matplotlib import cm
matplotlib.use("pgf")
matplotlib.rcParams.update(
{
"pgf.texsystem": "pdflatex",
"font.family": "serif",
"text.usetex": True,
"pgf.rcfonts": False,
}
)
from pathlib import Path
from candidate_solution import CandidateSolution, weight
# from agent import distance
# ------------------------------------------------------------------------------
class OptimisationBase:
"""
Abstract base class for Optimisation Algorithms
"""
# Initialization and built-in function overriding
def __init__(self, parameters, output_path):
self.output_path = output_path
self.size = parameters["size"]
self.max_iterations = parameters["max_iterations"]
self.heuristic_init = parameters["heuristic_init"]
self.load_problem(parameters["problem_filename"])
self.iteration = 0
self.memory = []
self.quality_by_iteration = []
self.quality_overall = []
self.run_stats = {**parameters}
def __repr__(self):
return "<Optimization size:%s limit:%s problem:%s>" % (
self.size,
self.max_iterations,
self.problem.name,
)
def __str__(self):
return "Optimization: population size %s, limit %s, problem %s" % (
self.size,
self.max_iterations,
self.problem.name,
)
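    # get_heuristic_tour builds a nearest-neighbour tour: start from a random
    # city and repeatedly append the closest not-yet-visited city.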
def get_heuristic_tour(self):
def d(a, b):
return self.problem.get_weight(a, b)
rng = default_rng()
nodes = list(self.problem.get_nodes())
first = rng.choice(nodes)
nodes.remove(first)
tour = []
tour.append(first)
while nodes:
            # pick the unvisited node closest to the current end of the tour
            next_node = min(((node, d(tour[-1], node)) for node in nodes), key=lambda nd: nd[1])
tour.append(next_node[0])
nodes.remove(next_node[0])
return tour
def load_problem(self, problem_filename):
"""
Load problem from file, as well as opt.tour if available
"""
self.problem = tsp.load(problem_filename)
CandidateSolution.set_problem(self.problem)
self.optimum = None
opt_tour = problem_filename[:-4] + ".opt.tour"
try:
self.optimum = CandidateSolution(tsp.load(opt_tour).tours[0])
except FileNotFoundError as err:
print("FileNotFoundError: {0}".format(err))
else:
pass
# Output Methods -----------------------------------------------------------
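    # print_best draws the best tour found (against the known optimum, if any)
    # next to the quality-vs-iteration curves; print_map_only and
    # print_stats_only export those panels separately; print_state snapshots a
    # whole population at the current iteration.  All figures are saved as
    # .png and .pgf under output/<output_path>.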
def print_best(self):
# make figure, axes
plt.style.use("ggplot")
plt.tight_layout()
gs_kw = dict(width_ratios=[3, 2], height_ratios=[1])
fig, (ax1, ax2) = plt.subplots(
figsize=(9, 4.5), ncols=2, nrows=1, gridspec_kw=gs_kw
)
ax1.set(title="Optimisation Result", xlabel="x", ylabel="y")
# ax1.set_aspect('equal', 'box')
ax1.set_aspect("equal")
ax2.set(title="Quality development", xlabel="iteration", ylabel="tour length")
# AX1 Best Solution vs. Optimal Solution
# Nodes
xs, ys = zip(*self.problem.node_coords.values())
labels = self.problem.node_coords.keys()
ax1.scatter(xs, ys, marker="o", color="dimgrey", zorder=10)
for label, x, y in zip(labels, xs, ys):
ax1.annotate(label, xy=(x, y), zorder=20)
# xs,ys hold data for city coordinates
xs, ys = zip(*self.problem.node_coords.values())
labels = self.problem.node_coords.keys()
# plots best tour in self.memory
best_tour = min(self.memory, key=lambda p: p.weight).tour
xt = []
yt = []
for p in best_tour:
coords = self.problem.node_coords[p]
xt.append(coords[0])
yt.append(coords[1])
xt.append(xt[0])
yt.append(yt[0])
ax1.plot(xt, yt, alpha=1.0, color="darkred", linestyle="dashed", zorder=2)
# plots optimum tour if given
if self.optimum is not None:
opt_tour = self.optimum.tour
xt = []
yt = []
for p in opt_tour:
coords = self.problem.node_coords[p]
xt.append(coords[0])
yt.append(coords[1])
xt.append(xt[0])
yt.append(yt[0])
ax1.plot(xt, yt, alpha=0.4, color="yellowgreen", linewidth=5, zorder=1)
# Labels
redline = mlines.Line2D(
[], [], color="darkred", linestyle="dashed", label="Overall Best Tour"
)
yellowline = mlines.Line2D(
[], [], color="yellowgreen", linewidth="5", label="Known Optimal Tour"
)
grey_dot = mlines.Line2D(
[], [], color="dimgrey", marker="o", linestyle="", label="Node"
)
ax1.legend(
handles=[redline, yellowline, grey_dot],
loc="upper center",
bbox_to_anchor=(0.5, -0.1),
shadow=True,
ncol=3,
)
# AX2 - Stats
ymax = max(max(self.quality_overall), max(self.quality_by_iteration))
if self.optimum is not None:
ymin = self.optimum.weight
else:
ymin = min(min(self.quality_overall), min(self.quality_by_iteration))
margin = (ymax - ymin) * 0.5
ax2.set(
xlim=(-0.5, self.max_iterations + 0.5), ylim=(ymin - margin, ymax + margin)
)
iterations = list(range(0, self.iteration + 1))
ax2.plot(iterations, self.quality_by_iteration, marker="", color="red")
ax2.plot(iterations, self.quality_overall, marker="", color="grey")
if self.optimum is not None:
ax2.axhline(y=self.optimum.weight, color="yellowgreen", linewidth=2)
# Legend
red_dot = mlines.Line2D([], [], color="red", marker="", label="Iteration best")
grey_dot = mlines.Line2D([], [], color="grey", marker="", label="Overall best")
ax2_handles = [red_dot, grey_dot]
if self.optimum is not None:
baseline = mlines.Line2D(
[], [], color="yellowgreen", linewidth=2, label="Known Optimum"
)
ax2_handles.append(baseline)
ax2.legend(
handles=ax2_handles,
loc="upper center",
bbox_to_anchor=(0.5, -0.1),
shadow=True,
ncol=2,
)
fig.tight_layout()
# Saving to specific directory and file
out_path = "output/{}".format(self.output_path)
Path(out_path).mkdir(parents=True, exist_ok=True)
plt.savefig("{}/best.png".format(out_path), format="png")
plt.savefig("{}/best.pgf".format(out_path), format="pgf")
plt.close(fig)
def print_map_only(self):
# make figure, axes
plt.style.use("ggplot")
plt.tight_layout()
gs_kw = dict(width_ratios=[1], height_ratios=[1])
fig, (ax1) = plt.subplots(
figsize=(4.4, 5.9), ncols=1, nrows=1, gridspec_kw=gs_kw
)
titel = f"eil51 - Iteration {self.iteration}"
ax1.set(title=titel, xlabel="x", ylabel="y")
ax1.set_aspect("equal")
# AX1 Best Solution vs. Optimal Solution
# Nodes
xs, ys = zip(*self.problem.node_coords.values())
# labels = self.problem.node_coords.keys()
ax1.scatter(xs, ys, marker="o", color="dimgrey", zorder=10)
# for label, x, y in zip(labels, xs, ys):
# ax1.annotate(label, xy=(x, y), zorder=20)
# xs,ys hold data for city coordinates
xs, ys = zip(*self.problem.node_coords.values())
labels = self.problem.node_coords.keys()
# plots best tour in self.memory
best_tour = min(self.memory, key=lambda p: p.weight).tour
xt = []
yt = []
for p in best_tour:
coords = self.problem.node_coords[p]
xt.append(coords[0])
yt.append(coords[1])
xt.append(xt[0])
yt.append(yt[0])
ax1.plot(xt, yt, alpha=1.0, color="C1", linestyle="dashed", zorder=2)
# plots optimum tour if given
if self.optimum is not None:
opt_tour = self.optimum.tour
xt = []
yt = []
for p in opt_tour:
coords = self.problem.node_coords[p]
xt.append(coords[0])
yt.append(coords[1])
xt.append(xt[0])
yt.append(yt[0])
ax1.plot(xt, yt, alpha=0.4, color="C4", linewidth=5, zorder=1)
# Labels
redline = mlines.Line2D(
[], [], color="C1", linestyle="dashed", label="Overall Best Tour"
)
yellowline = mlines.Line2D(
[], [], color="C4", linewidth="5", label="Known Optimal Tour"
)
grey_dot = mlines.Line2D(
[], [], color="dimgrey", marker="o", linestyle="", label="City"
)
ax1.legend(
handles=[redline, yellowline, grey_dot],
loc="upper center",
bbox_to_anchor=(0.5, -0.1),
shadow=True,
ncol=1,
)
fig.tight_layout(rect=[0, 0, 1, 1])
# Saving to specific directory and file
out_path = "output/{}".format(self.output_path)
Path(out_path).mkdir(parents=True, exist_ok=True)
plt.savefig("{}/best_{}.png".format(out_path, self.iteration), format="png")
plt.savefig("{}/best_{}.pgf".format(out_path, self.iteration), format="pgf")
plt.close(fig)
def print_stats_only(self):
# make figure, axes
plt.style.use("ggplot")
plt.tight_layout()
gs_kw = dict(width_ratios=[1], height_ratios=[1])
fig, (ax2) = plt.subplots(figsize=(9, 4.5), ncols=1, nrows=1, gridspec_kw=gs_kw)
ax2.set(title="Quality development", xlabel="iteration", ylabel="tour length")
ymax = max(max(self.quality_overall), max(self.quality_by_iteration))
if self.optimum is not None:
ymin = self.optimum.weight
else:
ymin = min(min(self.quality_overall), min(self.quality_by_iteration))
margin = (ymax - ymin) * 0.5
ax2.set(
xlim=(-0.5, self.max_iterations + 0.5), ylim=(ymin - margin, ymax + margin)
)
iterations = list(range(0, self.iteration + 1))
ax2.plot(iterations, self.quality_by_iteration, marker="", color="red")
ax2.plot(iterations, self.quality_overall, marker="", color="grey")
if self.optimum is not None:
ax2.axhline(y=self.optimum.weight, color="yellowgreen", linewidth=2)
# Legend
red_dot = mlines.Line2D([], [], color="red", marker="", label="Iteration best")
grey_dot = mlines.Line2D([], [], color="grey", marker="", label="Overall best")
ax2_handles = [red_dot, grey_dot]
if self.optimum is not None:
baseline = mlines.Line2D(
[], [], color="yellowgreen", linewidth=2, label="Known Optimum"
)
ax2_handles.append(baseline)
ax2.legend(handles=ax2_handles, loc="upper right", shadow=True)
# Saving to specific directory and file
out_path = "output/{}".format(self.output_path)
Path(out_path).mkdir(parents=True, exist_ok=True)
plt.savefig("{}/stats.png".format(out_path), format="png")
plt.savefig("{}/stats.pgf".format(out_path), format="pgf")
plt.close(fig)
def print_state(self, population):
"""
Print State of Optimization with Coordinate System of tours and stats,
default: only latest addition to the memory is plottet
a population given by list of tours is additionally plottet if provided
"""
# make figure, axes
# plt.style.use('seaborn-whitegrid')
plt.style.use("ggplot")
gs_kw = dict(width_ratios=[3, 2], height_ratios=[1])
fig, (ax1, ax2) = plt.subplots(
figsize=(9, 4.5), ncols=2, nrows=1, gridspec_kw=gs_kw
)
plt.tight_layout()
ax1.set(title="Optimisation State", xlabel="x", ylabel="y")
ax1.set_aspect("equal")
ax2.set(title="Quality development", xlabel="iteration", ylabel="tour length")
# AX1 - Coordinate System
# Nodes
xs, ys = zip(*self.problem.node_coords.values())
labels = self.problem.node_coords.keys()
ax1.scatter(xs, ys, marker="o", color="dimgrey", zorder=10)
for label, x, y in zip(labels, xs, ys):
ax1.annotate(label, xy=(x, y), zorder=20)
# Tours (in current population)
for _ in range(0, len(population)):
for agent in population:
xt = []
yt = []
for p in agent.tour:
coords = self.problem.node_coords[p]
xt.append(coords[0])
yt.append(coords[1])
xt.append(xt[0])
yt.append(yt[0])
ax1.plot(xt, yt, alpha=0.1, color="goldenrod", linewidth=5)
# Best Tour in Population
best_agent = self.memory[self.iteration]
best_tour = best_agent.tour
xt = []
yt = []
for p in best_tour:
coords = self.problem.node_coords[p]
xt.append(coords[0])
yt.append(coords[1])
xt.append(xt[0])
yt.append(yt[0])
ax1.plot(xt, yt, alpha=1.0, color="darkred", linestyle="dashed")
# LABELS
redline = mlines.Line2D(
[], [], color="darkred", linestyle="dashed", label="Best Tour in Iteration"
)
yellowline = mlines.Line2D(
[], [], color="goldenrod", linewidth="5", label="Other Tours in Iteration"
)
grey_dot = mlines.Line2D(
[], [], color="dimgrey", marker="o", linestyle="", label="Node"
)
ax1.legend(
handles=[grey_dot, yellowline, redline],
loc="upper center",
bbox_to_anchor=(0.5, -0.1),
shadow=True,
ncol=3,
)
# AX2 - Stats
ax2.set(xlim=(0 - 0.5, self.max_iterations + 0.5))
iterations = list(range(0, self.iteration + 1))
ax2.plot(
iterations, self.quality_by_iteration, marker="o", color="red", linestyle=""
)
ax2.plot(
iterations, self.quality_overall, marker="x", color="grey", linestyle=""
)
# LABELS
red_dot = mlines.Line2D(
[], [], color="red", marker="o", label="Iteration best", linestyle=""
)
grey_dot = mlines.Line2D(
[], [], color="grey", marker="x", label="Overall best", linestyle=""
)
ax2.legend(
handles=[red_dot, grey_dot],
loc="upper center",
bbox_to_anchor=(0.5, -0.1),
shadow=True,
ncol=2,
)
fig.tight_layout()
# Saving to specific directory and file
out_path = "output/{}".format(self.output_path)
Path(out_path).mkdir(parents=True, exist_ok=True)
plt.savefig(
"{}/iteration_{:03d}.png".format(out_path, self.iteration), format="png"
)
plt.savefig(
"{}/iteration_{:03d}.pgf".format(out_path, self.iteration), format="pgf"
)
plt.close(fig)
|
[
"matplotlib.lines.Line2D",
"matplotlib.pyplot.close",
"matplotlib.rcParams.update",
"tsplib95.load",
"matplotlib.pyplot.subplots",
"numpy.random.default_rng",
"candidate_solution.CandidateSolution.set_problem",
"matplotlib.pyplot.style.use",
"matplotlib.use",
"pathlib.Path",
"matplotlib.pyplot.tight_layout"
] |
[((396, 417), 'matplotlib.use', 'matplotlib.use', (['"""pgf"""'], {}), "('pgf')\n", (410, 417), False, 'import matplotlib\n'), ((418, 546), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'pgf.texsystem': 'pdflatex', 'font.family': 'serif', 'text.usetex': True,\n 'pgf.rcfonts': False}"], {}), "({'pgf.texsystem': 'pdflatex', 'font.family':\n 'serif', 'text.usetex': True, 'pgf.rcfonts': False})\n", (444, 546), False, 'import matplotlib\n'), ((1908, 1921), 'numpy.random.default_rng', 'default_rng', ([], {}), '()\n', (1919, 1921), False, 'from numpy.random import default_rng\n'), ((2428, 2454), 'tsplib95.load', 'tsp.load', (['problem_filename'], {}), '(problem_filename)\n', (2436, 2454), True, 'import tsplib95 as tsp\n'), ((2463, 2506), 'candidate_solution.CandidateSolution.set_problem', 'CandidateSolution.set_problem', (['self.problem'], {}), '(self.problem)\n', (2492, 2506), False, 'from candidate_solution import CandidateSolution, weight\n'), ((2951, 2974), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (2964, 2974), True, 'import matplotlib.pyplot as plt\n'), ((2983, 3001), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2999, 3001), True, 'import matplotlib.pyplot as plt\n'), ((3090, 3157), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(9, 4.5)', 'ncols': '(2)', 'nrows': '(1)', 'gridspec_kw': 'gs_kw'}), '(figsize=(9, 4.5), ncols=2, nrows=1, gridspec_kw=gs_kw)\n', (3102, 3157), True, 'import matplotlib.pyplot as plt\n'), ((4815, 4905), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': '"""darkred"""', 'linestyle': '"""dashed"""', 'label': '"""Overall Best Tour"""'}), "([], [], color='darkred', linestyle='dashed', label=\n 'Overall Best Tour')\n", (4828, 4905), True, 'import matplotlib.lines as mlines\n'), ((4944, 5034), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': '"""yellowgreen"""', 'linewidth': '"""5"""', 'label': '"""Known Optimal Tour"""'}), "([], [], color='yellowgreen', linewidth='5', label=\n 'Known Optimal Tour')\n", (4957, 5034), True, 'import matplotlib.lines as mlines\n'), ((5071, 5149), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': '"""dimgrey"""', 'marker': '"""o"""', 'linestyle': '""""""', 'label': '"""Node"""'}), "([], [], color='dimgrey', marker='o', linestyle='', label='Node')\n", (5084, 5149), True, 'import matplotlib.lines as mlines\n'), ((6167, 6236), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': '"""red"""', 'marker': '""""""', 'label': '"""Iteration best"""'}), "([], [], color='red', marker='', label='Iteration best')\n", (6180, 6236), True, 'import matplotlib.lines as mlines\n'), ((6256, 6324), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': '"""grey"""', 'marker': '""""""', 'label': '"""Overall best"""'}), "([], [], color='grey', marker='', label='Overall best')\n", (6269, 6324), True, 'import matplotlib.lines as mlines\n'), ((7092, 7106), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (7101, 7106), True, 'import matplotlib.pyplot as plt\n'), ((7175, 7198), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (7188, 7198), True, 'import matplotlib.pyplot as plt\n'), ((7207, 7225), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7223, 7225), True, 'import matplotlib.pyplot as plt\n'), ((7306, 7375), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(4.4, 5.9)', 
'ncols': '(1)', 'nrows': '(1)', 'gridspec_kw': 'gs_kw'}), '(figsize=(4.4, 5.9), ncols=1, nrows=1, gridspec_kw=gs_kw)\n', (7318, 7375), True, 'import matplotlib.pyplot as plt\n'), ((8935, 9020), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': '"""C1"""', 'linestyle': '"""dashed"""', 'label': '"""Overall Best Tour"""'}), "([], [], color='C1', linestyle='dashed', label='Overall Best Tour'\n )\n", (8948, 9020), True, 'import matplotlib.lines as mlines\n'), ((9059, 9135), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': '"""C4"""', 'linewidth': '"""5"""', 'label': '"""Known Optimal Tour"""'}), "([], [], color='C4', linewidth='5', label='Known Optimal Tour')\n", (9072, 9135), True, 'import matplotlib.lines as mlines\n'), ((9177, 9255), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': '"""dimgrey"""', 'marker': '"""o"""', 'linestyle': '""""""', 'label': '"""City"""'}), "([], [], color='dimgrey', marker='o', linestyle='', label='City')\n", (9190, 9255), True, 'import matplotlib.lines as mlines\n'), ((9864, 9878), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (9873, 9878), True, 'import matplotlib.pyplot as plt\n'), ((9949, 9972), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (9962, 9972), True, 'import matplotlib.pyplot as plt\n'), ((9981, 9999), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9997, 9999), True, 'import matplotlib.pyplot as plt\n'), ((10080, 10147), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(9, 4.5)', 'ncols': '(1)', 'nrows': '(1)', 'gridspec_kw': 'gs_kw'}), '(figsize=(9, 4.5), ncols=1, nrows=1, gridspec_kw=gs_kw)\n', (10092, 10147), True, 'import matplotlib.pyplot as plt\n'), ((11009, 11078), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': '"""red"""', 'marker': '""""""', 'label': '"""Iteration best"""'}), "([], [], color='red', marker='', label='Iteration best')\n", (11022, 11078), True, 'import matplotlib.lines as mlines\n'), ((11098, 11166), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': '"""grey"""', 'marker': '""""""', 'label': '"""Overall best"""'}), "([], [], color='grey', marker='', label='Overall best')\n", (11111, 11166), True, 'import matplotlib.lines as mlines\n'), ((11798, 11812), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (11807, 11812), True, 'import matplotlib.pyplot as plt\n'), ((12181, 12204), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (12194, 12204), True, 'import matplotlib.pyplot as plt\n'), ((12293, 12360), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(9, 4.5)', 'ncols': '(2)', 'nrows': '(1)', 'gridspec_kw': 'gs_kw'}), '(figsize=(9, 4.5), ncols=2, nrows=1, gridspec_kw=gs_kw)\n', (12305, 12360), True, 'import matplotlib.pyplot as plt\n'), ((12391, 12409), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (12407, 12409), True, 'import matplotlib.pyplot as plt\n'), ((13868, 13963), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': '"""darkred"""', 'linestyle': '"""dashed"""', 'label': '"""Best Tour in Iteration"""'}), "([], [], color='darkred', linestyle='dashed', label=\n 'Best Tour in Iteration')\n", (13881, 13963), True, 'import matplotlib.lines as mlines\n'), ((14002, 14096), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': '"""goldenrod"""', 'linewidth': '"""5"""', 'label': '"""Other Tours in 
Iteration"""'}), "([], [], color='goldenrod', linewidth='5', label=\n 'Other Tours in Iteration')\n", (14015, 14096), True, 'import matplotlib.lines as mlines\n'), ((14133, 14211), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': '"""dimgrey"""', 'marker': '"""o"""', 'linestyle': '""""""', 'label': '"""Node"""'}), "([], [], color='dimgrey', marker='o', linestyle='', label='Node')\n", (14146, 14211), True, 'import matplotlib.lines as mlines\n'), ((14840, 14928), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': '"""red"""', 'marker': '"""o"""', 'label': '"""Iteration best"""', 'linestyle': '""""""'}), "([], [], color='red', marker='o', label='Iteration best',\n linestyle='')\n", (14853, 14928), True, 'import matplotlib.lines as mlines\n'), ((14966, 15053), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': '"""grey"""', 'marker': '"""x"""', 'label': '"""Overall best"""', 'linestyle': '""""""'}), "([], [], color='grey', marker='x', label='Overall best',\n linestyle='')\n", (14979, 15053), True, 'import matplotlib.lines as mlines\n'), ((15693, 15707), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (15702, 15707), True, 'import matplotlib.pyplot as plt\n'), ((6428, 6506), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': '"""yellowgreen"""', 'linewidth': '(2)', 'label': '"""Known Optimum"""'}), "([], [], color='yellowgreen', linewidth=2, label='Known Optimum')\n", (6441, 6506), True, 'import matplotlib.lines as mlines\n'), ((11270, 11348), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': '"""yellowgreen"""', 'linewidth': '(2)', 'label': '"""Known Optimum"""'}), "([], [], color='yellowgreen', linewidth=2, label='Known Optimum')\n", (11283, 11348), True, 'import matplotlib.lines as mlines\n'), ((6901, 6915), 'pathlib.Path', 'Path', (['out_path'], {}), '(out_path)\n', (6905, 6915), False, 'from pathlib import Path\n'), ((9636, 9650), 'pathlib.Path', 'Path', (['out_path'], {}), '(out_path)\n', (9640, 9650), False, 'from pathlib import Path\n'), ((11606, 11620), 'pathlib.Path', 'Path', (['out_path'], {}), '(out_path)\n', (11610, 11620), False, 'from pathlib import Path\n'), ((15402, 15416), 'pathlib.Path', 'Path', (['out_path'], {}), '(out_path)\n', (15406, 15416), False, 'from pathlib import Path\n'), ((2648, 2666), 'tsplib95.load', 'tsp.load', (['opt_tour'], {}), '(opt_tour)\n', (2656, 2666), True, 'import tsplib95 as tsp\n')]
|
from emto_input_generator import *
import numpy as np
import os  # assumed explicit import for os.getcwd() below; the star import above may already provide it
folder = os.getcwd() # Get current working directory.
emtopath = folder+"/L11_CuPt" # Folder where the calculations will be performed.
latpath = emtopath
# L11 CuPt
prims = np.array([[1.0,0.5,0.5],
[0.5,1.0,0.5],
[0.5,0.5,1.0]])
basis = np.array([[0.0,0.0,0.0],
[0.5,0.5,0.5]])
species = ["Cu","Pt"]
species_cpa = [["cu","pt"],["cu","pt"]]
input_creator = EMTO(folder=emtopath)
input_creator.init_structure(latpath=latpath,
prims=prims,
basis=basis,
species=species,
latname='L11')
input_creator.init_bulk(atoms_cpa=species_cpa)
sws_range = np.linspace(2,3,6)
#input_creator.write_bmdl_kstr_shape_input()
#input_creator.write_kgrn_kfcd_input()
input_creator.write_kgrn_kfcd_swsrange(sws=sws_range)
#input_creator.draw_structure('standard_conv')
|
[
"numpy.array",
"numpy.linspace"
] |
[((240, 301), 'numpy.array', 'np.array', (['[[1.0, 0.5, 0.5], [0.5, 1.0, 0.5], [0.5, 0.5, 1.0]]'], {}), '([[1.0, 0.5, 0.5], [0.5, 1.0, 0.5], [0.5, 0.5, 1.0]])\n', (248, 301), True, 'import numpy as np\n'), ((343, 387), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]]'], {}), '([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])\n', (351, 387), True, 'import numpy as np\n'), ((786, 806), 'numpy.linspace', 'np.linspace', (['(2)', '(3)', '(6)'], {}), '(2, 3, 6)\n', (797, 806), True, 'import numpy as np\n')]
|
import numpy as np
import cv2
import math
from PIL import Image, ImageStat
import sys
from os import listdir
import pymeanshift as pms
import os.path
# path1 = "/Users/caijieyang/Desktop/allgoodthinghere/"
# files= listdir(path1)
avg_recall=[]
avg_pre=[]
hsl_acc_list=[]
manual_acc_list=[]
file = sys.argv[1]
file = str(file)
# print (file)
origin = Image.open(file)
width, height = origin.size
area = (0, 0, width, 0.5*height)
image = origin.crop(area) # crop top half of the image
image.save(file+"_cropped.png")
original_image = cv2.imread(file+"_cropped.png")
(segmented_image, label_image, number_regions) = pms.segment(original_image, spatial_radius=7,
range_radius=8, min_density=300)
counterclass_all={}
for j in range(0,len(label_image[0])):
for i in range(0,len(label_image)):
if label_image[i,j] in counterclass_all:
counterclass_all[label_image[i,j]] +=1
else:
counterclass_all[label_image[i,j]] = 1
max_count = 0
for i in counterclass_all:
    if counterclass_all[i] > max_count:
        max_count = counterclass_all[i]
        most_common_colour = i
origin=cv2.imread(file)
data = np.asarray(origin,dtype="int32")
RED = 2
GREEN = 1
BLUE = 0
for i in range (len(original_image)):
for j in range (len(original_image[0])):
if label_image[i][j] == most_common_colour:
data[i, j, GREEN] = 0
data[i, j, RED] = 0
data[i, j, BLUE] = 255
else:
continue
result = Image.fromarray(data.astype(np.uint8))
b, g, r = result.split()
result = Image.merge("RGB", (r, g, b))
result.save(file+"ms_sky_mark.png")
ms = Image.fromarray(segmented_image)
b, g, r = ms.split()
im = Image.merge("RGB", (r, g, b))
im.save(file+"ms_cluster_mark.png")
tempfile = file.replace('.png',' - Copy.png')
if os.path.isfile(tempfile):
marked_pixel_values = cv2.imread(tempfile)
true_po_list=[]
precision_list=[]
true_sky_counter=0
true_positive_counter=0
false_positive_counter=0
# print (marked_pixel_values[0,0])
# print (int(len(marked_pixel_values)/2+1))
for i in range(int(len(marked_pixel_values)/2+1)):
for j in range(len(marked_pixel_values[0])):
if marked_pixel_values[i][j][0] == 255 and marked_pixel_values[i][j][1] == 0 and marked_pixel_values[i][j][2] == 0 :
true_sky_counter+=1
if data[i][j][0] == 255 and data[i][j][1] == 0 and data[i][j][2] == 0:
true_positive_counter+=1
else:
if data[i][j][0] == 255 and data[i][j][1] == 0 and data[i][j][2] == 0:
false_positive_counter+=1
avg_recall.append(true_positive_counter/true_sky_counter)
avg_pre.append(true_positive_counter/(true_positive_counter+false_positive_counter))
hsl_acc_list.append((true_positive_counter+false_positive_counter)/((len(marked_pixel_values)/2+1)*len(marked_pixel_values[0])))
manual_acc_list.append(true_sky_counter/((len(marked_pixel_values)/2+1)*len(marked_pixel_values[0])))
print ("meanshift program marked proportion:", (true_positive_counter+false_positive_counter)/((len(marked_pixel_values)/2+1)*len(marked_pixel_values[0])))
print ("manually marked proportion:",true_sky_counter/((len(marked_pixel_values)/2+1)*len(marked_pixel_values[0])))
# print ("recall: ", avg_recall)
# print ("precision: " , avg_pre)
# print ("recall: ", np.mean(avg_recall))
# print ("precision: " , np.mean(avg_pre))
# print (hsl_acc_list)
# print (manual_acc_list)
|
[
"numpy.asarray",
"PIL.Image.open",
"pymeanshift.segment",
"cv2.imread",
"PIL.Image.fromarray",
"PIL.Image.merge"
] |
[((351, 367), 'PIL.Image.open', 'Image.open', (['file'], {}), '(file)\n', (361, 367), False, 'from PIL import Image, ImageStat\n'), ((534, 567), 'cv2.imread', 'cv2.imread', (["(file + '_cropped.png')"], {}), "(file + '_cropped.png')\n", (544, 567), False, 'import cv2\n'), ((616, 694), 'pymeanshift.segment', 'pms.segment', (['original_image'], {'spatial_radius': '(7)', 'range_radius': '(8)', 'min_density': '(300)'}), '(original_image, spatial_radius=7, range_radius=8, min_density=300)\n', (627, 694), True, 'import pymeanshift as pms\n'), ((1143, 1159), 'cv2.imread', 'cv2.imread', (['file'], {}), '(file)\n', (1153, 1159), False, 'import cv2\n'), ((1168, 1201), 'numpy.asarray', 'np.asarray', (['origin'], {'dtype': '"""int32"""'}), "(origin, dtype='int32')\n", (1178, 1201), True, 'import numpy as np\n'), ((1548, 1577), 'PIL.Image.merge', 'Image.merge', (['"""RGB"""', '(r, g, b)'], {}), "('RGB', (r, g, b))\n", (1559, 1577), False, 'from PIL import Image, ImageStat\n'), ((1621, 1653), 'PIL.Image.fromarray', 'Image.fromarray', (['segmented_image'], {}), '(segmented_image)\n', (1636, 1653), False, 'from PIL import Image, ImageStat\n'), ((1680, 1709), 'PIL.Image.merge', 'Image.merge', (['"""RGB"""', '(r, g, b)'], {}), "('RGB', (r, g, b))\n", (1691, 1709), False, 'from PIL import Image, ImageStat\n'), ((1845, 1865), 'cv2.imread', 'cv2.imread', (['tempfile'], {}), '(tempfile)\n', (1855, 1865), False, 'import cv2\n')]
|
#!/usr/bin/env python
#
# fix_settling_shifts.py
#
# Author: <NAME>, STScI, February 2022
#
# Script to fix shifts between groups introduced by settling issue.
#
# Input arguments:
# required: image filename (single "uncal.fits" file)
# optional: boxpeaksize (box size for searching central peak; default=20 pixels)
# optional: boxfitsize (box size for fitting central peak; default=200 pixels)
#
# Output: Level-2 calibrated file ("cal.fits" file)
#
#
# Example of how to run it - from the unix command line:
#
# $ fix_settling_shifts.py jw01143001001_02101_00001_nrcalong_uncal.fits
#
# which produces:
# jw01143001001_02101_00001_nrcalong_cal.fits
import argparse, os, sys
from glob import glob
import astropy
from astropy.io import ascii as asc
from astropy.io import fits
from astropy import stats
from astropy import nddata
from astropy.nddata import block_reduce
from astropy.modeling import models, fitting
import jwst
from jwst.pipeline import calwebb_detector1
from jwst.pipeline import Detector1Pipeline
from jwst.pipeline import Image2Pipeline
from jwst.jump import JumpStep
from jwst.ramp_fitting import RampFitStep
from jwst.gain_scale import GainScaleStep
import scipy
from scipy.ndimage import gaussian_filter, median_filter
from scipy import signal
import numpy as np
def crosscorfit(data_template,data_to_be_shifted,corrboxpeak,corrboxfit):
    # Cross-correlate the two images via FFT convolution, locate the correlation
    # peak inside a small search box, and refine the sub-pixel offset (dx, dy)
    # with a 2D Gaussian fit (a second fit with equal, fixed sigmas stabilises it).
    #
    corr = scipy.signal.fftconvolve(data_template,data_to_be_shifted[::-1,::-1],mode='same')
#
xcen,ycen = int(float(corr.shape[0])/2.),int(float(corr.shape[1])/2.)
#
box2 = int(float(corrboxpeak)/2.)
#
ypeak,xpeak = np.unravel_index(np.argmax(corr[ycen-box2:ycen+box2,xcen-box2:xcen+box2]),(corrboxpeak,corrboxpeak))
#
x0,y0 = xcen-box2+xpeak,ycen-box2+ypeak
#
ampl = corr[y0,x0]
#
x_stddev0,y_stddev0 = 2.,2.
#
corrboxfit2 = float(corrboxfit)/2.
#
x1,y1 = xcen-int(corrboxfit2),ycen-int(corrboxfit2)
x2,y2 = x1+corrboxfit,y1+corrboxfit
#
corr_fit = corr[y1:y2,x1:x2]
#
x0fit,y0fit = x0-x1,y0-y1
#
y_array_2d,x_array_2d = np.mgrid[:corrboxfit,:corrboxfit]
#
gfit_init = models.Gaussian2D(amplitude=ampl,x_mean=x0fit,y_mean=y0fit,x_stddev=x_stddev0,y_stddev=y_stddev0,theta=0.)
gfit_init.theta.fixed = True
#
gfit_model = fitting.LevMarLSQFitter()
gfit_results = gfit_model(gfit_init,x_array_2d,y_array_2d,corr_fit)
#
amplfit = gfit_results.amplitude.value
thetafit = gfit_results.theta.value
xfit,yfit = gfit_results.x_mean.value,gfit_results.y_mean.value
xsigma,ysigma = gfit_results.x_stddev.value,gfit_results.y_stddev.value
dx,dy = xfit-corrboxfit2,yfit-corrboxfit2
#
print('fit1 results: %s %3i %3i %5i %5i %8.3f %8.3f %8.3f %8.3f %14.1f %14.1f %8.3f %8.3f %8.3f' % (uncalfile,ni+1,ng+1,x0fit,y0fit,xfit,yfit,dx,dy,ampl,amplfit,thetafit,xsigma,ysigma))
#
#
sigma_new = np.min([xsigma,ysigma])
#
gfit_init2 = models.Gaussian2D(amplitude=ampl,x_mean=x0fit,y_mean=y0fit,x_stddev=sigma_new,y_stddev=sigma_new,theta=0.)
gfit_init2.x_stddev.fixed = True
gfit_init2.y_stddev.fixed = True
gfit_init2.theta.fixed = True
#
gfit_model2 = fitting.LevMarLSQFitter()
gfit_results2 = gfit_model2(gfit_init2,x_array_2d,y_array_2d,corr_fit)
#
amplfit = gfit_results2.amplitude.value
thetafit = gfit_results2.theta.value
xfit,yfit = gfit_results2.x_mean.value,gfit_results2.y_mean.value
xsigma,ysigma = gfit_results2.x_stddev.value,gfit_results2.y_stddev.value
dx,dy = xfit-corrboxfit2,yfit-corrboxfit2
#
print('fit2 results: %s %3i %3i %5i %5i %8.3f %8.3f %8.3f %8.3f %14.1f %14.1f %8.3f %8.3f %8.3f' % (uncalfile,ni+1,ng+1,x0fit,y0fit,xfit,yfit,dx,dy,ampl,amplfit,thetafit,xsigma,ysigma))
#
if ((xpeak in [0,corrboxpeak]) or (ypeak in [0,corrboxpeak])): exit_need_adjustment('xpeak, ypeak = '+repr(xpeak)+', '+repr(ypeak)+'; corrboxpeak = '+repr(corrboxpeak))
if ((xsigma > box2) or (ysigma > box2)): exit_need_adjustment('xsigma, ysigma = '+repr(xsigma)+', '+repr(ysigma)+'; box2 = '+repr(box2))
#
return dx,dy
def apply_shift(data_to_be_shifted,dx,dy,bkgd):
    # Shift the image by the nearest-integer offset (dx, dy), filling the
    # exposed pixels with the background value bkgd.
    #
    dxi,dyi = round(dx),round(dy)
#
ny,nx = data_to_be_shifted.shape
#
if (dxi >= 0):
x1old,x2old = 0,nx-dxi
x1new,x2new = dxi,nx
else:
x1old,x2old = -dxi,nx
x1new,x2new = 0,nx+dxi
#
if (dyi >= 0):
y1old,y2old = 0,ny-dyi
y1new,y2new = dyi,ny
else:
y1old,y2old = -dyi,ny
y1new,y2new = 0,ny+dyi
#
data_to_be_shifted_new = np.full(data_to_be_shifted.shape,bkgd,dtype=np.float32)
data_to_be_shifted_new[y1new:y2new,x1new:x2new] = data_to_be_shifted[y1old:y2old,x1old:x2old]
#
return data_to_be_shifted_new
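def _demo_apply_shift():
    # Illustrative sketch only (synthetic data, not part of the original pipeline):
    # shift a small image by (+1, -1) pixels in (x, y) and fill the exposed
    # pixels with a constant background value.
    demo = np.arange(9, dtype=np.float32).reshape(3, 3)
    print(apply_shift(demo, 1.0, -1.0, 0.0))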
def exit_need_adjustment(err_info):
#
print('''
***
*** ERROR - The code has encountered potentially problematic results, and is therefore exiting.
***''')
print(' *** Error info is: ',err_info)
print(''' ***
***
*** Please contact the author (<NAME>) to discuss how to run it on this dataset.
***
''')
sys.exit()
if __name__ == '__main__':
#
print('''
*** -----------------------------------------------------------------------------------
***
*** fix_settling_shifts.py
***
*** Author: <NAME>, STScI, February 2022
***
*** Script to fix shifts between groups introduced by settling issue.
***
*** Input arguments:
*** required: image filename (single "uncal.fits" file)
*** optional: boxpeaksize (box size for searching central peak; default=20 pixels)
*** optional: boxfitsize (box size for fitting central peak; default=200 pixels)
***
*** Output: Level-2 calibrated file ("cal.fits" file)
***
***
*** Example of how to run it - from the unix command line:
***
*** $ fix_settling_shifts.py jw01143001001_02101_00001_nrcalong_uncal.fits
***
*** which produces:
*** jw01143001001_02101_00001_nrcalong_cal.fits
***
*** -----------------------------------------------------------------------------------
''')
#
parser = argparse.ArgumentParser(description='Fix shifts between groups introduced by settling issue.')
parser.add_argument('image', default='NONE', type=str, help='Input uncal.fits filename')
parser.add_argument('-bp','--boxpeaksize',default=20, type=int, help='Box size for searching central peak')
parser.add_argument('-bf','--boxfitsize',default=200, type=int, help='Box size for fitting central peak')
#
options = parser.parse_args()
uncalfile = options.image
corrboxpeak = options.boxpeaksize
corrboxfit = options.boxfitsize
#
parameter_dict = {'jump': {'skip': True},
'ramp_fit': {'skip': True},
'gain_scale': {'skip': True}}
#
rampfile = uncalfile.replace('_uncal.fits', '_ramp.fits')
#
if (not os.path.exists(rampfile)):
rampdata = calwebb_detector1.Detector1Pipeline.call(uncalfile, steps=parameter_dict, save_results=True)
#
hdr0 = fits.getheader(uncalfile,0)
n_ints = hdr0['NINTS']
n_grps_per_int = hdr0['NGROUPS']
#
data = fits.getdata(uncalfile,1)
#
ramp_cube_aligned = np.zeros(data.shape).astype(np.float32)
#
first_group = True
for ni in range(n_ints):
#
for ng in range(n_grps_per_int):
#
if (ng != 0): data_intgrp_prev = data_intgrp
#
data_intgrp = fits.getdata(rampfile,1)[ni,ng,:,:]
#
if (ng == 0):
#
data_diff = data_intgrp
#
else:
#
data_diff = data_intgrp - data_intgrp_prev
#
mean,med,rms = stats.sigma_clipped_stats(data_diff, maxiters=10, sigma_lower=6., sigma_upper=4)
#
print('n_int = ',ni+1,'; n_grp = ',ng+1,'; mean, med, rms = ',mean,med,rms)
#
data_diff_med = scipy.ndimage.median_filter(input=data_diff, size=3, mode='constant', cval=0.)
#
data_diff_sub = data_diff_med - med
#
data_diff_sub[np.where(data_diff_sub < (5.*rms))] = 0.
#
data_diff_sub_gauss = gaussian_filter(data_diff_sub, sigma=1.0, truncate=5., order=0, mode='constant', cval=0.).astype(np.float32)
#
if (first_group):
#
first_group = False
#
group_template = data_diff_sub_gauss
#
ramp_cube_aligned[0,0,:,:] = data_diff
#
else:
#
dx,dy = crosscorfit(group_template,data_diff_sub_gauss,corrboxpeak,corrboxfit)
#
data_diff_sub_gauss_shifted = apply_shift(data_diff_sub_gauss,dx,dy,0.)
#
dx_check,dy_check = crosscorfit(group_template,data_diff_sub_gauss_shifted,corrboxpeak,corrboxfit)
#
print('shift results: %s %3i %3i %5i %5i %8.3f %8.3f %8.3f %8.3f' % (uncalfile,ni+1,ng+1,round(dx),round(dy),dx,dy,dx_check,dy_check))
#
data_diff_shifted = apply_shift(data_diff,dx,dy,med)
#
if (ng == 0):
#
ramp_cube_aligned[ni,ng,:,:] = data_diff_shifted
#
else:
#
ramp_cube_aligned[ni,ng,:,:] = ramp_cube_aligned[ni,ng-1,:,:] + data_diff_shifted
rampfile_aligned = rampfile[:-10] + '_aligned_ramp.fits'
#
a = os.system('/bin/rm -f '+rampfile_aligned)
a = os.system('/bin/cp -p '+rampfile+' '+rampfile_aligned)
#
f = fits.open(rampfile_aligned,'update')
#
f[1].data = ramp_cube_aligned
#
f.flush()
f.close()
#
caldetector1_output_jumpstep = JumpStep.call(rampfile_aligned, save_results=False)
caldetector1_output_rampfit = RampFitStep.call(caldetector1_output_jumpstep, save_results=False)
caldetector1_output_gainscale0 = GainScaleStep.call(caldetector1_output_rampfit[0], save_results=False)
caldetector1_output_gainscale1 = GainScaleStep.call(caldetector1_output_rampfit[1], save_results=False)
#
calimage2_output0 = Image2Pipeline.call(caldetector1_output_gainscale0, save_results=True)
calimage2_output1 = Image2Pipeline.call(caldetector1_output_gainscale1, save_results=True)
|
[
"argparse.ArgumentParser",
"numpy.argmax",
"astropy.stats.sigma_clipped_stats",
"jwst.ramp_fitting.RampFitStep.call",
"scipy.signal.fftconvolve",
"scipy.ndimage.median_filter",
"numpy.full",
"jwst.pipeline.calwebb_detector1.Detector1Pipeline.call",
"jwst.pipeline.Image2Pipeline.call",
"astropy.io.fits.getdata",
"scipy.ndimage.gaussian_filter",
"os.path.exists",
"os.system",
"numpy.min",
"astropy.io.fits.open",
"jwst.gain_scale.GainScaleStep.call",
"sys.exit",
"astropy.modeling.models.Gaussian2D",
"jwst.jump.JumpStep.call",
"numpy.zeros",
"astropy.io.fits.getheader",
"astropy.modeling.fitting.LevMarLSQFitter",
"numpy.where"
] |
[((1422, 1510), 'scipy.signal.fftconvolve', 'scipy.signal.fftconvolve', (['data_template', 'data_to_be_shifted[::-1, ::-1]'], {'mode': '"""same"""'}), "(data_template, data_to_be_shifted[::-1, ::-1],\n mode='same')\n", (1446, 1510), False, 'import scipy\n'), ((2132, 2249), 'astropy.modeling.models.Gaussian2D', 'models.Gaussian2D', ([], {'amplitude': 'ampl', 'x_mean': 'x0fit', 'y_mean': 'y0fit', 'x_stddev': 'x_stddev0', 'y_stddev': 'y_stddev0', 'theta': '(0.0)'}), '(amplitude=ampl, x_mean=x0fit, y_mean=y0fit, x_stddev=\n x_stddev0, y_stddev=y_stddev0, theta=0.0)\n', (2149, 2249), False, 'from astropy.modeling import models, fitting\n'), ((2291, 2316), 'astropy.modeling.fitting.LevMarLSQFitter', 'fitting.LevMarLSQFitter', ([], {}), '()\n', (2314, 2316), False, 'from astropy.modeling import models, fitting\n'), ((2871, 2895), 'numpy.min', 'np.min', (['[xsigma, ysigma]'], {}), '([xsigma, ysigma])\n', (2877, 2895), True, 'import numpy as np\n'), ((2914, 3031), 'astropy.modeling.models.Gaussian2D', 'models.Gaussian2D', ([], {'amplitude': 'ampl', 'x_mean': 'x0fit', 'y_mean': 'y0fit', 'x_stddev': 'sigma_new', 'y_stddev': 'sigma_new', 'theta': '(0.0)'}), '(amplitude=ampl, x_mean=x0fit, y_mean=y0fit, x_stddev=\n sigma_new, y_stddev=sigma_new, theta=0.0)\n', (2931, 3031), False, 'from astropy.modeling import models, fitting\n'), ((3145, 3170), 'astropy.modeling.fitting.LevMarLSQFitter', 'fitting.LevMarLSQFitter', ([], {}), '()\n', (3168, 3170), False, 'from astropy.modeling import models, fitting\n'), ((4472, 4529), 'numpy.full', 'np.full', (['data_to_be_shifted.shape', 'bkgd'], {'dtype': 'np.float32'}), '(data_to_be_shifted.shape, bkgd, dtype=np.float32)\n', (4479, 4529), True, 'import numpy as np\n'), ((4993, 5003), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5001, 5003), False, 'import argparse, os, sys\n'), ((6005, 6104), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Fix shifts between groups introduced by settling issue."""'}), "(description=\n 'Fix shifts between groups introduced by settling issue.')\n", (6028, 6104), False, 'import argparse, os, sys\n'), ((6927, 6955), 'astropy.io.fits.getheader', 'fits.getheader', (['uncalfile', '(0)'], {}), '(uncalfile, 0)\n', (6941, 6955), False, 'from astropy.io import fits\n'), ((7036, 7062), 'astropy.io.fits.getdata', 'fits.getdata', (['uncalfile', '(1)'], {}), '(uncalfile, 1)\n', (7048, 7062), False, 'from astropy.io import fits\n'), ((9114, 9157), 'os.system', 'os.system', (["('/bin/rm -f ' + rampfile_aligned)"], {}), "('/bin/rm -f ' + rampfile_aligned)\n", (9123, 9157), False, 'import argparse, os, sys\n'), ((9162, 9222), 'os.system', 'os.system', (["('/bin/cp -p ' + rampfile + ' ' + rampfile_aligned)"], {}), "('/bin/cp -p ' + rampfile + ' ' + rampfile_aligned)\n", (9171, 9222), False, 'import argparse, os, sys\n'), ((9227, 9264), 'astropy.io.fits.open', 'fits.open', (['rampfile_aligned', '"""update"""'], {}), "(rampfile_aligned, 'update')\n", (9236, 9264), False, 'from astropy.io import fits\n'), ((9367, 9418), 'jwst.jump.JumpStep.call', 'JumpStep.call', (['rampfile_aligned'], {'save_results': '(False)'}), '(rampfile_aligned, save_results=False)\n', (9380, 9418), False, 'from jwst.jump import JumpStep\n'), ((9473, 9539), 'jwst.ramp_fitting.RampFitStep.call', 'RampFitStep.call', (['caldetector1_output_jumpstep'], {'save_results': '(False)'}), '(caldetector1_output_jumpstep, save_results=False)\n', (9489, 9539), False, 'from jwst.ramp_fitting import RampFitStep\n'), ((9579, 9649), 'jwst.gain_scale.GainScaleStep.call', 
'GainScaleStep.call', (['caldetector1_output_rampfit[0]'], {'save_results': '(False)'}), '(caldetector1_output_rampfit[0], save_results=False)\n', (9597, 9649), False, 'from jwst.gain_scale import GainScaleStep\n'), ((9685, 9755), 'jwst.gain_scale.GainScaleStep.call', 'GainScaleStep.call', (['caldetector1_output_rampfit[1]'], {'save_results': '(False)'}), '(caldetector1_output_rampfit[1], save_results=False)\n', (9703, 9755), False, 'from jwst.gain_scale import GainScaleStep\n'), ((9782, 9852), 'jwst.pipeline.Image2Pipeline.call', 'Image2Pipeline.call', (['caldetector1_output_gainscale0'], {'save_results': '(True)'}), '(caldetector1_output_gainscale0, save_results=True)\n', (9801, 9852), False, 'from jwst.pipeline import Image2Pipeline\n'), ((9875, 9945), 'jwst.pipeline.Image2Pipeline.call', 'Image2Pipeline.call', (['caldetector1_output_gainscale1'], {'save_results': '(True)'}), '(caldetector1_output_gainscale1, save_results=True)\n', (9894, 9945), False, 'from jwst.pipeline import Image2Pipeline\n'), ((1657, 1722), 'numpy.argmax', 'np.argmax', (['corr[ycen - box2:ycen + box2, xcen - box2:xcen + box2]'], {}), '(corr[ycen - box2:ycen + box2, xcen - box2:xcen + box2])\n', (1666, 1722), True, 'import numpy as np\n'), ((6779, 6803), 'os.path.exists', 'os.path.exists', (['rampfile'], {}), '(rampfile)\n', (6793, 6803), False, 'import argparse, os, sys\n'), ((6821, 6917), 'jwst.pipeline.calwebb_detector1.Detector1Pipeline.call', 'calwebb_detector1.Detector1Pipeline.call', (['uncalfile'], {'steps': 'parameter_dict', 'save_results': '(True)'}), '(uncalfile, steps=parameter_dict,\n save_results=True)\n', (6861, 6917), False, 'from jwst.pipeline import calwebb_detector1\n'), ((7088, 7108), 'numpy.zeros', 'np.zeros', (['data.shape'], {}), '(data.shape)\n', (7096, 7108), True, 'import numpy as np\n'), ((7530, 7615), 'astropy.stats.sigma_clipped_stats', 'stats.sigma_clipped_stats', (['data_diff'], {'maxiters': '(10)', 'sigma_lower': '(6.0)', 'sigma_upper': '(4)'}), '(data_diff, maxiters=10, sigma_lower=6.0,\n sigma_upper=4)\n', (7555, 7615), False, 'from astropy import stats\n'), ((7733, 7812), 'scipy.ndimage.median_filter', 'scipy.ndimage.median_filter', ([], {'input': 'data_diff', 'size': '(3)', 'mode': '"""constant"""', 'cval': '(0.0)'}), "(input=data_diff, size=3, mode='constant', cval=0.0)\n", (7760, 7812), False, 'import scipy\n'), ((7312, 7337), 'astropy.io.fits.getdata', 'fits.getdata', (['rampfile', '(1)'], {}), '(rampfile, 1)\n', (7324, 7337), False, 'from astropy.io import fits\n'), ((7890, 7925), 'numpy.where', 'np.where', (['(data_diff_sub < 5.0 * rms)'], {}), '(data_diff_sub < 5.0 * rms)\n', (7898, 7925), True, 'import numpy as np\n'), ((7967, 8063), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['data_diff_sub'], {'sigma': '(1.0)', 'truncate': '(5.0)', 'order': '(0)', 'mode': '"""constant"""', 'cval': '(0.0)'}), "(data_diff_sub, sigma=1.0, truncate=5.0, order=0, mode=\n 'constant', cval=0.0)\n", (7982, 8063), False, 'from scipy.ndimage import gaussian_filter, median_filter\n')]
|
"""
Unit tests for the atom ID filter
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import unittest
import numpy as np
from ....system import lattice
from .. import atomIdFilter
from .. import base
################################################################################
class TestAtomId(unittest.TestCase):
"""
Test atom ID filter
"""
def setUp(self):
"""
Called before each test
"""
# generate lattice
self.lattice = lattice.Lattice()
self.lattice.addAtom("Au", [0,0,0], 0.0)
self.lattice.addAtom("Au", [1,0,0], 0.0)
self.lattice.addAtom("Au", [0,1,0], 0.0)
self.lattice.addAtom("Au", [0,0,1], 0.0)
self.lattice.addAtom("Au", [1,1,0], 0.0)
self.lattice.addAtom("Au", [0,1,1], 0.0)
self.lattice.addAtom("Au", [1,1,1], 0.0)
self.lattice.addAtom("Au", [2,0,0], 0.0)
self.lattice.addAtom("Au", [0,2,0], 0.0)
self.lattice.addAtom("Au", [0,0,2], 0.0)
# filter
self.filter = atomIdFilter.AtomIdFilter("Atom ID")
def tearDown(self):
"""
Called after each test
"""
# remove refs
self.lattice = None
self.filter = None
def test_atomID(self):
"""
Atom ID filter
"""
# settings
settings = atomIdFilter.AtomIdFilterSettings()
settings.updateSetting("filterString", "0")
# set PBC
self.lattice.PBC[:] = 1
# filter input
filterInput = base.FilterInput()
filterInput.inputState = self.lattice
visibleAtoms = np.arange(self.lattice.NAtoms, dtype=np.int32)
filterInput.visibleAtoms = visibleAtoms
filterInput.NScalars = 0
filterInput.fullScalars = np.empty(0, np.float64)
filterInput.NVectors = 0
filterInput.fullVectors = np.empty(0, np.float64)
filterInput.ompNumThreads = 1
# call filter
result = self.filter.apply(filterInput, settings)
self.assertIsInstance(result, base.FilterResult)
# make sure num visible is correct
self.assertEqual(len(visibleAtoms), 1)
# check position is correct
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[0])), [0,0,0])
# settings
settings = atomIdFilter.AtomIdFilterSettings()
settings.updateSetting("filterString", "1-3, 8")
# set PBC
self.lattice.PBC[:] = 1
# filter input
filterInput = base.FilterInput()
filterInput.inputState = self.lattice
visibleAtoms = np.arange(self.lattice.NAtoms, dtype=np.int32)
filterInput.visibleAtoms = visibleAtoms
filterInput.NScalars = 0
filterInput.fullScalars = np.empty(0, np.float64)
filterInput.NVectors = 0
filterInput.fullVectors = np.empty(0, np.float64)
filterInput.ompNumThreads = 1
# call filter
result = self.filter.apply(filterInput, settings)
self.assertIsInstance(result, base.FilterResult)
# make sure num visible is correct
self.assertEqual(len(visibleAtoms), 4)
# check position is correct
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[0])), [1,0,0])
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[1])), [0,1,0])
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[2])), [0,0,1])
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[3])), [0,2,0])
|
[
"numpy.empty",
"numpy.arange"
] |
[((1719, 1765), 'numpy.arange', 'np.arange', (['self.lattice.NAtoms'], {'dtype': 'np.int32'}), '(self.lattice.NAtoms, dtype=np.int32)\n', (1728, 1765), True, 'import numpy as np\n'), ((1881, 1904), 'numpy.empty', 'np.empty', (['(0)', 'np.float64'], {}), '(0, np.float64)\n', (1889, 1904), True, 'import numpy as np\n'), ((1972, 1995), 'numpy.empty', 'np.empty', (['(0)', 'np.float64'], {}), '(0, np.float64)\n', (1980, 1995), True, 'import numpy as np\n'), ((2748, 2794), 'numpy.arange', 'np.arange', (['self.lattice.NAtoms'], {'dtype': 'np.int32'}), '(self.lattice.NAtoms, dtype=np.int32)\n', (2757, 2794), True, 'import numpy as np\n'), ((2910, 2933), 'numpy.empty', 'np.empty', (['(0)', 'np.float64'], {}), '(0, np.float64)\n', (2918, 2933), True, 'import numpy as np\n'), ((3001, 3024), 'numpy.empty', 'np.empty', (['(0)', 'np.float64'], {}), '(0, np.float64)\n', (3009, 3024), True, 'import numpy as np\n')]
|
"""
Filename: cartesian.py
Authors: <NAME>
Implements cartesian products and regular cartesian grids.
"""
import numpy
from numba import njit
def cartesian(nodes, order="C"):
"""Cartesian product of a list of arrays
Parameters:
-----------
nodes: (list of 1d-arrays)
order: ('C' or 'F') order in which the product is enumerated
Returns:
--------
out: (2d-array) each line corresponds to one point of the product space
"""
nodes = [numpy.array(e) for e in nodes]
shapes = [e.shape[0] for e in nodes]
n = len(nodes)
l = numpy.prod(shapes)
out = numpy.zeros((l, n))
if order == "C":
repetitions = numpy.cumprod([1] + shapes[:-1])
else:
shapes.reverse()
sh = [1] + shapes[:-1]
repetitions = numpy.cumprod(sh)
repetitions = repetitions.tolist()
repetitions.reverse()
for i in range(n):
_repeat_1d(nodes[i], repetitions[i], out[:, i])
return out
def mlinspace(a, b, nums, order="C"):
"""Constructs a regular cartesian grid
Parameters:
-----------
a: (1d-array) lower bounds in each dimension
b: (1d-array) upper bounds in each dimension
nums: (1d-array) number of nodes along each dimension
order: ('C' or 'F') order in which the product is enumerated
Returns:
--------
out: (2d-array) each line corresponds to one point of the product space
"""
a = numpy.array(a, dtype="float64")
b = numpy.array(b, dtype="float64")
nums = numpy.array(nums, dtype="int64")
nodes = [numpy.linspace(a[i], b[i], nums[i]) for i in range(len(nums))]
return cartesian(nodes, order=order)
@njit(cache=True)
def _repeat_1d(x, K, out):
"""Repeats each element of a vector many times and repeats the whole result many times
Parameters
----------
x: (1d array) vector to be repeated
K: (int) number of times each element of x is repeated (inner iterations)
out: (1d array) placeholder for the result
Returns
-------
None
"""
N = x.shape[0]
L = out.shape[0] // (K * N) # number of outer iterations
# K # number of inner iterations
# the result out should enumerate in C-order the elements
# of a 3-dimensional array T of dimensions (K,N,L)
# such that for all k,n,l, we have T[k,n,l] == x[n]
for n in range(N):
val = x[n]
for k in range(K):
for l in range(L):
ind = k * N * L + n * L + l
out[ind] = val
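def _demo_mlinspace():
    # Illustrative usage sketch (not part of the original module): a regular
    # 2 x 3 grid on [0, 1] x [0, 10]; each row of the output is one grid point,
    # enumerated in C order (the last dimension varies fastest).
    grid = mlinspace([0.0, 0.0], [1.0, 10.0], [2, 3])
    print(grid.shape)  # (6, 2)
    print(grid)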
|
[
"numpy.cumprod",
"numba.njit",
"numpy.zeros",
"numpy.array",
"numpy.linspace",
"numpy.prod"
] |
[((1674, 1690), 'numba.njit', 'njit', ([], {'cache': '(True)'}), '(cache=True)\n', (1678, 1690), False, 'from numba import njit\n'), ((579, 597), 'numpy.prod', 'numpy.prod', (['shapes'], {}), '(shapes)\n', (589, 597), False, 'import numpy\n'), ((608, 627), 'numpy.zeros', 'numpy.zeros', (['(l, n)'], {}), '((l, n))\n', (619, 627), False, 'import numpy\n'), ((1437, 1468), 'numpy.array', 'numpy.array', (['a'], {'dtype': '"""float64"""'}), "(a, dtype='float64')\n", (1448, 1468), False, 'import numpy\n'), ((1477, 1508), 'numpy.array', 'numpy.array', (['b'], {'dtype': '"""float64"""'}), "(b, dtype='float64')\n", (1488, 1508), False, 'import numpy\n'), ((1520, 1552), 'numpy.array', 'numpy.array', (['nums'], {'dtype': '"""int64"""'}), "(nums, dtype='int64')\n", (1531, 1552), False, 'import numpy\n'), ((479, 493), 'numpy.array', 'numpy.array', (['e'], {}), '(e)\n', (490, 493), False, 'import numpy\n'), ((672, 704), 'numpy.cumprod', 'numpy.cumprod', (['([1] + shapes[:-1])'], {}), '([1] + shapes[:-1])\n', (685, 704), False, 'import numpy\n'), ((793, 810), 'numpy.cumprod', 'numpy.cumprod', (['sh'], {}), '(sh)\n', (806, 810), False, 'import numpy\n'), ((1566, 1601), 'numpy.linspace', 'numpy.linspace', (['a[i]', 'b[i]', 'nums[i]'], {}), '(a[i], b[i], nums[i])\n', (1580, 1601), False, 'import numpy\n')]
|
"""
-------------------------------------------
Author: <NAME>
Date: 7/3/19
-------------------------------------------
"""
# common packages, most likely already installed
import scipy
import math
import pandas as pd
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
import sys
from scipy.special import ndtr
# uncommon packages required for this analysis
import seaborn as sns # pip install seaborn
# -------------------- LOCALIZATION ---------------------------------#
def localization(Gint, focal_genes, num_reps = 10, sample_frac = 0.8, method = 'numedges', plot = True, print_counter = False,
background_list=None):
"""
Function to calculate localization of an input set of genes (focal_genes) on a background network (Gint).
    Option to compute number of edges (method = 'numedges') or largest connected component (method = 'LCC')
    localization analysis. Calculates by sampling sub-sections of the focal genes/random set. Percentage to sample
    is set by sample_frac. Option to plot the distributions of random and focal gene localization.
Args:
Gint: Networkx Graph, background network to randomly sample from
focal_genes: List, set of genes to calculate localization of
num_reps: Int, number of times to randomly sample
sample_frac: Float, percent of sampled genes
        method: String, to decide which type of localization analysis to run. Options: 'numedges', 'LCC', or 'both'.
plot: Bool, whether to plot the distributions in the output jupyter notebook cell
print_counter: Bool, whether to print a counter that tells you which iteration you are on (every 25 iterations).
Useful when the num_reps is very high.
        background_list: list of background genes to sample from. If None, just use all interactome genes
Returns:
numedges_list: List, the number of edges calculated for each rep, sampling over focal genes.
            Empty if method = 'LCC'.
        numedges_rand: List, the number of edges calculated for each rep, sampling over random genes of
            similar degree in the background network. Empty if method = 'LCC'.
LCC_list: List, the size of the largest connected component, calculated for each rep, sampling over focal genes.
Empty if method = 'numedges'.
LCC_rand: List, the size of the largest connected component, calculated for each rep, sampling over random genes of
similar degree in the background network. Empty if method = 'numedges'.
"""
# Create degree bins to sample from
bins = get_degree_binning(Gint, 10)
min_degree, max_degree, genes_binned = zip(*bins)
bin_df = pd.DataFrame({'min_degree':min_degree, 'max_degree':max_degree, 'genes_binned':genes_binned})
# create a lookup table for degree and index
actual_degree_to_bin_df_idx = {}
for i in range(0, bin_df['max_degree'].max() + 1):
idx_temp = bin_df[ (bin_df['min_degree'].lt(i + 1)) & (bin_df['max_degree'].gt(i - 1)) ].index.tolist()
if len(idx_temp) > 0: # there are some degrees which aren't represented in the graph
actual_degree_to_bin_df_idx[i] = idx_temp[0]
focal_genes = list(np.intersect1d(focal_genes, Gint.nodes())) # only use focal_genes which are in Gint
numedges_list = []
numedges_rand = []
LCC_list = []
LCC_rand = []
    if background_list is None:
background_list=Gint.nodes()
for r in range(num_reps):
if print_counter == True:
# so user knows how far along the process is
if (r % 25) == 0:
print(r)
focal_80 = focal_genes
np.random.shuffle(focal_80)
focal_80 = focal_80[:int(len(focal_80)*sample_frac)]
# find genes with similar degrees to focal gene degree
seed_random = []
for g in focal_80:
degree_temp = nx.degree(Gint,g)
genes_temp = bin_df.loc[actual_degree_to_bin_df_idx[degree_temp]]['genes_binned'] # use the lookup table for speed
np.random.shuffle(genes_temp) # shuffle them
while (genes_temp[0] in seed_random) or (genes_temp[0] not in background_list): # make sure the gene isn't already in the list, but is in the background_list
np.random.shuffle(genes_temp) # shuffle them
seed_random.append(genes_temp[0]) # build the seed_D1_random list
#print(len(focal_80))
#print(len(seed_random))
#print(len(np.unique(seed_random)))
if (method == 'numedges') or (method == 'both'):
# number edges calc on focal set
numedges_temp = len(nx.subgraph(Gint,focal_80).edges())
numedges_list.append(numedges_temp)
# number edges calc on random sample
numedges_temp_rand = len(nx.subgraph(Gint,seed_random).edges())
numedges_rand.append(numedges_temp_rand)
if (method == 'LCC') or (method == 'both'):
# LLC calc on focal set
G_sub_temp = nx.Graph(nx.subgraph(Gint, focal_80))
G_sub_temp = max(nx.connected_component_subgraphs(G_sub_temp), key = len)
LCC_list.append(len(G_sub_temp.nodes()))
# LLC calc on random sample
G_sub_temp = nx.Graph(nx.subgraph(Gint, seed_random))
G_sub_temp = max(nx.connected_component_subgraphs(G_sub_temp), key=len)
LCC_rand.append(len(G_sub_temp.nodes()))
if plot == True:
if (method == 'numedges') or (method == 'both'):
fig, ax = plt.subplots(figsize = (12, 7))
sns.distplot(numedges_list, ax = ax, hist = True, label = 'focal genes')
sns.distplot(numedges_rand, ax = ax, hist = True, label = 'random set')
plt.ylabel('frequency', fontsize = 16)
plt.xlabel('number of edges', fontsize = 16)
plt.title('Number of Edges Localization', fontsize = 18)
plt.legend(loc = 'upper right', fontsize = 14)
if (method == 'LCC') or (method == 'both'):
fig, ax = plt.subplots(figsize = (12, 7))
sns.distplot(LCC_list, ax = ax, hist = True, label = 'focal genes')
sns.distplot(LCC_rand, ax = ax, hist = True, label = 'random set')
plt.ylabel('frequency', fontsize = 16)
plt.xlabel('largest connected component size', fontsize = 16)
plt.title('Largest Connected Component Localization', fontsize = 18)
plt.legend(loc = 'upper right', fontsize = 14)
return numedges_list, numedges_rand, LCC_list, LCC_rand
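def _demo_localization():
    # Minimal usage sketch (synthetic graph and hypothetical gene set, not part
    # of the original analysis). It assumes the older networkx API that this
    # module already relies on (a version where connected_component_subgraphs
    # exists and G.nodes() behaves like a list); under that assumption the call
    # below runs end to end.
    G = nx.barabasi_albert_graph(100, 2, seed=0)
    focal = list(range(10))
    return localization(G, focal, num_reps=10, sample_frac=0.8,
                        method='numedges', plot=False)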
def localization_full(Gint, focal_genes,
num_reps = 200,
method = 'LCC',
print_counter = False,
label = 'focal genes',
line_height = 0.1,
legend_loc = 'upper left'):
"""
Function to calculate localization of an input set of genes (focal_genes) on a background network (Gint).
    Option to compute number of edges (method = 'numedges') or largest connected component (method = 'LCC')
    localization analysis. Does no sub-sampling. Plots the distribution of random gene localization, and
    marks the focal set localization on the distribution. Includes the p-value of the focal set localization.
Args:
Gint: Networkx Graph, background network to randomly sample from
focal_genes: List, set of genes to calculate localization of
num_reps: Int, number of times to randomly sample
        method: String, to decide which type of localization analysis to run. Options: 'numedges', 'LCC', or 'both'.
print_counter: Bool, whether to print a counter that tells you which iteration you are on (every 25 iterations).
Useful when the num_reps is very high.
label: String, label for focal genes in graph legend
line_height: Float, the height of the red line that marks the focal gene localization
legend_loc: String, relative position of legend in graph. Something similar to 'upper left'.
Returns:
numedges_list: List, the number of edges calculated for each rep, over focal genes.
            Empty if method = 'LCC'.
        numedges_rand: List, the number of edges calculated for each rep, over random genes of
            similar degree in the background network. Empty if method = 'LCC'.
LCC_list: List, the size of the largest connected component, calculated for each rep, over focal genes.
Empty if method = 'numedges'.
LCC_rand: List, the size of the largest connected component, calculated for each rep, over random genes of
similar degree in the background network. Empty if method = 'numedges'.
"""
numedges_list, numedges_rand, LCC_list, LCC_rand = localization(Gint, focal_genes, num_reps,
sample_frac = 1,
method = method,
plot = False,
print_counter = print_counter)
if method == 'numedges':
analysis_list = numedges_list
analysis_rand = numedges_rand
title = 'number of edges'
else:
analysis_list = LCC_list
analysis_rand = LCC_rand
title = 'largest connected component'
# plot distributions for non-sampled case
fig, ax = plt.subplots(figsize = (12, 7))
sns.set_style('white')
plt.vlines(np.mean(analysis_list), ymin = 0, ymax = line_height, color = 'r', lw = 2, label = label)
sns.kdeplot(analysis_rand, ax = ax, color = 'k', lw = 2, alpha = 0.5, shade = True, label = 'random')
plt.legend(loc = legend_loc, fontsize = 12)
plt.ylabel('frequency', fontsize = 16)
plt.xlabel(title, fontsize = 16)
    # compute the z-score of the focal localization and print the permutation p-value
analysis_z = (np.mean(analysis_list) - np.mean(analysis_rand))/float(np.std(analysis_rand))
print(1 - ndtr(analysis_z))
plt.title('permutation p = ' + str(1 - ndtr(analysis_z)))
return numedges_list, numedges_rand, LCC_list, LCC_rand
def get_degree_binning(g, bin_size, lengths = None):
"""
Helper function for localization(). This function comes from network_utilities.py of emregtoobox. https://github.com/emreg00/toolbox
"""
degree_to_nodes = {}
if sys.version_info >= (3, 0):
for node, degree in dict(g.degree()).items():
if lengths is not None and node not in lengths:
continue
degree_to_nodes.setdefault(degree, []).append(node)
else:
for node, degree in dict(g.degree()).iteritems():
if lengths is not None and node not in lengths:
continue
degree_to_nodes.setdefault(degree, []).append(node)
values = list(degree_to_nodes.keys())
values.sort()
bins = []
i = 0
while i < len(values):
low = values[i]
val = degree_to_nodes[values[i]]
while len(val) < bin_size:
i += 1
if i == len(values):
break
val.extend(degree_to_nodes[values[i]])
if i == len(values):
i -= 1
high = values[i]
i += 1
#print low, high, len(val)
if len(val) < bin_size:
low_, high_, val_ = bins[-1]
bins[-1] = (low_, high, val_ + val)
else:
bins.append((low, high, val))
return bins
|
[
"pandas.DataFrame",
"seaborn.set_style",
"matplotlib.pyplot.title",
"seaborn.kdeplot",
"networkx.degree",
"numpy.std",
"matplotlib.pyplot.legend",
"scipy.special.ndtr",
"numpy.mean",
"seaborn.distplot",
"networkx.subgraph",
"matplotlib.pyplot.ylabel",
"networkx.connected_component_subgraphs",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots",
"numpy.random.shuffle"
] |
[((2872, 2972), 'pandas.DataFrame', 'pd.DataFrame', (["{'min_degree': min_degree, 'max_degree': max_degree, 'genes_binned':\n genes_binned}"], {}), "({'min_degree': min_degree, 'max_degree': max_degree,\n 'genes_binned': genes_binned})\n", (2884, 2972), True, 'import pandas as pd\n'), ((10006, 10035), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (10018, 10035), True, 'import matplotlib.pyplot as plt\n'), ((10042, 10064), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (10055, 10064), True, 'import seaborn as sns\n'), ((10175, 10268), 'seaborn.kdeplot', 'sns.kdeplot', (['analysis_rand'], {'ax': 'ax', 'color': '"""k"""', 'lw': '(2)', 'alpha': '(0.5)', 'shade': '(True)', 'label': '"""random"""'}), "(analysis_rand, ax=ax, color='k', lw=2, alpha=0.5, shade=True,\n label='random')\n", (10186, 10268), True, 'import seaborn as sns\n'), ((10281, 10320), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': 'legend_loc', 'fontsize': '(12)'}), '(loc=legend_loc, fontsize=12)\n', (10291, 10320), True, 'import matplotlib.pyplot as plt\n'), ((10329, 10365), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""frequency"""'], {'fontsize': '(16)'}), "('frequency', fontsize=16)\n", (10339, 10365), True, 'import matplotlib.pyplot as plt\n'), ((10372, 10402), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['title'], {'fontsize': '(16)'}), '(title, fontsize=16)\n', (10382, 10402), True, 'import matplotlib.pyplot as plt\n'), ((3864, 3891), 'numpy.random.shuffle', 'np.random.shuffle', (['focal_80'], {}), '(focal_80)\n', (3881, 3891), True, 'import numpy as np\n'), ((10081, 10103), 'numpy.mean', 'np.mean', (['analysis_list'], {}), '(analysis_list)\n', (10088, 10103), True, 'import numpy as np\n'), ((4095, 4113), 'networkx.degree', 'nx.degree', (['Gint', 'g'], {}), '(Gint, g)\n', (4104, 4113), True, 'import networkx as nx\n'), ((4252, 4281), 'numpy.random.shuffle', 'np.random.shuffle', (['genes_temp'], {}), '(genes_temp)\n', (4269, 4281), True, 'import numpy as np\n'), ((5839, 5868), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (5851, 5868), True, 'import matplotlib.pyplot as plt\n'), ((5883, 5949), 'seaborn.distplot', 'sns.distplot', (['numedges_list'], {'ax': 'ax', 'hist': '(True)', 'label': '"""focal genes"""'}), "(numedges_list, ax=ax, hist=True, label='focal genes')\n", (5895, 5949), True, 'import seaborn as sns\n'), ((5968, 6033), 'seaborn.distplot', 'sns.distplot', (['numedges_rand'], {'ax': 'ax', 'hist': '(True)', 'label': '"""random set"""'}), "(numedges_rand, ax=ax, hist=True, label='random set')\n", (5980, 6033), True, 'import seaborn as sns\n'), ((6052, 6088), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""frequency"""'], {'fontsize': '(16)'}), "('frequency', fontsize=16)\n", (6062, 6088), True, 'import matplotlib.pyplot as plt\n'), ((6103, 6145), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""number of edges"""'], {'fontsize': '(16)'}), "('number of edges', fontsize=16)\n", (6113, 6145), True, 'import matplotlib.pyplot as plt\n'), ((6160, 6214), 'matplotlib.pyplot.title', 'plt.title', (['"""Number of Edges Localization"""'], {'fontsize': '(18)'}), "('Number of Edges Localization', fontsize=18)\n", (6169, 6214), True, 'import matplotlib.pyplot as plt\n'), ((6229, 6271), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'fontsize': '(14)'}), "(loc='upper right', fontsize=14)\n", (6239, 6271), True, 'import matplotlib.pyplot as plt\n'), ((6367, 6396), 
'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (6379, 6396), True, 'import matplotlib.pyplot as plt\n'), ((6411, 6472), 'seaborn.distplot', 'sns.distplot', (['LCC_list'], {'ax': 'ax', 'hist': '(True)', 'label': '"""focal genes"""'}), "(LCC_list, ax=ax, hist=True, label='focal genes')\n", (6423, 6472), True, 'import seaborn as sns\n'), ((6491, 6551), 'seaborn.distplot', 'sns.distplot', (['LCC_rand'], {'ax': 'ax', 'hist': '(True)', 'label': '"""random set"""'}), "(LCC_rand, ax=ax, hist=True, label='random set')\n", (6503, 6551), True, 'import seaborn as sns\n'), ((6570, 6606), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""frequency"""'], {'fontsize': '(16)'}), "('frequency', fontsize=16)\n", (6580, 6606), True, 'import matplotlib.pyplot as plt\n'), ((6621, 6680), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""largest connected component size"""'], {'fontsize': '(16)'}), "('largest connected component size', fontsize=16)\n", (6631, 6680), True, 'import matplotlib.pyplot as plt\n'), ((6695, 6761), 'matplotlib.pyplot.title', 'plt.title', (['"""Largest Connected Component Localization"""'], {'fontsize': '(18)'}), "('Largest Connected Component Localization', fontsize=18)\n", (6704, 6761), True, 'import matplotlib.pyplot as plt\n'), ((6776, 6818), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'fontsize': '(14)'}), "(loc='upper right', fontsize=14)\n", (6786, 6818), True, 'import matplotlib.pyplot as plt\n'), ((10456, 10478), 'numpy.mean', 'np.mean', (['analysis_list'], {}), '(analysis_list)\n', (10463, 10478), True, 'import numpy as np\n'), ((10481, 10503), 'numpy.mean', 'np.mean', (['analysis_rand'], {}), '(analysis_rand)\n', (10488, 10503), True, 'import numpy as np\n'), ((10511, 10532), 'numpy.std', 'np.std', (['analysis_rand'], {}), '(analysis_rand)\n', (10517, 10532), True, 'import numpy as np\n'), ((10549, 10565), 'scipy.special.ndtr', 'ndtr', (['analysis_z'], {}), '(analysis_z)\n', (10553, 10565), False, 'from scipy.special import ndtr\n'), ((4483, 4512), 'numpy.random.shuffle', 'np.random.shuffle', (['genes_temp'], {}), '(genes_temp)\n', (4500, 4512), True, 'import numpy as np\n'), ((5310, 5337), 'networkx.subgraph', 'nx.subgraph', (['Gint', 'focal_80'], {}), '(Gint, focal_80)\n', (5321, 5337), True, 'import networkx as nx\n'), ((5368, 5412), 'networkx.connected_component_subgraphs', 'nx.connected_component_subgraphs', (['G_sub_temp'], {}), '(G_sub_temp)\n', (5400, 5412), True, 'import networkx as nx\n'), ((5565, 5595), 'networkx.subgraph', 'nx.subgraph', (['Gint', 'seed_random'], {}), '(Gint, seed_random)\n', (5576, 5595), True, 'import networkx as nx\n'), ((5626, 5670), 'networkx.connected_component_subgraphs', 'nx.connected_component_subgraphs', (['G_sub_temp'], {}), '(G_sub_temp)\n', (5658, 5670), True, 'import networkx as nx\n'), ((10611, 10627), 'scipy.special.ndtr', 'ndtr', (['analysis_z'], {}), '(analysis_z)\n', (10615, 10627), False, 'from scipy.special import ndtr\n'), ((4887, 4914), 'networkx.subgraph', 'nx.subgraph', (['Gint', 'focal_80'], {}), '(Gint, focal_80)\n', (4898, 4914), True, 'import networkx as nx\n'), ((5070, 5100), 'networkx.subgraph', 'nx.subgraph', (['Gint', 'seed_random'], {}), '(Gint, seed_random)\n', (5081, 5100), True, 'import networkx as nx\n')]
|
import os
import numpy as np
import scipy.io as sio
import skimage as sk
#from osgeo import gdal
from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score, confusion_matrix
import sklearn
import warnings
from matplotlib import pyplot as plt  # needed by plot_embedding below, which uses plt without importing it
def save_as_mat(data, name):
sio.savemat(name, {name: data})
def Read_TIFF_Image(Path):
img =[]
#gdal_header = gdal.Open(Path)
#img = gdal_header.ReadAsArray()
return img
def Compute_NDVI_Band(Image):
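    # NDVI = (NIR - red) / (NIR + red); band index 4 is treated as NIR and band index 3 as red.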
Image = Image.astype(np.float32)
nir_band = Image[4, :, :]
red_band = Image[3, :, :]
ndvi = np.zeros((Image.shape[1] , Image.shape[2] , 1))
ndvi[ : , : , 0] = np.divide((nir_band-red_band),(nir_band+red_band))
return ndvi
def compute_metrics(true_labels, predicted_labels):
conf_mat = confusion_matrix(true_labels, predicted_labels)
accuracy = 100*accuracy_score(true_labels, predicted_labels)
with warnings.catch_warnings():
warnings.filterwarnings("error")
try:
precision = 100*precision_score(true_labels, predicted_labels)
except Warning as e:
if isinstance(e, sklearn.exceptions.UndefinedMetricWarning): precision = np.nan
else: raise e
try:
recall = 100*recall_score(true_labels, predicted_labels)
except Warning as e:
if isinstance(e, sklearn.exceptions.UndefinedMetricWarning): recall = np.nan
else: raise e
try:
f1score = 100*f1_score(true_labels, predicted_labels)
except Warning as e:
if isinstance(e, sklearn.exceptions.UndefinedMetricWarning): f1score = np.nan
else: raise e
return accuracy, f1score, recall, precision, conf_mat
def Data_Augmentation_Definition(corners_coordinates):
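    # Replicate every patch's corner coordinates three times, appending a transformation index
    # (1, 2, 3) in the extra column; Data_Augmentation_Execution maps these to rot90, vertical flip
    # and horizontal flip respectively.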
num_sample = np.size(corners_coordinates , 0)
data_cols = np.size(corners_coordinates , 1)
corners_coordinates_augmented = np.zeros((3 * num_sample, data_cols + 1))
counter = 0
for s in range(num_sample):
corners_coordinates_0 = corners_coordinates[s]
# central_pixels_coor_augmented[counter, 0 : 2] = central_pixels_coor_x_0
# central_pixels_coor_augmented[counter, 2] = 0
# labels_augmented[counter, :] = labels_y_0
# counter += 1
corners_coordinates_augmented[counter, 0 : 4] = corners_coordinates_0
corners_coordinates_augmented[counter, 4] = 1
counter += 1
corners_coordinates_augmented[counter, 0 : 4] = corners_coordinates_0
corners_coordinates_augmented[counter, 4] = 2
counter += 1
corners_coordinates_augmented[counter, 0 : 4] = corners_coordinates_0
corners_coordinates_augmented[counter, 4] = 3
counter += 1
return corners_coordinates_augmented
def Data_Augmentation_Execution(data, transformation_indexs):
data_rows = np.size(data , 1)
data_cols = np.size(data , 2)
data_depth = np.size(data , 3)
num_sample = np.size(data , 0)
data_transformed = np.zeros((num_sample, data_rows, data_cols, data_depth))
counter = 0
for s in range(num_sample):
data_x_0 = data[s, :, :, :]
transformation_index = transformation_indexs[s]
#Rotating
if transformation_index == 0:
data_transformed[s, :, :, :] = data_x_0
if transformation_index == 1:
data_transformed[s, :, :, :] = np.rot90(data_x_0)
if transformation_index == 2:
data_transformed[s, :, :, :] = np.flip(data_x_0, 0)
if transformation_index == 3:
data_transformed[s, :, :, :] = np.flip(data_x_0, 1)
return data_transformed
def Patch_Extraction(data, corners_coordinates, patch_size):
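    # Crop one patch per row of corners_coordinates, interpreted as (row_start, col_start, row_end, col_end).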
data_depth = np.size(data, 2)
num_samp = np.size(corners_coordinates , 0)
patches_cointainer = np.zeros((num_samp, patch_size, patch_size, data_depth))
for i in range(num_samp):
patches_cointainer[i, :, :, :] = data[int(corners_coordinates[i , 0]) : int(corners_coordinates[i , 2]) , int(corners_coordinates[i , 1]) : int(corners_coordinates[i , 3]) , :]
return patches_cointainer
def mask_creation(mask_row, mask_col, num_patch_row, num_patch_col, Train_tiles, Valid_tiles, Undesired_tiles):
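    # Build a tile mask over the image: every tile defaults to 2 (test) and is overwritten with
    # 1 (train), 3 (validation) or 4 (undesired) according to the tile lists passed in.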
train_index = 1
teste_index = 2
valid_index = 3
undesired_index = 4
patch_dim_row = mask_row//num_patch_row
patch_dim_col = mask_col//num_patch_col
mask_array = 2 * np.ones((mask_row, mask_col))
train_mask = np.ones((patch_dim_row, patch_dim_col))
valid_mask = 3 * np.ones((patch_dim_row, patch_dim_col))
undesired_mask = 4 * np.ones((patch_dim_row, patch_dim_col))
counter_r = 1
counter = 1
for i in range(0, mask_row, patch_dim_row):
for j in range(0 , mask_col, patch_dim_col):
train = np.size(np.where(Train_tiles == counter),1)
valid = np.size(np.where(Valid_tiles == counter),1)
undesired = np.size(np.where(Undesired_tiles == counter), 1)
if train == 1:
mask_array[i : i + patch_dim_row, j : j + patch_dim_col] = train_mask
if counter_r == num_patch_row:
mask_array[i : mask_row, j : j + patch_dim_col] = np.ones((mask_row - i, patch_dim_col))
if valid == 1:
mask_array[i : i + patch_dim_row, j : j + patch_dim_col] = valid_mask
if counter_r == num_patch_row:
mask_array[i : mask_row, j : j + patch_dim_col] = 3 * np.ones((mask_row - i, patch_dim_col))
if undesired == 1:
mask_array[i : i + patch_dim_row, j : j + patch_dim_col] = undesired_mask
if counter_r == num_patch_row:
mask_array[i : mask_row, j : j + patch_dim_col] = 4 * np.ones((mask_row - i, patch_dim_col))
counter += 1
counter_r += 1
return mask_array
def Corner_Coordinates_Definition_Training(mask, last_reference, actual_reference, patch_dimension,
overlap_percent, percent_of_positive_pixels_in_actual_reference):
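    # Slide a window of size patch_dimension (with the requested overlap) over the padded references,
    # keeping a patch for training when it contains no test pixels, enough positive (deforestation)
    # pixels and more than 70% training-mask pixels; patches with more than 70% validation-mask
    # pixels go to the validation list. Class weights are derived from the mean positive percentage.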
mask_rows = np.size(mask, 0)
mask_cols = np.size(mask, 1)
# Correcting the references for convenience
last_reference[actual_reference == 2] = 1
actual_reference[actual_reference == 2] = 0
# Computing the overlaps and other things to extract patches
overlap = round(patch_dimension * overlap_percent)
overlap -= overlap % 2
stride = patch_dimension - overlap
step_row = (stride - mask_rows % stride) % stride
step_col = (stride - mask_cols % stride) % stride
k1, k2 = (mask_rows + step_row)//stride, (mask_cols + step_col)//stride
#Taking the initial coordinates
coordinates = np.zeros((k1 * k2 , 4))
counter = 0
for i in range(k1):
for j in range(k2):
coordinates[counter, 0] = i * stride
coordinates[counter, 1] = j * stride
coordinates[counter, 2] = i * stride + patch_dimension
coordinates[counter, 3] = j * stride + patch_dimension
counter += 1
pad_tuple = ((overlap//2, overlap//2 + step_row) , (overlap//2, overlap//2 + step_col))
# Making the padding procedure
# into the mask
mask_padded = np.pad(mask, pad_tuple, mode='symmetric')
# into the past deforestation reference
last_reference_padded = np.pad(last_reference, pad_tuple, mode='symmetric')
# into the actual deforestation reference
actual_reference_padded = np.pad(actual_reference, pad_tuple, mode='symmetric')
#Initializing the central pixels coordinates containers
corners_coordinates_tr = []
corners_coordinates_vl = []
class_weights = []
pad_tuple = ((overlap//2, overlap//2 + step_row) , (overlap//2, overlap//2 + step_col), (0 , 0))
# Refine the central pixels coordinates
counter_tr = 0
counter_vl = 0
positive_percent_accumulated = 0
for i in range(np.size(coordinates , 0)):
mask_reference_value = mask_padded[int(coordinates[i , 0]) : int(coordinates[i , 2]) , int(coordinates[i , 1]) : int(coordinates[i , 3])]
last_reference_value = last_reference_padded[int(coordinates[i , 0]) : int(coordinates[i , 2]) , int(coordinates[i , 1]) : int(coordinates[i , 3])]
actual_reference_value = actual_reference_padded[int(coordinates[i , 0]) : int(coordinates[i , 2]) , int(coordinates[i , 1]) : int(coordinates[i , 3])]
# Looking for a test pixels in the mask reference
test_pixels_indexs = np.transpose(np.array(np.where(mask_reference_value == 2)))
if np.size(test_pixels_indexs,0) == 0:
number_positives_actual_reference = np.sum(actual_reference_value)
percent_of_positive_pixels_in_actual_reference_i = (number_positives_actual_reference/(patch_dimension * patch_dimension)) * 100
if percent_of_positive_pixels_in_actual_reference_i > percent_of_positive_pixels_in_actual_reference:
positive_percent_accumulated += percent_of_positive_pixels_in_actual_reference_i
train_pixels_indexs = np.transpose(np.array(np.where(mask_reference_value == 1)))
percent_of_training_pixels = (train_pixels_indexs.shape[0]/(patch_dimension * patch_dimension)) * 100
if percent_of_training_pixels > 70:
corners_coordinates_tr.append(coordinates[i , :])
counter_tr += 1
if percent_of_positive_pixels_in_actual_reference_i > 3:
valid_pixels_indexs = np.transpose(np.array(np.where(mask_reference_value == 3)))
percent_of_validation_pixels = (valid_pixels_indexs.shape[0]/(patch_dimension * patch_dimension)) * 100
if percent_of_validation_pixels > 70:
corners_coordinates_vl.append(coordinates[i , :])
mean_positive_percent = positive_percent_accumulated/counter_tr
class_weights.append(mean_positive_percent/100)
class_weights.append(1 - (mean_positive_percent/100))
return corners_coordinates_tr, corners_coordinates_vl, last_reference_padded, actual_reference_padded, pad_tuple, class_weights
def Corner_Coordinates_Definition_Testing(mask, patch_dimension, overlap_percent):
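    # Same sliding-window grid as the training variant, but without any filtering: return every patch
    # corner plus the padding tuple and grid dimensions needed to reassemble predictions afterwards.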
mask_rows = np.size(mask, 0)
mask_cols = np.size(mask, 1)
# Computing the overlaps and other things to extract patches
overlap = round(patch_dimension * overlap_percent)
overlap -= overlap % 2
stride = patch_dimension - overlap
step_row = (stride - mask_rows % stride) % stride
step_col = (stride - mask_cols % stride) % stride
k1, k2 = (mask_rows + step_row)//stride, (mask_cols + step_col)//stride
#Taking the initial coordinates
coordinates = np.zeros((k1 * k2 , 4))
counter = 0
for i in range(k1):
for j in range(k2):
coordinates[counter, 0] = i * stride
coordinates[counter, 1] = j * stride
coordinates[counter, 2] = i * stride + patch_dimension
coordinates[counter, 3] = j * stride + patch_dimension
counter += 1
pad_tuple = ((overlap//2, overlap//2 + step_row) , (overlap//2, overlap//2 + step_col), (0 , 0))
return coordinates, pad_tuple, k1, k2, step_row, step_col, stride, overlap
def Classification_Maps(Predicted_labels, True_labels, central_pixels_coordinates, hit_map):
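    # Paint an RGB map of the results: green = true positive, yellow = false negative,
    # white = true negative, red = false positive.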
Classification_Map = np.zeros((hit_map.shape[0], hit_map.shape[1], 3))
TP_counter = 0
FP_counter = 0
for i in range(central_pixels_coordinates.shape[0]):
T_label = True_labels[i]
P_label = Predicted_labels[i]
if T_label == 1:
if P_label == T_label:
TP_counter += 1
                #True positive
Classification_Map[int(central_pixels_coordinates[i , 0]),int(central_pixels_coordinates[i , 1]),0] = 0
Classification_Map[int(central_pixels_coordinates[i , 0]),int(central_pixels_coordinates[i , 1]),1] = 255
Classification_Map[int(central_pixels_coordinates[i , 0]),int(central_pixels_coordinates[i , 1]),2] = 0
else:
#False Negative
Classification_Map[int(central_pixels_coordinates[i , 0]),int(central_pixels_coordinates[i , 1]),0] = 255
Classification_Map[int(central_pixels_coordinates[i , 0]),int(central_pixels_coordinates[i , 1]),1] = 255
Classification_Map[int(central_pixels_coordinates[i , 0]),int(central_pixels_coordinates[i , 1]),2] = 0
if T_label == 0:
if P_label == T_label:
#True Negative
Classification_Map[int(central_pixels_coordinates[i , 0]),int(central_pixels_coordinates[i , 1]),0] = 255
Classification_Map[int(central_pixels_coordinates[i , 0]),int(central_pixels_coordinates[i , 1]),1] = 255
Classification_Map[int(central_pixels_coordinates[i , 0]),int(central_pixels_coordinates[i , 1]),2] = 255
else:
#False Positive
FP_counter += 1
Classification_Map[int(central_pixels_coordinates[i , 0]),int(central_pixels_coordinates[i , 1]),0] = 255
Classification_Map[int(central_pixels_coordinates[i , 0]),int(central_pixels_coordinates[i , 1]),1] = 0
Classification_Map[int(central_pixels_coordinates[i , 0]),int(central_pixels_coordinates[i , 1]),2] = 0
return Classification_Map, TP_counter, FP_counter
def plot_embedding(X, y, d, title=None):
"""Plot an embedding X with the class label y colored by the domain d."""
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
# Plot colors numbers
plt.figure(figsize=(10,10))
ax = plt.subplot(111)
for i in range(X.shape[0]):
# plot colored number
plt.text(X[i, 0], X[i, 1], str(y[i]),
color=plt.cm.bwr(d[i] / 1.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
|
[
"numpy.sum",
"sklearn.metrics.accuracy_score",
"numpy.ones",
"sklearn.metrics.f1_score",
"numpy.rot90",
"numpy.pad",
"numpy.max",
"warnings.catch_warnings",
"numpy.divide",
"numpy.size",
"sklearn.metrics.recall_score",
"numpy.min",
"numpy.flip",
"warnings.filterwarnings",
"numpy.zeros",
"scipy.io.savemat",
"numpy.where",
"sklearn.metrics.precision_score",
"sklearn.metrics.confusion_matrix"
] |
[((265, 296), 'scipy.io.savemat', 'sio.savemat', (['name', '{name: data}'], {}), '(name, {name: data})\n', (276, 296), True, 'import scipy.io as sio\n'), ((563, 608), 'numpy.zeros', 'np.zeros', (['(Image.shape[1], Image.shape[2], 1)'], {}), '((Image.shape[1], Image.shape[2], 1))\n', (571, 608), True, 'import numpy as np\n'), ((634, 685), 'numpy.divide', 'np.divide', (['(nir_band - red_band)', '(nir_band + red_band)'], {}), '(nir_band - red_band, nir_band + red_band)\n', (643, 685), True, 'import numpy as np\n'), ((769, 816), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['true_labels', 'predicted_labels'], {}), '(true_labels, predicted_labels)\n', (785, 816), False, 'from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score, confusion_matrix\n'), ((1775, 1806), 'numpy.size', 'np.size', (['corners_coordinates', '(0)'], {}), '(corners_coordinates, 0)\n', (1782, 1806), True, 'import numpy as np\n'), ((1824, 1855), 'numpy.size', 'np.size', (['corners_coordinates', '(1)'], {}), '(corners_coordinates, 1)\n', (1831, 1855), True, 'import numpy as np\n'), ((1902, 1943), 'numpy.zeros', 'np.zeros', (['(3 * num_sample, data_cols + 1)'], {}), '((3 * num_sample, data_cols + 1))\n', (1910, 1943), True, 'import numpy as np\n'), ((2880, 2896), 'numpy.size', 'np.size', (['data', '(1)'], {}), '(data, 1)\n', (2887, 2896), True, 'import numpy as np\n'), ((2914, 2930), 'numpy.size', 'np.size', (['data', '(2)'], {}), '(data, 2)\n', (2921, 2930), True, 'import numpy as np\n'), ((2949, 2965), 'numpy.size', 'np.size', (['data', '(3)'], {}), '(data, 3)\n', (2956, 2965), True, 'import numpy as np\n'), ((2984, 3000), 'numpy.size', 'np.size', (['data', '(0)'], {}), '(data, 0)\n', (2991, 3000), True, 'import numpy as np\n'), ((3030, 3086), 'numpy.zeros', 'np.zeros', (['(num_sample, data_rows, data_cols, data_depth)'], {}), '((num_sample, data_rows, data_cols, data_depth))\n', (3038, 3086), True, 'import numpy as np\n'), ((3774, 3790), 'numpy.size', 'np.size', (['data', '(2)'], {}), '(data, 2)\n', (3781, 3790), True, 'import numpy as np\n'), ((3806, 3837), 'numpy.size', 'np.size', (['corners_coordinates', '(0)'], {}), '(corners_coordinates, 0)\n', (3813, 3837), True, 'import numpy as np\n'), ((3864, 3920), 'numpy.zeros', 'np.zeros', (['(num_samp, patch_size, patch_size, data_depth)'], {}), '((num_samp, patch_size, patch_size, data_depth))\n', (3872, 3920), True, 'import numpy as np\n'), ((4553, 4592), 'numpy.ones', 'np.ones', (['(patch_dim_row, patch_dim_col)'], {}), '((patch_dim_row, patch_dim_col))\n', (4560, 4592), True, 'import numpy as np\n'), ((6212, 6228), 'numpy.size', 'np.size', (['mask', '(0)'], {}), '(mask, 0)\n', (6219, 6228), True, 'import numpy as np\n'), ((6245, 6261), 'numpy.size', 'np.size', (['mask', '(1)'], {}), '(mask, 1)\n', (6252, 6261), True, 'import numpy as np\n'), ((6843, 6865), 'numpy.zeros', 'np.zeros', (['(k1 * k2, 4)'], {}), '((k1 * k2, 4))\n', (6851, 6865), True, 'import numpy as np\n'), ((7363, 7404), 'numpy.pad', 'np.pad', (['mask', 'pad_tuple'], {'mode': '"""symmetric"""'}), "(mask, pad_tuple, mode='symmetric')\n", (7369, 7404), True, 'import numpy as np\n'), ((7477, 7528), 'numpy.pad', 'np.pad', (['last_reference', 'pad_tuple'], {'mode': '"""symmetric"""'}), "(last_reference, pad_tuple, mode='symmetric')\n", (7483, 7528), True, 'import numpy as np\n'), ((7605, 7658), 'numpy.pad', 'np.pad', (['actual_reference', 'pad_tuple'], {'mode': '"""symmetric"""'}), "(actual_reference, pad_tuple, mode='symmetric')\n", (7611, 7658), True, 'import numpy as np\n'), 
((10385, 10401), 'numpy.size', 'np.size', (['mask', '(0)'], {}), '(mask, 0)\n', (10392, 10401), True, 'import numpy as np\n'), ((10418, 10434), 'numpy.size', 'np.size', (['mask', '(1)'], {}), '(mask, 1)\n', (10425, 10434), True, 'import numpy as np\n'), ((10874, 10896), 'numpy.zeros', 'np.zeros', (['(k1 * k2, 4)'], {}), '((k1 * k2, 4))\n', (10882, 10896), True, 'import numpy as np\n'), ((11541, 11590), 'numpy.zeros', 'np.zeros', (['(hit_map.shape[0], hit_map.shape[1], 3)'], {}), '((hit_map.shape[0], hit_map.shape[1], 3))\n', (11549, 11590), True, 'import numpy as np\n'), ((836, 881), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['true_labels', 'predicted_labels'], {}), '(true_labels, predicted_labels)\n', (850, 881), False, 'from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score, confusion_matrix\n'), ((891, 916), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (914, 916), False, 'import warnings\n'), ((926, 958), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""error"""'], {}), "('error')\n", (949, 958), False, 'import warnings\n'), ((4501, 4530), 'numpy.ones', 'np.ones', (['(mask_row, mask_col)'], {}), '((mask_row, mask_col))\n', (4508, 4530), True, 'import numpy as np\n'), ((4614, 4653), 'numpy.ones', 'np.ones', (['(patch_dim_row, patch_dim_col)'], {}), '((patch_dim_row, patch_dim_col))\n', (4621, 4653), True, 'import numpy as np\n'), ((4679, 4718), 'numpy.ones', 'np.ones', (['(patch_dim_row, patch_dim_col)'], {}), '((patch_dim_row, patch_dim_col))\n', (4686, 4718), True, 'import numpy as np\n'), ((8054, 8077), 'numpy.size', 'np.size', (['coordinates', '(0)'], {}), '(coordinates, 0)\n', (8061, 8077), True, 'import numpy as np\n'), ((13777, 13789), 'numpy.min', 'np.min', (['X', '(0)'], {}), '(X, 0)\n', (13783, 13789), True, 'import numpy as np\n'), ((13791, 13803), 'numpy.max', 'np.max', (['X', '(0)'], {}), '(X, 0)\n', (13797, 13803), True, 'import numpy as np\n'), ((3416, 3434), 'numpy.rot90', 'np.rot90', (['data_x_0'], {}), '(data_x_0)\n', (3424, 3434), True, 'import numpy as np\n'), ((3516, 3536), 'numpy.flip', 'np.flip', (['data_x_0', '(0)'], {}), '(data_x_0, 0)\n', (3523, 3536), True, 'import numpy as np\n'), ((3618, 3638), 'numpy.flip', 'np.flip', (['data_x_0', '(1)'], {}), '(data_x_0, 1)\n', (3625, 3638), True, 'import numpy as np\n'), ((8701, 8731), 'numpy.size', 'np.size', (['test_pixels_indexs', '(0)'], {}), '(test_pixels_indexs, 0)\n', (8708, 8731), True, 'import numpy as np\n'), ((8785, 8815), 'numpy.sum', 'np.sum', (['actual_reference_value'], {}), '(actual_reference_value)\n', (8791, 8815), True, 'import numpy as np\n'), ((1000, 1046), 'sklearn.metrics.precision_score', 'precision_score', (['true_labels', 'predicted_labels'], {}), '(true_labels, predicted_labels)\n', (1015, 1046), False, 'from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score, confusion_matrix\n'), ((1232, 1275), 'sklearn.metrics.recall_score', 'recall_score', (['true_labels', 'predicted_labels'], {}), '(true_labels, predicted_labels)\n', (1244, 1275), False, 'from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score, confusion_matrix\n'), ((1459, 1498), 'sklearn.metrics.f1_score', 'f1_score', (['true_labels', 'predicted_labels'], {}), '(true_labels, predicted_labels)\n', (1467, 1498), False, 'from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score, confusion_matrix\n'), ((4894, 4926), 'numpy.where', 'np.where', (['(Train_tiles == counter)'], {}), 
'(Train_tiles == counter)\n', (4902, 4926), True, 'import numpy as np\n'), ((4958, 4990), 'numpy.where', 'np.where', (['(Valid_tiles == counter)'], {}), '(Valid_tiles == counter)\n', (4966, 4990), True, 'import numpy as np\n'), ((5026, 5062), 'numpy.where', 'np.where', (['(Undesired_tiles == counter)'], {}), '(Undesired_tiles == counter)\n', (5034, 5062), True, 'import numpy as np\n'), ((8652, 8687), 'numpy.where', 'np.where', (['(mask_reference_value == 2)'], {}), '(mask_reference_value == 2)\n', (8660, 8687), True, 'import numpy as np\n'), ((5297, 5335), 'numpy.ones', 'np.ones', (['(mask_row - i, patch_dim_col)'], {}), '((mask_row - i, patch_dim_col))\n', (5304, 5335), True, 'import numpy as np\n'), ((5570, 5608), 'numpy.ones', 'np.ones', (['(mask_row - i, patch_dim_col)'], {}), '((mask_row - i, patch_dim_col))\n', (5577, 5608), True, 'import numpy as np\n'), ((5851, 5889), 'numpy.ones', 'np.ones', (['(mask_row - i, patch_dim_col)'], {}), '((mask_row - i, patch_dim_col))\n', (5858, 5889), True, 'import numpy as np\n'), ((9228, 9263), 'numpy.where', 'np.where', (['(mask_reference_value == 1)'], {}), '(mask_reference_value == 1)\n', (9236, 9263), True, 'import numpy as np\n'), ((9680, 9715), 'numpy.where', 'np.where', (['(mask_reference_value == 3)'], {}), '(mask_reference_value == 3)\n', (9688, 9715), True, 'import numpy as np\n')]
|
import warnings
warnings.filterwarnings("ignore")
import yfinance as yf
import numpy as np
import pandas as pd
import matplotlib
import matplotlib as mpl
matplotlib.use("Agg")
from matplotlib import style
from matplotlib import pyplot as plt
plt.style.use("ggplot")
import seaborn as sns
plt.style.use("seaborn")
sns.set_palette("cubehelix")
plt.rcParams["figure.figsize"] = [18, 10]
plt.rcParams["figure.dpi"] = 150
sm, med, lg = 10, 15, 20
plt.rc("font", size=sm) # controls default text sizes
plt.rc("axes", titlesize=med) # fontsize of the axes title
plt.rc("axes", labelsize=med) # fontsize of the x & y labels
plt.rc("xtick", labelsize=sm) # fontsize of the tick labels
plt.rc("ytick", labelsize=sm) # fontsize of the tick labels
plt.rc("legend", fontsize=sm) # legend fontsize
plt.rc("figure", titlesize=lg) # fontsize of the figure title
plt.rc("axes", linewidth=2) # linewidth of plot lines
import streamlit as st
from pathlib import Path
path = str(Path.cwd()) + "/"
from datetime import datetime
today = str(datetime.now())[:10]
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
# * * * * * * * * * * * * * * * * * * * * *
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
class The_Efficient_Frontier(object):
def __init__(self, RISKY_ASSETS):
self.RISKY_ASSETS = RISKY_ASSETS
self.prices_df = yf.download(self.RISKY_ASSETS, start="2020-01-01")["Adj Close"]
self.N_PORTFOLIOS = 10 ** 5
self.N_DAYS = 252
self.n_assets = len(self.RISKY_ASSETS)
self.string = ""
for r in self.RISKY_ASSETS:
self.string += r + "_"
def ef_setup(self):
self.returns_df = self.prices_df.pct_change().dropna()
self.avg_returns = self.returns_df.mean() * self.N_DAYS
self.cov_mat = self.returns_df.cov() * self.N_DAYS
# simulate random portfolio weights:
np.random.seed(42)
self.weights = np.random.random(size=(self.N_PORTFOLIOS, self.n_assets))
self.weights /= np.sum(self.weights, axis=1)[:, np.newaxis]
# calculate portfolio metrics:
self.portf_rtns = np.dot(self.weights, self.avg_returns)
self.portf_vol = []
for i in range(0, len(self.weights)):
self.portf_vol.append(
np.sqrt(
np.dot(self.weights[i].T, np.dot(self.cov_mat, self.weights[i]))
)
)
self.portf_vol = np.array(self.portf_vol)
self.portf_sharpe_ratio = self.portf_rtns / self.portf_vol
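        # Sharpe ratio here assumes a zero risk-free rate (annualised return / annualised volatility).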
# create joint dataframe with all data:
self.portf_results_df = pd.DataFrame(
{
"returns": self.portf_rtns,
"volatility": self.portf_vol,
"sharpe_ratio": self.portf_sharpe_ratio,
}
)
# locate points creating efficient frontier:
self.N_POINTS = 100
self.portf_vol_ef = []
self.indices_to_skip = []
self.portf_rtns_ef = np.linspace(
self.portf_results_df.returns.min(),
self.portf_results_df.returns.max(),
self.N_POINTS,
)
self.portf_rtns_ef = np.round(self.portf_rtns_ef, 2)
self.portf_rtns = np.round(self.portf_rtns, 2)
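        # For each target return level keep the lowest simulated volatility; levels with no matching
        # portfolio (after rounding to 2 decimals) are skipped.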
for point_index in range(self.N_POINTS):
if self.portf_rtns_ef[point_index] not in self.portf_rtns:
self.indices_to_skip.append(point_index)
continue
self.matched_ind = np.where(
self.portf_rtns == self.portf_rtns_ef[point_index]
)
self.portf_vol_ef.append(np.min(self.portf_vol[self.matched_ind]))
self.portf_rtns_ef = np.delete(self.portf_rtns_ef, self.indices_to_skip)
def results_maxSharpeRatio(self):
self.ef_setup()
self.max_sharpe_ind = np.argmax(self.portf_results_df.sharpe_ratio)
self.max_sharpe_portf = self.portf_results_df.loc[self.max_sharpe_ind]
self.min_vol_ind = np.argmin(self.portf_results_df.volatility)
self.min_vol_portf = self.portf_results_df.loc[self.min_vol_ind]
st.header("- - - Maximum Sharpe Ratio portfolio - - -")
st.subheader("Performance:")
for index, value in self.max_sharpe_portf.items():
st.write(f"{index}: {100 * value:.2f}% ", end="", flush=True)
st.subheader("\nWeights")
for x, y in zip(
self.RISKY_ASSETS,
self.weights[np.argmax(self.portf_results_df.sharpe_ratio)],
):
st.write(f"{x}: {100*y:.2f}% ", end="", flush=True)
def results_minVolatility(self):
self.results_maxSharpeRatio()
st.header("- - - Minimum Volatility portfolio - - -")
st.subheader("Performance:")
for index, value in self.min_vol_portf.items():
st.write(f"{index}: {100 * value:.2f}% ", end="", flush=True)
st.subheader("\nWeights")
for x, y in zip(
self.RISKY_ASSETS, self.weights[np.argmin(self.portf_results_df.volatility)]
):
st.write(f"{x}: {100*y:.2f}% ", end="", flush=True)
def final_plot(self):
self.results_minVolatility()
fig, ax = plt.subplots()
self.portf_results_df.plot(
kind="scatter",
x="volatility",
y="returns",
c="sharpe_ratio",
cmap="RdYlGn",
edgecolors="black",
ax=ax,
)
ax.scatter(
x=self.max_sharpe_portf.volatility,
y=self.max_sharpe_portf.returns,
c="black",
marker="X",
s=175,
label="Max Sharpe Ratio",
)
ax.scatter(
x=self.min_vol_portf.volatility,
y=self.min_vol_portf.returns,
c="black",
marker="P",
s=175,
label="Min Volatility",
)
self.portf_results_df.plot(
kind="scatter",
x="volatility",
y="returns",
c="sharpe_ratio",
cmap="RdYlGn",
edgecolors="black",
ax=ax,
)
ax.set(
xlabel="Volatility", ylabel="Expected Returns", title="Efficient Frontier"
)
ax.plot(self.portf_vol_ef, self.portf_rtns_ef, "b--")
for asset_index in range(self.n_assets):
ax.scatter(
x=np.sqrt(self.cov_mat.iloc[asset_index, asset_index]),
y=self.avg_returns[asset_index],
# marker=self.MARKS[asset_index],
s=100,
color="black",
label=self.RISKY_ASSETS[asset_index],
)
ax.set(
xlabel="Volatility",
ylabel="Expected Returns",
title=f"Efficient Frontier", # {self.string}",
)
for label in ax.get_xticklabels() + ax.get_yticklabels():
label.set_fontsize(15)
ax.grid(True, color="k", linestyle="-", linewidth=1, alpha=0.3)
ax.legend(loc="best", prop={"size": 16})
plt.tight_layout()
st.pyplot(fig)
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
# * * * * * * * * * * * * * * * * * * * * *
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
if __name__ == "__main__":
RISKY_ASSETS = []
manys = [2, 4, 6, 8, 10, 12, 14]
how_many = int(
st.sidebar.selectbox("Select Number Of Securities For Portfolio:", manys)
)
# how_many = int(input('How Many Stocks In Your Portfolio? (up to 14): '))
for i in range(1, how_many + 1):
tic = input(f"Enter Stock {i}: ")
RISKY_ASSETS.append(tic)
RISKY_ASSETS.sort()
marks0 = ["o", "^", "s", "p", "h", "8", "*", "d", ">", "v", "<", "1", "2", "3", "4"]
mark = marks0[: len(RISKY_ASSETS) + 1]
The_Efficient_Frontier(RISKY_ASSETS).final_plot()
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
# * * * * * * * * * * * * * * * * * * * * *
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
|
[
"numpy.random.seed",
"numpy.sum",
"numpy.argmax",
"numpy.argmin",
"matplotlib.pyplot.style.use",
"streamlit.sidebar.selectbox",
"numpy.round",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame",
"streamlit.subheader",
"yfinance.download",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.subplots",
"datetime.datetime.now",
"streamlit.header",
"numpy.min",
"matplotlib.use",
"streamlit.pyplot",
"numpy.dot",
"seaborn.set_palette",
"numpy.delete",
"warnings.filterwarnings",
"streamlit.write",
"numpy.random.random",
"numpy.array",
"numpy.where",
"pathlib.Path.cwd",
"numpy.sqrt"
] |
[((17, 50), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (40, 50), False, 'import warnings\n'), ((157, 178), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (171, 178), False, 'import matplotlib\n'), ((246, 269), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (259, 269), True, 'from matplotlib import pyplot as plt\n'), ((293, 317), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (306, 317), True, 'from matplotlib import pyplot as plt\n'), ((318, 346), 'seaborn.set_palette', 'sns.set_palette', (['"""cubehelix"""'], {}), "('cubehelix')\n", (333, 346), True, 'import seaborn as sns\n'), ((448, 471), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': 'sm'}), "('font', size=sm)\n", (454, 471), True, 'from matplotlib import pyplot as plt\n'), ((503, 532), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'titlesize': 'med'}), "('axes', titlesize=med)\n", (509, 532), True, 'from matplotlib import pyplot as plt\n'), ((563, 592), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': 'med'}), "('axes', labelsize=med)\n", (569, 592), True, 'from matplotlib import pyplot as plt\n'), ((625, 654), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': 'sm'}), "('xtick', labelsize=sm)\n", (631, 654), True, 'from matplotlib import pyplot as plt\n'), ((686, 715), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': 'sm'}), "('ytick', labelsize=sm)\n", (692, 715), True, 'from matplotlib import pyplot as plt\n'), ((747, 776), 'matplotlib.pyplot.rc', 'plt.rc', (['"""legend"""'], {'fontsize': 'sm'}), "('legend', fontsize=sm)\n", (753, 776), True, 'from matplotlib import pyplot as plt\n'), ((796, 826), 'matplotlib.pyplot.rc', 'plt.rc', (['"""figure"""'], {'titlesize': 'lg'}), "('figure', titlesize=lg)\n", (802, 826), True, 'from matplotlib import pyplot as plt\n'), ((859, 886), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'linewidth': '(2)'}), "('axes', linewidth=2)\n", (865, 886), True, 'from matplotlib import pyplot as plt\n'), ((976, 986), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (984, 986), False, 'from pathlib import Path\n'), ((1037, 1051), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1049, 1051), False, 'from datetime import datetime\n'), ((2110, 2128), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (2124, 2128), True, 'import numpy as np\n'), ((2152, 2209), 'numpy.random.random', 'np.random.random', ([], {'size': '(self.N_PORTFOLIOS, self.n_assets)'}), '(size=(self.N_PORTFOLIOS, self.n_assets))\n', (2168, 2209), True, 'import numpy as np\n'), ((2343, 2381), 'numpy.dot', 'np.dot', (['self.weights', 'self.avg_returns'], {}), '(self.weights, self.avg_returns)\n', (2349, 2381), True, 'import numpy as np\n'), ((2658, 2682), 'numpy.array', 'np.array', (['self.portf_vol'], {}), '(self.portf_vol)\n', (2666, 2682), True, 'import numpy as np\n'), ((2830, 2947), 'pandas.DataFrame', 'pd.DataFrame', (["{'returns': self.portf_rtns, 'volatility': self.portf_vol, 'sharpe_ratio':\n self.portf_sharpe_ratio}"], {}), "({'returns': self.portf_rtns, 'volatility': self.portf_vol,\n 'sharpe_ratio': self.portf_sharpe_ratio})\n", (2842, 2947), True, 'import pandas as pd\n'), ((3381, 3412), 'numpy.round', 'np.round', (['self.portf_rtns_ef', '(2)'], {}), '(self.portf_rtns_ef, 2)\n', (3389, 3412), True, 'import numpy as np\n'), ((3439, 3467), 'numpy.round', 'np.round', 
(['self.portf_rtns', '(2)'], {}), '(self.portf_rtns, 2)\n', (3447, 3467), True, 'import numpy as np\n'), ((3900, 3951), 'numpy.delete', 'np.delete', (['self.portf_rtns_ef', 'self.indices_to_skip'], {}), '(self.portf_rtns_ef, self.indices_to_skip)\n', (3909, 3951), True, 'import numpy as np\n'), ((4045, 4090), 'numpy.argmax', 'np.argmax', (['self.portf_results_df.sharpe_ratio'], {}), '(self.portf_results_df.sharpe_ratio)\n', (4054, 4090), True, 'import numpy as np\n'), ((4197, 4240), 'numpy.argmin', 'np.argmin', (['self.portf_results_df.volatility'], {}), '(self.portf_results_df.volatility)\n', (4206, 4240), True, 'import numpy as np\n'), ((4322, 4377), 'streamlit.header', 'st.header', (['"""- - - Maximum Sharpe Ratio portfolio - - -"""'], {}), "('- - - Maximum Sharpe Ratio portfolio - - -')\n", (4331, 4377), True, 'import streamlit as st\n'), ((4386, 4414), 'streamlit.subheader', 'st.subheader', (['"""Performance:"""'], {}), "('Performance:')\n", (4398, 4414), True, 'import streamlit as st\n'), ((4556, 4581), 'streamlit.subheader', 'st.subheader', (['"""\nWeights"""'], {}), "('\\nWeights')\n", (4568, 4581), True, 'import streamlit as st\n'), ((4870, 4923), 'streamlit.header', 'st.header', (['"""- - - Minimum Volatility portfolio - - -"""'], {}), "('- - - Minimum Volatility portfolio - - -')\n", (4879, 4923), True, 'import streamlit as st\n'), ((4932, 4960), 'streamlit.subheader', 'st.subheader', (['"""Performance:"""'], {}), "('Performance:')\n", (4944, 4960), True, 'import streamlit as st\n'), ((5099, 5124), 'streamlit.subheader', 'st.subheader', (['"""\nWeights"""'], {}), "('\\nWeights')\n", (5111, 5124), True, 'import streamlit as st\n'), ((5396, 5410), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5408, 5410), True, 'from matplotlib import pyplot as plt\n'), ((7256, 7274), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7272, 7274), True, 'from matplotlib import pyplot as plt\n'), ((7283, 7297), 'streamlit.pyplot', 'st.pyplot', (['fig'], {}), '(fig)\n', (7292, 7297), True, 'import streamlit as st\n'), ((7789, 7862), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Select Number Of Securities For Portfolio:"""', 'manys'], {}), "('Select Number Of Securities For Portfolio:', manys)\n", (7809, 7862), True, 'import streamlit as st\n'), ((1576, 1626), 'yfinance.download', 'yf.download', (['self.RISKY_ASSETS'], {'start': '"""2020-01-01"""'}), "(self.RISKY_ASSETS, start='2020-01-01')\n", (1587, 1626), True, 'import yfinance as yf\n'), ((2234, 2262), 'numpy.sum', 'np.sum', (['self.weights'], {'axis': '(1)'}), '(self.weights, axis=1)\n', (2240, 2262), True, 'import numpy as np\n'), ((3701, 3761), 'numpy.where', 'np.where', (['(self.portf_rtns == self.portf_rtns_ef[point_index])'], {}), '(self.portf_rtns == self.portf_rtns_ef[point_index])\n', (3709, 3761), True, 'import numpy as np\n'), ((4486, 4547), 'streamlit.write', 'st.write', (['f"""{index}: {100 * value:.2f}% """'], {'end': '""""""', 'flush': '(True)'}), "(f'{index}: {100 * value:.2f}% ', end='', flush=True)\n", (4494, 4547), True, 'import streamlit as st\n'), ((4734, 4787), 'streamlit.write', 'st.write', (['f"""{x}: {100 * y:.2f}% """'], {'end': '""""""', 'flush': '(True)'}), "(f'{x}: {100 * y:.2f}% ', end='', flush=True)\n", (4742, 4787), True, 'import streamlit as st\n'), ((5029, 5090), 'streamlit.write', 'st.write', (['f"""{index}: {100 * value:.2f}% """'], {'end': '""""""', 'flush': '(True)'}), "(f'{index}: {100 * value:.2f}% ', end='', flush=True)\n", (5037, 5090), True, 
'import streamlit as st\n'), ((5262, 5315), 'streamlit.write', 'st.write', (['f"""{x}: {100 * y:.2f}% """'], {'end': '""""""', 'flush': '(True)'}), "(f'{x}: {100 * y:.2f}% ', end='', flush=True)\n", (5270, 5315), True, 'import streamlit as st\n'), ((3829, 3869), 'numpy.min', 'np.min', (['self.portf_vol[self.matched_ind]'], {}), '(self.portf_vol[self.matched_ind])\n', (3835, 3869), True, 'import numpy as np\n'), ((4663, 4708), 'numpy.argmax', 'np.argmax', (['self.portf_results_df.sharpe_ratio'], {}), '(self.portf_results_df.sharpe_ratio)\n', (4672, 4708), True, 'import numpy as np\n'), ((5194, 5237), 'numpy.argmin', 'np.argmin', (['self.portf_results_df.volatility'], {}), '(self.portf_results_df.volatility)\n', (5203, 5237), True, 'import numpy as np\n'), ((6593, 6645), 'numpy.sqrt', 'np.sqrt', (['self.cov_mat.iloc[asset_index, asset_index]'], {}), '(self.cov_mat.iloc[asset_index, asset_index])\n', (6600, 6645), True, 'import numpy as np\n'), ((2562, 2599), 'numpy.dot', 'np.dot', (['self.cov_mat', 'self.weights[i]'], {}), '(self.cov_mat, self.weights[i])\n', (2568, 2599), True, 'import numpy as np\n')]
|
from stl import mesh
import math
import numpy
# Create 3 faces of a cube
data = numpy.zeros(6, dtype=mesh.Mesh.dtype)
# Top of the cube
data['vectors'][0] = numpy.array([[0, 1, 1],
[1, 0, 1],
[0, 0, 1]])
data['vectors'][1] = numpy.array([[1, 0, 1],
[0, 1, 1],
[1, 1, 1]])
# Front face
data['vectors'][2] = numpy.array([[1, 0, 0],
[1, 0, 1],
[1, 1, 0]])
data['vectors'][3] = numpy.array([[1, 1, 1],
[1, 0, 1],
[1, 1, 0]])
# Left face
data['vectors'][4] = numpy.array([[0, 0, 0],
[1, 0, 0],
[1, 0, 1]])
data['vectors'][5] = numpy.array([[0, 0, 0],
[0, 0, 1],
[1, 0, 1]])
# Since the cube faces are from 0 to 1 we can move it to the middle by
# subtracting .5
data['vectors'] -= .5
# Generate 4 different meshes so we can rotate them later
meshes = [mesh.Mesh(data.copy()) for _ in range(4)]
# Rotate 90 degrees over the Y axis
meshes[0].rotate([0.0, 0.5, 0.0], math.radians(90))
# Translate 2 points over the X axis
meshes[1].x += 2
# Rotate 90 degrees over the X axis
meshes[2].rotate([0.5, 0.0, 0.0], math.radians(90))
# Translate 2 points over the X and Y points
meshes[2].x += 2
meshes[2].y += 2
# Rotate 90 degrees over the X and Y axis
meshes[3].rotate([0.5, 0.0, 0.0], math.radians(90))
meshes[3].rotate([0.0, 0.5, 0.0], math.radians(90))
# Translate 2 points over the Y axis
meshes[3].y += 2
# Optionally render the rotated cube faces
from matplotlib import pyplot
from mpl_toolkits import mplot3d
# Create a new plot
figure = pyplot.figure()
axes = mplot3d.Axes3D(figure)
# Render the cube faces
for m in meshes:
axes.add_collection3d(mplot3d.art3d.Poly3DCollection(m.vectors))
# Auto scale to the mesh size
scale = numpy.concatenate([m.points for m in meshes]).flatten(-1)
axes.auto_scale_xyz(scale, scale, scale)
# Show the plot to the screen
pyplot.show()
|
[
"matplotlib.pyplot.show",
"mpl_toolkits.mplot3d.Axes3D",
"math.radians",
"numpy.zeros",
"mpl_toolkits.mplot3d.art3d.Poly3DCollection",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.concatenate"
] |
[((81, 118), 'numpy.zeros', 'numpy.zeros', (['(6)'], {'dtype': 'mesh.Mesh.dtype'}), '(6, dtype=mesh.Mesh.dtype)\n', (92, 118), False, 'import numpy\n'), ((159, 205), 'numpy.array', 'numpy.array', (['[[0, 1, 1], [1, 0, 1], [0, 0, 1]]'], {}), '([[0, 1, 1], [1, 0, 1], [0, 0, 1]])\n', (170, 205), False, 'import numpy\n'), ((295, 341), 'numpy.array', 'numpy.array', (['[[1, 0, 1], [0, 1, 1], [1, 1, 1]]'], {}), '([[1, 0, 1], [0, 1, 1], [1, 1, 1]])\n', (306, 341), False, 'import numpy\n'), ((444, 490), 'numpy.array', 'numpy.array', (['[[1, 0, 0], [1, 0, 1], [1, 1, 0]]'], {}), '([[1, 0, 0], [1, 0, 1], [1, 1, 0]])\n', (455, 490), False, 'import numpy\n'), ((580, 626), 'numpy.array', 'numpy.array', (['[[1, 1, 1], [1, 0, 1], [1, 1, 0]]'], {}), '([[1, 1, 1], [1, 0, 1], [1, 1, 0]])\n', (591, 626), False, 'import numpy\n'), ((728, 774), 'numpy.array', 'numpy.array', (['[[0, 0, 0], [1, 0, 0], [1, 0, 1]]'], {}), '([[0, 0, 0], [1, 0, 0], [1, 0, 1]])\n', (739, 774), False, 'import numpy\n'), ((864, 910), 'numpy.array', 'numpy.array', (['[[0, 0, 0], [0, 0, 1], [1, 0, 1]]'], {}), '([[0, 0, 0], [0, 0, 1], [1, 0, 1]])\n', (875, 910), False, 'import numpy\n'), ((1853, 1868), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (1866, 1868), False, 'from matplotlib import pyplot\n'), ((1876, 1898), 'mpl_toolkits.mplot3d.Axes3D', 'mplot3d.Axes3D', (['figure'], {}), '(figure)\n', (1890, 1898), False, 'from mpl_toolkits import mplot3d\n'), ((2179, 2192), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (2190, 2192), False, 'from matplotlib import pyplot\n'), ((1273, 1289), 'math.radians', 'math.radians', (['(90)'], {}), '(90)\n', (1285, 1289), False, 'import math\n'), ((1417, 1433), 'math.radians', 'math.radians', (['(90)'], {}), '(90)\n', (1429, 1433), False, 'import math\n'), ((1591, 1607), 'math.radians', 'math.radians', (['(90)'], {}), '(90)\n', (1603, 1607), False, 'import math\n'), ((1643, 1659), 'math.radians', 'math.radians', (['(90)'], {}), '(90)\n', (1655, 1659), False, 'import math\n'), ((1967, 2008), 'mpl_toolkits.mplot3d.art3d.Poly3DCollection', 'mplot3d.art3d.Poly3DCollection', (['m.vectors'], {}), '(m.vectors)\n', (1997, 2008), False, 'from mpl_toolkits import mplot3d\n'), ((2049, 2094), 'numpy.concatenate', 'numpy.concatenate', (['[m.points for m in meshes]'], {}), '([m.points for m in meshes])\n', (2066, 2094), False, 'import numpy\n')]
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from arch.api import federation
from arch.api.utils import log_utils
from federatedml.logistic_regression.hetero_logistic_regression.hetero_lr_base import HeteroLRBase
from federatedml.optim.gradient import HeteroLogisticGradient
from federatedml.secureprotol import EncryptModeCalculator
from federatedml.statistic.data_overview import rubbish_clear
from federatedml.util import consts
from federatedml.statistic import data_overview
LOGGER = log_utils.getLogger()
class HeteroLRHost(HeteroLRBase):
def __init__(self):
super(HeteroLRHost, self).__init__()
self.batch_num = None
self.batch_index_list = []
self.role = consts.HOST
def compute_forward(self, data_instances, coef_, intercept_, batch_index=-1):
"""
Compute W * X + b and (W * X + b)^2, where X is the input data, W is the coefficient of lr,
        and b is the intercept
Parameters
----------
data_instances: DTable of Instance, input data
coef_: list, coefficient of lr
        intercept_: float, the intercept of lr
"""
wx = self.compute_wx(data_instances, coef_, intercept_)
en_wx = self.encrypted_calculator[batch_index].encrypt(wx)
wx_square = wx.mapValues(lambda v: np.square(v))
en_wx_square = self.encrypted_calculator[batch_index].encrypt(wx_square)
host_forward = en_wx.join(en_wx_square, lambda wx, wx_square: (wx, wx_square))
# temporary resource recovery and will be removed in the future
rubbish_list = [wx,
en_wx,
wx_square,
en_wx_square
]
rubbish_clear(rubbish_list)
return host_forward
def fit(self, data_instances):
"""
Train lr model of role host
Parameters
----------
data_instances: DTable of Instance, input data
"""
LOGGER.info("Enter hetero_lr host")
self._abnormal_detection(data_instances)
self.header = self.get_header(data_instances)
public_key = federation.get(name=self.transfer_variable.paillier_pubkey.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.paillier_pubkey),
idx=0)
LOGGER.info("Get public_key from arbiter:{}".format(public_key))
self.encrypt_operator.set_public_key(public_key)
batch_info = federation.get(name=self.transfer_variable.batch_info.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.batch_info),
idx=0)
LOGGER.info("Get batch_info from guest:" + str(batch_info))
self.batch_size = batch_info["batch_size"]
self.batch_num = batch_info["batch_num"]
if self.batch_size < consts.MIN_BATCH_SIZE and self.batch_size != -1:
raise ValueError(
"Batch size get from guest should not less than 10, except -1, batch_size is {}".format(
self.batch_size))
self.encrypted_calculator = [EncryptModeCalculator(self.encrypt_operator,
self.encrypted_mode_calculator_param.mode,
self.encrypted_mode_calculator_param.re_encrypted_rate) for _
in range(self.batch_num)]
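        # one EncryptModeCalculator is kept per mini-batch index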
LOGGER.info("Start initialize model.")
model_shape = self.get_features_shape(data_instances)
if self.init_param_obj.fit_intercept:
self.init_param_obj.fit_intercept = False
if self.fit_intercept:
self.fit_intercept = False
self.coef_ = self.initializer.init_model(model_shape, init_params=self.init_param_obj)
self.n_iter_ = 0
index_data_inst_map = {}
while self.n_iter_ < self.max_iter:
LOGGER.info("iter:" + str(self.n_iter_))
batch_index = 0
while batch_index < self.batch_num:
LOGGER.info("batch:{}".format(batch_index))
# set batch_data
if len(self.batch_index_list) < self.batch_num:
batch_data_index = federation.get(name=self.transfer_variable.batch_data_index.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.batch_data_index, self.n_iter_,
batch_index),
idx=0)
LOGGER.info("Get batch_index from Guest")
self.batch_index_list.append(batch_data_index)
else:
batch_data_index = self.batch_index_list[batch_index]
# Get mini-batch train data
if len(index_data_inst_map) < self.batch_num:
batch_data_inst = batch_data_index.join(data_instances, lambda g, d: d)
index_data_inst_map[batch_index] = batch_data_inst
else:
batch_data_inst = index_data_inst_map[batch_index]
LOGGER.info("batch_data_inst size:{}".format(batch_data_inst.count()))
# transforms features of raw input 'batch_data_inst' into more representative features 'batch_feat_inst'
batch_feat_inst = self.transform(batch_data_inst)
# compute forward
host_forward = self.compute_forward(batch_feat_inst, self.coef_, self.intercept_, batch_index)
federation.remote(host_forward,
name=self.transfer_variable.host_forward_dict.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.host_forward_dict,
self.n_iter_,
batch_index),
role=consts.GUEST,
idx=0)
LOGGER.info("Remote host_forward to guest")
# compute host gradient
fore_gradient = federation.get(name=self.transfer_variable.fore_gradient.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.fore_gradient, self.n_iter_, batch_index),
idx=0)
LOGGER.info("Get fore_gradient from guest")
if self.gradient_operator is None:
self.gradient_operator = HeteroLogisticGradient(self.encrypt_operator)
host_gradient = self.gradient_operator.compute_gradient(batch_feat_inst, fore_gradient,
fit_intercept=False)
# regulation if necessary
if self.updater is not None:
loss_regular = self.updater.loss_norm(self.coef_)
en_loss_regular = self.encrypt_operator.encrypt(loss_regular)
federation.remote(en_loss_regular,
name=self.transfer_variable.host_loss_regular.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.host_loss_regular,
self.n_iter_,
batch_index),
role=consts.GUEST,
idx=0)
LOGGER.info("Remote host_loss_regular to guest")
federation.remote(host_gradient,
name=self.transfer_variable.host_gradient.name,
tag=self.transfer_variable.generate_transferid(self.transfer_variable.host_gradient,
self.n_iter_,
batch_index),
role=consts.ARBITER,
idx=0)
LOGGER.info("Remote host_gradient to arbiter")
# Get optimize host gradient and update model
optim_host_gradient = federation.get(name=self.transfer_variable.host_optim_gradient.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.host_optim_gradient, self.n_iter_,
batch_index),
idx=0)
LOGGER.info("Get optim_host_gradient from arbiter")
LOGGER.info("update_model")
self.update_model(optim_host_gradient)
# update local model that transforms features of raw input 'batch_data_inst'
training_info = {"iteration": self.n_iter_, "batch_index": batch_index}
self.update_local_model(fore_gradient, batch_data_inst, self.coef_, **training_info)
batch_index += 1
# temporary resource recovery and will be removed in the future
rubbish_list = [host_forward,
fore_gradient
]
data_overview.rubbish_clear(rubbish_list)
is_stopped = federation.get(name=self.transfer_variable.is_stopped.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.is_stopped, self.n_iter_, batch_index),
idx=0)
LOGGER.info("Get is_stop flag from arbiter:{}".format(is_stopped))
self.n_iter_ += 1
if is_stopped:
LOGGER.info("Get stop signal from arbiter, model is converged, iter:{}".format(self.n_iter_))
break
LOGGER.info("Reach max iter {}, train model finish!".format(self.max_iter))
def predict(self, data_instances):
"""
Prediction of lr
Parameters
----------
data_instances:DTable of Instance, input data
"""
LOGGER.info("Start predict ...")
data_features = self.transform(data_instances)
prob_host = self.compute_wx(data_features, self.coef_, self.intercept_)
federation.remote(prob_host,
name=self.transfer_variable.host_prob.name,
tag=self.transfer_variable.generate_transferid(
self.transfer_variable.host_prob),
role=consts.GUEST,
idx=0)
LOGGER.info("Remote probability to Guest")
|
[
"arch.api.utils.log_utils.getLogger",
"numpy.square",
"federatedml.statistic.data_overview.rubbish_clear",
"federatedml.optim.gradient.HeteroLogisticGradient",
"federatedml.secureprotol.EncryptModeCalculator"
] |
[((1082, 1103), 'arch.api.utils.log_utils.getLogger', 'log_utils.getLogger', ([], {}), '()\n', (1101, 1103), False, 'from arch.api.utils import log_utils\n'), ((2325, 2352), 'federatedml.statistic.data_overview.rubbish_clear', 'rubbish_clear', (['rubbish_list'], {}), '(rubbish_list)\n', (2338, 2352), False, 'from federatedml.statistic.data_overview import rubbish_clear\n'), ((3846, 3999), 'federatedml.secureprotol.EncryptModeCalculator', 'EncryptModeCalculator', (['self.encrypt_operator', 'self.encrypted_mode_calculator_param.mode', 'self.encrypted_mode_calculator_param.re_encrypted_rate'], {}), '(self.encrypt_operator, self.\n encrypted_mode_calculator_param.mode, self.\n encrypted_mode_calculator_param.re_encrypted_rate)\n', (3867, 3999), False, 'from federatedml.secureprotol import EncryptModeCalculator\n'), ((1904, 1916), 'numpy.square', 'np.square', (['v'], {}), '(v)\n', (1913, 1916), True, 'import numpy as np\n'), ((10424, 10465), 'federatedml.statistic.data_overview.rubbish_clear', 'data_overview.rubbish_clear', (['rubbish_list'], {}), '(rubbish_list)\n', (10451, 10465), False, 'from federatedml.statistic import data_overview\n'), ((7519, 7564), 'federatedml.optim.gradient.HeteroLogisticGradient', 'HeteroLogisticGradient', (['self.encrypt_operator'], {}), '(self.encrypt_operator)\n', (7541, 7564), False, 'from federatedml.optim.gradient import HeteroLogisticGradient\n')]
|
import numpy as np
import cv2
# Identify pixels above the threshold
# Threshold of RGB > 160 does a nice job of identifying ground pixels only
def color_thresh(img, rgb_thresh=(160, 160, 160),above = True):
# Create an array of zeros same xy size as img, but single channel
color_select = np.zeros_like(img[:,:,0])
# Require that each pixel be above all three threshold values in RGB
# above_thresh will now contain a boolean array with "True"
# where threshold was met
above_thresh = (img[:,:,0] > rgb_thresh[0]) \
& (img[:,:,1] > rgb_thresh[1]) \
& (img[:,:,2] > rgb_thresh[2])
below_thresh = (img[:,:,0] < rgb_thresh[0]) \
& (img[:,:,1] < rgb_thresh[1]) \
& (img[:,:,2] < rgb_thresh[2])
# Index the array of zeros with the boolean array and set to 1
if above:
color_select[above_thresh] = 1
else:
color_select[below_thresh] = 1
# Return the binary image
return color_select
# Identify pixels within a range of threshold
# Threshold of RGB > 160 does a nice job of identifying ground pixels only
def color_thresh_range(img, rgb_thresh_max=(255, 255, 80), rgb_thresh_min=(140, 110, 0)):
# Create an array of zeros same xy size as img, but single channel
color_select = np.zeros_like(img[:,:,0])
    # Require that each pixel fall between the min and max threshold values in all three RGB channels
    # thresh will now contain a boolean array with "True"
    # where the threshold range was met
thresh = (img[:,:,0] > rgb_thresh_min[0]) \
& (img[:,:,1] > rgb_thresh_min[1]) \
& (img[:,:,2] > rgb_thresh_min[2]) \
& (img[:,:,0] < rgb_thresh_max[0]) \
& (img[:,:,1] < rgb_thresh_max[1]) \
& (img[:,:,2] < rgb_thresh_max[2])
# Index the array of zeros with the boolean array and set to 1
color_select[thresh] = 1
# Return the binary image
return color_select
# Define a function to convert from image coords to rover coords
def rover_coords(binary_img):
# Identify nonzero pixels
ypos, xpos = binary_img.nonzero()
# Calculate pixel positions with reference to the rover position being at the
# center bottom of the image.
x_pixel = -(ypos - binary_img.shape[0]).astype(np.float)
y_pixel = -(xpos - binary_img.shape[1]/2 ).astype(np.float)
return x_pixel, y_pixel
# Define a function to convert to radial coords in rover space
def to_polar_coords(x_pixel, y_pixel):
# Convert (x_pixel, y_pixel) to (distance, angle)
# in polar coordinates in rover space
# Calculate distance to each pixel
dist = np.sqrt(x_pixel**2 + y_pixel**2)
# Calculate angle away from vertical for each pixel
angles = np.arctan2(y_pixel, x_pixel)
return dist, angles
# Define a function to map rover space pixels to world space
def rotate_pix(xpix, ypix, yaw):
# Convert yaw to radians
yaw_rad = yaw * np.pi / 180
xpix_rotated = (xpix * np.cos(yaw_rad)) - (ypix * np.sin(yaw_rad))
ypix_rotated = (xpix * np.sin(yaw_rad)) + (ypix * np.cos(yaw_rad))
# Return the result
return xpix_rotated, ypix_rotated
def translate_pix(xpix_rot, ypix_rot, xpos, ypos, scale):
# Apply a scaling and a translation
xpix_translated = (xpix_rot / scale) + xpos
ypix_translated = (ypix_rot / scale) + ypos
# Return the result
return xpix_translated, ypix_translated
# Define a function to apply rotation and translation (and clipping)
# Once you define the two functions above this function should work
def pix_to_world(xpix, ypix, xpos, ypos, yaw, world_size, scale):
# Apply rotation
xpix_rot, ypix_rot = rotate_pix(xpix, ypix, yaw)
# Apply translation
xpix_tran, ypix_tran = translate_pix(xpix_rot, ypix_rot, xpos, ypos, scale)
# Perform rotation, translation and clipping all at once
x_pix_world = np.clip(np.int_(xpix_tran), 0, world_size - 1)
y_pix_world = np.clip(np.int_(ypix_tran), 0, world_size - 1)
# Return the result
return x_pix_world, y_pix_world
def get_sub_global_map(xpos, ypos, yaw, world_map, world_size, global_scale):
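    # Sample a global_scale x global_scale grid of cells ahead of the rover (in rover coordinates),
    # rotate/translate it into world coordinates, and return the grid together with the worldmap
    # values read at those cells.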
# Apply rotation
xpix = np.tile(np.arange(global_scale),global_scale)
ypix = np.repeat(np.arange(global_scale)-global_scale/2, global_scale)
xpix_rot, ypix_rot = rotate_pix(xpix, ypix, yaw)
# Apply translation
xpix_tran, ypix_tran = translate_pix(xpix_rot, ypix_rot, xpos, ypos, 1)
# Perform rotation, translation and clipping all at once
x_pix_world = np.clip(np.int_(xpix_tran), 0, world_size - 1)
y_pix_world = np.clip(np.int_(ypix_tran), 0, world_size - 1)
weights = world_map[y_pix_world, x_pix_world,:]
# Return the result
return xpix, ypix, weights
# Define a function to perform a perspective transform
def perspect_transform(img, src, dst):
M = cv2.getPerspectiveTransform(src, dst)
warped = cv2.warpPerspective(img, M, (img.shape[1], img.shape[0]))# keep same size as input image
return warped
# Apply the above functions in succession and update the Rover state accordingly
def perception_step(Rover):
# Perform perception steps to update Rover()
# TODO:
# NOTE: camera image is coming to you in Rover.img
img = Rover.img
# 1) Define source and destination points for perspective transform
source = np.float32([[14, 140], [301 ,140],[200, 96], [118, 96]])
dst_size = 5
bottom_offset = 6
destination = np.float32([[img.shape[1]/2 - dst_size, img.shape[0] - bottom_offset],
[img.shape[1]/2 + dst_size, img.shape[0] - bottom_offset],
[img.shape[1]/2 + dst_size, img.shape[0] - 2*dst_size - bottom_offset],
[img.shape[1]/2 - dst_size, img.shape[0] - 2*dst_size - bottom_offset],
])
# 2) Apply perspective transform
warped = perspect_transform(img, source, destination)
# 3) Apply color threshold to identify navigable terrain/obstacles/rock samples
threshed = color_thresh(warped)
# 4) Update Rover.vision_image (this will be displayed on left side of screen)
# Example: Rover.vision_image[:,:,0] = obstacle color-thresholded binary image
# Rover.vision_image[:,:,1] = rock_sample color-thresholded binary image
# Rover.vision_image[:,:,2] = navigable terrain color-thresholded binary image
Rover.vision_image[:,:, 0] = threshed
# 5) Convert map image pixel values to rover-centric coords
xpix, ypix = rover_coords(threshed)
# 6) Convert rover-centric pixel values to world coordinates
xpos = Rover.pos[0]
ypos = Rover.pos[1]
yaw = Rover.yaw
world_size = 200
scale = 10
x_pix_world, y_pix_world = pix_to_world(xpix, ypix, xpos, ypos, yaw, world_size, scale)
# 7) Update Rover worldmap (to be displayed on right side of screen)
# Example: Rover.worldmap[obstacle_y_world, obstacle_x_world, 0] += 1
# Rover.worldmap[rock_y_world, rock_x_world, 1] += 1
# Rover.worldmap[navigable_y_world, navigable_x_world, 2] += 1
Rover.worldmap[y_pix_world, x_pix_world, 2] = np.clip(Rover.worldmap[y_pix_world, x_pix_world, 2]+1,0,255)
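    # Also map obstacles (inverse of the navigable threshold) and rock samples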
threshed = color_thresh(warped, above = False)
xpix_obs, ypix_obs = rover_coords(threshed)
x_pix_world_obs, y_pix_world_obs = pix_to_world(xpix_obs, ypix_obs, xpos, ypos, yaw, world_size, scale)
Rover.worldmap[y_pix_world_obs, x_pix_world_obs, 0] = np.clip(Rover.worldmap[y_pix_world_obs, x_pix_world_obs, 0]+1,0,255)
threshed = color_thresh_range(warped)
xpix_rock, ypix_rock = rover_coords(threshed)
x_pix_world_rock, y_pix_world_rock = pix_to_world(xpix_rock, ypix_rock, xpos, ypos, yaw, world_size, scale)
Rover.worldmap[y_pix_world_rock, x_pix_world_rock, 1] = np.clip(Rover.worldmap[y_pix_world_rock, x_pix_world_rock, 1]+1,0,255)
# 8) Convert rover-centric pixel positions to polar coordinates
# Update Rover pixel distances and angles
# Rover.nav_dists = rover_centric_pixel_distances
# Rover.nav_angles = rover_centric_angles
dist, angles = to_polar_coords(xpix, ypix)
Rover.nav_dists = dist
Rover.nav_angles = angles
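    # Bias a global steering direction using the worldmap built so far: sample a patch of
    # the map ahead of the rover and weight directions toward cells that are still mostly
    # unmapped (low obstacle/navigable counts); the result is stored in Rover.dir_global.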
global_scale = 30
xpix_sub_global, ypix_sub_global, weights_sub_global = get_sub_global_map(xpos, ypos, yaw, Rover.worldmap, world_size, global_scale)
sub_global_map = weights_sub_global.reshape((global_scale,global_scale,3))
dis_sub_global, angles_sub_global = to_polar_coords(xpix_sub_global, ypix_sub_global)
weights_sub_global = (255-np.abs(weights_sub_global[:,0]+weights_sub_global[:,2]))/255
weights_sub_global[weights_sub_global<0.95] = 0
if np.mean(weights_sub_global) == 0:
mean_dir_sub_global = 0
elif np.mean(weights_sub_global) > 1:
mean_dir_sub_global = 0
else:
mean_dir_sub_global = np.sum(np.multiply(angles_sub_global,weights_sub_global))/np.sum(weights_sub_global)
Rover.dir_global = mean_dir_sub_global
return Rover
|
[
"cv2.warpPerspective",
"numpy.zeros_like",
"numpy.arctan2",
"numpy.int_",
"numpy.abs",
"numpy.sum",
"cv2.getPerspectiveTransform",
"numpy.multiply",
"numpy.float32",
"numpy.clip",
"numpy.mean",
"numpy.arange",
"numpy.sin",
"numpy.cos",
"numpy.sqrt"
] |
[((298, 325), 'numpy.zeros_like', 'np.zeros_like', (['img[:, :, 0]'], {}), '(img[:, :, 0])\n', (311, 325), True, 'import numpy as np\n'), ((1314, 1341), 'numpy.zeros_like', 'np.zeros_like', (['img[:, :, 0]'], {}), '(img[:, :, 0])\n', (1327, 1341), True, 'import numpy as np\n'), ((2660, 2696), 'numpy.sqrt', 'np.sqrt', (['(x_pixel ** 2 + y_pixel ** 2)'], {}), '(x_pixel ** 2 + y_pixel ** 2)\n', (2667, 2696), True, 'import numpy as np\n'), ((2762, 2790), 'numpy.arctan2', 'np.arctan2', (['y_pixel', 'x_pixel'], {}), '(y_pixel, x_pixel)\n', (2772, 2790), True, 'import numpy as np\n'), ((4916, 4953), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['src', 'dst'], {}), '(src, dst)\n', (4943, 4953), False, 'import cv2\n'), ((4967, 5024), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'M', '(img.shape[1], img.shape[0])'], {}), '(img, M, (img.shape[1], img.shape[0]))\n', (4986, 5024), False, 'import cv2\n'), ((5412, 5469), 'numpy.float32', 'np.float32', (['[[14, 140], [301, 140], [200, 96], [118, 96]]'], {}), '([[14, 140], [301, 140], [200, 96], [118, 96]])\n', (5422, 5469), True, 'import numpy as np\n'), ((5527, 5828), 'numpy.float32', 'np.float32', (['[[img.shape[1] / 2 - dst_size, img.shape[0] - bottom_offset], [img.shape[1] /\n 2 + dst_size, img.shape[0] - bottom_offset], [img.shape[1] / 2 +\n dst_size, img.shape[0] - 2 * dst_size - bottom_offset], [img.shape[1] /\n 2 - dst_size, img.shape[0] - 2 * dst_size - bottom_offset]]'], {}), '([[img.shape[1] / 2 - dst_size, img.shape[0] - bottom_offset], [\n img.shape[1] / 2 + dst_size, img.shape[0] - bottom_offset], [img.shape[\n 1] / 2 + dst_size, img.shape[0] - 2 * dst_size - bottom_offset], [img.\n shape[1] / 2 - dst_size, img.shape[0] - 2 * dst_size - bottom_offset]])\n', (5537, 5828), True, 'import numpy as np\n'), ((7211, 7275), 'numpy.clip', 'np.clip', (['(Rover.worldmap[y_pix_world, x_pix_world, 2] + 1)', '(0)', '(255)'], {}), '(Rover.worldmap[y_pix_world, x_pix_world, 2] + 1, 0, 255)\n', (7218, 7275), True, 'import numpy as np\n'), ((7538, 7610), 'numpy.clip', 'np.clip', (['(Rover.worldmap[y_pix_world_obs, x_pix_world_obs, 0] + 1)', '(0)', '(255)'], {}), '(Rover.worldmap[y_pix_world_obs, x_pix_world_obs, 0] + 1, 0, 255)\n', (7545, 7610), True, 'import numpy as np\n'), ((7876, 7950), 'numpy.clip', 'np.clip', (['(Rover.worldmap[y_pix_world_rock, x_pix_world_rock, 1] + 1)', '(0)', '(255)'], {}), '(Rover.worldmap[y_pix_world_rock, x_pix_world_rock, 1] + 1, 0, 255)\n', (7883, 7950), True, 'import numpy as np\n'), ((3942, 3960), 'numpy.int_', 'np.int_', (['xpix_tran'], {}), '(xpix_tran)\n', (3949, 3960), True, 'import numpy as np\n'), ((4007, 4025), 'numpy.int_', 'np.int_', (['ypix_tran'], {}), '(ypix_tran)\n', (4014, 4025), True, 'import numpy as np\n'), ((4225, 4248), 'numpy.arange', 'np.arange', (['global_scale'], {}), '(global_scale)\n', (4234, 4248), True, 'import numpy as np\n'), ((4583, 4601), 'numpy.int_', 'np.int_', (['xpix_tran'], {}), '(xpix_tran)\n', (4590, 4601), True, 'import numpy as np\n'), ((4648, 4666), 'numpy.int_', 'np.int_', (['ypix_tran'], {}), '(ypix_tran)\n', (4655, 4666), True, 'import numpy as np\n'), ((8768, 8795), 'numpy.mean', 'np.mean', (['weights_sub_global'], {}), '(weights_sub_global)\n', (8775, 8795), True, 'import numpy as np\n'), ((2998, 3013), 'numpy.cos', 'np.cos', (['yaw_rad'], {}), '(yaw_rad)\n', (3004, 3013), True, 'import numpy as np\n'), ((3025, 3040), 'numpy.sin', 'np.sin', (['yaw_rad'], {}), '(yaw_rad)\n', (3031, 3040), True, 'import numpy as np\n'), ((3098, 3113), 'numpy.sin', 
'np.sin', (['yaw_rad'], {}), '(yaw_rad)\n', (3104, 3113), True, 'import numpy as np\n'), ((3125, 3140), 'numpy.cos', 'np.cos', (['yaw_rad'], {}), '(yaw_rad)\n', (3131, 3140), True, 'import numpy as np\n'), ((4284, 4307), 'numpy.arange', 'np.arange', (['global_scale'], {}), '(global_scale)\n', (4293, 4307), True, 'import numpy as np\n'), ((8647, 8706), 'numpy.abs', 'np.abs', (['(weights_sub_global[:, 0] + weights_sub_global[:, 2])'], {}), '(weights_sub_global[:, 0] + weights_sub_global[:, 2])\n', (8653, 8706), True, 'import numpy as np\n'), ((8843, 8870), 'numpy.mean', 'np.mean', (['weights_sub_global'], {}), '(weights_sub_global)\n', (8850, 8870), True, 'import numpy as np\n'), ((9006, 9032), 'numpy.sum', 'np.sum', (['weights_sub_global'], {}), '(weights_sub_global)\n', (9012, 9032), True, 'import numpy as np\n'), ((8955, 9005), 'numpy.multiply', 'np.multiply', (['angles_sub_global', 'weights_sub_global'], {}), '(angles_sub_global, weights_sub_global)\n', (8966, 9005), True, 'import numpy as np\n')]
|
import tensorflow as tf
from utils.util_class import WrongInputException
def augmentation_factory(augment_probs=None):
augment_probs = augment_probs if augment_probs else dict()
augmenters = []
for key, prob in augment_probs.items():
if key is "CropAndResize":
augm = CropAndResize(prob)
elif key is "HorizontalFlip":
augm = HorizontalFlip(prob)
elif key is "ColorJitter":
augm = ColorJitter(prob)
else:
raise WrongInputException(f"Wrong augmentation type: {key}")
augmenters.append(augm)
total_augment = TotalAugment(augmenters)
return total_augment
class TotalAugment:
def __init__(self, augment_objects=None):
self.augment_objects = augment_objects
def __call__(self, features):
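        # preprocess: reshape [batch, snippet, H, W, C] -> [batch*snippet, H, W, C] so that
        # tf.image ops can be applied, then run each augmenter and restore the original shape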
feat_aug = self.preprocess(features)
for augmenter in self.augment_objects:
feat_aug = augmenter(feat_aug)
feat_aug = self.postprocess(features, feat_aug)
return feat_aug
def preprocess(self, features):
"""
!!NOTE!!
when changing input dict's key or value, you MUST copy a dict like
feat_aug = {key: val for key, val in features.items()}
"""
# create a new feature dict
feat_aug = {key: val for key, val in features.items() if "image5d" not in key}
# to use tf.image functions, reshape to [batch*snippet, height, width, 3]
batch, snippet, height, width, channels = features["image5d"].get_shape()
imshape = (batch * snippet, height, width, channels)
feat_aug["image5d"] = tf.reshape(features["image5d"], imshape)
if "image5d_R" in features:
feat_aug["image5d_R"] = tf.reshape(features["image5d_R"], imshape)
return feat_aug
def postprocess(self, features, feat_aug):
image5d = features["image5d"]
feat_aug["image5d"] = tf.reshape(feat_aug["image5d"], image5d.get_shape())
if "image5d_R" in feat_aug:
feat_aug["image5d_R"] = tf.reshape(feat_aug["image5d_R"], image5d.get_shape())
return feat_aug
class AugmentBase:
def __init__(self, aug_prob=0.):
self.aug_prob = aug_prob
self.param = 0
def __call__(self, features):
raise NotImplementedError()
class CropAndResize(AugmentBase):
"""
randomly crop "image5d" and resize it to original size
create "intrinsic_aug" as camera matrix for "image5d"
"""
def __init__(self, aug_prob=0.3):
super().__init__(aug_prob)
self.half_crop_ratio = 0.1
def __call__(self, features):
nimage, height, width, _ = features["image5d"].get_shape()
crop_size = tf.constant([height, width])
box_indices = tf.range(0, nimage)
boxes = self.random_crop_boxes(nimage)
self.param = boxes[0]
features["image5d"] = tf.image.crop_and_resize(features["image5d"], boxes, box_indices, crop_size)
features["intrinsic"] = self.adjust_intrinsic(features["intrinsic"], boxes, crop_size)
if "image5d_R" in features:
features["image5d_R"] = tf.image.crop_and_resize(features["image5d_R"], boxes, box_indices, crop_size)
features["intrinsic_R"] = self.adjust_intrinsic(features["intrinsic_R"], boxes, crop_size)
if "depth_gt" in features:
batch = features["depth_gt"].get_shape()[0]
features["depth_gt"] = tf.image.crop_and_resize(features["depth_gt"], boxes[:batch], box_indices[:batch],
crop_size, method="nearest")
return features
def random_crop_boxes(self, num_box):
        # Sampled offsets are clipped to [0, 1], so a crop only happens when the draw is
        # positive; minval1 is chosen so that P(draw > 0) = aug_prob, i.e.
        # aug_prob : (1 - aug_prob) = half_crop_ratio : |minval1|
        maxval1 = self.half_crop_ratio
        minval1 = -(1. - self.aug_prob) * self.half_crop_ratio / self.aug_prob
y1x1 = tf.random.uniform((1, 2), minval1, maxval1)
y1x1 = tf.clip_by_value(y1x1, 0, 1)
minval2 = 1. - maxval1
maxval2 = 1. - minval1
y2x2 = tf.random.uniform((1, 2), minval2, maxval2)
y2x2 = tf.clip_by_value(y2x2, 0, 1)
assert (minval1 < maxval1) and (minval2 < maxval2)
# boxes: [1, 4]
boxes = tf.concat([y1x1, y2x2], axis=1)
# boxes: [num_box, 4]
boxes = tf.tile(boxes, [num_box, 1])
return boxes
def adjust_intrinsic(self, intrinsic, boxes, imsize):
"""
:param intrinsic: [batch, 3, 3]
:param boxes: (y1,x1,y2,x2) in range [0~1] [batch, 4]
:param imsize: [height, width] [2]
:return: adjusted intrinsic [batch, 3, 3]
"""
imsize = tf.cast(imsize, tf.float32)
# size: [1, 3, 3], contents: [[0, 0, x1_ratio*width], [0, 0, y1_ratio*height], [0, 0, 0]]
center_change = tf.stack([tf.stack([0., 0., boxes[0, 1]*imsize[1]], axis=0),
tf.stack([0., 0., boxes[0, 0]*imsize[0]], axis=0),
tf.stack([0., 0., 0.], axis=0)], axis=0)
# cx'=cx-x1, cy'=cy-y1
intrin_crop = intrinsic - center_change
# cx,fx *= W/(x2-x1), cy,fy *= H/(y2-y1)
x_ratio = 1. / (boxes[0, 3] - boxes[0, 1])
y_ratio = 1. / (boxes[0, 2] - boxes[0, 0])
intrin_adj = tf.stack([intrin_crop[:, 0] * x_ratio, intrin_crop[:, 1] * y_ratio, intrin_crop[:, 2]], axis=1)
return intrin_adj
class HorizontalFlip(AugmentBase):
"""
randomly horizontally flip "image5d" by aug_prob
"""
def __init__(self, aug_prob=0.2):
super().__init__(aug_prob)
def __call__(self, features):
rndval = tf.random.uniform(())
features = tf.cond(rndval < self.aug_prob,
lambda: self.flip_features(features),
lambda: features
)
return features
def flip_features(self, features):
feat_aug = dict()
feat_aug["image5d"] = tf.image.flip_left_right(features["image5d"])
if "image5d_R" in features:
feat_aug["image5d_R"] = tf.image.flip_left_right(features["image5d_R"])
feat_aug["intrinsic"] = self.flip_intrinsic(features["intrinsic"], features["image5d"].get_shape())
if "intrinsic_R" in features:
feat_aug["intrinsic_R"] = self.flip_intrinsic(features["intrinsic_R"], features["image5d"].get_shape())
if "pose_gt" in features:
feat_aug["pose_gt"] = self.flip_gt_pose(features["pose_gt"])
if "pose_gt_R" in features:
feat_aug["pose_gt_R"] = self.flip_gt_pose(features["pose_gt_R"])
if "stereo_T_LR" in features:
feat_aug["stereo_T_LR"] = self.flip_stereo_pose(features["stereo_T_LR"])
feat_rest = {key: val for key, val in features.items() if key not in feat_aug}
feat_aug.update(feat_rest)
return feat_aug
def flip_intrinsic(self, intrinsic, imshape):
batch, height, width, _ = imshape
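        # A horizontal flip maps cx -> width - cx while fx, fy and cy stay unchanged.
        # Since only entry (0, 2) of intrin_wh is non-zero and all intrinsic entries are
        # non-negative, abs(intrin_wh - intrinsic) implements exactly that mapping.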
intrin_wh = tf.constant([[[0, 0, width], [0, 0, 0], [0, 0, 0]]], dtype=tf.float32)
intrin_flip = tf.abs(intrin_wh - intrinsic)
return intrin_flip
def flip_gt_pose(self, pose):
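        # Mirroring the images about the vertical axis conjugates the ground-truth poses
        # by the x-axis reflection T_flip: pose' = T_flip @ pose @ inv(T_flip)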
T_flip = tf.constant([[[[-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]]], dtype=tf.float32)
# [batch, numsrc, 4, 4] = [1, 1, 4, 4] @ [batch, numsrc, 4, 4] @ [1, 1, 4, 4]
pose_flip = T_flip @ pose @ tf.linalg.inv(T_flip)
return pose_flip
def flip_stereo_pose(self, pose):
T_flip = tf.constant([[[-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]], dtype=tf.float32)
# [batch, 4, 4] = [1, 4, 4] @ [batch, 4, 4] @ [1, 4, 4]
pose_flip = T_flip @ pose @ tf.linalg.inv(T_flip)
return pose_flip
class ColorJitter(AugmentBase):
def __init__(self, aug_prob=0.2):
super().__init__(aug_prob)
def __call__(self, features):
rndval = tf.random.uniform(())
gamma = tf.random.uniform((), minval=0.5, maxval=1.5)
saturation = tf.random.uniform((), minval=0.5, maxval=1.5)
features["image5d"], self.param = \
tf.cond(rndval < self.aug_prob,
lambda: self.jitter_color(features["image5d"], gamma, saturation),
lambda: (features["image5d"], tf.constant([0, 0], dtype=tf.float32))
)
if "image5d_R" in features:
features["image5d_R"], self.param = \
tf.cond(rndval < self.aug_prob,
lambda: self.jitter_color(features["image5d_R"], gamma, saturation),
lambda: (features["image5d_R"], tf.constant([0, 0], dtype=tf.float32))
)
return features
def jitter_color(self, image, gamma, saturation):
# convert image -1 ~ 1 to 0 ~ 1
image = (image + 1.) / 2.
image = tf.image.adjust_saturation(image, saturation)
image = tf.image.adjust_gamma(image, gamma=gamma, gain=1.)
# convert image 0 ~ 1 to -1 ~ 1
image = image * 2. - 1.
param = tf.stack([gamma, saturation], axis=0)
return image, param
# ---------------------------------
import numpy as np
import utils.convert_pose as cp
def test_random_crop_boxes():
print("===== test test_random_crop_boxes")
cropper = CropAndResize()
boxes = cropper.random_crop_boxes(4)
print("boxes:", boxes)
wh = boxes[:, 2:] - boxes[:, :2]
assert (wh.numpy() > cropper.half_crop_ratio*2).all()
print("!!! test_random_crop_boxes passed")
def test_adjust_intrinsic():
print("===== test test_adjust_intrinsic")
batch, height, width = 3, 200, 240
imsize = tf.constant([height, width], dtype=tf.float32)
intrinsic = tf.constant([[[width/2, 0, width/2], [0, height/2, height/2], [0, 0, 1]]], dtype=tf.float32)
intrinsic = tf.tile(intrinsic, [batch, 1, 1])
print("intrinsic original", intrinsic[0])
xcrop, ycrop = 0.05, 0.1
cropper = CropAndResize()
boxes = tf.tile(tf.constant([[ycrop, xcrop, 1-ycrop, 1-xcrop]]), [batch, 1])
print("crop box:", boxes[0])
# EXECUTE
intrin_adj = cropper.adjust_intrinsic(intrinsic, boxes, imsize)
print("intrinsic adjusted", intrin_adj[0])
assert np.isclose(intrin_adj.numpy()[0], intrin_adj.numpy()[-1]).all()
assert np.isclose(intrin_adj[0, 0, 0], width / 2 / (1 - 2*xcrop)), \
f"fx={intrin_adj[0, 0, 0]}, expected={width / 2 / (1 - 2*xcrop)}"
assert np.isclose(intrin_adj[0, 0, 2], width / 2), \
f"cx={intrin_adj[0, 0, 2]}, expected={width / 2}"
print("!!! test_adjust_intrinsic passed")
def test_flip_pose_np():
print("===== test test_flip_pose_np")
batch = 2
pose_vec = np.random.uniform(-2, 2, (batch, 6))
pose_mat = cp.pose_rvec2matr(pose_vec)
flip = np.identity(4)
flip[0, 0] = -1
flip = flip[np.newaxis, ...]
pose_mat_flip = np.matmul(np.matmul(flip, pose_mat), np.linalg.inv(flip))
pose_vec_flip = cp.pose_matr2rvec(pose_mat_flip)
print("pose vec:\n", pose_vec)
print("pose mat:\n", pose_mat)
print("pose mat flip:\n", pose_mat_flip)
print("pose vec flip:\n", pose_vec_flip)
print("pose vec rotation: (rad)\n", np.linalg.norm(pose_vec[:, 3:], axis=1))
print("pose vec flip rotation: (rad)\n", np.linalg.norm(pose_vec_flip[:, 3:], axis=1))
print("pose == pose_flip:\n", np.isclose(pose_vec, pose_vec_flip))
flip_vec = np.array([[-1, 1, 1, 1, -1, -1]], dtype=np.float32)
assert np.isclose(pose_vec, pose_vec_flip*flip_vec).all()
print("!!! test_flip_pose_np passed")
def test_flip_pose_tf():
print("===== test test_flip_pose_tf")
batch, numsrc = 2, 2
pose_vec = tf.random.uniform((batch, numsrc, 6), -2, 2)
pose_mat = cp.pose_rvec2matr_batch_tf(pose_vec)
flipper = HorizontalFlip()
pose_mat_flip = flipper.flip_gt_pose(pose_mat)
pose_vec_flip = cp.pose_matr2rvec_batch(pose_mat_flip)
print("pose vec:\n", pose_vec[1])
print("pose mat:\n", pose_mat[1, 1])
print("pose mat flip:\n", pose_mat_flip[1, 1])
print("pose vec flip:\n", pose_vec_flip[1])
print("pose vec rotation [batch, numsrc]: (rad)\n", np.linalg.norm(pose_vec[:, :, 3:], axis=1))
print("pose vec flip rotation [batch, numsrc]: (rad)\n", np.linalg.norm(pose_vec_flip[:, :, 3:], axis=1))
print("pose == pose_flip:\n", np.isclose(pose_vec[1, 1], pose_vec_flip[1, 1]))
flip_vec = tf.constant([[[[-1, 1, 1, 1, -1, -1]]]], dtype=tf.float32)
assert np.isclose(pose_vec.numpy(), pose_vec_flip.numpy()*flip_vec, atol=1.e-3).all(), \
f"{pose_vec.numpy() - pose_vec_flip.numpy()*flip_vec}"
print("!!! test_flip_pose_tf passed")
def test_flip_intrinsic():
print("===== test test_flip_intrinsic")
batch, height, width = 3, 200, 240
intrinsic = tf.random.uniform((batch, 3, 3), minval=100, maxval=200)
print("intrinsic original", intrinsic[0])
imshape = (batch, height, width, 3)
flipper = HorizontalFlip()
# EXECUTE
intrin_flip = flipper.flip_intrinsic(intrinsic, imshape)
intrinsic = intrinsic.numpy()
intrin_flip = intrin_flip.numpy()
# fy, cy: SAME
assert np.isclose(intrinsic[:, 1:], intrin_flip[:, 1:]).all(), \
f"original\n{intrinsic[:, 1:]}\nflipped\n{intrin_flip[:, 1:]}"
# fx: SAME
assert np.isclose(intrinsic[:, 0, :2], intrin_flip[:, 0, :2]).all(), \
f"original\n{intrinsic[:, 0, :2]}\nflipped\n{intrin_flip[:, 0, :2]}"
# cx <- W - cx
assert np.isclose(width - intrinsic[:, 0, 2], intrin_flip[:, 0, 2]).all(), \
f"original\n{intrinsic[:, 0, 2]}\nflipped\n{intrin_flip[:, 0, 2]}"
print("horizontally flipped intrinsic\n", intrin_flip[0])
print("!!! test_flip_intrinsic passed")
import os.path as op
import cv2
from config import opts
from tfrecords.tfrecord_reader import TfrecordReader
from utils.util_funcs import to_uint8_image, multi_scale_depths
from model.synthesize.synthesize_base import SynthesizeMultiScale
from utils.convert_pose import pose_matr2rvec_batch
def test_augmentations():
print("===== test test_augmentations")
tfrgen = TfrecordReader(op.join(opts.DATAPATH_TFR, "kitti_raw_test"), shuffle=False)
dataset = tfrgen.get_dataset()
total_aug = TotalAugment()
data_aug = {"CropAndResize": CropAndResize(aug_prob=0.5),
"HorizontalFlip": HorizontalFlip(aug_prob=0.5),
"ColorJitter": ColorJitter(aug_prob=0.5)}
for bi, features in enumerate(dataset):
print(f"\n!!~~~~~~~~~~ {bi}: new features ~~~~~~~~~~!!")
images = []
feat_aug = total_aug.preprocess(features)
img = show_result(feat_aug, "preprocess")
images.append(img)
for name, augment in data_aug.items():
feat_aug = augment(feat_aug)
img = show_result(feat_aug, name, augment.param)
images.append(img)
feat_aug = total_aug.postprocess(features, feat_aug)
source_image, synth_target = synthesize_target(feat_aug)
images.append(synth_target)
images.append(source_image)
images = np.concatenate(images, axis=0)
cv2.imshow("augmentation", images)
ori_images = []
raw_image_u8 = to_uint8_image(features["image"])
ori_images.append(raw_image_u8[0, -opts.get_img_shape("H"):])
source_image, synth_target = synthesize_target(features)
ori_images.append(synth_target)
ori_images.append(source_image)
ori_images = np.concatenate(ori_images, axis=0)
cv2.imshow("original image", ori_images)
key = cv2.waitKey()
if key == ord('q'):
break
cv2.destroyAllWindows()
def show_result(features, name, param=""):
print(f"----- augmentation: {name}")
print("parameter:", param)
image_u8 = to_uint8_image(features["image5d"])
target_index = opts.SNIPPET_LEN - 1
target = image_u8[target_index].numpy()
intrin = features["intrinsic"]
print("intrinsic:\n", intrin[0].numpy())
pose = features["pose_gt"]
print("pose:\n", pose[0, 0].numpy())
return target
def synthesize_target(features):
sources, target, intrinsic, depth_gt_ms, pose_gt = prep_synthesize(features)
synth_target_ms = SynthesizeMultiScale()(sources, intrinsic, depth_gt_ms, pose_gt)
synth_u8 = to_uint8_image(synth_target_ms[0])
synth_u8 = synth_u8[0, 0].numpy()
source_u8 = to_uint8_image(sources)
source_u8 = source_u8[0, 0].numpy()
return source_u8, synth_u8
def prep_synthesize(features):
image5d = features["image5d"]
sources = image5d[:, :-1]
target = image5d[:, -1]
intrinsic = features["intrinsic"]
pose_gt = features["pose_gt"]
pose_gt = pose_matr2rvec_batch(pose_gt)
depth_gt = features["depth_gt"]
depth_gt_ms = multi_scale_depths(depth_gt, [1, 2, 4, 8])
return sources, target, intrinsic, depth_gt_ms, pose_gt
def test_augmentation_factory():
print("===== test test_augmentations")
tfrgen = TfrecordReader(op.join(opts.DATAPATH_TFR, "kitti_raw_test"), shuffle=False)
dataset = tfrgen.get_dataset()
augmenter = augmentation_factory(opts.AUGMENT_PROBS)
for bi, features in enumerate(dataset):
print(f"\n!!~~~~~~~~~~ {bi}: new features ~~~~~~~~~~!!")
print(features.keys())
print("before augment features:")
fkeys = list(features.keys())
for i in range(np.ceil(len(features.keys())/5.).astype(int)):
print(fkeys[i*5:(i+1)*5])
feat_aug = augmenter(features)
print("after augment features:")
fkeys = list(feat_aug.keys())
for i in range(np.ceil(len(feat_aug.keys())/5.).astype(int)):
print(fkeys[i*5:(i+1)*5])
image = to_uint8_image(features["image5d_R"][1])
image_aug = to_uint8_image(feat_aug["image5d_R"][1])
snippet, height, width, chann = image.get_shape()
image = image.numpy().reshape(-1, width, chann)
image_aug = image_aug.numpy().reshape(-1, width, chann)
image = np.concatenate([image, image_aug], axis=1)
cv2.imshow("image vs augmented", image)
key = cv2.waitKey()
if key == ord('q'):
break
cv2.destroyAllWindows()
import utils.util_funcs as uf
def test_stereo_augmentation():
print("===== test test_augmentations")
tfrgen = TfrecordReader(op.join(opts.DATAPATH_TFR, "kitti_raw_test"), shuffle=False)
dataset = tfrgen.get_dataset()
augmenter = augmentation_factory(opts.AUGMENT_PROBS)
batidx, sclidx = 0, 0
for bi, features in enumerate(dataset):
print(f"\n!!~~~~~~~~~~ {bi} step ~~~~~~~~~~!!")
view_imgs = dict()
feat_aug = augmenter(features)
pose_T_RL = tf.linalg.inv(feat_aug["stereo_T_LR"])
pose_T_RL = cp.pose_matr2rvec_batch(tf.expand_dims(pose_T_RL, 1))
right_target = tf.expand_dims(feat_aug["image5d_R"][:, -1], 1)
depth_ms = uf.multi_scale_depths(feat_aug["depth_gt"], [1, 2, 4, 8])
synth_stereo_left = SynthesizeMultiScale()(source_image=right_target,
intrinsic=feat_aug["intrinsic"],
pred_depth_ms=depth_ms,
pred_pose=pose_T_RL)
view_imgs["raw_left_target"] = features["image5d"][batidx, -1]
view_imgs["raw_right_target"] = features["image5d_R"][batidx, -1]
view_imgs["aug_left_target_orig"] = feat_aug["image5d"][batidx, -1]
view_imgs["aug_left_target_synt"] = synth_stereo_left[sclidx][batidx, 0]
view_imgs["aug_right_target"] = feat_aug["image5d_R"][batidx, -1]
view = uf.stack_titled_images(view_imgs)
cv2.imshow("stereo synthesis", view)
key = cv2.waitKey()
if key == ord('q'):
break
cv2.destroyAllWindows()
if __name__ == "__main__":
test_random_crop_boxes()
test_adjust_intrinsic()
test_flip_pose_np()
test_flip_pose_tf()
test_flip_intrinsic()
test_augmentations()
test_augmentation_factory()
test_stereo_augmentation()
|
[
"tensorflow.image.flip_left_right",
"tensorflow.clip_by_value",
"tensorflow.reshape",
"tensorflow.image.crop_and_resize",
"numpy.isclose",
"tensorflow.linalg.inv",
"numpy.linalg.norm",
"utils.convert_pose.pose_rvec2matr_batch_tf",
"cv2.imshow",
"os.path.join",
"tensorflow.abs",
"tensorflow.random.uniform",
"numpy.identity",
"tensorflow.concat",
"tensorflow.stack",
"tensorflow.cast",
"config.opts.get_img_shape",
"cv2.destroyAllWindows",
"tensorflow.image.adjust_saturation",
"tensorflow.range",
"utils.util_funcs.to_uint8_image",
"cv2.waitKey",
"tensorflow.constant",
"tensorflow.tile",
"model.synthesize.synthesize_base.SynthesizeMultiScale",
"numpy.linalg.inv",
"utils.convert_pose.pose_rvec2matr",
"tensorflow.image.adjust_gamma",
"utils.util_funcs.multi_scale_depths",
"numpy.concatenate",
"tensorflow.expand_dims",
"numpy.random.uniform",
"utils.util_funcs.stack_titled_images",
"utils.convert_pose.pose_matr2rvec_batch",
"utils.convert_pose.pose_matr2rvec",
"numpy.array",
"utils.util_class.WrongInputException",
"numpy.matmul"
] |
[((9665, 9711), 'tensorflow.constant', 'tf.constant', (['[height, width]'], {'dtype': 'tf.float32'}), '([height, width], dtype=tf.float32)\n', (9676, 9711), True, 'import tensorflow as tf\n'), ((9728, 9832), 'tensorflow.constant', 'tf.constant', (['[[[width / 2, 0, width / 2], [0, height / 2, height / 2], [0, 0, 1]]]'], {'dtype': 'tf.float32'}), '([[[width / 2, 0, width / 2], [0, height / 2, height / 2], [0, 0,\n 1]]], dtype=tf.float32)\n', (9739, 9832), True, 'import tensorflow as tf\n'), ((9837, 9870), 'tensorflow.tile', 'tf.tile', (['intrinsic', '[batch, 1, 1]'], {}), '(intrinsic, [batch, 1, 1])\n', (9844, 9870), True, 'import tensorflow as tf\n'), ((10308, 10368), 'numpy.isclose', 'np.isclose', (['intrin_adj[0, 0, 0]', '(width / 2 / (1 - 2 * xcrop))'], {}), '(intrin_adj[0, 0, 0], width / 2 / (1 - 2 * xcrop))\n', (10318, 10368), True, 'import numpy as np\n'), ((10458, 10500), 'numpy.isclose', 'np.isclose', (['intrin_adj[0, 0, 2]', '(width / 2)'], {}), '(intrin_adj[0, 0, 2], width / 2)\n', (10468, 10500), True, 'import numpy as np\n'), ((10709, 10745), 'numpy.random.uniform', 'np.random.uniform', (['(-2)', '(2)', '(batch, 6)'], {}), '(-2, 2, (batch, 6))\n', (10726, 10745), True, 'import numpy as np\n'), ((10761, 10788), 'utils.convert_pose.pose_rvec2matr', 'cp.pose_rvec2matr', (['pose_vec'], {}), '(pose_vec)\n', (10778, 10788), True, 'import utils.convert_pose as cp\n'), ((10800, 10814), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (10811, 10814), True, 'import numpy as np\n'), ((10966, 10998), 'utils.convert_pose.pose_matr2rvec', 'cp.pose_matr2rvec', (['pose_mat_flip'], {}), '(pose_mat_flip)\n', (10983, 10998), True, 'import utils.convert_pose as cp\n'), ((11418, 11469), 'numpy.array', 'np.array', (['[[-1, 1, 1, 1, -1, -1]]'], {'dtype': 'np.float32'}), '([[-1, 1, 1, 1, -1, -1]], dtype=np.float32)\n', (11426, 11469), True, 'import numpy as np\n'), ((11683, 11727), 'tensorflow.random.uniform', 'tf.random.uniform', (['(batch, numsrc, 6)', '(-2)', '(2)'], {}), '((batch, numsrc, 6), -2, 2)\n', (11700, 11727), True, 'import tensorflow as tf\n'), ((11743, 11779), 'utils.convert_pose.pose_rvec2matr_batch_tf', 'cp.pose_rvec2matr_batch_tf', (['pose_vec'], {}), '(pose_vec)\n', (11769, 11779), True, 'import utils.convert_pose as cp\n'), ((11882, 11920), 'utils.convert_pose.pose_matr2rvec_batch', 'cp.pose_matr2rvec_batch', (['pose_mat_flip'], {}), '(pose_mat_flip)\n', (11905, 11920), True, 'import utils.convert_pose as cp\n'), ((12408, 12466), 'tensorflow.constant', 'tf.constant', (['[[[[-1, 1, 1, 1, -1, -1]]]]'], {'dtype': 'tf.float32'}), '([[[[-1, 1, 1, 1, -1, -1]]]], dtype=tf.float32)\n', (12419, 12466), True, 'import tensorflow as tf\n'), ((12793, 12849), 'tensorflow.random.uniform', 'tf.random.uniform', (['(batch, 3, 3)'], {'minval': '(100)', 'maxval': '(200)'}), '((batch, 3, 3), minval=100, maxval=200)\n', (12810, 12849), True, 'import tensorflow as tf\n'), ((15635, 15658), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (15656, 15658), False, 'import cv2\n'), ((15791, 15826), 'utils.util_funcs.to_uint8_image', 'to_uint8_image', (["features['image5d']"], {}), "(features['image5d'])\n", (15805, 15826), False, 'from utils.util_funcs import to_uint8_image, multi_scale_depths\n'), ((16299, 16333), 'utils.util_funcs.to_uint8_image', 'to_uint8_image', (['synth_target_ms[0]'], {}), '(synth_target_ms[0])\n', (16313, 16333), False, 'from utils.util_funcs import to_uint8_image, multi_scale_depths\n'), ((16388, 16411), 'utils.util_funcs.to_uint8_image', 'to_uint8_image', 
(['sources'], {}), '(sources)\n', (16402, 16411), False, 'from utils.util_funcs import to_uint8_image, multi_scale_depths\n'), ((16694, 16723), 'utils.convert_pose.pose_matr2rvec_batch', 'pose_matr2rvec_batch', (['pose_gt'], {}), '(pose_gt)\n', (16714, 16723), False, 'from utils.convert_pose import pose_matr2rvec_batch\n'), ((16778, 16820), 'utils.util_funcs.multi_scale_depths', 'multi_scale_depths', (['depth_gt', '[1, 2, 4, 8]'], {}), '(depth_gt, [1, 2, 4, 8])\n', (16796, 16820), False, 'from utils.util_funcs import to_uint8_image, multi_scale_depths\n'), ((18180, 18203), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (18201, 18203), False, 'import cv2\n'), ((19828, 19851), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (19849, 19851), False, 'import cv2\n'), ((1624, 1664), 'tensorflow.reshape', 'tf.reshape', (["features['image5d']", 'imshape'], {}), "(features['image5d'], imshape)\n", (1634, 1664), True, 'import tensorflow as tf\n'), ((2708, 2736), 'tensorflow.constant', 'tf.constant', (['[height, width]'], {}), '([height, width])\n', (2719, 2736), True, 'import tensorflow as tf\n'), ((2759, 2778), 'tensorflow.range', 'tf.range', (['(0)', 'nimage'], {}), '(0, nimage)\n', (2767, 2778), True, 'import tensorflow as tf\n'), ((2887, 2963), 'tensorflow.image.crop_and_resize', 'tf.image.crop_and_resize', (["features['image5d']", 'boxes', 'box_indices', 'crop_size'], {}), "(features['image5d'], boxes, box_indices, crop_size)\n", (2911, 2963), True, 'import tensorflow as tf\n'), ((3872, 3915), 'tensorflow.random.uniform', 'tf.random.uniform', (['(1, 2)', 'minval1', 'maxval1'], {}), '((1, 2), minval1, maxval1)\n', (3889, 3915), True, 'import tensorflow as tf\n'), ((3931, 3959), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['y1x1', '(0)', '(1)'], {}), '(y1x1, 0, 1)\n', (3947, 3959), True, 'import tensorflow as tf\n'), ((4037, 4080), 'tensorflow.random.uniform', 'tf.random.uniform', (['(1, 2)', 'minval2', 'maxval2'], {}), '((1, 2), minval2, maxval2)\n', (4054, 4080), True, 'import tensorflow as tf\n'), ((4096, 4124), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['y2x2', '(0)', '(1)'], {}), '(y2x2, 0, 1)\n', (4112, 4124), True, 'import tensorflow as tf\n'), ((4224, 4255), 'tensorflow.concat', 'tf.concat', (['[y1x1, y2x2]'], {'axis': '(1)'}), '([y1x1, y2x2], axis=1)\n', (4233, 4255), True, 'import tensorflow as tf\n'), ((4302, 4330), 'tensorflow.tile', 'tf.tile', (['boxes', '[num_box, 1]'], {}), '(boxes, [num_box, 1])\n', (4309, 4330), True, 'import tensorflow as tf\n'), ((4647, 4674), 'tensorflow.cast', 'tf.cast', (['imsize', 'tf.float32'], {}), '(imsize, tf.float32)\n', (4654, 4674), True, 'import tensorflow as tf\n'), ((5270, 5369), 'tensorflow.stack', 'tf.stack', (['[intrin_crop[:, 0] * x_ratio, intrin_crop[:, 1] * y_ratio, intrin_crop[:, 2]]'], {'axis': '(1)'}), '([intrin_crop[:, 0] * x_ratio, intrin_crop[:, 1] * y_ratio,\n intrin_crop[:, 2]], axis=1)\n', (5278, 5369), True, 'import tensorflow as tf\n'), ((5623, 5644), 'tensorflow.random.uniform', 'tf.random.uniform', (['()'], {}), '(())\n', (5640, 5644), True, 'import tensorflow as tf\n'), ((5954, 5999), 'tensorflow.image.flip_left_right', 'tf.image.flip_left_right', (["features['image5d']"], {}), "(features['image5d'])\n", (5978, 5999), True, 'import tensorflow as tf\n'), ((6988, 7058), 'tensorflow.constant', 'tf.constant', (['[[[0, 0, width], [0, 0, 0], [0, 0, 0]]]'], {'dtype': 'tf.float32'}), '([[[0, 0, width], [0, 0, 0], [0, 0, 0]]], dtype=tf.float32)\n', (6999, 7058), True, 'import 
tensorflow as tf\n'), ((7081, 7110), 'tensorflow.abs', 'tf.abs', (['(intrin_wh - intrinsic)'], {}), '(intrin_wh - intrinsic)\n', (7087, 7110), True, 'import tensorflow as tf\n'), ((7190, 7286), 'tensorflow.constant', 'tf.constant', (['[[[[-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]]]'], {'dtype': 'tf.float32'}), '([[[[-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]]],\n dtype=tf.float32)\n', (7201, 7286), True, 'import tensorflow as tf\n'), ((7508, 7602), 'tensorflow.constant', 'tf.constant', (['[[[-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]]'], {'dtype': 'tf.float32'}), '([[[-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]],\n dtype=tf.float32)\n', (7519, 7602), True, 'import tensorflow as tf\n'), ((7906, 7927), 'tensorflow.random.uniform', 'tf.random.uniform', (['()'], {}), '(())\n', (7923, 7927), True, 'import tensorflow as tf\n'), ((7944, 7989), 'tensorflow.random.uniform', 'tf.random.uniform', (['()'], {'minval': '(0.5)', 'maxval': '(1.5)'}), '((), minval=0.5, maxval=1.5)\n', (7961, 7989), True, 'import tensorflow as tf\n'), ((8011, 8056), 'tensorflow.random.uniform', 'tf.random.uniform', (['()'], {'minval': '(0.5)', 'maxval': '(1.5)'}), '((), minval=0.5, maxval=1.5)\n', (8028, 8056), True, 'import tensorflow as tf\n'), ((8861, 8906), 'tensorflow.image.adjust_saturation', 'tf.image.adjust_saturation', (['image', 'saturation'], {}), '(image, saturation)\n', (8887, 8906), True, 'import tensorflow as tf\n'), ((8923, 8974), 'tensorflow.image.adjust_gamma', 'tf.image.adjust_gamma', (['image'], {'gamma': 'gamma', 'gain': '(1.0)'}), '(image, gamma=gamma, gain=1.0)\n', (8944, 8974), True, 'import tensorflow as tf\n'), ((9062, 9099), 'tensorflow.stack', 'tf.stack', (['[gamma, saturation]'], {'axis': '(0)'}), '([gamma, saturation], axis=0)\n', (9070, 9099), True, 'import tensorflow as tf\n'), ((9997, 10048), 'tensorflow.constant', 'tf.constant', (['[[ycrop, xcrop, 1 - ycrop, 1 - xcrop]]'], {}), '([[ycrop, xcrop, 1 - ycrop, 1 - xcrop]])\n', (10008, 10048), True, 'import tensorflow as tf\n'), ((10898, 10923), 'numpy.matmul', 'np.matmul', (['flip', 'pose_mat'], {}), '(flip, pose_mat)\n', (10907, 10923), True, 'import numpy as np\n'), ((10925, 10944), 'numpy.linalg.inv', 'np.linalg.inv', (['flip'], {}), '(flip)\n', (10938, 10944), True, 'import numpy as np\n'), ((11199, 11238), 'numpy.linalg.norm', 'np.linalg.norm', (['pose_vec[:, 3:]'], {'axis': '(1)'}), '(pose_vec[:, 3:], axis=1)\n', (11213, 11238), True, 'import numpy as np\n'), ((11285, 11329), 'numpy.linalg.norm', 'np.linalg.norm', (['pose_vec_flip[:, 3:]'], {'axis': '(1)'}), '(pose_vec_flip[:, 3:], axis=1)\n', (11299, 11329), True, 'import numpy as np\n'), ((11365, 11400), 'numpy.isclose', 'np.isclose', (['pose_vec', 'pose_vec_flip'], {}), '(pose_vec, pose_vec_flip)\n', (11375, 11400), True, 'import numpy as np\n'), ((12155, 12197), 'numpy.linalg.norm', 'np.linalg.norm', (['pose_vec[:, :, 3:]'], {'axis': '(1)'}), '(pose_vec[:, :, 3:], axis=1)\n', (12169, 12197), True, 'import numpy as np\n'), ((12260, 12307), 'numpy.linalg.norm', 'np.linalg.norm', (['pose_vec_flip[:, :, 3:]'], {'axis': '(1)'}), '(pose_vec_flip[:, :, 3:], axis=1)\n', (12274, 12307), True, 'import numpy as np\n'), ((12343, 12390), 'numpy.isclose', 'np.isclose', (['pose_vec[1, 1]', 'pose_vec_flip[1, 1]'], {}), '(pose_vec[1, 1], pose_vec_flip[1, 1])\n', (12353, 12390), True, 'import numpy as np\n'), ((14115, 14159), 'os.path.join', 'op.join', (['opts.DATAPATH_TFR', '"""kitti_raw_test"""'], {}), "(opts.DATAPATH_TFR, 'kitti_raw_test')\n", 
(14122, 14159), True, 'import os.path as op\n'), ((15079, 15109), 'numpy.concatenate', 'np.concatenate', (['images'], {'axis': '(0)'}), '(images, axis=0)\n', (15093, 15109), True, 'import numpy as np\n'), ((15118, 15152), 'cv2.imshow', 'cv2.imshow', (['"""augmentation"""', 'images'], {}), "('augmentation', images)\n", (15128, 15152), False, 'import cv2\n'), ((15201, 15234), 'utils.util_funcs.to_uint8_image', 'to_uint8_image', (["features['image']"], {}), "(features['image'])\n", (15215, 15234), False, 'from utils.util_funcs import to_uint8_image, multi_scale_depths\n'), ((15471, 15505), 'numpy.concatenate', 'np.concatenate', (['ori_images'], {'axis': '(0)'}), '(ori_images, axis=0)\n', (15485, 15505), True, 'import numpy as np\n'), ((15514, 15554), 'cv2.imshow', 'cv2.imshow', (['"""original image"""', 'ori_images'], {}), "('original image', ori_images)\n", (15524, 15554), False, 'import cv2\n'), ((15570, 15583), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (15581, 15583), False, 'import cv2\n'), ((16219, 16241), 'model.synthesize.synthesize_base.SynthesizeMultiScale', 'SynthesizeMultiScale', ([], {}), '()\n', (16239, 16241), False, 'from model.synthesize.synthesize_base import SynthesizeMultiScale\n'), ((16987, 17031), 'os.path.join', 'op.join', (['opts.DATAPATH_TFR', '"""kitti_raw_test"""'], {}), "(opts.DATAPATH_TFR, 'kitti_raw_test')\n", (16994, 17031), True, 'import os.path as op\n'), ((17714, 17754), 'utils.util_funcs.to_uint8_image', 'to_uint8_image', (["features['image5d_R'][1]"], {}), "(features['image5d_R'][1])\n", (17728, 17754), False, 'from utils.util_funcs import to_uint8_image, multi_scale_depths\n'), ((17775, 17815), 'utils.util_funcs.to_uint8_image', 'to_uint8_image', (["feat_aug['image5d_R'][1]"], {}), "(feat_aug['image5d_R'][1])\n", (17789, 17815), False, 'from utils.util_funcs import to_uint8_image, multi_scale_depths\n'), ((18010, 18052), 'numpy.concatenate', 'np.concatenate', (['[image, image_aug]'], {'axis': '(1)'}), '([image, image_aug], axis=1)\n', (18024, 18052), True, 'import numpy as np\n'), ((18061, 18100), 'cv2.imshow', 'cv2.imshow', (['"""image vs augmented"""', 'image'], {}), "('image vs augmented', image)\n", (18071, 18100), False, 'import cv2\n'), ((18115, 18128), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (18126, 18128), False, 'import cv2\n'), ((18341, 18385), 'os.path.join', 'op.join', (['opts.DATAPATH_TFR', '"""kitti_raw_test"""'], {}), "(opts.DATAPATH_TFR, 'kitti_raw_test')\n", (18348, 18385), True, 'import os.path as op\n'), ((18708, 18746), 'tensorflow.linalg.inv', 'tf.linalg.inv', (["feat_aug['stereo_T_LR']"], {}), "(feat_aug['stereo_T_LR'])\n", (18721, 18746), True, 'import tensorflow as tf\n'), ((18844, 18891), 'tensorflow.expand_dims', 'tf.expand_dims', (["feat_aug['image5d_R'][:, -1]", '(1)'], {}), "(feat_aug['image5d_R'][:, -1], 1)\n", (18858, 18891), True, 'import tensorflow as tf\n'), ((18911, 18968), 'utils.util_funcs.multi_scale_depths', 'uf.multi_scale_depths', (["feat_aug['depth_gt']", '[1, 2, 4, 8]'], {}), "(feat_aug['depth_gt'], [1, 2, 4, 8])\n", (18932, 18968), True, 'import utils.util_funcs as uf\n'), ((19670, 19703), 'utils.util_funcs.stack_titled_images', 'uf.stack_titled_images', (['view_imgs'], {}), '(view_imgs)\n', (19692, 19703), True, 'import utils.util_funcs as uf\n'), ((19712, 19748), 'cv2.imshow', 'cv2.imshow', (['"""stereo synthesis"""', 'view'], {}), "('stereo synthesis', view)\n", (19722, 19748), False, 'import cv2\n'), ((19763, 19776), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (19774, 19776), False, 'import 
cv2\n'), ((1737, 1779), 'tensorflow.reshape', 'tf.reshape', (["features['image5d_R']", 'imshape'], {}), "(features['image5d_R'], imshape)\n", (1747, 1779), True, 'import tensorflow as tf\n'), ((3131, 3209), 'tensorflow.image.crop_and_resize', 'tf.image.crop_and_resize', (["features['image5d_R']", 'boxes', 'box_indices', 'crop_size'], {}), "(features['image5d_R'], boxes, box_indices, crop_size)\n", (3155, 3209), True, 'import tensorflow as tf\n'), ((3440, 3556), 'tensorflow.image.crop_and_resize', 'tf.image.crop_and_resize', (["features['depth_gt']", 'boxes[:batch]', 'box_indices[:batch]', 'crop_size'], {'method': '"""nearest"""'}), "(features['depth_gt'], boxes[:batch], box_indices[:\n batch], crop_size, method='nearest')\n", (3464, 3556), True, 'import tensorflow as tf\n'), ((6072, 6119), 'tensorflow.image.flip_left_right', 'tf.image.flip_left_right', (["features['image5d_R']"], {}), "(features['image5d_R'])\n", (6096, 6119), True, 'import tensorflow as tf\n'), ((7405, 7426), 'tensorflow.linalg.inv', 'tf.linalg.inv', (['T_flip'], {}), '(T_flip)\n', (7418, 7426), True, 'import tensorflow as tf\n'), ((7699, 7720), 'tensorflow.linalg.inv', 'tf.linalg.inv', (['T_flip'], {}), '(T_flip)\n', (7712, 7720), True, 'import tensorflow as tf\n'), ((11481, 11527), 'numpy.isclose', 'np.isclose', (['pose_vec', '(pose_vec_flip * flip_vec)'], {}), '(pose_vec, pose_vec_flip * flip_vec)\n', (11491, 11527), True, 'import numpy as np\n'), ((13146, 13194), 'numpy.isclose', 'np.isclose', (['intrinsic[:, 1:]', 'intrin_flip[:, 1:]'], {}), '(intrinsic[:, 1:], intrin_flip[:, 1:])\n', (13156, 13194), True, 'import numpy as np\n'), ((13301, 13355), 'numpy.isclose', 'np.isclose', (['intrinsic[:, 0, :2]', 'intrin_flip[:, 0, :2]'], {}), '(intrinsic[:, 0, :2], intrin_flip[:, 0, :2])\n', (13311, 13355), True, 'import numpy as np\n'), ((13472, 13532), 'numpy.isclose', 'np.isclose', (['(width - intrinsic[:, 0, 2])', 'intrin_flip[:, 0, 2]'], {}), '(width - intrinsic[:, 0, 2], intrin_flip[:, 0, 2])\n', (13482, 13532), True, 'import numpy as np\n'), ((18791, 18819), 'tensorflow.expand_dims', 'tf.expand_dims', (['pose_T_RL', '(1)'], {}), '(pose_T_RL, 1)\n', (18805, 18819), True, 'import tensorflow as tf\n'), ((18997, 19019), 'model.synthesize.synthesize_base.SynthesizeMultiScale', 'SynthesizeMultiScale', ([], {}), '()\n', (19017, 19019), False, 'from model.synthesize.synthesize_base import SynthesizeMultiScale\n'), ((4807, 4860), 'tensorflow.stack', 'tf.stack', (['[0.0, 0.0, boxes[0, 1] * imsize[1]]'], {'axis': '(0)'}), '([0.0, 0.0, boxes[0, 1] * imsize[1]], axis=0)\n', (4815, 4860), True, 'import tensorflow as tf\n'), ((4892, 4945), 'tensorflow.stack', 'tf.stack', (['[0.0, 0.0, boxes[0, 0] * imsize[0]]'], {'axis': '(0)'}), '([0.0, 0.0, boxes[0, 0] * imsize[0]], axis=0)\n', (4900, 4945), True, 'import tensorflow as tf\n'), ((4977, 5010), 'tensorflow.stack', 'tf.stack', (['[0.0, 0.0, 0.0]'], {'axis': '(0)'}), '([0.0, 0.0, 0.0], axis=0)\n', (4985, 5010), True, 'import tensorflow as tf\n'), ((504, 558), 'utils.util_class.WrongInputException', 'WrongInputException', (['f"""Wrong augmentation type: {key}"""'], {}), "(f'Wrong augmentation type: {key}')\n", (523, 558), False, 'from utils.util_class import WrongInputException\n'), ((8283, 8320), 'tensorflow.constant', 'tf.constant', (['[0, 0]'], {'dtype': 'tf.float32'}), '([0, 0], dtype=tf.float32)\n', (8294, 8320), True, 'import tensorflow as tf\n'), ((8627, 8664), 'tensorflow.constant', 'tf.constant', (['[0, 0]'], {'dtype': 'tf.float32'}), '([0, 0], dtype=tf.float32)\n', (8638, 8664), 
True, 'import tensorflow as tf\n'), ((15278, 15301), 'config.opts.get_img_shape', 'opts.get_img_shape', (['"""H"""'], {}), "('H')\n", (15296, 15301), False, 'from config import opts\n')]
|
# Contrast of evoked activity: (left + right)/2 vs. back, plotted as a sensor topomap
import os
import mne
from mne.io import read_raw_fif
import numpy
import numpy as np
import matplotlib.pyplot as plt
import os.path as op
from operator import itemgetter
from mne.io import Raw
from mne.io import read_raw_ctf
from mne.preprocessing import ICA
from mne.viz import plot_evoked_topo
from mne.minimum_norm import apply_inverse
import math
import matplotlib
from mne.viz import topomap
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mne.stats import permutation_t_test
from mne.stats import permutation_cluster_1samp_test
from mne.stats import (spatio_temporal_cluster_1samp_test, summarize_clusters_stc)
from mne.viz import plot_topomap
list_data_back = []
list_data_left = []
list_data_right = []
list_data_front = []
list_data_tfpc = []
list_data_ttpc = []
for ith_sub in list(range(2, 14)):
temp_data_array = "/Users/boo/Desktop/MEG_data_script/PreProcessed_data/artefact_removed_sub" + str(
ith_sub) + "_raw_100hz_sfre_100.fif"
temp_event_array = "/Users/boo/Desktop/MEG_data_script/PreProcessed_data/events_post_resample_sub" + str(
ith_sub) + "_100hz_sfre_100.npy"
array_data = read_raw_fif(temp_data_array)
array_event = numpy.load(temp_event_array)
# pick channel
all_chan = array_data.ch_names
picks_mag = mne.pick_types(array_data.info, meg='mag')
meg_channel = itemgetter(*picks_mag)(all_chan)
meg_channel = meg_channel[29:301]
pos = mne.channels.layout._find_topomap_coords(array_data.info, picks_mag[29:301])
# Compute epochs
min_onset = -0.2
max_endpoint = 2
baseline = (min_onset, 0)
event_id_back = {'back': 80}
event_id_front = {'front': 90}
event_id_left = {'left': 82}
event_id_right = {'right': 84}
event_id_tfpc = {'tfpc': 110}
event_id_ttpc = {'ttpc': 100}
epochs_back = mne.Epochs(array_data, array_event, picks=meg_channel, event_id=event_id_back, tmin=min_onset, tmax=max_endpoint,
baseline=baseline)
epochs_front = mne.Epochs(array_data, array_event, picks=meg_channel, event_id=event_id_front, tmin=min_onset,
tmax=max_endpoint, baseline=baseline)
epochs_left = mne.Epochs(array_data, array_event, picks=meg_channel, event_id=event_id_left, tmin=min_onset, tmax=max_endpoint,
baseline=baseline)
epochs_right = mne.Epochs(array_data, array_event, picks=meg_channel, event_id=event_id_right, tmin=min_onset,
tmax=max_endpoint, baseline=baseline)
epochs_tfpc = mne.Epochs(array_data, array_event, picks=meg_channel, event_id=event_id_tfpc, tmin=min_onset, tmax=max_endpoint,
baseline=baseline)
epochs_ttpc = mne.Epochs(array_data, array_event, picks=meg_channel, event_id=event_id_ttpc, tmin=min_onset, tmax=max_endpoint,
baseline=baseline)
epochs_back.load_data()
epochs_front.load_data()
epochs_left.load_data()
epochs_right.load_data()
epochs_tfpc.load_data()
epochs_ttpc.load_data()
evoke_back = epochs_back.average()
evoke_front = epochs_front.average()
evoke_left = epochs_left.average()
evoke_right = epochs_right.average()
evoke_tfpc = epochs_tfpc.average()
evoke_ttpc = epochs_ttpc.average()
# add into list
list_data_back.append(evoke_back.data)
list_data_front.append(evoke_front.data)
list_data_left.append(evoke_left.data)
list_data_right.append(evoke_right.data)
list_data_tfpc.append(evoke_tfpc.data)
list_data_ttpc.append(evoke_ttpc.data)
array_back = np.array(list_data_back)
array_left = np.array(list_data_left)
array_right = np.array(list_data_right)
array_front = np.array(list_data_front)
array_tfpc = np.array(list_data_tfpc)
array_ttpc = np.array(list_data_ttpc)
onset_t = 0
end_point = 220
segm = 3
num_dp = np.shape(array_back)[2]
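# Downsample the evoked time courses by averaging over non-overlapping bins of `segm`
# samples between onset_t and end_point (sample indices at the 100 Hz sampling rate)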
resampled_ima_t_f = np.zeros([array_front.shape[0], array_front.shape[1], len(np.arange(onset_t, end_point, segm))])
resampled_ima_t_b = np.zeros([array_back.shape[0], array_back.shape[1], len(np.arange(onset_t, end_point, segm))])
resampled_ima_t_l = np.zeros([array_left.shape[0], array_left.shape[1], len(np.arange(onset_t, end_point, segm))])
resampled_ima_t_r = np.zeros([array_right.shape[0], array_right.shape[1], len(np.arange(onset_t, end_point, segm))])
resampled_ima_t_fpc = np.zeros([array_tfpc.shape[0], array_tfpc.shape[1], len(np.arange(onset_t, end_point, segm))])
resampled_ima_t_tpc = np.zeros([array_ttpc.shape[0], array_ttpc.shape[1], len(np.arange(onset_t, end_point, segm))])
for ind, ith_ts in enumerate(np.arange(onset_t, end_point, segm)):
print(range(ith_ts, ith_ts + segm))
if ith_ts + segm < num_dp:
resampled_ima_t_f[..., ind] = np.mean(array_front[..., range(ith_ts, ith_ts + segm)], axis=2)
resampled_ima_t_b[..., ind] = np.mean(array_back[..., range(ith_ts, ith_ts + segm)], axis=2)
resampled_ima_t_l[..., ind] = np.mean(array_left[..., range(ith_ts, ith_ts + segm)], axis=2)
resampled_ima_t_r[..., ind] = np.mean(array_right[..., range(ith_ts, ith_ts + segm)], axis=2)
resampled_ima_t_fpc[..., ind] = np.mean(array_tfpc[..., range(ith_ts, ith_ts + segm)], axis=2)
resampled_ima_t_tpc[..., ind] = np.mean(array_ttpc[..., range(ith_ts, ith_ts + segm)], axis=2)
the_data_set = (resampled_ima_t_l + resampled_ima_t_r)/2 - resampled_ima_t_b
##########################
# NOTE: `tval_lr_b` and `mask` are not defined earlier in this script; the lines below are
# an assumed, minimal reconstruction: per-sensor t-values of the contrast across subjects
# (averaged over time bins) from a permutation t-test, with `mask` marking p < 0.05 sensors.
# The vmin/vmax used for plotting further down were tuned for field values and may need adjusting.
tval_lr_b, p_values, _ = permutation_t_test(np.mean(the_data_set, axis=2), n_permutations=1000)
mask = p_values < 0.05
mean_curr_ima = tval_lr_b
# plot
fig = plt.figure(constrained_layout=False, figsize=[2, 2])
fig.subplots_adjust(left=0.02, right=0.9, bottom=0.02, top=0.98)
num_row = 1
num_col = 1
gs = matplotlib.gridspec.GridSpec(nrows=num_row, ncols=num_col, figure=fig)
images = []
for ax_row in list(range(num_row)):
cur_ax = fig.add_subplot(gs[ax_row])
kwargs = dict(vmin=-5e-14, vmax=5e-14, sensors=False, res=64, names=None, show_names=False,
mask_params={}, outlines='head', contours=6, image_interp='bilinear', show=False,
extrapolate='box')
tp, cn, interp = topomap._plot_topomap(mean_curr_ima, pos, axes=cur_ax,
mask=mask, **kwargs)
images.append(tp)
cax = fig.add_subplot()
cpos = cax.get_position()
cpos.x0 = 0.94
cpos.x1 = 0.96
cpos.y0 = .15
cpos.y1 = .75
cax.set_position(cpos)
cbar = fig.colorbar(images[-1], ax=cax, cax=cax)
# cbar.set_ticks(cn.levels)
cbar.ax.tick_params(labelsize=15)
fig.savefig('/Users/boo/Desktop/example.png')
plt.close()
|
[
"numpy.load",
"mne.io.read_raw_fif",
"mne.pick_types",
"matplotlib.pyplot.close",
"numpy.shape",
"matplotlib.pyplot.figure",
"mne.channels.layout._find_topomap_coords",
"numpy.array",
"mne.Epochs",
"numpy.arange",
"matplotlib.gridspec.GridSpec",
"operator.itemgetter",
"mne.viz.topomap._plot_topomap"
] |
[((3639, 3663), 'numpy.array', 'np.array', (['list_data_back'], {}), '(list_data_back)\n', (3647, 3663), True, 'import numpy as np\n'), ((3677, 3701), 'numpy.array', 'np.array', (['list_data_left'], {}), '(list_data_left)\n', (3685, 3701), True, 'import numpy as np\n'), ((3716, 3741), 'numpy.array', 'np.array', (['list_data_right'], {}), '(list_data_right)\n', (3724, 3741), True, 'import numpy as np\n'), ((3756, 3781), 'numpy.array', 'np.array', (['list_data_front'], {}), '(list_data_front)\n', (3764, 3781), True, 'import numpy as np\n'), ((3795, 3819), 'numpy.array', 'np.array', (['list_data_tfpc'], {}), '(list_data_tfpc)\n', (3803, 3819), True, 'import numpy as np\n'), ((3833, 3857), 'numpy.array', 'np.array', (['list_data_ttpc'], {}), '(list_data_ttpc)\n', (3841, 3857), True, 'import numpy as np\n'), ((5524, 5576), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'constrained_layout': '(False)', 'figsize': '[2, 2]'}), '(constrained_layout=False, figsize=[2, 2])\n', (5534, 5576), True, 'import matplotlib.pyplot as plt\n'), ((5671, 5741), 'matplotlib.gridspec.GridSpec', 'matplotlib.gridspec.GridSpec', ([], {'nrows': 'num_row', 'ncols': 'num_col', 'figure': 'fig'}), '(nrows=num_row, ncols=num_col, figure=fig)\n', (5699, 5741), False, 'import matplotlib\n'), ((6542, 6553), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6551, 6553), True, 'import matplotlib.pyplot as plt\n'), ((1180, 1209), 'mne.io.read_raw_fif', 'read_raw_fif', (['temp_data_array'], {}), '(temp_data_array)\n', (1192, 1209), False, 'from mne.io import read_raw_fif\n'), ((1228, 1256), 'numpy.load', 'numpy.load', (['temp_event_array'], {}), '(temp_event_array)\n', (1238, 1256), False, 'import numpy\n'), ((1328, 1370), 'mne.pick_types', 'mne.pick_types', (['array_data.info'], {'meg': '"""mag"""'}), "(array_data.info, meg='mag')\n", (1342, 1370), False, 'import mne\n'), ((1470, 1546), 'mne.channels.layout._find_topomap_coords', 'mne.channels.layout._find_topomap_coords', (['array_data.info', 'picks_mag[29:301]'], {}), '(array_data.info, picks_mag[29:301])\n', (1510, 1546), False, 'import mne\n'), ((1864, 2001), 'mne.Epochs', 'mne.Epochs', (['array_data', 'array_event'], {'picks': 'meg_channel', 'event_id': 'event_id_back', 'tmin': 'min_onset', 'tmax': 'max_endpoint', 'baseline': 'baseline'}), '(array_data, array_event, picks=meg_channel, event_id=\n event_id_back, tmin=min_onset, tmax=max_endpoint, baseline=baseline)\n', (1874, 2001), False, 'import mne\n'), ((2045, 2183), 'mne.Epochs', 'mne.Epochs', (['array_data', 'array_event'], {'picks': 'meg_channel', 'event_id': 'event_id_front', 'tmin': 'min_onset', 'tmax': 'max_endpoint', 'baseline': 'baseline'}), '(array_data, array_event, picks=meg_channel, event_id=\n event_id_front, tmin=min_onset, tmax=max_endpoint, baseline=baseline)\n', (2055, 2183), False, 'import mne\n'), ((2227, 2364), 'mne.Epochs', 'mne.Epochs', (['array_data', 'array_event'], {'picks': 'meg_channel', 'event_id': 'event_id_left', 'tmin': 'min_onset', 'tmax': 'max_endpoint', 'baseline': 'baseline'}), '(array_data, array_event, picks=meg_channel, event_id=\n event_id_left, tmin=min_onset, tmax=max_endpoint, baseline=baseline)\n', (2237, 2364), False, 'import mne\n'), ((2408, 2546), 'mne.Epochs', 'mne.Epochs', (['array_data', 'array_event'], {'picks': 'meg_channel', 'event_id': 'event_id_right', 'tmin': 'min_onset', 'tmax': 'max_endpoint', 'baseline': 'baseline'}), '(array_data, array_event, picks=meg_channel, event_id=\n event_id_right, tmin=min_onset, tmax=max_endpoint, baseline=baseline)\n', (2418, 
2546), False, 'import mne\n'), ((2590, 2727), 'mne.Epochs', 'mne.Epochs', (['array_data', 'array_event'], {'picks': 'meg_channel', 'event_id': 'event_id_tfpc', 'tmin': 'min_onset', 'tmax': 'max_endpoint', 'baseline': 'baseline'}), '(array_data, array_event, picks=meg_channel, event_id=\n event_id_tfpc, tmin=min_onset, tmax=max_endpoint, baseline=baseline)\n', (2600, 2727), False, 'import mne\n'), ((2770, 2907), 'mne.Epochs', 'mne.Epochs', (['array_data', 'array_event'], {'picks': 'meg_channel', 'event_id': 'event_id_ttpc', 'tmin': 'min_onset', 'tmax': 'max_endpoint', 'baseline': 'baseline'}), '(array_data, array_event, picks=meg_channel, event_id=\n event_id_ttpc, tmin=min_onset, tmax=max_endpoint, baseline=baseline)\n', (2780, 2907), False, 'import mne\n'), ((3905, 3925), 'numpy.shape', 'np.shape', (['array_back'], {}), '(array_back)\n', (3913, 3925), True, 'import numpy as np\n'), ((4657, 4692), 'numpy.arange', 'np.arange', (['onset_t', 'end_point', 'segm'], {}), '(onset_t, end_point, segm)\n', (4666, 4692), True, 'import numpy as np\n'), ((6105, 6180), 'mne.viz.topomap._plot_topomap', 'topomap._plot_topomap', (['mean_curr_ima', 'pos'], {'axes': 'cur_ax', 'mask': 'mask'}), '(mean_curr_ima, pos, axes=cur_ax, mask=mask, **kwargs)\n', (6126, 6180), False, 'from mne.viz import topomap\n'), ((1389, 1411), 'operator.itemgetter', 'itemgetter', (['*picks_mag'], {}), '(*picks_mag)\n', (1399, 1411), False, 'from operator import itemgetter\n'), ((4008, 4043), 'numpy.arange', 'np.arange', (['onset_t', 'end_point', 'segm'], {}), '(onset_t, end_point, segm)\n', (4017, 4043), True, 'import numpy as np\n'), ((4123, 4158), 'numpy.arange', 'np.arange', (['onset_t', 'end_point', 'segm'], {}), '(onset_t, end_point, segm)\n', (4132, 4158), True, 'import numpy as np\n'), ((4238, 4273), 'numpy.arange', 'np.arange', (['onset_t', 'end_point', 'segm'], {}), '(onset_t, end_point, segm)\n', (4247, 4273), True, 'import numpy as np\n'), ((4355, 4390), 'numpy.arange', 'np.arange', (['onset_t', 'end_point', 'segm'], {}), '(onset_t, end_point, segm)\n', (4364, 4390), True, 'import numpy as np\n'), ((4472, 4507), 'numpy.arange', 'np.arange', (['onset_t', 'end_point', 'segm'], {}), '(onset_t, end_point, segm)\n', (4481, 4507), True, 'import numpy as np\n'), ((4589, 4624), 'numpy.arange', 'np.arange', (['onset_t', 'end_point', 'segm'], {}), '(onset_t, end_point, segm)\n', (4598, 4624), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
import pyautogui
hand_hist = None
traverse_point = []
total_rectangle = 9
hand_rect_one_x = None
hand_rect_one_y = None
hand_rect_two_x = None
hand_rect_two_y = None
def rescale_frame(frame, wpercent=130, hpercent=130):
width = int(frame.shape[1] * wpercent / 100)
height = int(frame.shape[0] * hpercent / 100)
return cv2.resize(frame, (width, height), interpolation=cv2.INTER_AREA)
def contours(hist_mask_image):
gray_hist_mask_image = cv2.cvtColor(hist_mask_image, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray_hist_mask_image, 0, 255, 0)
cont, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
return cont
def draw_rect(frame):
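    # Draw a 3x3 grid of small rectangles in the middle of the frame; the user places a
    # hand over them so the pixels inside can be sampled for the skin-colour histogram.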
rows, cols, _ = frame.shape
global total_rectangle, hand_rect_one_x, hand_rect_one_y, hand_rect_two_x, hand_rect_two_y
hand_rect_one_x = np.array(
[6 * rows / 20, 6 * rows / 20, 6 * rows / 20, 9 * rows / 20, 9 * rows / 20, 9 * rows / 20, 12 * rows / 20,
12 * rows / 20, 12 * rows / 20], dtype=np.uint32)
hand_rect_one_y = np.array(
[9 * cols / 20, 10 * cols / 20, 11 * cols / 20, 9 * cols / 20, 10 * cols / 20, 11 * cols / 20, 9 * cols / 20,
10 * cols / 20, 11 * cols / 20], dtype=np.uint32)
hand_rect_two_x = hand_rect_one_x + 10
hand_rect_two_y = hand_rect_one_y + 10
for i in range(total_rectangle):
cv2.rectangle(frame, (hand_rect_one_y[i], hand_rect_one_x[i]),
(hand_rect_two_y[i], hand_rect_two_x[i]),
(0, 255, 0), 1)
return frame
def hand_histogram(frame):
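    # Build an HSV colour histogram from the 9 sampled 10x10 patches (the rectangles drawn
    # by draw_rect) and normalise it; this histogram models the hand/skin colour.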
global hand_rect_one_x, hand_rect_one_y
hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
roi = np.zeros([90, 10, 3], dtype=hsv_frame.dtype)
for i in range(total_rectangle):
roi[i * 10: i * 10 + 10, 0: 10] = hsv_frame[hand_rect_one_x[i]:hand_rect_one_x[i] + 10,
hand_rect_one_y[i]:hand_rect_one_y[i] + 10]
hand_hist = cv2.calcHist([roi], [0, 1], None, [180, 256], [0, 180, 0, 256])
return cv2.normalize(hand_hist, hand_hist, 0, 255, cv2.NORM_MINMAX)
def hist_masking(frame, hist):
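    # Back-project the hand histogram onto the frame, smooth and threshold the result,
    # and keep only the pixels whose colour matches the sampled hand.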
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
dst = cv2.calcBackProject([hsv], [0, 1], hist, [0, 180, 0, 256], 1)
disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (31, 31))
cv2.filter2D(dst, -1, disc, dst)
ret, thresh = cv2.threshold(dst, 150, 255, cv2.THRESH_BINARY)
# thresh = cv2.dilate(thresh, None, iterations=5)
thresh = cv2.merge((thresh, thresh, thresh))
return cv2.bitwise_and(frame, thresh)
def centroid(max_contour):
moment = cv2.moments(max_contour)
if moment['m00'] != 0:
cx = int(moment['m10'] / moment['m00'])
cy = int(moment['m01'] / moment['m00'])
return cx, cy
else:
return None
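# Note: centroid/farthest_point implement a simple fingertip heuristic -- among
# the convexity-defect start points of the hand contour, the point farthest
# from the contour centroid is treated as the pointer tip.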
def farthest_point(defects, contour, centroid):
if defects is not None and centroid is not None:
s = defects[:, 0][:, 0]
cx, cy = centroid
x = np.array(contour[s][:, 0][:, 0], dtype=np.float)
y = np.array(contour[s][:, 0][:, 1], dtype=np.float)
xp = cv2.pow(cv2.subtract(x, cx), 2)
yp = cv2.pow(cv2.subtract(y, cy), 2)
dist = cv2.sqrt(cv2.add(xp, yp))
dist_max_i = np.argmax(dist)
if dist_max_i < len(s):
farthest_defect = s[dist_max_i]
farthest_point = tuple(contour[farthest_defect][0])
return farthest_point
else:
return None
def draw_circles(frame, traverse_point):
if traverse_point is not None:
for i in range(len(traverse_point)):
cv2.circle(frame, traverse_point[i], int(5 - (5 * i * 3) / 100), [0, 255, 255], -1)
def manage_image_opr(frame, hand_hist):
hist_mask_image = hist_masking(frame, hand_hist)
hist_mask_image = cv2.erode(hist_mask_image, None, iterations=2)
hist_mask_image = cv2.dilate(hist_mask_image, None, iterations=2)
contour_list = contours(hist_mask_image)
max_cont = max(contour_list, key=cv2.contourArea)
cnt_centroid = centroid(max_cont)
cv2.circle(frame, cnt_centroid, 5, [255, 0, 255], -1)
if max_cont is not None:
hull = cv2.convexHull(max_cont, returnPoints=False)
defects = cv2.convexityDefects(max_cont, hull)
far_point = farthest_point(defects, max_cont, cnt_centroid)
print("Centroid : " + str(cnt_centroid) + ", farthest Point : " + str(far_point))
        # use farthest_point here to drive the mouse cursor
puntox=far_point[0]*(1920/640)
if (far_point[1])<=235:
puntoyf=(far_point[1]-(60*(1-((far_point[1]-60)/180))))*(1080/480)
elif (far_point[1])>=245:
puntoyf=(far_point[1]+(60*((far_point[1]-245)/175)))*(1080/480)
else:
puntoyf=(far_point[1])*(1080/480)
pyautogui.moveTo(puntox,puntoyf)
#######################################
cv2.circle(frame, far_point, 5, [0, 0, 255], -1)
if len(traverse_point) < 20:
traverse_point.append(far_point)
else:
traverse_point.pop(0)
traverse_point.append(far_point)
draw_circles(frame, traverse_point)
def main():
global hand_hist
is_hand_hist_created = False
capture = cv2.VideoCapture(0)
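    # Controls: press 'z' to sample the hand histogram from the drawn
    # rectangles, then the fingertip drives the mouse; press Esc to quit.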
while capture.isOpened():
pressed_key = cv2.waitKey(1)
_, frame = capture.read()
frame = cv2.flip(frame, 1)
if pressed_key & 0xFF == ord('z'):
is_hand_hist_created = True
hand_hist = hand_histogram(frame)
if is_hand_hist_created:
manage_image_opr(frame, hand_hist)
else:
frame = draw_rect(frame)
cv2.imshow("Live Feed", rescale_frame(frame))
if pressed_key == 27:
break
cv2.destroyAllWindows()
capture.release()
if __name__ == '__main__':
main()
|
[
"cv2.bitwise_and",
"numpy.argmax",
"cv2.rectangle",
"cv2.normalize",
"cv2.erode",
"cv2.convexityDefects",
"cv2.subtract",
"cv2.filter2D",
"cv2.dilate",
"cv2.cvtColor",
"cv2.calcBackProject",
"cv2.destroyAllWindows",
"cv2.resize",
"cv2.circle",
"cv2.waitKey",
"cv2.calcHist",
"cv2.convexHull",
"cv2.flip",
"cv2.merge",
"cv2.add",
"cv2.getStructuringElement",
"cv2.threshold",
"cv2.moments",
"numpy.zeros",
"cv2.VideoCapture",
"numpy.array",
"cv2.findContours",
"pyautogui.moveTo"
] |
[((364, 428), 'cv2.resize', 'cv2.resize', (['frame', '(width, height)'], {'interpolation': 'cv2.INTER_AREA'}), '(frame, (width, height), interpolation=cv2.INTER_AREA)\n', (374, 428), False, 'import cv2\n'), ((489, 538), 'cv2.cvtColor', 'cv2.cvtColor', (['hist_mask_image', 'cv2.COLOR_BGR2GRAY'], {}), '(hist_mask_image, cv2.COLOR_BGR2GRAY)\n', (501, 538), False, 'import cv2\n'), ((557, 603), 'cv2.threshold', 'cv2.threshold', (['gray_hist_mask_image', '(0)', '(255)', '(0)'], {}), '(gray_hist_mask_image, 0, 255, 0)\n', (570, 603), False, 'import cv2\n'), ((626, 690), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (642, 690), False, 'import cv2\n'), ((880, 1054), 'numpy.array', 'np.array', (['[6 * rows / 20, 6 * rows / 20, 6 * rows / 20, 9 * rows / 20, 9 * rows / 20,\n 9 * rows / 20, 12 * rows / 20, 12 * rows / 20, 12 * rows / 20]'], {'dtype': 'np.uint32'}), '([6 * rows / 20, 6 * rows / 20, 6 * rows / 20, 9 * rows / 20, 9 *\n rows / 20, 9 * rows / 20, 12 * rows / 20, 12 * rows / 20, 12 * rows / \n 20], dtype=np.uint32)\n', (888, 1054), True, 'import numpy as np\n'), ((1087, 1264), 'numpy.array', 'np.array', (['[9 * cols / 20, 10 * cols / 20, 11 * cols / 20, 9 * cols / 20, 10 * cols / \n 20, 11 * cols / 20, 9 * cols / 20, 10 * cols / 20, 11 * cols / 20]'], {'dtype': 'np.uint32'}), '([9 * cols / 20, 10 * cols / 20, 11 * cols / 20, 9 * cols / 20, 10 *\n cols / 20, 11 * cols / 20, 9 * cols / 20, 10 * cols / 20, 11 * cols / \n 20], dtype=np.uint32)\n', (1095, 1264), True, 'import numpy as np\n'), ((1680, 1718), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (1692, 1718), False, 'import cv2\n'), ((1729, 1773), 'numpy.zeros', 'np.zeros', (['[90, 10, 3]'], {'dtype': 'hsv_frame.dtype'}), '([90, 10, 3], dtype=hsv_frame.dtype)\n', (1737, 1773), True, 'import numpy as np\n'), ((2011, 2074), 'cv2.calcHist', 'cv2.calcHist', (['[roi]', '[0, 1]', 'None', '[180, 256]', '[0, 180, 0, 256]'], {}), '([roi], [0, 1], None, [180, 256], [0, 180, 0, 256])\n', (2023, 2074), False, 'import cv2\n'), ((2086, 2146), 'cv2.normalize', 'cv2.normalize', (['hand_hist', 'hand_hist', '(0)', '(255)', 'cv2.NORM_MINMAX'], {}), '(hand_hist, hand_hist, 0, 255, cv2.NORM_MINMAX)\n', (2099, 2146), False, 'import cv2\n'), ((2190, 2228), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (2202, 2228), False, 'import cv2\n'), ((2240, 2301), 'cv2.calcBackProject', 'cv2.calcBackProject', (['[hsv]', '[0, 1]', 'hist', '[0, 180, 0, 256]', '(1)'], {}), '([hsv], [0, 1], hist, [0, 180, 0, 256], 1)\n', (2259, 2301), False, 'import cv2\n'), ((2314, 2368), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(31, 31)'], {}), '(cv2.MORPH_ELLIPSE, (31, 31))\n', (2339, 2368), False, 'import cv2\n'), ((2373, 2405), 'cv2.filter2D', 'cv2.filter2D', (['dst', '(-1)', 'disc', 'dst'], {}), '(dst, -1, disc, dst)\n', (2385, 2405), False, 'import cv2\n'), ((2425, 2472), 'cv2.threshold', 'cv2.threshold', (['dst', '(150)', '(255)', 'cv2.THRESH_BINARY'], {}), '(dst, 150, 255, cv2.THRESH_BINARY)\n', (2438, 2472), False, 'import cv2\n'), ((2542, 2577), 'cv2.merge', 'cv2.merge', (['(thresh, thresh, thresh)'], {}), '((thresh, thresh, thresh))\n', (2551, 2577), False, 'import cv2\n'), ((2590, 2620), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'thresh'], {}), '(frame, thresh)\n', (2605, 2620), False, 'import cv2\n'), ((2663, 
2687), 'cv2.moments', 'cv2.moments', (['max_contour'], {}), '(max_contour)\n', (2674, 2687), False, 'import cv2\n'), ((3867, 3913), 'cv2.erode', 'cv2.erode', (['hist_mask_image', 'None'], {'iterations': '(2)'}), '(hist_mask_image, None, iterations=2)\n', (3876, 3913), False, 'import cv2\n'), ((3936, 3983), 'cv2.dilate', 'cv2.dilate', (['hist_mask_image', 'None'], {'iterations': '(2)'}), '(hist_mask_image, None, iterations=2)\n', (3946, 3983), False, 'import cv2\n'), ((4127, 4180), 'cv2.circle', 'cv2.circle', (['frame', 'cnt_centroid', '(5)', '[255, 0, 255]', '(-1)'], {}), '(frame, cnt_centroid, 5, [255, 0, 255], -1)\n', (4137, 4180), False, 'import cv2\n'), ((5300, 5319), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (5316, 5319), False, 'import cv2\n'), ((5829, 5852), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5850, 5852), False, 'import cv2\n'), ((1407, 1532), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(hand_rect_one_y[i], hand_rect_one_x[i])', '(hand_rect_two_y[i], hand_rect_two_x[i])', '(0, 255, 0)', '(1)'], {}), '(frame, (hand_rect_one_y[i], hand_rect_one_x[i]), (\n hand_rect_two_y[i], hand_rect_two_x[i]), (0, 255, 0), 1)\n', (1420, 1532), False, 'import cv2\n'), ((3037, 3085), 'numpy.array', 'np.array', (['contour[s][:, 0][:, 0]'], {'dtype': 'np.float'}), '(contour[s][:, 0][:, 0], dtype=np.float)\n', (3045, 3085), True, 'import numpy as np\n'), ((3098, 3146), 'numpy.array', 'np.array', (['contour[s][:, 0][:, 1]'], {'dtype': 'np.float'}), '(contour[s][:, 0][:, 1], dtype=np.float)\n', (3106, 3146), True, 'import numpy as np\n'), ((3301, 3316), 'numpy.argmax', 'np.argmax', (['dist'], {}), '(dist)\n', (3310, 3316), True, 'import numpy as np\n'), ((4226, 4270), 'cv2.convexHull', 'cv2.convexHull', (['max_cont'], {'returnPoints': '(False)'}), '(max_cont, returnPoints=False)\n', (4240, 4270), False, 'import cv2\n'), ((4289, 4325), 'cv2.convexityDefects', 'cv2.convexityDefects', (['max_cont', 'hull'], {}), '(max_cont, hull)\n', (4309, 4325), False, 'import cv2\n'), ((4860, 4893), 'pyautogui.moveTo', 'pyautogui.moveTo', (['puntox', 'puntoyf'], {}), '(puntox, puntoyf)\n', (4876, 4893), False, 'import pyautogui\n'), ((4949, 4997), 'cv2.circle', 'cv2.circle', (['frame', 'far_point', '(5)', '[0, 0, 255]', '(-1)'], {}), '(frame, far_point, 5, [0, 0, 255], -1)\n', (4959, 4997), False, 'import cv2\n'), ((5373, 5387), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5384, 5387), False, 'import cv2\n'), ((5438, 5456), 'cv2.flip', 'cv2.flip', (['frame', '(1)'], {}), '(frame, 1)\n', (5446, 5456), False, 'import cv2\n'), ((3169, 3188), 'cv2.subtract', 'cv2.subtract', (['x', 'cx'], {}), '(x, cx)\n', (3181, 3188), False, 'import cv2\n'), ((3214, 3233), 'cv2.subtract', 'cv2.subtract', (['y', 'cy'], {}), '(y, cy)\n', (3226, 3233), False, 'import cv2\n'), ((3262, 3277), 'cv2.add', 'cv2.add', (['xp', 'yp'], {}), '(xp, yp)\n', (3269, 3277), False, 'import cv2\n')]
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Softsign and SoftsignGrad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class SoftsignTest(test.TestCase):
def _npSoftsign(self, np_features):
return np_features / (1 + np.abs(np_features))
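  # _testSoftsign compares the nn_ops.softsign op against the NumPy reference
  # above, softsign(x) = x / (1 + |x|), on both CPU and GPU.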
def _testSoftsign(self, np_features, use_gpu=False):
np_softsign = self._npSoftsign(np_features)
with self.cached_session(use_gpu=use_gpu):
softsign = nn_ops.softsign(np_features)
tf_softsign = self.evaluate(softsign)
self.assertAllClose(np_softsign, tf_softsign)
self.assertShapeEqual(np_softsign, softsign)
def testNumbers(self):
for t in [np.float, np.double]:
self._testSoftsign(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=False)
self._testSoftsign(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=True)
def testGradient(self):
with self.cached_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
name="x")
y = nn_ops.softsign(x, name="softsign")
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], y, [2, 5], x_init_value=x_init)
print("softsign (float) gradient err = ", err)
self.assertLess(err, 1e-4)
def testNoInts(self):
with self.cached_session():
with self.assertRaisesRegexp(
TypeError,
"'features' has DataType int32 not in list of allowed values"):
nn_ops.softsign(constant_op.constant(7)).eval()
if __name__ == "__main__":
test.main()
|
[
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.gradient_checker.compute_gradient_error",
"numpy.abs",
"numpy.asarray",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.ops.nn_ops.softsign",
"numpy.array"
] |
[((2751, 2762), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (2760, 2762), False, 'from tensorflow.python.platform import test\n'), ((1416, 1444), 'tensorflow.python.ops.nn_ops.softsign', 'nn_ops.softsign', (['np_features'], {}), '(np_features)\n', (1431, 1444), False, 'from tensorflow.python.ops import nn_ops\n'), ((1962, 2068), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9]'], {'shape': '[2, 5]', 'name': '"""x"""'}), "([-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9\n ], shape=[2, 5], name='x')\n", (1982, 2068), False, 'from tensorflow.python.framework import constant_op\n'), ((2105, 2140), 'tensorflow.python.ops.nn_ops.softsign', 'nn_ops.softsign', (['x'], {'name': '"""softsign"""'}), "(x, name='softsign')\n", (2120, 2140), False, 'from tensorflow.python.ops import nn_ops\n'), ((2156, 2260), 'numpy.asarray', 'np.asarray', (['[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]]'], {'dtype': 'np.float32', 'order': '"""F"""'}), "([[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],\n dtype=np.float32, order='F')\n", (2166, 2260), True, 'import numpy as np\n'), ((2300, 2387), 'tensorflow.python.ops.gradient_checker.compute_gradient_error', 'gradient_checker.compute_gradient_error', (['x', '[2, 5]', 'y', '[2, 5]'], {'x_init_value': 'x_init'}), '(x, [2, 5], y, [2, 5], x_init_value=\n x_init)\n', (2339, 2387), False, 'from tensorflow.python.ops import gradient_checker\n'), ((1227, 1246), 'numpy.abs', 'np.abs', (['np_features'], {}), '(np_features)\n', (1233, 1246), True, 'import numpy as np\n'), ((1686, 1735), 'numpy.array', 'np.array', (['[[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]'], {}), '([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]])\n', (1694, 1735), True, 'import numpy as np\n'), ((1808, 1857), 'numpy.array', 'np.array', (['[[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]'], {}), '([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]])\n', (1816, 1857), True, 'import numpy as np\n'), ((2688, 2711), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(7)'], {}), '(7)\n', (2708, 2711), False, 'from tensorflow.python.framework import constant_op\n')]
|
#import cv2
import numpy as np
#import os
#import tensorflow as tf
#from tensorflow import keras
def calib_input(iter):
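    # This appears to be a calibration-input callback for post-training
    # quantization: it loads the pre-saved feature array, rescales it to [0, 1]
    # and returns a batch of 100 images keyed by the model's input tensor name.
    # ("features.npy" and "conv2d_input" come from the surrounding project.)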
X = np.load("features.npy") / 255.0
images = X[:100]
return {"conv2d_input": images}
|
[
"numpy.load"
] |
[((127, 150), 'numpy.load', 'np.load', (['"""features.npy"""'], {}), "('features.npy')\n", (134, 150), True, 'import numpy as np\n')]
|
import os
import time
import torch
import numpy as np
import utils
import logging
from options import *
from model.hidden import Hidden
from average_meter import AverageMeter
def train(model: Hidden,
device: torch.device,
hidden_config: HiDDenConfiguration,
train_options: TrainingOptions,
this_run_folder: str,
tb_logger):
"""
Trains the HiDDeN model
:param model: The model
    :param device: torch.device object, usually this is GPU (if available), otherwise CPU.
:param hidden_config: The network configuration
:param train_options: The training settings
:param this_run_folder: The parent folder for the current training run to store training artifacts/results/logs.
:param tb_logger: TensorBoardLogger object which is a thin wrapper for TensorboardX logger.
Pass None to disable TensorboardX logging
:return:
"""
train_data, val_data = utils.get_data_loaders(hidden_config, train_options)
file_count = len(train_data.dataset)
if file_count % train_options.batch_size == 0:
steps_in_epoch = file_count // train_options.batch_size
else:
steps_in_epoch = file_count // train_options.batch_size + 1
print_each = 10
images_to_save = 8
saved_images_size = (512, 512)
for epoch in range(train_options.start_epoch, train_options.number_of_epochs + 1):
logging.info('\nStarting epoch {}/{}'.format(epoch, train_options.number_of_epochs))
logging.info('Batch size = {}\nSteps in epoch = {}'.format(train_options.batch_size, steps_in_epoch))
losses_accu = {}
epoch_start = time.time()
step = 1
for image, _ in train_data:
image = image.to(device)
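            # Sample a fresh random binary message per batch; this is the
            # watermark payload the encoder must hide and the decoder recover.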
message = torch.Tensor(np.random.choice([0, 1], (image.shape[0], hidden_config.message_length))).to(device)
losses, _ = model.train_on_batch([image, message])
if not losses_accu: # dict is empty, initialize
for name in losses:
# losses_accu[name] = []
losses_accu[name] = AverageMeter()
for name, loss in losses.items():
losses_accu[name].update(loss)
if step % print_each == 0 or step == steps_in_epoch:
logging.info(
'Epoch: {}/{} Step: {}/{}'.format(epoch, train_options.number_of_epochs, step, steps_in_epoch))
utils.log_progress(losses_accu)
logging.info('-' * 40)
step += 1
train_duration = time.time() - epoch_start
logging.info('Epoch {} training duration {:.2f} sec'.format(epoch, train_duration))
logging.info('-' * 40)
utils.write_losses(os.path.join(this_run_folder, 'train.csv'), losses_accu, epoch, train_duration)
if tb_logger is not None:
tb_logger.save_losses(losses_accu, epoch)
tb_logger.save_grads(epoch)
tb_logger.save_tensors(epoch)
first_iteration = True
logging.info('Running validation for epoch {}/{}'.format(epoch, train_options.number_of_epochs))
for image, _ in val_data:
image = image.to(device)
message = torch.Tensor(np.random.choice([0, 1], (image.shape[0], hidden_config.message_length))).to(device)
losses, (encoded_images, noised_images, decoded_messages) = model.validate_on_batch([image, message])
if not losses_accu: # dict is empty, initialize
for name in losses:
losses_accu[name] = AverageMeter()
for name, loss in losses.items():
losses_accu[name].update(loss)
if first_iteration:
if hidden_config.enable_fp16:
image = image.float()
encoded_images = encoded_images.float()
utils.save_images(image.cpu()[:images_to_save, :, :, :],
encoded_images[:images_to_save, :, :, :].cpu(),
epoch,
os.path.join(this_run_folder, 'images'), resize_to=saved_images_size)
first_iteration = False
utils.log_progress(losses_accu)
logging.info('-' * 40)
utils.save_checkpoint(model, train_options.experiment_name, epoch, os.path.join(this_run_folder, 'checkpoints'))
utils.write_losses(os.path.join(this_run_folder, 'validation.csv'), losses_accu, epoch,
time.time() - epoch_start)
# if epoch % 10 == 0:
# sleep_sec = 5 * 60
# logging.info(f'\nSleeping for {sleep_sec} seconds to cool down the GPU\n')
# time.sleep(sleep_sec)
|
[
"utils.get_data_loaders",
"average_meter.AverageMeter",
"time.time",
"logging.info",
"numpy.random.choice",
"os.path.join",
"utils.log_progress"
] |
[((952, 1004), 'utils.get_data_loaders', 'utils.get_data_loaders', (['hidden_config', 'train_options'], {}), '(hidden_config, train_options)\n', (974, 1004), False, 'import utils\n'), ((1656, 1667), 'time.time', 'time.time', ([], {}), '()\n', (1665, 1667), False, 'import time\n'), ((2704, 2726), 'logging.info', 'logging.info', (["('-' * 40)"], {}), "('-' * 40)\n", (2716, 2726), False, 'import logging\n'), ((4221, 4252), 'utils.log_progress', 'utils.log_progress', (['losses_accu'], {}), '(losses_accu)\n', (4239, 4252), False, 'import utils\n'), ((4261, 4283), 'logging.info', 'logging.info', (["('-' * 40)"], {}), "('-' * 40)\n", (4273, 4283), False, 'import logging\n'), ((2578, 2589), 'time.time', 'time.time', ([], {}), '()\n', (2587, 2589), False, 'import time\n'), ((2754, 2796), 'os.path.join', 'os.path.join', (['this_run_folder', '"""train.csv"""'], {}), "(this_run_folder, 'train.csv')\n", (2766, 2796), False, 'import os\n'), ((4359, 4403), 'os.path.join', 'os.path.join', (['this_run_folder', '"""checkpoints"""'], {}), "(this_run_folder, 'checkpoints')\n", (4371, 4403), False, 'import os\n'), ((4432, 4479), 'os.path.join', 'os.path.join', (['this_run_folder', '"""validation.csv"""'], {}), "(this_run_folder, 'validation.csv')\n", (4444, 4479), False, 'import os\n'), ((2459, 2490), 'utils.log_progress', 'utils.log_progress', (['losses_accu'], {}), '(losses_accu)\n', (2477, 2490), False, 'import utils\n'), ((2507, 2529), 'logging.info', 'logging.info', (["('-' * 40)"], {}), "('-' * 40)\n", (2519, 2529), False, 'import logging\n'), ((4528, 4539), 'time.time', 'time.time', ([], {}), '()\n', (4537, 4539), False, 'import time\n'), ((2123, 2137), 'average_meter.AverageMeter', 'AverageMeter', ([], {}), '()\n', (2135, 2137), False, 'from average_meter import AverageMeter\n'), ((3584, 3598), 'average_meter.AverageMeter', 'AverageMeter', ([], {}), '()\n', (3596, 3598), False, 'from average_meter import AverageMeter\n'), ((4102, 4141), 'os.path.join', 'os.path.join', (['this_run_folder', '"""images"""'], {}), "(this_run_folder, 'images')\n", (4114, 4141), False, 'import os\n'), ((1793, 1865), 'numpy.random.choice', 'np.random.choice', (['[0, 1]', '(image.shape[0], hidden_config.message_length)'], {}), '([0, 1], (image.shape[0], hidden_config.message_length))\n', (1809, 1865), True, 'import numpy as np\n'), ((3248, 3320), 'numpy.random.choice', 'np.random.choice', (['[0, 1]', '(image.shape[0], hidden_config.message_length)'], {}), '([0, 1], (image.shape[0], hidden_config.message_length))\n', (3264, 3320), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 14 14:28:11 2018
@author: <NAME>
"""
import numpy as np
import numpy.random as rd
import argparse
from collections import deque
import pickle
import os
from ddpg import Actor, Critic
from make_env import make_env
import torch
dtype = torch.float
device = torch.device("cuda")
def ornsteinUhlenbeck(x_prev, mu,
sigma = 0.3, theta = 0.15, dt = 0.01):
mu = np.zeros_like(x_prev)
n = np.size(x_prev)
x = x_prev + theta*(mu - x_prev)*dt + sigma*np.sqrt(dt)*rd.normal(0, 1, n)
return x
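# Note: this is the Euler-Maruyama discretisation of an Ornstein-Uhlenbeck
# process, used as temporally correlated exploration noise; mu is overwritten
# with zeros inside, so the mu argument has no effect as written.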
def sample(buffer, N):
if len(buffer) <= N:
return buffer
else:
idx = rd.choice(len(buffer), N, replace = False)
sample = []
for i in range(N):
sample.append(buffer[idx[i]])
return sample
def episode(n_episodes, buffer_size, N, learn, render, x0, mu, sigma, theta, dt,
alpha, gamma, tau, init_actors = None, init_critics = None):
actors, critics = [], []
for i in range(env.n):
if init_actors is not None:
actors = init_actors
critics = init_critics
else:
actors.append(Actor(env.observation_space[i].shape[0], env.action_space[i].n))
critics.append(Critic(env.observation_space[i].shape[0], env.action_space[i].n, actors[i]))
replay_buffer = deque()
evolution = []
for ep in range(n_episodes):
noise = x0
state = env.reset()
ep_rewards = np.zeros(env.n)
step_count = 0
done = np.array([False] * 4)
while (not any(done) and step_count < 1000):
if render:
env.render()
###Choose an action and go to next state
actions = []
for i in range(env.n):
noise = ornsteinUhlenbeck(noise, mu, sigma, theta, dt)
action = actors[i].forwardPass(state[i]).detach().numpy()
actions.append(np.clip(action + noise, -2, 2))
next_state, rewards, done, _ = env.step(actions)
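            # Reward shaping: agents whose episode is flagged as done receive
            # an extra -500 penalty on top of the environment reward.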
rewards = np.asarray(rewards) - 500*np.asarray(done)
ep_rewards += rewards
if learn:
###Store in the replay buffer
replay_buffer.append(np.array([state, actions, rewards, next_state]))
if len(replay_buffer)>buffer_size:
replay_buffer.popleft()
###Sample a minibatch from the buffer
minibatch = sample(replay_buffer, N)
###Learn from this minibatch
for i in range(env.n):
critics[i].learn(minibatch, i)
actors[i].learn(minibatch, i)
###Prepare for next step
step_count +=1
state = next_state
ep_rewards /= step_count
print("Episode " + str(ep) + " : " + str(ep_rewards) + " in " + str(step_count) + " steps")
evolution.append((ep_rewards, step_count))
return actors, critics, evolution
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--env', default='simple_tag_guided', type=str)
parser.add_argument('--n_episodes', default=5000, type=int)
parser.add_argument ('--learn', default=True, type=bool)
parser.add_argument ('--render', default=False, type=bool)
parser.add_argument ('--buffer_size', default=1000, type=int)
parser.add_argument ('--minibatch_size', default=32, type=int)
parser.add_argument ('--alpha', default=0.001, type=float)
parser.add_argument ('--gamma', default=0.9, type=float)
parser.add_argument ('--tau', default=0.01, type=float)
parser.add_argument ('--ou_x0', default=0, type=float)
parser.add_argument ('--ou_mu', default=0, type=float)
parser.add_argument ('--ou_sigma', default=0.3, type=float)
parser.add_argument ('--ou_theta', default=0.15, type=float)
parser.add_argument ('--ou_dt', default=0.01, type=float)
args = parser.parse_args()
env = make_env(args.env)
actors, critics, evolution = episode(n_episodes = args.n_episodes,
buffer_size = args.buffer_size,
N = args.minibatch_size, learn = args.learn, render = args.render,
x0 = args.ou_x0 * np.ones(env.action_space[0].n),
mu = args.ou_mu * np.ones(env.action_space[0].n),
sigma = args.ou_sigma, theta = args.ou_theta, dt = args.ou_dt,
alpha = args.alpha, gamma = args.gamma, tau = args.tau)
pickle.dump(actors, open('actors','wb'))
pickle.dump(critics, open('critics','wb'))
pickle.dump(evolution, open('evolution','wb'))
print(os.getcwd())
|
[
"numpy.size",
"numpy.zeros_like",
"make_env.make_env",
"argparse.ArgumentParser",
"os.getcwd",
"numpy.asarray",
"numpy.zeros",
"numpy.ones",
"numpy.clip",
"numpy.array",
"numpy.random.normal",
"torch.device",
"ddpg.Critic",
"ddpg.Actor",
"collections.deque",
"numpy.sqrt"
] |
[((307, 327), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (319, 327), False, 'import torch\n'), ((443, 464), 'numpy.zeros_like', 'np.zeros_like', (['x_prev'], {}), '(x_prev)\n', (456, 464), True, 'import numpy as np\n'), ((477, 492), 'numpy.size', 'np.size', (['x_prev'], {}), '(x_prev)\n', (484, 492), True, 'import numpy as np\n'), ((1429, 1436), 'collections.deque', 'deque', ([], {}), '()\n', (1434, 1436), False, 'from collections import deque\n'), ((3270, 3295), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3293, 3295), False, 'import argparse\n'), ((4229, 4247), 'make_env.make_env', 'make_env', (['args.env'], {}), '(args.env)\n', (4237, 4247), False, 'from make_env import make_env\n'), ((1589, 1604), 'numpy.zeros', 'np.zeros', (['env.n'], {}), '(env.n)\n', (1597, 1604), True, 'import numpy as np\n'), ((1643, 1664), 'numpy.array', 'np.array', (['([False] * 4)'], {}), '([False] * 4)\n', (1651, 1664), True, 'import numpy as np\n'), ((4909, 4920), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4918, 4920), False, 'import os\n'), ((557, 575), 'numpy.random.normal', 'rd.normal', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (566, 575), True, 'import numpy.random as rd\n'), ((545, 556), 'numpy.sqrt', 'np.sqrt', (['dt'], {}), '(dt)\n', (552, 556), True, 'import numpy as np\n'), ((1235, 1298), 'ddpg.Actor', 'Actor', (['env.observation_space[i].shape[0]', 'env.action_space[i].n'], {}), '(env.observation_space[i].shape[0], env.action_space[i].n)\n', (1240, 1298), False, 'from ddpg import Actor, Critic\n'), ((1327, 1402), 'ddpg.Critic', 'Critic', (['env.observation_space[i].shape[0]', 'env.action_space[i].n', 'actors[i]'], {}), '(env.observation_space[i].shape[0], env.action_space[i].n, actors[i])\n', (1333, 1402), False, 'from ddpg import Actor, Critic\n'), ((2196, 2215), 'numpy.asarray', 'np.asarray', (['rewards'], {}), '(rewards)\n', (2206, 2215), True, 'import numpy as np\n'), ((4514, 4544), 'numpy.ones', 'np.ones', (['env.action_space[0].n'], {}), '(env.action_space[0].n)\n', (4521, 4544), True, 'import numpy as np\n'), ((4576, 4606), 'numpy.ones', 'np.ones', (['env.action_space[0].n'], {}), '(env.action_space[0].n)\n', (4583, 4606), True, 'import numpy as np\n'), ((2081, 2111), 'numpy.clip', 'np.clip', (['(action + noise)', '(-2)', '(2)'], {}), '(action + noise, -2, 2)\n', (2088, 2111), True, 'import numpy as np\n'), ((2222, 2238), 'numpy.asarray', 'np.asarray', (['done'], {}), '(done)\n', (2232, 2238), True, 'import numpy as np\n'), ((2391, 2438), 'numpy.array', 'np.array', (['[state, actions, rewards, next_state]'], {}), '([state, actions, rewards, next_state])\n', (2399, 2438), True, 'import numpy as np\n')]
|
import numpy as np
from yaml import safe_load
import pandas as pd
import glob
from cricscraper.cricinfo import CricInfo
from cricscraper.matchinfo import MatchInfo
class CricSheet:
innings_name = ["1st innings", "2nd innings", "3rd innings", "4th innings"]
def __init__(self, files=None, folder=None):
if folder:
self.files = glob.glob("{}/*.yaml".format(folder))
else:
self.files = files
self.dataFrame = pd.DataFrame()
self.__parser()
@staticmethod
def __get_fielders(wicket):
if wicket != 0:
try:
return ", ".join(wicket.get('fielders'))
except:
return None
return None
def __parser(self):
ordered_columns = ['match id', 'inning', 'delivery', 'over', 'batsman', 'non striker', 'bowler', 'runs off bat', 'extras', 'total', 'extra kind', 'wicket kind', 'player out', 'fielders', 'team1', 'team2', 'outcome', 'winner', 'by', 'win amount', 'player of match','toss winner', 'toss decision', 'match type', 'venue', 'city', 'gender', 'umpire1','umpire2']
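        # Parsing flow: load each YAML scorecard, walk its innings, flatten the
        # per-delivery dicts into columns (runs, extras, wickets, ...), attach
        # the match metadata from MatchInfo, and append to the output DataFrame.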
for filename in self.files:
with open(filename) as f_input:
data = safe_load(f_input)
innings = data['innings']
for i in range(len(innings)):
dict_innings = {}
try:
inning = innings[i][CricSheet.innings_name[i]]['deliveries']
except:
continue
dict_innings["inning"] = np.ones(len(inning), dtype=int) * (i+1)
dict_innings['delivery'] = [delivery for ball in inning for delivery in ball]
dict_innings['batsman'] = [list(ball.values())[0].get("batsman") for ball in inning]
dict_innings['non striker'] = [list(ball.values())[0].get("non_striker") for ball in inning]
dict_innings['bowler'] = [list(ball.values())[0].get("bowler") for ball in inning]
dict_innings["runs"] = [list(ball.values())[0].get('runs') for ball in inning]
dict_innings["wicket"] = [list(ball.values())[0].get('wicket', 0) for ball in inning]
dict_innings['extra kind1'] = [list(ball.values())[0].get('extras', 0) for ball in inning]
frame = pd.DataFrame(dict_innings)
dict_innings['runs off bat'] = frame['runs'].apply(lambda x: x.get('batsman'))
dict_innings['extras'] = frame['runs'].apply(lambda x: x.get('extras'))
dict_innings['total'] = frame['runs'].apply(lambda x: x.get('total')).cumsum()
dict_innings['extra kind'] = frame['extra kind1'].apply(lambda x: next(iter(x.keys())) if x != 0 else None)
dict_innings['over'] = frame['delivery'].apply(lambda x: np.ceil(x))
def fn(x):
try:
return x.get('kind') if x != 0 else None
except:
return None
def fn1(x):
try:
return x.get('player_out') if x != 0 else None
except:
return None
dict_innings['wicket kind'] = frame.wicket.apply(fn)
dict_innings['player out'] = frame.wicket.apply(fn1)
dict_innings['fielders'] = frame.wicket.apply(CricSheet.__get_fielders)
# get match info from Info class
match_info = MatchInfo(data["info"])
assign_info = match_info.dict_info()
assign_info['match id'] = int(filename.split('.')[0].split('/')[-1])
frame = pd.DataFrame(dict_innings).assign(**assign_info)
frame.drop(["runs", "wicket", "extra kind1"], axis=1, inplace=True)
self.dataFrame = pd.concat([self.dataFrame, frame])
self.dataFrame.reset_index(inplace=True, drop=True)
self.dataFrame = self.dataFrame[ordered_columns]
def view(self):
'''
        Returns the parsed deliveries DataFrame.
        The DataFrame can be used directly for further analysis.
'''
return self.dataFrame
def save(self, filename="output"):
'''
Saves the converted csv file.
Parameter:
filename (string): name of the output csv file
optional: True
default: "output.csv"
'''
if filename.endswith(".csv"):
filename = filename.replace('.csv', '')
filename += ".csv"
print("File saved - {}".format(filename))
return self.dataFrame.to_csv(filename)
def get_more_info(self):
'''Returns dictionary of CricInfo object'''
data = {}
for file in self.files:
match_id = int(file.split('.')[0].split('/')[-1])
data[str(match_id)] = CricInfo(match_id)
return data
|
[
"pandas.DataFrame",
"cricscraper.matchinfo.MatchInfo",
"numpy.ceil",
"cricscraper.cricinfo.CricInfo",
"yaml.safe_load",
"pandas.concat"
] |
[((429, 443), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (441, 443), True, 'import pandas as pd\n'), ((4120, 4138), 'cricscraper.cricinfo.CricInfo', 'CricInfo', (['match_id'], {}), '(match_id)\n', (4128, 4138), False, 'from cricscraper.cricinfo import CricInfo\n'), ((1086, 1104), 'yaml.safe_load', 'safe_load', (['f_input'], {}), '(f_input)\n', (1095, 1104), False, 'from yaml import safe_load\n'), ((2014, 2040), 'pandas.DataFrame', 'pd.DataFrame', (['dict_innings'], {}), '(dict_innings)\n', (2026, 2040), True, 'import pandas as pd\n'), ((2962, 2985), 'cricscraper.matchinfo.MatchInfo', 'MatchInfo', (["data['info']"], {}), "(data['info'])\n", (2971, 2985), False, 'from cricscraper.matchinfo import MatchInfo\n'), ((3267, 3301), 'pandas.concat', 'pd.concat', (['[self.dataFrame, frame]'], {}), '([self.dataFrame, frame])\n', (3276, 3301), True, 'import pandas as pd\n'), ((2461, 2471), 'numpy.ceil', 'np.ceil', (['x'], {}), '(x)\n', (2468, 2471), True, 'import numpy as np\n'), ((3121, 3147), 'pandas.DataFrame', 'pd.DataFrame', (['dict_innings'], {}), '(dict_innings)\n', (3133, 3147), True, 'import pandas as pd\n')]
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import math
import numpy as np
from openvino.tools.mo.ops.ONNXResize10 import ONNXResize10
from openvino.tools.mo.ops.upsample import UpsampleOp
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr, get_onnx_opset_version
from openvino.tools.mo.utils.error import Error
class UpsampleFrontExtractor(FrontExtractorOp):
op = 'Upsample'
enabled = True
@classmethod
def extract(cls, node):
onnx_opset_version = get_onnx_opset_version(node)
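        # For opset >= 9 the Upsample op carries its scales as an input, so the
        # node is converted via ONNXResize10; older opsets read the 'scales' /
        # 'width_scale' / 'height_scale' attributes and map to UpsampleOp.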
if onnx_opset_version is not None and onnx_opset_version >= 9:
mode = onnx_attr(node, 'mode', 's', default='nearest', dst_type=lambda x: x.decode())
ONNXResize10.update_node_stat(node, {'mode': mode})
else:
mode = onnx_attr(node, 'mode', 's', default='nearest', dst_type=lambda x: x.decode())
scales = onnx_attr(node, 'scales', 'floats', dst_type=lambda x: np.array(x, dtype=np.float32))
width_scale = onnx_attr(node, 'width_scale', 'f')
height_scale = onnx_attr(node, 'height_scale', 'f')
supported_modes = ['nearest', 'linear']
if mode not in supported_modes:
raise Error(
'Error decoding Upsample node {}, mode = {} is not in the list of supported modes {}.',
node.name,
mode,
supported_modes
)
if scales is not None:
if scales.shape != (4,):
raise Error(
'Upsample scales attribute is wrong for node {}. Only 4D scales are supported.',
node.name
)
if math.fabs(scales[0] - 1) > 1e-5 or math.fabs(scales[1] - 1) > 1e-5:
raise Error(
'Upsampling of batch and feature dimensions is not supported for node {}.',
node.name
)
height_scale = scales[2]
width_scale = scales[3]
if (width_scale is None or height_scale is None) and len(node.in_nodes()) != 2:
raise Error(
'One/both of widths_scale = {} and height_scale = {} is not defined for Upsample node {}.',
width_scale,
height_scale,
node.name
)
UpsampleOp.update_node_stat(node, {'mode': mode, 'height_scale': height_scale,
'width_scale': width_scale})
return cls.enabled
|
[
"openvino.tools.mo.front.onnx.extractors.utils.onnx_attr",
"openvino.tools.mo.front.onnx.extractors.utils.get_onnx_opset_version",
"math.fabs",
"openvino.tools.mo.ops.upsample.UpsampleOp.update_node_stat",
"openvino.tools.mo.utils.error.Error",
"numpy.array",
"openvino.tools.mo.ops.ONNXResize10.ONNXResize10.update_node_stat"
] |
[((597, 625), 'openvino.tools.mo.front.onnx.extractors.utils.get_onnx_opset_version', 'get_onnx_opset_version', (['node'], {}), '(node)\n', (619, 625), False, 'from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr, get_onnx_opset_version\n'), ((807, 858), 'openvino.tools.mo.ops.ONNXResize10.ONNXResize10.update_node_stat', 'ONNXResize10.update_node_stat', (['node', "{'mode': mode}"], {}), "(node, {'mode': mode})\n", (836, 858), False, 'from openvino.tools.mo.ops.ONNXResize10 import ONNXResize10\n'), ((1104, 1139), 'openvino.tools.mo.front.onnx.extractors.utils.onnx_attr', 'onnx_attr', (['node', '"""width_scale"""', '"""f"""'], {}), "(node, 'width_scale', 'f')\n", (1113, 1139), False, 'from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr, get_onnx_opset_version\n'), ((1167, 1203), 'openvino.tools.mo.front.onnx.extractors.utils.onnx_attr', 'onnx_attr', (['node', '"""height_scale"""', '"""f"""'], {}), "(node, 'height_scale', 'f')\n", (1176, 1203), False, 'from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr, get_onnx_opset_version\n'), ((2539, 2650), 'openvino.tools.mo.ops.upsample.UpsampleOp.update_node_stat', 'UpsampleOp.update_node_stat', (['node', "{'mode': mode, 'height_scale': height_scale, 'width_scale': width_scale}"], {}), "(node, {'mode': mode, 'height_scale':\n height_scale, 'width_scale': width_scale})\n", (2566, 2650), False, 'from openvino.tools.mo.ops.upsample import UpsampleOp\n'), ((1323, 1460), 'openvino.tools.mo.utils.error.Error', 'Error', (['"""Error decoding Upsample node {}, mode = {} is not in the list of supported modes {}."""', 'node.name', 'mode', 'supported_modes'], {}), "(\n 'Error decoding Upsample node {}, mode = {} is not in the list of supported modes {}.'\n , node.name, mode, supported_modes)\n", (1328, 1460), False, 'from openvino.tools.mo.utils.error import Error\n'), ((2292, 2437), 'openvino.tools.mo.utils.error.Error', 'Error', (['"""One/both of widths_scale = {} and height_scale = {} is not defined for Upsample node {}."""', 'width_scale', 'height_scale', 'node.name'], {}), "(\n 'One/both of widths_scale = {} and height_scale = {} is not defined for Upsample node {}.'\n , width_scale, height_scale, node.name)\n", (2297, 2437), False, 'from openvino.tools.mo.utils.error import Error\n'), ((1652, 1759), 'openvino.tools.mo.utils.error.Error', 'Error', (['"""Upsample scales attribute is wrong for node {}. Only 4D scales are supported."""', 'node.name'], {}), "(\n 'Upsample scales attribute is wrong for node {}. Only 4D scales are supported.'\n , node.name)\n", (1657, 1759), False, 'from openvino.tools.mo.utils.error import Error\n'), ((1933, 2034), 'openvino.tools.mo.utils.error.Error', 'Error', (['"""Upsampling of batch and feature dimensions is not supported for node {}."""', 'node.name'], {}), "(\n 'Upsampling of batch and feature dimensions is not supported for node {}.',\n node.name)\n", (1938, 2034), False, 'from openvino.tools.mo.utils.error import Error\n'), ((1047, 1076), 'numpy.array', 'np.array', (['x'], {'dtype': 'np.float32'}), '(x, dtype=np.float32)\n', (1055, 1076), True, 'import numpy as np\n'), ((1839, 1863), 'math.fabs', 'math.fabs', (['(scales[0] - 1)'], {}), '(scales[0] - 1)\n', (1848, 1863), False, 'import math\n'), ((1874, 1898), 'math.fabs', 'math.fabs', (['(scales[1] - 1)'], {}), '(scales[1] - 1)\n', (1883, 1898), False, 'import math\n')]
|
# encoding: utf-8
from nose.tools import *
import numpy as np
from cmpy.inference import standardize_data
from cmpy import machines
from ..canonical import tmatrix
from ..counts import path_counts, out_arrays
def test_path_counts1():
# Test without state_path
m = machines.Even()
delta, nodes, symbols = tmatrix(m)
prng = np.random.RandomState()
prng.seed(0)
d = m.symbols(20, prng=prng)
d = standardize_data(d)
counts, final, states = path_counts(delta, d)
counts_ = [[[4, 8], [0, 8]], [[0, 4], [1, 4]]]
assert_equal(counts.tolist(), counts_)
final_ = [0, -1]
assert_equal(final.tolist(), final_)
assert_equal(states, None)
def test_path_counts2():
# Test with node_path
m = machines.Even()
delta, nodes, symbols = tmatrix(m)
prng = np.random.RandomState()
prng.seed(0)
d = m.symbols(20, prng=prng)
d = standardize_data(d)
counts, final, states = path_counts(delta, d, node_path=True)
counts_ = [[[4, 8], [0, 8]], [[0, 4], [1, 4]]]
assert_equal(counts.tolist(), counts_)
final_ = [0, -1]
assert_equal(final.tolist(), final_)
states_ = [[0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0],
[1, 0, 1, 0, 1, 0, 1, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
assert_equal(states.tolist(), states_)
def test_path_counts3():
# Test with node_path and preallocated arrays
m = machines.Even()
delta, nodes, symbols = tmatrix(m)
prng = np.random.RandomState()
prng.seed(0)
d = m.symbols(20, prng=prng)
d = standardize_data(d)
counts, final, states = out_arrays(2, 2, 20, node_path=True)
path_counts(delta, d, node_path=True, out_arrays=(counts, final, states))
counts_ = [[[4, 8], [0, 8]], [[0, 4], [1, 4]]]
assert_equal(counts.tolist(), counts_)
final_ = [0, -1]
assert_equal(final.tolist(), final_)
states_ = [[0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0],
[1, 0, 1, 0, 1, 0, 1, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
assert_equal(states.tolist(), states_)
|
[
"cmpy.inference.standardize_data",
"numpy.random.RandomState",
"cmpy.machines.Even"
] |
[((277, 292), 'cmpy.machines.Even', 'machines.Even', ([], {}), '()\n', (290, 292), False, 'from cmpy import machines\n'), ((344, 367), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (365, 367), True, 'import numpy as np\n'), ((426, 445), 'cmpy.inference.standardize_data', 'standardize_data', (['d'], {}), '(d)\n', (442, 445), False, 'from cmpy.inference import standardize_data\n'), ((747, 762), 'cmpy.machines.Even', 'machines.Even', ([], {}), '()\n', (760, 762), False, 'from cmpy import machines\n'), ((814, 837), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (835, 837), True, 'import numpy as np\n'), ((896, 915), 'cmpy.inference.standardize_data', 'standardize_data', (['d'], {}), '(d)\n', (912, 915), False, 'from cmpy.inference import standardize_data\n'), ((1430, 1445), 'cmpy.machines.Even', 'machines.Even', ([], {}), '()\n', (1443, 1445), False, 'from cmpy import machines\n'), ((1497, 1520), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (1518, 1520), True, 'import numpy as np\n'), ((1579, 1598), 'cmpy.inference.standardize_data', 'standardize_data', (['d'], {}), '(d)\n', (1595, 1598), False, 'from cmpy.inference import standardize_data\n')]
|
import sys
import gurobipy
import math
import numpy as np
import time
# Read the solution file and return a list of cap centers
# solutionFilePath = path to the solution file (string)
# n = dimension of the ball (int, >= 1)
def readSolution(solutionFilePath, n=3):
solution = []
try:
        # Open the solution file and read the solution
with open(solutionFilePath) as solutionFile:
            # Read the solution file line by line,
            # convert each line to a coordinate tuple,
            # and store the coordinate tuples in the list solution
for line in solutionFile:
entries = line.split(";")
try:
solution.append(tuple(entries[i] for i in range(n)))
except:
print(f"Ungültige Zeile: {line}")
except:
print(f"Konnte die Datei {solutionFilePath} nicht öffnen.")
sys.exit(-1)
return solution
# Check whether the caps centered at the given list of center points
# cover the sphere completely
# solution = list of the cap centers (List[tuple(n)])
# alpha = opening angle of the caps (float, >= 0, <= 360)
# n = dimension of the ball (int, >= 1)
def checkSolution(solution, alpha, n=3, printing=True):
    # Create a Gurobi model to check
    # whether the covering is complete
    # (assumption: the caps are "open")
model = gurobipy.Model()
    # Disable the Gurobi output
model.setParam("OutputFlag", 0)
    # Enable the non-convex solver
model.setParam("NonConvex", 2)
    # Create the variables and constraints
    # The y variables encode the uncovered point we are looking for
y = {}
for i in range(n):
y[i] = model.addVar(
lb=-gurobipy.GRB.INFINITY,
ub=gurobipy.GRB.INFINITY,
vtype=gurobipy.GRB.CONTINUOUS,
name=f"y{i}",
)
    # The point must lie on the sphere, i.e. have a 2-norm of value 1.
model.addQConstr(gurobipy.quicksum(y[i] * y[i] for i in range(n)) == 1, "Norm")
    # The point must not be covered by any cap of the given solution
for j in range(len(solution)):
x = solution[j]
model.addConstr(
gurobipy.quicksum(x[i] * y[i] for i in range(n))
<= math.cos((0.5 * alpha) / 180 * math.pi),
f"Angle{j}",
)
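    # The constraints above encode coverage: a point y on the sphere lies inside
    # the open cap centred at x with opening angle alpha iff the angle between
    # x and y is smaller than alpha/2, i.e. x . y > cos(alpha/2 in radians), so
    # requiring x . y <= cos(alpha/2) for every cap makes any feasible y an
    # uncovered point.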
    # Write out an LP file for debugging
# model.write("Lösung.lp")
    # Solve the model and decide, based on the feasibility status,
    # whether the covering is complete
model.optimize()
if model.status == 2:
if printing:
print(
"Die Überdeckung ist nicht vollständig.\nDer folgende Punkt ist nicht überdeckt:"
)
arr = np.array([0.0, 0.0, 0.0])
for i in range(n):
if printing:
print(f"y{i} = ", y[i].X)
arr[i] = y[i].X
return arr
else:
print("Die Überdeckung ist vollständig.")
if __name__ == "__main__":
try:
        # Read the path to the solution file
solutionFilePath = sys.argv[1]
        # Read the opening angle of the caps
alpha = float(sys.argv[2])
    # If no valid parameters were passed,
    # print the usage hint and exit the program
except:
print("Verwendung: ./checker.py {Lösungsdatei} {Öffnungswinkel}")
sys.exit(-1)
    # Read the solution
solution = readSolution(solutionFilePath)
    # Check the solution
checkSolution(solution, alpha)
# Check whether the caps centered at the given list of center points
# cover the sphere completely
# Then keep collecting uncovered points until a complete covering is reached
# solution = list of the cap centers (List[tuple(n)])
# alpha = opening angle of the caps (float, >= 0, <= 360)
# n = dimension of the ball (int, >= 1)
def collect_missing(solution, alpha, n=3, printer=None):
    # Create a Gurobi model to check
    # whether the covering is complete
    # (assumption: the caps are "open")
model = gurobipy.Model()
    # Disable the Gurobi output
model.setParam("OutputFlag", 0)
    # Enable the non-convex solver
model.setParam("NonConvex", 2)
    # Create the variables and constraints
    # The y variables encode the uncovered point we are looking for
y = {}
for i in range(n):
y[i] = model.addVar(
lb=-gurobipy.GRB.INFINITY,
ub=gurobipy.GRB.INFINITY,
vtype=gurobipy.GRB.CONTINUOUS,
name=f"y{i}",
)
    # The point must lie on the sphere, i.e. have a 2-norm of value 1.
model.addQConstr(gurobipy.quicksum(y[i] * y[i] for i in range(n)) == 1, "Norm")
    # The point must not be covered by any cap of the given solution
for j in range(len(solution)):
x = solution[j]
model.addConstr(
gurobipy.quicksum(x[i] * y[i] for i in range(n))
<= math.cos((0.5 * alpha) / 180 * math.pi),
f"Angle{j}",
)
added = []
    # beginning of the do-while loop
starttime = time.time()
model.optimize()
while model.status == 2:
x = np.array([0.0, 0.0, 0.0])
for i in range(n):
x[i] = y[i].X
added.append(x)
model.addConstr(
gurobipy.quicksum(x[i] * y[i] for i in range(n))
<= math.cos((0.5 * alpha) / 180 * math.pi),
f"Angle{j}",
)
if printer is not None:
printed = printer(len(added), len(solution) + len(added), time.time() - starttime)
if printed:
starttime = time.time()
pass
# do while
model.optimize()
pass
return added
|
[
"gurobipy.Model",
"time.time",
"numpy.array",
"math.cos",
"sys.exit"
] |
[((1451, 1467), 'gurobipy.Model', 'gurobipy.Model', ([], {}), '()\n', (1465, 1467), False, 'import gurobipy\n'), ((4200, 4216), 'gurobipy.Model', 'gurobipy.Model', ([], {}), '()\n', (4214, 4216), False, 'import gurobipy\n'), ((5245, 5256), 'time.time', 'time.time', ([], {}), '()\n', (5254, 5256), False, 'import time\n'), ((2838, 2863), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (2846, 2863), True, 'import numpy as np\n'), ((5321, 5346), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (5329, 5346), True, 'import numpy as np\n'), ((936, 948), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (944, 948), False, 'import sys\n'), ((3485, 3497), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (3493, 3497), False, 'import sys\n'), ((2360, 2397), 'math.cos', 'math.cos', (['(0.5 * alpha / 180 * math.pi)'], {}), '(0.5 * alpha / 180 * math.pi)\n', (2368, 2397), False, 'import math\n'), ((5109, 5146), 'math.cos', 'math.cos', (['(0.5 * alpha / 180 * math.pi)'], {}), '(0.5 * alpha / 180 * math.pi)\n', (5117, 5146), False, 'import math\n'), ((5525, 5562), 'math.cos', 'math.cos', (['(0.5 * alpha / 180 * math.pi)'], {}), '(0.5 * alpha / 180 * math.pi)\n', (5533, 5562), False, 'import math\n'), ((5781, 5792), 'time.time', 'time.time', ([], {}), '()\n', (5790, 5792), False, 'import time\n'), ((5704, 5715), 'time.time', 'time.time', ([], {}), '()\n', (5713, 5715), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
import cv2, glob
import numpy as np
import pandas as pd
from os import path
from math import isnan
from sklearn.metrics.pairwise import euclidean_distances
from JPP_precision import load_JPP_ply
from Modules.utils import get_parameter, get_args, figure_disappears, enum_test_files
from Modules.features_labels import make_labels
from Modules.coordinate_conversion import project_point_cloud
def make_ground_truth(test_filename):
n_joints = 19
ground_truth = np.ones((n_joints, 2))
label_img = cv2.imread("%s.png" % test_filename)[:, :, :3][:, :, ::-1]
label_array = make_labels(label_img).reshape(label_img.shape[:2])
parts2joints_map = np.array((0, 0, 0, 0, 1, 2, 2, 3, 3, 4, 5, 18, 18, 18, 18, 6, 7, 8, 9, 10, 11, 18, 18, 18, 18, 12, 13, 14, 15, 16, 17, 18))
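    # The 32 body-part labels are collapsed into 19 groups (index 18 = unused
    # parts); each joint's ground-truth 2-D position is the mean pixel
    # coordinate of all pixels carrying a label of that group, and the final
    # (unused) row is dropped by the [:-1] below.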
for j in range(n_joints):
ground_truth[j, :] = np.mean(np.array(np.where(parts2joints_map[label_array] == j)), axis=1)
return ground_truth[:-1, :]
def JPP_precision():
args = get_args()
discr_setting_type = args.discr_setting_type
num_train_images = args.n_train_images
data_path = args.data_path
jpp_path = data_path + "Main/JointPositionPrediction/"
jpp_gt_path = jpp_path + "GroundTruth/"
jpp_out_path = jpp_path + "Output/"
eval_path = jpp_path + "Evaluation/"
test_path = args.test_path
n_test_images = args.n_test_images
device = "Kinect" if "SyntheticImages" in test_path else "Xtion"
target_joint_names = ["Head", "neck", "Chest", "Waist",
"rShoulder", "lShoulder", "rElbow", "lElbow", "rWrist", "lWrist", "rHand", "lHand",
"rKnee", "lKnee", "rAnkle", "lAnkle", "rFoot", "lFoot"]
n_joints = len(target_joint_names)
test_filenames = enum_test_files(data_path, args.test_path, n_test_images)
setting_str = "_" + str(num_train_images) + ("_%s" % discr_setting_type if discr_setting_type else "")
average_error_path = eval_path + "JPP_average_error_px" + setting_str + ".csv"
sum_prediction_error = np.zeros((n_joints+1,))
for test_filename in test_filenames:
test_filename_id = "/".join(test_filename.split("/")[-2:])
print(test_filename_id)
test_JPP_path = jpp_out_path + test_filename_id + setting_str + "_JPP.ply"
test_gt_path = jpp_gt_path + test_filename_id + "_px_gt.csv"
error_path = eval_path + test_filename_id + setting_str + "_JPP_error_px.csv"
if path.exists(test_gt_path):
gt_joint_positions = np.array(pd.read_csv(test_gt_path, header=None))
else:
gt_joint_positions = make_ground_truth(test_filename)
joint_positions_3d = load_JPP_ply(test_JPP_path)
visible_joints = []
for j, joint_position in enumerate(joint_positions_3d):
if joint_position != (0, 0):
visible_joints.append(j)
visible_joints = np.array(visible_joints)
depth_img = cv2.imread(test_filename + " Z.png", flags=0)
params = get_parameter(test_filename + "_param")
_, joint_positions_2d = project_point_cloud(joint_positions_3d, depth_img, visible_joints, device)
joint_positions_2d = np.array(joint_positions_2d).transpose()
error_per_joint = np.zeros((18,))
for j, (gt, p) in enumerate(zip(gt_joint_positions, joint_positions_2d)):
if ((not isnan(gt[0])) and (not isnan(gt[1]))) and (p[0] != 0 or p[1] != 0):
error_per_joint[j] = euclidean_distances(gt.reshape((1, -1)), p.reshape((1, -1))) * joint_positions_3d[j, 2] / 200.
elif (isnan(gt[0]) and isnan(gt[1])) and (p[0] == 0 and p[1] == 0):
error_per_joint[j] = np.nan
else:
error_per_joint[j] = 20 * joint_positions_3d[j, 2] / 200.
mean_error = np.nanmean(error_per_joint)
prediction_error = np.r_[error_per_joint, mean_error]
sum_prediction_error += prediction_error
pd.DataFrame(prediction_error, index=target_joint_names+["Mean"]).to_csv(error_path, header=False)
print("\tMean Error is %f" % mean_error)
mean_errors = sum_prediction_error / n_test_images
pd.DataFrame(mean_errors, index=target_joint_names+["Mean"]).to_csv(average_error_path, header=False)
print("Mean error is %f" % mean_errors[-1])
if __name__ == "__main__":
JPP_precision()
|
[
"pandas.DataFrame",
"math.isnan",
"Modules.coordinate_conversion.project_point_cloud",
"pandas.read_csv",
"JPP_precision.load_JPP_ply",
"numpy.zeros",
"numpy.ones",
"os.path.exists",
"Modules.features_labels.make_labels",
"Modules.utils.get_args",
"cv2.imread",
"numpy.where",
"numpy.array",
"Modules.utils.get_parameter",
"Modules.utils.enum_test_files",
"numpy.nanmean"
] |
[((494, 516), 'numpy.ones', 'np.ones', (['(n_joints, 2)'], {}), '((n_joints, 2))\n', (501, 516), True, 'import numpy as np\n'), ((687, 815), 'numpy.array', 'np.array', (['(0, 0, 0, 0, 1, 2, 2, 3, 3, 4, 5, 18, 18, 18, 18, 6, 7, 8, 9, 10, 11, 18, \n 18, 18, 18, 12, 13, 14, 15, 16, 17, 18)'], {}), '((0, 0, 0, 0, 1, 2, 2, 3, 3, 4, 5, 18, 18, 18, 18, 6, 7, 8, 9, 10, \n 11, 18, 18, 18, 18, 12, 13, 14, 15, 16, 17, 18))\n', (695, 815), True, 'import numpy as np\n'), ((1011, 1021), 'Modules.utils.get_args', 'get_args', ([], {}), '()\n', (1019, 1021), False, 'from Modules.utils import get_parameter, get_args, figure_disappears, enum_test_files\n'), ((1785, 1842), 'Modules.utils.enum_test_files', 'enum_test_files', (['data_path', 'args.test_path', 'n_test_images'], {}), '(data_path, args.test_path, n_test_images)\n', (1800, 1842), False, 'from Modules.utils import get_parameter, get_args, figure_disappears, enum_test_files\n'), ((2062, 2087), 'numpy.zeros', 'np.zeros', (['(n_joints + 1,)'], {}), '((n_joints + 1,))\n', (2070, 2087), True, 'import numpy as np\n'), ((2478, 2503), 'os.path.exists', 'path.exists', (['test_gt_path'], {}), '(test_gt_path)\n', (2489, 2503), False, 'from os import path\n'), ((2697, 2724), 'JPP_precision.load_JPP_ply', 'load_JPP_ply', (['test_JPP_path'], {}), '(test_JPP_path)\n', (2709, 2724), False, 'from JPP_precision import load_JPP_ply\n'), ((2924, 2948), 'numpy.array', 'np.array', (['visible_joints'], {}), '(visible_joints)\n', (2932, 2948), True, 'import numpy as np\n'), ((2969, 3014), 'cv2.imread', 'cv2.imread', (["(test_filename + ' Z.png')"], {'flags': '(0)'}), "(test_filename + ' Z.png', flags=0)\n", (2979, 3014), False, 'import cv2, glob\n'), ((3032, 3071), 'Modules.utils.get_parameter', 'get_parameter', (["(test_filename + '_param')"], {}), "(test_filename + '_param')\n", (3045, 3071), False, 'from Modules.utils import get_parameter, get_args, figure_disappears, enum_test_files\n'), ((3104, 3178), 'Modules.coordinate_conversion.project_point_cloud', 'project_point_cloud', (['joint_positions_3d', 'depth_img', 'visible_joints', 'device'], {}), '(joint_positions_3d, depth_img, visible_joints, device)\n', (3123, 3178), False, 'from Modules.coordinate_conversion import project_point_cloud\n'), ((3276, 3291), 'numpy.zeros', 'np.zeros', (['(18,)'], {}), '((18,))\n', (3284, 3291), True, 'import numpy as np\n'), ((3833, 3860), 'numpy.nanmean', 'np.nanmean', (['error_per_joint'], {}), '(error_per_joint)\n', (3843, 3860), True, 'import numpy as np\n'), ((534, 570), 'cv2.imread', 'cv2.imread', (["('%s.png' % test_filename)"], {}), "('%s.png' % test_filename)\n", (544, 570), False, 'import cv2, glob\n'), ((612, 634), 'Modules.features_labels.make_labels', 'make_labels', (['label_img'], {}), '(label_img)\n', (623, 634), False, 'from Modules.features_labels import make_labels\n'), ((4189, 4251), 'pandas.DataFrame', 'pd.DataFrame', (['mean_errors'], {'index': "(target_joint_names + ['Mean'])"}), "(mean_errors, index=target_joint_names + ['Mean'])\n", (4201, 4251), True, 'import pandas as pd\n'), ((888, 932), 'numpy.where', 'np.where', (['(parts2joints_map[label_array] == j)'], {}), '(parts2joints_map[label_array] == j)\n', (896, 932), True, 'import numpy as np\n'), ((2547, 2585), 'pandas.read_csv', 'pd.read_csv', (['test_gt_path'], {'header': 'None'}), '(test_gt_path, header=None)\n', (2558, 2585), True, 'import pandas as pd\n'), ((3208, 3236), 'numpy.array', 'np.array', (['joint_positions_2d'], {}), '(joint_positions_2d)\n', (3216, 3236), True, 'import numpy as np\n'), ((3981, 
4048), 'pandas.DataFrame', 'pd.DataFrame', (['prediction_error'], {'index': "(target_joint_names + ['Mean'])"}), "(prediction_error, index=target_joint_names + ['Mean'])\n", (3993, 4048), True, 'import pandas as pd\n'), ((3395, 3407), 'math.isnan', 'isnan', (['gt[0]'], {}), '(gt[0])\n', (3400, 3407), False, 'from math import isnan\n'), ((3418, 3430), 'math.isnan', 'isnan', (['gt[1]'], {}), '(gt[1])\n', (3423, 3430), False, 'from math import isnan\n'), ((3613, 3625), 'math.isnan', 'isnan', (['gt[0]'], {}), '(gt[0])\n', (3618, 3625), False, 'from math import isnan\n'), ((3630, 3642), 'math.isnan', 'isnan', (['gt[1]'], {}), '(gt[1])\n', (3635, 3642), False, 'from math import isnan\n')]
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
x=np.arange(0,2*np.pi,0.1)
y=np.exp(x)
plt.plot(x,y)
plt.show()
|
[
"numpy.arange",
"numpy.exp",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show"
] |
[((71, 99), 'numpy.arange', 'np.arange', (['(0)', '(2 * np.pi)', '(0.1)'], {}), '(0, 2 * np.pi, 0.1)\n', (80, 99), True, 'import numpy as np\n'), ((98, 107), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (104, 107), True, 'import numpy as np\n'), ((108, 122), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (116, 122), True, 'import matplotlib.pyplot as plt\n'), ((122, 132), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (130, 132), True, 'import matplotlib.pyplot as plt\n')]
|
"""
Copyright (c) 2019-present NAVER Corp.
MIT License
"""
# -*- coding: utf-8 -*-
import sys
import os
import time
import argparse
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from PIL import Image
import cv2
from skimage import io
import numpy as np
import json
import zipfile
import tools.utils as utils
import tools.dataset as dataset
import tools.imgproc as imgproc
import tools.craft_utils as craft_utils
from models.craft import CRAFT
from models.moran import MORAN
import matplotlib.pyplot as plt
from collections import OrderedDict
def copyStateDict(state_dict):
if list(state_dict.keys())[0].startswith("module"):
start_idx = 1
else:
start_idx = 0
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = ".".join(k.split(".")[start_idx:])
new_state_dict[name] = v
return new_state_dict
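# Illustrative note (not part of the original code): copyStateDict strips the
# leading "module." prefix that torch.nn.DataParallel adds to parameter names,
# e.g. copyStateDict(OrderedDict([("module.conv.weight", w)])) would yield
# OrderedDict([("conv.weight", w)]).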
def str2bool(v):
return v.lower() in ("yes", "y", "true", "t", "1")
def craft_net(net, image, text_threshold, link_threshold, low_text, cuda, poly, refine_net=None):
t0 = time.time()
# resize
img_resized, target_ratio, size_heatmap = imgproc.resize_aspect_ratio(image, args.canvas_size, interpolation=cv2.INTER_LINEAR, mag_ratio=args.mag_ratio)
ratio_h = ratio_w = 1 / target_ratio
# preprocessing
x = imgproc.normalizeMeanVariance(img_resized)
x = torch.from_numpy(x).permute(2, 0, 1) # [h, w, c] to [c, h, w]
x = Variable(x.unsqueeze(0)) # [c, h, w] to [b, c, h, w]
if cuda:
x = x.cuda()
# forward pass
with torch.no_grad():
y, feature = net(x)
# make score and link map
score_text = y[0,:,:,0].cpu().data.numpy()
score_link = y[0,:,:,1].cpu().data.numpy()
tmp1 = score_link.copy()
tmp2 = score_text.copy()
# Post-processing
boxes, polys, rot_rects = craft_utils.getDetBoxes(score_text, score_link, text_threshold, link_threshold, low_text, False)
# coordinate adjustment
boxes = craft_utils.adjustResultCoordinates(boxes, ratio_w, ratio_h)
rot_rects = craft_utils.adjustResultCoordinatesNew(rot_rects, ratio_w, ratio_h)
# render results (optional)
render_img = score_text.copy()
render_img = np.hstack((render_img, score_link))
ret_score_text = imgproc.cvt2HeatmapImg(render_img)
    if args.show_time : print("\ninfer/postproc time : {:.3f}".format(time.time() - t0))
return boxes, ret_score_text,rot_rects
parser = argparse.ArgumentParser(description='CRAFT Text Detection')
# CRAFT args
parser.add_argument('--craft_trained_model', default='pretrained/craft_mlt_25k.pth', type=str, help='pretrained model')
parser.add_argument('--img_path', default='test/1.jpg', type=str, help='folder path to input images')
parser.add_argument('--text_threshold', default=0.7, type=float, help='text confidence threshold')
parser.add_argument('--low_text', default=0.4, type=float, help='text low-bound score')
parser.add_argument('--link_threshold', default=0.4, type=float, help='link confidence threshold')
parser.add_argument('--cuda', default=True, type=str2bool, help='Use cuda for inference')
parser.add_argument('--canvas_size', default=1280, type=int, help='image size for inference')
parser.add_argument('--mag_ratio', default=1.5, type=float, help='image magnification ratio')
parser.add_argument('--poly', default=False, action='store_true', help='enable polygon type')
parser.add_argument('--show_time', default=False, action='store_true', help='show processing time')
parser.add_argument('--refine', default=False, action='store_true', help='enable link refiner')
parser.add_argument('--refiner_model', default='pretrained/craft_refiner_CTW1500.pth', type=str, help='pretrained refiner model')
# moran
parser.add_argument('--moran_path', default='pretrained/moran.pth', type=str, help='pretrained moran model')
args = parser.parse_args()
moran_path = args.moran_path
alphabet = '0:1:2:3:4:5:6:7:8:9:a:b:c:d:e:f:g:h:i:j:k:l:m:n:o:p:q:r:s:t:u:v:w:x:y:z:$'
if __name__ == '__main__':
################################################
# cv2 initialize
################################################
cap = cv2.VideoCapture(0)
################################################
# CRAFT loading part
################################################
# load net
net = CRAFT() # initialize
if args.cuda:
net.load_state_dict(copyStateDict(torch.load(args.craft_trained_model)))
else:
net.load_state_dict(copyStateDict(torch.load(args.craft_trained_model, map_location='cpu')))
if args.cuda:
net = net.cuda()
net = torch.nn.DataParallel(net)
cudnn.benchmark = False
net.eval()
################################################
# MORAN loading part
################################################
cuda_flag = False
if torch.cuda.is_available():
cuda_flag = True
MORAN = MORAN(1, len(alphabet.split(':')), 256, 32, 100, BidirDecoder=True, CUDA=cuda_flag)
MORAN = MORAN.cuda()
else:
MORAN = MORAN(1, len(alphabet.split(':')), 256, 32, 100, BidirDecoder=True, inputDataType='torch.FloatTensor', CUDA=cuda_flag)
print('loading pretrained model from %s' % moran_path)
if cuda_flag:
state_dict = torch.load(moran_path)
else:
state_dict = torch.load(moran_path, map_location='cpu')
MORAN_state_dict_rename = OrderedDict()
for k, v in state_dict.items():
name = k.replace("module.", "") # remove `module.`
MORAN_state_dict_rename[name] = v
MORAN.load_state_dict(MORAN_state_dict_rename)
for p in MORAN.parameters():
p.requires_grad = False
MORAN.eval()
while(cap.isOpened()):
all_text = []
all_text_reverse = []
################################################
# CRAFT processing part
################################################
# load data
tik = time.time()
ret, image = cap.read()
# image = cv2.imread('test/1.jpg')
image_raw = image.copy()
bboxes, score_text,rot_rects = craft_net(net, image, args.text_threshold, args.link_threshold, args.low_text, args.cuda, args.poly)
print("time1: ",time.time()-tik)
# save text rectangles
filename, file_ext = os.path.splitext(os.path.basename(args.img_path))
        # This can be used to save the cropped (split) text-region images
img_cuts = utils.saveSplitTextRects(image,rot_rects,save_file=False,save_prefix="rect_"+filename)
print("time2: ",time.time()-tik)
if not img_cuts:
cv2.imshow('Capture', image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
continue
###############################################
# MORAN processing part
################################################
converter = utils.strLabelConverterForAttention(alphabet, ':')
transformer = dataset.resizeNormalize((100, 32))
images = [transformer(Image.fromarray(img.astype('uint8')).convert('L')) for img in img_cuts]
images = [Variable(img.view(1, *img.size())) for img in images]
all_image = torch.cat(images,axis=0)
if cuda_flag:
all_image = all_image.cuda()
text = torch.LongTensor(1 * 5)
length = torch.IntTensor(1)
text = Variable(text)
length = Variable(length)
        # Changing from a single image to a batch only requires adjusting the length tensor;
        # the author's conversion utilities already handle multiple images at once.
max_iter = 20
t, l = converter.encode('0'*max_iter)
utils.loadData(text, t)
utils.loadData(length, l)
length = torch.ones(len(img_cuts))*20
length = length.int()
output = MORAN(all_image, length, text, text, test=True, debug=False)
preds, preds_reverse = output[0]
_, preds = preds.max(1)
_, preds_reverse = preds_reverse.max(1)
sim_preds = converter.decode(preds.data, length.data)
all_text = [v.strip().split('$')[0] for v in sim_preds]
print(sim_preds)
print("time3: ",time.time()-tik)
result_img = utils.saveResult(args.img_path, image_raw[:,:,::-1], bboxes,save_file=False, texts=all_text)
print("time4: ",time.time()-tik)
print(all_text)
cv2.imshow('Capture', result_img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
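    # Not in the original script: release the capture device and close any OpenCV
    # windows once the capture loop has exited (e.g. after pressing 'q').
    cap.release()
    cv2.destroyAllWindows()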
|
[
"argparse.ArgumentParser",
"torch.cat",
"tools.craft_utils.adjustResultCoordinates",
"models.moran.MORAN.cuda",
"torch.no_grad",
"cv2.imshow",
"models.moran.MORAN.eval",
"torch.load",
"models.craft.CRAFT",
"tools.utils.strLabelConverterForAttention",
"tools.utils.saveResult",
"tools.utils.loadData",
"torch.autograd.Variable",
"os.path.basename",
"cv2.waitKey",
"numpy.hstack",
"tools.imgproc.cvt2HeatmapImg",
"torch.cuda.is_available",
"tools.utils.saveSplitTextRects",
"torch.IntTensor",
"tools.craft_utils.adjustResultCoordinatesNew",
"torch.from_numpy",
"models.moran.MORAN",
"torch.LongTensor",
"tools.craft_utils.getDetBoxes",
"tools.imgproc.normalizeMeanVariance",
"time.time",
"cv2.VideoCapture",
"tools.dataset.resizeNormalize",
"models.moran.MORAN.parameters",
"tools.imgproc.resize_aspect_ratio",
"models.moran.MORAN.load_state_dict",
"collections.OrderedDict",
"torch.nn.DataParallel"
] |
[((2591, 2650), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""CRAFT Text Detection"""'}), "(description='CRAFT Text Detection')\n", (2614, 2650), False, 'import argparse\n'), ((812, 825), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (823, 825), False, 'from collections import OrderedDict\n'), ((1164, 1175), 'time.time', 'time.time', ([], {}), '()\n', (1173, 1175), False, 'import time\n'), ((1239, 1354), 'tools.imgproc.resize_aspect_ratio', 'imgproc.resize_aspect_ratio', (['image', 'args.canvas_size'], {'interpolation': 'cv2.INTER_LINEAR', 'mag_ratio': 'args.mag_ratio'}), '(image, args.canvas_size, interpolation=cv2.\n INTER_LINEAR, mag_ratio=args.mag_ratio)\n', (1266, 1354), True, 'import tools.imgproc as imgproc\n'), ((1424, 1466), 'tools.imgproc.normalizeMeanVariance', 'imgproc.normalizeMeanVariance', (['img_resized'], {}), '(img_resized)\n', (1453, 1466), True, 'import tools.imgproc as imgproc\n'), ((1979, 2079), 'tools.craft_utils.getDetBoxes', 'craft_utils.getDetBoxes', (['score_text', 'score_link', 'text_threshold', 'link_threshold', 'low_text', '(False)'], {}), '(score_text, score_link, text_threshold,\n link_threshold, low_text, False)\n', (2002, 2079), True, 'import tools.craft_utils as craft_utils\n'), ((2118, 2178), 'tools.craft_utils.adjustResultCoordinates', 'craft_utils.adjustResultCoordinates', (['boxes', 'ratio_w', 'ratio_h'], {}), '(boxes, ratio_w, ratio_h)\n', (2153, 2178), True, 'import tools.craft_utils as craft_utils\n'), ((2196, 2263), 'tools.craft_utils.adjustResultCoordinatesNew', 'craft_utils.adjustResultCoordinatesNew', (['rot_rects', 'ratio_w', 'ratio_h'], {}), '(rot_rects, ratio_w, ratio_h)\n', (2234, 2263), True, 'import tools.craft_utils as craft_utils\n'), ((2351, 2386), 'numpy.hstack', 'np.hstack', (['(render_img, score_link)'], {}), '((render_img, score_link))\n', (2360, 2386), True, 'import numpy as np\n'), ((2409, 2443), 'tools.imgproc.cvt2HeatmapImg', 'imgproc.cvt2HeatmapImg', (['render_img'], {}), '(render_img)\n', (2431, 2443), True, 'import tools.imgproc as imgproc\n'), ((4322, 4341), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (4338, 4341), False, 'import cv2\n'), ((4503, 4510), 'models.craft.CRAFT', 'CRAFT', ([], {}), '()\n', (4508, 4510), False, 'from models.craft import CRAFT\n'), ((5047, 5072), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5070, 5072), False, 'import torch\n'), ((5611, 5624), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5622, 5624), False, 'from collections import OrderedDict\n'), ((5770, 5816), 'models.moran.MORAN.load_state_dict', 'MORAN.load_state_dict', (['MORAN_state_dict_rename'], {}), '(MORAN_state_dict_rename)\n', (5791, 5816), False, 'from models.moran import MORAN\n'), ((5833, 5851), 'models.moran.MORAN.parameters', 'MORAN.parameters', ([], {}), '()\n', (5849, 5851), False, 'from models.moran import MORAN\n'), ((5891, 5903), 'models.moran.MORAN.eval', 'MORAN.eval', ([], {}), '()\n', (5901, 5903), False, 'from models.moran import MORAN\n'), ((1686, 1701), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1699, 1701), False, 'import torch\n'), ((4804, 4830), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['net'], {}), '(net)\n', (4825, 4830), False, 'import torch\n'), ((5218, 5230), 'models.moran.MORAN.cuda', 'MORAN.cuda', ([], {}), '()\n', (5228, 5230), False, 'from models.moran import MORAN\n'), ((5481, 5503), 'torch.load', 'torch.load', (['moran_path'], {}), '(moran_path)\n', (5491, 5503), False, 
'import torch\n'), ((5537, 5579), 'torch.load', 'torch.load', (['moran_path'], {'map_location': '"""cpu"""'}), "(moran_path, map_location='cpu')\n", (5547, 5579), False, 'import torch\n'), ((6183, 6194), 'time.time', 'time.time', ([], {}), '()\n', (6192, 6194), False, 'import time\n'), ((6644, 6740), 'tools.utils.saveSplitTextRects', 'utils.saveSplitTextRects', (['image', 'rot_rects'], {'save_file': '(False)', 'save_prefix': "('rect_' + filename)"}), "(image, rot_rects, save_file=False, save_prefix=\n 'rect_' + filename)\n", (6668, 6740), True, 'import tools.utils as utils\n'), ((7106, 7156), 'tools.utils.strLabelConverterForAttention', 'utils.strLabelConverterForAttention', (['alphabet', '""":"""'], {}), "(alphabet, ':')\n", (7141, 7156), True, 'import tools.utils as utils\n'), ((7180, 7214), 'tools.dataset.resizeNormalize', 'dataset.resizeNormalize', (['(100, 32)'], {}), '((100, 32))\n', (7203, 7214), True, 'import tools.dataset as dataset\n'), ((7412, 7437), 'torch.cat', 'torch.cat', (['images'], {'axis': '(0)'}), '(images, axis=0)\n', (7421, 7437), False, 'import torch\n'), ((7518, 7541), 'torch.LongTensor', 'torch.LongTensor', (['(1 * 5)'], {}), '(1 * 5)\n', (7534, 7541), False, 'import torch\n'), ((7560, 7578), 'torch.IntTensor', 'torch.IntTensor', (['(1)'], {}), '(1)\n', (7575, 7578), False, 'import torch\n'), ((7595, 7609), 'torch.autograd.Variable', 'Variable', (['text'], {}), '(text)\n', (7603, 7609), False, 'from torch.autograd import Variable\n'), ((7628, 7644), 'torch.autograd.Variable', 'Variable', (['length'], {}), '(length)\n', (7636, 7644), False, 'from torch.autograd import Variable\n'), ((7793, 7816), 'tools.utils.loadData', 'utils.loadData', (['text', 't'], {}), '(text, t)\n', (7807, 7816), True, 'import tools.utils as utils\n'), ((7826, 7851), 'tools.utils.loadData', 'utils.loadData', (['length', 'l'], {}), '(length, l)\n', (7840, 7851), True, 'import tools.utils as utils\n'), ((7948, 8008), 'models.moran.MORAN', 'MORAN', (['all_image', 'length', 'text', 'text'], {'test': '(True)', 'debug': '(False)'}), '(all_image, length, text, text, test=True, debug=False)\n', (7953, 8008), False, 'from models.moran import MORAN\n'), ((8353, 8453), 'tools.utils.saveResult', 'utils.saveResult', (['args.img_path', 'image_raw[:, :, ::-1]', 'bboxes'], {'save_file': '(False)', 'texts': 'all_text'}), '(args.img_path, image_raw[:, :, ::-1], bboxes, save_file=\n False, texts=all_text)\n', (8369, 8453), True, 'import tools.utils as utils\n'), ((8522, 8555), 'cv2.imshow', 'cv2.imshow', (['"""Capture"""', 'result_img'], {}), "('Capture', result_img)\n", (8532, 8555), False, 'import cv2\n'), ((1476, 1495), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (1492, 1495), False, 'import torch\n'), ((6568, 6599), 'os.path.basename', 'os.path.basename', (['args.img_path'], {}), '(args.img_path)\n', (6584, 6599), False, 'import os\n'), ((6812, 6840), 'cv2.imshow', 'cv2.imshow', (['"""Capture"""', 'image'], {}), "('Capture', image)\n", (6822, 6840), False, 'import cv2\n'), ((4590, 4626), 'torch.load', 'torch.load', (['args.craft_trained_model'], {}), '(args.craft_trained_model)\n', (4600, 4626), False, 'import torch\n'), ((4683, 4739), 'torch.load', 'torch.load', (['args.craft_trained_model'], {'map_location': '"""cpu"""'}), "(args.craft_trained_model, map_location='cpu')\n", (4693, 4739), False, 'import torch\n'), ((6472, 6483), 'time.time', 'time.time', ([], {}), '()\n', (6481, 6483), False, 'import time\n'), ((6756, 6767), 'time.time', 'time.time', ([], {}), '()\n', (6765, 6767), False, 
'import time\n'), ((8314, 8325), 'time.time', 'time.time', ([], {}), '()\n', (8323, 8325), False, 'import time\n'), ((8471, 8482), 'time.time', 'time.time', ([], {}), '()\n', (8480, 8482), False, 'import time\n'), ((8568, 8582), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (8579, 8582), False, 'import cv2\n'), ((6857, 6871), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (6868, 6871), False, 'import cv2\n')]
|
# Author: <NAME> <<EMAIL>>
# License: Simplified BSD
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.utils.extmath import safe_sparse_dot
from scipy.sparse import issparse
import numpy as np
def safe_power(X, degree=2):
"""Element-wise power supporting both sparse and dense data.
Parameters
----------
X : ndarray or sparse
The array whose entries to raise to the power.
degree : int, default: 2
The power to which to raise the elements.
Returns
-------
X_ret : ndarray or sparse
Same shape as X, but (x_ret)_ij = (x)_ij ^ degree
"""
if issparse(X):
if hasattr(X, 'power'):
return X.power(degree)
else:
# old scipy
X = X.copy()
X.data **= degree
return X
else:
return X ** degree
def _D(X, P, degree=2):
"""The "replacement" part of the homogeneous polynomial kernel.
D[i, j] = sum_k [(X_ik * P_jk) ** degree]
"""
return safe_sparse_dot(safe_power(X, degree), P.T ** degree)
def homogeneous_kernel(X, P, degree=2):
"""Convenience alias for homogeneous polynomial kernel between X and P::
K_P(x, p) = <x, p> ^ degree
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
    P : ndarray of shape (n_samples_2, n_features)
degree : int, default 2
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
return polynomial_kernel(X, P, degree=degree, gamma=1, coef0=0)
def anova_kernel(X, P, degree=2):
"""ANOVA kernel between X and P::
K_A(x, p) = sum_i1>i2>...>id x_i1 p_i1 x_i2 p_i2 ... x_id p_id
See <NAME> and <NAME>,
Kernel Methods for Pattern Analysis section 9.2.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
    P : ndarray of shape (n_samples_2, n_features)
degree : int, default 2
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
if degree == 2:
K = homogeneous_kernel(X, P, degree=2)
K -= _D(X, P, degree=2)
K /= 2
elif degree == 3:
K = homogeneous_kernel(X, P, degree=3)
K -= 3 * _D(X, P, degree=2) * _D(X, P, degree=1)
K += 2 * _D(X, P, degree=3)
K /= 6
else:
raise NotImplementedError("ANOVA kernel for degree >= 4 not yet "
"implemented efficiently.")
return K
def _poly_predict(X, P, lams, kernel, degree=2):
if kernel == "anova":
K = anova_kernel(X, P, degree)
elif kernel == "poly":
K = homogeneous_kernel(X, P, degree)
else:
raise ValueError(("Unsuppported kernel: {}. Use one "
"of {{'anova'|'poly'}}").format(kernel))
return np.dot(K, lams)
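# Minimal usage sketch (not part of the original module; sizes below are
# illustrative). It checks the dense branch of safe_power, compares the fast
# degree-2 ANOVA kernel against a brute-force sum over index pairs, and shows
# that _poly_predict returns one prediction per sample.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X = rng.randn(5, 4)      # 5 samples, 4 features
    P = rng.randn(3, 4)      # 3 basis vectors
    lams = rng.randn(3)      # one weight per basis vector
    assert np.allclose(safe_power(np.array([[1., 2.], [3., 4.]])), [[1., 4.], [9., 16.]])
    a = X[0] * P[0]
    brute = sum(a[i] * a[j] for i in range(4) for j in range(i))
    assert np.isclose(brute, anova_kernel(X[:1], P[:1], degree=2)[0, 0])
    assert _poly_predict(X, P, lams, kernel="anova", degree=2).shape == (5,)
    assert _poly_predict(X, P, lams, kernel="poly", degree=3).shape == (5,)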
|
[
"numpy.dot",
"scipy.sparse.issparse",
"sklearn.metrics.pairwise.polynomial_kernel"
] |
[((630, 641), 'scipy.sparse.issparse', 'issparse', (['X'], {}), '(X)\n', (638, 641), False, 'from scipy.sparse import issparse\n'), ((1498, 1554), 'sklearn.metrics.pairwise.polynomial_kernel', 'polynomial_kernel', (['X', 'P'], {'degree': 'degree', 'gamma': '(1)', 'coef0': '(0)'}), '(X, P, degree=degree, gamma=1, coef0=0)\n', (1515, 1554), False, 'from sklearn.metrics.pairwise import polynomial_kernel\n'), ((2827, 2842), 'numpy.dot', 'np.dot', (['K', 'lams'], {}), '(K, lams)\n', (2833, 2842), True, 'import numpy as np\n')]
|
import numpy
from noise import snoise2
from worldengine.model.world import Step
from worldengine.simulations.basic import find_threshold_f
from worldengine.simulations.hydrology import WatermapSimulation
from worldengine.simulations.irrigation import IrrigationSimulation
from worldengine.simulations.humidity import HumiditySimulation
from worldengine.simulations.temperature import TemperatureSimulation
from worldengine.simulations.permeability import PermeabilitySimulation
from worldengine.simulations.erosion import ErosionSimulation
from worldengine.simulations.precipitation import PrecipitationSimulation
from worldengine.simulations.biome import BiomeSimulation
from worldengine.simulations.icecap import IcecapSimulation
from worldengine.common import anti_alias, get_verbose
# ------------------
# Initial generation
# ------------------
def center_land(world):
"""Translate the map horizontally and vertically to put as much ocean as
possible at the borders. It operates on elevation and plates map"""
y_sums = world.layers['elevation'].data.sum(1) # 1 == sum along x-axis
y_with_min_sum = y_sums.argmin()
if get_verbose():
print("geo.center_land: height complete")
x_sums = world.layers['elevation'].data.sum(0) # 0 == sum along y-axis
x_with_min_sum = x_sums.argmin()
if get_verbose():
print("geo.center_land: width complete")
latshift = 0
world.layers['elevation'].data = numpy.roll(numpy.roll(world.layers['elevation'].data, -y_with_min_sum + latshift, axis=0), - x_with_min_sum, axis=1)
world.layers['plates'].data = numpy.roll(numpy.roll(world.layers['plates'].data, -y_with_min_sum + latshift, axis=0), - x_with_min_sum, axis=1)
if get_verbose():
print("geo.center_land: width complete")
def place_oceans_at_map_borders(world):
"""
Lower the elevation near the border of the map
"""
ocean_border = int(min(30, max(world.width / 5, world.height / 5)))
def place_ocean(x, y, i):
world.layers['elevation'].data[y, x] = \
(world.layers['elevation'].data[y, x] * i) / ocean_border
for x in range(world.width):
for i in range(ocean_border):
place_ocean(x, i, i)
place_ocean(x, world.height - i - 1, i)
for y in range(world.height):
for i in range(ocean_border):
place_ocean(i, y, i)
place_ocean(world.width - i - 1, y, i)
def add_noise_to_elevation(world, seed):
octaves = 8
freq = 16.0 * octaves
for y in range(world.height):
for x in range(world.width):
n = snoise2(x / freq * 2, y / freq * 2, octaves, base=seed)
world.layers['elevation'].data[y, x] += n
def fill_ocean(elevation, sea_level):#TODO: Make more use of numpy?
height, width = elevation.shape
ocean = numpy.zeros(elevation.shape, dtype=bool)
to_expand = []
for x in range(width):#handle top and bottom border of the map
if elevation[0, x] <= sea_level:
to_expand.append((x, 0))
if elevation[height - 1, x] <= sea_level:
to_expand.append((x, height - 1))
for y in range(height):#handle left- and rightmost border of the map
if elevation[y, 0] <= sea_level:
to_expand.append((0, y))
if elevation[y, width - 1] <= sea_level:
to_expand.append((width - 1, y))
for t in to_expand:
tx, ty = t
if not ocean[ty, tx]:
ocean[ty, tx] = True
for px, py in _around(tx, ty, width, height):
if not ocean[py, px] and elevation[py, px] <= sea_level:
to_expand.append((px, py))
return ocean
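# Illustrative note (not part of the original module): fill_ocean flood-fills from
# the map borders using 8-connectivity (via _around below), so a below-sea-level
# cell that is completely enclosed by land above sea level stays land, e.g.
#   elev = numpy.array([[0.1, 0.9, 0.9, 0.9],
#                       [0.9, 0.9, 0.9, 0.9],
#                       [0.9, 0.9, 0.1, 0.9],
#                       [0.9, 0.9, 0.9, 0.9]])
#   fill_ocean(elev, sea_level=0.5)   # only the (0, 0) border cell becomes ocean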
def initialize_ocean_and_thresholds(world, ocean_level=1.0):
"""
Calculate the ocean, the sea depth and the elevation thresholds
:param world: a world having elevation but not thresholds
:param ocean_level: the elevation representing the ocean level
:return: nothing, the world will be changed
"""
e = world.layers['elevation'].data
ocean = fill_ocean(e, ocean_level)
hl = find_threshold_f(e, 0.10) # the highest 10% of all (!) land are declared hills
ml = find_threshold_f(e, 0.03) # the highest 3% are declared mountains
e_th = [('sea', ocean_level),
('plain', hl),
('hill', ml),
('mountain', None)]
harmonize_ocean(ocean, e, ocean_level)
world.ocean = ocean
world.elevation = (e, e_th)
world.sea_depth = sea_depth(world, ocean_level)
def harmonize_ocean(ocean, elevation, ocean_level):
"""
The goal of this function is to make the ocean floor less noisy.
The underwater erosion should cause the ocean floor to be more uniform
"""
shallow_sea = ocean_level * 0.85
midpoint = shallow_sea / 2.0
ocean_points = numpy.logical_and(elevation < shallow_sea, ocean)
shallow_ocean = numpy.logical_and(elevation < midpoint, ocean_points)
elevation[shallow_ocean] = midpoint - ((midpoint - elevation[shallow_ocean]) / 5.0)
deep_ocean = numpy.logical_and(elevation > midpoint, ocean_points)
elevation[deep_ocean] = midpoint + ((elevation[deep_ocean] - midpoint) / 5.0)
# ----
# Misc
# ----
def sea_depth(world, sea_level):
# a dynamic programming approach to gather how far the next land is
# from a given coordinate up to a maximum distance of max_radius
# result is 0 for land coordinates and -1 for coordinates further than
# max_radius away from land
# there might be even faster ways but it does the trick
def next_land_dynamic(ocean, max_radius=5):
next_land = numpy.full(ocean.shape, -1, int)
# non ocean tiles are zero distance away from next land
next_land[numpy.logical_not(ocean)]=0
height, width = ocean.shape
for dist in range(max_radius):
indices = numpy.transpose(numpy.where(next_land==dist))
for y, x in indices:
for dy in range(-1, 2):
ny = y + dy
if 0 <= ny < height:
for dx in range(-1, 2):
nx = x + dx
if 0 <= nx < width:
if next_land[ny,nx] == -1:
next_land[ny,nx] = dist + 1
return next_land
# We want to multiply the raw sea_depth by one of these factors
# depending on the distance from the next land
# possible TODO: make this a parameter
factors = [0.0, 0.3, 0.5, 0.7, 0.9]
next_land = next_land_dynamic(world.layers['ocean'].data)
sea_depth = sea_level - world.layers['elevation'].data
for y in range(world.height):
for x in range(world.width):
dist_to_next_land = next_land[y,x]
if dist_to_next_land > 0:
sea_depth[y,x]*=factors[dist_to_next_land-1]
sea_depth = anti_alias(sea_depth, 10)
min_depth = sea_depth.min()
max_depth = sea_depth.max()
sea_depth = (sea_depth - min_depth) / (max_depth - min_depth)
return sea_depth
def _around(x, y, width, height):
ps = []
for dx in range(-1, 2):
nx = x + dx
if 0 <= nx < width:
for dy in range(-1, 2):
ny = y + dy
if 0 <= ny < height and (dx != 0 or dy != 0):
ps.append((nx, ny))
return ps
def generate_world(w, step):
if isinstance(step, str):
step = Step.get_by_name(step)
if not step.include_precipitations:
return w
# Prepare sufficient seeds for the different steps of the generation
rng = numpy.random.RandomState(w.seed) # create a fresh RNG in case the global RNG is compromised (i.e. has been queried an indefinite amount of times before generate_world() was called)
sub_seeds = rng.randint(0, numpy.iinfo(numpy.int32).max, size=100) # choose lowest common denominator (32 bit Windows numpy cannot handle a larger value)
seed_dict = {
'PrecipitationSimulation': sub_seeds[ 0], # after 0.19.0 do not ever switch out the seeds here to maximize seed-compatibility
'ErosionSimulation': sub_seeds[ 1],
'WatermapSimulation': sub_seeds[ 2],
'IrrigationSimulation': sub_seeds[ 3],
'TemperatureSimulation': sub_seeds[ 4],
'HumiditySimulation': sub_seeds[ 5],
'PermeabilitySimulation': sub_seeds[ 6],
'BiomeSimulation': sub_seeds[ 7],
'IcecapSimulation': sub_seeds[ 8],
'': sub_seeds[99]
}
TemperatureSimulation().execute(w, seed_dict['TemperatureSimulation'])
# Precipitation with thresholds
PrecipitationSimulation().execute(w, seed_dict['PrecipitationSimulation'])
if not step.include_erosion:
return w
ErosionSimulation().execute(w, seed_dict['ErosionSimulation']) # seed not currently used
if get_verbose():
print("...erosion calculated")
WatermapSimulation().execute(w, seed_dict['WatermapSimulation']) # seed not currently used
# FIXME: create setters
IrrigationSimulation().execute(w, seed_dict['IrrigationSimulation']) # seed not currently used
HumiditySimulation().execute(w, seed_dict['HumiditySimulation']) # seed not currently used
PermeabilitySimulation().execute(w, seed_dict['PermeabilitySimulation'])
cm, biome_cm = BiomeSimulation().execute(w, seed_dict['BiomeSimulation']) # seed not currently used
for cl in cm.keys():
count = cm[cl]
if get_verbose():
print("%s = %i" % (str(cl), count))
if get_verbose():
print('') # empty line
print('Biome obtained:')
for cl in biome_cm.keys():
count = biome_cm[cl]
if get_verbose():
print(" %30s = %7i" % (str(cl), count))
IcecapSimulation().execute(w, seed_dict['IcecapSimulation']) # makes use of temperature-map
return w
|
[
"worldengine.simulations.basic.find_threshold_f",
"worldengine.simulations.temperature.TemperatureSimulation",
"numpy.iinfo",
"worldengine.simulations.permeability.PermeabilitySimulation",
"worldengine.simulations.biome.BiomeSimulation",
"worldengine.simulations.hydrology.WatermapSimulation",
"worldengine.simulations.precipitation.PrecipitationSimulation",
"numpy.full",
"numpy.logical_not",
"numpy.random.RandomState",
"noise.snoise2",
"worldengine.simulations.erosion.ErosionSimulation",
"worldengine.common.get_verbose",
"worldengine.common.anti_alias",
"numpy.roll",
"worldengine.simulations.irrigation.IrrigationSimulation",
"worldengine.simulations.icecap.IcecapSimulation",
"worldengine.model.world.Step.get_by_name",
"worldengine.simulations.humidity.HumiditySimulation",
"numpy.logical_and",
"numpy.zeros",
"numpy.where"
] |
[((1152, 1165), 'worldengine.common.get_verbose', 'get_verbose', ([], {}), '()\n', (1163, 1165), False, 'from worldengine.common import anti_alias, get_verbose\n'), ((1338, 1351), 'worldengine.common.get_verbose', 'get_verbose', ([], {}), '()\n', (1349, 1351), False, 'from worldengine.common import anti_alias, get_verbose\n'), ((1729, 1742), 'worldengine.common.get_verbose', 'get_verbose', ([], {}), '()\n', (1740, 1742), False, 'from worldengine.common import anti_alias, get_verbose\n'), ((2840, 2880), 'numpy.zeros', 'numpy.zeros', (['elevation.shape'], {'dtype': 'bool'}), '(elevation.shape, dtype=bool)\n', (2851, 2880), False, 'import numpy\n'), ((4099, 4123), 'worldengine.simulations.basic.find_threshold_f', 'find_threshold_f', (['e', '(0.1)'], {}), '(e, 0.1)\n', (4115, 4123), False, 'from worldengine.simulations.basic import find_threshold_f\n'), ((4188, 4213), 'worldengine.simulations.basic.find_threshold_f', 'find_threshold_f', (['e', '(0.03)'], {}), '(e, 0.03)\n', (4204, 4213), False, 'from worldengine.simulations.basic import find_threshold_f\n'), ((4830, 4879), 'numpy.logical_and', 'numpy.logical_and', (['(elevation < shallow_sea)', 'ocean'], {}), '(elevation < shallow_sea, ocean)\n', (4847, 4879), False, 'import numpy\n'), ((4901, 4954), 'numpy.logical_and', 'numpy.logical_and', (['(elevation < midpoint)', 'ocean_points'], {}), '(elevation < midpoint, ocean_points)\n', (4918, 4954), False, 'import numpy\n'), ((5061, 5114), 'numpy.logical_and', 'numpy.logical_and', (['(elevation > midpoint)', 'ocean_points'], {}), '(elevation > midpoint, ocean_points)\n', (5078, 5114), False, 'import numpy\n'), ((6918, 6943), 'worldengine.common.anti_alias', 'anti_alias', (['sea_depth', '(10)'], {}), '(sea_depth, 10)\n', (6928, 6943), False, 'from worldengine.common import anti_alias, get_verbose\n'), ((7642, 7674), 'numpy.random.RandomState', 'numpy.random.RandomState', (['w.seed'], {}), '(w.seed)\n', (7666, 7674), False, 'import numpy\n'), ((9024, 9037), 'worldengine.common.get_verbose', 'get_verbose', ([], {}), '()\n', (9035, 9037), False, 'from worldengine.common import anti_alias, get_verbose\n'), ((9714, 9727), 'worldengine.common.get_verbose', 'get_verbose', ([], {}), '()\n', (9725, 9727), False, 'from worldengine.common import anti_alias, get_verbose\n'), ((1468, 1546), 'numpy.roll', 'numpy.roll', (["world.layers['elevation'].data", '(-y_with_min_sum + latshift)'], {'axis': '(0)'}), "(world.layers['elevation'].data, -y_with_min_sum + latshift, axis=0)\n", (1478, 1546), False, 'import numpy\n'), ((1619, 1694), 'numpy.roll', 'numpy.roll', (["world.layers['plates'].data", '(-y_with_min_sum + latshift)'], {'axis': '(0)'}), "(world.layers['plates'].data, -y_with_min_sum + latshift, axis=0)\n", (1629, 1694), False, 'import numpy\n'), ((5637, 5669), 'numpy.full', 'numpy.full', (['ocean.shape', '(-1)', 'int'], {}), '(ocean.shape, -1, int)\n', (5647, 5669), False, 'import numpy\n'), ((7477, 7499), 'worldengine.model.world.Step.get_by_name', 'Step.get_by_name', (['step'], {}), '(step)\n', (7493, 7499), False, 'from worldengine.model.world import Step\n'), ((9643, 9656), 'worldengine.common.get_verbose', 'get_verbose', ([], {}), '()\n', (9654, 9656), False, 'from worldengine.common import anti_alias, get_verbose\n'), ((9866, 9879), 'worldengine.common.get_verbose', 'get_verbose', ([], {}), '()\n', (9877, 9879), False, 'from worldengine.common import anti_alias, get_verbose\n'), ((2611, 2666), 'noise.snoise2', 'snoise2', (['(x / freq * 2)', '(y / freq * 2)', 'octaves'], {'base': 'seed'}), '(x / freq * 
2, y / freq * 2, octaves, base=seed)\n', (2618, 2666), False, 'from noise import snoise2\n'), ((5753, 5777), 'numpy.logical_not', 'numpy.logical_not', (['ocean'], {}), '(ocean)\n', (5770, 5777), False, 'import numpy\n'), ((7855, 7879), 'numpy.iinfo', 'numpy.iinfo', (['numpy.int32'], {}), '(numpy.int32)\n', (7866, 7879), False, 'import numpy\n'), ((8686, 8709), 'worldengine.simulations.temperature.TemperatureSimulation', 'TemperatureSimulation', ([], {}), '()\n', (8707, 8709), False, 'from worldengine.simulations.temperature import TemperatureSimulation\n'), ((8797, 8822), 'worldengine.simulations.precipitation.PrecipitationSimulation', 'PrecipitationSimulation', ([], {}), '()\n', (8820, 8822), False, 'from worldengine.simulations.precipitation import PrecipitationSimulation\n'), ((8927, 8946), 'worldengine.simulations.erosion.ErosionSimulation', 'ErosionSimulation', ([], {}), '()\n', (8944, 8946), False, 'from worldengine.simulations.erosion import ErosionSimulation\n'), ((9083, 9103), 'worldengine.simulations.hydrology.WatermapSimulation', 'WatermapSimulation', ([], {}), '()\n', (9101, 9103), False, 'from worldengine.simulations.hydrology import WatermapSimulation\n'), ((9208, 9230), 'worldengine.simulations.irrigation.IrrigationSimulation', 'IrrigationSimulation', ([], {}), '()\n', (9228, 9230), False, 'from worldengine.simulations.irrigation import IrrigationSimulation\n'), ((9308, 9328), 'worldengine.simulations.humidity.HumiditySimulation', 'HumiditySimulation', ([], {}), '()\n', (9326, 9328), False, 'from worldengine.simulations.humidity import HumiditySimulation\n'), ((9405, 9429), 'worldengine.simulations.permeability.PermeabilitySimulation', 'PermeabilitySimulation', ([], {}), '()\n', (9427, 9429), False, 'from worldengine.simulations.permeability import PermeabilitySimulation\n'), ((9498, 9515), 'worldengine.simulations.biome.BiomeSimulation', 'BiomeSimulation', ([], {}), '()\n', (9513, 9515), False, 'from worldengine.simulations.biome import BiomeSimulation\n'), ((9938, 9956), 'worldengine.simulations.icecap.IcecapSimulation', 'IcecapSimulation', ([], {}), '()\n', (9954, 9956), False, 'from worldengine.simulations.icecap import IcecapSimulation\n'), ((5896, 5926), 'numpy.where', 'numpy.where', (['(next_land == dist)'], {}), '(next_land == dist)\n', (5907, 5926), False, 'import numpy\n')]
|
#!/usr/bin/env python
import copy
import numpy as np
from scipy import signal
from edrixs.photon_transition import dipole_polvec_rixs
from edrixs.utils import boltz_dist
from edrixs.rixs_utils import scattering_mat
if __name__ == "__main__":
'''
Purpose: This example shows how to calculate RIXS spectrum.
    This example uses pure Python code.
'''
# PARAMETERS
#-----------
# the parameters for the experimental RIXS geometry are taken from [PRL 117, 147401 (2016)]
# the incident angle of X-ray
thin, thout = 15/180.0*np.pi, 75/180.0*np.pi
# azimuthal angle
phi = 0.0
# core-hole life-time broadening
gamma_n = 0.20
# resolution of RIXS excitations
gamma_f = 0.10
# energy offset of the incident X-ray
om_offset = 857.4
# set energy mesh of the incident X-ray (eV)
# L3 edge
om1, om2 = -5.9,-0.9
    # L2 edge
#om1, om2 = 10.9, 14.9
nom = 100
om_mesh = np.linspace(om1, om2, nom)
# energy loss mesh
neloss = 1000
eloss_mesh = np.linspace(-0.5, 5.0, neloss)
# ground state list
gs_list=list(range(0, 3))
# temperature
T = 300
# END of PARAMETERS
#------------------
# load data, the eigenvalues of the initial and the intermediate Hamiltonian, and the transition matrix
data = np.loadtxt('eval_i.dat')
eval_i = data[:,1]
data = np.loadtxt('eval_n.dat')
eval_n = data[:,1]
ncfgs_n, ncfgs_i = len(eval_n), len(eval_i)
# the transition operator for the absorption process
data = np.loadtxt('trans_mat.dat')
trans_mat_abs = data[:,3].reshape((3, ncfgs_n, ncfgs_i)) + 1j * data[:,4].reshape((3, ncfgs_n, ncfgs_i))
# the transition operator for the emission process
trans_mat_emi = np.zeros((3, ncfgs_i, ncfgs_n), dtype=np.complex128)
for i in range(3):
trans_mat_emi[i] = np.conj(np.transpose(trans_mat_abs[i]))
# We calculate RIXS for \pi-\pi, \pi-\sigma, \sigma-\pi, \sigma-\sigma polarizations
rixs = np.zeros((4, neloss, nom), dtype=np.float64)
gs_prob = boltz_dist([eval_i[i] for i in gs_list], T)
polvec_list = [(0,0), (0,np.pi/2.0), (np.pi/2.0, 0), (np.pi/2.0, np.pi/2.0)]
print("edrixs >>> calculating RIXS ...")
for i, om_inc in enumerate(om_mesh):
print(" incident X-ray energy: ", i, " ", om_inc)
F_fi = scattering_mat(eval_i, eval_n, trans_mat_abs[:,:,gs_list], trans_mat_emi, om_inc, gamma_n)
for j, (alpha, beta) in enumerate(polvec_list):
ei, ef = dipole_polvec_rixs(thin, thout, phi, alpha, beta)
F_magnitude = np.zeros((ncfgs_i, len(gs_list)), dtype=np.complex128)
for m in range(3):
for n in range(3):
F_magnitude[:,:] += ef[m] * F_fi[m,n] * ei[n]
for m in gs_list:
for n in range(ncfgs_i):
rixs[j, :, i] += np.abs(F_magnitude[n,m])**2 * gamma_f/np.pi / ( (eloss_mesh-(eval_i[n]-eval_i[m]))**2 + gamma_f**2 ) * gs_prob[m]
# gaussian broadening
inc_res = 0.17
emi_res = 0.12
gauss = np.zeros((neloss, nom))
mid_in = ( min(om_mesh) + max(om_mesh) ) /2.0
mid_out = ( min(eloss_mesh)+max(eloss_mesh)) /2.0
for i in range(nom):
for j in range(neloss):
gauss[j,i] = 1/(2.0*np.pi*inc_res*emi_res)*np.exp(-((mid_in-om_mesh[i])**2/(2*inc_res**2) + (mid_out-eloss_mesh[j])**2/(2*emi_res**2)))
for i in range(4):
rixs[i,:,:] = signal.fftconvolve(rixs[i,:,:], gauss, mode = 'same')
print("edrixs >>> done !")
f=open('rixs.dat', 'w')
for i in range(neloss):
for j in range(nom):
str_form = "{:20.10f}"*6 +"\n"
line=str_form.format(eloss_mesh[i], om_mesh[j]+om_offset, rixs[0,i,j], rixs[1,i,j], rixs[2,i,j], rixs[3,i,j])
f.write(line)
f.close()
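    # Optional visualisation sketch (not part of the original script; assumes
    # matplotlib is available): the pi-pi map could be shown with, e.g.,
    # import matplotlib.pyplot as plt
    # plt.pcolormesh(om_mesh + om_offset, eloss_mesh, rixs[0], shading='auto')
    # plt.xlabel('Incident energy (eV)')
    # plt.ylabel('Energy loss (eV)')
    # plt.colorbar()
    # plt.show()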
|
[
"edrixs.photon_transition.dipole_polvec_rixs",
"numpy.abs",
"scipy.signal.fftconvolve",
"edrixs.utils.boltz_dist",
"numpy.zeros",
"numpy.transpose",
"numpy.loadtxt",
"numpy.linspace",
"numpy.exp",
"edrixs.rixs_utils.scattering_mat"
] |
[((1002, 1028), 'numpy.linspace', 'np.linspace', (['om1', 'om2', 'nom'], {}), '(om1, om2, nom)\n', (1013, 1028), True, 'import numpy as np\n'), ((1087, 1117), 'numpy.linspace', 'np.linspace', (['(-0.5)', '(5.0)', 'neloss'], {}), '(-0.5, 5.0, neloss)\n', (1098, 1117), True, 'import numpy as np\n'), ((1372, 1396), 'numpy.loadtxt', 'np.loadtxt', (['"""eval_i.dat"""'], {}), "('eval_i.dat')\n", (1382, 1396), True, 'import numpy as np\n'), ((1432, 1456), 'numpy.loadtxt', 'np.loadtxt', (['"""eval_n.dat"""'], {}), "('eval_n.dat')\n", (1442, 1456), True, 'import numpy as np\n'), ((1598, 1625), 'numpy.loadtxt', 'np.loadtxt', (['"""trans_mat.dat"""'], {}), "('trans_mat.dat')\n", (1608, 1625), True, 'import numpy as np\n'), ((1810, 1862), 'numpy.zeros', 'np.zeros', (['(3, ncfgs_i, ncfgs_n)'], {'dtype': 'np.complex128'}), '((3, ncfgs_i, ncfgs_n), dtype=np.complex128)\n', (1818, 1862), True, 'import numpy as np\n'), ((2054, 2098), 'numpy.zeros', 'np.zeros', (['(4, neloss, nom)'], {'dtype': 'np.float64'}), '((4, neloss, nom), dtype=np.float64)\n', (2062, 2098), True, 'import numpy as np\n'), ((2113, 2156), 'edrixs.utils.boltz_dist', 'boltz_dist', (['[eval_i[i] for i in gs_list]', 'T'], {}), '([eval_i[i] for i in gs_list], T)\n', (2123, 2156), False, 'from edrixs.utils import boltz_dist\n'), ((3142, 3165), 'numpy.zeros', 'np.zeros', (['(neloss, nom)'], {}), '((neloss, nom))\n', (3150, 3165), True, 'import numpy as np\n'), ((2409, 2505), 'edrixs.rixs_utils.scattering_mat', 'scattering_mat', (['eval_i', 'eval_n', 'trans_mat_abs[:, :, gs_list]', 'trans_mat_emi', 'om_inc', 'gamma_n'], {}), '(eval_i, eval_n, trans_mat_abs[:, :, gs_list], trans_mat_emi,\n om_inc, gamma_n)\n', (2423, 2505), False, 'from edrixs.rixs_utils import scattering_mat\n'), ((3521, 3574), 'scipy.signal.fftconvolve', 'signal.fftconvolve', (['rixs[i, :, :]', 'gauss'], {'mode': '"""same"""'}), "(rixs[i, :, :], gauss, mode='same')\n", (3539, 3574), False, 'from scipy import signal\n'), ((1921, 1951), 'numpy.transpose', 'np.transpose', (['trans_mat_abs[i]'], {}), '(trans_mat_abs[i])\n', (1933, 1951), True, 'import numpy as np\n'), ((2579, 2628), 'edrixs.photon_transition.dipole_polvec_rixs', 'dipole_polvec_rixs', (['thin', 'thout', 'phi', 'alpha', 'beta'], {}), '(thin, thout, phi, alpha, beta)\n', (2597, 2628), False, 'from edrixs.photon_transition import dipole_polvec_rixs\n'), ((3383, 3499), 'numpy.exp', 'np.exp', (['(-((mid_in - om_mesh[i]) ** 2 / (2 * inc_res ** 2) + (mid_out - eloss_mesh[\n j]) ** 2 / (2 * emi_res ** 2)))'], {}), '(-((mid_in - om_mesh[i]) ** 2 / (2 * inc_res ** 2) + (mid_out -\n eloss_mesh[j]) ** 2 / (2 * emi_res ** 2)))\n', (3389, 3499), True, 'import numpy as np\n'), ((2950, 2975), 'numpy.abs', 'np.abs', (['F_magnitude[n, m]'], {}), '(F_magnitude[n, m])\n', (2956, 2975), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import io
import os
import shutil
import itertools
import gzip
import warnings
import tempfile
import atexit
import zarr
import h5py
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
import pytest
from pytest import approx
from allel.io.vcf_read import (iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5,
vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv,
vcf_to_recarray, read_vcf_headers)
from allel.test.tools import compare_arrays
# needed for PY2/PY3 consistent behaviour
warnings.resetwarnings()
warnings.simplefilter('always')
# setup temp dir for testing
tempdir = tempfile.mkdtemp()
atexit.register(shutil.rmtree, tempdir)
def fixture_path(fn):
return os.path.join(os.path.dirname(__file__), os.pardir, 'data', fn)
def test_read_vcf_chunks():
vcf_path = fixture_path('sample.vcf')
fields, samples, headers, it = iter_vcf_chunks(vcf_path, fields='*', chunk_length=4,
buffer_size=100)
# check headers
assert 'q10' in headers.filters
assert 's50' in headers.filters
assert 'AA' in headers.infos
assert 'AC' in headers.infos
assert 'AF' in headers.infos
assert 'AN' in headers.infos
assert 'DB' in headers.infos
assert 'DP' in headers.infos
assert 'H2' in headers.infos
assert 'NS' in headers.infos
assert 'DP' in headers.formats
assert 'GQ' in headers.formats
assert 'GT' in headers.formats
assert 'HQ' in headers.formats
assert ['NA00001', 'NA00002', 'NA00003'] == headers.samples
assert ['NA00001', 'NA00002', 'NA00003'] == samples.tolist()
assert '1' == headers.infos['AA']['Number']
assert 'String' == headers.infos['AA']['Type']
assert 'Ancestral Allele' == headers.infos['AA']['Description']
assert '2' == headers.formats['HQ']['Number']
assert 'Integer' == headers.formats['HQ']['Type']
assert 'Haplotype Quality' == headers.formats['HQ']['Description']
# check chunk lengths
chunks = [chunk for chunk, _, _, _ in it]
assert 3 == len(chunks)
assert 4 == chunks[0]['variants/POS'].shape[0]
assert 4 == chunks[1]['variants/POS'].shape[0]
assert 1 == chunks[2]['variants/POS'].shape[0]
# check chunk contents
expected_fields = [
# fixed fields
'variants/CHROM',
'variants/POS',
'variants/ID',
'variants/REF',
'variants/ALT',
'variants/QUAL',
'variants/FILTER_PASS',
'variants/FILTER_q10',
'variants/FILTER_s50',
# INFO fields
'variants/AA',
'variants/AC',
'variants/AF',
'variants/AN',
'variants/DB',
'variants/DP',
'variants/H2',
'variants/NS',
# special computed fields
'variants/altlen',
'variants/numalt',
'variants/is_snp',
# FORMAT fields
'calldata/GT',
'calldata/GQ',
'calldata/HQ',
'calldata/DP',
]
for chunk in chunks:
assert sorted(expected_fields) == sorted(chunk.keys())
def test_fields_all():
vcf_path = fixture_path('sample.vcf')
callset = read_vcf(vcf_path, fields='*')
expected_fields = [
'samples',
# fixed fields
'variants/CHROM',
'variants/POS',
'variants/ID',
'variants/REF',
'variants/ALT',
'variants/QUAL',
'variants/FILTER_PASS',
'variants/FILTER_q10',
'variants/FILTER_s50',
# INFO fields
'variants/AA',
'variants/AC',
'variants/AF',
'variants/AN',
'variants/DB',
'variants/DP',
'variants/H2',
'variants/NS',
# special computed fields
'variants/altlen',
'variants/numalt',
'variants/is_snp',
# FORMAT fields
'calldata/GT',
'calldata/GQ',
'calldata/HQ',
'calldata/DP',
]
assert sorted(expected_fields) == sorted(callset.keys())
def test_fields_exclude():
vcf_path = fixture_path('sample.vcf')
exclude = ['variants/altlen', 'ID', 'calldata/DP']
callset = read_vcf(vcf_path, fields='*', exclude_fields=exclude)
expected_fields = [
'samples',
# fixed fields
'variants/CHROM',
'variants/POS',
'variants/REF',
'variants/ALT',
'variants/QUAL',
'variants/FILTER_PASS',
'variants/FILTER_q10',
'variants/FILTER_s50',
# INFO fields
'variants/AA',
'variants/AC',
'variants/AF',
'variants/AN',
'variants/DB',
'variants/DP',
'variants/H2',
'variants/NS',
# special computed fields
'variants/numalt',
'variants/is_snp',
# FORMAT fields
'calldata/GT',
'calldata/GQ',
'calldata/HQ',
]
assert sorted(expected_fields) == sorted(callset.keys())
def test_fields_rename():
vcf_path = fixture_path('sample.vcf')
rename = {'CHROM': 'variants/chromosome',
'variants/altlen': 'spam/eggs',
'calldata/GT': 'foo/bar'}
callset = read_vcf(vcf_path, fields='*', rename_fields=rename)
print(sorted(callset.keys()))
expected_fields = [
'samples',
# fixed fields
'variants/chromosome',
'variants/POS',
'variants/ID',
'variants/REF',
'variants/ALT',
'variants/QUAL',
'variants/FILTER_PASS',
'variants/FILTER_q10',
'variants/FILTER_s50',
# INFO fields
'variants/AA',
'variants/AC',
'variants/AF',
'variants/AN',
'variants/DB',
'variants/DP',
'variants/H2',
'variants/NS',
# special computed fields
'spam/eggs',
'variants/numalt',
'variants/is_snp',
# FORMAT fields
'foo/bar',
'calldata/DP',
'calldata/GQ',
'calldata/HQ',
]
assert sorted(expected_fields) == sorted(callset.keys())
def test_fields_rename_clash():
vcf_path = fixture_path('sample.vcf')
# rename two fields to the same path
rename = {'CHROM': 'variants/chromosome',
'variants/altlen': 'spam/eggs',
'calldata/GT': 'spam/eggs'}
with pytest.raises(ValueError):
read_vcf(vcf_path, fields='*', rename_fields=rename)
# rename two fields to the same path (case insensitive)
rename = {'CHROM': 'variants/chromosome',
'variants/altlen': 'spam/eggs',
'calldata/GT': 'SPAM/EGGS'}
with pytest.raises(ValueError):
read_vcf(vcf_path, fields='*', rename_fields=rename)
# parent clash
rename = {'CHROM': 'variants/chromosome',
'variants/altlen': 'spam/eggs',
'calldata/GT': 'spam'}
with pytest.raises(ValueError):
read_vcf(vcf_path, fields='*', rename_fields=rename)
# parent clash
rename = {'CHROM': 'variants/chromosome',
'variants/altlen': 'spam/eggs',
'calldata/GT': 'SPAM'}
with pytest.raises(ValueError):
read_vcf(vcf_path, fields='*', rename_fields=rename)
# parent clash
rename = {'CHROM': 'variants/chromosome',
'variants/altlen': 'spam',
'calldata/GT': 'spam/eggs'}
with pytest.raises(ValueError):
read_vcf(vcf_path, fields='*', rename_fields=rename)
# parent clash
rename = {'CHROM': 'variants/chromosome',
'variants/altlen': 'spam',
'calldata/GT': 'SPAM/EGGS'}
with pytest.raises(ValueError):
read_vcf(vcf_path, fields='*', rename_fields=rename)
def test_fields_default():
vcf_path = fixture_path('sample.vcf')
callset = read_vcf(vcf_path)
expected_fields = [
'samples',
'variants/CHROM',
'variants/POS',
'variants/ID',
'variants/REF',
'variants/ALT',
'variants/QUAL',
'variants/FILTER_PASS',
'calldata/GT',
]
assert sorted(expected_fields) == sorted(callset.keys())
def test_fields_all_variants():
vcf_path = fixture_path('sample.vcf')
callset = read_vcf(vcf_path, fields='variants/*')
expected_fields = [
# fixed fields
'variants/CHROM',
'variants/POS',
'variants/ID',
'variants/REF',
'variants/ALT',
'variants/QUAL',
'variants/FILTER_PASS',
'variants/FILTER_q10',
'variants/FILTER_s50',
# INFO fields
'variants/AA',
'variants/AC',
'variants/AF',
'variants/AN',
'variants/DB',
'variants/DP',
'variants/H2',
'variants/NS',
# special computed fields
'variants/altlen',
'variants/numalt',
'variants/is_snp',
]
assert sorted(expected_fields) == sorted(callset.keys())
def test_fields_info():
vcf_path = fixture_path('sample.vcf')
callset = read_vcf(vcf_path, fields='INFO')
expected_fields = [
# INFO fields
'variants/AA',
'variants/AC',
'variants/AF',
'variants/AN',
'variants/DB',
'variants/DP',
'variants/H2',
'variants/NS',
]
assert sorted(expected_fields) == sorted(callset.keys())
def test_fields_filter():
vcf_path = fixture_path('sample.vcf')
callset1 = read_vcf(vcf_path, fields='FILTER')
expected_fields = [
'variants/FILTER_PASS',
'variants/FILTER_q10',
'variants/FILTER_s50',
]
assert sorted(expected_fields) == sorted(callset1.keys())
# this has explicit PASS definition in header, shouldn't cause problems
vcf_path = fixture_path('test16.vcf')
callset2 = read_vcf(vcf_path, fields='FILTER')
expected_fields = [
'variants/FILTER_PASS',
'variants/FILTER_q10',
'variants/FILTER_s50',
]
assert sorted(expected_fields) == sorted(callset2.keys())
for k in callset1.keys():
assert_array_equal(callset1[k], callset2[k])
def test_fields_all_calldata():
vcf_path = fixture_path('sample.vcf')
callset = read_vcf(vcf_path, fields='calldata/*')
expected_fields = [
'calldata/GT',
'calldata/GQ',
'calldata/HQ',
'calldata/DP',
]
assert sorted(expected_fields) == sorted(callset.keys())
def test_fields_selected():
vcf_path = fixture_path('sample.vcf')
# without samples
callset = read_vcf(vcf_path,
fields=['CHROM', 'variants/POS', 'AC', 'variants/AF', 'GT',
'calldata/HQ', 'FILTER_q10', 'variants/numalt'])
expected_fields = [
'variants/CHROM',
'variants/POS',
'variants/FILTER_q10',
'variants/AC',
'variants/AF',
'variants/numalt',
# FORMAT fields
'calldata/GT',
'calldata/HQ',
]
assert sorted(expected_fields) == sorted(callset.keys())
# with samples
callset = read_vcf(vcf_path,
fields=['CHROM', 'variants/POS', 'AC', 'variants/AF', 'GT',
'calldata/HQ', 'FILTER_q10', 'variants/numalt', 'samples'],
chunk_length=4, buffer_size=100)
expected_fields = [
'samples',
'variants/CHROM',
'variants/POS',
'variants/FILTER_q10',
'variants/AC',
'variants/AF',
'variants/numalt',
# FORMAT fields
'calldata/GT',
'calldata/HQ',
]
assert sorted(expected_fields) == sorted(callset.keys())
def test_fields_dups():
vcf_path = fixture_path('sample.vcf')
# silently collapse dups
callset = read_vcf(vcf_path,
fields=['CHROM', 'variants/CHROM', 'variants/AF', 'variants/AF',
'numalt', 'variants/numalt'])
expected_fields = [
'variants/CHROM',
'variants/AF',
'variants/numalt'
]
assert sorted(expected_fields) == sorted(callset.keys())
def test_fields_dups_case_insensitive():
vcf_path = fixture_path('altlen.vcf')
# allow case-insensitive dups here (but not in vcf_to_zarr)
callset = read_vcf(vcf_path, fields=['ALTLEN', 'altlen'])
expected_fields = [
'variants/ALTLEN',
'variants/altlen',
]
assert sorted(expected_fields) == sorted(callset.keys())
def _test_read_vcf_content(vcf, chunk_length, buffer_size):
# object dtype for strings
if isinstance(vcf, str):
input_file = vcf
close = False
else:
input_file = vcf()
close = True
callset = read_vcf(input_file,
fields='*',
chunk_length=chunk_length,
buffer_size=buffer_size,
types={'calldata/DP': 'object'})
if close:
input_file.close()
# samples
assert (3,) == callset['samples'].shape
assert 'O' == callset['samples'].dtype.kind
assert ['NA00001', 'NA00002', 'NA00003'] == callset['samples'].tolist()
# fixed fields
assert (9,) == callset['variants/CHROM'].shape
assert np.dtype(object) == callset['variants/CHROM'].dtype
assert '19' == callset['variants/CHROM'][0]
assert (9,) == callset['variants/POS'].shape
assert 111 == callset['variants/POS'][0]
assert (9,) == callset['variants/ID'].shape
assert np.dtype(object) == callset['variants/ID'].dtype
assert 'rs6054257' == callset['variants/ID'][2]
assert (9,) == callset['variants/REF'].shape
assert np.dtype(object) == callset['variants/REF'].dtype
assert 'A' == callset['variants/REF'][0]
assert (9, 3) == callset['variants/ALT'].shape
assert np.dtype(object) == callset['variants/ALT'].dtype
assert 'ATG' == callset['variants/ALT'][8, 1]
assert (9,) == callset['variants/QUAL'].shape
assert 10.0 == callset['variants/QUAL'][1]
assert (9,) == callset['variants/FILTER_PASS'].shape
assert callset['variants/FILTER_PASS'][2]
assert not callset['variants/FILTER_PASS'][3]
assert (9,) == callset['variants/FILTER_q10'].shape
assert callset['variants/FILTER_q10'][3]
# INFO fields
assert 3 == callset['variants/NS'][2]
assert .5 == callset['variants/AF'][2, 0]
assert callset['variants/DB'][2]
assert (3, 1, -1) == tuple(callset['variants/AC'][6])
# test calldata content
assert (9, 3, 2) == callset['calldata/GT'].shape
assert (0, 0) == tuple(callset['calldata/GT'][0, 0])
assert (-1, -1) == tuple(callset['calldata/GT'][6, 2])
assert (-1, -1) == tuple(callset['calldata/GT'][7, 2])
assert (9, 3, 2) == callset['calldata/HQ'].shape
assert (10, 15) == tuple(callset['calldata/HQ'][0, 0])
assert (9, 3) == callset['calldata/DP'].shape
assert np.dtype(object) == callset['calldata/DP'].dtype
assert ('4', '2', '3') == tuple(callset['calldata/DP'][6])
# String (S) dtype
if isinstance(vcf, str):
input_file = vcf
close = False
else:
input_file = vcf()
close = True
types = {'CHROM': 'S12', 'ID': 'S20', 'REF': 'S20', 'ALT': 'S20', 'calldata/DP': 'S3',
'samples': 'S20'}
callset = read_vcf(input_file, fields='*', chunk_length=chunk_length,
buffer_size=buffer_size, types=types)
if close:
input_file.close()
# samples
assert (3,) == callset['samples'].shape
assert 'S' == callset['samples'].dtype.kind
assert [b'NA00001', b'NA00002', b'NA00003'] == callset['samples'].tolist()
# fixed fields
assert (9,) == callset['variants/CHROM'].shape
assert 'S' == callset['variants/CHROM'].dtype.kind
assert b'19' == callset['variants/CHROM'][0]
assert (9,) == callset['variants/POS'].shape
assert 111 == callset['variants/POS'][0]
assert (9,) == callset['variants/ID'].shape
assert 'S' == callset['variants/ID'].dtype.kind
assert b'rs6054257' == callset['variants/ID'][2]
assert (9,) == callset['variants/REF'].shape
assert b'A' == callset['variants/REF'][0]
assert 'S' == callset['variants/REF'].dtype.kind
assert (9, 3) == callset['variants/ALT'].shape
assert b'ATG' == callset['variants/ALT'][8, 1]
assert 'S' == callset['variants/ALT'].dtype.kind
assert (9,) == callset['variants/QUAL'].shape
assert 10.0 == callset['variants/QUAL'][1]
assert (9,) == callset['variants/FILTER_PASS'].shape
assert callset['variants/FILTER_PASS'][2]
assert not callset['variants/FILTER_PASS'][3]
assert (9,) == callset['variants/FILTER_q10'].shape
assert callset['variants/FILTER_q10'][3]
# INFO fields
assert 3 == callset['variants/NS'][2]
assert .5 == callset['variants/AF'][2, 0]
assert callset['variants/DB'][2]
assert (3, 1, -1) == tuple(callset['variants/AC'][6])
# test calldata content
assert (9, 3, 2) == callset['calldata/GT'].shape
assert (0, 0) == tuple(callset['calldata/GT'][0, 0])
assert (-1, -1) == tuple(callset['calldata/GT'][6, 2])
assert (-1, -1) == tuple(callset['calldata/GT'][7, 2])
assert (9, 3, 2) == callset['calldata/HQ'].shape
assert (10, 15) == tuple(callset['calldata/HQ'][0, 0])
assert (9, 3) == callset['calldata/DP'].shape
assert 'S' == callset['calldata/DP'].dtype.kind
assert (b'4', b'2', b'3') == tuple(callset['calldata/DP'][6])
def test_inputs():
vcf_path = fixture_path('sample.vcf')
with open(vcf_path, mode='rb') as f:
data = f.read(-1)
inputs = (vcf_path,
vcf_path + '.gz',
lambda: open(vcf_path, mode='rb'),
lambda: gzip.open(vcf_path + '.gz', mode='rb'),
lambda: io.BytesIO(data),
lambda: io.BytesIO(data.replace(b'\n', b'\r')),
lambda: io.BytesIO(data.replace(b'\n', b'\r\n')))
chunk_length = 3
buffer_size = 10
for i in inputs:
_test_read_vcf_content(i, chunk_length, buffer_size)
def test_chunk_lengths():
vcf_path = fixture_path('sample.vcf')
chunk_lengths = 1, 2, 3, 5, 10, 20
buffer_size = 10
for chunk_length in chunk_lengths:
_test_read_vcf_content(vcf_path, chunk_length, buffer_size)
def test_buffer_sizes():
vcf_path = fixture_path('sample.vcf')
chunk_length = 3
buffer_sizes = 1, 2, 4, 8, 16, 32, 64, 128, 256, 512
for buffer_size in buffer_sizes:
_test_read_vcf_content(vcf_path, chunk_length, buffer_size)
def test_utf8():
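    # VCF with UTF-8 text in samples, CHROM, ID, FILTER, INFO and calldata;
    # string fields default to object dtype and should decode correctly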
vcf_path = fixture_path('sample.utf8.vcf')
callset = read_vcf(vcf_path, fields='*')
# samples
assert (3,) == callset['samples'].shape
assert 'O' == callset['samples'].dtype.kind
assert [u'NA00001', u'Γεια σου κόσμε!', u'NA00003'] == callset['samples'].tolist()
# CHROM
assert (9,) == callset['variants/CHROM'].shape
assert np.dtype(object) == callset['variants/CHROM'].dtype
assert '19' == callset['variants/CHROM'][0]
assert u'Njatjeta Botë!' == callset['variants/CHROM'][-2]
# POS
assert (9,) == callset['variants/POS'].shape
assert 111 == callset['variants/POS'][0]
# ID
assert (9,) == callset['variants/ID'].shape
assert np.dtype(object) == callset['variants/ID'].dtype
assert 'foo' == callset['variants/ID'][0]
assert u'¡Hola mundo!' == callset['variants/ID'][1]
# REF
assert (9,) == callset['variants/REF'].shape
assert np.dtype(object) == callset['variants/REF'].dtype
assert 'A' == callset['variants/REF'][0]
# ALT
assert (9, 3) == callset['variants/ALT'].shape
assert np.dtype(object) == callset['variants/ALT'].dtype
assert 'ATG' == callset['variants/ALT'][8, 1]
# QUAL
assert (9,) == callset['variants/QUAL'].shape
assert 10.0 == callset['variants/QUAL'][1]
# FILTER
assert (9,) == callset['variants/FILTER_PASS'].shape
assert callset['variants/FILTER_PASS'][2]
assert not callset['variants/FILTER_PASS'][5]
assert (9,) == callset[u'variants/FILTER_Helló_világ!'].shape
assert not callset[u'variants/FILTER_Helló_világ!'][0]
assert callset[u'variants/FILTER_Helló_világ!'][5]
# INFO fields
assert u'foo' == callset['variants/TEXT'][0]
assert u'こんにちは世界' == callset['variants/TEXT'][4]
# calldata
assert (9, 3, 2) == callset['calldata/GT'].shape
assert (0, 0) == tuple(callset['calldata/GT'][0, 0])
assert (-1, -1) == tuple(callset['calldata/GT'][6, 2])
assert (-1, -1) == tuple(callset['calldata/GT'][7, 2])
assert (9, 3, 2) == callset['calldata/HQ'].shape
assert (10, 15) == tuple(callset['calldata/HQ'][0, 0])
assert (9, 3) == callset['calldata/DP'].shape
assert (4, 2, 3) == tuple(callset['calldata/DP'][6])
assert (u'foo', u'Hej Världen!', u'.') == tuple(callset['calldata/GTXT'][0])
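# The truncation tests below check that data lines which end early (omitting trailing
# columns) still parse correctly for the columns that are present.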
def test_truncation_chrom():
input_data = (b"#CHROM\n"
b"2L\n"
b"2R\n")
# with and without final line terminator
for data in (input_data, input_data[:-1]):
for string_type in 'S10', 'object':
input_file = io.BytesIO(data)
callset = read_vcf(input_file, fields=['CHROM', 'samples'],
types={'CHROM': string_type})
# check fields
expected_fields = ['variants/CHROM']
assert sorted(expected_fields) == sorted(callset.keys())
# check data content
a = callset['variants/CHROM']
assert 2 == len(a)
if string_type == 'S10':
assert b'2L' == a[0]
assert b'2R' == a[1]
else:
assert '2L' == a[0]
assert '2R' == a[1]
def test_truncation_pos():
input_data = (b"#CHROM\tPOS\n"
b"2L\t12\n"
b"2R\t34\n")
# with and without final line terminator
for data in (input_data, input_data[:-1]):
input_file = io.BytesIO(data)
callset = read_vcf(input_file, fields=['POS', 'samples'])
# check fields
expected_fields = ['variants/POS']
assert sorted(expected_fields) == sorted(callset.keys())
# check data content
a = callset['variants/POS']
assert 2 == len(a)
assert 12 == a[0]
assert 34 == a[1]
def test_truncation_id():
input_data = (b"#CHROM\tPOS\tID\n"
b"2L\t12\tfoo\n"
b"2R\t34\tbar\n")
# with and without final line terminator
for data in (input_data, input_data[:-1]):
for string_type in 'S10', 'object':
input_file = io.BytesIO(data)
callset = read_vcf(input_file, fields=['ID', 'samples'],
types={'ID': string_type})
# check fields
expected_fields = ['variants/ID']
assert sorted(expected_fields) == sorted(callset.keys())
# check data content
a = callset['variants/ID']
assert 2 == len(a)
if string_type == 'S10':
assert b'foo' == a[0]
assert b'bar' == a[1]
else:
assert 'foo' == a[0]
assert 'bar' == a[1]
def test_truncation_ref():
input_data = (b"#CHROM\tPOS\tID\tREF\n"
b"2L\t12\tfoo\tA\n"
b"2R\t34\tbar\tC\n")
# with and without final line terminator
for data in (input_data, input_data[:-1]):
for string_type in 'S10', 'object':
input_file = io.BytesIO(data)
callset = read_vcf(input_file, fields=['REF', 'samples'],
types={'REF': string_type})
# check fields
expected_fields = ['variants/REF']
assert sorted(expected_fields) == sorted(callset.keys())
# check data content
a = callset['variants/REF']
assert 2 == len(a)
if string_type == 'S10':
assert b'A' == a[0]
assert b'C' == a[1]
else:
assert 'A' == a[0]
assert 'C' == a[1]
def test_truncation_alt():
input_data = (b"#CHROM\tPOS\tID\tREF\tALT\n"
b"2L\t12\tfoo\tA\tC\n"
b"2R\t34\tbar\tC\tG\n")
# with and without final line terminator
for data in (input_data, input_data[:-1]):
for string_type in 'S10', 'object':
input_file = io.BytesIO(data)
callset = read_vcf(input_file, fields=['ALT', 'samples'], numbers=dict(ALT=1),
types={'ALT': string_type})
# check fields
expected_fields = ['variants/ALT']
assert sorted(expected_fields) == sorted(callset.keys())
# check data content
a = callset['variants/ALT']
assert 2 == len(a)
if string_type == 'S10':
assert b'C' == a[0]
assert b'G' == a[1]
else:
assert 'C' == a[0]
assert 'G' == a[1]
def test_truncation_qual():
input_data = (b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\n"
b"2L\t12\tfoo\tA\tC\t1.2\n"
b"2R\t34\tbar\tC\tG\t3.4\n")
# with and without final line terminator
for data in (input_data, input_data[:-1]):
input_file = io.BytesIO(data)
callset = read_vcf(input_file, fields=['QUAL', 'samples'])
# check fields
expected_fields = ['variants/QUAL']
assert sorted(expected_fields) == sorted(callset.keys())
# check data content
a = callset['variants/QUAL']
assert 2 == len(a)
assert approx(1.2) == a[0]
assert approx(3.4) == a[1]
def test_truncation_filter():
input_data = (b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\n"
b"2L\t12\tfoo\tA\tC\t1.2\t.\n"
b"2R\t34\tbar\tC\tG\t3.4\tPASS\n"
b"2R\t56\tbaz\tG\tT\t56.77\tq10,s50\n")
# with and without final line terminator
for data in (input_data, input_data[:-1]):
input_file = io.BytesIO(data)
callset = read_vcf(input_file,
fields=['FILTER_PASS', 'FILTER_q10', 'FILTER_s50', 'samples'])
# check fields
expected_fields = ['variants/FILTER_PASS', 'variants/FILTER_q10',
'variants/FILTER_s50']
assert sorted(expected_fields) == sorted(callset.keys())
# check data content
a = callset['variants/FILTER_PASS']
assert 3 == len(a)
assert [False, True, False] == a.tolist()
a = callset['variants/FILTER_q10']
assert 3 == len(a)
assert [False, False, True] == a.tolist()
a = callset['variants/FILTER_s50']
assert 3 == len(a)
assert [False, False, True] == a.tolist()
def test_truncation_info():
input_data = (b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n"
b"2L\t12\tfoo\tA\tC\t1.2\t.\tfoo=42;bar=1.2\n"
b"2R\t34\tbar\tC\tG\t3.4\tPASS\t.\n"
b"2R\t56\tbaz\tG\tT\t56.77\tq10,s50\t\n")
# with and without final line terminator
for data in (input_data, input_data[:-1]):
input_file = io.BytesIO(data)
callset = read_vcf(input_file,
fields=['foo', 'bar', 'samples'],
types=dict(foo='Integer', bar='Float'))
# check fields
expected_fields = ['variants/foo', 'variants/bar']
assert sorted(expected_fields) == sorted(callset.keys())
# check data content
a = callset['variants/foo']
assert 3 == len(a)
assert 42 == a[0]
assert -1 == a[1]
assert -1 == a[2]
a = callset['variants/bar']
assert 3 == len(a)
assert approx(1.2) == a[0]
assert np.isnan(a[1])
assert np.isnan(a[2])
def test_truncation_format():
input_data = (b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\n"
b"2L\t12\tfoo\tA\tC\t1.2\t.\tfoo=42;bar=1.2\tGT:GQ\n"
b"2R\t34\tbar\tC\tG\t3.4\tPASS\t.\t.\n"
b"2R\t56\tbaz\tG\tT\t56.77\tq10,s50\t\t\n")
# with and without final line terminator
for data in (input_data, input_data[:-1]):
input_file = io.BytesIO(data)
callset = read_vcf(input_file,
fields=['foo', 'bar', 'samples'],
types=dict(foo='Integer', bar='Float'))
# check fields
expected_fields = ['variants/foo', 'variants/bar']
assert sorted(expected_fields) == sorted(callset.keys())
# check data content
a = callset['variants/foo']
assert 3 == len(a)
assert 42 == a[0]
assert -1 == a[1]
assert -1 == a[2]
a = callset['variants/bar']
assert 3 == len(a)
assert approx(1.2) == a[0]
assert np.isnan(a[1])
assert np.isnan(a[2])
def test_truncation_calldata():
input_data = (b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\n"
b"2L\t12\tfoo\tA\tC\t1.2\t.\tfoo=42;bar=1.2\tGT:GQ\t0/1:12\t1/2:34\n"
b"2R\t34\tbar\tC\tG\t3.4\tPASS\t.\tGT\t./.\n"
b"2R\t56\tbaz\tG\tT\t56.77\tq10,s50\t\n")
# with and without final line terminator
for data in (input_data, input_data[:-1]):
input_file = io.BytesIO(data)
callset = read_vcf(input_file,
fields=['calldata/GT', 'calldata/GQ', 'samples'],
types={'calldata/GT': 'i1', 'calldata/GQ': 'i2'})
# check fields
expected_fields = ['calldata/GT', 'calldata/GQ', 'samples']
assert sorted(expected_fields) == sorted(callset.keys())
# check data content
assert 2 == len(callset['samples'])
assert ['S2', 'S1'] == callset['samples'].tolist()
a = callset['calldata/GT']
assert (3, 2, 2) == a.shape
assert (0, 1) == tuple(a[0, 0])
assert (1, 2) == tuple(a[0, 1])
assert (-1, -1) == tuple(a[1, 0])
assert (-1, -1) == tuple(a[1, 1])
assert (-1, -1) == tuple(a[2, 0])
assert (-1, -1) == tuple(a[2, 1])
a = callset['calldata/GQ']
assert (3, 2) == a.shape
assert 12 == a[0, 0]
assert 34 == a[0, 1]
assert -1 == a[1, 0]
assert -1 == a[1, 1]
assert -1 == a[2, 0]
assert -1 == a[2, 1]
def test_info_types():
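    # INFO fields can be requested with any integer, float or string dtype via types=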
vcf_path = fixture_path('sample.vcf')
for dtype in ('i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8', 'f4', 'f8', 'S10',
'object'):
callset = read_vcf(vcf_path, fields=['variants/DP', 'variants/AC'],
types={'variants/DP': dtype, 'variants/AC': dtype},
numbers={'variants/AC': 3})
assert np.dtype(dtype) == callset['variants/DP'].dtype
assert (9,) == callset['variants/DP'].shape
assert (9, 3) == callset['variants/AC'].shape
def test_vcf_types():
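    # with no types= override, the dtype is inferred from the Type declared in the
    # INFO header line (String -> object, Integer -> i4, Float -> f4, Character -> S1)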
input_data = (
b'##INFO=<ID=foo,Number=1,Type=String,Description="Testing 123.">\n'
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\n"
b"2L\t12\t.\tA\tC\t.\t.\tfoo=bar\t.\n"
)
callset = read_vcf(io.BytesIO(input_data), fields=['foo'])
assert np.dtype(object) == callset['variants/foo'].dtype
input_data = (
b'##INFO=<ID=foo,Number=1,Type=Integer,Description="Testing 123.">\n'
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\n"
b"2L\t12\t.\tA\tC\t.\t.\tfoo=42\t.\n"
)
callset = read_vcf(io.BytesIO(input_data), fields=['foo'])
assert np.dtype('i4') == callset['variants/foo'].dtype
input_data = (
b'##INFO=<ID=foo,Number=1,Type=Float,Description="Testing 123.">\n'
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\n"
b"2L\t12\t.\tA\tC\t.\t.\tfoo=42.0\t.\n"
)
callset = read_vcf(io.BytesIO(input_data), fields=['foo'])
assert np.dtype('f4') == callset['variants/foo'].dtype
input_data = (
b'##INFO=<ID=foo,Number=1,Type=Character,Description="Testing 123.">\n'
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\n"
b"2L\t12\t.\tA\tC\t.\t.\tfoo=b\t.\n"
)
callset = read_vcf(io.BytesIO(input_data), fields=['foo'])
assert np.dtype('S1') == callset['variants/foo'].dtype
def test_genotype_types():
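    # GT can be parsed into any requested dtype; a 'genotype/<dtype>' type can also be
    # applied to a non-GT calldata field, giving a ploidy dimension padded with -1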
vcf_path = fixture_path('sample.vcf')
for dtype in 'i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8', 'S3', 'object':
callset = read_vcf(vcf_path, fields=['GT'], types={'GT': dtype},
numbers={'GT': 2})
assert np.dtype(dtype) == callset['calldata/GT'].dtype
assert (9, 3, 2) == callset['calldata/GT'].shape
# non-GT field with genotype dtype
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS1\tS2\tS3\n"
b"2L\t12\t.\tA\t.\t.\t.\t.\tCustomGT:CustomGQ\t0/0/0:11\t0/1/2:12\t././.:.\n"
b"2L\t34\t.\tC\tT\t.\t.\t.\tCustomGT:CustomGQ\t0/1/2:22\t3/3/.:33\t.\n"
b"3R\t45\t.\tG\tA,T\t.\t.\t.\tCustomGT:CustomGQ\t0/1:.\t5:12\t\n"
)
callset = read_vcf(io.BytesIO(input_data),
fields=['calldata/CustomGT', 'calldata/CustomGQ'],
numbers={'calldata/CustomGT': 3, 'calldata/CustomGQ': 1},
types={'calldata/CustomGT': 'genotype/i1',
'calldata/CustomGQ': 'i2'})
e = np.array([[[0, 0, 0], [0, 1, 2], [-1, -1, -1]],
[[0, 1, 2], [3, 3, -1], [-1, -1, -1]],
[[0, 1, -1], [5, -1, -1], [-1, -1, -1]]], dtype='i1')
a = callset['calldata/CustomGT']
assert_array_equal(e, a)
assert e.dtype == a.dtype
e = np.array([[11, 12, -1],
[22, 33, -1],
[-1, 12, -1]], dtype='i2')
a = callset['calldata/CustomGQ']
assert_array_equal(e, a)
assert e.dtype == a.dtype
def test_calldata_types():
vcf_path = fixture_path('sample.vcf')
for dtype in ('i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8', 'f4', 'f8', 'S10',
'object'):
callset = read_vcf(vcf_path, fields=['HQ'], types={'HQ': dtype},
numbers={'HQ': 2})
assert np.dtype(dtype) == callset['calldata/HQ'].dtype
assert (9, 3, 2) == callset['calldata/HQ'].shape
def test_genotype_ploidy():
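    # numbers=dict(GT=n) sets the ploidy dimension: with n=1 only the first allele is
    # kept, and calls with fewer than n alleles are padded with -1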
vcf_path = fixture_path('sample.vcf')
callset = read_vcf(vcf_path, fields='GT', numbers=dict(GT=1))
gt = callset['calldata/GT']
assert (9, 3) == gt.shape
assert (0, 0, 0) == tuple(gt[8, :])
callset = read_vcf(vcf_path, fields='GT', numbers=dict(GT=2))
gt = callset['calldata/GT']
assert (9, 3, 2) == gt.shape
assert (0, -1) == tuple(gt[8, 0])
assert (0, 1) == tuple(gt[8, 1])
assert (0, 2) == tuple(gt[8, 2])
callset = read_vcf(vcf_path, fields='GT', numbers=dict(GT=3))
gt = callset['calldata/GT']
assert (9, 3, 3) == gt.shape
assert (0, -1, -1) == tuple(gt[8, 0])
assert (0, 1, -1) == tuple(gt[8, 1])
assert (0, 2, -1) == tuple(gt[8, 2])
def test_fills_info():
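    # fills= overrides the default fill value (-1) used for missing integer INFO values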
vcf_path = fixture_path('sample.vcf')
callset = read_vcf(vcf_path, fields='AN', numbers=dict(AN=1))
a = callset['variants/AN']
assert (9,) == a.shape
assert -1 == a[0]
assert -1 == a[1]
assert -1 == a[2]
callset = read_vcf(vcf_path, fields='AN', numbers=dict(AN=1), fills=dict(AN=-2))
a = callset['variants/AN']
assert (9,) == a.shape
assert -2 == a[0]
assert -2 == a[1]
assert -2 == a[2]
callset = read_vcf(vcf_path, fields='AN', numbers=dict(AN=1), fills=dict(AN=-1))
a = callset['variants/AN']
assert (9,) == a.shape
assert -1 == a[0]
assert -1 == a[1]
assert -1 == a[2]
def test_fills_genotype():
vcf_path = fixture_path('sample.vcf')
callset = read_vcf(vcf_path, fields='GT', numbers=dict(GT=2))
gt = callset['calldata/GT']
assert (9, 3, 2) == gt.shape
assert (0, -1) == tuple(gt[8, 0])
assert (0, 1) == tuple(gt[8, 1])
assert (0, 2) == tuple(gt[8, 2])
callset = read_vcf(vcf_path, fields='GT', numbers=dict(GT=2), fills=dict(GT=-2))
gt = callset['calldata/GT']
assert (9, 3, 2) == gt.shape
assert (0, -2) == tuple(gt[8, 0])
assert (0, 1) == tuple(gt[8, 1])
assert (0, 2) == tuple(gt[8, 2])
callset = read_vcf(vcf_path, fields='GT', numbers=dict(GT=3), fills=dict(GT=-1))
gt = callset['calldata/GT']
assert (9, 3, 3) == gt.shape
assert (0, -1, -1) == tuple(gt[8, 0])
assert (0, 1, -1) == tuple(gt[8, 1])
assert (0, 2, -1) == tuple(gt[8, 2])
def test_fills_calldata():
vcf_path = fixture_path('sample.vcf')
callset = read_vcf(vcf_path, fields='HQ', numbers=dict(HQ=2))
a = callset['calldata/HQ']
assert (9, 3, 2) == a.shape
assert (10, 15) == tuple(a[0, 0])
assert (-1, -1) == tuple(a[7, 0])
assert (-1, -1) == tuple(a[8, 0])
callset = read_vcf(vcf_path, fields='HQ', numbers=dict(HQ=2), fills=dict(HQ=-2))
a = callset['calldata/HQ']
assert (9, 3, 2) == a.shape
assert (10, 15) == tuple(a[0, 0])
assert (-2, -2) == tuple(a[7, 0])
assert (-2, -2) == tuple(a[8, 0])
callset = read_vcf(vcf_path, fields='HQ', numbers=dict(HQ=2), fills=dict(HQ=-1))
a = callset['calldata/HQ']
assert (9, 3, 2) == a.shape
assert (10, 15) == tuple(a[0, 0])
assert (-1, -1) == tuple(a[7, 0])
assert (-1, -1) == tuple(a[8, 0])
def test_numbers():
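    # numbers= controls how many values are stored per field: 0 gives a boolean
    # presence flag, 1 a scalar, and n > 1 a trailing dimension; extra values are
    # dropped and missing values are filled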
vcf_path = fixture_path('sample.vcf')
callset = read_vcf(vcf_path, fields=['ALT'], numbers=dict(ALT=1))
a = callset['variants/ALT']
assert (9,) == a.shape
assert 'A' == a[8]
callset = read_vcf(vcf_path, fields=['ALT'], numbers=dict(ALT=2),
types=dict(ALT='S4'))
a = callset['variants/ALT']
assert (9, 2) == a.shape
assert b'A' == a[8, 0]
assert b'ATG' == a[8, 1]
callset = read_vcf(vcf_path, fields=['ALT'], numbers=dict(ALT=3),
types=dict(ALT='S4'))
a = callset['variants/ALT']
assert (9, 3) == a.shape
assert b'A' == a[8, 0]
assert b'ATG' == a[8, 1]
assert b'C' == a[8, 2]
callset = read_vcf(vcf_path, fields=['AC'], numbers=dict(AC=0))
a = callset['variants/AC']
assert (9,) == a.shape
assert not a[0]
assert a[6]
callset = read_vcf(vcf_path, fields=['AC'], numbers=dict(AC=1))
a = callset['variants/AC']
assert (9,) == a.shape
assert -1 == a[0]
assert 3 == a[6]
callset = read_vcf(vcf_path, fields=['AC'], numbers=dict(AC=2))
a = callset['variants/AC']
assert (9, 2) == a.shape
assert -1 == a[0, 0]
assert -1 == a[0, 1]
assert 3 == a[6, 0]
assert 1 == a[6, 1]
callset = read_vcf(vcf_path, fields='AF', numbers=dict(AF=1))
a = callset['variants/AF']
assert (9,) == a.shape
assert 0.5 == a[2]
assert approx(0.333) == a[4]
callset = read_vcf(vcf_path, fields='AF', numbers=dict(AF=2))
a = callset['variants/AF']
assert (9, 2) == a.shape
assert 0.5 == a[2, 0]
assert np.isnan(a[2, 1])
assert approx(0.333) == a[4, 0]
assert approx(0.667) == a[4, 1]
callset = read_vcf(vcf_path, fields=['HQ'], numbers=dict(HQ=1))
a = callset['calldata/HQ']
assert (9, 3) == a.shape
assert 10 == a[0, 0]
assert 51 == a[2, 0]
assert -1 == a[6, 0]
callset = read_vcf(vcf_path, fields=['HQ'], numbers=dict(HQ=2))
a = callset['calldata/HQ']
assert (9, 3, 2) == a.shape
assert (10, 15) == tuple(a[0, 0])
assert (51, 51) == tuple(a[2, 0])
assert (-1, -1) == tuple(a[6, 0])
def test_alt_number():
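    # alt_number sets the assumed number of ALT alleles, which determines the trailing
    # dimension of ALT and, here, of the AC and AF INFO fields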
vcf_path = fixture_path('sample.vcf')
callset = read_vcf(vcf_path, fields=['ALT', 'AC', 'AF'], alt_number=2)
a = callset['variants/ALT']
assert (9, 2) == a.shape
a = callset['variants/AC']
assert (9, 2) == a.shape
a = callset['variants/AF']
assert (9, 2) == a.shape
callset = read_vcf(vcf_path, fields=['ALT', 'AC', 'AF'], alt_number=1)
a = callset['variants/ALT']
assert (9,) == a.shape
a = callset['variants/AC']
assert (9,) == a.shape
a = callset['variants/AF']
assert (9,) == a.shape
callset = read_vcf(vcf_path, fields=['ALT', 'AC', 'AF'], alt_number=5)
a = callset['variants/ALT']
assert (9, 5) == a.shape
a = callset['variants/AC']
assert (9, 5) == a.shape
a = callset['variants/AF']
assert (9, 5) == a.shape
    # per-field numbers= overrides alt_number
callset = read_vcf(vcf_path, fields=['ALT', 'AC', 'AF'],
alt_number=5, numbers={'ALT': 2, 'AC': 4})
a = callset['variants/ALT']
assert (9, 2) == a.shape
a = callset['variants/AC']
assert (9, 4) == a.shape
a = callset['variants/AF']
assert (9, 5) == a.shape
def test_read_region():
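    # selecting a region ('chrom' or 'chrom:start-end') should give the same variants
    # for tabix='tabix', tabix=None and an unavailable tabix executable; a region
    # containing no variants returns None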
for vcf_path in (fixture_path('sample.vcf.gz'),
fixture_path('sample.vcf')):
for tabix in 'tabix', None, 'foobar':
region = '19'
callset = read_vcf(vcf_path, region=region, tabix=tabix)
chrom = callset['variants/CHROM']
pos = callset['variants/POS']
assert 2 == len(chrom)
assert isinstance(chrom, np.ndarray)
assert np.all(chrom == '19')
assert 2 == len(pos)
assert_array_equal([111, 112], pos)
region = '20'
callset = read_vcf(vcf_path, region=region, tabix=tabix)
chrom = callset['variants/CHROM']
pos = callset['variants/POS']
assert 6 == len(chrom)
assert isinstance(chrom, np.ndarray)
assert np.all(chrom == '20')
assert 6 == len(pos)
assert_array_equal([14370, 17330, 1110696, 1230237, 1234567, 1235237], pos)
region = 'X'
callset = read_vcf(vcf_path, region=region, tabix=tabix)
chrom = callset['variants/CHROM']
pos = callset['variants/POS']
assert 1 == len(chrom)
assert isinstance(chrom, np.ndarray)
assert np.all(chrom == 'X')
assert 1 == len(pos)
assert_array_equal([10], pos)
region = 'Y'
callset = read_vcf(vcf_path, region=region, tabix=tabix)
assert callset is None
region = '20:1-100000'
callset = read_vcf(vcf_path, region=region, tabix=tabix)
chrom = callset['variants/CHROM']
pos = callset['variants/POS']
assert 2 == len(chrom)
assert isinstance(chrom, np.ndarray)
assert np.all(chrom == '20')
assert 2 == len(pos)
assert_array_equal([14370, 17330], pos)
region = '20:1000000-1233000'
callset = read_vcf(vcf_path, region=region, tabix=tabix)
chrom = callset['variants/CHROM']
pos = callset['variants/POS']
assert 2 == len(chrom)
assert isinstance(chrom, np.ndarray)
assert np.all(chrom == '20')
assert 2 == len(pos)
assert_array_equal([1110696, 1230237], pos)
region = '20:1233000-2000000'
callset = read_vcf(vcf_path, region=region, tabix=tabix)
chrom = callset['variants/CHROM']
pos = callset['variants/POS']
assert 2 == len(chrom)
assert isinstance(chrom, np.ndarray)
assert np.all(chrom == '20')
assert 2 == len(pos)
assert_array_equal([1234567, 1235237], pos)
def test_read_region_unsorted():
# Test behaviour when data are not sorted by chromosome or position and tabix is
# not available.
fn = fixture_path('unsorted.vcf')
tabix = None
region = '19'
callset = read_vcf(fn, region=region, tabix=tabix)
chrom = callset['variants/CHROM']
pos = callset['variants/POS']
assert 2 == len(chrom)
assert isinstance(chrom, np.ndarray)
assert np.all(chrom == '19')
assert 2 == len(pos)
assert_array_equal([111, 112], pos)
region = '20'
callset = read_vcf(fn, region=region, tabix=tabix)
chrom = callset['variants/CHROM']
pos = callset['variants/POS']
assert 6 == len(chrom)
assert isinstance(chrom, np.ndarray)
assert np.all(chrom == '20')
assert 6 == len(pos)
assert_array_equal([14370, 1230237, 1234567, 1235237, 17330, 1110696], pos)
region = 'X'
callset = read_vcf(fn, region=region, tabix=tabix)
chrom = callset['variants/CHROM']
pos = callset['variants/POS']
assert 1 == len(chrom)
assert isinstance(chrom, np.ndarray)
assert np.all(chrom == 'X')
assert 1 == len(pos)
assert_array_equal([10], pos)
region = 'Y'
callset = read_vcf(fn, region=region, tabix=tabix)
assert callset is None
region = '20:1-100000'
callset = read_vcf(fn, region=region, tabix=tabix)
chrom = callset['variants/CHROM']
pos = callset['variants/POS']
assert 2 == len(chrom)
assert isinstance(chrom, np.ndarray)
assert np.all(chrom == '20')
assert 2 == len(pos)
assert_array_equal([14370, 17330], pos)
region = '20:1000000-1233000'
callset = read_vcf(fn, region=region, tabix=tabix)
chrom = callset['variants/CHROM']
pos = callset['variants/POS']
assert 2 == len(chrom)
assert isinstance(chrom, np.ndarray)
assert np.all(chrom == '20')
assert 2 == len(pos)
assert_array_equal([1230237, 1110696], pos)
region = '20:1233000-2000000'
callset = read_vcf(fn, region=region, tabix=tabix)
chrom = callset['variants/CHROM']
pos = callset['variants/POS']
assert 2 == len(chrom)
assert isinstance(chrom, np.ndarray)
assert np.all(chrom == '20')
assert 2 == len(pos)
assert_array_equal([1234567, 1235237], pos)
def test_read_samples():
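    # samples can be selected by name or by column index, in any order; the output
    # always follows the VCF column order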
vcf_path = fixture_path('sample.vcf')
for samples in ['NA00001', 'NA00003'], [0, 2], ['NA00003', 'NA00001'], [2, 'NA00001']:
callset = read_vcf(vcf_path, fields=['samples', 'GT'], samples=samples)
assert ['NA00001', 'NA00003'] == callset['samples'].astype('U').tolist()
gt = callset['calldata/GT']
assert (9, 2, 2) == gt.shape
assert (0, 0) == tuple(gt[2, 0])
assert (1, 1) == tuple(gt[2, 1])
assert (1, 2) == tuple(gt[4, 0])
assert (2, 2) == tuple(gt[4, 1])
for samples in ['NA00002'], [1]:
callset = read_vcf(vcf_path, fields=['samples', 'GT'], samples=samples)
assert ['NA00002'] == callset['samples'].astype('U').tolist()
gt = callset['calldata/GT']
assert (9, 1, 2) == gt.shape
assert (1, 0) == tuple(gt[2, 0])
assert (2, 1) == tuple(gt[4, 0])
def test_read_empty():
vcf_path = fixture_path('empty.vcf')
callset = read_vcf(vcf_path)
assert callset is None
def test_ann():
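    # ANNTransformer splits the ANN INFO field into separate variants/ANN_* arrays;
    # the fields kept and their types can be restricted via its arguments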
vcf_path = fixture_path('ann.vcf')
# all ANN fields
callset = read_vcf(vcf_path, fields=['ANN'], transformers=[ANNTransformer()])
expect_keys = sorted(['variants/ANN_Allele',
'variants/ANN_Annotation',
'variants/ANN_Annotation_Impact',
'variants/ANN_Gene_Name',
'variants/ANN_Gene_ID',
'variants/ANN_Feature_Type',
'variants/ANN_Feature_ID',
'variants/ANN_Transcript_BioType',
'variants/ANN_Rank',
'variants/ANN_HGVS_c',
'variants/ANN_HGVS_p',
'variants/ANN_cDNA_pos',
'variants/ANN_cDNA_length',
'variants/ANN_CDS_pos',
'variants/ANN_CDS_length',
'variants/ANN_AA_pos',
'variants/ANN_AA_length',
'variants/ANN_Distance'])
assert expect_keys == sorted(callset.keys())
a = callset['variants/ANN_Allele']
assert (3,) == a.shape
assert np.dtype('object') == a.dtype
assert_array_equal(['T', '', 'T'], a)
a = callset['variants/ANN_Annotation']
assert (3,) == a.shape
assert np.dtype('object') == a.dtype
assert_array_equal(['intergenic_region', '', 'missense_variant'], a)
a = callset['variants/ANN_Annotation_Impact']
assert (3,) == a.shape
assert np.dtype('object') == a.dtype
assert_array_equal(['MODIFIER', '', 'MODERATE'], a)
a = callset['variants/ANN_Gene_Name']
assert (3,) == a.shape
assert np.dtype('object') == a.dtype
assert_array_equal(['AGAP004677', '', 'AGAP005273'], a)
a = callset['variants/ANN_Gene_ID']
assert (3,) == a.shape
assert np.dtype('object') == a.dtype
assert_array_equal(['AGAP004677', '', 'AGAP005273'], a)
a = callset['variants/ANN_Feature_Type']
assert (3,) == a.shape
assert np.dtype('object') == a.dtype
assert_array_equal(['intergenic_region', '', 'transcript'], a)
a = callset['variants/ANN_Feature_ID']
assert (3,) == a.shape
assert np.dtype('object') == a.dtype
assert_array_equal(['AGAP004677', '', 'AGAP005273-RA'], a)
a = callset['variants/ANN_Transcript_BioType']
assert np.dtype('object') == a.dtype
assert (3,) == a.shape
assert_array_equal(['', '', 'VectorBase'], a)
assert np.dtype('object') == a.dtype
a = callset['variants/ANN_Rank']
assert (3,) == a.shape
assert np.dtype('int8') == a.dtype
assert_array_equal([-1, -1, 1], a[:])
a = callset['variants/ANN_HGVS_c']
assert (3,) == a.shape
assert np.dtype('object') == a.dtype
assert_array_equal(['', '', '17A>T'], a)
a = callset['variants/ANN_HGVS_p']
assert (3,) == a.shape
assert np.dtype('object') == a.dtype
assert_array_equal(['', '', 'Asp6Val'], a)
a = callset['variants/ANN_cDNA_pos']
assert (3,) == a.shape
assert np.dtype('int32') == a.dtype
assert_array_equal([-1, -1, 17], a)
a = callset['variants/ANN_cDNA_length']
assert (3,) == a.shape
assert np.dtype('int32') == a.dtype
assert_array_equal([-1, -1, 4788], a)
a = callset['variants/ANN_CDS_pos']
assert (3,) == a.shape
assert np.dtype('int32') == a.dtype
assert_array_equal([-1, -1, 17], a)
a = callset['variants/ANN_CDS_length']
assert (3,) == a.shape
assert np.dtype('int32') == a.dtype
assert_array_equal([-1, -1, 4788], a)
a = callset['variants/ANN_AA_pos']
assert (3,) == a.shape
assert np.dtype('int32') == a.dtype
assert_array_equal([-1, -1, 6], a)
a = callset['variants/ANN_AA_length']
assert (3,) == a.shape
assert np.dtype('int32') == a.dtype
assert_array_equal([-1, -1, 1596], a)
a = callset['variants/ANN_Distance']
assert (3,) == a.shape
assert np.dtype('int32') == a.dtype
assert_array_equal([3000, -1, -1], a)
# numbers=2
callset = read_vcf(vcf_path, fields=['ANN'], numbers={'ANN': 2},
transformers=[ANNTransformer()])
a = callset['variants/ANN_Allele']
assert (3, 2) == a.shape
assert np.dtype('object') == a.dtype
assert_array_equal(['T', ''], a[0])
assert_array_equal(['', ''], a[1])
assert_array_equal(['T', 'G'], a[2])
a = callset['variants/ANN_cDNA_pos']
assert (3, 2) == a.shape
assert np.dtype('int32') == a.dtype
assert_array_equal([-1, -1, 17], a[:, 0])
assert_array_equal([-1, -1, 12], a[:, 1])
a = callset['variants/ANN_cDNA_length']
assert (3, 2) == a.shape
assert np.dtype('int32') == a.dtype
assert_array_equal([-1, -1, 4788], a[:, 0])
assert_array_equal([-1, -1, 4768], a[:, 1])
# choose fields and types
transformers = [
ANNTransformer(
fields=['Allele', 'ANN_HGVS_c', 'variants/ANN_cDNA_pos'],
types={'Allele': 'S12',
'ANN_HGVS_c': 'S20',
'variants/ANN_cDNA_pos': 'i8'})
]
callset = read_vcf(vcf_path, fields=['ANN'], transformers=transformers)
assert (sorted(['variants/ANN_Allele', 'variants/ANN_HGVS_c',
'variants/ANN_cDNA_pos']) == sorted(callset.keys()))
a = callset['variants/ANN_Allele']
assert (3,) == a.shape
assert np.dtype('S12') == a.dtype
assert_array_equal([b'T', b'', b'T'], a)
a = callset['variants/ANN_HGVS_c']
assert (3,) == a.shape
assert np.dtype('S20') == a.dtype
assert_array_equal([b'', b'', b'17A>T'], a)
a = callset['variants/ANN_cDNA_pos']
assert (3,) == a.shape
assert np.dtype('i8') == a.dtype
assert_array_equal([-1, -1, 17], a)
def test_format_inconsistencies():
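    # per-sample data that disagrees with the FORMAT column (missing or extra values,
    # empty cells) should be tolerated, with missing values filled with -1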
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\tfoo\tA\tC\t1.2\t.\t.\tGT:GQ\t0/1:12\t1/2\t2/3:34:67,89\t\n"
b"2R\t34\tbar\tC\tG\t3.4\t.\t.\tGT\t./.\t\t3/3:45\t1/2:11:55,67\n"
)
input_file = io.BytesIO(input_data)
callset = read_vcf(input_file, fields=['calldata/GT', 'calldata/GQ'])
gt = callset['calldata/GT']
assert (2, 4, 2) == gt.shape
assert_array_equal([[0, 1], [1, 2], [2, 3], [-1, -1]], gt[0])
assert_array_equal([[-1, -1], [-1, -1], [3, 3], [1, 2]], gt[1])
gq = callset['calldata/GQ']
assert (2, 4) == gq.shape
assert_array_equal([12, -1, 34, -1], gq[0])
assert_array_equal([-1, -1, -1, -1], gq[1])
# noinspection PyTypeChecker
def test_warnings():
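    # malformed input (empty CHROM or POS, unparseable POS, QUAL or calldata values,
    # undeclared INFO/FORMAT keys) should raise a UserWarning rather than an error;
    # empty QUAL, FILTER, INFO and FORMAT fields pass silently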
warnings.resetwarnings()
warnings.simplefilter('error')
# empty CHROM
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"\t12\t.\t.\t.\t.\t.\t.\t.\t.\t.\t.\t.\n"
)
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data))
# empty POS
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t\t.\t.\t.\t.\t.\t.\t.\t.\t.\t.\t.\n"
)
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data))
# dodgy POS
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\taaa\t.\t.\t.\t.\t.\t.\t.\t.\t.\t.\t.\n"
)
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data))
# dodgy POS
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12aaa\t.\t.\t.\t.\t.\t.\t.\t.\t.\t.\t.\n"
)
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data))
# dodgy QUAL
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\t.\t.\t.\taaa\t.\t.\t.\t.\t.\t.\t.\n"
)
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data))
# dodgy QUAL
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\t.\t.\t.\t1.2aaa\t.\t.\t.\t.\t.\t.\t.\n"
)
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data))
# empty QUAL - no warning
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\t.\t.\t.\t\t.\t.\t.\t.\t.\t.\t.\n"
)
read_vcf(io.BytesIO(input_data))
# empty FILTER - no warning
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\t.\t.\t.\t.\t\t.\t.\t.\t.\t.\t.\n"
)
read_vcf(io.BytesIO(input_data))
# empty INFO - no warning
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\t.\t.\t.\t.\t.\t\t.\t.\t.\t.\t.\n"
)
read_vcf(io.BytesIO(input_data))
# empty FORMAT - no warning
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\t.\t.\t.\t.\t.\t.\t\t.\t.\t.\t.\n"
)
read_vcf(io.BytesIO(input_data))
# dodgy calldata (integer)
input_data = (
b'##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n'
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\t.\t.\t.\t.\t.\t.\tGT\t0/1\taa/bb\t.\t.\n"
)
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data), fields=['calldata/GT'])
# dodgy calldata (integer)
input_data = (
b'##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n'
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\t.\t.\t.\t.\t.\t.\tGT\t0/1\t12aa/22\t.\t.\n"
)
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data), fields=['calldata/GT'])
# dodgy calldata (float)
input_data = (
b'##FORMAT=<ID=MQ,Number=1,Type=Float,Description="Mapping Quality">\n'
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\t.\t.\t.\t.\t.\t.\tMQ\t.\t12.3\taaa\t.\n"
)
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data), fields=['calldata/MQ'])
# dodgy calldata (float)
input_data = (
b'##FORMAT=<ID=MQ,Number=1,Type=Float,Description="Mapping Quality">\n'
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\t.\t.\t.\t.\t.\t.\tMQ\t.\t12.3\t34.5aaa\t.\n"
)
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data), fields=['calldata/MQ'])
# dodgy INFO (missing key)
input_data = (
b'##INFO=<ID=MQ,Number=1,Type=Float,Description="Mapping Quality">\n'
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\t.\t.\t.\t.\t.\tfoo=qux;MQ=12\t.\t.\t.\t.\t.\n"
b"2L\t34\t.\t.\t.\t.\t.\tfoo=bar;=34;baz\t.\t.\t.\t.\t.\n"
)
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data), fields=['variants/MQ'])
# INFO not declared in header
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\tfoo\tA\tC,T\t12.3\tPASS\tfoo=bar\tGT:GQ\t0/0:99\t0/1:12\t./.:.\t.\n"
)
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data), fields=['variants/foo'])
# FORMAT not declared in header
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\tfoo\tA\tC,T\t12.3\tPASS\tfoo=bar\tGT:GQ\t0/0:99\t0/1:12\t./.:.\t.\n"
)
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data), fields=['calldata/GT'])
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data), fields=['calldata/GQ'])
warnings.resetwarnings()
warnings.simplefilter('always')
def test_missing_headers():
vcf_path = fixture_path('test14.vcf')
# INFO DP not declared
callset = read_vcf(vcf_path, fields=['DP'], types={'DP': 'String'})
a = callset['variants/DP']
assert '14' == a[2] # default type is string
callset = read_vcf(vcf_path, fields=['DP'], types={'DP': 'Integer'})
a = callset['variants/DP']
assert 14 == a[2]
# what about a field which isn't present at all?
callset = read_vcf(vcf_path, fields=['FOO'])
assert '' == callset['variants/FOO'][2] # default missing value for string field
# FORMAT field DP not declared in VCF header
callset = read_vcf(vcf_path, fields=['calldata/DP'],
types={'calldata/DP': 'Integer'})
assert 1 == callset['calldata/DP'][2, 0]
def test_extra_samples():
# more calldata samples than samples declared in header
path = fixture_path('test48b.vcf')
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS2\tS1\tS3\tS4\n"
b"2L\t12\t.\t.\t.\t.\t.\t.\tGT:GQ\t0/0:34\t0/1:45\t1/1:56\t1/2:99\t2/3:101\n"
)
warnings.resetwarnings()
warnings.simplefilter('error')
with pytest.warns(UserWarning):
read_vcf(path)
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data), fields=['calldata/GT', 'calldata/GQ'])
warnings.resetwarnings()
warnings.simplefilter('always')
# try again without raising warnings to check data
callset = read_vcf(io.BytesIO(input_data), fields=['calldata/GT', 'calldata/GQ'])
assert (1, 4, 2) == callset['calldata/GT'].shape
callset = read_vcf(path)
assert (9, 2, 2) == callset['calldata/GT'].shape
# noinspection PyTypeChecker
def test_no_samples():
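    # with no sample columns declared in the header, the samples and calldata arrays
    # are omitted from the output; the same applies to the HDF5 and zarr writers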
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\n"
b"2L\t12\tfoo\tA\tC,T\t12.3\tPASS\tfoo=bar\tGT:GQ\t0/0:99\t0/1:12\t./.:.\t.\n"
)
callset = read_vcf(io.BytesIO(input_data),
fields=['calldata/GT', 'calldata/GQ', 'samples', 'POS'])
assert 'variants/POS' in callset
assert 'samples' not in callset
assert 'calldata/GT' not in callset
assert 'calldata/GQ' not in callset
h5_path = os.path.join(tempdir, 'sample.h5')
if os.path.exists(h5_path):
os.remove(h5_path)
vcf_to_hdf5(io.BytesIO(input_data), h5_path,
fields=['calldata/GT', 'calldata/GQ', 'samples', 'POS'])
with h5py.File(h5_path, mode='r') as callset:
assert 'variants/POS' in callset
assert 'samples' not in callset
assert 'calldata/GT' not in callset
assert 'calldata/GQ' not in callset
zarr_path = os.path.join(tempdir, 'sample.zarr')
if os.path.exists(zarr_path):
shutil.rmtree(zarr_path)
vcf_to_zarr(io.BytesIO(input_data), zarr_path,
fields=['calldata/GT', 'calldata/GQ', 'samples', 'POS'])
callset = zarr.open_group(zarr_path, mode='r')
assert 'variants/POS' in callset
assert 'samples' not in callset
assert 'calldata/GT' not in callset
assert 'calldata/GQ' not in callset
def test_computed_fields():
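    # numalt, altlen and is_snp are computed from the REF and ALT columns rather than
    # read from the file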
input_data = (b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\n"
b"2L\t2\t.\t.\t.\t.\t.\t.\t.\n"
b"2L\t4\t.\t.\tG\t.\t.\t.\t.\n"
b"2L\t12\t.\tA\t.\t.\t.\t.\t.\n"
b"2L\t34\t.\tC\tT\t.\t.\t.\t.\n"
b"3R\t45\t.\tG\tA,T\t.\t.\t.\t.\n"
b"3R\t47\t.\tG\tC,T,*\t.\t.\t.\t.\n"
b"3R\t56\t.\tG\tA,GTAC\t.\t.\t.\t.\n"
b"3R\t56\t.\tCATG\tC,GATG\t.\t.\t.\t.\n"
b"3R\t56\t.\tGTAC\tATAC,GTACTACTAC,G,GTACA,GTA\t.\t.\t.\t.\n")
for string_dtype in 'S20', 'object':
callset = read_vcf(io.BytesIO(input_data),
fields='*',
numbers={'ALT': 5},
types={'REF': string_dtype, 'ALT': string_dtype})
a = callset['variants/ALT']
assert (9, 5) == a.shape
e = np.array([[b'', b'', b'', b'', b''],
[b'G', b'', b'', b'', b''],
[b'', b'', b'', b'', b''],
[b'T', b'', b'', b'', b''],
[b'A', b'T', b'', b'', b''],
[b'C', b'T', b'*', b'', b''],
[b'A', b'GTAC', b'', b'', b''],
[b'C', b'GATG', b'', b'', b''],
[b'ATAC', b'GTACTACTAC', b'G', b'GTACA', b'GTA']])
if a.dtype.kind == 'O':
e = e.astype('U').astype(object)
assert_array_equal(e, a)
a = callset['variants/numalt']
assert (9,) == a.shape
assert_array_equal([0, 1, 0, 1, 2, 3, 2, 2, 5], a)
a = callset['variants/altlen']
assert (9, 5) == a.shape
e = np.array([[0, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, -1, 0, 0],
[0, 3, 0, 0, 0],
[-3, 0, 0, 0, 0],
[0, 6, -3, 1, -1]])
assert_array_equal(e, a)
a = callset['variants/is_snp']
assert (9,) == a.shape
assert np.dtype(bool) == a.dtype
assert_array_equal([False, False, False, True, True, False, False, False, False],
a)
        # test the computed fields again with the ALT number reduced to 1
callset = read_vcf(io.BytesIO(input_data),
fields='*',
numbers={'ALT': 1},
types={'REF': string_dtype, 'ALT': string_dtype})
a = callset['variants/ALT']
assert (9,) == a.shape
e = np.array([b'', b'G', b'', b'T', b'A', b'C', b'A', b'C', b'ATAC'])
if a.dtype.kind == 'O':
e = e.astype('U').astype(object)
assert_array_equal(e, a)
a = callset['variants/numalt']
assert (9,) == a.shape
assert_array_equal([0, 1, 0, 1, 2, 3, 2, 2, 5], a)
a = callset['variants/altlen']
assert (9,) == a.shape
e = np.array([0, 1, 0, 0, 0, 0, 0, -3, 0])
assert_array_equal(e, a)
a = callset['variants/is_snp']
assert (9,) == a.shape
assert np.dtype(bool) == a.dtype
assert_array_equal([False, False, False, True, True, False, False, False, False],
a)
def test_genotype_ac():
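    # the 'genotype_ac/<dtype>' type converts genotype calls into per-allele count
    # vectors, which also accommodates mixed ploidy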
input_data = (
b"#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS1\tS2\tS3\n"
b"2L\t12\t.\tA\t.\t.\t.\t.\tGT:GQ\t0/0/0:11\t0/1/2:12\t././.:.\n"
b"2L\t34\t.\tC\tT\t.\t.\t.\tGT:GQ\t0/1/2:22\t3/3/.:33\t.\n"
b"3R\t45\t.\tG\tA,T\t.\t.\t.\tGT:GQ\t0/1:.\t3:12\t\n"
b"X\t55\t.\tG\tA,T\t.\t.\t.\tGT:GQ\t0/1/1/3/4:.\t1/1/2/2/4/4/5:12\t0/0/1/2/3/./4\n"
)
for t in 'i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8':
callset = read_vcf(io.BytesIO(input_data),
fields=['calldata/GT'],
numbers={'calldata/GT': 4},
types={'calldata/GT': 'genotype_ac/' + t})
e = np.array([[[3, 0, 0, 0], [1, 1, 1, 0], [0, 0, 0, 0]],
[[1, 1, 1, 0], [0, 0, 0, 2], [0, 0, 0, 0]],
[[1, 1, 0, 0], [0, 0, 0, 1], [0, 0, 0, 0]],
[[1, 2, 0, 1], [0, 2, 2, 0], [2, 1, 1, 1]]], dtype=t)
a = callset['calldata/GT']
assert e.dtype == a.dtype
assert_array_equal(e, a)
vcf_path = fixture_path('test63.vcf')
callset = read_vcf(vcf_path, fields='GT', numbers={'GT': 3},
types={'GT': 'genotype_ac/i1'})
e = np.array([
[(2, 0, 0), (3, 0, 0), (1, 0, 0)],
[(0, 1, 0), (1, 1, 0), (1, 1, 1)],
[(0, 0, 0), (0, 0, 0), (0, 0, 0)],
[(0, 0, 0), (0, 0, 0), (0, 0, 0)],
])
a = callset['calldata/GT']
assert_array_equal(e, a)
def test_region_truncate():
vcf_path = fixture_path('test54.vcf.gz')
for tabix in 'tabix', None:
callset = read_vcf(vcf_path, region='chr1:10-100', tabix=tabix)
pos = callset['variants/POS']
assert 2 == pos.shape[0]
assert_array_equal([20, 30], pos)
def test_errors():
# try to open a directory
path = '.'
with pytest.raises(OSError):
read_vcf(path)
# try to open a file that doesn't exist
path = 'doesnotexist.vcf'
with pytest.raises(FileNotFoundError):
read_vcf(path)
# try to open a file that doesn't exist
path = 'doesnotexist.vcf.gz'
with pytest.raises(FileNotFoundError):
read_vcf(path)
# file is nothing like a VCF (has no header)
path = fixture_path('test48a.vcf')
with pytest.raises(RuntimeError):
read_vcf(path)
def test_dup_headers():
warnings.resetwarnings()
warnings.simplefilter('error')
# dup FILTER
input_data = b"""##fileformat=VCFv4.1
##FILTER=<ID=s50,Description="Less than 50% of samples have data">
##FILTER=<ID=s50,Description="Less than 50% of samples have data">
##INFO=<ID=DP,Number=1,Type=Integer,Description="Total Depth">
##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">
##FORMAT=<ID=AD,Number=A,Type=Integer,Description="Allele Depths">
##FORMAT=<ID=ZZ,Number=1,Type=String,Description="ZZ">
#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT test1 test2 test3 test4
chr1 1 . A G . PASS DP=2 GT:AD 0:1,0 .:1,0 0:0,0 .:0,0
chr1 2 . A G . PASS DP=2 GT:AD:ZZ 0:1,0:dummy 0:1,0 0:0,0 .:0,0
chr1 3 . A G . PASS DP=2 GT:AD:ZZ 0:1,0:dummy 1:1,0 . ./.
"""
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data))
# dup INFO
input_data = b"""##fileformat=VCFv4.1
##FILTER=<ID=s50,Description="Less than 50% of samples have data">
##INFO=<ID=DP,Number=1,Type=Integer,Description="Total Depth">
##INFO=<ID=DP,Number=1,Type=Integer,Description="Total Depth">
##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">
##FORMAT=<ID=AD,Number=A,Type=Integer,Description="Allele Depths">
##FORMAT=<ID=ZZ,Number=1,Type=String,Description="ZZ">
#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT test1 test2 test3 test4
chr1 1 . A G . PASS DP=2 GT:AD 0:1,0 .:1,0 0:0,0 .:0,0
chr1 2 . A G . PASS DP=2 GT:AD:ZZ 0:1,0:dummy 0:1,0 0:0,0 .:0,0
chr1 3 . A G . PASS DP=2 GT:AD:ZZ 0:1,0:dummy 1:1,0 . ./.
"""
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data))
# dup FORMAT
input_data = b"""##fileformat=VCFv4.1
##FILTER=<ID=s50,Description="Less than 50% of samples have data">
##INFO=<ID=DP,Number=1,Type=Integer,Description="Total Depth">
##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">
##FORMAT=<ID=AD,Number=A,Type=Integer,Description="Allele Depths">
##FORMAT=<ID=AD,Number=A,Type=Integer,Description="Allele Depths">
##FORMAT=<ID=ZZ,Number=1,Type=String,Description="ZZ">
#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT test1 test2 test3 test4
chr1 1 . A G . PASS DP=2 GT:AD 0:1,0 .:1,0 0:0,0 .:0,0
chr1 2 . A G . PASS DP=2 GT:AD:ZZ 0:1,0:dummy 0:1,0 0:0,0 .:0,0
chr1 3 . A G . PASS DP=2 GT:AD:ZZ 0:1,0:dummy 1:1,0 . ./.
"""
with pytest.warns(UserWarning):
read_vcf(io.BytesIO(input_data))
warnings.resetwarnings()
warnings.simplefilter('always')
def test_override_vcf_type():
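    # types= can override the type declared in the VCF header; without the override
    # the fractional value here is parsed under the declared type and comes out as 0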
vcf_path = fixture_path('test4.vcf')
callset = read_vcf(vcf_path, fields=['MQ0FractionTest'])
assert 0 == callset['variants/MQ0FractionTest'][2]
callset = read_vcf(vcf_path, fields=['MQ0FractionTest'],
types={'MQ0FractionTest': 'Float'})
assert approx(0.03) == callset['variants/MQ0FractionTest'][2]
def test_header_overrides_default_vcf_type():
vcf_path = fixture_path('test176.vcf')
callset = read_vcf(vcf_path, fields='*')
gq = callset['calldata/GQ']
assert 'f' == gq.dtype.kind
assert np.isnan(gq[0, 0])
assert approx(48.2) == gq[2, 0]
assert approx(48.1) == gq[2, 1]
assert approx(43.9) == gq[2, 2]
assert approx(49.) == gq[3, 0]
assert approx(3.) == gq[3, 1]
assert approx(41.) == gq[3, 2]
def test_missing_calldata():
vcf_path = fixture_path('test1.vcf')
callset = read_vcf(vcf_path, fields='calldata/*', numbers={'AD': 2})
gt = callset['calldata/GT']
ad = callset['calldata/AD']
assert (-1, -1) == tuple(gt[0, 1])
assert (1, 0) == tuple(ad[0, 1])
assert (-1, -1) == tuple(gt[2, 2])
assert (-1, -1) == tuple(ad[2, 2])
assert (-1, -1) == tuple(gt[2, 3])
assert (-1, -1) == tuple(ad[2, 3])
def test_calldata_cleared():
vcf_path = fixture_path('test32.vcf')
callset = read_vcf(vcf_path, fields=['calldata/GT', 'calldata/DP', 'calldata/GQ'])
gt = callset['calldata/GT']
dp = callset['calldata/DP']
gq = callset['calldata/GQ']
assert (0, 0) == tuple(gt[0, 3])
assert 8 == dp[0, 3]
assert 3 == gq[0, 3]
assert (-1, -1) == tuple(gt[1, 3])
assert -1 == dp[1, 3]
assert -1 == gq[1, 3]
def test_calldata_quirks():
vcf_path = fixture_path('test1.vcf')
callset = read_vcf(vcf_path, fields=['AD', 'GT'], numbers={'AD': 2})
gt = callset['calldata/GT']
ad = callset['calldata/AD']
e = np.array([[-1, -1], [0, -1], [1, -1]])
assert_array_equal(e, gt[:, 1])
e = np.array([[1, 0], [1, 0], [1, 0]])
assert_array_equal(e, ad[:, 1])
def test_vcf_to_npz():
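    # writing to .npz and loading it back should reproduce exactly what read_vcf
    # returns, across combinations of region, tabix, sample selection and string type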
vcf_paths = [fixture_path(x) for x in ['sample.vcf', 'sample.vcf.gz']]
npz_path = os.path.join(tempdir, 'sample.npz')
region_values = None, '20', '20:10000-20000', 'Y'
tabix_values = 'tabix', None
samples_values = None, ['NA00001', 'NA00003']
string_type_values = 'S10', 'object'
param_matrix = itertools.product(vcf_paths, region_values, tabix_values,
samples_values, string_type_values)
for vcf_path, region, tabix, samples, string_type in param_matrix:
types = {'CHROM': string_type, 'ALT': string_type, 'samples': string_type}
expected = read_vcf(vcf_path, fields='*', alt_number=2, region=region,
tabix=tabix, samples=samples, types=types)
if os.path.exists(npz_path):
os.remove(npz_path)
vcf_to_npz(vcf_path, npz_path, fields='*', chunk_length=2, alt_number=2,
region=region, tabix=tabix, samples=samples, types=types)
if expected is None:
assert not os.path.exists(npz_path)
else:
actual = np.load(npz_path, allow_pickle=True)
for key in expected.keys():
if expected[key].dtype.kind == 'f':
assert_array_almost_equal(expected[key], actual[key])
else:
assert_array_equal(expected[key], actual[key])
for key in actual.keys():
assert key in expected
actual.close()
def test_vcf_to_npz_exclude():
vcf_path = fixture_path('sample.vcf')
npz_path = os.path.join(tempdir, 'sample.npz')
exclude = ['variants/altlen', 'ID', 'calldata/DP']
expected = read_vcf(vcf_path, fields='*', exclude_fields=exclude)
if os.path.exists(npz_path):
os.remove(npz_path)
vcf_to_npz(vcf_path, npz_path, fields='*', exclude_fields=exclude)
actual = np.load(npz_path, allow_pickle=True)
for key in expected.keys():
if expected[key].dtype.kind == 'f':
assert_array_almost_equal(expected[key], actual[key])
else:
assert_array_equal(expected[key], actual[key])
for key in actual.keys():
assert key in expected
actual.close()
def test_vcf_to_npz_rename():
vcf_path = fixture_path('sample.vcf')
npz_path = os.path.join(tempdir, 'sample.npz')
rename = {'CHROM': 'variants/chromosome',
'variants/altlen': 'spam/eggs',
'calldata/GT': 'foo/bar'}
expected = read_vcf(vcf_path, fields='*', rename_fields=rename)
if os.path.exists(npz_path):
os.remove(npz_path)
vcf_to_npz(vcf_path, npz_path, fields='*', rename_fields=rename)
actual = np.load(npz_path, allow_pickle=True)
for key in expected.keys():
if expected[key].dtype.kind == 'f':
assert_array_almost_equal(expected[key], actual[key])
else:
assert_array_equal(expected[key], actual[key])
for key in actual.keys():
assert key in expected
actual.close()
def test_vcf_to_zarr():
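    # writing to zarr and reading back should match read_vcf; header Description text
    # is stored as a dataset attribute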
vcf_paths = [fixture_path(x) for x in ['sample.vcf', 'sample.vcf.gz']]
zarr_path = os.path.join(tempdir, 'sample.zarr')
region_values = None, '20', '20:10000-20000', 'Y'
tabix_values = 'tabix', None
samples_values = None, ['NA00001', 'NA00003']
string_type_values = 'S10', 'object'
param_matrix = itertools.product(vcf_paths, region_values, tabix_values,
samples_values, string_type_values)
for vcf_path, region, tabix, samples, string_type in param_matrix:
types = {'CHROM': string_type, 'ALT': string_type, 'samples': string_type}
expected = read_vcf(vcf_path, fields='*', alt_number=2, region=region,
tabix=tabix, samples=samples, types=types)
if os.path.exists(zarr_path):
shutil.rmtree(zarr_path)
vcf_to_zarr(vcf_path, zarr_path, fields='*', alt_number=2, chunk_length=2,
region=region, tabix=tabix, samples=samples, types=types)
if expected is None:
assert not os.path.exists(zarr_path)
else:
actual = zarr.open_group(zarr_path, mode='r')
for key in expected.keys():
e = expected[key]
a = actual[key][:]
compare_arrays(e, a)
assert (actual['variants/NS'].attrs['Description'] ==
'Number of Samples With Data')
assert (actual['calldata/GQ'].attrs['Description'] ==
'Genotype Quality')
for key in actual.keys():
if key not in {'variants', 'calldata'}:
assert key in expected
for key in actual['variants'].keys():
assert 'variants/' + key in expected
for key in actual['calldata'].keys():
assert 'calldata/' + key in expected
def test_vcf_to_zarr_exclude():
vcf_path = fixture_path('sample.vcf')
zarr_path = os.path.join(tempdir, 'sample.zarr')
exclude = ['variants/altlen', 'ID', 'calldata/DP']
expected = read_vcf(vcf_path, fields='*', exclude_fields=exclude)
if os.path.exists(zarr_path):
shutil.rmtree(zarr_path)
vcf_to_zarr(vcf_path, zarr_path, fields='*', exclude_fields=exclude)
actual = zarr.open_group(zarr_path, mode='r')
for key in expected.keys():
e = expected[key]
a = actual[key][:]
compare_arrays(e, a)
for key in actual.keys():
if key not in {'variants', 'calldata'}:
assert key in expected
for key in actual['variants'].keys():
assert 'variants/' + key in expected
for key in actual['calldata'].keys():
assert 'calldata/' + key in expected
def test_vcf_to_zarr_rename():
vcf_path = fixture_path('sample.vcf')
zarr_path = os.path.join(tempdir, 'sample.zarr')
rename = {'CHROM': 'variants/chromosome',
'variants/altlen': 'spam/eggs',
'calldata/GT': 'foo/bar'}
expected = read_vcf(vcf_path, fields='*', rename_fields=rename)
if os.path.exists(zarr_path):
shutil.rmtree(zarr_path)
vcf_to_zarr(vcf_path, zarr_path, fields='*', rename_fields=rename)
actual = zarr.open_group(zarr_path, mode='r')
for key in expected.keys():
e = expected[key]
a = actual[key][:]
compare_arrays(e, a)
for key in actual['variants'].keys():
assert 'variants/' + key in expected
for key in actual['calldata'].keys():
assert 'calldata/' + key in expected
def test_vcf_to_zarr_rename_clash():
vcf_path = fixture_path('sample.vcf')
zarr_path = os.path.join(tempdir, 'sample.zarr')
# dup values
rename = {'CHROM': 'variants/chromosome',
'variants/altlen': 'spam/eggs',
'calldata/GT': 'spam/eggs'}
with pytest.raises(ValueError):
vcf_to_zarr(vcf_path, zarr_path, fields='*', rename_fields=rename)
# parent clash
rename = {'CHROM': 'variants/chromosome',
'variants/altlen': 'spam/eggs',
'calldata/GT': 'spam'}
with pytest.raises(ValueError):
vcf_to_zarr(vcf_path, zarr_path, fields='*', rename_fields=rename)
# parent clash
rename = {'CHROM': 'variants/chromosome',
'variants/altlen': 'spam',
'calldata/GT': 'spam/eggs'}
with pytest.raises(ValueError):
vcf_to_zarr(vcf_path, zarr_path, fields='*', rename_fields=rename)
def test_vcf_to_zarr_dup_fields_case_insensitive():
vcf_path = fixture_path('altlen.vcf')
zarr_path = os.path.join(tempdir, 'sample.zarr')
with pytest.raises(ValueError):
vcf_to_zarr(vcf_path, zarr_path, fields=['ALTLEN', 'altlen'])
with pytest.raises(ValueError):
vcf_to_zarr(vcf_path, zarr_path, fields=['variants/ALTLEN', 'variants/altlen'])
# should be fine if renamed
vcf_to_zarr(vcf_path, zarr_path, fields=['ALTLEN', 'altlen'],
rename_fields={'altlen': 'variants/spam'})
def test_vcf_to_zarr_group():
vcf_path = fixture_path('sample.vcf.gz')
zarr_path = os.path.join(tempdir, 'sample.zarr')
if os.path.exists(zarr_path):
shutil.rmtree(zarr_path)
chroms = ['19', '20', 'X']
for chrom in chroms:
vcf_to_zarr(vcf_path, zarr_path, fields='*', alt_number=2, chunk_length=2,
region=chrom, group=chrom)
actual = zarr.open_group(zarr_path, mode='r')
assert chroms == sorted(actual)
for chrom in chroms:
assert ['calldata', 'samples', 'variants'] == sorted(actual[chrom])
expect = read_vcf(vcf_path, fields='*', alt_number=2, region=chrom)
for key in expect.keys():
e = expect[key]
a = actual[chrom][key][:]
compare_arrays(e, a)
assert (actual[chrom]['variants/NS'].attrs['Description'] ==
'Number of Samples With Data')
assert (actual[chrom]['calldata/GQ'].attrs['Description'] ==
'Genotype Quality')
def test_vcf_to_zarr_string_codec():
vcf_path = fixture_path('sample.vcf')
zarr_path = os.path.join(tempdir, 'sample.zarr')
types = {'CHROM': object, 'ALT': object, 'samples': object}
expect = read_vcf(vcf_path, fields='*', alt_number=2, types=types)
if os.path.exists(zarr_path):
shutil.rmtree(zarr_path)
vcf_to_zarr(vcf_path, zarr_path, fields='*', alt_number=2, chunk_length=2,
types=types)
actual = zarr.open_group(zarr_path, mode='r')
for key in expect.keys():
e = expect[key]
a = actual[key][:]
compare_arrays(e, a)
def test_vcf_to_zarr_ann():
vcf_path = fixture_path('ann.vcf')
zarr_path = os.path.join(tempdir, 'ann.zarr')
for string_type in 'S10', 'object':
types = {'CHROM': string_type, 'ALT': string_type, 'samples': string_type}
transformers = [ANNTransformer(fields=['Allele', 'HGVS_c', 'AA'],
types={'Allele': string_type,
'HGVS_c': string_type})]
expected = read_vcf(vcf_path, fields='*', alt_number=2, types=types,
transformers=transformers)
if os.path.exists(zarr_path):
shutil.rmtree(zarr_path)
vcf_to_zarr(vcf_path, zarr_path, fields='*', alt_number=2, chunk_length=2,
types=types, transformers=transformers)
actual = zarr.open_group(zarr_path, mode='r')
for key in expected.keys():
compare_arrays(expected[key], actual[key][:])
def test_vcf_to_zarr_empty():
vcf_path = fixture_path('empty.vcf')
zarr_path = os.path.join(tempdir, 'empty.zarr')
vcf_to_zarr(vcf_path, zarr_path)
assert not os.path.exists(zarr_path)
def test_vcf_to_hdf5():
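    # writing to HDF5 and reading back should match read_vcf; header Description text
    # is stored as a dataset attribute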
vcf_paths = [fixture_path(x) for x in ['sample.vcf', 'sample.vcf.gz']]
h5_path = os.path.join(tempdir, 'sample.h5')
region_values = None, '20', '20:10000-20000', 'Y'
tabix_values = 'tabix', None
samples_values = None, ['NA00001', 'NA00003']
string_type_values = 'S10', 'object'
param_matrix = itertools.product(vcf_paths, region_values, tabix_values,
samples_values, string_type_values)
for vcf_path, region, tabix, samples, string_type in param_matrix:
types = {'CHROM': string_type, 'ALT': string_type, 'samples': string_type}
expected = read_vcf(vcf_path, fields='*', alt_number=2, region=region,
tabix=tabix, samples=samples, types=types)
if os.path.exists(h5_path):
os.remove(h5_path)
vcf_to_hdf5(vcf_path, h5_path, fields='*', alt_number=2, chunk_length=2,
region=region, tabix=tabix, samples=samples, types=types)
if expected is None:
assert not os.path.exists(h5_path)
else:
with h5py.File(h5_path, mode='r') as actual:
for key in expected.keys():
compare_arrays(expected[key], actual[key][:])
assert (actual['variants/NS'].attrs['Description'] ==
'Number of Samples With Data')
assert (actual['calldata/GQ'].attrs['Description'] ==
'Genotype Quality')
for key in actual.keys():
if key not in {'variants', 'calldata'}:
assert key in expected
for key in actual['variants'].keys():
assert 'variants/' + key in expected
for key in actual['calldata'].keys():
assert 'calldata/' + key in expected
def test_vcf_to_hdf5_exclude():
vcf_path = fixture_path('sample.vcf')
h5_path = os.path.join(tempdir, 'sample.h5')
exclude = ['variants/altlen', 'ID', 'calldata/DP']
expected = read_vcf(vcf_path, fields='*', exclude_fields=exclude)
if os.path.exists(h5_path):
os.remove(h5_path)
vcf_to_hdf5(vcf_path, h5_path, fields='*', exclude_fields=exclude)
with h5py.File(h5_path, mode='r') as actual:
for key in expected.keys():
compare_arrays(expected[key], actual[key][:])
for key in actual.keys():
if key not in {'variants', 'calldata'}:
assert key in expected
for key in actual['variants'].keys():
assert 'variants/' + key in expected
for key in actual['calldata'].keys():
assert 'calldata/' + key in expected
def test_vcf_to_hdf5_rename():
vcf_path = fixture_path('sample.vcf')
h5_path = os.path.join(tempdir, 'sample.h5')
rename = {'CHROM': 'variants/chromosome',
'variants/altlen': 'spam/eggs',
'calldata/GT': 'foo/bar'}
expected = read_vcf(vcf_path, fields='*', rename_fields=rename)
if os.path.exists(h5_path):
os.remove(h5_path)
vcf_to_hdf5(vcf_path, h5_path, fields='*', rename_fields=rename)
with h5py.File(h5_path, mode='r') as actual:
for key in expected.keys():
compare_arrays(expected[key], actual[key][:])
for key in actual['variants'].keys():
assert 'variants/' + key in expected
for key in actual['calldata'].keys():
assert 'calldata/' + key in expected
def test_vcf_to_hdf5_group():
vcf_path = fixture_path('sample.vcf.gz')
h5_path = os.path.join(tempdir, 'sample.h5')
if os.path.exists(h5_path):
os.remove(h5_path)
chroms = ['19', '20', 'X']
for chrom in chroms:
vcf_to_hdf5(vcf_path, h5_path, fields='*', alt_number=2, chunk_length=2,
region=chrom, group=chrom)
with h5py.File(h5_path, mode='r') as actual:
assert chroms == sorted(actual)
for chrom in chroms:
assert ['calldata', 'samples', 'variants'] == sorted(actual[chrom])
expect = read_vcf(vcf_path, fields='*', alt_number=2, region=chrom)
for key in expect.keys():
e = expect[key]
a = actual[chrom][key][:]
compare_arrays(e, a)
assert (actual[chrom]['variants/NS'].attrs['Description'] ==
'Number of Samples With Data')
assert (actual[chrom]['calldata/GQ'].attrs['Description'] ==
'Genotype Quality')
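
# ANNTransformer splits the structured ANN annotation field into separate
# arrays; the HDF5 output should match read_vcf with the same transformer.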
def test_vcf_to_hdf5_ann():
vcf_path = fixture_path('ann.vcf')
h5_path = os.path.join(tempdir, 'ann.h5')
for string_type in 'S10', 'object':
types = {'CHROM': string_type, 'ALT': string_type, 'samples': string_type}
transformers = [ANNTransformer(fields=['Allele', 'HGVS_c', 'AA'],
types={'Allele': string_type,
'HGVS_c': string_type})]
expected = read_vcf(vcf_path, fields='*', types=types, transformers=transformers)
if os.path.exists(h5_path):
os.remove(h5_path)
vcf_to_hdf5(vcf_path, h5_path, fields='*', chunk_length=2, types=types,
transformers=transformers)
with h5py.File(h5_path, mode='r') as actual:
for key in expected.keys():
compare_arrays(expected[key], actual[key][:])
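
# With vlen=False, string fields are stored as fixed-length bytes ('S' dtype)
# instead of variable-length strings, so object expectations are compared
# after casting to 'S'.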
def test_vcf_to_hdf5_vlen():
vcf_path = fixture_path('sample.vcf')
h5_path = os.path.join(tempdir, 'sample.h5')
fields = ['CHROM', 'ID', 'samples']
for string_type in 'S10', 'object':
types = {'CHROM': string_type, 'ID': string_type, 'samples': string_type}
expect = read_vcf(vcf_path, fields=fields, alt_number=2, types=types)
if os.path.exists(h5_path):
os.remove(h5_path)
vcf_to_hdf5(vcf_path, h5_path, fields=fields, alt_number=2, chunk_length=3,
types=types, vlen=False)
with h5py.File(h5_path, mode='r') as actual:
for key in expect.keys():
if expect[key].dtype.kind == 'f':
assert_array_almost_equal(expect[key], actual[key][:])
elif expect[key].dtype.kind == 'O':
# strings always stored as fixed length if vlen=False
assert 'S' == actual[key].dtype.kind
assert_array_equal(expect[key].astype('S'), actual[key][:])
else:
assert_array_equal(expect[key], actual[key][:])
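
# A VCF containing no variants should produce no output file at all.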
def test_vcf_to_hdf5_empty():
vcf_path = fixture_path('empty.vcf')
h5_path = os.path.join(tempdir, 'empty.h5')
vcf_to_hdf5(vcf_path, h5_path)
assert not os.path.exists(h5_path)
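
# Helpers for the dataframe/CSV tests: byte-string fields become object dtype
# with NaN for missing values, and each 2-D variants field expands into one
# numbered column per value.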
def to_pandas_expectation(e):
# expect that all string fields end up as objects with nans for missing
if e.dtype.kind == 'S':
e = e.astype('U').astype(object)
if e.dtype == object:
e[e == ''] = np.nan
return e
def check_dataframe(callset, df):
for k in callset:
if k.startswith('variants/'):
group, name = k.split('/')
e = to_pandas_expectation(callset[k])
if e.ndim == 1:
compare_arrays(e, df[name].values)
elif e.ndim == 2:
for i in range(e.shape[1]):
compare_arrays(e[:, i], df['%s_%s' % (name, i + 1)])
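
# vcf_to_dataframe should expand multi-value fields (ALT, AC) into numbered
# columns and always deliver string columns as pandas object dtype, whatever
# string type was requested.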
def test_vcf_to_dataframe():
vcf_path = fixture_path('sample.vcf')
fields = ['CHROM', 'POS', 'REF', 'ALT', 'DP', 'AC', 'GT']
numbers = {'AC': 3}
for string_type in 'S10', 'object':
types = {'CHROM': string_type, 'ALT': string_type}
callset = read_vcf(vcf_path, fields=fields, alt_number=2, numbers=numbers,
types=types)
df = vcf_to_dataframe(vcf_path, fields=fields, alt_number=2, numbers=numbers,
chunk_length=2, types=types)
assert (['CHROM', 'POS', 'REF', 'ALT_1', 'ALT_2', 'DP', 'AC_1', 'AC_2', 'AC_3'] ==
df.columns.tolist())
# always convert strings to object dtype for pandas
assert np.dtype(object) == df['CHROM'].dtype
assert np.dtype(object) == df['ALT_1'].dtype
check_dataframe(callset, df)
def test_vcf_to_dataframe_all():
vcf_path = fixture_path('sample.vcf')
fields = '*'
numbers = {'AC': 3}
for string_type in 'S10', 'object':
types = {'CHROM': string_type, 'ALT': string_type}
callset = read_vcf(vcf_path, fields=fields, alt_number=2, numbers=numbers,
types=types)
df = vcf_to_dataframe(vcf_path, fields=fields, alt_number=2, numbers=numbers,
chunk_length=2, types=types)
for k in ['CHROM', 'POS', 'ID', 'REF', 'ALT_1', 'ALT_2', 'DP', 'AC_1',
'AC_2', 'AC_3']:
assert k in df.columns.tolist()
# always convert strings to object dtype for pandas
assert np.dtype(object) == df['CHROM'].dtype
assert np.dtype(object) == df['ALT_1'].dtype
check_dataframe(callset, df)
def test_vcf_to_dataframe_exclude():
vcf_path = fixture_path('sample.vcf')
fields = '*'
exclude = ['ALT', 'ID']
df = vcf_to_dataframe(vcf_path, fields=fields, exclude_fields=exclude)
for k in ['CHROM', 'POS', 'REF', 'DP', 'AC_1', 'AC_2', 'AC_3']:
assert k in df.columns.tolist()
for k in ['ALT_1', 'ALT_2', 'ID']:
assert k not in df.columns.tolist()
def test_vcf_to_dataframe_ann():
vcf_path = fixture_path('ann.vcf')
fields = ['CHROM', 'POS', 'REF', 'ALT', 'ANN', 'DP', 'AC', 'GT']
numbers = {'AC': 2, 'ALT': 2}
for string_type in 'S10', 'object':
types = {'CHROM': string_type, 'ALT': string_type}
transformers = [ANNTransformer(fields=['Allele', 'HGVS_c', 'AA'],
types={'Allele': string_type,
'HGVS_c': string_type})]
callset = read_vcf(vcf_path, fields=fields, numbers=numbers, types=types,
transformers=transformers)
df = vcf_to_dataframe(vcf_path, fields=fields, numbers=numbers, chunk_length=2,
types=types, transformers=transformers)
assert (['CHROM', 'POS', 'REF', 'ALT_1', 'ALT_2', 'ANN_Allele', 'ANN_HGVS_c',
'ANN_AA_pos', 'ANN_AA_length', 'DP', 'AC_1', 'AC_2'] ==
df.columns.tolist())
# always convert strings to object dtype for pandas
assert np.dtype(object) == df['CHROM'].dtype
assert np.dtype(object) == df['ALT_1'].dtype
check_dataframe(callset, df)
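
# vcf_to_csv should emit the same columns as vcf_to_dataframe, and reading the
# CSV back with pandas should reproduce the dataframe values.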
def test_vcf_to_csv():
vcf_path = fixture_path('sample.vcf')
fields = ['CHROM', 'POS', 'REF', 'ALT', 'DP', 'AC', 'GT']
numbers = {'AC': 3}
for string_type in 'S20', 'object':
types = {'REF': string_type, 'ALT': string_type}
df = vcf_to_dataframe(vcf_path, fields=fields, alt_number=2, numbers=numbers,
types=types, chunk_length=2)
csv_path = os.path.join(tempdir, 'test.csv')
if os.path.exists(csv_path):
os.remove(csv_path)
vcf_to_csv(vcf_path, csv_path, fields=fields, alt_number=2, numbers=numbers,
types=types, chunk_length=2)
import pandas
adf = pandas.read_csv(csv_path, na_filter=True)
assert df.columns.tolist() == adf.columns.tolist()
for k in df.columns:
compare_arrays(df[k].values, adf[k].values)
def test_vcf_to_csv_all():
vcf_path = fixture_path('sample.vcf')
fields = '*'
df = vcf_to_dataframe(vcf_path, fields=fields)
csv_path = os.path.join(tempdir, 'test.csv')
if os.path.exists(csv_path):
os.remove(csv_path)
vcf_to_csv(vcf_path, csv_path, fields=fields)
import pandas
adf = pandas.read_csv(csv_path, na_filter=True)
assert df.columns.tolist() == adf.columns.tolist()
for k in df.columns:
compare_arrays(df[k].values, adf[k].values)
def test_vcf_to_csv_exclude():
vcf_path = fixture_path('sample.vcf')
fields = '*'
exclude = ['ALT', 'ID']
df = vcf_to_dataframe(vcf_path, fields=fields, exclude_fields=exclude)
csv_path = os.path.join(tempdir, 'test.csv')
if os.path.exists(csv_path):
os.remove(csv_path)
vcf_to_csv(vcf_path, csv_path, fields=fields, exclude_fields=exclude)
import pandas
adf = pandas.read_csv(csv_path, na_filter=True)
assert df.columns.tolist() == adf.columns.tolist()
def test_vcf_to_csv_ann():
vcf_path = fixture_path('ann.vcf')
fields = ['CHROM', 'POS', 'REF', 'ALT', 'DP', 'AC', 'ANN', 'GT']
numbers = {'AC': 2, 'ALT': 2}
for string_type in 'S20', 'object':
types = {'CHROM': string_type, 'REF': string_type, 'ALT': string_type}
transformers = [ANNTransformer(fields=['Allele', 'HGVS_c', 'AA'],
types={'Allele': string_type,
'HGVS_c': string_type})]
df = vcf_to_dataframe(vcf_path, fields=fields, numbers=numbers, types=types,
chunk_length=2, transformers=transformers)
csv_path = os.path.join(tempdir, 'test.csv')
if os.path.exists(csv_path):
os.remove(csv_path)
vcf_to_csv(vcf_path, csv_path, fields=fields, numbers=numbers, types=types,
chunk_length=2, transformers=transformers)
import pandas
adf = pandas.read_csv(csv_path, na_filter=True)
assert df.columns.tolist() == adf.columns.tolist()
for k in df.columns:
compare_arrays(df[k].values, adf[k].values)
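
# vcf_to_recarray keeps the requested string dtype (unlike the dataframe
# conversion) and expands 2-D variants fields into numbered names; the ANN
# variant of the test also checks that calldata fields are dropped.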
def test_vcf_to_recarray():
vcf_path = fixture_path('sample.vcf')
fields = ['CHROM', 'POS', 'REF', 'ALT', 'DP', 'AC', 'GT']
numbers = {'AC': 3}
for string_type in 'S20', 'object':
types = {'CHROM': string_type, 'REF': string_type, 'ALT': string_type}
callset = read_vcf(vcf_path, fields=fields, alt_number=2, numbers=numbers,
types=types)
a = vcf_to_recarray(vcf_path, fields=fields, alt_number=2, numbers=numbers,
chunk_length=2, types=types)
assert (['CHROM', 'POS', 'REF', 'ALT_1', 'ALT_2', 'DP', 'AC_1', 'AC_2', 'AC_3']
== list(a.dtype.names))
assert np.dtype(string_type) == a['CHROM'].dtype
for k in callset:
if k.startswith('variants/'):
group, name = k.split('/')
e = callset[k]
if e.ndim == 1:
assert_array_equal(e, a[name])
elif e.ndim == 2:
for i in range(e.shape[1]):
assert_array_equal(e[:, i], a['%s_%s' % (name, i + 1)])
else:
assert False, (k, e.ndim)
def test_vcf_to_recarray_all():
vcf_path = fixture_path('sample.vcf')
fields = '*'
numbers = {'AC': 3}
for string_type in 'S20', 'object':
types = {'CHROM': string_type, 'REF': string_type, 'ALT': string_type}
callset = read_vcf(vcf_path, fields=fields, alt_number=2, numbers=numbers,
types=types)
a = vcf_to_recarray(vcf_path, fields=fields, alt_number=2, numbers=numbers,
chunk_length=2, types=types)
for k in ['CHROM', 'POS', 'ID', 'REF', 'ALT_1', 'ALT_2', 'DP', 'AC_1',
'AC_2', 'AC_3']:
assert k in a.dtype.names
assert np.dtype(string_type) == a['CHROM'].dtype
for k in callset:
if k.startswith('variants/'):
group, name = k.split('/')
e = callset[k]
if e.ndim == 1:
assert_array_equal(e, a[name])
elif e.ndim == 2:
for i in range(e.shape[1]):
assert_array_equal(e[:, i], a['%s_%s' % (name, i + 1)])
else:
assert False, (k, e.ndim)
def test_vcf_to_recarray_exclude():
vcf_path = fixture_path('sample.vcf')
fields = '*'
exclude = ['ALT', 'ID']
a = vcf_to_recarray(vcf_path, fields=fields, exclude_fields=exclude)
for k in ['CHROM', 'POS', 'REF', 'DP', 'AC_1', 'AC_2', 'AC_3']:
assert k in a.dtype.names
for k in 'ALT_1', 'ALT_2', 'ALT', 'ID':
assert k not in a.dtype.names
def test_vcf_to_recarray_ann():
vcf_path = fixture_path('ann.vcf')
fields = ['CHROM', 'POS', 'REF', 'ALT', 'ANN', 'DP', 'AC', 'GT']
numbers = {'AC': 2, 'ALT': 2}
for string_type in 'S20', 'object':
types = {'CHROM': string_type, 'REF': string_type, 'ALT': string_type}
transformers = [ANNTransformer(fields=['Allele', 'HGVS_c', 'AA'],
types={'Allele': string_type,
'HGVS_c': string_type})]
callset = read_vcf(vcf_path, fields=fields, numbers=numbers, types=types,
transformers=transformers)
a = vcf_to_recarray(vcf_path, fields=fields, numbers=numbers, chunk_length=2,
types=types, transformers=transformers)
assert (['CHROM', 'POS', 'REF', 'ALT_1', 'ALT_2', 'ANN_Allele', 'ANN_HGVS_c',
'ANN_AA_pos', 'ANN_AA_length', 'DP', 'AC_1', 'AC_2'] ==
list(a.dtype.names))
assert np.dtype(string_type) == a['CHROM'].dtype
assert np.dtype(string_type) == a['ALT_1'].dtype
for k in callset:
group, name = k.split('/')
if group == 'variants':
e = callset[k]
if e.ndim == 1:
assert_array_equal(e, a[name])
elif e.ndim == 2:
for i in range(e.shape[1]):
assert_array_equal(e[:, i], a['%s_%s' % (name, i + 1)])
else:
assert False, (k, e.ndim)
else:
assert name not in a.dtype.names
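
# read_vcf_headers parses header metadata only: FILTER ids, INFO and FORMAT
# definitions (Number/Type/Description) and the sample names.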
def test_read_vcf_headers():
vcf_path = fixture_path('sample.vcf')
headers = read_vcf_headers(vcf_path)
# check headers
assert 'q10' in headers.filters
assert 's50' in headers.filters
assert 'AA' in headers.infos
assert 'AC' in headers.infos
assert 'AF' in headers.infos
assert 'AN' in headers.infos
assert 'DB' in headers.infos
assert 'DP' in headers.infos
assert 'H2' in headers.infos
assert 'NS' in headers.infos
assert 'DP' in headers.formats
assert 'GQ' in headers.formats
assert 'GT' in headers.formats
assert 'HQ' in headers.formats
assert ['NA00001', 'NA00002', 'NA00003'] == headers.samples
assert '1' == headers.infos['AA']['Number']
assert 'String' == headers.infos['AA']['Type']
assert 'Ancestral Allele' == headers.infos['AA']['Description']
assert '2' == headers.formats['HQ']['Number']
assert 'Integer' == headers.formats['HQ']['Type']
assert 'Haplotype Quality' == headers.formats['HQ']['Description']
(['data'], {}), '(data)\n', (22002, 22008), False, 'import io\n'), ((22027, 22074), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['input_file'], {'fields': "['POS', 'samples']"}), "(input_file, fields=['POS', 'samples'])\n", (22035, 22074), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((25388, 25404), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (25398, 25404), False, 'import io\n'), ((25423, 25471), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['input_file'], {'fields': "['QUAL', 'samples']"}), "(input_file, fields=['QUAL', 'samples'])\n", (25431, 25471), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((26139, 26155), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (26149, 26155), False, 'import io\n'), ((26174, 26261), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['input_file'], {'fields': "['FILTER_PASS', 'FILTER_q10', 'FILTER_s50', 'samples']"}), "(input_file, fields=['FILTER_PASS', 'FILTER_q10', 'FILTER_s50',\n 'samples'])\n", (26182, 26261), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((27284, 27300), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (27294, 27300), False, 'import io\n'), ((27900, 27914), 'numpy.isnan', 'np.isnan', (['a[1]'], {}), '(a[1])\n', (27908, 27914), True, 'import numpy as np\n'), ((27930, 27944), 'numpy.isnan', 'np.isnan', (['a[2]'], {}), '(a[2])\n', (27938, 27944), True, 'import numpy as np\n'), ((28362, 28378), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (28372, 28378), False, 'import io\n'), ((28978, 28992), 'numpy.isnan', 'np.isnan', (['a[1]'], {}), '(a[1])\n', (28986, 28992), True, 'import numpy as np\n'), ((29008, 29022), 'numpy.isnan', 'np.isnan', (['a[2]'], {}), '(a[2])\n', (29016, 29022), True, 'import numpy as np\n'), ((29470, 29486), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (29480, 29486), False, 'import io\n'), ((29505, 29629), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['input_file'], {'fields': "['calldata/GT', 'calldata/GQ', 'samples']", 'types': "{'calldata/GT': 'i1', 'calldata/GQ': 'i2'}"}), "(input_file, fields=['calldata/GT', 'calldata/GQ', 'samples'],\n types={'calldata/GT': 'i1', 'calldata/GQ': 'i2'})\n", (29513, 29629), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((30732, 30874), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': "['variants/DP', 'variants/AC']", 'types': "{'variants/DP': dtype, 'variants/AC': dtype}", 'numbers': "{'variants/AC': 3}"}), "(vcf_path, fields=['variants/DP', 'variants/AC'], types={\n 'variants/DP': dtype, 'variants/AC': dtype}, numbers={'variants/AC': 3})\n", (30740, 30874), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((31357, 31379), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (31367, 31379), False, 'import io\n'), ((31408, 31424), 'numpy.dtype', 'np.dtype', (['object'], {}), '(object)\n', (31416, 31424), True, 'import 
numpy as np\n'), ((31698, 31720), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (31708, 31720), False, 'import io\n'), ((31749, 31763), 'numpy.dtype', 'np.dtype', (['"""i4"""'], {}), "('i4')\n", (31757, 31763), True, 'import numpy as np\n'), ((32037, 32059), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (32047, 32059), False, 'import io\n'), ((32088, 32102), 'numpy.dtype', 'np.dtype', (['"""f4"""'], {}), "('f4')\n", (32096, 32102), True, 'import numpy as np\n'), ((32377, 32399), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (32387, 32399), False, 'import io\n'), ((32428, 32442), 'numpy.dtype', 'np.dtype', (['"""S1"""'], {}), "('S1')\n", (32436, 32442), True, 'import numpy as np\n'), ((32647, 32720), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': "['GT']", 'types': "{'GT': dtype}", 'numbers': "{'GT': 2}"}), "(vcf_path, fields=['GT'], types={'GT': dtype}, numbers={'GT': 2})\n", (32655, 32720), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((33276, 33298), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (33286, 33298), False, 'import io\n'), ((34271, 34344), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': "['HQ']", 'types': "{'HQ': dtype}", 'numbers': "{'HQ': 2}"}), "(vcf_path, fields=['HQ'], types={'HQ': dtype}, numbers={'HQ': 2})\n", (34279, 34344), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((39034, 39047), 'pytest.approx', 'approx', (['(0.333)'], {}), '(0.333)\n', (39040, 39047), False, 'from pytest import approx\n'), ((39249, 39262), 'pytest.approx', 'approx', (['(0.333)'], {}), '(0.333)\n', (39255, 39262), False, 'from pytest import approx\n'), ((39285, 39298), 'pytest.approx', 'approx', (['(0.667)'], {}), '(0.667)\n', (39291, 39298), False, 'from pytest import approx\n'), ((46099, 46160), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': "['samples', 'GT']", 'samples': 'samples'}), "(vcf_path, fields=['samples', 'GT'], samples=samples)\n", (46107, 46160), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((46535, 46596), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': "['samples', 'GT']", 'samples': 'samples'}), "(vcf_path, fields=['samples', 'GT'], samples=samples)\n", (46543, 46596), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((48174, 48192), 'numpy.dtype', 'np.dtype', (['"""object"""'], {}), "('object')\n", (48182, 48192), True, 'import numpy as np\n'), ((48327, 48345), 'numpy.dtype', 'np.dtype', (['"""object"""'], {}), "('object')\n", (48335, 48345), True, 'import numpy as np\n'), ((48518, 48536), 'numpy.dtype', 'np.dtype', (['"""object"""'], {}), "('object')\n", (48526, 48536), True, 'import numpy as np\n'), ((48684, 48702), 'numpy.dtype', 'np.dtype', (['"""object"""'], {}), "('object')\n", (48692, 48702), True, 'import numpy as np\n'), ((48852, 48870), 'numpy.dtype', 'np.dtype', (['"""object"""'], {}), "('object')\n", (48860, 48870), True, 'import numpy as 
np\n'), ((49025, 49043), 'numpy.dtype', 'np.dtype', (['"""object"""'], {}), "('object')\n", (49033, 49043), True, 'import numpy as np\n'), ((49203, 49221), 'numpy.dtype', 'np.dtype', (['"""object"""'], {}), "('object')\n", (49211, 49221), True, 'import numpy as np\n'), ((49358, 49376), 'numpy.dtype', 'np.dtype', (['"""object"""'], {}), "('object')\n", (49366, 49376), True, 'import numpy as np\n'), ((49476, 49494), 'numpy.dtype', 'np.dtype', (['"""object"""'], {}), "('object')\n", (49484, 49494), True, 'import numpy as np\n'), ((49581, 49597), 'numpy.dtype', 'np.dtype', (['"""int8"""'], {}), "('int8')\n", (49589, 49597), True, 'import numpy as np\n'), ((49728, 49746), 'numpy.dtype', 'np.dtype', (['"""object"""'], {}), "('object')\n", (49736, 49746), True, 'import numpy as np\n'), ((49880, 49898), 'numpy.dtype', 'np.dtype', (['"""object"""'], {}), "('object')\n", (49888, 49898), True, 'import numpy as np\n'), ((50036, 50053), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (50044, 50053), True, 'import numpy as np\n'), ((50187, 50204), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (50195, 50204), True, 'import numpy as np\n'), ((50336, 50353), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (50344, 50353), True, 'import numpy as np\n'), ((50486, 50503), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (50494, 50503), True, 'import numpy as np\n'), ((50634, 50651), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (50642, 50651), True, 'import numpy as np\n'), ((50782, 50799), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (50790, 50799), True, 'import numpy as np\n'), ((50932, 50949), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (50940, 50949), True, 'import numpy as np\n'), ((51224, 51242), 'numpy.dtype', 'np.dtype', (['"""object"""'], {}), "('object')\n", (51232, 51242), True, 'import numpy as np\n'), ((51455, 51472), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (51463, 51472), True, 'import numpy as np\n'), ((51660, 51677), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (51668, 51677), True, 'import numpy as np\n'), ((51845, 52003), 'allel.io.vcf_read.ANNTransformer', 'ANNTransformer', ([], {'fields': "['Allele', 'ANN_HGVS_c', 'variants/ANN_cDNA_pos']", 'types': "{'Allele': 'S12', 'ANN_HGVS_c': 'S20', 'variants/ANN_cDNA_pos': 'i8'}"}), "(fields=['Allele', 'ANN_HGVS_c', 'variants/ANN_cDNA_pos'],\n types={'Allele': 'S12', 'ANN_HGVS_c': 'S20', 'variants/ANN_cDNA_pos': 'i8'}\n )\n", (51859, 52003), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((52356, 52371), 'numpy.dtype', 'np.dtype', (['"""S12"""'], {}), "('S12')\n", (52364, 52371), True, 'import numpy as np\n'), ((52505, 52520), 'numpy.dtype', 'np.dtype', (['"""S20"""'], {}), "('S20')\n", (52513, 52520), True, 'import numpy as np\n'), ((52659, 52673), 'numpy.dtype', 'np.dtype', (['"""i8"""'], {}), "('i8')\n", (52667, 52673), True, 'import numpy as np\n'), ((53800, 53825), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (53812, 53825), False, 'import pytest\n'), ((54053, 54078), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (54065, 54078), False, 'import pytest\n'), ((54309, 54334), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (54321, 54334), False, 'import 
pytest\n'), ((54567, 54592), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (54579, 54592), False, 'import pytest\n'), ((54825, 54850), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (54837, 54850), False, 'import pytest\n'), ((55086, 55111), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (55098, 55111), False, 'import pytest\n'), ((55358, 55380), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (55368, 55380), False, 'import io\n'), ((55588, 55610), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (55598, 55610), False, 'import io\n'), ((55816, 55838), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (55826, 55838), False, 'import io\n'), ((56046, 56068), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (56056, 56068), False, 'import io\n'), ((56353, 56378), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (56365, 56378), False, 'import pytest\n'), ((56730, 56755), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (56742, 56755), False, 'import pytest\n'), ((57108, 57133), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (57120, 57133), False, 'import pytest\n'), ((57490, 57515), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (57502, 57515), False, 'import pytest\n'), ((57941, 57966), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (57953, 57966), False, 'import pytest\n'), ((58272, 58297), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (58284, 58297), False, 'import pytest\n'), ((58606, 58631), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (58618, 58631), False, 'import pytest\n'), ((58707, 58732), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (58719, 58732), False, 'import pytest\n'), ((60036, 60061), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (60048, 60061), False, 'import pytest\n'), ((60071, 60085), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['path'], {}), '(path)\n', (60079, 60085), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((60095, 60120), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (60107, 60120), False, 'import pytest\n'), ((60346, 60368), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (60356, 60368), False, 'import io\n'), ((60802, 60824), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (60812, 60824), False, 'import io\n'), ((61150, 61168), 'os.remove', 'os.remove', (['h5_path'], {}), '(h5_path)\n', (61159, 61168), False, 'import os\n'), ((61185, 61207), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (61195, 61207), False, 'import io\n'), ((61300, 61328), 'h5py.File', 'h5py.File', (['h5_path'], {'mode': '"""r"""'}), "(h5_path, mode='r')\n", (61309, 61328), False, 'import h5py\n'), ((61606, 61630), 'shutil.rmtree', 'shutil.rmtree', (['zarr_path'], {}), '(zarr_path)\n', (61619, 61630), False, 'import shutil\n'), ((61647, 61669), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (61657, 61669), False, 'import io\n'), ((62912, 63217), 'numpy.array', 'np.array', (["[[b'', b'', b'', b'', b''], [b'G', b'', b'', b'', b''], [b'', b'', b'', 
b'',\n b''], [b'T', b'', b'', b'', b''], [b'A', b'T', b'', b'', b''], [b'C',\n b'T', b'*', b'', b''], [b'A', b'GTAC', b'', b'', b''], [b'C', b'GATG',\n b'', b'', b''], [b'ATAC', b'GTACTACTAC', b'G', b'GTACA', b'GTA']]"], {}), "([[b'', b'', b'', b'', b''], [b'G', b'', b'', b'', b''], [b'', b'',\n b'', b'', b''], [b'T', b'', b'', b'', b''], [b'A', b'T', b'', b'', b''],\n [b'C', b'T', b'*', b'', b''], [b'A', b'GTAC', b'', b'', b''], [b'C',\n b'GATG', b'', b'', b''], [b'ATAC', b'GTACTACTAC', b'G', b'GTACA', b'GTA']])\n", (62920, 63217), True, 'import numpy as np\n'), ((63467, 63491), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['e', 'a'], {}), '(e, a)\n', (63485, 63491), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((63571, 63621), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[0, 1, 0, 1, 2, 3, 2, 2, 5]', 'a'], {}), '([0, 1, 0, 1, 2, 3, 2, 2, 5], a)\n', (63589, 63621), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((63707, 63883), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0], [1, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0], [0, 0, -1, 0, 0], [0, 3, 0, 0, 0], [-3, 0, 0, 0, 0], [0, 6, -\n 3, 1, -1]]'], {}), '([[0, 0, 0, 0, 0], [1, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0\n ], [0, 0, 0, 0, 0], [0, 0, -1, 0, 0], [0, 3, 0, 0, 0], [-3, 0, 0, 0, 0],\n [0, 6, -3, 1, -1]])\n', (63715, 63883), True, 'import numpy as np\n'), ((64059, 64083), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['e', 'a'], {}), '(e, a)\n', (64077, 64083), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((64204, 64293), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[False, False, False, True, True, False, False, False, False]', 'a'], {}), '([False, False, False, True, True, False, False, False, \n False], a)\n', (64222, 64293), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((64657, 64722), 'numpy.array', 'np.array', (["[b'', b'G', b'', b'T', b'A', b'C', b'A', b'C', b'ATAC']"], {}), "([b'', b'G', b'', b'T', b'A', b'C', b'A', b'C', b'ATAC'])\n", (64665, 64722), True, 'import numpy as np\n'), ((64808, 64832), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['e', 'a'], {}), '(e, a)\n', (64826, 64832), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((64912, 64962), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[0, 1, 0, 1, 2, 3, 2, 2, 5]', 'a'], {}), '([0, 1, 0, 1, 2, 3, 2, 2, 5], a)\n', (64930, 64962), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((65046, 65084), 'numpy.array', 'np.array', (['[0, 1, 0, 0, 0, 0, 0, -3, 0]'], {}), '([0, 1, 0, 0, 0, 0, 0, -3, 0])\n', (65054, 65084), True, 'import numpy as np\n'), ((65093, 65117), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['e', 'a'], {}), '(e, a)\n', (65111, 65117), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((65238, 65327), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[False, False, False, True, True, False, False, False, False]', 'a'], {}), '([False, False, False, True, True, False, False, False, \n False], a)\n', (65256, 65327), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((66078, 66282), 'numpy.array', 'np.array', (['[[[3, 0, 0, 0], [1, 1, 1, 0], [0, 0, 0, 0]], [[1, 1, 1, 0], [0, 0, 0, 2], [\n 0, 0, 0, 0]], [[1, 1, 0, 0], [0, 0, 0, 
1], [0, 0, 0, 0]], [[1, 2, 0, 1],\n [0, 2, 2, 0], [2, 1, 1, 1]]]'], {'dtype': 't'}), '([[[3, 0, 0, 0], [1, 1, 1, 0], [0, 0, 0, 0]], [[1, 1, 1, 0], [0, 0,\n 0, 2], [0, 0, 0, 0]], [[1, 1, 0, 0], [0, 0, 0, 1], [0, 0, 0, 0]], [[1, \n 2, 0, 1], [0, 2, 2, 0], [2, 1, 1, 1]]], dtype=t)\n', (66086, 66282), True, 'import numpy as np\n'), ((66417, 66441), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['e', 'a'], {}), '(e, a)\n', (66435, 66441), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((66988, 67041), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'region': '"""chr1:10-100"""', 'tabix': 'tabix'}), "(vcf_path, region='chr1:10-100', tabix=tabix)\n", (66996, 67041), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((67121, 67154), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[20, 30]', 'pos'], {}), '([20, 30], pos)\n', (67139, 67154), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((67231, 67253), 'pytest.raises', 'pytest.raises', (['OSError'], {}), '(OSError)\n', (67244, 67253), False, 'import pytest\n'), ((67263, 67277), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['path'], {}), '(path)\n', (67271, 67277), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((67362, 67394), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (67375, 67394), False, 'import pytest\n'), ((67404, 67418), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['path'], {}), '(path)\n', (67412, 67418), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((67506, 67538), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (67519, 67538), False, 'import pytest\n'), ((67548, 67562), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['path'], {}), '(path)\n', (67556, 67562), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((67661, 67688), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (67674, 67688), False, 'import pytest\n'), ((67698, 67712), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['path'], {}), '(path)\n', (67706, 67712), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((68504, 68529), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (68516, 68529), False, 'import pytest\n'), ((69266, 69291), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (69278, 69291), False, 'import pytest\n'), ((70034, 70059), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (70046, 70059), False, 'import pytest\n'), ((70488, 70500), 'pytest.approx', 'approx', (['(0.03)'], {}), '(0.03)\n', (70494, 70500), False, 'from pytest import approx\n'), ((70784, 70796), 'pytest.approx', 'approx', (['(48.2)'], {}), '(48.2)\n', (70790, 70796), False, 'from pytest import approx\n'), ((70820, 70832), 
'pytest.approx', 'approx', (['(48.1)'], {}), '(48.1)\n', (70826, 70832), False, 'from pytest import approx\n'), ((70856, 70868), 'pytest.approx', 'approx', (['(43.9)'], {}), '(43.9)\n', (70862, 70868), False, 'from pytest import approx\n'), ((70892, 70904), 'pytest.approx', 'approx', (['(49.0)'], {}), '(49.0)\n', (70898, 70904), False, 'from pytest import approx\n'), ((70927, 70938), 'pytest.approx', 'approx', (['(3.0)'], {}), '(3.0)\n', (70933, 70938), False, 'from pytest import approx\n'), ((70961, 70973), 'pytest.approx', 'approx', (['(41.0)'], {}), '(41.0)\n', (70967, 70973), False, 'from pytest import approx\n'), ((72882, 72988), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'alt_number': '(2)', 'region': 'region', 'tabix': 'tabix', 'samples': 'samples', 'types': 'types'}), "(vcf_path, fields='*', alt_number=2, region=region, tabix=tabix,\n samples=samples, types=types)\n", (72890, 72988), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((73024, 73048), 'os.path.exists', 'os.path.exists', (['npz_path'], {}), '(npz_path)\n', (73038, 73048), False, 'import os\n'), ((73090, 73224), 'allel.io.vcf_read.vcf_to_npz', 'vcf_to_npz', (['vcf_path', 'npz_path'], {'fields': '"""*"""', 'chunk_length': '(2)', 'alt_number': '(2)', 'region': 'region', 'tabix': 'tabix', 'samples': 'samples', 'types': 'types'}), "(vcf_path, npz_path, fields='*', chunk_length=2, alt_number=2,\n region=region, tabix=tabix, samples=samples, types=types)\n", (73100, 73224), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((74040, 74059), 'os.remove', 'os.remove', (['npz_path'], {}), '(npz_path)\n', (74049, 74059), False, 'import os\n'), ((74842, 74861), 'os.remove', 'os.remove', (['npz_path'], {}), '(npz_path)\n', (74851, 74861), False, 'import os\n'), ((75931, 76037), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'alt_number': '(2)', 'region': 'region', 'tabix': 'tabix', 'samples': 'samples', 'types': 'types'}), "(vcf_path, fields='*', alt_number=2, region=region, tabix=tabix,\n samples=samples, types=types)\n", (75939, 76037), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((76073, 76098), 'os.path.exists', 'os.path.exists', (['zarr_path'], {}), '(zarr_path)\n', (76087, 76098), False, 'import os\n'), ((76145, 76281), 'allel.io.vcf_read.vcf_to_zarr', 'vcf_to_zarr', (['vcf_path', 'zarr_path'], {'fields': '"""*"""', 'alt_number': '(2)', 'chunk_length': '(2)', 'region': 'region', 'tabix': 'tabix', 'samples': 'samples', 'types': 'types'}), "(vcf_path, zarr_path, fields='*', alt_number=2, chunk_length=2,\n region=region, tabix=tabix, samples=samples, types=types)\n", (76156, 76281), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((77472, 77496), 'shutil.rmtree', 'shutil.rmtree', (['zarr_path'], {}), '(zarr_path)\n', (77485, 77496), False, 'import shutil\n'), ((77713, 77733), 'allel.test.tools.compare_arrays', 'compare_arrays', (['e', 'a'], {}), '(e, a)\n', (77727, 77733), False, 'from allel.test.tools import 
compare_arrays\n'), ((78391, 78415), 'shutil.rmtree', 'shutil.rmtree', (['zarr_path'], {}), '(zarr_path)\n', (78404, 78415), False, 'import shutil\n'), ((78630, 78650), 'allel.test.tools.compare_arrays', 'compare_arrays', (['e', 'a'], {}), '(e, a)\n', (78644, 78650), False, 'from allel.test.tools import compare_arrays\n'), ((79120, 79145), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (79133, 79145), False, 'import pytest\n'), ((79155, 79221), 'allel.io.vcf_read.vcf_to_zarr', 'vcf_to_zarr', (['vcf_path', 'zarr_path'], {'fields': '"""*"""', 'rename_fields': 'rename'}), "(vcf_path, zarr_path, fields='*', rename_fields=rename)\n", (79166, 79221), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((79380, 79405), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (79393, 79405), False, 'import pytest\n'), ((79415, 79481), 'allel.io.vcf_read.vcf_to_zarr', 'vcf_to_zarr', (['vcf_path', 'zarr_path'], {'fields': '"""*"""', 'rename_fields': 'rename'}), "(vcf_path, zarr_path, fields='*', rename_fields=rename)\n", (79426, 79481), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((79640, 79665), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (79653, 79665), False, 'import pytest\n'), ((79675, 79741), 'allel.io.vcf_read.vcf_to_zarr', 'vcf_to_zarr', (['vcf_path', 'zarr_path'], {'fields': '"""*"""', 'rename_fields': 'rename'}), "(vcf_path, zarr_path, fields='*', rename_fields=rename)\n", (79686, 79741), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((79900, 79925), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (79913, 79925), False, 'import pytest\n'), ((79935, 79996), 'allel.io.vcf_read.vcf_to_zarr', 'vcf_to_zarr', (['vcf_path', 'zarr_path'], {'fields': "['ALTLEN', 'altlen']"}), "(vcf_path, zarr_path, fields=['ALTLEN', 'altlen'])\n", (79946, 79996), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((80006, 80031), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (80019, 80031), False, 'import pytest\n'), ((80041, 80120), 'allel.io.vcf_read.vcf_to_zarr', 'vcf_to_zarr', (['vcf_path', 'zarr_path'], {'fields': "['variants/ALTLEN', 'variants/altlen']"}), "(vcf_path, zarr_path, fields=['variants/ALTLEN', 'variants/altlen'])\n", (80052, 80120), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((80450, 80474), 'shutil.rmtree', 'shutil.rmtree', (['zarr_path'], {}), '(zarr_path)\n', (80463, 80474), False, 'import shutil\n'), ((80539, 80644), 'allel.io.vcf_read.vcf_to_zarr', 'vcf_to_zarr', (['vcf_path', 'zarr_path'], {'fields': '"""*"""', 'alt_number': '(2)', 'chunk_length': '(2)', 'region': 'chrom', 'group': 'chrom'}), "(vcf_path, zarr_path, fields='*', alt_number=2, chunk_length=2,\n region=chrom, group=chrom)\n", (80550, 80644), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, 
vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((80865, 80923), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'alt_number': '(2)', 'region': 'chrom'}), "(vcf_path, fields='*', alt_number=2, region=chrom)\n", (80873, 80923), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((81605, 81629), 'shutil.rmtree', 'shutil.rmtree', (['zarr_path'], {}), '(zarr_path)\n', (81618, 81629), False, 'import shutil\n'), ((81877, 81897), 'allel.test.tools.compare_arrays', 'compare_arrays', (['e', 'a'], {}), '(e, a)\n', (81891, 81897), False, 'from allel.test.tools import compare_arrays\n'), ((82373, 82462), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'alt_number': '(2)', 'types': 'types', 'transformers': 'transformers'}), "(vcf_path, fields='*', alt_number=2, types=types, transformers=\n transformers)\n", (82381, 82462), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((82497, 82522), 'os.path.exists', 'os.path.exists', (['zarr_path'], {}), '(zarr_path)\n', (82511, 82522), False, 'import os\n'), ((82569, 82687), 'allel.io.vcf_read.vcf_to_zarr', 'vcf_to_zarr', (['vcf_path', 'zarr_path'], {'fields': '"""*"""', 'alt_number': '(2)', 'chunk_length': '(2)', 'types': 'types', 'transformers': 'transformers'}), "(vcf_path, zarr_path, fields='*', alt_number=2, chunk_length=2,\n types=types, transformers=transformers)\n", (82580, 82687), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((82721, 82757), 'zarr.open_group', 'zarr.open_group', (['zarr_path'], {'mode': '"""r"""'}), "(zarr_path, mode='r')\n", (82736, 82757), False, 'import zarr\n'), ((83029, 83054), 'os.path.exists', 'os.path.exists', (['zarr_path'], {}), '(zarr_path)\n', (83043, 83054), False, 'import os\n'), ((83706, 83812), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'alt_number': '(2)', 'region': 'region', 'tabix': 'tabix', 'samples': 'samples', 'types': 'types'}), "(vcf_path, fields='*', alt_number=2, region=region, tabix=tabix,\n samples=samples, types=types)\n", (83714, 83812), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((83848, 83871), 'os.path.exists', 'os.path.exists', (['h5_path'], {}), '(h5_path)\n', (83862, 83871), False, 'import os\n'), ((83912, 84046), 'allel.io.vcf_read.vcf_to_hdf5', 'vcf_to_hdf5', (['vcf_path', 'h5_path'], {'fields': '"""*"""', 'alt_number': '(2)', 'chunk_length': '(2)', 'region': 'region', 'tabix': 'tabix', 'samples': 'samples', 'types': 'types'}), "(vcf_path, h5_path, fields='*', alt_number=2, chunk_length=2,\n region=region, tabix=tabix, samples=samples, types=types)\n", (83923, 84046), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((85220, 85238), 'os.remove', 'os.remove', (['h5_path'], {}), '(h5_path)\n', (85229, 85238), False, 'import os\n'), ((85319, 
85347), 'h5py.File', 'h5py.File', (['h5_path'], {'mode': '"""r"""'}), "(h5_path, mode='r')\n", (85328, 85347), False, 'import h5py\n'), ((86132, 86150), 'os.remove', 'os.remove', (['h5_path'], {}), '(h5_path)\n', (86141, 86150), False, 'import os\n'), ((86229, 86257), 'h5py.File', 'h5py.File', (['h5_path'], {'mode': '"""r"""'}), "(h5_path, mode='r')\n", (86238, 86257), False, 'import h5py\n'), ((86719, 86737), 'os.remove', 'os.remove', (['h5_path'], {}), '(h5_path)\n', (86728, 86737), False, 'import os\n'), ((86802, 86905), 'allel.io.vcf_read.vcf_to_hdf5', 'vcf_to_hdf5', (['vcf_path', 'h5_path'], {'fields': '"""*"""', 'alt_number': '(2)', 'chunk_length': '(2)', 'region': 'chrom', 'group': 'chrom'}), "(vcf_path, h5_path, fields='*', alt_number=2, chunk_length=2,\n region=chrom, group=chrom)\n", (86813, 86905), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((86931, 86959), 'h5py.File', 'h5py.File', (['h5_path'], {'mode': '"""r"""'}), "(h5_path, mode='r')\n", (86940, 86959), False, 'import h5py\n'), ((88073, 88143), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'types': 'types', 'transformers': 'transformers'}), "(vcf_path, fields='*', types=types, transformers=transformers)\n", (88081, 88143), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((88155, 88178), 'os.path.exists', 'os.path.exists', (['h5_path'], {}), '(h5_path)\n', (88169, 88178), False, 'import os\n'), ((88219, 88321), 'allel.io.vcf_read.vcf_to_hdf5', 'vcf_to_hdf5', (['vcf_path', 'h5_path'], {'fields': '"""*"""', 'chunk_length': '(2)', 'types': 'types', 'transformers': 'transformers'}), "(vcf_path, h5_path, fields='*', chunk_length=2, types=types,\n transformers=transformers)\n", (88230, 88321), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((88794, 88854), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': 'fields', 'alt_number': '(2)', 'types': 'types'}), '(vcf_path, fields=fields, alt_number=2, types=types)\n', (88802, 88854), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((88866, 88889), 'os.path.exists', 'os.path.exists', (['h5_path'], {}), '(h5_path)\n', (88880, 88889), False, 'import os\n'), ((88930, 89034), 'allel.io.vcf_read.vcf_to_hdf5', 'vcf_to_hdf5', (['vcf_path', 'h5_path'], {'fields': 'fields', 'alt_number': '(2)', 'chunk_length': '(3)', 'types': 'types', 'vlen': '(False)'}), '(vcf_path, h5_path, fields=fields, alt_number=2, chunk_length=3,\n types=types, vlen=False)\n', (88941, 89034), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((89791, 89814), 'os.path.exists', 'os.path.exists', (['h5_path'], {}), '(h5_path)\n', (89805, 89814), False, 'import os\n'), ((90746, 90823), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': 'fields', 'alt_number': '(2)', 'numbers': 'numbers', 'types': 'types'}), '(vcf_path, fields=fields, alt_number=2, numbers=numbers, 
types=types)\n', (90754, 90823), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((90864, 90969), 'allel.io.vcf_read.vcf_to_dataframe', 'vcf_to_dataframe', (['vcf_path'], {'fields': 'fields', 'alt_number': '(2)', 'numbers': 'numbers', 'chunk_length': '(2)', 'types': 'types'}), '(vcf_path, fields=fields, alt_number=2, numbers=numbers,\n chunk_length=2, types=types)\n', (90880, 90969), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((91562, 91639), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': 'fields', 'alt_number': '(2)', 'numbers': 'numbers', 'types': 'types'}), '(vcf_path, fields=fields, alt_number=2, numbers=numbers, types=types)\n', (91570, 91639), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((91680, 91785), 'allel.io.vcf_read.vcf_to_dataframe', 'vcf_to_dataframe', (['vcf_path'], {'fields': 'fields', 'alt_number': '(2)', 'numbers': 'numbers', 'chunk_length': '(2)', 'types': 'types'}), '(vcf_path, fields=fields, alt_number=2, numbers=numbers,\n chunk_length=2, types=types)\n', (91696, 91785), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((93073, 93167), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': 'fields', 'numbers': 'numbers', 'types': 'types', 'transformers': 'transformers'}), '(vcf_path, fields=fields, numbers=numbers, types=types,\n transformers=transformers)\n', (93081, 93167), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((93204, 93322), 'allel.io.vcf_read.vcf_to_dataframe', 'vcf_to_dataframe', (['vcf_path'], {'fields': 'fields', 'numbers': 'numbers', 'chunk_length': '(2)', 'types': 'types', 'transformers': 'transformers'}), '(vcf_path, fields=fields, numbers=numbers, chunk_length=2,\n types=types, transformers=transformers)\n', (93220, 93322), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((94011, 94116), 'allel.io.vcf_read.vcf_to_dataframe', 'vcf_to_dataframe', (['vcf_path'], {'fields': 'fields', 'alt_number': '(2)', 'numbers': 'numbers', 'types': 'types', 'chunk_length': '(2)'}), '(vcf_path, fields=fields, alt_number=2, numbers=numbers,\n types=types, chunk_length=2)\n', (94027, 94116), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((94162, 94195), 'os.path.join', 'os.path.join', (['tempdir', '"""test.csv"""'], {}), "(tempdir, 'test.csv')\n", (94174, 94195), False, 'import os\n'), ((94207, 94231), 'os.path.exists', 'os.path.exists', (['csv_path'], {}), '(csv_path)\n', (94221, 94231), False, 'import os\n'), ((94273, 94382), 'allel.io.vcf_read.vcf_to_csv', 'vcf_to_csv', (['vcf_path', 'csv_path'], {'fields': 'fields', 'alt_number': '(2)', 'numbers': 'numbers', 
'types': 'types', 'chunk_length': '(2)'}), '(vcf_path, csv_path, fields=fields, alt_number=2, numbers=numbers,\n types=types, chunk_length=2)\n', (94283, 94382), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((94434, 94475), 'pandas.read_csv', 'pandas.read_csv', (['csv_path'], {'na_filter': '(True)'}), '(csv_path, na_filter=True)\n', (94449, 94475), False, 'import pandas\n'), ((94849, 94868), 'os.remove', 'os.remove', (['csv_path'], {}), '(csv_path)\n', (94858, 94868), False, 'import os\n'), ((95077, 95120), 'allel.test.tools.compare_arrays', 'compare_arrays', (['df[k].values', 'adf[k].values'], {}), '(df[k].values, adf[k].values)\n', (95091, 95120), False, 'from allel.test.tools import compare_arrays\n'), ((95406, 95425), 'os.remove', 'os.remove', (['csv_path'], {}), '(csv_path)\n', (95415, 95425), False, 'import os\n'), ((96142, 96260), 'allel.io.vcf_read.vcf_to_dataframe', 'vcf_to_dataframe', (['vcf_path'], {'fields': 'fields', 'numbers': 'numbers', 'types': 'types', 'chunk_length': '(2)', 'transformers': 'transformers'}), '(vcf_path, fields=fields, numbers=numbers, types=types,\n chunk_length=2, transformers=transformers)\n', (96158, 96260), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((96306, 96339), 'os.path.join', 'os.path.join', (['tempdir', '"""test.csv"""'], {}), "(tempdir, 'test.csv')\n", (96318, 96339), False, 'import os\n'), ((96351, 96375), 'os.path.exists', 'os.path.exists', (['csv_path'], {}), '(csv_path)\n', (96365, 96375), False, 'import os\n'), ((96417, 96539), 'allel.io.vcf_read.vcf_to_csv', 'vcf_to_csv', (['vcf_path', 'csv_path'], {'fields': 'fields', 'numbers': 'numbers', 'types': 'types', 'chunk_length': '(2)', 'transformers': 'transformers'}), '(vcf_path, csv_path, fields=fields, numbers=numbers, types=types,\n chunk_length=2, transformers=transformers)\n', (96427, 96539), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((96591, 96632), 'pandas.read_csv', 'pandas.read_csv', (['csv_path'], {'na_filter': '(True)'}), '(csv_path, na_filter=True)\n', (96606, 96632), False, 'import pandas\n'), ((97072, 97149), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': 'fields', 'alt_number': '(2)', 'numbers': 'numbers', 'types': 'types'}), '(vcf_path, fields=fields, alt_number=2, numbers=numbers, types=types)\n', (97080, 97149), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((97189, 97293), 'allel.io.vcf_read.vcf_to_recarray', 'vcf_to_recarray', (['vcf_path'], {'fields': 'fields', 'alt_number': '(2)', 'numbers': 'numbers', 'chunk_length': '(2)', 'types': 'types'}), '(vcf_path, fields=fields, alt_number=2, numbers=numbers,\n chunk_length=2, types=types)\n', (97204, 97293), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((98212, 98289), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': 'fields', 'alt_number': '(2)', 'numbers': 'numbers', 'types': 
'types'}), '(vcf_path, fields=fields, alt_number=2, numbers=numbers, types=types)\n', (98220, 98289), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((98329, 98433), 'allel.io.vcf_read.vcf_to_recarray', 'vcf_to_recarray', (['vcf_path'], {'fields': 'fields', 'alt_number': '(2)', 'numbers': 'numbers', 'chunk_length': '(2)', 'types': 'types'}), '(vcf_path, fields=fields, alt_number=2, numbers=numbers,\n chunk_length=2, types=types)\n', (98344, 98433), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((100031, 100125), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': 'fields', 'numbers': 'numbers', 'types': 'types', 'transformers': 'transformers'}), '(vcf_path, fields=fields, numbers=numbers, types=types,\n transformers=transformers)\n', (100039, 100125), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((100161, 100278), 'allel.io.vcf_read.vcf_to_recarray', 'vcf_to_recarray', (['vcf_path'], {'fields': 'fields', 'numbers': 'numbers', 'chunk_length': '(2)', 'types': 'types', 'transformers': 'transformers'}), '(vcf_path, fields=fields, numbers=numbers, chunk_length=2,\n types=types, transformers=transformers)\n', (100176, 100278), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((17719, 17757), 'gzip.open', 'gzip.open', (["(vcf_path + '.gz')"], {'mode': '"""rb"""'}), "(vcf_path + '.gz', mode='rb')\n", (17728, 17757), False, 'import gzip\n'), ((17781, 17797), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (17791, 17797), False, 'import io\n'), ((21147, 21163), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (21157, 21163), False, 'import io\n'), ((21186, 21265), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['input_file'], {'fields': "['CHROM', 'samples']", 'types': "{'CHROM': string_type}"}), "(input_file, fields=['CHROM', 'samples'], types={'CHROM': string_type})\n", (21194, 21265), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((22654, 22670), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (22664, 22670), False, 'import io\n'), ((22693, 22766), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['input_file'], {'fields': "['ID', 'samples']", 'types': "{'ID': string_type}"}), "(input_file, fields=['ID', 'samples'], types={'ID': string_type})\n", (22701, 22766), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((23564, 23580), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (23574, 23580), False, 'import io\n'), ((23603, 23678), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['input_file'], {'fields': "['REF', 'samples']", 'types': "{'REF': string_type}"}), "(input_file, fields=['REF', 'samples'], types={'REF': string_type})\n", (23611, 23678), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, 
vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((24481, 24497), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (24491, 24497), False, 'import io\n'), ((25714, 25725), 'pytest.approx', 'approx', (['(1.2)'], {}), '(1.2)\n', (25720, 25725), False, 'from pytest import approx\n'), ((25749, 25760), 'pytest.approx', 'approx', (['(3.4)'], {}), '(3.4)\n', (25755, 25760), False, 'from pytest import approx\n'), ((27865, 27876), 'pytest.approx', 'approx', (['(1.2)'], {}), '(1.2)\n', (27871, 27876), False, 'from pytest import approx\n'), ((28943, 28954), 'pytest.approx', 'approx', (['(1.2)'], {}), '(1.2)\n', (28949, 28954), False, 'from pytest import approx\n'), ((30939, 30954), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (30947, 30954), True, 'import numpy as np\n'), ((32763, 32778), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (32771, 32778), True, 'import numpy as np\n'), ((34387, 34402), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (34395, 34402), True, 'import numpy as np\n'), ((41144, 41190), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'region': 'region', 'tabix': 'tabix'}), '(vcf_path, region=region, tabix=tabix)\n', (41152, 41190), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((41382, 41403), 'numpy.all', 'np.all', (["(chrom == '19')"], {}), "(chrom == '19')\n", (41388, 41403), True, 'import numpy as np\n'), ((41449, 41484), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[111, 112]', 'pos'], {}), '([111, 112], pos)\n', (41467, 41484), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((41534, 41580), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'region': 'region', 'tabix': 'tabix'}), '(vcf_path, region=region, tabix=tabix)\n', (41542, 41580), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((41772, 41793), 'numpy.all', 'np.all', (["(chrom == '20')"], {}), "(chrom == '20')\n", (41778, 41793), True, 'import numpy as np\n'), ((41839, 41914), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[14370, 17330, 1110696, 1230237, 1234567, 1235237]', 'pos'], {}), '([14370, 17330, 1110696, 1230237, 1234567, 1235237], pos)\n', (41857, 41914), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((41963, 42009), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'region': 'region', 'tabix': 'tabix'}), '(vcf_path, region=region, tabix=tabix)\n', (41971, 42009), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((42201, 42221), 'numpy.all', 'np.all', (["(chrom == 'X')"], {}), "(chrom == 'X')\n", (42207, 42221), True, 'import numpy as np\n'), ((42267, 42296), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[10]', 'pos'], {}), '([10], pos)\n', (42285, 42296), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((42345, 42391), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'region': 'region', 'tabix': 'tabix'}), '(vcf_path, region=region, tabix=tabix)\n', (42353, 42391), False, 'from allel.io.vcf_read import iter_vcf_chunks, 
read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((42485, 42531), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'region': 'region', 'tabix': 'tabix'}), '(vcf_path, region=region, tabix=tabix)\n', (42493, 42531), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((42723, 42744), 'numpy.all', 'np.all', (["(chrom == '20')"], {}), "(chrom == '20')\n", (42729, 42744), True, 'import numpy as np\n'), ((42790, 42829), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[14370, 17330]', 'pos'], {}), '([14370, 17330], pos)\n', (42808, 42829), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((42895, 42941), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'region': 'region', 'tabix': 'tabix'}), '(vcf_path, region=region, tabix=tabix)\n', (42903, 42941), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((43133, 43154), 'numpy.all', 'np.all', (["(chrom == '20')"], {}), "(chrom == '20')\n", (43139, 43154), True, 'import numpy as np\n'), ((43200, 43243), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[1110696, 1230237]', 'pos'], {}), '([1110696, 1230237], pos)\n', (43218, 43243), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((43309, 43355), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'region': 'region', 'tabix': 'tabix'}), '(vcf_path, region=region, tabix=tabix)\n', (43317, 43355), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((43547, 43568), 'numpy.all', 'np.all', (["(chrom == '20')"], {}), "(chrom == '20')\n", (43553, 43568), True, 'import numpy as np\n'), ((43614, 43657), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['[1234567, 1235237]', 'pos'], {}), '([1234567, 1235237], pos)\n', (43632, 43657), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((53844, 53866), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (53854, 53866), False, 'import io\n'), ((54097, 54119), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (54107, 54119), False, 'import io\n'), ((54353, 54375), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (54363, 54375), False, 'import io\n'), ((54611, 54633), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (54621, 54633), False, 'import io\n'), ((54869, 54891), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (54879, 54891), False, 'import io\n'), ((55130, 55152), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (55140, 55152), False, 'import io\n'), ((56397, 56419), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (56407, 56419), False, 'import io\n'), ((56774, 56796), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (56784, 56796), False, 'import io\n'), ((57152, 57174), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (57162, 57174), False, 'import io\n'), ((57534, 57556), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', 
(57544, 57556), False, 'import io\n'), ((57985, 58007), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (57995, 58007), False, 'import io\n'), ((58316, 58338), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (58326, 58338), False, 'import io\n'), ((58650, 58672), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (58660, 58672), False, 'import io\n'), ((58751, 58773), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (58761, 58773), False, 'import io\n'), ((60139, 60161), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (60149, 60161), False, 'import io\n'), ((62643, 62665), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (62653, 62665), False, 'import io\n'), ((64170, 64184), 'numpy.dtype', 'np.dtype', (['bool'], {}), '(bool)\n', (64178, 64184), True, 'import numpy as np\n'), ((64390, 64412), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (64400, 64412), False, 'import io\n'), ((65204, 65218), 'numpy.dtype', 'np.dtype', (['bool'], {}), '(bool)\n', (65212, 65218), True, 'import numpy as np\n'), ((65866, 65888), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (65876, 65888), False, 'import io\n'), ((68548, 68570), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (68558, 68570), False, 'import io\n'), ((69310, 69332), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (69320, 69332), False, 'import io\n'), ((70078, 70100), 'io.BytesIO', 'io.BytesIO', (['input_data'], {}), '(input_data)\n', (70088, 70100), False, 'import io\n'), ((73062, 73081), 'os.remove', 'os.remove', (['npz_path'], {}), '(npz_path)\n', (73071, 73081), False, 'import os\n'), ((73352, 73388), 'numpy.load', 'np.load', (['npz_path'], {'allow_pickle': '(True)'}), '(npz_path, allow_pickle=True)\n', (73359, 73388), True, 'import numpy as np\n'), ((74269, 74322), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['expected[key]', 'actual[key]'], {}), '(expected[key], actual[key])\n', (74294, 74322), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((74349, 74395), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['expected[key]', 'actual[key]'], {}), '(expected[key], actual[key])\n', (74367, 74395), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((75069, 75122), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['expected[key]', 'actual[key]'], {}), '(expected[key], actual[key])\n', (75094, 75122), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((75149, 75195), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['expected[key]', 'actual[key]'], {}), '(expected[key], actual[key])\n', (75167, 75195), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((76112, 76136), 'shutil.rmtree', 'shutil.rmtree', (['zarr_path'], {}), '(zarr_path)\n', (76125, 76136), False, 'import shutil\n'), ((76411, 76447), 'zarr.open_group', 'zarr.open_group', (['zarr_path'], {'mode': '"""r"""'}), "(zarr_path, mode='r')\n", (76426, 76447), False, 'import zarr\n'), ((81036, 81056), 'allel.test.tools.compare_arrays', 'compare_arrays', (['e', 'a'], {}), '(e, a)\n', (81050, 81056), False, 'from allel.test.tools import compare_arrays\n'), ((82164, 82271), 'allel.io.vcf_read.ANNTransformer', 'ANNTransformer', ([], {'fields': "['Allele', 'HGVS_c', 'AA']", 'types': 
"{'Allele': string_type, 'HGVS_c': string_type}"}), "(fields=['Allele', 'HGVS_c', 'AA'], types={'Allele':\n string_type, 'HGVS_c': string_type})\n", (82178, 82271), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((82536, 82560), 'shutil.rmtree', 'shutil.rmtree', (['zarr_path'], {}), '(zarr_path)\n', (82549, 82560), False, 'import shutil\n'), ((82806, 82851), 'allel.test.tools.compare_arrays', 'compare_arrays', (['expected[key]', 'actual[key][:]'], {}), '(expected[key], actual[key][:])\n', (82820, 82851), False, 'from allel.test.tools import compare_arrays\n'), ((83885, 83903), 'os.remove', 'os.remove', (['h5_path'], {}), '(h5_path)\n', (83894, 83903), False, 'import os\n'), ((85407, 85452), 'allel.test.tools.compare_arrays', 'compare_arrays', (['expected[key]', 'actual[key][:]'], {}), '(expected[key], actual[key][:])\n', (85421, 85452), False, 'from allel.test.tools import compare_arrays\n'), ((86317, 86362), 'allel.test.tools.compare_arrays', 'compare_arrays', (['expected[key]', 'actual[key][:]'], {}), '(expected[key], actual[key][:])\n', (86331, 86362), False, 'from allel.test.tools import compare_arrays\n'), ((87141, 87199), 'allel.io.vcf_read.read_vcf', 'read_vcf', (['vcf_path'], {'fields': '"""*"""', 'alt_number': '(2)', 'region': 'chrom'}), "(vcf_path, fields='*', alt_number=2, region=chrom)\n", (87149, 87199), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((87864, 87971), 'allel.io.vcf_read.ANNTransformer', 'ANNTransformer', ([], {'fields': "['Allele', 'HGVS_c', 'AA']", 'types': "{'Allele': string_type, 'HGVS_c': string_type}"}), "(fields=['Allele', 'HGVS_c', 'AA'], types={'Allele':\n string_type, 'HGVS_c': string_type})\n", (87878, 87971), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((88192, 88210), 'os.remove', 'os.remove', (['h5_path'], {}), '(h5_path)\n', (88201, 88210), False, 'import os\n'), ((88351, 88379), 'h5py.File', 'h5py.File', (['h5_path'], {'mode': '"""r"""'}), "(h5_path, mode='r')\n", (88360, 88379), False, 'import h5py\n'), ((88903, 88921), 'os.remove', 'os.remove', (['h5_path'], {}), '(h5_path)\n', (88912, 88921), False, 'import os\n'), ((89064, 89092), 'h5py.File', 'h5py.File', (['h5_path'], {'mode': '"""r"""'}), "(h5_path, mode='r')\n", (89073, 89092), False, 'import h5py\n'), ((91199, 91215), 'numpy.dtype', 'np.dtype', (['object'], {}), '(object)\n', (91207, 91215), True, 'import numpy as np\n'), ((91252, 91268), 'numpy.dtype', 'np.dtype', (['object'], {}), '(object)\n', (91260, 91268), True, 'import numpy as np\n'), ((92045, 92061), 'numpy.dtype', 'np.dtype', (['object'], {}), '(object)\n', (92053, 92061), True, 'import numpy as np\n'), ((92098, 92114), 'numpy.dtype', 'np.dtype', (['object'], {}), '(object)\n', (92106, 92114), True, 'import numpy as np\n'), ((92865, 92972), 'allel.io.vcf_read.ANNTransformer', 'ANNTransformer', ([], {'fields': "['Allele', 'HGVS_c', 'AA']", 'types': "{'Allele': string_type, 'HGVS_c': string_type}"}), "(fields=['Allele', 'HGVS_c', 'AA'], types={'Allele':\n string_type, 'HGVS_c': string_type})\n", (92879, 92972), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, 
ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((93620, 93636), 'numpy.dtype', 'np.dtype', (['object'], {}), '(object)\n', (93628, 93636), True, 'import numpy as np\n'), ((93673, 93689), 'numpy.dtype', 'np.dtype', (['object'], {}), '(object)\n', (93681, 93689), True, 'import numpy as np\n'), ((94245, 94264), 'os.remove', 'os.remove', (['csv_path'], {}), '(csv_path)\n', (94254, 94264), False, 'import os\n'), ((94576, 94619), 'allel.test.tools.compare_arrays', 'compare_arrays', (['df[k].values', 'adf[k].values'], {}), '(df[k].values, adf[k].values)\n', (94590, 94619), False, 'from allel.test.tools import compare_arrays\n'), ((95939, 96046), 'allel.io.vcf_read.ANNTransformer', 'ANNTransformer', ([], {'fields': "['Allele', 'HGVS_c', 'AA']", 'types': "{'Allele': string_type, 'HGVS_c': string_type}"}), "(fields=['Allele', 'HGVS_c', 'AA'], types={'Allele':\n string_type, 'HGVS_c': string_type})\n", (95953, 96046), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((96389, 96408), 'os.remove', 'os.remove', (['csv_path'], {}), '(csv_path)\n', (96398, 96408), False, 'import os\n'), ((96733, 96776), 'allel.test.tools.compare_arrays', 'compare_arrays', (['df[k].values', 'adf[k].values'], {}), '(df[k].values, adf[k].values)\n', (96747, 96776), False, 'from allel.test.tools import compare_arrays\n'), ((97461, 97482), 'numpy.dtype', 'np.dtype', (['string_type'], {}), '(string_type)\n', (97469, 97482), True, 'import numpy as np\n'), ((98625, 98646), 'numpy.dtype', 'np.dtype', (['string_type'], {}), '(string_type)\n', (98633, 98646), True, 'import numpy as np\n'), ((99823, 99930), 'allel.io.vcf_read.ANNTransformer', 'ANNTransformer', ([], {'fields': "['Allele', 'HGVS_c', 'AA']", 'types': "{'Allele': string_type, 'HGVS_c': string_type}"}), "(fields=['Allele', 'HGVS_c', 'AA'], types={'Allele':\n string_type, 'HGVS_c': string_type})\n", (99837, 99930), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((100514, 100535), 'numpy.dtype', 'np.dtype', (['string_type'], {}), '(string_type)\n', (100522, 100535), True, 'import numpy as np\n'), ((100571, 100592), 'numpy.dtype', 'np.dtype', (['string_type'], {}), '(string_type)\n', (100579, 100592), True, 'import numpy as np\n'), ((47090, 47106), 'allel.io.vcf_read.ANNTransformer', 'ANNTransformer', ([], {}), '()\n', (47104, 47106), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((51126, 51142), 'allel.io.vcf_read.ANNTransformer', 'ANNTransformer', ([], {}), '()\n', (51140, 51142), False, 'from allel.io.vcf_read import iter_vcf_chunks, read_vcf, vcf_to_zarr, vcf_to_hdf5, vcf_to_npz, ANNTransformer, vcf_to_dataframe, vcf_to_csv, vcf_to_recarray, read_vcf_headers\n'), ((73292, 73316), 'os.path.exists', 'os.path.exists', (['npz_path'], {}), '(npz_path)\n', (73306, 73316), False, 'import os\n'), ((76350, 76375), 'os.path.exists', 'os.path.exists', (['zarr_path'], {}), '(zarr_path)\n', (76364, 76375), False, 'import os\n'), ((76573, 76593), 'allel.test.tools.compare_arrays', 'compare_arrays', (['e', 'a'], {}), '(e, a)\n', (76587, 76593), False, 'from allel.test.tools import compare_arrays\n'), ((84115, 84138), 'os.path.exists', 
'os.path.exists', (['h5_path'], {}), '(h5_path)\n', (84129, 84138), False, 'import os\n'), ((84170, 84198), 'h5py.File', 'h5py.File', (['h5_path'], {'mode': '"""r"""'}), "(h5_path, mode='r')\n", (84179, 84198), False, 'import h5py\n'), ((87328, 87348), 'allel.test.tools.compare_arrays', 'compare_arrays', (['e', 'a'], {}), '(e, a)\n', (87342, 87348), False, 'from allel.test.tools import compare_arrays\n'), ((88447, 88492), 'allel.test.tools.compare_arrays', 'compare_arrays', (['expected[key]', 'actual[key][:]'], {}), '(expected[key], actual[key][:])\n', (88461, 88492), False, 'from allel.test.tools import compare_arrays\n'), ((90288, 90322), 'allel.test.tools.compare_arrays', 'compare_arrays', (['e', 'df[name].values'], {}), '(e, df[name].values)\n', (90302, 90322), False, 'from allel.test.tools import compare_arrays\n'), ((73501, 73554), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['expected[key]', 'actual[key]'], {}), '(expected[key], actual[key])\n', (73526, 73554), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((73597, 73643), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['expected[key]', 'actual[key]'], {}), '(expected[key], actual[key])\n', (73615, 73643), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((84274, 84319), 'allel.test.tools.compare_arrays', 'compare_arrays', (['expected[key]', 'actual[key][:]'], {}), '(expected[key], actual[key][:])\n', (84288, 84319), False, 'from allel.test.tools import compare_arrays\n'), ((89212, 89266), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['expect[key]', 'actual[key][:]'], {}), '(expect[key], actual[key][:])\n', (89237, 89266), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((97697, 97727), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['e', 'a[name]'], {}), '(e, a[name])\n', (97715, 97727), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((98861, 98891), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['e', 'a[name]'], {}), '(e, a[name])\n', (98879, 98891), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((100797, 100827), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['e', 'a[name]'], {}), '(e, a[name])\n', (100815, 100827), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((89572, 89619), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['expect[key]', 'actual[key][:]'], {}), '(expect[key], actual[key][:])\n', (89590, 89619), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((90417, 90469), 'allel.test.tools.compare_arrays', 'compare_arrays', (['e[:, i]', "df['%s_%s' % (name, i + 1)]"], {}), "(e[:, i], df['%s_%s' % (name, i + 1)])\n", (90431, 90469), False, 'from allel.test.tools import compare_arrays\n'), ((97834, 97889), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['e[:, i]', "a['%s_%s' % (name, i + 1)]"], {}), "(e[:, i], a['%s_%s' % (name, i + 1)])\n", (97852, 97889), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((98998, 99053), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['e[:, i]', "a['%s_%s' % (name, i + 1)]"], {}), "(e[:, i], a['%s_%s' % (name, i + 1)])\n", (99016, 99053), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((100934, 100989), 
'numpy.testing.assert_array_equal', 'assert_array_equal', (['e[:, i]', "a['%s_%s' % (name, i + 1)]"], {}), "(e[:, i], a['%s_%s' % (name, i + 1)])\n", (100952, 100989), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n')]
|
"""
Tests for the image_io module (pyto.io.image_io)
# Author: <NAME>
# $Id:$
"""
from __future__ import unicode_literals
from __future__ import print_function
__version__ = "$Revision:$"
from copy import copy, deepcopy
import pickle
import os.path
import unittest
import numpy
import numpy.testing as np_test
import scipy
from pyto.io.image_io import ImageIO
class TestImageIO(np_test.TestCase):
"""
Tests class ImageIO
"""
def setUp(self):
"""
        Sets the absolute path of this file's directory as self.dir and writes
        a small raw test file used by the read tests
"""
# set absolute path to current dir
working_dir = os.getcwd()
file_dir, name = os.path.split(__file__)
self.dir = os.path.join(working_dir, file_dir)
# make raw file
self.raw_shape = (4,3,2)
self.raw_dtype = 'int16'
self.raw_data = numpy.arange(
24, dtype=self.raw_dtype).reshape(self.raw_shape)
raw = ImageIO()
self.raw_file_name = 'data.raw'
raw.write(file=self.raw_file_name, data=self.raw_data)
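        # Note: the raw file written above carries no header, so the read
        # tests below have to pass dataType and shape explicitly when they
        # load it back.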
def testRead(self):
"""
        Tests reading EM, MRC and raw files
"""
# EM tomo
em = ImageIO()
em.read(file=os.path.join(self.dir, "bin-2.em"))
expected = numpy.array([[-0.0242, -0.0250, 0.0883],
[0.0640, 0.0071, -0.1300],
[-0.0421, -0.0392, -0.0312]])
np_test.assert_almost_equal(em.data[50:53, 120:123, 40], expected,
decimal=4)
expected = numpy.array([[-0.0573, 0.0569, 0.0386],
[0.1309, 0.1211, -0.0881],
[-0.0110, -0.0240, 0.0347]])
np_test.assert_almost_equal(em.data[150:153, 20:23, 10], expected,
decimal=4)
np_test.assert_equal(em.byteOrder, '<')
np_test.assert_equal(em.arrayOrder, 'F')
np_test.assert_equal(em.dataType, 'float32')
np_test.assert_equal(em.data.dtype, numpy.dtype('float32'))
np_test.assert_equal(em.memmap, False)
# EM tomo with memory map
em.read(file=os.path.join(self.dir, "bin-2.em"), memmap=True)
expected = numpy.array([[-0.0242, -0.0250, 0.0883],
[0.0640, 0.0071, -0.1300],
[-0.0421, -0.0392, -0.0312]])
np_test.assert_almost_equal(em.data[50:53, 120:123, 40], expected,
decimal=4)
expected = numpy.array([[-0.0573, 0.0569, 0.0386],
[0.1309, 0.1211, -0.0881],
[-0.0110, -0.0240, 0.0347]])
np_test.assert_almost_equal(em.data[150:153, 20:23, 10], expected,
decimal=4)
np_test.assert_equal(em.byteOrder, '<')
np_test.assert_equal(em.arrayOrder, 'F')
np_test.assert_equal(em.dataType, 'float32')
np_test.assert_equal(em.data.dtype, numpy.dtype('float32'))
np_test.assert_equal(em.memmap, True)
# EM, big-endian
em = ImageIO()
em.read(file=os.path.join(self.dir, "mac-file.em"))
np_test.assert_equal(em.byteOrder, '>')
# EM, little-endian
em = ImageIO()
em.read(file=os.path.join(self.dir, "pc-file.em"))
np_test.assert_equal(em.byteOrder, '<')
em.read(file=os.path.join(self.dir, "pc-file.em"), memmap=True)
np_test.assert_equal(em.byteOrder, '<')
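        # The EM header apparently records the originating machine, which
        # ImageIO exposes as byteOrder: '>' for the big-endian (mac) file and
        # '<' for the little-endian (pc) file checked above.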
# MRC tomo
mrc = ImageIO()
mrc.read(file=os.path.join(self.dir, "bin-2.mrc"))
expected = numpy.array([[-0.0242, -0.0250, 0.0883],
[0.0640, 0.0071, -0.1300],
[-0.0421, -0.0392, -0.0312]])
np_test.assert_almost_equal(mrc.data[50:53, 120:123, 40], expected,
decimal=4)
expected = numpy.array([[-0.0573, 0.0569, 0.0386],
[0.1309, 0.1211, -0.0881],
[-0.0110, -0.0240, 0.0347]])
np_test.assert_almost_equal(mrc.data[150:153, 20:23, 10], expected,
decimal=4)
np_test.assert_equal(mrc.byteOrder, '<')
np_test.assert_equal(mrc.arrayOrder, 'F')
np_test.assert_equal(mrc.dataType, 'float32')
np_test.assert_equal(mrc.data.dtype, numpy.dtype('float32'))
np_test.assert_equal(mrc.memmap, False)
# MRC tomo with memmap
mrc = ImageIO()
mrc.read(file=os.path.join(self.dir, "bin-2.mrc"), memmap=True)
expected = numpy.array([[-0.0242, -0.0250, 0.0883],
[0.0640, 0.0071, -0.1300],
[-0.0421, -0.0392, -0.0312]])
np_test.assert_almost_equal(mrc.data[50:53, 120:123, 40], expected,
decimal=4)
expected = numpy.array([[-0.0573, 0.0569, 0.0386],
[0.1309, 0.1211, -0.0881],
[-0.0110, -0.0240, 0.0347]])
np_test.assert_almost_equal(mrc.data[150:153, 20:23, 10], expected,
decimal=4)
np_test.assert_equal(mrc.byteOrder, '<')
np_test.assert_equal(mrc.arrayOrder, 'F')
np_test.assert_equal(mrc.dataType, 'float32')
np_test.assert_equal(mrc.data.dtype, numpy.dtype('float32'))
np_test.assert_equal(mrc.memmap, True)
# MRC tomo with extended header
mrc = ImageIO()
mrc.read(file=os.path.join(self.dir, "bin-2_ext.mrc"), memmap=False)
expected = numpy.array([[-0.0242, -0.0250, 0.0883],
[0.0640, 0.0071, -0.1300],
[-0.0421, -0.0392, -0.0312]])
np_test.assert_almost_equal(mrc.data[50:53, 120:123, 40], expected,
decimal=4)
expected = numpy.array([[-0.0573, 0.0569, 0.0386],
[0.1309, 0.1211, -0.0881],
[-0.0110, -0.0240, 0.0347]])
np_test.assert_almost_equal(mrc.data[150:153, 20:23, 10], expected,
decimal=4)
np_test.assert_equal(mrc.byteOrder, '<')
np_test.assert_equal(mrc.arrayOrder, 'F')
np_test.assert_equal(mrc.dataType, 'float32')
np_test.assert_equal(mrc.data.dtype, numpy.dtype('float32'))
np_test.assert_equal(mrc.memmap, False)
np_test.assert_equal(mrc.extendedHeaderLength, 5120)
# MRC tomo with extended header and with memmap
mrc = ImageIO()
mrc.read(file=os.path.join(self.dir, "bin-2_ext.mrc"), memmap=True)
expected = numpy.array([[-0.0242, -0.0250, 0.0883],
[0.0640, 0.0071, -0.1300],
[-0.0421, -0.0392, -0.0312]])
np_test.assert_almost_equal(mrc.data[50:53, 120:123, 40], expected,
decimal=4)
expected = numpy.array([[-0.0573, 0.0569, 0.0386],
[0.1309, 0.1211, -0.0881],
[-0.0110, -0.0240, 0.0347]])
np_test.assert_almost_equal(mrc.data[150:153, 20:23, 10], expected,
decimal=4)
np_test.assert_equal(mrc.byteOrder, '<')
np_test.assert_equal(mrc.arrayOrder, 'F')
np_test.assert_equal(mrc.dataType, 'float32')
np_test.assert_equal(mrc.data.dtype, numpy.dtype('float32'))
np_test.assert_equal(mrc.memmap, True)
np_test.assert_equal(mrc.extendedHeaderLength, 5120)
        # another MRC tomo (and-tomo.mrc)
mrc = ImageIO()
mrc.read(file=os.path.join(self.dir, "and-tomo.mrc"))
expected = numpy.array([[-0.0329, -0.0006, -0.0698],
[-0.0101, -0.1196, -0.1295],
[0.0844, -0.0400, -0.0716]])
np_test.assert_almost_equal(mrc.data[50:53, 120:123, 40], expected,
decimal=4)
expected = numpy.array([[-0.0019, -0.0085, 0.0036],
[0.0781, 0.0279, -0.0365],
[0.0210, -0.0193, -0.0355]])
np_test.assert_almost_equal(mrc.data[150:153, 20:23, 60], expected,
decimal=4)
np_test.assert_equal(mrc.dataType, 'float32')
np_test.assert_equal(mrc.data.dtype, numpy.dtype('float32'))
np_test.assert_equal(mrc.memmap, False)
        # another MRC tomo (and-tomo.mrc) with memmap
mrc = ImageIO()
mrc.read(file=os.path.join(self.dir, "and-tomo.mrc"), memmap=True)
expected = numpy.array([[-0.0329, -0.0006, -0.0698],
[-0.0101, -0.1196, -0.1295],
[0.0844, -0.0400, -0.0716]])
np_test.assert_almost_equal(mrc.data[50:53, 120:123, 40], expected,
decimal=4)
expected = numpy.array([[-0.0019, -0.0085, 0.0036],
[0.0781, 0.0279, -0.0365],
[0.0210, -0.0193, -0.0355]])
np_test.assert_almost_equal(mrc.data[150:153, 20:23, 60], expected,
decimal=4)
np_test.assert_equal(mrc.dataType, 'float32')
np_test.assert_equal(mrc.data.dtype, numpy.dtype('float32'))
np_test.assert_equal(mrc.memmap, True)
# mrc with the opposite byte order
mrc2 = ImageIO()
mrc2.read(file=os.path.join(self.dir, "swapped_byte_order.mrc"))
expected = numpy.array(
[[ 0.000, 0.000],
[-0.341, -6.702],
[0.782, -11.780],
[0.327, -14.298],
[-0.691, -17.411],
[-0.337, -18.076],
[-0.669, -19.157],
[-0.799, -20.400],
[-0.793, -21.286],
[-1.008, -21.386]])
np_test.assert_almost_equal(mrc2.data[:,:,0], expected, decimal=3)
np_test.assert_equal(mrc2.memmap, False)
raised = False
try:
mrc2.read(
file=os.path.join(self.dir, "swapped_byte_order.mrc"),
memmap=True)
except ValueError:
raised = True
np_test.assert_equal(raised, True)
np_test.assert_equal(mrc2.memmap, True)
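        # Reading the byte-swapped MRC works when the data are loaded eagerly,
        # but with memmap=True it raises ValueError (asserted above),
        # presumably because a memory-mapped array cannot be byte-swapped
        # transparently.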
# new style header mrc
mrc_new = ImageIO()
mrc_new.read(file=os.path.join(self.dir, 'new-head_int16.mrc'))
np_test.assert_equal(mrc_new.dataType, 'int16')
np_test.assert_equal(mrc_new.data.dtype, numpy.dtype('int16'))
np_test.assert_equal(mrc_new.byteOrder, '<')
np_test.assert_equal(mrc_new.arrayOrder, 'F')
np_test.assert_equal(mrc_new.shape, (40,30,20))
np_test.assert_equal(mrc_new.pixel, [0.4, 0.4, 0.4])
np_test.assert_equal(mrc_new.pixelsize, 0.4)
np_test.assert_equal(mrc_new.data[14,8,10], -14)
np_test.assert_equal(mrc_new.data[15,23,12], 10)
np_test.assert_equal(mrc_new.data[23,29,16], 2)
np_test.assert_equal(mrc_new.memmap, False)
        # new style header mrc, read with memmap
mrc_new = ImageIO()
mrc_new.read(
file=os.path.join(self.dir, 'new-head_int16.mrc'), memmap=True)
np_test.assert_equal(mrc_new.dataType, 'int16')
np_test.assert_equal(mrc_new.data.dtype, numpy.dtype('int16'))
np_test.assert_equal(mrc_new.byteOrder, '<')
np_test.assert_equal(mrc_new.arrayOrder, 'F')
np_test.assert_equal(mrc_new.shape, (40,30,20))
np_test.assert_equal(mrc_new.pixel, [0.4, 0.4, 0.4])
np_test.assert_equal(mrc_new.pixelsize, 0.4)
np_test.assert_equal(mrc_new.data[14,8,10], -14)
np_test.assert_equal(mrc_new.data[15,23,12], 10)
np_test.assert_equal(mrc_new.data[23,29,16], 2)
np_test.assert_equal(mrc_new.memmap, True)
np_test.assert_equal(mrc_new.n_labels, 9)
np_test.assert_equal(len(mrc_new.labels), 9)
desired = (
b"COMBINEFFT: Combined FFT from two tomograms "
+ b"07-Oct-13 17:15:24" )
np_test.assert_equal(len(mrc_new.labels[3]), 80)
np_test.assert_equal(mrc_new.labels[3][:len(desired)], desired)
desired = (
b"NEWSTACK: Images copied 10-Oct-13 18:00:03")
np_test.assert_equal(len(mrc_new.labels[6]), 80)
np_test.assert_equal(mrc_new.labels[6][:len(desired)], desired)
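        # MRC headers reserve space for fixed-width text labels of 80 bytes
        # each (an assumption about the format); here n_labels is 9 and each
        # label is padded to 80 bytes, as the assertions above check.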
# test raw file
raw = ImageIO()
raw.read(
file=self.raw_file_name, dataType=self.raw_dtype,
shape=self.raw_shape)
np_test.assert_equal(raw.data, self.raw_data)
np_test.assert_equal(raw.memmap, False)
# test raw file with memmap
raw = ImageIO()
raw.read(
file=self.raw_file_name, dataType=self.raw_dtype,
shape=self.raw_shape, memmap=True)
np_test.assert_equal(raw.data, self.raw_data)
np_test.assert_equal(raw.memmap, True)
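        # Minimal usage sketch of the read API exercised above (hypothetical
        # file names, for orientation only):
        #   image = ImageIO()
        #   image.read(file='tomo.mrc', memmap=True)   # dtype/shape from header
        #   image.read(file='data.raw', dataType='int16', shape=(4, 3, 2))
        #   image.data, image.pixelsize, image.byteOrder  # populated attributes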
def testWrite(self):
"""
        Tests write (and implicitly read) for the EM, MRC and raw formats.
"""
# arrays
ar_uint8 = numpy.array([54, 200, 5, 7, 45, 123],
dtype='uint8').reshape((3,1,2))
        ar_int8 = numpy.array([54, 2, -5, 7, 45, 123],
                              dtype='int8').reshape((3,1,2))
ar_uint16 = numpy.array([1034, 546, 248, 40000, 2345, 365, 4876, 563],
dtype='uint16').reshape((2,2,2))
ar_int16 = numpy.array([1034, 546, -248, 156, 2345, 365, -4876, 563],
dtype='int16').reshape((2,2,2))
ar_int32 = numpy.array([1034, 56546, -223448, 156,
2345, 2**31-10, -884876, 563],
dtype='int32').reshape((2,2,2))
ar_uint32 = numpy.array([1034, 56546, 223448, 156,
2345, 365, 884876, 2**32-10],
dtype='uint32').reshape((2,2,2))
ar_int8_2 = numpy.arange(24, dtype='int8').reshape((4,3,2))
ar_int16_2 = numpy.arange(24, dtype='int16').reshape((4,3,2))
ar2_int16 = numpy.array([1034, 546, -248, 156, 2345, 365, -4876, 563],
dtype='int16').reshape((2,4))
ar_int16_f = numpy.array(
[1034, 546, -248, 156, 2345, 365, -4876, 563],
dtype='int16', order='F').reshape((2,2,2))
ar_int16_c = numpy.array(
[1034, 546, -248, 156, 2345, 365, -4876, 563],
dtype='int16', order='C').reshape((2,2,2))
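        # The arrays above exercise the dtype-conversion paths of write():
        # with casting='safe' a conversion that could lose information is
        # expected to raise TypeError, while casting='unsafe' forces it where
        # the target file format supports the requested dataType (see the
        # assertions below).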
# em uint8
file_out = ImageIO()
file_out.write(file=os.path.join(self.dir, '_test.em'), data=ar_uint8)
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.em'))
np_test.assert_equal(file_in.dataType, 'uint8')
np_test.assert_equal(file_in.data, ar_uint8)
# em uint16
file_out = ImageIO()
file_out.write(file=os.path.join(self.dir, '_test.em'), data=ar_uint16)
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.em'))
np_test.assert_equal(file_in.dataType, 'uint16')
np_test.assert_equal(file_in.data, ar_uint16)
# em int16 converted to int32, safe casting
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.em'),
data=ar_int16, dataType='int32', casting='safe')
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.em'))
np_test.assert_equal(file_in.dataType, 'int32')
np_test.assert_equal(file_in.data, ar_int16)
# em int16, safe casting
file_out = ImageIO()
np_test.assert_raises(
TypeError,
file_out.write,
**{'file':os.path.join(self.dir, '_test.em'),
'data':ar_int16, 'casting':'safe'})
# em int16 converted to uint16, unsafe casting
file_out = ImageIO()
print("int16 to uint16")
file_out.write(file=os.path.join(self.dir, '_test.em'),
data=ar_int16, dataType='uint16', casting='unsafe')
print("int16 to uint16 end")
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.em'))
np_test.assert_equal(file_in.dataType, 'uint16')
np_test.assert_equal(file_in.data.dtype, numpy.dtype('uint16'))
np_test.assert_equal(file_in.data[0,1,0] == ar_int16[0,1,0], False)
# em int16 to uint16, safe casting
file_out = ImageIO()
np_test.assert_raises(
TypeError,
file_out.write,
**{'file':os.path.join(self.dir, '_test.em'),
'data':ar_int16, 'dataType':'uint16', 'casting':'safe'})
# em uint16 to int16, unsafe casting
file_out = ImageIO()
np_test.assert_raises(
TypeError,
file_out.write,
**{'file':os.path.join(self.dir, '_test.em'),
'data':ar_uint16, 'dataType':'int16', 'casting':'unsafe'})
# em uint32 to int32, safe casting
print("uint32 to int32 safe")
file_out = ImageIO()
np_test.assert_raises(
TypeError,
file_out.write,
**{'file':os.path.join(self.dir, '_test.em'),
'data':ar_uint32, 'dataType':'int32', 'casting':'safe'})
# em uint32 converted to int32, unsafe casting
print("uint32 to int32")
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.em'),
data=ar_uint32, dataType='int32', casting='unsafe')
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.em'))
np_test.assert_equal(file_in.dataType, 'int32')
#np_test.assert_equal(file_in.data, ar_uint32) should fail
np_test.assert_equal(file_in.data[0,0,0] == ar_uint32[0,0,0], True)
np_test.assert_equal(file_in.data[1,1,1] == ar_uint32[1,1,1], False)
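        # Element [1,1,1] is 2**32 - 10, which does not fit into int32, so the
        # unsafe cast changes it; element [0,0,0] (1034) fits and survives,
        # hence the mixed True/False assertions above.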
# em uint32 to float32, safe casting
file_out = ImageIO()
np_test.assert_raises(
TypeError,
file_out.write,
**{'file':os.path.join(self.dir, '_test.em'),
'data':ar_uint32, 'dataType':'float32', 'casting':'safe'})
# em uint32 to float32, unsafe casting
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.em'),
data=ar_uint32, dataType='float32', casting='unsafe')
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.em'))
np_test.assert_equal(file_in.dataType, 'float32')
#np_test.assert_almost_equal(file_in.data, ar_uint32) should fail
np_test.assert_equal(
file_in.data[0,0,0] == ar_uint32[0,0,0], True)
np_test.assert_equal(
file_in.data[1,1,1] == ar_uint32[1,1,1], False)
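        # float32 has a 24-bit significand, so 2**32 - 10 cannot be
        # represented exactly after the unsafe cast, while the small value at
        # [0,0,0] survives, which is what the assertions above check.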
# em int32 to float32, unsafe casting
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.em'),
data=ar_int32, dataType='float32', casting='unsafe')
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.em'))
np_test.assert_equal(file_in.dataType, 'float32')
#np_test.assert_almost_equal(file_in.data, ar_int32) should fail
np_test.assert_equal(
file_in.data[0,0,0] == ar_int32[0,0,0], True)
np_test.assert_equal(
file_in.data[1,0,1] == ar_int32[1,0,1], False)
# em int32 to float64, safe casting
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.em'),
data=ar_int32, dataType='float64', casting='safe')
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.em'))
np_test.assert_equal(file_in.dataType, 'float64')
np_test.assert_almost_equal(file_in.data, ar_int32)
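        # Every int32 value is exactly representable in float64 (53-bit
        # significand), so this cast qualifies as 'safe' and the data
        # round-trip exactly.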
# mrc data type and shape from args
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.mrc'),
data=ar_int8_2, shape=(2,3,4), dataType='int16')
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'))
np_test.assert_equal(file_in.dataType, 'int16')
np_test.assert_equal(file_in.shape, (2,3,4))
# mrc data type and shape from previously given data
file_out = ImageIO()
file_out.setData(ar_int16_2)
file_out.write(file=os.path.join(self.dir, '_test.mrc'))
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'))
np_test.assert_equal(file_in.dataType, 'int16')
np_test.assert_equal(file_in.shape, (4,3,2))
# mrc data type and shape from attributes
file_out = ImageIO()
file_out.data = ar_int8_2
file_out.shape = (2,3,4)
file_out.dataType = 'int16'
file_out.write(file=os.path.join(self.dir, '_test.mrc'))
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'))
np_test.assert_equal(file_in.dataType, 'int16')
np_test.assert_equal(file_in.shape, (2,3,4))
# mrc data type and shape from data
file_out = ImageIO()
file_out.write(file=os.path.join(self.dir, '_test.mrc'),
data=ar_int16_2)
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'))
np_test.assert_equal(file_in.dataType, 'int16')
np_test.assert_equal(file_in.shape, (4,3,2))
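        # Taken together, the four blocks above suggest the precedence used by
        # write(): explicit dataType/shape arguments first, then values set
        # via setData() or attributes, and finally the data array itself.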
# mrc uint8, same as ubyte
file_out = ImageIO()
file_out.write(file=os.path.join(self.dir, '_test.mrc'), data=ar_uint8)
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'))
np_test.assert_equal(file_in.dataType, 'ubyte')
np_test.assert_almost_equal(file_in.data, ar_uint8)
# mrc uint16
file_out = ImageIO()
np_test.assert_raises(
(KeyError, TypeError),
file_out.write,
**{'file':os.path.join(self.dir, '_test.mrc'), 'data':ar_uint16})
        # mrc uint16 to ubyte, safe casting
file_out = ImageIO()
np_test.assert_raises(
TypeError,
file_out.write,
**{'file':os.path.join(self.dir, '_test.mrc'),
'data':ar_uint16, 'dataType':'ubyte', 'casting':'safe'})
# mrc uint16 to int16, unsafe casting
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.mrc'),
data=ar_uint16, dataType='int16', casting='unsafe')
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'))
np_test.assert_equal(file_in.dataType, 'int16')
#np_test.assert_almost_equal(file_in.data, ar_uint16) should fail
np_test.assert_equal(file_in.data[0,0,0] == ar_uint16[0,0,0], True)
np_test.assert_equal(file_in.data[0,1,1] == ar_uint16[0,1,1], False)
# mrc int16
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.mrc'), data=ar_int16, pixel=2.3)
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'))
np_test.assert_equal(file_in.dataType, 'int16')
np_test.assert_equal(file_in.data, ar_int16)
np_test.assert_equal(file_in.pixel, [2.3, 2.3, 2.3])
np_test.assert_equal(file_in.pixelsize, 2.3)
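        # The pixel size passed to write() is apparently stored in the MRC
        # header (cell dimensions); on read it comes back as a per-axis list
        # (pixel) and as a scalar (pixelsize).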
# mrc int16 2D
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.mrc'), data=ar2_int16, pixel=3.4)
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'))
np_test.assert_equal(file_in.dataType, 'int16')
np_test.assert_equal(file_in.data[:,:,0], ar2_int16)
np_test.assert_equal(file_in.pixelsize, 3.4)
# mrc int8 to int16
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.mrc'), data=ar_int8,
dataType='int16', casting='safe')
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'))
np_test.assert_equal(file_in.dataType, 'int16')
np_test.assert_equal(file_in.data, ar_int8)
# mrc int32
file_out = ImageIO()
np_test.assert_raises(
(KeyError, TypeError),
file_out.write,
**{'file':os.path.join(self.dir, '_test.mrc'), 'data':ar_int32})
# mrc int32 to int16
file_out = ImageIO()
np_test.assert_raises(
TypeError,
file_out.write,
**{'file':os.path.join(self.dir, '_test.mrc'),
'data':ar_int32, 'dataType':'int16', 'casting':'safe'})
# mrc int32 to float32
file_out = ImageIO()
np_test.assert_raises(
TypeError,
file_out.write,
**{'file':os.path.join(self.dir, '_test.mrc'),
'data':ar_int32, 'dataType':'float32', 'casting':'safe'})
# mrc int32 to complex64
file_out = ImageIO()
np_test.assert_raises(
TypeError,
file_out.write,
**{'file':os.path.join(self.dir, '_test.mrc'),
'data':ar_int32, 'dataType':'complex64', 'casting':'safe'})
# raw int16
file_out = ImageIO()
file_out.write(file=os.path.join(self.dir, '_test.raw'), data=ar_int16)
file_in = ImageIO()
file_in.read(
file=os.path.join(self.dir, '_test.raw'),
dataType='int16', shape=(2,2,2))
np_test.assert_equal(file_in.dataType, 'int16')
np_test.assert_equal(file_in.data, ar_int16)
# raw int8 to int16
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.raw'),
data=ar_int8, dataType='int16')
file_in = ImageIO()
file_in.read(
file=os.path.join(self.dir, '_test.raw'),
dataType='int16', shape=(3,1,2))
np_test.assert_equal(file_in.dataType, 'int16')
np_test.assert_equal(file_in.data, ar_int8)
# raw int16 to int8
file_out = ImageIO()
np_test.assert_raises(
TypeError,
file_out.write,
**{'file':os.path.join(self.dir, '_test.raw'),
'data':ar_int16, 'dataType':'int8', 'casting':'safe'})
        # explain the error messages printed above
        print("It's fine if a few error messages were printed just before " +
              "this line, because they have been caught.")
# shape param
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.mrc'),
data=ar_int16, dataType='int16')
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'), dataType='int16')
np_test.assert_equal(file_in.data.shape, (2,2,2))
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.mrc'),
data=ar_int16, dataType='int16', shape=(1,4,2))
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'), dataType='int16')
np_test.assert_equal(file_in.data.shape, (1,4,2))
file_out.write(
file=os.path.join(self.dir, '_test.mrc'),
data=ar_int16, dataType='int16', shape=(4,2))
file_in.readHeader(file=os.path.join(self.dir, '_test.mrc'))
file_in.read(file=os.path.join(self.dir, '_test.mrc'), dataType='int16')
np_test.assert_equal(file_in.data.shape, (4,2,1))
file_in.read(
file=os.path.join(self.dir, '_test.mrc'),
dataType='int16', shape=(2,2,2))
np_test.assert_equal(file_in.data.shape, (2,2,2))
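        # When no shape is passed to read(), it presumably falls back to the
        # shape recorded in the header (hence readHeader() above), and a 2D
        # shape such as (4,2) is padded to (4,2,1); an explicit shape argument
        # overrides both.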
# array order C, read write default (F)
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.mrc'), data=ar_int16_c)
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'))
np_test.assert_equal(file_in.data, ar_int16_c)
# array order C, read write C
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.mrc'),
data=ar_int16_c, arrayOrder='C')
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'), arrayOrder='C')
np_test.assert_equal(file_in.data, ar_int16_c)
# array order F, read write default (F)
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.mrc'), data=ar_int16_f)
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'))
np_test.assert_equal(file_in.data, ar_int16_f)
# array order F, read write F
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.mrc'),
data=ar_int16_f, arrayOrder='F')
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'), arrayOrder='F')
np_test.assert_equal(file_in.data, ar_int16_f)
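        # Both 'C' and 'F' array orders round-trip: data written in either
        # order come back equal when read with the matching (or default 'F')
        # arrayOrder, as asserted above.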
def testPixelSize(self):
"""
Tests pixel size in read and write
"""
# arrays
#ar_int8_2 = numpy.arange(24, dtype='int8').reshape((4,3,2))
ar_int16_2 = numpy.arange(24, dtype='int16').reshape((4,3,2))
#
file_out = ImageIO()
file_out.write(
file=os.path.join(self.dir, '_test.mrc'),
data=ar_int16_2, pixel=2.1)
file_in = ImageIO()
file_in.read(file=os.path.join(self.dir, '_test.mrc'))
np_test.assert_almost_equal(file_in.pixel, 2.1)
def tearDown(self):
"""
Remove temporary files
"""
try:
os.remove(os.path.join(self.dir, '_test.em'))
except OSError:
pass
try:
os.remove(os.path.join(self.dir, '_test.mrc'))
except OSError:
pass
try:
os.remove(os.path.join(self.dir, '_test.raw'))
except OSError:
pass
try:
os.remove(os.path.join(self.dir, self.raw_file_name))
except OSError:
pass
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestImageIO)
unittest.TextTestRunner(verbosity=2).run(suite)
|
[
"unittest.TextTestRunner",
"numpy.testing.assert_almost_equal",
"numpy.dtype",
"pyto.io.image_io.ImageIO",
"numpy.array",
"unittest.TestLoader",
"numpy.testing.assert_equal",
"numpy.arange"
] |
[((928, 937), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (935, 937), False, 'from pyto.io.image_io import ImageIO\n'), ((1170, 1179), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (1177, 1179), False, 'from pyto.io.image_io import ImageIO\n'), ((1256, 1354), 'numpy.array', 'numpy.array', (['[[-0.0242, -0.025, 0.0883], [0.064, 0.0071, -0.13], [-0.0421, -0.0392, -0.0312]\n ]'], {}), '([[-0.0242, -0.025, 0.0883], [0.064, 0.0071, -0.13], [-0.0421, -\n 0.0392, -0.0312]])\n', (1267, 1354), False, 'import numpy\n'), ((1440, 1517), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['em.data[50:53, 120:123, 40]', 'expected'], {'decimal': '(4)'}), '(em.data[50:53, 120:123, 40], expected, decimal=4)\n', (1467, 1517), True, 'import numpy.testing as np_test\n'), ((1573, 1670), 'numpy.array', 'numpy.array', (['[[-0.0573, 0.0569, 0.0386], [0.1309, 0.1211, -0.0881], [-0.011, -0.024, 0.0347]\n ]'], {}), '([[-0.0573, 0.0569, 0.0386], [0.1309, 0.1211, -0.0881], [-0.011,\n -0.024, 0.0347]])\n', (1584, 1670), False, 'import numpy\n'), ((1757, 1834), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['em.data[150:153, 20:23, 10]', 'expected'], {'decimal': '(4)'}), '(em.data[150:153, 20:23, 10], expected, decimal=4)\n', (1784, 1834), True, 'import numpy.testing as np_test\n'), ((1879, 1918), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['em.byteOrder', '"""<"""'], {}), "(em.byteOrder, '<')\n", (1899, 1918), True, 'import numpy.testing as np_test\n'), ((1927, 1967), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['em.arrayOrder', '"""F"""'], {}), "(em.arrayOrder, 'F')\n", (1947, 1967), True, 'import numpy.testing as np_test\n'), ((1976, 2020), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['em.dataType', '"""float32"""'], {}), "(em.dataType, 'float32')\n", (1996, 2020), True, 'import numpy.testing as np_test\n'), ((2097, 2135), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['em.memmap', '(False)'], {}), '(em.memmap, False)\n', (2117, 2135), True, 'import numpy.testing as np_test\n'), ((2260, 2358), 'numpy.array', 'numpy.array', (['[[-0.0242, -0.025, 0.0883], [0.064, 0.0071, -0.13], [-0.0421, -0.0392, -0.0312]\n ]'], {}), '([[-0.0242, -0.025, 0.0883], [0.064, 0.0071, -0.13], [-0.0421, -\n 0.0392, -0.0312]])\n', (2271, 2358), False, 'import numpy\n'), ((2444, 2521), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['em.data[50:53, 120:123, 40]', 'expected'], {'decimal': '(4)'}), '(em.data[50:53, 120:123, 40], expected, decimal=4)\n', (2471, 2521), True, 'import numpy.testing as np_test\n'), ((2577, 2674), 'numpy.array', 'numpy.array', (['[[-0.0573, 0.0569, 0.0386], [0.1309, 0.1211, -0.0881], [-0.011, -0.024, 0.0347]\n ]'], {}), '([[-0.0573, 0.0569, 0.0386], [0.1309, 0.1211, -0.0881], [-0.011,\n -0.024, 0.0347]])\n', (2588, 2674), False, 'import numpy\n'), ((2761, 2838), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['em.data[150:153, 20:23, 10]', 'expected'], {'decimal': '(4)'}), '(em.data[150:153, 20:23, 10], expected, decimal=4)\n', (2788, 2838), True, 'import numpy.testing as np_test\n'), ((2883, 2922), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['em.byteOrder', '"""<"""'], {}), "(em.byteOrder, '<')\n", (2903, 2922), True, 'import numpy.testing as np_test\n'), ((2931, 2971), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['em.arrayOrder', '"""F"""'], {}), "(em.arrayOrder, 'F')\n", (2951, 2971), True, 'import numpy.testing as np_test\n'), ((2980, 3024), 
'numpy.testing.assert_equal', 'np_test.assert_equal', (['em.dataType', '"""float32"""'], {}), "(em.dataType, 'float32')\n", (3000, 3024), True, 'import numpy.testing as np_test\n'), ((3101, 3138), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['em.memmap', '(True)'], {}), '(em.memmap, True)\n', (3121, 3138), True, 'import numpy.testing as np_test\n'), ((3178, 3187), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (3185, 3187), False, 'from pyto.io.image_io import ImageIO\n'), ((3256, 3295), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['em.byteOrder', '""">"""'], {}), "(em.byteOrder, '>')\n", (3276, 3295), True, 'import numpy.testing as np_test\n'), ((3338, 3347), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (3345, 3347), False, 'from pyto.io.image_io import ImageIO\n'), ((3415, 3454), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['em.byteOrder', '"""<"""'], {}), "(em.byteOrder, '<')\n", (3435, 3454), True, 'import numpy.testing as np_test\n'), ((3535, 3574), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['em.byteOrder', '"""<"""'], {}), "(em.byteOrder, '<')\n", (3555, 3574), True, 'import numpy.testing as np_test\n'), ((3609, 3618), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (3616, 3618), False, 'from pyto.io.image_io import ImageIO\n'), ((3697, 3795), 'numpy.array', 'numpy.array', (['[[-0.0242, -0.025, 0.0883], [0.064, 0.0071, -0.13], [-0.0421, -0.0392, -0.0312]\n ]'], {}), '([[-0.0242, -0.025, 0.0883], [0.064, 0.0071, -0.13], [-0.0421, -\n 0.0392, -0.0312]])\n', (3708, 3795), False, 'import numpy\n'), ((3881, 3959), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['mrc.data[50:53, 120:123, 40]', 'expected'], {'decimal': '(4)'}), '(mrc.data[50:53, 120:123, 40], expected, decimal=4)\n', (3908, 3959), True, 'import numpy.testing as np_test\n'), ((4015, 4112), 'numpy.array', 'numpy.array', (['[[-0.0573, 0.0569, 0.0386], [0.1309, 0.1211, -0.0881], [-0.011, -0.024, 0.0347]\n ]'], {}), '([[-0.0573, 0.0569, 0.0386], [0.1309, 0.1211, -0.0881], [-0.011,\n -0.024, 0.0347]])\n', (4026, 4112), False, 'import numpy\n'), ((4199, 4277), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['mrc.data[150:153, 20:23, 10]', 'expected'], {'decimal': '(4)'}), '(mrc.data[150:153, 20:23, 10], expected, decimal=4)\n', (4226, 4277), True, 'import numpy.testing as np_test\n'), ((4322, 4362), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.byteOrder', '"""<"""'], {}), "(mrc.byteOrder, '<')\n", (4342, 4362), True, 'import numpy.testing as np_test\n'), ((4371, 4412), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.arrayOrder', '"""F"""'], {}), "(mrc.arrayOrder, 'F')\n", (4391, 4412), True, 'import numpy.testing as np_test\n'), ((4421, 4466), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.dataType', '"""float32"""'], {}), "(mrc.dataType, 'float32')\n", (4441, 4466), True, 'import numpy.testing as np_test\n'), ((4544, 4583), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.memmap', '(False)'], {}), '(mrc.memmap, False)\n', (4564, 4583), True, 'import numpy.testing as np_test\n'), ((4630, 4639), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (4637, 4639), False, 'from pyto.io.image_io import ImageIO\n'), ((4731, 4829), 'numpy.array', 'numpy.array', (['[[-0.0242, -0.025, 0.0883], [0.064, 0.0071, -0.13], [-0.0421, -0.0392, -0.0312]\n ]'], {}), '([[-0.0242, -0.025, 0.0883], [0.064, 0.0071, -0.13], [-0.0421, -\n 0.0392, -0.0312]])\n', (4742, 4829), 
False, 'import numpy\n'), ((4915, 4993), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['mrc.data[50:53, 120:123, 40]', 'expected'], {'decimal': '(4)'}), '(mrc.data[50:53, 120:123, 40], expected, decimal=4)\n', (4942, 4993), True, 'import numpy.testing as np_test\n'), ((5049, 5146), 'numpy.array', 'numpy.array', (['[[-0.0573, 0.0569, 0.0386], [0.1309, 0.1211, -0.0881], [-0.011, -0.024, 0.0347]\n ]'], {}), '([[-0.0573, 0.0569, 0.0386], [0.1309, 0.1211, -0.0881], [-0.011,\n -0.024, 0.0347]])\n', (5060, 5146), False, 'import numpy\n'), ((5233, 5311), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['mrc.data[150:153, 20:23, 10]', 'expected'], {'decimal': '(4)'}), '(mrc.data[150:153, 20:23, 10], expected, decimal=4)\n', (5260, 5311), True, 'import numpy.testing as np_test\n'), ((5356, 5396), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.byteOrder', '"""<"""'], {}), "(mrc.byteOrder, '<')\n", (5376, 5396), True, 'import numpy.testing as np_test\n'), ((5405, 5446), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.arrayOrder', '"""F"""'], {}), "(mrc.arrayOrder, 'F')\n", (5425, 5446), True, 'import numpy.testing as np_test\n'), ((5455, 5500), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.dataType', '"""float32"""'], {}), "(mrc.dataType, 'float32')\n", (5475, 5500), True, 'import numpy.testing as np_test\n'), ((5578, 5616), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.memmap', '(True)'], {}), '(mrc.memmap, True)\n', (5598, 5616), True, 'import numpy.testing as np_test\n'), ((5672, 5681), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (5679, 5681), False, 'from pyto.io.image_io import ImageIO\n'), ((5778, 5876), 'numpy.array', 'numpy.array', (['[[-0.0242, -0.025, 0.0883], [0.064, 0.0071, -0.13], [-0.0421, -0.0392, -0.0312]\n ]'], {}), '([[-0.0242, -0.025, 0.0883], [0.064, 0.0071, -0.13], [-0.0421, -\n 0.0392, -0.0312]])\n', (5789, 5876), False, 'import numpy\n'), ((5962, 6040), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['mrc.data[50:53, 120:123, 40]', 'expected'], {'decimal': '(4)'}), '(mrc.data[50:53, 120:123, 40], expected, decimal=4)\n', (5989, 6040), True, 'import numpy.testing as np_test\n'), ((6096, 6193), 'numpy.array', 'numpy.array', (['[[-0.0573, 0.0569, 0.0386], [0.1309, 0.1211, -0.0881], [-0.011, -0.024, 0.0347]\n ]'], {}), '([[-0.0573, 0.0569, 0.0386], [0.1309, 0.1211, -0.0881], [-0.011,\n -0.024, 0.0347]])\n', (6107, 6193), False, 'import numpy\n'), ((6280, 6358), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['mrc.data[150:153, 20:23, 10]', 'expected'], {'decimal': '(4)'}), '(mrc.data[150:153, 20:23, 10], expected, decimal=4)\n', (6307, 6358), True, 'import numpy.testing as np_test\n'), ((6403, 6443), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.byteOrder', '"""<"""'], {}), "(mrc.byteOrder, '<')\n", (6423, 6443), True, 'import numpy.testing as np_test\n'), ((6452, 6493), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.arrayOrder', '"""F"""'], {}), "(mrc.arrayOrder, 'F')\n", (6472, 6493), True, 'import numpy.testing as np_test\n'), ((6502, 6547), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.dataType', '"""float32"""'], {}), "(mrc.dataType, 'float32')\n", (6522, 6547), True, 'import numpy.testing as np_test\n'), ((6625, 6664), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.memmap', '(False)'], {}), '(mrc.memmap, False)\n', (6645, 6664), True, 'import numpy.testing as 
np_test\n'), ((6673, 6725), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.extendedHeaderLength', '(5120)'], {}), '(mrc.extendedHeaderLength, 5120)\n', (6693, 6725), True, 'import numpy.testing as np_test\n'), ((6797, 6806), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (6804, 6806), False, 'from pyto.io.image_io import ImageIO\n'), ((6902, 7000), 'numpy.array', 'numpy.array', (['[[-0.0242, -0.025, 0.0883], [0.064, 0.0071, -0.13], [-0.0421, -0.0392, -0.0312]\n ]'], {}), '([[-0.0242, -0.025, 0.0883], [0.064, 0.0071, -0.13], [-0.0421, -\n 0.0392, -0.0312]])\n', (6913, 7000), False, 'import numpy\n'), ((7086, 7164), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['mrc.data[50:53, 120:123, 40]', 'expected'], {'decimal': '(4)'}), '(mrc.data[50:53, 120:123, 40], expected, decimal=4)\n', (7113, 7164), True, 'import numpy.testing as np_test\n'), ((7220, 7317), 'numpy.array', 'numpy.array', (['[[-0.0573, 0.0569, 0.0386], [0.1309, 0.1211, -0.0881], [-0.011, -0.024, 0.0347]\n ]'], {}), '([[-0.0573, 0.0569, 0.0386], [0.1309, 0.1211, -0.0881], [-0.011,\n -0.024, 0.0347]])\n', (7231, 7317), False, 'import numpy\n'), ((7404, 7482), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['mrc.data[150:153, 20:23, 10]', 'expected'], {'decimal': '(4)'}), '(mrc.data[150:153, 20:23, 10], expected, decimal=4)\n', (7431, 7482), True, 'import numpy.testing as np_test\n'), ((7527, 7567), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.byteOrder', '"""<"""'], {}), "(mrc.byteOrder, '<')\n", (7547, 7567), True, 'import numpy.testing as np_test\n'), ((7576, 7617), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.arrayOrder', '"""F"""'], {}), "(mrc.arrayOrder, 'F')\n", (7596, 7617), True, 'import numpy.testing as np_test\n'), ((7626, 7671), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.dataType', '"""float32"""'], {}), "(mrc.dataType, 'float32')\n", (7646, 7671), True, 'import numpy.testing as np_test\n'), ((7749, 7787), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.memmap', '(True)'], {}), '(mrc.memmap, True)\n', (7769, 7787), True, 'import numpy.testing as np_test\n'), ((7796, 7848), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.extendedHeaderLength', '(5120)'], {}), '(mrc.extendedHeaderLength, 5120)\n', (7816, 7848), True, 'import numpy.testing as np_test\n'), ((7910, 7919), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (7917, 7919), False, 'from pyto.io.image_io import ImageIO\n'), ((8001, 8103), 'numpy.array', 'numpy.array', (['[[-0.0329, -0.0006, -0.0698], [-0.0101, -0.1196, -0.1295], [0.0844, -0.04, \n -0.0716]]'], {}), '([[-0.0329, -0.0006, -0.0698], [-0.0101, -0.1196, -0.1295], [\n 0.0844, -0.04, -0.0716]])\n', (8012, 8103), False, 'import numpy\n'), ((8185, 8263), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['mrc.data[50:53, 120:123, 40]', 'expected'], {'decimal': '(4)'}), '(mrc.data[50:53, 120:123, 40], expected, decimal=4)\n', (8212, 8263), True, 'import numpy.testing as np_test\n'), ((8319, 8418), 'numpy.array', 'numpy.array', (['[[-0.0019, -0.0085, 0.0036], [0.0781, 0.0279, -0.0365], [0.021, -0.0193, -\n 0.0355]]'], {}), '([[-0.0019, -0.0085, 0.0036], [0.0781, 0.0279, -0.0365], [0.021,\n -0.0193, -0.0355]])\n', (8330, 8418), False, 'import numpy\n'), ((8502, 8580), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['mrc.data[150:153, 20:23, 60]', 'expected'], {'decimal': '(4)'}), '(mrc.data[150:153, 20:23, 60], expected, 
decimal=4)\n', (8529, 8580), True, 'import numpy.testing as np_test\n'), ((8625, 8670), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.dataType', '"""float32"""'], {}), "(mrc.dataType, 'float32')\n", (8645, 8670), True, 'import numpy.testing as np_test\n'), ((8748, 8787), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.memmap', '(False)'], {}), '(mrc.memmap, False)\n', (8768, 8787), True, 'import numpy.testing as np_test\n'), ((8861, 8870), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (8868, 8870), False, 'from pyto.io.image_io import ImageIO\n'), ((8965, 9067), 'numpy.array', 'numpy.array', (['[[-0.0329, -0.0006, -0.0698], [-0.0101, -0.1196, -0.1295], [0.0844, -0.04, \n -0.0716]]'], {}), '([[-0.0329, -0.0006, -0.0698], [-0.0101, -0.1196, -0.1295], [\n 0.0844, -0.04, -0.0716]])\n', (8976, 9067), False, 'import numpy\n'), ((9149, 9227), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['mrc.data[50:53, 120:123, 40]', 'expected'], {'decimal': '(4)'}), '(mrc.data[50:53, 120:123, 40], expected, decimal=4)\n', (9176, 9227), True, 'import numpy.testing as np_test\n'), ((9283, 9382), 'numpy.array', 'numpy.array', (['[[-0.0019, -0.0085, 0.0036], [0.0781, 0.0279, -0.0365], [0.021, -0.0193, -\n 0.0355]]'], {}), '([[-0.0019, -0.0085, 0.0036], [0.0781, 0.0279, -0.0365], [0.021,\n -0.0193, -0.0355]])\n', (9294, 9382), False, 'import numpy\n'), ((9466, 9544), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['mrc.data[150:153, 20:23, 60]', 'expected'], {'decimal': '(4)'}), '(mrc.data[150:153, 20:23, 60], expected, decimal=4)\n', (9493, 9544), True, 'import numpy.testing as np_test\n'), ((9589, 9634), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.dataType', '"""float32"""'], {}), "(mrc.dataType, 'float32')\n", (9609, 9634), True, 'import numpy.testing as np_test\n'), ((9712, 9750), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc.memmap', '(True)'], {}), '(mrc.memmap, True)\n', (9732, 9750), True, 'import numpy.testing as np_test\n'), ((9810, 9819), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (9817, 9819), False, 'from pyto.io.image_io import ImageIO\n'), ((9912, 10112), 'numpy.array', 'numpy.array', (['[[0.0, 0.0], [-0.341, -6.702], [0.782, -11.78], [0.327, -14.298], [-0.691, \n -17.411], [-0.337, -18.076], [-0.669, -19.157], [-0.799, -20.4], [-\n 0.793, -21.286], [-1.008, -21.386]]'], {}), '([[0.0, 0.0], [-0.341, -6.702], [0.782, -11.78], [0.327, -14.298\n ], [-0.691, -17.411], [-0.337, -18.076], [-0.669, -19.157], [-0.799, -\n 20.4], [-0.793, -21.286], [-1.008, -21.386]])\n', (9923, 10112), False, 'import numpy\n'), ((10262, 10330), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['mrc2.data[:, :, 0]', 'expected'], {'decimal': '(3)'}), '(mrc2.data[:, :, 0], expected, decimal=3)\n', (10289, 10330), True, 'import numpy.testing as np_test\n'), ((10337, 10377), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc2.memmap', '(False)'], {}), '(mrc2.memmap, False)\n', (10357, 10377), True, 'import numpy.testing as np_test\n'), ((10599, 10633), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['raised', '(True)'], {}), '(raised, True)\n', (10619, 10633), True, 'import numpy.testing as np_test\n'), ((10642, 10681), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc2.memmap', '(True)'], {}), '(mrc2.memmap, True)\n', (10662, 10681), True, 'import numpy.testing as np_test\n'), ((10733, 10742), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', 
(10740, 10742), False, 'from pyto.io.image_io import ImageIO\n'), ((10823, 10870), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.dataType', '"""int16"""'], {}), "(mrc_new.dataType, 'int16')\n", (10843, 10870), True, 'import numpy.testing as np_test\n'), ((10950, 10994), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.byteOrder', '"""<"""'], {}), "(mrc_new.byteOrder, '<')\n", (10970, 10994), True, 'import numpy.testing as np_test\n'), ((11003, 11048), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.arrayOrder', '"""F"""'], {}), "(mrc_new.arrayOrder, 'F')\n", (11023, 11048), True, 'import numpy.testing as np_test\n'), ((11057, 11106), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.shape', '(40, 30, 20)'], {}), '(mrc_new.shape, (40, 30, 20))\n', (11077, 11106), True, 'import numpy.testing as np_test\n'), ((11113, 11165), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.pixel', '[0.4, 0.4, 0.4]'], {}), '(mrc_new.pixel, [0.4, 0.4, 0.4])\n', (11133, 11165), True, 'import numpy.testing as np_test\n'), ((11174, 11218), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.pixelsize', '(0.4)'], {}), '(mrc_new.pixelsize, 0.4)\n', (11194, 11218), True, 'import numpy.testing as np_test\n'), ((11227, 11277), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.data[14, 8, 10]', '(-14)'], {}), '(mrc_new.data[14, 8, 10], -14)\n', (11247, 11277), True, 'import numpy.testing as np_test\n'), ((11284, 11334), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.data[15, 23, 12]', '(10)'], {}), '(mrc_new.data[15, 23, 12], 10)\n', (11304, 11334), True, 'import numpy.testing as np_test\n'), ((11341, 11390), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.data[23, 29, 16]', '(2)'], {}), '(mrc_new.data[23, 29, 16], 2)\n', (11361, 11390), True, 'import numpy.testing as np_test\n'), ((11397, 11440), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.memmap', '(False)'], {}), '(mrc_new.memmap, False)\n', (11417, 11440), True, 'import numpy.testing as np_test\n'), ((11491, 11500), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (11498, 11500), False, 'from pyto.io.image_io import ImageIO\n'), ((11607, 11654), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.dataType', '"""int16"""'], {}), "(mrc_new.dataType, 'int16')\n", (11627, 11654), True, 'import numpy.testing as np_test\n'), ((11734, 11778), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.byteOrder', '"""<"""'], {}), "(mrc_new.byteOrder, '<')\n", (11754, 11778), True, 'import numpy.testing as np_test\n'), ((11787, 11832), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.arrayOrder', '"""F"""'], {}), "(mrc_new.arrayOrder, 'F')\n", (11807, 11832), True, 'import numpy.testing as np_test\n'), ((11841, 11890), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.shape', '(40, 30, 20)'], {}), '(mrc_new.shape, (40, 30, 20))\n', (11861, 11890), True, 'import numpy.testing as np_test\n'), ((11897, 11949), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.pixel', '[0.4, 0.4, 0.4]'], {}), '(mrc_new.pixel, [0.4, 0.4, 0.4])\n', (11917, 11949), True, 'import numpy.testing as np_test\n'), ((11958, 12002), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.pixelsize', '(0.4)'], {}), '(mrc_new.pixelsize, 0.4)\n', (11978, 12002), True, 'import numpy.testing as np_test\n'), ((12011, 12061), 'numpy.testing.assert_equal', 
'np_test.assert_equal', (['mrc_new.data[14, 8, 10]', '(-14)'], {}), '(mrc_new.data[14, 8, 10], -14)\n', (12031, 12061), True, 'import numpy.testing as np_test\n'), ((12068, 12118), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.data[15, 23, 12]', '(10)'], {}), '(mrc_new.data[15, 23, 12], 10)\n', (12088, 12118), True, 'import numpy.testing as np_test\n'), ((12125, 12174), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.data[23, 29, 16]', '(2)'], {}), '(mrc_new.data[23, 29, 16], 2)\n', (12145, 12174), True, 'import numpy.testing as np_test\n'), ((12181, 12223), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.memmap', '(True)'], {}), '(mrc_new.memmap, True)\n', (12201, 12223), True, 'import numpy.testing as np_test\n'), ((12232, 12273), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['mrc_new.n_labels', '(9)'], {}), '(mrc_new.n_labels, 9)\n', (12252, 12273), True, 'import numpy.testing as np_test\n'), ((12868, 12877), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (12875, 12877), False, 'from pyto.io.image_io import ImageIO\n'), ((13001, 13046), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['raw.data', 'self.raw_data'], {}), '(raw.data, self.raw_data)\n', (13021, 13046), True, 'import numpy.testing as np_test\n'), ((13055, 13094), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['raw.memmap', '(False)'], {}), '(raw.memmap, False)\n', (13075, 13094), True, 'import numpy.testing as np_test\n'), ((13146, 13155), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (13153, 13155), False, 'from pyto.io.image_io import ImageIO\n'), ((13292, 13337), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['raw.data', 'self.raw_data'], {}), '(raw.data, self.raw_data)\n', (13312, 13337), True, 'import numpy.testing as np_test\n'), ((13346, 13384), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['raw.memmap', '(True)'], {}), '(raw.memmap, True)\n', (13366, 13384), True, 'import numpy.testing as np_test\n'), ((15038, 15047), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (15045, 15047), False, 'from pyto.io.image_io import ImageIO\n'), ((15145, 15154), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (15152, 15154), False, 'from pyto.io.image_io import ImageIO\n'), ((15225, 15272), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""uint8"""'], {}), "(file_in.dataType, 'uint8')\n", (15245, 15272), True, 'import numpy.testing as np_test\n'), ((15281, 15325), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data', 'ar_uint8'], {}), '(file_in.data, ar_uint8)\n', (15301, 15325), True, 'import numpy.testing as np_test\n'), ((15366, 15375), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (15373, 15375), False, 'from pyto.io.image_io import ImageIO\n'), ((15474, 15483), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (15481, 15483), False, 'from pyto.io.image_io import ImageIO\n'), ((15554, 15602), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""uint16"""'], {}), "(file_in.dataType, 'uint16')\n", (15574, 15602), True, 'import numpy.testing as np_test\n'), ((15611, 15656), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data', 'ar_uint16'], {}), '(file_in.data, ar_uint16)\n', (15631, 15656), True, 'import numpy.testing as np_test\n'), ((15729, 15738), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (15736, 15738), False, 'from pyto.io.image_io import ImageIO\n'), ((15895, 15904), 
'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (15902, 15904), False, 'from pyto.io.image_io import ImageIO\n'), ((15975, 16022), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""int32"""'], {}), "(file_in.dataType, 'int32')\n", (15995, 16022), True, 'import numpy.testing as np_test\n'), ((16031, 16075), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data', 'ar_int16'], {}), '(file_in.data, ar_int16)\n', (16051, 16075), True, 'import numpy.testing as np_test\n'), ((16130, 16139), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (16137, 16139), False, 'from pyto.io.image_io import ImageIO\n'), ((16406, 16415), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (16413, 16415), False, 'from pyto.io.image_io import ImageIO\n'), ((16643, 16652), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (16650, 16652), False, 'from pyto.io.image_io import ImageIO\n'), ((16723, 16771), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""uint16"""'], {}), "(file_in.dataType, 'uint16')\n", (16743, 16771), True, 'import numpy.testing as np_test\n'), ((16852, 16923), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['(file_in.data[0, 1, 0] == ar_int16[0, 1, 0])', '(False)'], {}), '(file_in.data[0, 1, 0] == ar_int16[0, 1, 0], False)\n', (16872, 16923), True, 'import numpy.testing as np_test\n'), ((16984, 16993), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (16991, 16993), False, 'from pyto.io.image_io import ImageIO\n'), ((17272, 17281), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (17279, 17281), False, 'from pyto.io.image_io import ImageIO\n'), ((17598, 17607), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (17605, 17607), False, 'from pyto.io.image_io import ImageIO\n'), ((17928, 17937), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (17935, 17937), False, 'from pyto.io.image_io import ImageIO\n'), ((18097, 18106), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (18104, 18106), False, 'from pyto.io.image_io import ImageIO\n'), ((18177, 18224), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""int32"""'], {}), "(file_in.dataType, 'int32')\n", (18197, 18224), True, 'import numpy.testing as np_test\n'), ((18300, 18371), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['(file_in.data[0, 0, 0] == ar_uint32[0, 0, 0])', '(True)'], {}), '(file_in.data[0, 0, 0] == ar_uint32[0, 0, 0], True)\n', (18320, 18371), True, 'import numpy.testing as np_test\n'), ((18376, 18448), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['(file_in.data[1, 1, 1] == ar_uint32[1, 1, 1])', '(False)'], {}), '(file_in.data[1, 1, 1] == ar_uint32[1, 1, 1], False)\n', (18396, 18448), True, 'import numpy.testing as np_test\n'), ((18511, 18520), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (18518, 18520), False, 'from pyto.io.image_io import ImageIO\n'), ((18802, 18811), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (18809, 18811), False, 'from pyto.io.image_io import ImageIO\n'), ((18973, 18982), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (18980, 18982), False, 'from pyto.io.image_io import ImageIO\n'), ((19053, 19102), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""float32"""'], {}), "(file_in.dataType, 'float32')\n", (19073, 19102), True, 'import numpy.testing as np_test\n'), ((19186, 19257), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['(file_in.data[0, 0, 
0] == ar_uint32[0, 0, 0])', '(True)'], {}), '(file_in.data[0, 0, 0] == ar_uint32[0, 0, 0], True)\n', (19206, 19257), True, 'import numpy.testing as np_test\n'), ((19275, 19347), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['(file_in.data[1, 1, 1] == ar_uint32[1, 1, 1])', '(False)'], {}), '(file_in.data[1, 1, 1] == ar_uint32[1, 1, 1], False)\n', (19295, 19347), True, 'import numpy.testing as np_test\n'), ((19423, 19432), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (19430, 19432), False, 'from pyto.io.image_io import ImageIO\n'), ((19593, 19602), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (19600, 19602), False, 'from pyto.io.image_io import ImageIO\n'), ((19673, 19722), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""float32"""'], {}), "(file_in.dataType, 'float32')\n", (19693, 19722), True, 'import numpy.testing as np_test\n'), ((19805, 19875), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['(file_in.data[0, 0, 0] == ar_int32[0, 0, 0])', '(True)'], {}), '(file_in.data[0, 0, 0] == ar_int32[0, 0, 0], True)\n', (19825, 19875), True, 'import numpy.testing as np_test\n'), ((19893, 19964), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['(file_in.data[1, 0, 1] == ar_int32[1, 0, 1])', '(False)'], {}), '(file_in.data[1, 0, 1] == ar_int32[1, 0, 1], False)\n', (19913, 19964), True, 'import numpy.testing as np_test\n'), ((20038, 20047), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (20045, 20047), False, 'from pyto.io.image_io import ImageIO\n'), ((20206, 20215), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (20213, 20215), False, 'from pyto.io.image_io import ImageIO\n'), ((20286, 20335), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""float64"""'], {}), "(file_in.dataType, 'float64')\n", (20306, 20335), True, 'import numpy.testing as np_test\n'), ((20344, 20395), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['file_in.data', 'ar_int32'], {}), '(file_in.data, ar_int32)\n', (20371, 20395), True, 'import numpy.testing as np_test\n'), ((20460, 20469), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (20467, 20469), False, 'from pyto.io.image_io import ImageIO\n'), ((20627, 20636), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (20634, 20636), False, 'from pyto.io.image_io import ImageIO\n'), ((20708, 20755), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""int16"""'], {}), "(file_in.dataType, 'int16')\n", (20728, 20755), True, 'import numpy.testing as np_test\n'), ((20764, 20810), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.shape', '(2, 3, 4)'], {}), '(file_in.shape, (2, 3, 4))\n', (20784, 20810), True, 'import numpy.testing as np_test\n'), ((20890, 20899), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (20897, 20899), False, 'from pyto.io.image_io import ImageIO\n'), ((21020, 21029), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (21027, 21029), False, 'from pyto.io.image_io import ImageIO\n'), ((21101, 21148), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""int16"""'], {}), "(file_in.dataType, 'int16')\n", (21121, 21148), True, 'import numpy.testing as np_test\n'), ((21157, 21203), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.shape', '(4, 3, 2)'], {}), '(file_in.shape, (4, 3, 2))\n', (21177, 21203), True, 'import numpy.testing as np_test\n'), ((21272, 21281), 'pyto.io.image_io.ImageIO', 'ImageIO', 
([], {}), '()\n', (21279, 21281), False, 'from pyto.io.image_io import ImageIO\n'), ((21468, 21477), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (21475, 21477), False, 'from pyto.io.image_io import ImageIO\n'), ((21549, 21596), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""int16"""'], {}), "(file_in.dataType, 'int16')\n", (21569, 21596), True, 'import numpy.testing as np_test\n'), ((21605, 21651), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.shape', '(2, 3, 4)'], {}), '(file_in.shape, (2, 3, 4))\n', (21625, 21651), True, 'import numpy.testing as np_test\n'), ((21714, 21723), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (21721, 21723), False, 'from pyto.io.image_io import ImageIO\n'), ((21847, 21856), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (21854, 21856), False, 'from pyto.io.image_io import ImageIO\n'), ((21928, 21975), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""int16"""'], {}), "(file_in.dataType, 'int16')\n", (21948, 21975), True, 'import numpy.testing as np_test\n'), ((21984, 22030), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.shape', '(4, 3, 2)'], {}), '(file_in.shape, (4, 3, 2))\n', (22004, 22030), True, 'import numpy.testing as np_test\n'), ((22084, 22093), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (22091, 22093), False, 'from pyto.io.image_io import ImageIO\n'), ((22192, 22201), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (22199, 22201), False, 'from pyto.io.image_io import ImageIO\n'), ((22273, 22320), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""ubyte"""'], {}), "(file_in.dataType, 'ubyte')\n", (22293, 22320), True, 'import numpy.testing as np_test\n'), ((22329, 22380), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['file_in.data', 'ar_uint8'], {}), '(file_in.data, ar_uint8)\n', (22356, 22380), True, 'import numpy.testing as np_test\n'), ((22423, 22432), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (22430, 22432), False, 'from pyto.io.image_io import ImageIO\n'), ((22669, 22678), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (22676, 22678), False, 'from pyto.io.image_io import ImageIO\n'), ((22958, 22967), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (22965, 22967), False, 'from pyto.io.image_io import ImageIO\n'), ((23128, 23137), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (23135, 23137), False, 'from pyto.io.image_io import ImageIO\n'), ((23209, 23256), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""int16"""'], {}), "(file_in.dataType, 'int16')\n", (23229, 23256), True, 'import numpy.testing as np_test\n'), ((23340, 23411), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['(file_in.data[0, 0, 0] == ar_uint16[0, 0, 0])', '(True)'], {}), '(file_in.data[0, 0, 0] == ar_uint16[0, 0, 0], True)\n', (23360, 23411), True, 'import numpy.testing as np_test\n'), ((23416, 23488), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['(file_in.data[0, 1, 1] == ar_uint16[0, 1, 1])', '(False)'], {}), '(file_in.data[0, 1, 1] == ar_uint16[0, 1, 1], False)\n', (23436, 23488), True, 'import numpy.testing as np_test\n'), ((23525, 23534), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (23532, 23534), False, 'from pyto.io.image_io import ImageIO\n'), ((23657, 23666), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (23664, 23666), False, 'from 
pyto.io.image_io import ImageIO\n'), ((23738, 23785), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""int16"""'], {}), "(file_in.dataType, 'int16')\n", (23758, 23785), True, 'import numpy.testing as np_test\n'), ((23794, 23838), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data', 'ar_int16'], {}), '(file_in.data, ar_int16)\n', (23814, 23838), True, 'import numpy.testing as np_test\n'), ((23847, 23899), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.pixel', '[2.3, 2.3, 2.3]'], {}), '(file_in.pixel, [2.3, 2.3, 2.3])\n', (23867, 23899), True, 'import numpy.testing as np_test\n'), ((23908, 23952), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.pixelsize', '(2.3)'], {}), '(file_in.pixelsize, 2.3)\n', (23928, 23952), True, 'import numpy.testing as np_test\n'), ((23996, 24005), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (24003, 24005), False, 'from pyto.io.image_io import ImageIO\n'), ((24129, 24138), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (24136, 24138), False, 'from pyto.io.image_io import ImageIO\n'), ((24210, 24257), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""int16"""'], {}), "(file_in.dataType, 'int16')\n", (24230, 24257), True, 'import numpy.testing as np_test\n'), ((24266, 24320), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data[:, :, 0]', 'ar2_int16'], {}), '(file_in.data[:, :, 0], ar2_int16)\n', (24286, 24320), True, 'import numpy.testing as np_test\n'), ((24335, 24379), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.pixelsize', '(3.4)'], {}), '(file_in.pixelsize, 3.4)\n', (24355, 24379), True, 'import numpy.testing as np_test\n'), ((24428, 24437), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (24435, 24437), False, 'from pyto.io.image_io import ImageIO\n'), ((24594, 24603), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (24601, 24603), False, 'from pyto.io.image_io import ImageIO\n'), ((24675, 24722), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""int16"""'], {}), "(file_in.dataType, 'int16')\n", (24695, 24722), True, 'import numpy.testing as np_test\n'), ((24731, 24774), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data', 'ar_int8'], {}), '(file_in.data, ar_int8)\n', (24751, 24774), True, 'import numpy.testing as np_test\n'), ((24816, 24825), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (24823, 24825), False, 'from pyto.io.image_io import ImageIO\n'), ((25046, 25055), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (25053, 25055), False, 'from pyto.io.image_io import ImageIO\n'), ((25319, 25328), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (25326, 25328), False, 'from pyto.io.image_io import ImageIO\n'), ((25596, 25605), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (25603, 25605), False, 'from pyto.io.image_io import ImageIO\n'), ((25862, 25871), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (25869, 25871), False, 'from pyto.io.image_io import ImageIO\n'), ((25970, 25979), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (25977, 25979), False, 'from pyto.io.image_io import ImageIO\n'), ((26109, 26156), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""int16"""'], {}), "(file_in.dataType, 'int16')\n", (26129, 26156), True, 'import numpy.testing as np_test\n'), ((26165, 26209), 'numpy.testing.assert_equal', 
'np_test.assert_equal', (['file_in.data', 'ar_int16'], {}), '(file_in.data, ar_int16)\n', (26185, 26209), True, 'import numpy.testing as np_test\n'), ((26258, 26267), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (26265, 26267), False, 'from pyto.io.image_io import ImageIO\n'), ((26408, 26417), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (26415, 26417), False, 'from pyto.io.image_io import ImageIO\n'), ((26547, 26594), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.dataType', '"""int16"""'], {}), "(file_in.dataType, 'int16')\n", (26567, 26594), True, 'import numpy.testing as np_test\n'), ((26603, 26646), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data', 'ar_int8'], {}), '(file_in.data, ar_int8)\n', (26623, 26646), True, 'import numpy.testing as np_test\n'), ((26695, 26704), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (26702, 26704), False, 'from pyto.io.image_io import ImageIO\n'), ((27143, 27152), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (27150, 27152), False, 'from pyto.io.image_io import ImageIO\n'), ((27294, 27303), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (27301, 27303), False, 'from pyto.io.image_io import ImageIO\n'), ((27393, 27444), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data.shape', '(2, 2, 2)'], {}), '(file_in.data.shape, (2, 2, 2))\n', (27413, 27444), True, 'import numpy.testing as np_test\n'), ((27462, 27471), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (27469, 27471), False, 'from pyto.io.image_io import ImageIO\n'), ((27628, 27637), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (27635, 27637), False, 'from pyto.io.image_io import ImageIO\n'), ((27727, 27778), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data.shape', '(1, 4, 2)'], {}), '(file_in.data.shape, (1, 4, 2))\n', (27747, 27778), True, 'import numpy.testing as np_test\n'), ((28071, 28122), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data.shape', '(4, 2, 1)'], {}), '(file_in.data.shape, (4, 2, 1))\n', (28091, 28122), True, 'import numpy.testing as np_test\n'), ((28250, 28301), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data.shape', '(2, 2, 2)'], {}), '(file_in.data.shape, (2, 2, 2))\n', (28270, 28301), True, 'import numpy.testing as np_test\n'), ((28368, 28377), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (28375, 28377), False, 'from pyto.io.image_io import ImageIO\n'), ((28491, 28500), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (28498, 28500), False, 'from pyto.io.image_io import ImageIO\n'), ((28572, 28618), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data', 'ar_int16_c'], {}), '(file_in.data, ar_int16_c)\n', (28592, 28618), True, 'import numpy.testing as np_test\n'), ((28677, 28686), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (28684, 28686), False, 'from pyto.io.image_io import ImageIO\n'), ((28828, 28837), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (28835, 28837), False, 'from pyto.io.image_io import ImageIO\n'), ((28925, 28971), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data', 'ar_int16_c'], {}), '(file_in.data, ar_int16_c)\n', (28945, 28971), True, 'import numpy.testing as np_test\n'), ((29040, 29049), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (29047, 29049), False, 'from pyto.io.image_io import ImageIO\n'), ((29163, 29172), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', 
(29170, 29172), False, 'from pyto.io.image_io import ImageIO\n'), ((29244, 29290), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data', 'ar_int16_f'], {}), '(file_in.data, ar_int16_f)\n', (29264, 29290), True, 'import numpy.testing as np_test\n'), ((29349, 29358), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (29356, 29358), False, 'from pyto.io.image_io import ImageIO\n'), ((29500, 29509), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (29507, 29509), False, 'from pyto.io.image_io import ImageIO\n'), ((29597, 29643), 'numpy.testing.assert_equal', 'np_test.assert_equal', (['file_in.data', 'ar_int16_f'], {}), '(file_in.data, ar_int16_f)\n', (29617, 29643), True, 'import numpy.testing as np_test\n'), ((29928, 29937), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (29935, 29937), False, 'from pyto.io.image_io import ImageIO\n'), ((30074, 30083), 'pyto.io.image_io.ImageIO', 'ImageIO', ([], {}), '()\n', (30081, 30083), False, 'from pyto.io.image_io import ImageIO\n'), ((30155, 30202), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['file_in.pixel', '(2.1)'], {}), '(file_in.pixel, 2.1)\n', (30182, 30202), True, 'import numpy.testing as np_test\n'), ((2065, 2087), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (2076, 2087), False, 'import numpy\n'), ((3069, 3091), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (3080, 3091), False, 'import numpy\n'), ((4512, 4534), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (4523, 4534), False, 'import numpy\n'), ((5546, 5568), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (5557, 5568), False, 'import numpy\n'), ((6593, 6615), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (6604, 6615), False, 'import numpy\n'), ((7717, 7739), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (7728, 7739), False, 'import numpy\n'), ((8716, 8738), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (8727, 8738), False, 'import numpy\n'), ((9680, 9702), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (9691, 9702), False, 'import numpy\n'), ((10920, 10940), 'numpy.dtype', 'numpy.dtype', (['"""int16"""'], {}), "('int16')\n", (10931, 10940), False, 'import numpy\n'), ((11704, 11724), 'numpy.dtype', 'numpy.dtype', (['"""int16"""'], {}), "('int16')\n", (11715, 11724), False, 'import numpy\n'), ((16821, 16842), 'numpy.dtype', 'numpy.dtype', (['"""uint16"""'], {}), "('uint16')\n", (16832, 16842), False, 'import numpy\n'), ((30782, 30803), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (30801, 30803), False, 'import unittest\n'), ((30843, 30879), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (30866, 30879), False, 'import unittest\n'), ((838, 876), 'numpy.arange', 'numpy.arange', (['(24)'], {'dtype': 'self.raw_dtype'}), '(24, dtype=self.raw_dtype)\n', (850, 876), False, 'import numpy\n'), ((13544, 13596), 'numpy.array', 'numpy.array', (['[54, 200, 5, 7, 45, 123]'], {'dtype': '"""uint8"""'}), "([54, 200, 5, 7, 45, 123], dtype='uint8')\n", (13555, 13596), False, 'import numpy\n'), ((13664, 13715), 'numpy.array', 'numpy.array', (['[54, 2, -5, 7, 45, 123]'], {'dtype': '"""uint8"""'}), "([54, 2, -5, 7, 45, 123], dtype='uint8')\n", (13675, 13715), False, 'import numpy\n'), ((13785, 13859), 'numpy.array', 'numpy.array', (['[1034, 546, 248, 40000, 2345, 365, 4876, 
563]'], {'dtype': '"""uint16"""'}), "([1034, 546, 248, 40000, 2345, 365, 4876, 563], dtype='uint16')\n", (13796, 13859), False, 'import numpy\n'), ((13927, 14000), 'numpy.array', 'numpy.array', (['[1034, 546, -248, 156, 2345, 365, -4876, 563]'], {'dtype': '"""int16"""'}), "([1034, 546, -248, 156, 2345, 365, -4876, 563], dtype='int16')\n", (13938, 14000), False, 'import numpy\n'), ((14068, 14161), 'numpy.array', 'numpy.array', (['[1034, 56546, -223448, 156, 2345, 2 ** 31 - 10, -884876, 563]'], {'dtype': '"""int32"""'}), "([1034, 56546, -223448, 156, 2345, 2 ** 31 - 10, -884876, 563],\n dtype='int32')\n", (14079, 14161), False, 'import numpy\n'), ((14255, 14347), 'numpy.array', 'numpy.array', (['[1034, 56546, 223448, 156, 2345, 365, 884876, 2 ** 32 - 10]'], {'dtype': '"""uint32"""'}), "([1034, 56546, 223448, 156, 2345, 365, 884876, 2 ** 32 - 10],\n dtype='uint32')\n", (14266, 14347), False, 'import numpy\n'), ((14442, 14472), 'numpy.arange', 'numpy.arange', (['(24)'], {'dtype': '"""int8"""'}), "(24, dtype='int8')\n", (14454, 14472), False, 'import numpy\n'), ((14511, 14542), 'numpy.arange', 'numpy.arange', (['(24)'], {'dtype': '"""int16"""'}), "(24, dtype='int16')\n", (14523, 14542), False, 'import numpy\n'), ((14580, 14653), 'numpy.array', 'numpy.array', (['[1034, 546, -248, 156, 2345, 365, -4876, 563]'], {'dtype': '"""int16"""'}), "([1034, 546, -248, 156, 2345, 365, -4876, 563], dtype='int16')\n", (14591, 14653), False, 'import numpy\n'), ((14722, 14810), 'numpy.array', 'numpy.array', (['[1034, 546, -248, 156, 2345, 365, -4876, 563]'], {'dtype': '"""int16"""', 'order': '"""F"""'}), "([1034, 546, -248, 156, 2345, 365, -4876, 563], dtype='int16',\n order='F')\n", (14733, 14810), False, 'import numpy\n'), ((14870, 14958), 'numpy.array', 'numpy.array', (['[1034, 546, -248, 156, 2345, 365, -4876, 563]'], {'dtype': '"""int16"""', 'order': '"""C"""'}), "([1034, 546, -248, 156, 2345, 365, -4876, 563], dtype='int16',\n order='C')\n", (14881, 14958), False, 'import numpy\n'), ((29849, 29880), 'numpy.arange', 'numpy.arange', (['(24)'], {'dtype': '"""int16"""'}), "(24, dtype='int16')\n", (29861, 29880), False, 'import numpy\n')]
|
import networkx as nx
import numpy as np
import pickle
# Plotting imports needed by the 3-D drawing code further down (assumed; they are
# not present in this fragment).
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
from mpl_toolkits.mplot3d import Axes3D
G = nx.Graph()
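# NOTE: `graph_input` is not defined in this fragment; it is assumed to be the path to a
# whitespace-delimited edge list with one "node1 node2" pair per line.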
node1, node2 = np.loadtxt(graph_input, usecols=(0,1), unpack=True)
for i in range(len(node1)):
G.add_edge(node1[i], node2[i])
graph_num_node = G.number_of_nodes()
print(f"This graph contains {graph_num_node} nodes. ")
graph_num_edge = G.number_of_edges()
print(f"This graph contains {graph_num_edge} edges. ")
node_bet_central = nx.betweenness_centrality(G)
with open("node_betweenness_centrality.pkl", 'wb') as fh:
    pickle.dump(node_bet_central, fh)
res = np.array([(int(key), node_bet_central[key]) for key in node_bet_central.keys() ])
res_sorted = res[res[:,0].argsort()]
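# `res_sorted` holds (node id, betweenness centrality) rows ordered by node id.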
# NOTE: `idx`, `x`, `y`, `z` (node ids and their 3-D coordinates) are assumed to be
# defined elsewhere; they are not set in this fragment.
# The two statements below are dead code at this point (`ax` does not exist yet, and the
# dict is immediately overwritten by the explicit loop), so they are kept commented out:
# ax.xaxis.set_minor_locator(MultipleLocator(10))
# pos = dict(zip(idx.astype(int), np.column_stack((x, y, z))))
pos = {}
for i in range(len(idx)):
pos[str(int(idx[i]))] = (x[i], y[i], z[i])
position = {}
for key in pos.keys():
    position[key] = {'posi': pos[key]}
nx.set_node_attributes(G, position)
pos = nx.get_node_attributes(G, 'posi')
n = G.number_of_nodes()
degrees = [val for (node, val) in G.degree()]
edge_max = max(degrees)
colors = [plt.cm.plasma(degrees[i]/edge_max) for i in range(n)]
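# Colour each node by its degree, normalised by the maximum degree (held in `edge_max`).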
with plt.style.context(('ggplot')):
fig = plt.figure(figsize=(10,7))
ax = Axes3D(fig)
for key, value in pos.items():
xi = value[0]
yi = value[1]
zi = value[2]
ax.scatter(xi, yi, zi, c=colors[key], s=20+20*G.degree(key), edgecolors='k', alpha=0.7)
for i, j in enumerate(G.edges()):
x = np.array((pos[j[0]][0], pos[j[1]][0]))
y = np.array((pos[j[0]][1], pos[j[1]][1]))
z = np.array((pos[j[0]][2], pos[j[1]][2]))
ax.plot(x, y, z, c='black', alpha=0.5)
ax.view_init(30, angle)
ax.set_axis_off()
plt.show()
    # return  (bare return kept from the original fragment; invalid outside a function)
|
[
"networkx.set_node_attributes",
"networkx.betweenness_centrality",
"numpy.column_stack",
"networkx.Graph",
"numpy.loadtxt",
"numpy.array",
"networkx.get_node_attributes"
] |
[((60, 70), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (68, 70), True, 'import networkx as nx\n'), ((86, 138), 'numpy.loadtxt', 'np.loadtxt', (['graph_input'], {'usecols': '(0, 1)', 'unpack': '(True)'}), '(graph_input, usecols=(0, 1), unpack=True)\n', (96, 138), True, 'import numpy as np\n'), ((408, 436), 'networkx.betweenness_centrality', 'nx.betweenness_centrality', (['G'], {}), '(G)\n', (433, 436), True, 'import networkx as nx\n'), ((895, 930), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['G', 'poistion'], {}), '(G, poistion)\n', (917, 930), True, 'import networkx as nx\n'), ((938, 971), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['G', '"""posi"""'], {}), "(G, 'posi')\n", (960, 971), True, 'import networkx as nx\n'), ((719, 745), 'numpy.column_stack', 'np.column_stack', (['(x, y, z)'], {}), '((x, y, z))\n', (734, 745), True, 'import numpy as np\n'), ((1477, 1515), 'numpy.array', 'np.array', (['(pos[j[0]][0], pos[j[1]][0])'], {}), '((pos[j[0]][0], pos[j[1]][0]))\n', (1485, 1515), True, 'import numpy as np\n'), ((1528, 1566), 'numpy.array', 'np.array', (['(pos[j[0]][1], pos[j[1]][1])'], {}), '((pos[j[0]][1], pos[j[1]][1]))\n', (1536, 1566), True, 'import numpy as np\n'), ((1579, 1617), 'numpy.array', 'np.array', (['(pos[j[0]][2], pos[j[1]][2])'], {}), '((pos[j[0]][2], pos[j[1]][2]))\n', (1587, 1617), True, 'import numpy as np\n')]
|
"""Collection of tests for sorting functions."""
# global
from hypothesis import given, strategies as st
import numpy as np
# local
import ivy_tests.test_ivy.helpers as helpers
import ivy.functional.backends.numpy as ivy_np
# argsort
@given(
array_shape=helpers.lists(
st.integers(1, 5), min_size="num_dims", max_size="num_dims", size_bounds=[1, 5]
),
input_dtype=st.sampled_from(ivy_np.valid_dtypes),
data=st.data(),
as_variable=st.booleans(),
with_out=st.booleans(),
num_positional_args=helpers.num_positional_args(fn_name="argsort"),
native_array=st.booleans(),
container=st.booleans(),
instance_method=st.booleans(),
)
def test_argsort(
array_shape,
input_dtype,
data,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
):
    # the torch backend does not support these unsigned integer dtypes; skip them
if fw == "torch" and input_dtype in ["uint16", "uint32", "uint64"]:
return
# we do not want any nans
x = data.draw(
helpers.nph.arrays(shape=array_shape, dtype=input_dtype).filter(
lambda x: not np.any(np.isnan(x))
)
)
ndim = len(x.shape)
axis = data.draw(st.integers(-ndim, ndim - 1))
descending = data.draw(st.booleans())
stable = data.draw(st.booleans())
helpers.test_array_function(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
"argsort",
x=x,
axis=axis,
descending=descending,
stable=stable,
)
# sort
@given(
array_shape=helpers.lists(
st.integers(1, 5), min_size="num_dims", max_size="num_dims", size_bounds=[1, 5]
),
input_dtype=st.sampled_from(ivy_np.valid_dtypes),
data=st.data(),
as_variable=st.booleans(),
with_out=st.booleans(),
num_positional_args=helpers.num_positional_args(fn_name="sort"),
native_array=st.booleans(),
container=st.booleans(),
instance_method=st.booleans(),
)
def test_sort(
array_shape,
input_dtype,
data,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
):
    # the torch backend does not support these unsigned integer dtypes; skip them
if fw == "torch" and input_dtype in ["uint16", "uint32", "uint64"]:
return
# we do not want any nans
x = data.draw(
helpers.nph.arrays(shape=array_shape, dtype=input_dtype).filter(
lambda x: not np.any(np.isnan(x))
)
)
ndim = len(x.shape)
axis = data.draw(st.integers(-ndim, ndim - 1))
descending = data.draw(st.booleans())
stable = data.draw(st.booleans())
helpers.test_array_function(
input_dtype,
as_variable,
with_out,
num_positional_args,
native_array,
container,
instance_method,
fw,
"sort",
x=x,
axis=axis,
descending=descending,
stable=stable,
)
|
[
"hypothesis.strategies.data",
"ivy_tests.test_ivy.helpers.num_positional_args",
"ivy_tests.test_ivy.helpers.test_array_function",
"hypothesis.strategies.sampled_from",
"numpy.isnan",
"hypothesis.strategies.booleans",
"hypothesis.strategies.integers",
"ivy_tests.test_ivy.helpers.nph.arrays"
] |
[((1313, 1516), 'ivy_tests.test_ivy.helpers.test_array_function', 'helpers.test_array_function', (['input_dtype', 'as_variable', 'with_out', 'num_positional_args', 'native_array', 'container', 'instance_method', 'fw', '"""argsort"""'], {'x': 'x', 'axis': 'axis', 'descending': 'descending', 'stable': 'stable'}), "(input_dtype, as_variable, with_out,\n num_positional_args, native_array, container, instance_method, fw,\n 'argsort', x=x, axis=axis, descending=descending, stable=stable)\n", (1340, 1516), True, 'import ivy_tests.test_ivy.helpers as helpers\n'), ((2698, 2898), 'ivy_tests.test_ivy.helpers.test_array_function', 'helpers.test_array_function', (['input_dtype', 'as_variable', 'with_out', 'num_positional_args', 'native_array', 'container', 'instance_method', 'fw', '"""sort"""'], {'x': 'x', 'axis': 'axis', 'descending': 'descending', 'stable': 'stable'}), "(input_dtype, as_variable, with_out,\n num_positional_args, native_array, container, instance_method, fw,\n 'sort', x=x, axis=axis, descending=descending, stable=stable)\n", (2725, 2898), True, 'import ivy_tests.test_ivy.helpers as helpers\n'), ((1198, 1226), 'hypothesis.strategies.integers', 'st.integers', (['(-ndim)', '(ndim - 1)'], {}), '(-ndim, ndim - 1)\n', (1209, 1226), True, 'from hypothesis import given, strategies as st\n'), ((1255, 1268), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (1266, 1268), True, 'from hypothesis import given, strategies as st\n'), ((1293, 1306), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (1304, 1306), True, 'from hypothesis import given, strategies as st\n'), ((388, 424), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['ivy_np.valid_dtypes'], {}), '(ivy_np.valid_dtypes)\n', (403, 424), True, 'from hypothesis import given, strategies as st\n'), ((435, 444), 'hypothesis.strategies.data', 'st.data', ([], {}), '()\n', (442, 444), True, 'from hypothesis import given, strategies as st\n'), ((462, 475), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (473, 475), True, 'from hypothesis import given, strategies as st\n'), ((490, 503), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (501, 503), True, 'from hypothesis import given, strategies as st\n'), ((529, 575), 'ivy_tests.test_ivy.helpers.num_positional_args', 'helpers.num_positional_args', ([], {'fn_name': '"""argsort"""'}), "(fn_name='argsort')\n", (556, 575), True, 'import ivy_tests.test_ivy.helpers as helpers\n'), ((594, 607), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (605, 607), True, 'from hypothesis import given, strategies as st\n'), ((623, 636), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (634, 636), True, 'from hypothesis import given, strategies as st\n'), ((658, 671), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (669, 671), True, 'from hypothesis import given, strategies as st\n'), ((2583, 2611), 'hypothesis.strategies.integers', 'st.integers', (['(-ndim)', '(ndim - 1)'], {}), '(-ndim, ndim - 1)\n', (2594, 2611), True, 'from hypothesis import given, strategies as st\n'), ((2640, 2653), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (2651, 2653), True, 'from hypothesis import given, strategies as st\n'), ((2678, 2691), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (2689, 2691), True, 'from hypothesis import given, strategies as st\n'), ((1779, 1815), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['ivy_np.valid_dtypes'], {}), 
'(ivy_np.valid_dtypes)\n', (1794, 1815), True, 'from hypothesis import given, strategies as st\n'), ((1826, 1835), 'hypothesis.strategies.data', 'st.data', ([], {}), '()\n', (1833, 1835), True, 'from hypothesis import given, strategies as st\n'), ((1853, 1866), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (1864, 1866), True, 'from hypothesis import given, strategies as st\n'), ((1881, 1894), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (1892, 1894), True, 'from hypothesis import given, strategies as st\n'), ((1920, 1963), 'ivy_tests.test_ivy.helpers.num_positional_args', 'helpers.num_positional_args', ([], {'fn_name': '"""sort"""'}), "(fn_name='sort')\n", (1947, 1963), True, 'import ivy_tests.test_ivy.helpers as helpers\n'), ((1982, 1995), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (1993, 1995), True, 'from hypothesis import given, strategies as st\n'), ((2011, 2024), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (2022, 2024), True, 'from hypothesis import given, strategies as st\n'), ((2046, 2059), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (2057, 2059), True, 'from hypothesis import given, strategies as st\n'), ((285, 302), 'hypothesis.strategies.integers', 'st.integers', (['(1)', '(5)'], {}), '(1, 5)\n', (296, 302), True, 'from hypothesis import given, strategies as st\n'), ((1676, 1693), 'hypothesis.strategies.integers', 'st.integers', (['(1)', '(5)'], {}), '(1, 5)\n', (1687, 1693), True, 'from hypothesis import given, strategies as st\n'), ((1025, 1081), 'ivy_tests.test_ivy.helpers.nph.arrays', 'helpers.nph.arrays', ([], {'shape': 'array_shape', 'dtype': 'input_dtype'}), '(shape=array_shape, dtype=input_dtype)\n', (1043, 1081), True, 'import ivy_tests.test_ivy.helpers as helpers\n'), ((2410, 2466), 'ivy_tests.test_ivy.helpers.nph.arrays', 'helpers.nph.arrays', ([], {'shape': 'array_shape', 'dtype': 'input_dtype'}), '(shape=array_shape, dtype=input_dtype)\n', (2428, 2466), True, 'import ivy_tests.test_ivy.helpers as helpers\n'), ((1123, 1134), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (1131, 1134), True, 'import numpy as np\n'), ((2508, 2519), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (2516, 2519), True, 'import numpy as np\n')]
|
import numpy as np
from random import random
from functools import reduce  # reduce is not a builtin in Python 3
from noneq_settings import BETA
def hamming(s1, s2):
"""Calculate the Hamming distance between two bit lists"""
assert len(s1) == len(s2)
return sum(c1 != c2 for c1, c2 in zip(s1, s2))
def hamiltonian(state_vec, intxn_matrix):
    return -0.5 * reduce(np.dot, [state_vec.T, intxn_matrix, state_vec])  # applied-field terms (e.g. -sum_i h_i*s_i) are omitted
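# Energy computed above: H(s) = -(1/2) * s^T J s, with J the interaction matrix.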
def internal_field(state, spin_idx, t, intxn_matrix):
internal_field = np.dot(intxn_matrix[spin_idx,:], state[:,t])
return internal_field
def glauber_dynamics_update(state, spin_idx, t, intxn_matrix, app_field=None, beta=BETA):
r1 = random()
total_field = internal_field(state, spin_idx, t, intxn_matrix)
if app_field is not None:
total_field += app_field[spin_idx]
prob_on_after_timestep = 1 / (1 + np.exp(-2*beta*total_field)) # probability that site i will be "up" after the timestep
#prob_on_after_timestep = 1 / (1 + np.exp(-BETA*total_field)) # (note remove factor of 2 because h = 0.5*J*s) probability that site i will be "up" after the timestep
if prob_on_after_timestep > r1:
#state[spin_idx, t + 1] = 1.0
state[spin_idx, t] = 1.0
else:
#state[spin_idx, t + 1] = -1.0
state[spin_idx, t] = -1.0
return state
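# Update rule used above: P(s_i = +1 after the step) = 1 / (1 + exp(-2*beta*h_i)),
# where h_i is the total (internal + applied) field at spin i.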
def state_to_label(state):
# Idea: assign integer label (0 to 2^N - 1) to the state
# state acts like binary representation of integers
# "0" corresponds to all -1
# 2^N - 1 corresponds to all +1
label = 0
    bitlist = (1 + np.array(state, dtype=int)) // 2  # map spins {-1, +1} to bits {0, 1}
for bit in bitlist:
label = (label << 1) | bit
return label
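# Illustrative example: state_to_label([-1, +1, +1]) -> bits [0, 1, 1] -> label 3.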
def label_to_state(label, N, use_neg=True):
# n is the integer label of a set of spins
bitlist = [1 if digit=='1' else 0 for digit in bin(label)[2:]]
if len(bitlist) < N:
tmp = bitlist
bitlist = np.zeros(N, dtype=int)
bitlist[-len(tmp):] = tmp[:]
if use_neg:
state = np.array(bitlist)*2 - 1
else:
state = np.array(bitlist)
return state
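# Illustrative example: label_to_state(3, 3) -> array([-1, 1, 1]), inverting the mapping above.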
def get_adjacent_labels(state):
# TODO slow, how to speedup with permutation?
N = len(state)
labels = [0] * N
tmp = np.zeros(N, dtype=int)
    for i in range(N):
tmp[:] = state[:]
tmp[i] = -1 * state[i]
labels[i] = state_to_label(tmp)
return labels
|
[
"numpy.zeros",
"random.random",
"numpy.array",
"numpy.exp",
"numpy.dot"
] |
[((514, 560), 'numpy.dot', 'np.dot', (['intxn_matrix[spin_idx, :]', 'state[:, t]'], {}), '(intxn_matrix[spin_idx, :], state[:, t])\n', (520, 560), True, 'import numpy as np\n'), ((686, 694), 'random.random', 'random', ([], {}), '()\n', (692, 694), False, 'from random import random\n'), ((2226, 2248), 'numpy.zeros', 'np.zeros', (['N'], {'dtype': 'int'}), '(N, dtype=int)\n', (2234, 2248), True, 'import numpy as np\n'), ((1915, 1937), 'numpy.zeros', 'np.zeros', (['N'], {'dtype': 'int'}), '(N, dtype=int)\n', (1923, 1937), True, 'import numpy as np\n'), ((2057, 2074), 'numpy.array', 'np.array', (['bitlist'], {}), '(bitlist)\n', (2065, 2074), True, 'import numpy as np\n'), ((873, 904), 'numpy.exp', 'np.exp', (['(-2 * beta * total_field)'], {}), '(-2 * beta * total_field)\n', (879, 904), True, 'import numpy as np\n'), ((1584, 1610), 'numpy.array', 'np.array', (['state'], {'dtype': 'int'}), '(state, dtype=int)\n', (1592, 1610), True, 'import numpy as np\n'), ((2007, 2024), 'numpy.array', 'np.array', (['bitlist'], {}), '(bitlist)\n', (2015, 2024), True, 'import numpy as np\n')]
|
# Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates stylized images with different strengths of a stylization.
For each pair of the content and style images this script computes stylized
images with different strengths of stylization (interpolates between the
identity transform parameters and the style parameters for the style image) and
saves them to the given output_dir.
See run_interpolation_with_identity.sh for example usage.
"""
import ast
import os
from magenta.models.arbitrary_image_stylization import arbitrary_image_stylization_build_model as build_model
from magenta.models.image_stylization import image_utils
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
flags = tf.flags
flags.DEFINE_string('checkpoint', None, 'Path to the model checkpoint.')
flags.DEFINE_string('style_images_paths', None, 'Paths to the style images'
                    ' for evaluation.')
flags.DEFINE_string('content_images_paths', None, 'Paths to the content images'
                    ' for evaluation.')
flags.DEFINE_string('output_dir', None, 'Output directory.')
flags.DEFINE_integer('image_size', 256, 'Image size.')
flags.DEFINE_boolean('content_square_crop', False, 'Whether to center crop'
                     ' the content image to be a square or not.')
flags.DEFINE_integer('style_image_size', 256, 'Style image size.')
flags.DEFINE_boolean('style_square_crop', False, 'Whether to center crop'
                     ' the style image to be a square or not.')
flags.DEFINE_integer('maximum_styles_to_evaluate', 1024, 'Maximum number of'
                     ' styles to evaluate.')
flags.DEFINE_string('interpolation_weights', '[1.0]', 'List of weights'
                    ' for interpolation between the parameters of the identity'
                    ' transform and the style parameters of the style image.'
                    ' The larger the weight, the stronger the stylization.'
                    ' Weight of 1.0 means the normal style transfer and weight'
                    ' of 0.0 means identity transform.')
FLAGS = flags.FLAGS
def main(unused_argv=None):
tf.logging.set_verbosity(tf.logging.INFO)
if not tf.gfile.Exists(FLAGS.output_dir):
tf.gfile.MkDir(FLAGS.output_dir)
with tf.Graph().as_default(), tf.Session() as sess:
# Defines place holder for the style image.
style_img_ph = tf.placeholder(tf.float32, shape=[None, None, 3])
if FLAGS.style_square_crop:
style_img_preprocessed = image_utils.center_crop_resize_image(
style_img_ph, FLAGS.style_image_size)
else:
style_img_preprocessed = image_utils.resize_image(style_img_ph,
FLAGS.style_image_size)
# Defines place holder for the content image.
content_img_ph = tf.placeholder(tf.float32, shape=[None, None, 3])
if FLAGS.content_square_crop:
content_img_preprocessed = image_utils.center_crop_resize_image(
content_img_ph, FLAGS.image_size)
else:
content_img_preprocessed = image_utils.resize_image(
content_img_ph, FLAGS.image_size)
# Defines the model.
stylized_images, _, _, bottleneck_feat = build_model.build_model(
content_img_preprocessed,
style_img_preprocessed,
trainable=False,
is_training=False,
inception_end_point='Mixed_6e',
style_prediction_bottleneck=100,
adds_losses=False)
if tf.gfile.IsDirectory(FLAGS.checkpoint):
checkpoint = tf.train.latest_checkpoint(FLAGS.checkpoint)
else:
checkpoint = FLAGS.checkpoint
tf.logging.info('loading latest checkpoint file: {}'.format(checkpoint))
init_fn = slim.assign_from_checkpoint_fn(checkpoint,
slim.get_variables_to_restore())
sess.run([tf.local_variables_initializer()])
init_fn(sess)
# Gets the list of the input style images.
style_img_list = tf.gfile.Glob(FLAGS.style_images_paths)
if len(style_img_list) > FLAGS.maximum_styles_to_evaluate:
np.random.seed(1234)
style_img_list = np.random.permutation(style_img_list)
style_img_list = style_img_list[:FLAGS.maximum_styles_to_evaluate]
# Gets list of input content images.
content_img_list = tf.gfile.Glob(FLAGS.content_images_paths)
for content_i, content_img_path in enumerate(content_img_list):
content_img_np = image_utils.load_np_image_uint8(content_img_path)[:, :, :
3]
content_img_name = os.path.basename(content_img_path)[:-4]
# Saves preprocessed content image.
inp_img_croped_resized_np = sess.run(
content_img_preprocessed, feed_dict={
content_img_ph: content_img_np
})
image_utils.save_np_image(inp_img_croped_resized_np,
os.path.join(FLAGS.output_dir,
'%s.jpg' % (content_img_name)))
# Computes bottleneck features of the style prediction network for the
# identity transform.
identity_params = sess.run(
bottleneck_feat, feed_dict={style_img_ph: content_img_np})
for style_i, style_img_path in enumerate(style_img_list):
if style_i > FLAGS.maximum_styles_to_evaluate:
break
style_img_name = os.path.basename(style_img_path)[:-4]
style_image_np = image_utils.load_np_image_uint8(style_img_path)[:, :, :
3]
if style_i % 10 == 0:
tf.logging.info('Stylizing (%d) %s with (%d) %s' %
(content_i, content_img_name, style_i,
style_img_name))
# Saves preprocessed style image.
style_img_croped_resized_np = sess.run(
style_img_preprocessed, feed_dict={
style_img_ph: style_image_np
})
image_utils.save_np_image(style_img_croped_resized_np,
os.path.join(FLAGS.output_dir,
'%s.jpg' % (style_img_name)))
# Computes bottleneck features of the style prediction network for the
# given style image.
style_params = sess.run(
bottleneck_feat, feed_dict={style_img_ph: style_image_np})
interpolation_weights = ast.literal_eval(FLAGS.interpolation_weights)
# Interpolates between the parameters of the identity transform and
# style parameters of the given style image.
for interp_i, wi in enumerate(interpolation_weights):
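          # wi == 0.0 reproduces the identity transform, wi == 1.0 the full style
          # transfer; intermediate weights linearly blend the two bottleneck vectors.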
stylized_image_res = sess.run(
stylized_images,
feed_dict={
bottleneck_feat:
identity_params * (1 - wi) + style_params * wi,
content_img_ph:
content_img_np
})
# Saves stylized image.
image_utils.save_np_image(
stylized_image_res,
os.path.join(FLAGS.output_dir, '%s_stylized_%s_%d.jpg' %
(content_img_name, style_img_name, interp_i)))
def console_entry_point():
tf.disable_v2_behavior()
tf.app.run(main)
if __name__ == '__main__':
console_entry_point()
|
[
"numpy.random.seed",
"magenta.models.arbitrary_image_stylization.arbitrary_image_stylization_build_model.build_model",
"magenta.models.image_stylization.image_utils.load_np_image_uint8",
"tensorflow.compat.v1.gfile.Exists",
"magenta.models.image_stylization.image_utils.resize_image",
"tensorflow.compat.v1.gfile.Glob",
"os.path.join",
"tensorflow.compat.v1.app.run",
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.logging.info",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.train.latest_checkpoint",
"os.path.basename",
"tf_slim.get_variables_to_restore",
"numpy.random.permutation",
"tensorflow.compat.v1.gfile.MkDir",
"magenta.models.image_stylization.image_utils.center_crop_resize_image",
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.compat.v1.gfile.IsDirectory",
"tensorflow.compat.v1.local_variables_initializer",
"tensorflow.compat.v1.logging.set_verbosity",
"ast.literal_eval",
"tensorflow.compat.v1.Graph"
] |
[((2656, 2697), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (2680, 2697), True, 'import tensorflow.compat.v1 as tf\n'), ((7755, 7779), 'tensorflow.compat.v1.disable_v2_behavior', 'tf.disable_v2_behavior', ([], {}), '()\n', (7777, 7779), True, 'import tensorflow.compat.v1 as tf\n'), ((7782, 7798), 'tensorflow.compat.v1.app.run', 'tf.app.run', (['main'], {}), '(main)\n', (7792, 7798), True, 'import tensorflow.compat.v1 as tf\n'), ((2707, 2740), 'tensorflow.compat.v1.gfile.Exists', 'tf.gfile.Exists', (['FLAGS.output_dir'], {}), '(FLAGS.output_dir)\n', (2722, 2740), True, 'import tensorflow.compat.v1 as tf\n'), ((2746, 2778), 'tensorflow.compat.v1.gfile.MkDir', 'tf.gfile.MkDir', (['FLAGS.output_dir'], {}), '(FLAGS.output_dir)\n', (2760, 2778), True, 'import tensorflow.compat.v1 as tf\n'), ((2812, 2824), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (2822, 2824), True, 'import tensorflow.compat.v1 as tf\n'), ((2901, 2950), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, None, 3]'}), '(tf.float32, shape=[None, None, 3])\n', (2915, 2950), True, 'import tensorflow.compat.v1 as tf\n'), ((3332, 3381), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, None, 3]'}), '(tf.float32, shape=[None, None, 3])\n', (3346, 3381), True, 'import tensorflow.compat.v1 as tf\n'), ((3715, 3916), 'magenta.models.arbitrary_image_stylization.arbitrary_image_stylization_build_model.build_model', 'build_model.build_model', (['content_img_preprocessed', 'style_img_preprocessed'], {'trainable': '(False)', 'is_training': '(False)', 'inception_end_point': '"""Mixed_6e"""', 'style_prediction_bottleneck': '(100)', 'adds_losses': '(False)'}), "(content_img_preprocessed, style_img_preprocessed,\n trainable=False, is_training=False, inception_end_point='Mixed_6e',\n style_prediction_bottleneck=100, adds_losses=False)\n", (3738, 3916), True, 'from magenta.models.arbitrary_image_stylization import arbitrary_image_stylization_build_model as build_model\n'), ((3974, 4012), 'tensorflow.compat.v1.gfile.IsDirectory', 'tf.gfile.IsDirectory', (['FLAGS.checkpoint'], {}), '(FLAGS.checkpoint)\n', (3994, 4012), True, 'import tensorflow.compat.v1 as tf\n'), ((4475, 4514), 'tensorflow.compat.v1.gfile.Glob', 'tf.gfile.Glob', (['FLAGS.style_images_paths'], {}), '(FLAGS.style_images_paths)\n', (4488, 4514), True, 'import tensorflow.compat.v1 as tf\n'), ((4804, 4845), 'tensorflow.compat.v1.gfile.Glob', 'tf.gfile.Glob', (['FLAGS.content_images_paths'], {}), '(FLAGS.content_images_paths)\n', (4817, 4845), True, 'import tensorflow.compat.v1 as tf\n'), ((3014, 3088), 'magenta.models.image_stylization.image_utils.center_crop_resize_image', 'image_utils.center_crop_resize_image', (['style_img_ph', 'FLAGS.style_image_size'], {}), '(style_img_ph, FLAGS.style_image_size)\n', (3050, 3088), False, 'from magenta.models.image_stylization import image_utils\n'), ((3141, 3203), 'magenta.models.image_stylization.image_utils.resize_image', 'image_utils.resize_image', (['style_img_ph', 'FLAGS.style_image_size'], {}), '(style_img_ph, FLAGS.style_image_size)\n', (3165, 3203), False, 'from magenta.models.image_stylization import image_utils\n'), ((3449, 3519), 'magenta.models.image_stylization.image_utils.center_crop_resize_image', 'image_utils.center_crop_resize_image', (['content_img_ph', 'FLAGS.image_size'], {}), '(content_img_ph, FLAGS.image_size)\n', (3485, 3519), False, 'from 
magenta.models.image_stylization import image_utils\n'), ((3574, 3632), 'magenta.models.image_stylization.image_utils.resize_image', 'image_utils.resize_image', (['content_img_ph', 'FLAGS.image_size'], {}), '(content_img_ph, FLAGS.image_size)\n', (3598, 3632), False, 'from magenta.models.image_stylization import image_utils\n'), ((4033, 4077), 'tensorflow.compat.v1.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['FLAGS.checkpoint'], {}), '(FLAGS.checkpoint)\n', (4059, 4077), True, 'import tensorflow.compat.v1 as tf\n'), ((4306, 4337), 'tf_slim.get_variables_to_restore', 'slim.get_variables_to_restore', ([], {}), '()\n', (4335, 4337), True, 'import tf_slim as slim\n'), ((4584, 4604), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (4598, 4604), True, 'import numpy as np\n'), ((4628, 4665), 'numpy.random.permutation', 'np.random.permutation', (['style_img_list'], {}), '(style_img_list)\n', (4649, 4665), True, 'import numpy as np\n'), ((2787, 2797), 'tensorflow.compat.v1.Graph', 'tf.Graph', ([], {}), '()\n', (2795, 2797), True, 'import tensorflow.compat.v1 as tf\n'), ((4353, 4385), 'tensorflow.compat.v1.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (4383, 4385), True, 'import tensorflow.compat.v1 as tf\n'), ((4938, 4987), 'magenta.models.image_stylization.image_utils.load_np_image_uint8', 'image_utils.load_np_image_uint8', (['content_img_path'], {}), '(content_img_path)\n', (4969, 4987), False, 'from magenta.models.image_stylization import image_utils\n'), ((5097, 5131), 'os.path.basename', 'os.path.basename', (['content_img_path'], {}), '(content_img_path)\n', (5113, 5131), False, 'import os\n'), ((5421, 5480), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', "('%s.jpg' % content_img_name)"], {}), "(FLAGS.output_dir, '%s.jpg' % content_img_name)\n", (5433, 5480), False, 'import os\n'), ((6945, 6990), 'ast.literal_eval', 'ast.literal_eval', (['FLAGS.interpolation_weights'], {}), '(FLAGS.interpolation_weights)\n', (6961, 6990), False, 'import ast\n'), ((5899, 5931), 'os.path.basename', 'os.path.basename', (['style_img_path'], {}), '(style_img_path)\n', (5915, 5931), False, 'import os\n'), ((5962, 6009), 'magenta.models.image_stylization.image_utils.load_np_image_uint8', 'image_utils.load_np_image_uint8', (['style_img_path'], {}), '(style_img_path)\n', (5993, 6009), False, 'from magenta.models.image_stylization import image_utils\n'), ((6135, 6245), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (["('Stylizing (%d) %s with (%d) %s' % (content_i, content_img_name, style_i,\n style_img_name))"], {}), "('Stylizing (%d) %s with (%d) %s' % (content_i,\n content_img_name, style_i, style_img_name))\n", (6150, 6245), True, 'import tensorflow.compat.v1 as tf\n'), ((6591, 6648), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', "('%s.jpg' % style_img_name)"], {}), "(FLAGS.output_dir, '%s.jpg' % style_img_name)\n", (6603, 6648), False, 'import os\n'), ((7593, 7699), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', "('%s_stylized_%s_%d.jpg' % (content_img_name, style_img_name, interp_i))"], {}), "(FLAGS.output_dir, '%s_stylized_%s_%d.jpg' % (content_img_name,\n style_img_name, interp_i))\n", (7605, 7699), False, 'import os\n')]
|
#!/usr/bin/env python
"""
@package ion_functions.data.adcp_functions
@file ion_functions/data/adcp_functions.py
@author <NAME>, <NAME>, <NAME>
@brief Module containing ADCP related data-calculations.
"""
import numpy as np
from ion_functions.data.generic_functions import magnetic_declination
from ion_functions.data.generic_functions import replace_fill_with_nan
# instrument fill value unprocessed by CI
# (bad beam velocity sentinel output by tRDI ADCP instruments)
ADCP_FILLVALUE = -32768
"""
**** For instruments programmed in beam coordinates:
(ADCPS-I,K; ADCPT-B,D,E)
adcp_beam_eastward -- calculates VELPROF-VLE_L1
adcp_beam_northward -- calculates VELPROF-VLN_L1
adcp_beam_vertical -- calculates VELPROF-VLU_L1
adcp_beam_error -- calculates VELPROF-ERR_L1
**** For instruments programmed in earth coordinates:
(ADCPA; ADCPS-J,L,N; ADCPT-C,F,G,M)
adcp_earth_eastward -- calculates VELPROF-VLE_L1
adcp_earth_northward -- calculates VELPROF-VLN_L1
adcp_earth_vertical -- calculates VELPROF-VLU_L1
adcp_earth_error -- calculates VELPROF-ERR_L1
**** For the VADCP programmed in beam coordinates:
vadcp_beam_eastward -- calculates VELTURB-VLE_L1
vadcp_beam_northward -- calculates VELTURB-VLN_L1
vadcp_beam_vertical_true -- calculates VELTURB-VLU-5BM_L1
vadcp_beam_vertical_est -- calculates VELTURB-VLU-4BM_L1
vadcp_beam_error -- calculates VELTURB-ERR_L1
**** For all tRDI ADCP instruments:
adcp_backscatter -- calculates ECHOINT-B1_L1,
calculates ECHOINT-B2_L1,
calculates ECHOINT-B3_L1,
calculates ECHOINT-B4_L1.
**** Base functions used by above functions
adcp_beam2ins -- applies the beam to instrument transform using a 4
beam solution for instruments programmed in beam coordinates
adcp_ins2earth -- applies the instrument to Earth transform for all
instruments originally programmed in beam coordinates.
magnetic_correction -- corrects horizontal velocities for the magnetic
variation (declination) at the measurement location.
**** Supplementary functions to calculate velocity bin depths:
adcp_bin_depths -- calculates bin depths for the pd0 output format
(virtually all tRDI ADCPs deployed by OOI); uses
TEOS-10 functions p_from_z and enthalpy_SSO_0_p.
adcp_bin_depths_pd8 -- calculates bin depths for the pd8 output format,
assuming that (1) the ADCP operator recorded the
necessary input variables and (2) these are somehow
entered into the CI system.
"""
# Wrapper functions to create the VELPROF L1 data products for instruments
# programmed in beam coordinates by RSN (ADCPS-I,K and ADCPT-B,D,E)
def adcp_beam_eastward(b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt):
"""
Description:
Wrapper function to compute the Eastward Velocity Profile (VELPROF-VLE)
from beam coordinate transformed velocity profiles as defined in the
Data Product Specification for Velocity Profile and Echo Intensity -
DCN 1341-00750.
Implemented by:
2013-04-10: <NAME>. Initial code.
2014-02-03: <NAME>. Formatting and adjusting to use
magnetic declination values calculated using the WMM 2010.
2014-04-04: <NAME>. Optimized code performance by replacing
the for loops previously used to calculate 2D and 3D
vectorized coordinate transformations with calls to
np.einsum (numpy Einstein summation function).
2014-06-25: <NAME>. Edited to account for units of
heading, pitch, roll and depth
2015-06-10: <NAME>.
(a) moved the conditioning of input beam velocities to adcp_beam2inst.
(b) moved the conditioning of compass readings to adcp_inst2earth.
(c) removed the depth dependence from the magnetic declination.
Usage:
uu_cor = adcp_beam_eastward(b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt)
where
uu_corr = east velocity profiles in Earth coordinates corrected for the
magnetic declination (VELPROF-VLE_L1) [m s-1]
b1 = "beam 1" velocity profiles in beam coordinates (VELPROF-B1_L0) [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates (VELPROF-B2_L0) [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates (VELPROF-B3_L0) [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates (VELPROF-B4_L0) [mm s-1]
h = instrument's uncorrected magnetic heading [cdegrees]
p = instrument pitch [cdegrees]
r = instrument roll [cdegrees]
vf = instrument's vertical orientation (0 = downward looking and
1 = upward looking)
lat = instrument's deployment latitude [decimal degrees]
lon = instrument's deployment longitude [decimal degrees]
z = instrument's pressure sensor reading (depth) [daPa]
dt = sample date and time value [seconds since 1900-01-01]
"""
# force shapes of inputs to arrays of the correct dimensions
#z = np.atleast_1d(z) / 1000. # scale daPa depth input to dbar
#z = z * 1.019716 # use a simple approximation to calculate depth in m
lat = np.atleast_1d(lat)
lon = np.atleast_1d(lon)
dt = np.atleast_1d(dt)
# compute the beam to instrument transform
u, v, w, _ = adcp_beam2ins(b1, b2, b3, b4)
# compute the instrument to earth beam transform
uu, vv, _ = adcp_ins2earth(u, v, w, h, p, r, vf)
# compute the magnetic variation, and ...
theta = magnetic_declination(lat, lon, dt)
# ... correct for it
uu_cor, _ = magnetic_correction(theta, uu, vv)
# scale velocity to m/s
uu_cor = uu_cor / 1000. # mm/s -> m/s
# return the Eastward Velocity Profile
return uu_cor
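# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original OOI
# module). It shows one plausible way to call adcp_beam_eastward: the beam
# velocities, compass readings and deployment metadata below are synthetic
# placeholder values, and the single-packet by three-bin shapes are
# assumptions rather than real instrument output.
def _example_adcp_beam_eastward():
    b1 = np.array([[-30, -295, -123]])   # beam velocities [mm s-1], (packets, bins)
    b2 = np.array([[180, -132, 255]])
    b3 = np.array([[-398, -90, 170]])
    b4 = np.array([[-216, -338, -105]])
    h = np.array([5979])                 # heading [centidegrees]
    p = np.array([-177])                 # pitch [centidegrees]
    r = np.array([-57])                  # roll [centidegrees]
    vf = np.array([1])                    # 1 = upward looking
    lat, lon = 50.0, -145.0               # deployment location [decimal degrees]
    dt = np.array([3545769600.0])         # seconds since 1900-01-01 (mid-2012)
    # z is unused by the current implementation, so None is passed here
    return adcp_beam_eastward(b1, b2, b3, b4, h, p, r, vf, lat, lon, None, dt)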
def adcp_beam_northward(b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt):
"""
Description:
Wrapper function to compute the Northward Velocity Profile (VELPROF-VLN)
from beam coordinate transformed velocity profiles as defined in the
Data Product Specification for Velocity Profile and Echo Intensity -
DCN 1341-00750.
Implemented by:
2013-04-10: <NAME>. Initial code.
2014-02-03: <NAME>. Formatting and adjusting to use
magnetic declination values calculated using the WMM 2010.
2014-03-28: Russell Desiderio. Corrected documentation only.
2014-04-04: <NAME>. Optimized code performance by replacing
the for loops previously used to calculate 2D and 3D
vectorized coordinate transformations with calls to
np.einsum (numpy Einstein summation function).
2014-06-25: Christopher Wingard. Edited to account for units of
heading, pitch, roll and depth
2015-06-10: <NAME>.
(a) moved the conditioning of input beam velocities to adcp_beam2inst.
(b) moved the conditioning of compass readings to adcp_inst2earth.
(c) removed the depth dependence from the magnetic declination.
Usage:
vv_cor = adcp_beam_northward(b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt)
where
vv_corr = north velocity profiles in Earth coordinates corrected for the
magnetic declination (VELPROF-VLN_L1) [m s-1]
b1 = "beam 1" velocity profiles in beam coordinates (VELPROF-B1_L0) [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates (VELPROF-B2_L0) [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates (VELPROF-B3_L0) [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates (VELPROF-B4_L0) [mm s-1]
h = instrument's uncorrected magnetic heading [cdegrees]
p = instrument pitch [cdegrees]
r = instrument roll [cdegrees]
vf = instrument's vertical orientation (0 = downward looking and
1 = upward looking)
lat = instrument's deployment latitude [decimal degrees]
lon = instrument's deployment longitude [decimal degrees]
z = instrument's pressure sensor reading (depth) [daPa]
dt = sample date and time value [seconds since 1900-01-01]
"""
# force shapes of inputs to arrays of the correct dimensions
#z = np.atleast_1d(z) / 1000. # scale daPa depth input to dbar
#z = z * 1.019716 # use a simple approximation to calculate depth in m
lat = np.atleast_1d(lat)
lon = np.atleast_1d(lon)
dt = np.atleast_1d(dt)
# compute the beam to instrument transform
u, v, w, _ = adcp_beam2ins(b1, b2, b3, b4)
# compute the instrument to earth beam transform
uu, vv, _ = adcp_ins2earth(u, v, w, h, p, r, vf)
# compute the magnetic variation, and ...
theta = magnetic_declination(lat, lon, dt)
# ... correct for it
_, vv_cor = magnetic_correction(theta, uu, vv)
# scale velocity to m/s
vv_cor = vv_cor / 1000. # mm/s -> m/s
# return the Northward Velocity Profile
return vv_cor
def adcp_beam_vertical(b1, b2, b3, b4, h, p, r, vf):
"""
Description:
Wrapper function to compute the Upward Velocity Profile (VELPROF-VLU)
from beam coordinate transformed velocity profiles as defined in the
Data Product Specification for Velocity Profile and Echo Intensity -
DCN 1341-00750.
Implemented by:
2013-04-10: <NAME>. Initial code.
2014-02-03: <NAME>. Formatting and adjusting to use
magnetic declination values calculated using the WMM 2010.
2014-04-04: <NAME>. Optimized code performance by replacing
the for loops previously used to calculate 2D and 3D
vectorized coordinate transformations with calls to
np.einsum (numpy Einstein summation function).
2014-06-25: <NAME>. Edited to account for units of
heading, pitch, roll and depth
2015-06-10: <NAME>.
(a) moved the conditioning of input beam velocities to adcp_beam2inst.
(b) moved the conditioning of compass readings to adcp_inst2earth.
Usage:
ww_cor = adcp_beam_vertical(b1, b2, b3, b4, h, p, r, vf)
where
ww_cor = vertical velocity profiles (VELPROF-VLU_L1) [m s-1]
b1 = "beam 1" velocity profiles in beam coordinates (VELPROF-B1_L0) [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates (VELPROF-B2_L0) [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates (VELPROF-B3_L0) [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates (VELPROF-B4_L0) [mm s-1]
h = instrument's uncorrected magnetic heading [cdegrees]
p = instrument pitch [cdegrees]
r = instrument roll [cdegrees]
vf = instrument's vertical orientation (0 = downward looking and
1 = upward looking)
"""
# compute the beam to instrument transform
u, v, w, _ = adcp_beam2ins(b1, b2, b3, b4)
# compute the instrument to earth beam transform
_, _, ww = adcp_ins2earth(u, v, w, h, p, r, vf)
# scale upward velocity to m/s
ww = ww / 1000. # mm/s -> m/s
# return the Upward Velocity Profile
return ww
def adcp_beam_error(b1, b2, b3, b4):
"""
Description:
Wrapper function to compute the Error Velocity Profile (VELPROF-ERR)
from beam coordinate transformed velocity profiles as defined in the
Data Product Specification for Velocity Profile and Echo Intensity -
DCN 1341-00750.
Implemented by:
2013-04-10: <NAME>. Initial code.
2015-06-10: <NAME>.
Moved the conditioning of input beam velocities to adcp_beam2inst.
Usage:
e = adcp_beam_error(b1, b2, b3, b4)
where
e = Error velocity profiles (VELPROF-ERR_L1) [m s-1]
b1 = "beam 1" velocity profiles in beam coordinates (VELPROF-B1_L0) [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates (VELPROF-B2_L0) [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates (VELPROF-B3_L0) [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates (VELPROF-B4_L0) [mm s-1]
"""
# compute the beam to instrument transform
_, _, _, e = adcp_beam2ins(b1, b2, b3, b4)
# scale error velocity to m/s
e = e / 1000. # mm/s -> m/s
# return the Error Velocity Profile
return e
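# Worked note (added for illustration): the error velocity is a consistency
# check on the 4-beam solution; for example, identical beam velocities
# b1 = b2 = b3 = b4 make the term d * (b1 + b2 - b3 - b4) in adcp_beam2ins
# vanish, so this function returns e = 0 for that (idealized) case.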
# Wrapper functions to create the VELPROF L1 data products for instruments
# programmed in Earth coordinates by CGSN (Pioneer and Endurance) (ADCPA,
# ADCPS-J,L,N and ADCPT-C,F,G,M)
def adcp_earth_eastward(u, v, z, lat, lon, dt):
"""
Description:
Wrapper function to compute the Eastward Velocity Profile (VELPROF-VLE)
from Earth coordinate transformed velocity profiles as defined in the
Data Product Specification for Velocity Profile and Echo Intensity -
DCN 1341-00750.
Implemented by:
2013-04-10: <NAME>. Initial code.
2014-02-03: <NAME>. Formatting and adjusting to use
magnetic declination values calculated using the WMM 2010.
2014-04-04: <NAME>. Optimized code performance by replacing
the for loops previously used to calculate 2D and 3D
vectorized coordinate transformations with calls to
np.einsum (numpy Einstein summation function).
2014-06-25: <NAME>. Edited to account for units of
heading, pitch, roll and depth
2015-06-10: <NAME>.
Removed the depth dependence from the magnetic declination.
2015-06-25: <NAME>. Incorporated int fillvalue -> Nan.
Usage:
uu_cor = adcp_earth_eastward(u, v, z, lat, lon, dt)
where
uu_cor = eastward velocity profiles in Earth coordinates corrected for
the magnetic declination (VELPROF-VLE_L1) [m s-1]
u = Eastward velocity profiles (VELPROF-VLE_L0) [mm s-1]
v = Northward velocity profiles (VELPROF-VLN_L0) [mm s-1]
z = instrument's pressure sensor reading (depth) [daPa]
lat = instrument's deployment latitude [decimal degrees]
lon = instrument's deployment longitude [decimal degrees]
dt = sample date and time value [seconds since 1900-01-01]
"""
# force shapes of inputs to arrays
u = np.atleast_2d(u)
v = np.atleast_2d(v)
# on input, the elements of u and v are of type int.
u, v = replace_fill_with_nan(ADCP_FILLVALUE, u, v)
#z = np.atleast_1d(z) / 1000. # scale daPa depth input to dbar
#z = z * 1.019716 # use a simple approximation to calculate depth in m
lat = np.atleast_1d(lat)
lon = np.atleast_1d(lon)
dt = np.atleast_1d(dt)
# compute the magnetic variation, and ...
theta = magnetic_declination(lat, lon, dt)
# ... correct for it
uu_cor, _ = magnetic_correction(theta, u, v)
# scale velocity to m/s
uu_cor = uu_cor / 1000. # mm/s -> m/s
# return the Eastward Velocity Profile
return uu_cor
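# Hedged usage note (added for illustration; synthetic numbers): because the
# L0 velocities are screened with replace_fill_with_nan above, a raw input of
#   u = np.array([[1000, ADCP_FILLVALUE]]); v = np.array([[0, 0]])
# yields an eastward profile whose second bin is NaN after the declination
# correction, while the first bin is simply rotated and scaled to m s-1.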
def adcp_earth_northward(u, v, z, lat, lon, dt):
"""
Description:
Wrapper function to compute the Northward Velocity Profile (VELPROF-VLN)
from Earth coordinate transformed velocity profiles as defined in the
Data Product Specification for Velocity Profile and Echo Intensity -
DCN 1341-00750.
Implemented by:
2013-04-10: <NAME>. Initial code.
2014-02-03: <NAME>. Formatting and adjusting to use
magnetic declination values calculated using the WMM 2010.
2014-04-04: <NAME>. Optimized code performance by replacing
the for loops previously used to calculate 2D and 3D
vectorized coordinate transformations with calls to
np.einsum (numpy Einstein summation function).
2014-06-25: <NAME>. Edited to account for units of
heading, pitch, roll and depth
2015-06-10: <NAME>.
Removed the depth dependence from the magnetic declination.
2015-06-25: <NAME>. Incorporated int fillvalue -> Nan.
Usage:
vv_cor = adcp_earth_northward(u, v, z, lat, lon, dt)
where
vv_cor = northward velocity profiles in Earth coordinates corrected for
the magnetic declination (VELPROF-VLN_L1) [m s-1]
u = Eastward velocity profiles (VELPROF-VLE_L0) [mm s-1]
v = Northward velocity profiles (VELPROF-VLN_L0) [mm s-1]
z = instrument's pressure sensor reading (depth) [daPa]
lat = instrument's deployment latitude [decimal degrees]
lon = instrument's deployment longitude [decimal degrees]
dt = sample date and time value [seconds since 1900-01-01]
"""
# force shapes of inputs to arrays
u = np.atleast_2d(u)
v = np.atleast_2d(v)
# on input, the elements of u and v are of type int.
u, v = replace_fill_with_nan(ADCP_FILLVALUE, u, v)
#z = np.atleast_1d(z) / 1000. # scale daPa depth input to dbar
#z = z * 1.019716 # use a simple approximation to calculate depth in m
lat = np.atleast_1d(lat)
lon = np.atleast_1d(lon)
dt = np.atleast_1d(dt)
# compute the magnetic variation, and ...
theta = magnetic_declination(lat, lon, dt)
# ... correct for it
_, vv_cor = magnetic_correction(theta, u, v)
# scale velocity to m/s
vv_cor = vv_cor / 1000. # mm/s -> m/s
# return the Northward Velocity Profile
return vv_cor
def adcp_earth_vertical(w):
"""
Description:
Wrapper function to compute the Upward Velocity Profile (VELPROF-VLU)
from Earth coordinate transformed velocity profiles as defined in the
Data Product Specification for Velocity Profile and Echo Intensity -
DCN 1341-00750.
Implemented by:
2014-06-25: <NAME>. Initial code.
2015-06-25: <NAME>. Incorporated int fillvalue -> Nan.
Usage:
w_scl = adcp_earth_vertical(w)
where
w_scl = scaled upward velocity profiles in Earth coordinates
(VELPROF-VLU_L1) [m s-1]
w = upward velocity profiles (VELPROF-VLU_L0) [mm s-1]
"""
w = replace_fill_with_nan(ADCP_FILLVALUE, w)
# scale velocity to m/s
w_scl = w / 1000. # mm/s -> m/s
# return the Upward Velocity Profile
return w_scl
def adcp_earth_error(e):
"""
Description:
Wrapper function to compute the Error Velocity Profile (VELPROF-ERR)
from Earth coordinate transformed velocity profiles as defined in the
Data Product Specification for Velocity Profile and Echo Intensity -
DCN 1341-00750.
Implemented by:
2014-06-25: <NAME>. Initial code.
2015-06-25: <NAME>. Incorporated int fillvalue -> Nan.
Usage:
e_scl = adcp_earth_error(e)
where
e_scl = scaled error velocity profiles in Earth coordinates
(VELPROF-ERR_L1) [m s-1]
e = error velocity profiles (VELPROF-ERR_L0) [mm s-1]
"""
e = replace_fill_with_nan(ADCP_FILLVALUE, e)
# scale velocity to m/s
e_scl = e / 1000. # mm/s -> m/s
# return the scaled Error Velocity Profile
return e_scl
# Compute the VELTURB_L1 data products for the VADCP instrument deployed by RSN.
def vadcp_beam_eastward(b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt):
"""
Description:
Wrapper function to compute the Eastward Velocity Profile (VELTURB-VLE)
from beam coordinate transformed velocity profiles as defined in the
Data Product Specification for Turbulent Velocity Profile and Echo Intensity -
DCN 1341-00760.
Implemented by:
2014-06-25: <NAME>. Initial code, based on existing ADCP
2015-06-10: <NAME>.
(a) moved the conditioning of input beam velocities to adcp_beam2inst.
(b) moved the conditioning of compass readings to adcp_inst2earth.
(c) removed the depth dependence from the magnetic declination.
Usage:
uu_cor = vadcp_beam_eastward(b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt)
where
uu_cor = east velocity profiles in Earth coordinates corrected for the
magnetic declination (VELTURB-VLE_L1) [m s-1]
b1 = "beam 1" velocity profiles in beam coordinates (VELTURB-B1_L0) [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates (VELTURB-B2_L0) [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates (VELTURB-B3_L0) [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates (VELTURB-B4_L0) [mm s-1]
h = instrument's uncorrected magnetic heading [cdegrees]
p = instrument pitch [cdegrees]
r = instrument roll [cdegrees]
vf = instrument's vertical orientation (0 = downward looking and
1 = upward looking)
lat = instrument's deployment latitude [decimal degrees]
lon = instrument's deployment longitude [decimal degrees]
z = instrument's pressure sensor reading (depth) [daPa]
dt = sample date and time value [seconds since 1900-01-01]
"""
# force shapes of inputs to arrays of the correct dimensions
#z = np.atleast_1d(z) / 1000. # scale daPa depth input to dbar
#z = z * 1.019716 # use a simple approximation to calculate depth in m
lat = np.atleast_1d(lat)
lon = np.atleast_1d(lon)
dt = np.atleast_1d(dt)
# compute the beam to instrument transform
u, v, w, _ = adcp_beam2ins(b1, b2, b3, b4)
# compute the instrument to earth beam transform
uu, vv, _ = adcp_ins2earth(u, v, w, h, p, r, vf)
# compute the magnetic variation, and ...
theta = magnetic_declination(lat, lon, dt)
# ... correct for it
uu_cor, _ = magnetic_correction(theta, uu, vv)
# scale velocity to m/s
uu_cor = uu_cor / 1000. # mm/s -> m/s
# return the Eastward Velocity Profile
return uu_cor
def vadcp_beam_northward(b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt):
"""
Description:
Wrapper function to compute the Northward Velocity Profile
(VELTURB-VLN) from beam coordinate transformed velocity profiles as
defined in the Data Product Specification for Turbulent Velocity
Profile and Echo Intensity - DCN 1341-00760.
Implemented by:
2014-06-25: <NAME>. Initial code, based on existing ADCP
2015-06-10: <NAME>.
(a) moved the conditioning of input beam velocities to adcp_beam2inst.
(b) moved the conditioning of compass readings to adcp_inst2earth.
(c) removed the depth dependence from the magnetic declination.
Usage:
vv_cor = vadcp_beam_northward(b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt)
where
vv_cor = north velocity profiles in Earth coordinates corrected for the
magnetic declination (VELTURB-VLN_L1) [m s-1]
b1 = "beam 1" velocity profiles in beam coordinates (VELTURB-B1_L0) [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates (VELTURB-B2_L0) [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates (VELTURB-B3_L0) [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates (VELTURB-B4_L0) [mm s-1]
h = instrument's uncorrected magnetic heading [cdegrees]
p = instrument pitch [cdegrees]
r = instrument roll [cdegrees]
vf = instrument's vertical orientation (0 = downward looking and
1 = upward looking)
lat = instrument's deployment latitude [decimal degrees]
lon = instrument's deployment longitude [decimal degrees]
z = instrument's pressure sensor reading (depth) [daPa]
dt = sample date and time value [seconds since 1900-01-01]
"""
# force shapes of inputs to arrays of the correct dimensions
#z = np.atleast_1d(z) / 1000. # scale daPa depth input to dbar
#z = z * 1.019716 # use a simple approximation to calculate depth in m
lat = np.atleast_1d(lat)
lon = np.atleast_1d(lon)
dt = np.atleast_1d(dt)
# compute the beam to instrument transform
u, v, w, _ = adcp_beam2ins(b1, b2, b3, b4)
# compute the instrument to earth beam transform
uu, vv, _ = adcp_ins2earth(u, v, w, h, p, r, vf)
# compute the magnetic variation, and ...
theta = magnetic_declination(lat, lon, dt)
# ... correct for it
_, vv_cor = magnetic_correction(theta, uu, vv)
# scale velocity to m/s
vv_cor = vv_cor / 1000. # mm/s -> m/s
# return the Northward Velocity Profile
return vv_cor
def vadcp_beam_vertical_est(b1, b2, b3, b4, h, p, r, vf):
"""
Description:
Wrapper function to compute the "estimated" Upward Velocity Profile
(VELTURB-VLU-4BM) from the beam coordinate transformed velocity profiles as
defined in the Data Product Specification for Turbulent Velocity
Profile and Echo Intensity - DCN 1341-00760. This provides the
traditional estimate of the vertical velocity component from a 4 beam
solution, where each beam is facing outward at an angle (20 degrees)
relative to the vertical.
Implemented by:
2014-06-25: <NAME>. Initial code, based on existing ADCP
2015-06-10: <NAME>.
(a) moved the conditioning of input beam velocities to adcp_beam2inst.
(b) moved the conditioning of compass readings to adcp_inst2earth.
2015-06-22: <NAME>. Renamed this data product.
Usage:
ww_est = vadcp_beam_vertical_est(b1, b2, b3, b4, h, p, r, vf)
where
ww_est = estimated vertical velocity profiles in Earth coordinates
(VELTURB-VLU-4BM_L1) [m s-1]
b1 = "beam 1" velocity profiles in beam coordinates (VELTURB-B1_L0) [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates (VELTURB-B2_L0) [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates (VELTURB-B3_L0) [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates (VELTURB-B4_L0) [mm s-1]
h = instrument's uncorrected magnetic heading [cdegrees]
p = instrument pitch [cdegrees]
r = instrument roll [cdegrees]
vf = instrument's vertical orientation (0 = downward looking and
1 = upward looking)
"""
# compute the beam to instrument transform
u, v, w, _ = adcp_beam2ins(b1, b2, b3, b4)
# compute the instrument to earth beam transform
_, _, ww = adcp_ins2earth(u, v, w, h, p, r, vf)
# scale upward velocity to m/s
ww = ww / 1000. # mm/s -> m/s
# return the estimated Upward Velocity Profile
return ww
def vadcp_beam_vertical_true(b1, b2, b3, b4, b5, h, p, r, vf):
"""
Description:
Wrapper function to compute the "true" Upward Velocity Profile
(VELTURB-VLU-5BM) from the beam coordinate transformed velocity profiles as
defined in the Data Product Specification for Turbulent Velocity
Profile and Echo Intensity - DCN 1341-00760. This is assumed to provide
a better estimate of the true vertical velocity component, since beam 5
is pointing directly up.
Implemented by:
2014-06-25: <NAME>. Initial code, based on existing ADCP
2015-06-10: <NAME>.
(a) moved the conditioning of input beam velocities to adcp_beam2inst.
(b) moved the conditioning of compass readings to adcp_inst2earth.
2015-06-22: <NAME>. Renamed this data product.
2015-06-25: <NAME>. Incorporated b5 int fillvalue -> Nan.
Usage:
ww_true = vadcp_beam_vertical_true(b1, b2, b3, b4, b5, h, p, r, vf)
where
ww_true = true vertical velocity profiles in Earth coordinates
(VELTURB-VLU-5BM_L1) [m s-1]
b1 = "beam 1" velocity profiles in beam coordinates (VELTURB-B1_L0) [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates (VELTURB-B2_L0) [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates (VELTURB-B3_L0) [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates (VELTURB-B4_L0) [mm s-1]
b5 = "beam 5" velocity profiles in beam coordinates (VELTURB-B5_L0) [mm s-1]
h = instrument's uncorrected magnetic heading [cdegrees]
p = instrument pitch [cdegrees]
r = instrument roll [cdegrees]
vf = instrument's vertical orientation (0 = downward looking and
1 = upward looking)
"""
# compute the beam to instrument transform
# fill values in the 4 beams are checked for inside adcp_beam2ins
u, v, _, _ = adcp_beam2ins(b1, b2, b3, b4)
# check b5 for the presence of fill values
b5 = replace_fill_with_nan(ADCP_FILLVALUE, b5)
# compute the instrument to earth beam transform
# fill values in the adcp orientation parameters are checked for inside adcp_ins2earth
_, _, ww = adcp_ins2earth(u, v, b5, h, p, r, vf)
# scale upward velocity to m/s
ww = ww / 1000. # mm/s -> m/s
# return the true Upward Velocity Profile
return ww
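# Worked note (added for illustration): passing b5 in place of the vertical
# instrument velocity means only the tilt/orientation rotation is applied to
# beam 5. In the idealized zero pitch/roll, upward-looking case the transform
# in adcp_ins2earth reduces to ww = -b5, i.e. ww [m s-1] = -b5 [mm s-1] / 1000.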
def vadcp_beam_error(b1, b2, b3, b4):
"""
Description:
Wrapper function to compute the Error Velocity Profile (VELTURB-ERR)
from the beam coordinate transformed velocity profiles as defined in
the Data Product Specification for Turbulent Velocity Profile and Echo
Intensity - DCN 1341-00760.
Implemented by:
2014-06-25: <NAME>. Initial code, based on existing ADCP
2015-06-10: <NAME>.
Moved the conditioning of input beam velocities to adcp_beam2inst.
Usage:
e = vadcp_beam_error(b1, b2, b3, b4)
where
e = error velocity profiles (VELTURB-ERR_L1) [m s-1]
b1 = "beam 1" velocity profiles in beam coordinates (VELTURB-B1_L0) [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates (VELTURB-B2_L0) [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates (VELTURB-B3_L0) [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates (VELTURB-B4_L0) [mm s-1]
"""
# compute the beam to instrument transform
_, _, _, e = adcp_beam2ins(b1, b2, b3, b4)
# scale error velocity to m/s
e = e / 1000. # mm/s -> m/s
# return the Error Velocity Profile
return e
# Calculates ECHOINT_L1 for all tRDI ADCPs
def adcp_backscatter(raw, sfactor):
"""
Description:
Converts the echo intensity data from counts to dB using a factory
specified scale factor (nominally 0.45 dB/count for the Workhorse
family of ADCPs and 0.61 dB/count for the ExplorerDVL family). As
defined in the Data Product Specification for Velocity Profile and Echo
Intensity - DCN 1341-00750.
Implemented by:
2014-04-21: <NAME>. Initial code.
2015-06-25: <NAME>. Incorporated int fillvalue -> Nan.
Usage:
dB = adcp_backscatter(raw, sfactor)
where
dB = Relative Echo Intensity (ECHOINT_L1) [dB]
raw = raw echo intensity (ECHOINT_L0) [count]
sfactor = factory supplied scale factor, instrument and beam specific [dB/count]
Notes:
The ADCP outputs the raw echo intensity as a 1-byte integer, so the ADCP_FILLVALUE
cannot apply (requires 2 bytes).
References:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
"""
if np.isscalar(sfactor) is False:
sfactor = sfactor.reshape(sfactor.shape[0], 1)
# check raw for the presence of system fill values
raw = replace_fill_with_nan(None, raw)
dB = raw * sfactor
return dB
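# Hedged usage sketch (added for illustration; synthetic counts): with the
# nominal Workhorse scale factor of 0.45 dB/count quoted in the docstring,
#   adcp_backscatter(np.array([[50, 100, 200]]), 0.45)
# returns approximately [[22.5, 45.0, 90.0]] dB.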
##### ADCP Beam to Earth Transforms and Magnetic Variation Corrections
def adcp_beam2ins(b1, b2, b3, b4):
"""
Description:
This function converts the Beam Coordinate transformed velocity
profiles to the instrument coordinate system. The calculations are
defined in the Data Product Specification for Velocity Profile and Echo
Intensity - DCN 1341-00750.
Implemented by:
2013-04-10: <NAME>. Initial code.
2015-06-24: <NAME>. Incorporated int fillvalue -> Nan.
Usage:
u, v, w, e = adcp_beam2ins(b1, b2, b3, b4)
where
u = "east" velocity profiles in instrument coordinates [mm s-1]
v = "north" velocity profiles in instrument coordinates [mm s-1]
w = "vertical" velocity profiles in instrument coordinates [mm s-1]
e = "error" velocity profiles [mm s-1]
b1 = "beam 1" velocity profiles in beam coordinates [mm s-1]
b2 = "beam 2" velocity profiles in beam coordinates [mm s-1]
b3 = "beam 3" velocity profiles in beam coordinates [mm s-1]
b4 = "beam 4" velocity profiles in beam coordinates [mm s-1]
References:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
"""
b1 = np.atleast_2d(b1)
b2 = np.atleast_2d(b2)
b3 = np.atleast_2d(b3)
b4 = np.atleast_2d(b4)
b1, b2, b3, b4 = replace_fill_with_nan(ADCP_FILLVALUE, b1, b2, b3, b4)
theta = 20.0 / 180.0 * np.pi
a = 1.0 / (2.0 * np.sin(theta))
b = 1.0 / (4.0 * np.cos(theta))
c = 1.0 # +1.0 for convex transducer head, -1 for concave
d = a / np.sqrt(2.0)
u = c * a * (b1 - b2)
v = c * a * (b4 - b3)
w = b * (b1 + b2 + b3 + b4)
e = d * (b1 + b2 - b3 - b4)
return (u, v, w, e)
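# Worked example of the fixed geometry constants above (added for
# illustration; values rounded). For the 20 degree beam angle:
#   a = 1 / (2 * sin(20 deg)) ~ 1.4619
#   b = 1 / (4 * cos(20 deg)) ~ 0.2660
#   d = a / sqrt(2)           ~ 1.0337
# so equal beam velocities b1 = b2 = b3 = b4 = 100 mm s-1 give u = v = e = 0
# and w = 0.2660 * 400 ~ 106.4 mm s-1.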
def adcp_ins2earth(u, v, w, heading, pitch, roll, vertical):
"""
Description:
This function converts the Instrument Coordinate transformed velocity
profiles to the Earth coordinate system. The calculation is defined in
the Data Product Specification for Velocity Profile and Echo Intensity
- DCN 1341-00750.
Implemented by:
2013-04-10: <NAME>. Initial code.
2014-04-04: <NAME>. Optimized code performance by replacing the for
loops previously used to calculate vectorized matrix multiplication
products with calls to np.einsum (numpy Einstein summation function).
2015-06-24: <NAME>. Changed implementation of 'vertical' in the roll
calculation so that if these values are equal to the CI fill value
(-999999999), when these fill values are replaced with nans, the nans
will propagate through to the data product output.
2015-06-24: <NAME>. Incorporated int fillvalue -> Nan.
Usage:
uu, vv, ww = adcp_ins2earth(u, v, w, heading, pitch, roll, vertical)
where
uu = "east" velocity profiles in earth coordinates [mm s-1]
vv = "north" velocity profiles in earth coordinates [mm s-1]
ww = "vertical" velocity profiles in earth coordinates [mm s-1]
u = east velocity profiles in instrument coordinates [mm s-1]
v = north velocity profiles in instrument coordinates [mm s-1]
w = vertical velocity profiles in instrument coordinates [mm s-1]
heading = instrument's uncorrected magnetic heading [centidegrees]
pitch = instrument pitch [centidegrees]
roll = instrument roll [centidegrees]
vertical = instrument's vertical orientation (0 = downward looking and
1 = upward looking)
References:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
"""
### the input beam data for adcp_ins2earth are always called using the output
### of adcp_beam2ins, so the following lines are not needed.
# insure we are dealing with array inputs
#u = np.atleast_2d(u)
#v = np.atleast_2d(v)
#w = np.atleast_2d(w)
# check for CI fill values before changing units.
# this function 'conditions' (np.atleast_1d) its inputs.
# TRDI does not apply its ADCP fill/bad value sentinels to compass data.
heading, pitch, roll, vertical = replace_fill_with_nan(None, heading, pitch, roll, vertical)
# change units from centidegrees to degrees
heading = heading / 100.0
pitch = pitch / 100.0
roll = roll / 100.0
# better way to calculate roll from the vertical orientation toggle;
# this will propagate R as nans if the vertical variable is missing from the data.
R = roll + vertical * 180.0
# roll
Rrad = np.radians(R)
cos_R = np.cos(Rrad)
sin_R = np.sin(Rrad)
# heading
Hrad = np.radians(heading)
cos_H = np.cos(Hrad)
sin_H = np.sin(Hrad)
# pitch
t1rad = np.radians(pitch)
t2rad = np.radians(roll)
Prad = np.arctan(np.tan(t1rad) * np.cos(t2rad))
cos_P = np.cos(Prad)
sin_P = np.sin(Prad)
# determine array size
n_packets = u.shape[0]
n_uvw = u.shape[1]
# initialize vectors to be used as matrix elements
ones = np.ones(n_packets)
zeros = ones * 0.0
# the rollaxis calls reorient the matrices so that their lead index is
# the data packet index
M1 = np.array([[cos_H, sin_H, zeros],
[-sin_H, cos_H, zeros],
[zeros, zeros, ones]])
M1 = np.rollaxis(M1, 2)
M2 = np.array([[ones, zeros, zeros],
[zeros, cos_P, -sin_P],
[zeros, sin_P, cos_P]])
M2 = np.rollaxis(M2, 2)
M3 = np.array([[cos_R, zeros, sin_R],
[zeros, ones, zeros],
[-sin_R, zeros, cos_R]])
M3 = np.rollaxis(M3, 2)
# construct input array of coordinates (velocities) to be transformed.
# the basis set is 3D (E,N,U) so that the middle dimension is sized at 3.
uvw = np.zeros((n_packets, 3, n_uvw))
# pack the coordinates (velocities) to be transformed into the appropriate
# slices.
uvw[:, 0, :] = u
uvw[:, 1, :] = v
uvw[:, 2, :] = w
# the Einstein summation is here configured to do the matrix
# multiplication MM(i,l) = M1(i,j) * M2(j,k) * M3(k,l) on each slice h.
MM = np.einsum('hij,hjk,hkl->hil', M1, M2, M3)
# the Einstein summation is here configured to do the matrix
# multiplication uvw_earth(i,m) = MM(i,l) * uvw(l,m) on each slice h.
uvw_earth = np.einsum('hil,hlm->him', MM, uvw)
# NOTE:
# these last two executable statements run about a factor of 2
# faster in the 10000 data packet performance tests versus combining
# these operations into the one statement:
# uvw_earth = np.einsum('hij,hjk,hkl,hlm->him', M1, M2, M3, uvw)
# break out the coordinate slices and return them
uu = uvw_earth[:, 0, :]
vv = uvw_earth[:, 1, :]
ww = uvw_earth[:, 2, :]
return (uu, vv, ww)
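# ---------------------------------------------------------------------------
# Hedged sanity-check sketch (added for illustration; the inputs are
# synthetic). With zero pitch and roll and an upward-looking unit, the
# rotation above reduces to a heading rotation (plus the upward-looking axis
# flips), so a 9000 centidegree (90 degree) heading maps an instrument
# "north" velocity onto earth "east".
def _example_adcp_ins2earth():
    u = np.array([[0.0]])
    v = np.array([[100.0]])                 # instrument "north" [mm s-1]
    w = np.array([[0.0]])
    heading = np.array([9000])              # [centidegrees]
    pitch = np.array([0])
    roll = np.array([0])
    vertical = np.array([1])                # upward looking
    # expected result: uu ~ 100, vv ~ 0, ww ~ 0 [mm s-1]
    return adcp_ins2earth(u, v, w, heading, pitch, roll, vertical)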
def magnetic_correction(theta, u, v):
"""
Description:
This function corrects velocity profiles for the magnetic variation
(declination) at the measurement location. The magnetic declination
is obtained from the 2010 World Magnetic Model (WMM2010) provided by
NOAA (see wmm_declination).
This version handles 'vectorized' input variables without using for
loops. It was specifically written to handle the case of a 1D array of
theta values, theta=f(i), with corresponding sets of 'u' and 'v' values
such that u=f(i,j) and v=f(i,j), where there are j 'u' and 'v' values
for each theta(i).
Implemented by:
2014-04-04: <NAME>. Initial code. This function is used to
calculate magnetic corrections by the functions contained
in this module instead of the function magnetic_correction
found in ion_functions.data.generic_functions.
2015-04-10: Russell Desiderio. Corrected a typo:
uv = np.atleast_2d(u) -> u = np.atleast_2d(u)
Usage:
u_cor, v_cor = magnetic_correction(theta, u, v)
where
u_cor = eastward velocity profiles, in earth coordinates, with
the correction for magnetic variation applied.
v_cor = northward velocity profiles, in earth coordinates,
with the correction for magnetic variation applied.
theta = magnetic variation based on location (latitude, longitude and
altitude) and date; units of theta are [degrees]
u = uncorrected eastward velocity profiles in earth coordinates
v = uncorrected northward velocity profiles in earth coordinates
References:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
OOI (2013). Data Product Specification for Turbulent Velocity Profile
and Echo Intensity. Document Control Number 1341-00760.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00760_Data_Product_SPEC_VELPROF_OOI.pdf)
"""
# force shapes of inputs to arrays
theta = np.atleast_1d(theta)
u = np.atleast_2d(u)
v = np.atleast_2d(v)
theta_rad = np.radians(theta)
cosT = np.cos(theta_rad)
sinT = np.sin(theta_rad)
M = np.array([[cosT, sinT],
[-sinT, cosT]])
# roll axes so that the lead index represents data packet #.
M = np.rollaxis(M, 2)
# the coordinate system is 2D, so the middle dimension is sized at 2.
uv = np.zeros((u.shape[0], 2, u.shape[1]))
# pack the coordinates to be rotated into the appropriate slices
uv[:, 0, :] = u
uv[:, 1, :] = v
# the Einstein summation is here configured to do the matrix
# multiplication uv_cor(i,k) = M(i,j) * uv(j,k) on each slice h.
uv_cor = np.einsum('hij,hjk->hik', M, uv)
# the magnetically corrected u values are:
u_cor = uv_cor[:, 0, :]
# the magnetically corrected v values are:
v_cor = uv_cor[:, 1, :]
# return corrected u and v values
return (u_cor, v_cor)
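# Hedged worked example (added for illustration; synthetic numbers): with a
# declination of theta = 30 degrees and an uncorrected profile of
# u = 1000 mm s-1 (magnetic east), v = 0, the rotation above gives
#   u_cor = 1000 * cos(30 deg) ~  866 mm s-1
#   v_cor = -1000 * sin(30 deg) = -500 mm s-1
# i.e. a velocity referenced to magnetic east acquires a true-southward
# component when the declination is positive (easterly), as expected.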
def adcp_bin_depths_bar(dist_first_bin, bin_size, num_bins, pressure, adcp_orientation, latitude):
"""
Description:
Calculates the center bin depths for PD0 and PD12 ADCP data. As defined
in the Data Product Specification for Velocity Profile and Echo
Intensity - DCN 1341-00750.
Implemented by:
2015-01-29: <NAME>. Initial code.
2015-06-26: <NAME>. Fixed the handling of the pressure variables.
Time-vectorized the code by finessing the conditional.
2015-06-30: <NAME>. Incorporated int fillvalue -> Nan.
Usage:
bin_depths = adcp_bin_depths_bar(dist_first_bin, bin_size, num_bins, pressure,
adcp_orientation, latitude)
where
bin_depths = [meters]
dist_first_bin = distance to the first ADCP bin [centimeters]
bin_size = depth of each ADCP bin [centimeters]
num_bins = number of ADCP bins [unitless]
pressure = pressure at the sensor head [bar]
adcp_orientation = 1=upward looking or 0=downward looking [unitless]
latitude = latitude of the instrument [degrees]
References:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
"""
# check for CI fill values.
pressure = replace_fill_with_nan(None, pressure)
# Convert pressure from bar to decibar
pressure_dbar = pressure * 10.0
# Calculate sensor depth using TEOS-10 toolbox z_from_p function
# note change of sign to make the sensor_depth variable positive
sensor_depth = -z_from_p(pressure_dbar, latitude)
return adcp_bin_depths_meters(dist_first_bin, bin_size, num_bins, sensor_depth, adcp_orientation)
def adcp_bin_depths_dapa(dist_first_bin, bin_size, num_bins, pressure, adcp_orientation, latitude):
"""
Description:
Calculates the center bin depths for PD0 and PD12 ADCP data. As defined
in the Data Product Specification for Velocity Profile and Echo
Intensity - DCN 1341-00750.
Implemented by:
2015-01-29: <NAME>. Initial code.
2015-06-26: <NAME>. Fixed the handling of the pressure variables.
Time-vectorized the code by finessing the conditional.
2015-06-30: <NAME>. Incorporated int fillvalue -> Nan.
Usage:
bin_depths = adcp_bin_depths_dapa(dist_first_bin, bin_size, num_bins, pressure,
adcp_orientation, latitude)
where
bin_depths = [meters]
dist_first_bin = distance to the first ADCP bin [centimeters]
bin_size = depth of each ADCP bin [centimeters]
num_bins = number of ADCP bins [unitless]
pressure = pressure at the sensor head [daPa]
adcp_orientation = 1=upward looking or 0=downward looking [unitless]
latitude = latitude of the instrument [degrees]
References:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
"""
# check for CI fill values.
pressure = replace_fill_with_nan(None, pressure)
# Convert pressure from decaPascal to decibar
pressure_dbar = pressure / 1000.0
# Calculate sensor depth using TEOS-10 toolbox z_from_p function
# note change of sign to make the sensor_depth variable positive
sensor_depth = -z_from_p(pressure_dbar, latitude)
return adcp_bin_depths_meters(dist_first_bin, bin_size, num_bins, sensor_depth, adcp_orientation)
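# Worked unit-conversion note (added for illustration): 1 dbar = 10^4 Pa =
# 1000 daPa, so a raw sensor reading of 1,000,000 daPa converts to 1000 dbar
# above, which z_from_p maps to a sensor depth of roughly 990 m (compare the
# check values quoted in the z_from_p docstring below).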
def z_from_p(p, lat, geo_strf_dyn_height=0, sea_surface_geopotential=0):
"""Calculates height from sea pressure using the computationally-efficient
75-term expression for density in terms of SA, CT and p (Roquet et al.,
2015). Dynamic height anomaly, geo_strf_dyn_height, if provided, must be
computed with its p_ref=0 (the surface). Also if provided, sea_surface_geopotental
is the geopotential at zero sea pressure.
Calls a function which calculates enthalpy assuming standard ocean salinity
and 0 degrees celsius.
Parameters
----------
p : pressure [dbar]
lat : latitude in decimal degrees north [-90..+90]
geo_strf_dyn_height : dynamic height anomaly [m^2/s^2]
sea_surface_geopotential : geopotential at zero sea pressure [ m^2/s^2 ]
Returns
-------
z : TEOS-10 height [m] : height is returned as a negative number; its
absolute value is the depth below the sea surface.
#################################################################
# Check values from TEOS-10 version 3.05 (matlab code): #
# from http://www.teos-10.org/pubs/gsw/html/gsw_z_from_p.html #
#################################################################
p = [10, 50, 125, 250, 600, 1000]
lat = 4
z_from_p(p, lat) =
[ -9.9445834469453, -49.7180897012550, -124.2726219409978,
-248.4700576548589, -595.8253480356214, -992.0919060719987]
Notes
-----
At sea level z = 0, and since z (HEIGHT) is defined to be positive upwards,
it follows that while z is positive in the atmosphere, it is NEGATIVE in
the ocean.
References
----------
IOC, SCOR and IAPSO, 2010: The international thermodynamic equation of
seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. Available from the TEOS-10 web site.
<NAME>., <NAME>, <NAME> and <NAME>, 2003:
Accurate and computationally efficient algorithms for potential
temperature and density of seawater. J. Atmosph. Ocean. Tech., 20,
pp. 730-741.
Moritz, 2000: Goedetic reference system 1980. J. Geodesy, 74, 128-133.
<NAME>., <NAME>, <NAME>, <NAME>, 2015: Accurate
polynomial expressions for the density and specifc volume of seawater
using the TEOS-10 standard. Ocean Modelling.
<NAME>., 1981: Practical conversion of pressure to depth.
Journal of Physical Oceanography, 11, 573-574.
IMPLEMENTATION NOTES:
<NAME>. 2015_07_01
versions 3.04 and 3.05 of the main function z_from_p are identical.
z_from_p calls the subroutine enthalpy_SSO_0_p; this subroutine
has been updated from ver 3.04 to 3.05.
the check values above for z_from_p have been updated to incorporate
this change using enthalpy_SSO_0_p ver 3.05.
"""
X = np.sin(np.deg2rad(lat))
sin2 = X ** 2
B = 9.780327 * (1.0 + (5.2792e-3 + (2.32e-5 * sin2)) * sin2)
gamma = 2.26e-07
A = -0.5 * gamma * B
C = enthalpy_SSO_0_p(p) - geo_strf_dyn_height
return -2 * C / (B + np.sqrt(B ** 2 - 4 * A * C))
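# Hedged usage note (added for illustration): the first check value quoted in
# the docstring above can be reproduced directly, e.g.
#   z_from_p(10.0, 4)  ->  approximately -9.9446 m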
def enthalpy_SSO_0_p(p):
"""
This documentation and code are copied from the MATLAB coding of this function.
%==========================================================================
% This function calculates enthalpy at the Standard Ocean Salinity, SSO,
% and at a Conservative Temperature of zero degrees C, as a function of
% pressure, p, in dbar, using a streamlined version of the 76-term
% computationally-efficient expression for specific volume, that is, a
% streamlined version of the code "gsw_enthalpy(SA,CT,p)".
%
% VERSION NUMBER: 3.05 (27th January 2015)
%
% REFERENCES:
% <NAME>., <NAME>, <NAME>, <NAME>, 2015: Accurate
% polynomial expressions for the density and specifc volume of seawater
% using the TEOS-10 standard. Ocean Modelling.
%
%==========================================================================
IMPLEMENTATION NOTES:
<NAME>. 2015_07_01. this subroutine has been updated
from ver 3.04 to 3.05.
"""
z = p * 1e-4
h006 = -2.1078768810e-9
h007 = 2.8019291329e-10
dynamic_enthalpy_SSO_0_p = z * (9.726613854843870e-4 + z * (-2.252956605630465e-5 + z * (
2.376909655387404e-6 + z * (-1.664294869986011e-7 + z * (
-5.988108894465758e-9 + z * (h006 + h007 * z))))))
enthalpy_SSO_0 = dynamic_enthalpy_SSO_0_p * 1.e8 # Note. 1e8 = db2Pa*1e4
return enthalpy_SSO_0
def adcp_bin_depths_meters(dist_first_bin, bin_size, num_bins, sensor_depth, adcp_orientation):
"""
Description:
Calculates the center bin depths for PD0, PD8 and PD12 ADCP data. As defined
in the Data Product Specification for Velocity Profile and Echo
Intensity - DCN 1341-00750.
Implemented by:
2015-01-30: <NAME>. Initial code.
2015-06-26: <NAME>. Time-vectorized the code by finessing the conditionals.
2015-06-30: <NAME>. Incorporated int fillvalue -> Nan.
Usage:
bin_depths_pd8 = adcp_bin_depths_meters(dist_first_bin, bin_size, num_bins, sensor_depth,
adcp_orientation)
where
bin_depths_pd8 = [meters]
dist_first_bin = distance to the first ADCP bin [centimeters]
bin_size = depth of each ADCP bin [centimeters]
num_bins = number of ADCP bins [unitless]
sensor_depth = estimated depth at the sensor head [meters]
adcp_orientation = 1=upward looking or 0=downward looking [unitless]
Notes:
The PD8 output format is a very sparse format. Other than num_bins, it does *not* record
any of the other input variables required by this DPA. Those must somehow be supplied "by
hand".
"""
# check for CI fill values.
#
# Note that these input parameters will not come from an IDD driver (except for possibly
# (num_bins) because the PD8 output format does not output them. Therefore, I don't know
# if they will be of type integer or not. However, ndarrays composed of float types are
# passed through the check-code unchanged, so run the inputs through in case they are of
# type int and in case -999999999 fill values are somehow present.
dist_first_bin, bin_size, num_bins, sensor_depth, adcp_orientation = replace_fill_with_nan(
None, dist_first_bin, bin_size, num_bins, sensor_depth, adcp_orientation)
# note, there is a CI problem not yet addressed if the time-vectorized values
# in num_bins are not all the same!! For now, assume they are all the same:
num_bins_constant = num_bins[0]
# make bin_numbers a row vector
bin_numbers = np.array([np.arange(num_bins_constant)])
# Convert from cm to meters
# the input variables are type integer, so divide by a real number
# to avoid truncation errors.
dist_first_bin = dist_first_bin / 100.0
bin_size = bin_size / 100.0
# make sure sensor depth is positive
sensor_depth = np.fabs(sensor_depth)
# Following the PD0 convention where
# adcp_orientation = 0 is downward looking, bindepths are added to sensor depth
# = 1 is upward looking, bindepths are subtracted from sensor depth
z_sign = 1.0 - 2.0 * adcp_orientation
# to broadcast the vertical time dimension correctly with the horizontal bin_numbers dimension,
# make all the 1D time arrays into column vectors to be processed with the bin_numbers row vector.
sensor_depth = sensor_depth.reshape(-1, 1)
z_sign = z_sign.reshape(-1, 1)
dist_first_bin = dist_first_bin.reshape(-1, 1)
bin_size = bin_size.reshape(-1, 1)
# Calculate bin depths
bin_depths_pd8 = sensor_depth + z_sign * (dist_first_bin + bin_size * bin_numbers)
return bin_depths_pd8
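# Hedged usage sketch (added for illustration; synthetic values): an
# upward-looking ADCP whose head sits at 100 m depth, with the first bin
# 500 cm away, 400 cm bins and 3 bins, gives
#   adcp_bin_depths_meters(np.array([500]), np.array([400]), np.array([3]),
#                          np.array([100.0]), np.array([1]))
#   -> array([[95., 91., 87.]])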
|
[
"numpy.radians",
"ion_functions.data.generic_functions.magnetic_declination",
"ion_functions.data.generic_functions.replace_fill_with_nan",
"numpy.isscalar",
"numpy.deg2rad",
"numpy.zeros",
"numpy.ones",
"numpy.einsum",
"numpy.sin",
"numpy.array",
"numpy.fabs",
"numpy.cos",
"numpy.rollaxis",
"numpy.tan",
"numpy.arange",
"numpy.atleast_1d",
"numpy.sqrt",
"numpy.atleast_2d"
] |
[((5640, 5658), 'numpy.atleast_1d', 'np.atleast_1d', (['lat'], {}), '(lat)\n', (5653, 5658), True, 'import numpy as np\n'), ((5670, 5688), 'numpy.atleast_1d', 'np.atleast_1d', (['lon'], {}), '(lon)\n', (5683, 5688), True, 'import numpy as np\n'), ((5699, 5716), 'numpy.atleast_1d', 'np.atleast_1d', (['dt'], {}), '(dt)\n', (5712, 5716), True, 'import numpy as np\n'), ((6005, 6039), 'ion_functions.data.generic_functions.magnetic_declination', 'magnetic_declination', (['lat', 'lon', 'dt'], {}), '(lat, lon, dt)\n', (6025, 6039), False, 'from ion_functions.data.generic_functions import magnetic_declination\n'), ((8971, 8989), 'numpy.atleast_1d', 'np.atleast_1d', (['lat'], {}), '(lat)\n', (8984, 8989), True, 'import numpy as np\n'), ((9001, 9019), 'numpy.atleast_1d', 'np.atleast_1d', (['lon'], {}), '(lon)\n', (9014, 9019), True, 'import numpy as np\n'), ((9030, 9047), 'numpy.atleast_1d', 'np.atleast_1d', (['dt'], {}), '(dt)\n', (9043, 9047), True, 'import numpy as np\n'), ((9318, 9352), 'ion_functions.data.generic_functions.magnetic_declination', 'magnetic_declination', (['lat', 'lon', 'dt'], {}), '(lat, lon, dt)\n', (9338, 9352), False, 'from ion_functions.data.generic_functions import magnetic_declination\n'), ((15110, 15126), 'numpy.atleast_2d', 'np.atleast_2d', (['u'], {}), '(u)\n', (15123, 15126), True, 'import numpy as np\n'), ((15136, 15152), 'numpy.atleast_2d', 'np.atleast_2d', (['v'], {}), '(v)\n', (15149, 15152), True, 'import numpy as np\n'), ((15225, 15268), 'ion_functions.data.generic_functions.replace_fill_with_nan', 'replace_fill_with_nan', (['ADCP_FILLVALUE', 'u', 'v'], {}), '(ADCP_FILLVALUE, u, v)\n', (15246, 15268), False, 'from ion_functions.data.generic_functions import replace_fill_with_nan\n'), ((15428, 15446), 'numpy.atleast_1d', 'np.atleast_1d', (['lat'], {}), '(lat)\n', (15441, 15446), True, 'import numpy as np\n'), ((15458, 15476), 'numpy.atleast_1d', 'np.atleast_1d', (['lon'], {}), '(lon)\n', (15471, 15476), True, 'import numpy as np\n'), ((15487, 15504), 'numpy.atleast_1d', 'np.atleast_1d', (['dt'], {}), '(dt)\n', (15500, 15504), True, 'import numpy as np\n'), ((15567, 15601), 'ion_functions.data.generic_functions.magnetic_declination', 'magnetic_declination', (['lat', 'lon', 'dt'], {}), '(lat, lon, dt)\n', (15587, 15601), False, 'from ion_functions.data.generic_functions import magnetic_declination\n'), ((17653, 17669), 'numpy.atleast_2d', 'np.atleast_2d', (['u'], {}), '(u)\n', (17666, 17669), True, 'import numpy as np\n'), ((17679, 17695), 'numpy.atleast_2d', 'np.atleast_2d', (['v'], {}), '(v)\n', (17692, 17695), True, 'import numpy as np\n'), ((17768, 17811), 'ion_functions.data.generic_functions.replace_fill_with_nan', 'replace_fill_with_nan', (['ADCP_FILLVALUE', 'u', 'v'], {}), '(ADCP_FILLVALUE, u, v)\n', (17789, 17811), False, 'from ion_functions.data.generic_functions import replace_fill_with_nan\n'), ((17971, 17989), 'numpy.atleast_1d', 'np.atleast_1d', (['lat'], {}), '(lat)\n', (17984, 17989), True, 'import numpy as np\n'), ((18001, 18019), 'numpy.atleast_1d', 'np.atleast_1d', (['lon'], {}), '(lon)\n', (18014, 18019), True, 'import numpy as np\n'), ((18030, 18047), 'numpy.atleast_1d', 'np.atleast_1d', (['dt'], {}), '(dt)\n', (18043, 18047), True, 'import numpy as np\n'), ((18110, 18144), 'ion_functions.data.generic_functions.magnetic_declination', 'magnetic_declination', (['lat', 'lon', 'dt'], {}), '(lat, lon, dt)\n', (18130, 18144), False, 'from ion_functions.data.generic_functions import magnetic_declination\n'), ((19094, 19134), 
'ion_functions.data.generic_functions.replace_fill_with_nan', 'replace_fill_with_nan', (['ADCP_FILLVALUE', 'w'], {}), '(ADCP_FILLVALUE, w)\n', (19115, 19134), False, 'from ion_functions.data.generic_functions import replace_fill_with_nan\n'), ((19990, 20030), 'ion_functions.data.generic_functions.replace_fill_with_nan', 'replace_fill_with_nan', (['ADCP_FILLVALUE', 'e'], {}), '(ADCP_FILLVALUE, e)\n', (20011, 20030), False, 'from ion_functions.data.generic_functions import replace_fill_with_nan\n'), ((22378, 22396), 'numpy.atleast_1d', 'np.atleast_1d', (['lat'], {}), '(lat)\n', (22391, 22396), True, 'import numpy as np\n'), ((22408, 22426), 'numpy.atleast_1d', 'np.atleast_1d', (['lon'], {}), '(lon)\n', (22421, 22426), True, 'import numpy as np\n'), ((22437, 22454), 'numpy.atleast_1d', 'np.atleast_1d', (['dt'], {}), '(dt)\n', (22450, 22454), True, 'import numpy as np\n'), ((22725, 22759), 'ion_functions.data.generic_functions.magnetic_declination', 'magnetic_declination', (['lat', 'lon', 'dt'], {}), '(lat, lon, dt)\n', (22745, 22759), False, 'from ion_functions.data.generic_functions import magnetic_declination\n'), ((25110, 25128), 'numpy.atleast_1d', 'np.atleast_1d', (['lat'], {}), '(lat)\n', (25123, 25128), True, 'import numpy as np\n'), ((25140, 25158), 'numpy.atleast_1d', 'np.atleast_1d', (['lon'], {}), '(lon)\n', (25153, 25158), True, 'import numpy as np\n'), ((25169, 25186), 'numpy.atleast_1d', 'np.atleast_1d', (['dt'], {}), '(dt)\n', (25182, 25186), True, 'import numpy as np\n'), ((25457, 25491), 'ion_functions.data.generic_functions.magnetic_declination', 'magnetic_declination', (['lat', 'lon', 'dt'], {}), '(lat, lon, dt)\n', (25477, 25491), False, 'from ion_functions.data.generic_functions import magnetic_declination\n'), ((29955, 29996), 'ion_functions.data.generic_functions.replace_fill_with_nan', 'replace_fill_with_nan', (['ADCP_FILLVALUE', 'b5'], {}), '(ADCP_FILLVALUE, b5)\n', (29976, 29996), False, 'from ion_functions.data.generic_functions import replace_fill_with_nan\n'), ((33163, 33195), 'ion_functions.data.generic_functions.replace_fill_with_nan', 'replace_fill_with_nan', (['None', 'raw'], {}), '(None, raw)\n', (33184, 33195), False, 'from ion_functions.data.generic_functions import replace_fill_with_nan\n'), ((34790, 34807), 'numpy.atleast_2d', 'np.atleast_2d', (['b1'], {}), '(b1)\n', (34803, 34807), True, 'import numpy as np\n'), ((34818, 34835), 'numpy.atleast_2d', 'np.atleast_2d', (['b2'], {}), '(b2)\n', (34831, 34835), True, 'import numpy as np\n'), ((34846, 34863), 'numpy.atleast_2d', 'np.atleast_2d', (['b3'], {}), '(b3)\n', (34859, 34863), True, 'import numpy as np\n'), ((34874, 34891), 'numpy.atleast_2d', 'np.atleast_2d', (['b4'], {}), '(b4)\n', (34887, 34891), True, 'import numpy as np\n'), ((34916, 34969), 'ion_functions.data.generic_functions.replace_fill_with_nan', 'replace_fill_with_nan', (['ADCP_FILLVALUE', 'b1', 'b2', 'b3', 'b4'], {}), '(ADCP_FILLVALUE, b1, b2, b3, b4)\n', (34937, 34969), False, 'from ion_functions.data.generic_functions import replace_fill_with_nan\n'), ((38103, 38162), 'ion_functions.data.generic_functions.replace_fill_with_nan', 'replace_fill_with_nan', (['None', 'heading', 'pitch', 'roll', 'vertical'], {}), '(None, heading, pitch, roll, vertical)\n', (38124, 38162), False, 'from ion_functions.data.generic_functions import replace_fill_with_nan\n'), ((38520, 38533), 'numpy.radians', 'np.radians', (['R'], {}), '(R)\n', (38530, 38533), True, 'import numpy as np\n'), ((38547, 38559), 'numpy.cos', 'np.cos', (['Rrad'], {}), '(Rrad)\n', (38553, 
38559), True, 'import numpy as np\n'), ((38573, 38585), 'numpy.sin', 'np.sin', (['Rrad'], {}), '(Rrad)\n', (38579, 38585), True, 'import numpy as np\n'), ((38613, 38632), 'numpy.radians', 'np.radians', (['heading'], {}), '(heading)\n', (38623, 38632), True, 'import numpy as np\n'), ((38646, 38658), 'numpy.cos', 'np.cos', (['Hrad'], {}), '(Hrad)\n', (38652, 38658), True, 'import numpy as np\n'), ((38672, 38684), 'numpy.sin', 'np.sin', (['Hrad'], {}), '(Hrad)\n', (38678, 38684), True, 'import numpy as np\n'), ((38711, 38728), 'numpy.radians', 'np.radians', (['pitch'], {}), '(pitch)\n', (38721, 38728), True, 'import numpy as np\n'), ((38742, 38758), 'numpy.radians', 'np.radians', (['roll'], {}), '(roll)\n', (38752, 38758), True, 'import numpy as np\n'), ((38825, 38837), 'numpy.cos', 'np.cos', (['Prad'], {}), '(Prad)\n', (38831, 38837), True, 'import numpy as np\n'), ((38851, 38863), 'numpy.sin', 'np.sin', (['Prad'], {}), '(Prad)\n', (38857, 38863), True, 'import numpy as np\n'), ((39016, 39034), 'numpy.ones', 'np.ones', (['n_packets'], {}), '(n_packets)\n', (39023, 39034), True, 'import numpy as np\n'), ((39176, 39255), 'numpy.array', 'np.array', (['[[cos_H, sin_H, zeros], [-sin_H, cos_H, zeros], [zeros, zeros, ones]]'], {}), '([[cos_H, sin_H, zeros], [-sin_H, cos_H, zeros], [zeros, zeros, ones]])\n', (39184, 39255), True, 'import numpy as np\n'), ((39306, 39324), 'numpy.rollaxis', 'np.rollaxis', (['M1', '(2)'], {}), '(M1, 2)\n', (39317, 39324), True, 'import numpy as np\n'), ((39335, 39414), 'numpy.array', 'np.array', (['[[ones, zeros, zeros], [zeros, cos_P, -sin_P], [zeros, sin_P, cos_P]]'], {}), '([[ones, zeros, zeros], [zeros, cos_P, -sin_P], [zeros, sin_P, cos_P]])\n', (39343, 39414), True, 'import numpy as np\n'), ((39465, 39483), 'numpy.rollaxis', 'np.rollaxis', (['M2', '(2)'], {}), '(M2, 2)\n', (39476, 39483), True, 'import numpy as np\n'), ((39494, 39573), 'numpy.array', 'np.array', (['[[cos_R, zeros, sin_R], [zeros, ones, zeros], [-sin_R, zeros, cos_R]]'], {}), '([[cos_R, zeros, sin_R], [zeros, ones, zeros], [-sin_R, zeros, cos_R]])\n', (39502, 39573), True, 'import numpy as np\n'), ((39624, 39642), 'numpy.rollaxis', 'np.rollaxis', (['M3', '(2)'], {}), '(M3, 2)\n', (39635, 39642), True, 'import numpy as np\n'), ((39811, 39842), 'numpy.zeros', 'np.zeros', (['(n_packets, 3, n_uvw)'], {}), '((n_packets, 3, n_uvw))\n', (39819, 39842), True, 'import numpy as np\n'), ((40161, 40202), 'numpy.einsum', 'np.einsum', (['"""hij,hjk,hkl->hil"""', 'M1', 'M2', 'M3'], {}), "('hij,hjk,hkl->hil', M1, M2, M3)\n", (40170, 40202), True, 'import numpy as np\n'), ((40363, 40397), 'numpy.einsum', 'np.einsum', (['"""hil,hlm->him"""', 'MM', 'uvw'], {}), "('hil,hlm->him', MM, uvw)\n", (40372, 40397), True, 'import numpy as np\n'), ((43386, 43406), 'numpy.atleast_1d', 'np.atleast_1d', (['theta'], {}), '(theta)\n', (43399, 43406), True, 'import numpy as np\n'), ((43416, 43432), 'numpy.atleast_2d', 'np.atleast_2d', (['u'], {}), '(u)\n', (43429, 43432), True, 'import numpy as np\n'), ((43442, 43458), 'numpy.atleast_2d', 'np.atleast_2d', (['v'], {}), '(v)\n', (43455, 43458), True, 'import numpy as np\n'), ((43478, 43495), 'numpy.radians', 'np.radians', (['theta'], {}), '(theta)\n', (43488, 43495), True, 'import numpy as np\n'), ((43508, 43525), 'numpy.cos', 'np.cos', (['theta_rad'], {}), '(theta_rad)\n', (43514, 43525), True, 'import numpy as np\n'), ((43538, 43555), 'numpy.sin', 'np.sin', (['theta_rad'], {}), '(theta_rad)\n', (43544, 43555), True, 'import numpy as np\n'), ((43567, 43606), 'numpy.array', 
'np.array', (['[[cosT, sinT], [-sinT, cosT]]'], {}), '([[cosT, sinT], [-sinT, cosT]])\n', (43575, 43606), True, 'import numpy as np\n'), ((43703, 43720), 'numpy.rollaxis', 'np.rollaxis', (['M', '(2)'], {}), '(M, 2)\n', (43714, 43720), True, 'import numpy as np\n'), ((43808, 43845), 'numpy.zeros', 'np.zeros', (['(u.shape[0], 2, u.shape[1])'], {}), '((u.shape[0], 2, u.shape[1]))\n', (43816, 43845), True, 'import numpy as np\n'), ((44112, 44144), 'numpy.einsum', 'np.einsum', (['"""hij,hjk->hik"""', 'M', 'uv'], {}), "('hij,hjk->hik', M, uv)\n", (44121, 44144), True, 'import numpy as np\n'), ((45996, 46033), 'ion_functions.data.generic_functions.replace_fill_with_nan', 'replace_fill_with_nan', (['None', 'pressure'], {}), '(None, pressure)\n', (46017, 46033), False, 'from ion_functions.data.generic_functions import replace_fill_with_nan\n'), ((48046, 48083), 'ion_functions.data.generic_functions.replace_fill_with_nan', 'replace_fill_with_nan', (['None', 'pressure'], {}), '(None, pressure)\n', (48067, 48083), False, 'from ion_functions.data.generic_functions import replace_fill_with_nan\n'), ((55207, 55306), 'ion_functions.data.generic_functions.replace_fill_with_nan', 'replace_fill_with_nan', (['None', 'dist_first_bin', 'bin_size', 'num_bins', 'sensor_depth', 'adcp_orientation'], {}), '(None, dist_first_bin, bin_size, num_bins,\n sensor_depth, adcp_orientation)\n', (55228, 55306), False, 'from ion_functions.data.generic_functions import replace_fill_with_nan\n'), ((55897, 55918), 'numpy.fabs', 'np.fabs', (['sensor_depth'], {}), '(sensor_depth)\n', (55904, 55918), True, 'import numpy as np\n'), ((33007, 33027), 'numpy.isscalar', 'np.isscalar', (['sfactor'], {}), '(sfactor)\n', (33018, 33027), True, 'import numpy as np\n'), ((35158, 35170), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (35165, 35170), True, 'import numpy as np\n'), ((51534, 51549), 'numpy.deg2rad', 'np.deg2rad', (['lat'], {}), '(lat)\n', (51544, 51549), True, 'import numpy as np\n'), ((35028, 35041), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (35034, 35041), True, 'import numpy as np\n'), ((35065, 35078), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (35071, 35078), True, 'import numpy as np\n'), ((38781, 38794), 'numpy.tan', 'np.tan', (['t1rad'], {}), '(t1rad)\n', (38787, 38794), True, 'import numpy as np\n'), ((38797, 38810), 'numpy.cos', 'np.cos', (['t2rad'], {}), '(t2rad)\n', (38803, 38810), True, 'import numpy as np\n'), ((51763, 51790), 'numpy.sqrt', 'np.sqrt', (['(B ** 2 - 4 * A * C)'], {}), '(B ** 2 - 4 * A * C)\n', (51770, 51790), True, 'import numpy as np\n'), ((55582, 55610), 'numpy.arange', 'np.arange', (['num_bins_constant'], {}), '(num_bins_constant)\n', (55591, 55610), True, 'import numpy as np\n')]
|
import numpy as np
import os
from scipy.io.wavfile import write as audio_write
### Generate data
data = np.random.uniform(size=(10000)) # single example
DATA = np.random.uniform(size=(10,10000)) # multi example
wavfiles, numpyfiles = [], []
datafolder = 'data_intro/data'
os.makedirs(datafolder,exist_ok=True)
os.makedirs(datafolder + '_numpy',exist_ok=True)
for k,D in enumerate(DATA):
wavfiles.append(os.path.join(datafolder,str(k) + '.wav'))
numpyfiles.append(os.path.join(datafolder + '_numpy',str(k) + '.npy'))
np.save(numpyfiles[k], D)
audio_write(wavfiles[k], rate=1, data=D)
# -------------------------------------------------------------------------
### Create an STFT, get mean and std over time
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
# create processing chain
dp = ProcessingChain()
dp.add(Framing(windowsize=10,stepsize=10,axis=0))
dp.add(FFT(axis=1))
dp.add(Aggregation(methods=['mean', 'std'], axis=0, combine='concatenate'))
dp.summary()
# apply processing chain to data
# make sure to provide the sampling frequency to dp. Kwargs are always accessible to
# all processing layers. Therefore, make sure parameter names DO NOT overlap
output_data = dp(data, fs=1)
print(output_data.shape)
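# Rough shape expectation (hedged; the exact size depends on dabstract's FFT implementation):
# 10000 samples framed into ~1000 windows of 10 samples; if the FFT keeps a one-sided
# spectrum (10 // 2 + 1 = 6 bins), the mean+std concatenation yields a 1-D vector of length 12.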
print('\n\n\n')
# -------------------------------------------------------------------------
### Create an STFT, get mean and std over time (alternative)
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
# create processing chain
# in this example, fs is already set in the processing chain
dp = ProcessingChain()
dp.add(Framing(windowsize=10,stepsize=10,axis=0,fs=1))
dp.add(FFT(axis=1))
dp.add(Aggregation(methods=['mean', 'std'], axis=0, combine='concatenate'))
dp.summary()
# apply processing chain to data
output_data = dp(data)
print(output_data.shape)
print('\n\n\n')
# -------------------------------------------------------------------------
### Create an STFT, get mean and std over time and fit this to normalization
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
# create processing chain
dp = ProcessingChain()
dp.add(Framing(windowsize=10,stepsize=10,axis=0))
dp.add(FFT(axis=1))
dp.add(Aggregation(methods=['mean', 'std'], axis=0, combine='concatenate'))
dp.add(Normalizer(type='standard'))
dp.summary()
# fit processing chain as Normalizer contains a 'fit' method to init parameters
dp.fit(DATA, fs=1)
# apply processing chain to data
output_data = dp(data, fs=1)
print(output_data.shape)
print('\n\n\n')
# -------------------------------------------------------------------------
### Same as before but the data is loaded from a wav file.
### As a consequence no extra fs information needs to be provided for processing; it is read from the wav file.
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
# define processing chain
dp = ProcessingChain()
dp.add(WavDatareader())
dp.add(Framing(windowsize=10,stepsize=10,axis=0))
dp.add(FFT(axis=1))
dp.add(Aggregation(methods=['mean', 'std'], axis=0, combine='concatenate'))
dp.add(Normalizer(type='standard'))
dp.summary()
# fit to wavfiles
dp.fit(wavfiles) #fit from wav files
#dp.fit(['data_intro/data/0.wav', 'data_intro/data/1.wav', 'data_intro/data/3.wav', ...])
output_data = dp(wavfiles[2]) # process from wavfiles
#output_data = dp('data_intro/data/2.wav')
print(output_data.shape)
print('\n\n\n')
# -------------------------------------------------------------------------
### Same as before but the data is loaded from a numpy file.
### As a consequence extra fs information needs to be provided for processing.
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
# define processing chain
dp = ProcessingChain()
dp.add(NumpyDatareader())
dp.add(Framing(windowsize=10,stepsize=10,axis=0))
dp.add(FFT(axis=1))
dp.add(Aggregation(methods=['mean', 'std'], axis=0, combine='concatenate'))
dp.add(Normalizer(type='standard'))
# fit to numpy files
dp.fit(numpyfiles, fs=1) #fit from npy files
#dp.fit(['data_intro/data_numpy/0.npy', 'data_intro/data_numpy/1.npy', 'data_intro/data_numpy/3.npy', ...], fs=1)
output_data = dp(numpyfiles[2],fs=1) # process from npy files
#output_data = dp('data_intro/data_numpy/2.npy',fs=1)
print(output_data.shape)
print('\n\n\n')
# -------------------------------------------------------------------------
### Create an STFT, get mean and std over time and fit this to normalization (created from hardcoded configuration)
from dabstract.dataprocessor import ProcessingChain
config = {'chain': [{'name': 'NumpyDatareader'},
{'name': 'Framing',
'parameters': {'axis': 0, 'stepsize': 10, 'windowsize': 10}},
{'name': 'FFT',
'parameters': {'axis': 1}},
{'name': 'Logarithm'},
{'name': 'Aggregation',
'parameters': {'axis': 0,
'combine': 'concatenate',
'methods': ['mean', 'std']}},
{'name': 'Normalizer',
'parameters': {'type': 'standard'}}]}
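# (Interpretation, hedged) each chain entry selects a processor class via 'name' and passes
# its constructor kwargs via 'parameters'; entries without 'parameters' use the defaults.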
dp = ProcessingChain(config)
dp.summary()
# OR
# dp = ProcessingChain()
# dp.add(config)
dp.fit(numpyfiles, fs=1) #fit from npy files
#dp.fit(['data_intro/data_numpy/0.npy', 'data_intro/data_numpy/1.npy', 'data_intro/data_numpy/3.npy', ...], fs=1)
output_data = dp(numpyfiles[2],fs=1) # process from npy files
#output_data = dp('data_intro/data_numpy/2.npy',fs=1)
print(output_data.shape)
print('\n\n\n')
# -------------------------------------------------------------------------
### Create an STFT, get mean and std over time and fit this to normalization (created from yaml config)
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
from dabstract.utils import load_yaml_config
# get yaml configuration
config = load_yaml_config(filename='Readme_1_dp_config', path=os.path.join('configs','dp'))
# create processing chain from the yaml config
dp = ProcessingChain(config)
# fit data
dp.fit(DATA, fs=1)
# process
output_data = dp(data, fs=1)
print(output_data.shape)
print('\n\n\n')
# -------------------------------------------------------------------------
### Same as before, but now the yaml loading fct and feed to ProcessingChain() is available in a one-liner.
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
from dabstract.utils import load_yaml_config
# get yaml configuration and process with ProcessingChain()
dp = load_yaml_config(filename='Readme_1_dp_config', path=os.path.join('configs','dp'),post_process=ProcessingChain)
# fit data
dp.fit(DATA, fs=1)
# process
output_data = dp(data, fs=1)
print(output_data.shape)
print('\n\n\n')
# -------------------------------------------------------------------------
### Example on how to add a custom processing layer
# -- processing chain from config BIS
from dabstract.dataprocessor import ProcessingChain, Processor
from dabstract.dataprocessor.processors import *
from dabstract.utils import load_yaml_config
# custom processor.
# This is a minimal example of what a processor can do.
class custom_processor(Processor):
def process(self, data, **kwargs):
return data * 100, {}
# return data, information that can be propagated to consecutive layers
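        # (hedged illustration, not in the original) the second return value could carry
        # metadata for downstream layers, e.g. `return data * 100, {'scale': 100}`, so that
        # consecutive processors receive 'scale' through their **kwargs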
# get yaml configuration and process with ProcessingChain()
dp = load_yaml_config(filename='Readme_1_dp_config', path=os.path.join('configs','dp'),post_process=ProcessingChain)
dp.summary()
# add a custom processor to the dp.chain
dp.add(custom_processor())
dp.summary()
# Fit data to chain
dp.fit(DATA, fs=1)
# process data
output_data = dp(data, fs=1)
print(output_data.shape)
print('\n\n\n')
# -------------------------------------------------------------------------
### Example on how to add a custom processing with fit option
# -- processing chain from config BIS
from dabstract.dataprocessor import ProcessingChain, Processor
from dabstract.dataprocessor.processors import *
from dabstract.utils import load_yaml_config
# custom processor.
# This is a minimal example of what a processor can do.
class custom_processor(Processor):
def process(self, data, **kwargs):
return (data - self.mean) * 100, {}
# return data, information that can be propagated to consecutive layers
def fit(self, data, info, **kwargs):
self.mean = np.mean(data)
# get yaml configuration and process with ProcessingChain()
dp = load_yaml_config(filename='Readme_1_dp_config', path=os.path.join('configs','dp'),post_process=ProcessingChain)
dp.summary()
# add custom processor
dp.add(custom_processor())
dp.summary()
# fit data (it's recursive, so both the Normalizer and the custom_processor are fitted on the data)
dp.fit(DATA, fs=1)
# process data
output_data = dp(data, fs=1)
print(output_data.shape)
print('\n\n\n')
# -------------------------------------------------------------------------
### Example on how to use any function in a dabstract processing chain and still use info propagation
# -- processing chain from config BIS
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
from dabstract.utils import load_yaml_config
def custom_fct(data,**kwargs):
return (data - 5) * 100
# get yaml configuration and process with ProcessingChain()
dp = load_yaml_config(filename='Readme_1_dp_config', path=os.path.join('configs','dp'),post_process=ProcessingChain)
dp.summary()
# add custom processors
dp.add(custom_fct)
dp.add(lambda x: x*100)
dp.summary()
# fit data (it's recursive; only layers with a fit method, such as the Normalizer, are fitted on the data)
dp.fit(DATA, fs=1)
# process data
output_data = dp(data, fs=1)
print(output_data.shape)
print('\n\n\n')
# -------------------------------------------------------------------------
### Example on how to add a custom processing layer within configuration using !class
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
from dabstract.utils import load_yaml_config
# get yaml configuration and process with ProcessingChain()
dp = load_yaml_config(filename='Readme_1_dp_config_custom', path=os.path.join('configs','dp'),post_process=ProcessingChain)
# fit data (it's recursive, so both the Normalizer and the custom processor are fitted on the data)
dp.fit(DATA, fs=1)
# process data
output_data = dp(data, fs=1)
print(output_data.shape)
print('\n\n\n')
# -------------------------------------------------------------------------
### Create a lazy data source from disk with additional processing
### Adds a lazy mapping function to DATA and allow multi-example indexing
# -- processing chain for multiple examples
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
from dabstract.utils import load_yaml_config
from dabstract.abstract.abstract import MapAbstract, DataAbstract
# get yaml configuration and process with ProcessingChain()
dp = load_yaml_config(filename='Readme_1_dp_config', path=os.path.join('configs','dp'),post_process=ProcessingChain)
# Fit data
dp.fit(DATA, fs=1)
# Make and abstract data source
# you can now access data as with typical indexing
# e.g. datab[0], datab[1]
# in this way it accesses DATA[0] and DATA[1] respectively, with the processing chain dp applied
datab = MapAbstract(DATA,dp, fs=1)
print(datab)
# allow for multi indexing, e.g. datab[:] or datab[0,1]
datab = DataAbstract(datab, fs=1)
print(datab)
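# Hedged usage sketch (multi-example indexing, as described above):
# single = datab[0]     # DATA[0] pushed through the processing chain dp
# several = datab[0:2]  # several examples at once via DataAbstract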
print('\n\n\n')
# -------------------------------------------------------------------------
### Add multi-processing to lazy data source
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
from dabstract.utils import load_yaml_config
# get yaml configuration and process with ProcessingChain()
dp = load_yaml_config(filename='Readme_1_dp_config', path=os.path.join('configs','dp'),post_process=ProcessingChain)
# Fit data
dp.fit(DATA, fs=1)
# Make and abstract data source
# you can now access data as with typical indexing
# e.g. datab[0], datab[1]
# in this way it accesses DATA[0] and DATA[1] respectively, with the processing chain dp applied
datab = MapAbstract(DATA,dp, fs = 1)
print(datab)
# allow for multi indexing, e.g. datab[:] or datab[0,1]
# and allow for multiprocessing with the workers and buffer_len flags
# both indexing and iteration are parallelized
datab = DataAbstract(datab, workers=2, buffer_len=2)
print(datab)
for k,d in enumerate(datab):
print('Example ' + str(k))
print(d)
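# Note (hedged interpretation of the flags above): 'workers' runs the dp mapping in background
# processes, while 'buffer_len' bounds how many processed examples are prefetched ahead of the consumer.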
|
[
"numpy.random.uniform",
"numpy.save",
"os.makedirs",
"scipy.io.wavfile.write",
"dabstract.abstract.abstract.DataAbstract",
"numpy.mean",
"dabstract.abstract.abstract.MapAbstract",
"os.path.join",
"dabstract.dataprocessor.ProcessingChain"
] |
[((105, 134), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(10000)'}), '(size=10000)\n', (122, 134), True, 'import numpy as np\n'), ((161, 196), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(10, 10000)'}), '(size=(10, 10000))\n', (178, 196), True, 'import numpy as np\n'), ((273, 311), 'os.makedirs', 'os.makedirs', (['datafolder'], {'exist_ok': '(True)'}), '(datafolder, exist_ok=True)\n', (284, 311), False, 'import os\n'), ((311, 360), 'os.makedirs', 'os.makedirs', (["(datafolder + '_numpy')"], {'exist_ok': '(True)'}), "(datafolder + '_numpy', exist_ok=True)\n", (322, 360), False, 'import os\n'), ((857, 874), 'dabstract.dataprocessor.ProcessingChain', 'ProcessingChain', ([], {}), '()\n', (872, 874), False, 'from dabstract.dataprocessor import ProcessingChain\n'), ((1631, 1648), 'dabstract.dataprocessor.ProcessingChain', 'ProcessingChain', ([], {}), '()\n', (1646, 1648), False, 'from dabstract.dataprocessor import ProcessingChain\n'), ((2197, 2214), 'dabstract.dataprocessor.ProcessingChain', 'ProcessingChain', ([], {}), '()\n', (2212, 2214), False, 'from dabstract.dataprocessor import ProcessingChain\n'), ((2984, 3001), 'dabstract.dataprocessor.ProcessingChain', 'ProcessingChain', ([], {}), '()\n', (2999, 3001), False, 'from dabstract.dataprocessor import ProcessingChain\n'), ((3888, 3905), 'dabstract.dataprocessor.ProcessingChain', 'ProcessingChain', ([], {}), '()\n', (3903, 3905), False, 'from dabstract.dataprocessor import ProcessingChain\n'), ((5318, 5341), 'dabstract.dataprocessor.ProcessingChain', 'ProcessingChain', (['config'], {}), '(config)\n', (5333, 5341), False, 'from dabstract.dataprocessor import ProcessingChain\n'), ((6210, 6233), 'dabstract.dataprocessor.ProcessingChain', 'ProcessingChain', (['config'], {}), '(config)\n', (6225, 6233), False, 'from dabstract.dataprocessor import ProcessingChain\n'), ((11558, 11585), 'dabstract.abstract.abstract.MapAbstract', 'MapAbstract', (['DATA', 'dp'], {'fs': '(1)'}), '(DATA, dp, fs=1)\n', (11569, 11585), False, 'from dabstract.abstract.abstract import MapAbstract, DataAbstract\n'), ((11660, 11685), 'dabstract.abstract.abstract.DataAbstract', 'DataAbstract', (['datab'], {'fs': '(1)'}), '(datab, fs=1)\n', (11672, 11685), False, 'from dabstract.abstract.abstract import MapAbstract, DataAbstract\n'), ((12389, 12416), 'dabstract.abstract.abstract.MapAbstract', 'MapAbstract', (['DATA', 'dp'], {'fs': '(1)'}), '(DATA, dp, fs=1)\n', (12400, 12416), False, 'from dabstract.abstract.abstract import MapAbstract, DataAbstract\n'), ((12615, 12659), 'dabstract.abstract.abstract.DataAbstract', 'DataAbstract', (['datab'], {'workers': '(2)', 'buffer_len': '(2)'}), '(datab, workers=2, buffer_len=2)\n', (12627, 12659), False, 'from dabstract.abstract.abstract import MapAbstract, DataAbstract\n'), ((529, 554), 'numpy.save', 'np.save', (['numpyfiles[k]', 'D'], {}), '(numpyfiles[k], D)\n', (536, 554), True, 'import numpy as np\n'), ((559, 599), 'scipy.io.wavfile.write', 'audio_write', (['wavfiles[k]'], {'rate': '(1)', 'data': 'D'}), '(wavfiles[k], rate=1, data=D)\n', (570, 599), True, 'from scipy.io.wavfile import write as audio_write\n'), ((6128, 6157), 'os.path.join', 'os.path.join', (['"""configs"""', '"""dp"""'], {}), "('configs', 'dp')\n", (6140, 6157), False, 'import os\n'), ((6794, 6823), 'os.path.join', 'os.path.join', (['"""configs"""', '"""dp"""'], {}), "('configs', 'dp')\n", (6806, 6823), False, 'import os\n'), ((7667, 7696), 'os.path.join', 'os.path.join', (['"""configs"""', '"""dp"""'], {}), "('configs', 'dp')\n", 
(7679, 7696), False, 'import os\n'), ((8610, 8623), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (8617, 8623), True, 'import numpy as np\n'), ((8743, 8772), 'os.path.join', 'os.path.join', (['"""configs"""', '"""dp"""'], {}), "('configs', 'dp')\n", (8755, 8772), False, 'import os\n'), ((9624, 9653), 'os.path.join', 'os.path.join', (['"""configs"""', '"""dp"""'], {}), "('configs', 'dp')\n", (9636, 9653), False, 'import os\n'), ((10415, 10444), 'os.path.join', 'os.path.join', (['"""configs"""', '"""dp"""'], {}), "('configs', 'dp')\n", (10427, 10444), False, 'import os\n'), ((11271, 11300), 'os.path.join', 'os.path.join', (['"""configs"""', '"""dp"""'], {}), "('configs', 'dp')\n", (11283, 11300), False, 'import os\n'), ((12102, 12131), 'os.path.join', 'os.path.join', (['"""configs"""', '"""dp"""'], {}), "('configs', 'dp')\n", (12114, 12131), False, 'import os\n')]
|
from operator import mul
try:
reduce
except NameError:
from functools import reduce
import numpy as np
def logit(x):
return np.log(x) - np.log(1 - x)
def logitsum(xs):
total = 0
for x in xs:
total += logit(x)
return total
def prod(*x):
return reduce(mul, x, 1)
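# Hypothetical usage (illustration only, not part of the original module):
#   logit(0.5)           -> 0.0
#   logitsum([0.5, 0.5]) -> 0.0
#   prod(2, 3, 4)        -> 24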
|
[
"functools.reduce",
"numpy.log"
] |
[((286, 303), 'functools.reduce', 'reduce', (['mul', 'x', '(1)'], {}), '(mul, x, 1)\n', (292, 303), False, 'from functools import reduce\n'), ((139, 148), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (145, 148), True, 'import numpy as np\n'), ((151, 164), 'numpy.log', 'np.log', (['(1 - x)'], {}), '(1 - x)\n', (157, 164), True, 'import numpy as np\n')]
|
import pytest
import json
from ..embedding.model import EmbeddingModel
from ..feature_extraction import FeatureExtraction
import numpy as np
class TestFeatureExtraction():
@classmethod
def setup_class(self):
self.embedder_DE = EmbeddingModel(lang="de")
self.embedder_EN = EmbeddingModel(lang="en")
self.fe_DE = FeatureExtraction(self.embedder_DE, None)
self.fe_EN = FeatureExtraction(self.embedder_EN, None)
def test_mean_of_pairwise_cosine_distances(self):
ems = np.array([
[-1,1,1],
[-11,3,9],
[22,0,8]
], dtype=float)
assert abs(0.9770 - FeatureExtraction.mean_of_pairwise_cosine_distances(ems)) < 1e-4
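        # hedged sanity check of the 0.9770 reference (assuming distance = 1 - cosine similarity):
        # the three pairwise distances are ~0.0858, ~1.3453 and ~1.4999, whose mean is ~0.9770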
def test_keywords_similarity_DE(self):
keywords_sim = [
"Huhn",
"Ei",
"Vogel",
"Geflügel"
]
keywords_diff = [
"Code",
"Geflügel",
"Siebträger",
"<NAME>"
]
ss_sim = self.fe_DE.get_keywords_similarity(keywords_sim)
ss_diff = self.fe_DE.get_keywords_similarity(keywords_diff)
assert ss_sim < ss_diff
def test_keywords_similarity_empty_DE(self):
empty = []
ss = self.fe_DE.get_keywords_similarity(empty)
assert ss == 0
def test_keywords_similarity_one_DE(self):
empty = ["test"]
ss = self.fe_DE.get_keywords_similarity(empty)
assert ss == 0
def test_keywords_similarity_EN(self):
keywords_sim = [
"Chicken",
"Egg",
"Bird",
"Poultry"
]
keywords_diff = [
"Code",
"Poultry",
"Portafilter",
"Donald Trump"
]
ss_sim = self.fe_EN.get_keywords_similarity(keywords_sim)
ss_diff = self.fe_EN.get_keywords_similarity(keywords_diff)
assert ss_sim < ss_diff
def test_keywords_similarity_empty_EN(self):
empty = []
ss = self.fe_EN.get_keywords_similarity(empty)
assert ss == 0
def test_keywords_similarity_one_EN(self):
empty = ["test"]
ss = self.fe_EN.get_keywords_similarity(empty)
assert ss == 0
|
[
"numpy.array"
] |
[((518, 578), 'numpy.array', 'np.array', (['[[-1, 1, 1], [-11, 3, 9], [22, 0, 8]]'], {'dtype': 'float'}), '([[-1, 1, 1], [-11, 3, 9], [22, 0, 8]], dtype=float)\n', (526, 578), True, 'import numpy as np\n')]
|