import sys
from functools import partial
from typing import Any, Callable, List, Optional, Tuple, cast
import numpy as np
from sklearn import feature_selection as fs
from . import _utils
def _get_mi_func(discrete: bool) -> Callable:
"""
Get mutual information function depending on whether the attribute is discrete
Parameters
----------
discrete : bool
whether the attribute is discrete
Returns
-------
Callable
mutual information function handle
"""
RANDOM_STATE = getattr(sys.modules[__name__.split(".")[0]], "RANDOM_STATE")
return partial(
fs.mutual_info_classif if discrete else fs.mutual_info_regression,
random_state=RANDOM_STATE,
)
def _latent_attr_mutual_info(
z: np.ndarray, a: np.ndarray, discrete: bool = False
) -> np.ndarray:
"""
Calculate mutual information between latent vectors and a target attribute.
Parameters
----------
z : np.ndarray, (n_samples, n_features)
a batch of latent vectors
a : np.ndarray, (n_samples,)
a batch of one attribute
discrete : bool, optional
whether the attribute is discrete, by default False
Returns
-------
np.ndarray, (n_features,)
mutual information between each latent vector dimension and the attribute
"""
return _get_mi_func(discrete)(z, a)
def _attr_latent_mutual_info(
z: np.ndarray, a: np.ndarray, discrete: bool = False
) -> np.ndarray:
"""
    Calculate mutual information between a single latent dimension and a set of target attributes.
    Parameters
    ----------
    z : np.ndarray, (n_samples,)
        a batch of a single latent dimension
    a : np.ndarray, (n_samples, n_attr)
        a batch of attributes
    discrete : bool, optional
        whether the attributes are discrete, by default False
    Returns
    -------
    np.ndarray, (n_attr,)
        mutual information between the latent dimension and each attribute
"""
return np.concatenate(
[_get_mi_func(discrete)(z[:, None], a[:, i]) for i in range(a.shape[1])]
)
def _single_mutual_info(a: np.ndarray, b: np.ndarray, discrete: bool) -> float:
"""
Calculate mutual information between two variables
Parameters
----------
a : np.ndarray, (n_samples,)
a batch of a feature variable
b : np.ndarray, (n_samples,)
a batch of a target variable
    discrete : bool
        whether the target variable is discrete
Returns
-------
float
mutual information between the variables
"""
return _get_mi_func(discrete)(a[:, None], b)[0]
def _entropy(a: np.ndarray, discrete: bool = False) -> float:
"""
Calculate entropy of a variable
Parameters
----------
a : np.ndarray, (n_samples,)
a batch of the variable
discrete : bool, optional
whether the variable is discrete, by default False
Returns
-------
float
entropy of the variable
"""
return _single_mutual_info(a, a, discrete)
def _conditional_entropy(
ai: np.ndarray, aj: np.ndarray, discrete: bool = False
) -> float:
"""
Calculate conditional entropy of a variable given another variable.
.. math:: \mathcal{H}(a_i|a_j) = \mathcal{H}(a_i) - \mathcal{I}(a_i, a_j),
where :math:`\mathcal{I}(\cdot,\cdot)` is mutual information, and :math:`\mathcal{H}(\cdot)` is entropy.
Parameters
----------
ai : np.ndarray, (n_samples,)
a batch of the first variable
aj : np.ndarray, (n_samples,)
a batch of the conditioning variable
discrete : bool, optional
whether the variables are discrete, by default False
Returns
-------
float
conditional entropy of `ai` given `aj`.
"""
return _entropy(ai, discrete) - _single_mutual_info(ai, aj, discrete)
def _xgap(mi: np.ndarray, zi: int, reg_dim: List) -> Tuple[np.ndarray, Optional[int]]:
# TODO: merge this function with utils._top2gap
mizi = mi[zi]
mi = np.delete(mi, reg_dim)
mi_sort = np.sort(mi)
mi_argsort = np.argsort(mi)
return (mizi - mi_sort[-1]), mi_argsort[-1] + len(reg_dim)
def mig(
z: np.ndarray,
a: np.ndarray,
reg_dim: Optional[List[int]] = None,
discrete: bool = False,
fill_reg_dim: bool = False,
) -> np.ndarray:
"""
Calculate Mutual Information Gap (MIG) between latent vectors and attributes.
    Mutual Information Gap measures the degree of disentanglement. For each attribute, MIG is calculated as the difference between the mutual information of the attribute with its most informative latent dimension and that with its second-most informative latent dimension, normalized by the entropy of the attribute. Mathematically, MIG is given by
.. math:: \operatorname{MIG}(a_i, \mathbf{z}) = \dfrac{\mathcal{I}(a_i, z_j)-\mathcal{I}(a_i, z_k)}{\mathcal{H}(a_i)},
where :math:`j=\operatorname{arg}\max_n \mathcal{I}(a_i, z_n)`, :math:`k=\operatorname{arg}\max_{n≠j} \mathcal{I}(a_i, z_n)`, :math:`\mathcal{I}(\cdot,\cdot)` is mutual information, and :math:`\mathcal{H}(\cdot)` is entropy.
If `reg_dim` is specified, :math:`j` is instead overwritten to `reg_dim[i]`, while :math:`k=\operatorname{arg}\max_{n≠j} \mathcal{I}(a_i, z_n)` as usual.
MIG is best applied for independent attributes.
Parameters
----------
z : np.ndarray, (n_samples, n_features)
a batch of latent vectors
a : np.ndarray, (n_samples, n_attributes) or (n_samples,)
a batch of attribute(s)
reg_dim : Optional[List], optional
regularized dimensions, by default None
Attribute `a[:, i]` is regularized by `z[:, reg_dim[i]]`. If `reg_dim` is provided, the first mutual information is always taken between the regularized dimension and the attribute, and MIG may be negative.
discrete : bool, optional
Whether the attributes are discrete, by default False
fill_reg_dim : bool, optional
Whether to automatically fill `reg_dim` with `range(n_attributes)`, by default False. If `fill_reg_dim` is True, the `reg_dim` behavior is the same as the dependency-aware family. This option is mainly used for compatibility with the dependency-aware family in a bundle.
Returns
-------
np.ndarray, (n_attributes,)
MIG for each attribute
See Also
--------
.dmig : Dependency-Aware Mutual Information Gap
.xmig : Dependency-Blind Mutual Information Gap
.dlig : Dependency-Aware Latent Information Gap
References
----------
.. [1] <NAME>, <NAME>, <NAME>, and <NAME>, “Isolating sources of disentanglement in variational autoencoders”, in Proceedings of the 32nd International Conference on Neural Information Processing Systems, 2018.
"""
z, a, reg_dim = _utils._validate_za_shape(z, a, reg_dim, fill_reg_dim=fill_reg_dim)
_, n_attr = a.shape
ret = np.zeros((n_attr,))
for i in range(n_attr):
ai = a[:, i]
zi = reg_dim[i] if reg_dim is not None else None
en = _entropy(ai, discrete)
mi = _latent_attr_mutual_info(z, ai, discrete)
gap, _ = _utils._top2gap(mi, zi)
ret[i] = gap / en
return ret
def dmig(
z: np.ndarray,
a: np.ndarray,
reg_dim: Optional[List[int]] = None,
discrete: bool = False,
) -> np.ndarray:
"""
Calculate Dependency-Aware Mutual Information Gap (DMIG) between latent vectors and attributes
Dependency-Aware Mutual Information Gap (DMIG) is a dependency-aware version of MIG that accounts for attribute interdependence observed in real-world data. Mathematically, DMIG is given by
.. math:: \operatorname{DMIG}(a_i, \mathbf{z}) = \dfrac{\mathcal{I}(a_i, z_j)-\mathcal{I}(a_i, z_k)}{\mathcal{H}(a_i|a_l)},
where :math:`j=\operatorname{arg}\max_n \mathcal{I}(a_i, z_n)`, :math:`k=\operatorname{arg}\max_{n≠j} \mathcal{I}(a_i, z_n)`, :math:`\mathcal{I}(\cdot,\cdot)` is mutual information, :math:`\mathcal{H}(\cdot|\cdot)` is conditional entropy, and :math:`a_l` is the attribute regularized by :math:`z_k`. If :math:`z_k` is not regularizing any attribute, DMIG reduces to the usual MIG. DMIG compensates for the reduced maximum possible value of the numerator due to attribute interdependence.
If `reg_dim` is specified, :math:`j` is instead overwritten to `reg_dim[i]`, while :math:`k=\operatorname{arg}\max_{n≠j} \mathcal{I}(a_i, z_n)` as usual.
Parameters
----------
z : np.ndarray, (n_samples, n_features)
a batch of latent vectors
a : np.ndarray, (n_samples, n_attributes) or (n_samples,)
a batch of attribute(s)
reg_dim : Optional[List], optional
regularized dimensions, by default None
Attribute `a[:, i]` is regularized by `z[:, reg_dim[i]]`. If `None`, `a[:, i]` is assumed to be regularized by `z[:, i]`.
discrete : bool, optional
Whether the attributes are discrete, by default False
Returns
-------
np.ndarray, (n_attributes,)
DMIG for each attribute
See Also
--------
.mig : Mutual Information Gap
.xmig : Dependency-Blind Mutual Information Gap
.dlig : Dependency-Aware Latent Information Gap
References
----------
.. [1] <NAME> and <NAME>, “Evaluation of Latent Space Disentanglement in the Presence of Interdependent Attributes”, in Extended Abstracts of the Late-Breaking Demo Session of the 22nd International Society for Music Information Retrieval Conference, 2021.
.. [2] <NAME>, “Controllable Music: Supervised Learning of Disentangled Representations for Music Generation”, 2021.
"""
z, a, reg_dim = _utils._validate_za_shape(z, a, reg_dim, fill_reg_dim=True)
reg_dim = cast(List[int], reg_dim) # make the type checker happy
_, n_attr = a.shape
ret = np.zeros((n_attr,))
for i in range(n_attr):
ai = a[:, i]
zi = reg_dim[i]
mi = _latent_attr_mutual_info(z, ai, discrete)
gap, zj = _utils._top2gap(mi, zi)
if zj in reg_dim:
cen = _conditional_entropy(ai, a[:, reg_dim.index(zj)], discrete)
else:
cen = _entropy(ai, discrete)
ret[i] = gap / cen
return ret
def dlig(
z: np.ndarray,
a: np.ndarray,
reg_dim: Optional[List[int]] = None,
discrete: bool = False,
):
"""
Calculate Dependency-Aware Latent Information Gap (DLIG) between latent vectors and attributes
Dependency-aware Latent Information Gap (DLIG) is a latent-centric counterpart to DMIG. DLIG evaluates disentanglement of a set of semantic attributes :math:`\{a_i\}` with respect to a latent dimension :math:`z_d` such that
.. math:: \operatorname{DLIG}(\{a_i\}, z_d) = \dfrac{\mathcal{I}(a_j, z_d)-\mathcal{I}(a_k, z_d)}{\mathcal{H}(a_j|a_k)},
where :math:`j=\operatorname{arg}\max_i \mathcal{I}(a_i, z_d)`, :math:`k=\operatorname{arg}\max_{i≠j} \mathcal{I}(a_i, z_d)`, :math:`\mathcal{I}(\cdot,\cdot)` is mutual information, and :math:`\mathcal{H}(\cdot|\cdot)` is conditional entropy.
If `reg_dim` is specified, :math:`j` is instead overwritten to `reg_dim[i]`, while :math:`k=\operatorname{arg}\max_{i≠j} \mathcal{I}(a_i, z_d)` as usual.
Parameters
----------
z : np.ndarray, (n_samples, n_features)
a batch of latent vectors
a : np.ndarray, (n_samples, n_attributes)
a batch of at least two attributes
reg_dim : Optional[List], optional
regularized dimensions, by default None
Attribute `a[:, i]` is regularized by `z[:, reg_dim[i]]`. If `None`, `a[:, i]` is assumed to be regularized by `z[:, i]`.
discrete : bool, optional
Whether the attributes are discrete, by default False
Returns
-------
np.ndarray, (n_attributes,)
DLIG for each attribute-regularizing latent dimension
See Also
--------
.mig : Mutual Information Gap
.dmig : Dependency-Aware Mutual Information Gap
.xmig : Dependency-Blind Mutual Information Gap
..modularity.modularity : Modularity
References
----------
.. [1] <NAME>, “Controllable Music: Supervised Learning of Disentangled Representations for Music Generation”, 2021.
"""
z, a, reg_dim = _utils._validate_za_shape(z, a, reg_dim, fill_reg_dim=True)
reg_dim = cast(List[int], reg_dim) # make the type checker happy
_, n_attr = a.shape # same as len(reg_dim)
assert n_attr > 1, "DLIG requires at least two attributes"
ret = np.zeros((n_attr,))
for i, zi in enumerate(reg_dim):
mi = _attr_latent_mutual_info(z[:, zi], a, discrete)
gap, j = _utils._top2gap(mi, i)
cen = _conditional_entropy(a[:, i], a[:, j], discrete)
ret[i] = gap / cen
return ret
def xmig(
z: np.ndarray,
a: np.ndarray,
reg_dim: Optional[List[int]] = None,
discrete: bool = False,
):
"""
Calculate Dependency-Blind Mutual Information Gap (XMIG) between latent vectors and attributes
Dependency-blind Mutual Information Gap (XMIG) is a complementary metric to MIG and DMIG that measures the gap in mutual information with the subtrahend restricted to dimensions which do not regularize any attribute. XMIG is given by
.. math:: \operatorname{XMIG}(a_i, \mathbf{z}) = \dfrac{\mathcal{I}(a_i, z_j)-\mathcal{I}(a_i, z_k)}{\mathcal{H}(a_i)},
where :math:`j=\operatorname{arg}\max_d \mathcal{I}(a_i, z_d)`, :math:`k=\operatorname{arg}\max_{d∉\mathcal{D}} \mathcal{I}(a_i, z_d)`, :math:`\mathcal{I}(\cdot,\cdot)` is mutual information, :math:`\mathcal{H}(\cdot)` is entropy, and :math:`\mathcal{D}` is a set of latent indices which do not regularize any attribute. XMIG allows monitoring of latent disentanglement exclusively against attribute-unregularized latent dimensions.
If `reg_dim` is specified, :math:`j` is instead overwritten to `reg_dim[i]`, while :math:`k=\operatorname{arg}\max_{d∉\mathcal{D}} \mathcal{I}(a_i, z_d)` as usual.
Parameters
----------
z : np.ndarray, (n_samples, n_features)
a batch of latent vectors
a : np.ndarray, (n_samples, n_attributes) or (n_samples,)
a batch of attribute(s)
reg_dim : Optional[List], optional
regularized dimensions, by default None
Attribute `a[:, i]` is regularized by `z[:, reg_dim[i]]`. If `None`, `a[:, i]` is assumed to be regularized by `z[:, i]`.
discrete : bool, optional
Whether the attributes are discrete, by default False
Returns
-------
np.ndarray, (n_attributes,)
XMIG for each attribute
See Also
--------
.mig : Mutual Information Gap
.dmig : Dependency-Aware Mutual Information Gap
.dlig : Dependency-Aware Latent Information Gap
References
----------
.. [1] <NAME>, “Controllable Music: Supervised Learning of Disentangled Representations for Music Generation”, 2021.
"""
z, a, reg_dim = _utils._validate_za_shape(z, a, reg_dim, fill_reg_dim=True)
reg_dim = cast(List[int], reg_dim) # make the type checker happy
_, n_features = z.shape
_, n_attr = a.shape
assert n_features > n_attr
ret = np.zeros((n_attr,))
for i in range(n_attr):
ai = a[:, i]
zi = reg_dim[i]
en = _entropy(ai, discrete)
mi = _latent_attr_mutual_info(z, ai, discrete)
gap, _ = _xgap(mi, zi, reg_dim)
ret[i] = gap / en
return ret
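# --- Usage sketch (added for illustration; not part of the original module) ---
# How these metrics might be called from user code, assuming the top-level
# package defines the RANDOM_STATE constant that _get_mi_func looks up.
# Shapes follow the docstrings above; the data here is synthetic.
#
#     import numpy as np
#     rng = np.random.default_rng(0)
#     z = rng.normal(size=(500, 6))                    # latent vectors, 6 dims
#     a = z[:, :2] + 0.1 * rng.normal(size=(500, 2))   # 2 attributes tied to z[:, :2]
#     mig(z, a)                   # -> shape (2,), one gap per attribute
#     dmig(z, a, reg_dim=[0, 1])  # dependency-aware variant, same shape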
|
[
"functools.partial",
"typing.cast",
"numpy.zeros",
"numpy.argsort",
"numpy.sort",
"numpy.delete"
] |
[((651, 756), 'functools.partial', 'partial', (['(fs.mutual_info_classif if discrete else fs.mutual_info_regression)'], {'random_state': 'RANDOM_STATE'}), '(fs.mutual_info_classif if discrete else fs.mutual_info_regression,\n random_state=RANDOM_STATE)\n', (658, 756), False, 'from functools import partial\n'), ((4085, 4107), 'numpy.delete', 'np.delete', (['mi', 'reg_dim'], {}), '(mi, reg_dim)\n', (4094, 4107), True, 'import numpy as np\n'), ((4122, 4133), 'numpy.sort', 'np.sort', (['mi'], {}), '(mi)\n', (4129, 4133), True, 'import numpy as np\n'), ((4151, 4165), 'numpy.argsort', 'np.argsort', (['mi'], {}), '(mi)\n', (4161, 4165), True, 'import numpy as np\n'), ((6977, 6996), 'numpy.zeros', 'np.zeros', (['(n_attr,)'], {}), '((n_attr,))\n', (6985, 6996), True, 'import numpy as np\n'), ((9810, 9834), 'typing.cast', 'cast', (['List[int]', 'reg_dim'], {}), '(List[int], reg_dim)\n', (9814, 9834), False, 'from typing import Any, Callable, List, Optional, Tuple, cast\n'), ((9902, 9921), 'numpy.zeros', 'np.zeros', (['(n_attr,)'], {}), '((n_attr,))\n', (9910, 9921), True, 'import numpy as np\n'), ((12393, 12417), 'typing.cast', 'cast', (['List[int]', 'reg_dim'], {}), '(List[int], reg_dim)\n', (12397, 12417), False, 'from typing import Any, Callable, List, Optional, Tuple, cast\n'), ((12573, 12592), 'numpy.zeros', 'np.zeros', (['(n_attr,)'], {}), '((n_attr,))\n', (12581, 12592), True, 'import numpy as np\n'), ((15081, 15105), 'typing.cast', 'cast', (['List[int]', 'reg_dim'], {}), '(List[int], reg_dim)\n', (15085, 15105), False, 'from typing import Any, Callable, List, Optional, Tuple, cast\n'), ((15233, 15252), 'numpy.zeros', 'np.zeros', (['(n_attr,)'], {}), '((n_attr,))\n', (15241, 15252), True, 'import numpy as np\n')]
|
import numpy as np
import autodisc as ad
from autodisc.systems.lenia import LeniaStatistics
from goalrepresent.datasets import LENIADataset
from goalrepresent.helper.randomhelper import set_seed
from goalrepresent.models import PCAModel
EPS = 0.0001
def calc_static_statistics(final_obs):
    '''Calculates the final statistics for the last Lenia observation'''
feature_vector = np.zeros(17)
cur_idx = 0
size_y = final_obs.shape[0]
size_x = final_obs.shape[1]
num_of_cells = size_y * size_x
# calc initial center of mass and use it as a reference point to "center" the world around it
mid_y = (size_y - 1) / 2
mid_x = (size_x - 1) / 2
mid = np.array([mid_y, mid_x])
activation_center_of_mass = np.array(LeniaStatistics.center_of_mass(final_obs))
activation_shift_to_center = mid - activation_center_of_mass
activation = final_obs
centered_activation = np.roll(activation, activation_shift_to_center.astype(int), (0, 1))
# calculate the image moments
activation_moments = ad.helper.statistics.calc_image_moments(centered_activation)
# activation mass
activation_mass = activation_moments.m00
    activation_mass_data = activation_mass / num_of_cells  # total activation divided by the number of cells
feature_vector[cur_idx] = activation_mass_data
cur_idx += 1
# activation volume
activation_volume = np.sum(activation > EPS)
activation_volume_data = activation_volume / num_of_cells
feature_vector[cur_idx] = activation_volume_data
cur_idx += 1
# activation density
if activation_volume == 0:
activation_density_data = 0
else:
activation_density_data = activation_mass / activation_volume
feature_vector[cur_idx] = activation_density_data
cur_idx += 1
# mass distribution around the center
distance_weight_matrix = LeniaStatistics.calc_distance_matrix(final_obs.shape[0],
final_obs.shape[1])
if activation_mass <= EPS:
activation_mass_distribution = 1.0
else:
activation_mass_distribution = np.sum(distance_weight_matrix * centered_activation) / np.sum(
centered_activation)
activation_mass_distribution_data = activation_mass_distribution
feature_vector[cur_idx] = activation_mass_distribution_data
cur_idx += 1
# activation moments
activation_hu1_data = activation_moments.hu1
feature_vector[cur_idx] = activation_hu1_data
cur_idx += 1
activation_hu2_data = activation_moments.hu2
feature_vector[cur_idx] = activation_hu2_data
cur_idx += 1
activation_hu3_data = activation_moments.hu3
feature_vector[cur_idx] = activation_hu3_data
cur_idx += 1
    activation_hu4_data = activation_moments.hu4
feature_vector[cur_idx] = activation_hu4_data
cur_idx += 1
activation_hu5_data = activation_moments.hu5
feature_vector[cur_idx] = activation_hu5_data
cur_idx += 1
activation_hu6_data = activation_moments.hu6
feature_vector[cur_idx] = activation_hu6_data
cur_idx += 1
activation_hu7_data = activation_moments.hu7
feature_vector[cur_idx] = activation_hu7_data
cur_idx += 1
activation_hu8_data = activation_moments.hu8
feature_vector[cur_idx] = activation_hu8_data
cur_idx += 1
activation_flusser9_data = activation_moments.flusser9
feature_vector[cur_idx] = activation_flusser9_data
cur_idx += 1
activation_flusser10_data = activation_moments.flusser10
feature_vector[cur_idx] = activation_flusser10_data
cur_idx += 1
activation_flusser11_data = activation_moments.flusser11
feature_vector[cur_idx] = activation_flusser11_data
cur_idx += 1
activation_flusser12_data = activation_moments.flusser12
feature_vector[cur_idx] = activation_flusser12_data
cur_idx += 1
activation_flusser13_data = activation_moments.flusser13
feature_vector[cur_idx] = activation_flusser13_data
cur_idx += 1
return feature_vector
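# --- Usage sketch (added for illustration; not part of the original script) ---
# calc_static_statistics takes a single 2D Lenia observation (activation map in [0, 1])
# and returns a 17-dimensional feature vector:
#
#     obs = np.random.rand(256, 256)          # stand-in for a final Lenia observation
#     features = calc_static_statistics(obs)  # features.shape == (17,)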
if __name__ == '__main__':
set_seed(0)
n_features_BC = 8
n_statistics = 17
dataset_config = LENIADataset.default_config()
dataset_config.data_root = '/gpfswork/rech/zaj/ucf28eq/data/lenia_datasets/data_005/'
dataset_config.split = 'train'
dataset = LENIADataset(config=dataset_config)
    # compute the static statistics for each image of the dataset
coefficients = np.zeros((dataset.n_images, n_statistics))
for idx in range(dataset.n_images):
im = dataset.get_image(idx).squeeze().numpy()
coeffs = calc_static_statistics(im)
coefficients[idx] = coeffs
# do PCA to keep only principal components according to reference dataset
pca_model = PCAModel(n_features=n_statistics, n_latents=n_features_BC)
X_all = coefficients.reshape(coefficients.shape[0], -1)
z_all = pca_model.fit(X_all)
pca_model.save_checkpoint('reference_dataset_pca_lenia_statistics_descriptors_model.pickle')
np.savez('reference_dataset_pca_lenia_statistics_descriptors_range.npz',
low=np.percentile(z_all, 0.01, axis=0), high=np.percentile(z_all, 99.9, axis=0))
print('pca explained variance: {}'.format(pca_model.algorithm.explained_variance_ratio_.sum()))
print(
'analytic_space_range: {} - {}'.format(np.percentile(z_all, 0.01, axis=0), np.percentile(z_all, 99.9, axis=0)))
np.savez('reference_dataset_pca_lenia_statistics_descriptors_statistics.npz',
descriptors=coefficients, pca_explained_variance=pca_model.algorithm.explained_variance_ratio_)
np.savez('reference_dataset_pca_lenia_statistics_descriptors_values.npz',z=z_all)
|
[
"numpy.sum",
"goalrepresent.datasets.LENIADataset",
"autodisc.helper.statistics.calc_image_moments",
"numpy.zeros",
"numpy.percentile",
"autodisc.systems.lenia.LeniaStatistics.calc_distance_matrix",
"goalrepresent.helper.randomhelper.set_seed",
"numpy.array",
"numpy.savez",
"goalrepresent.datasets.LENIADataset.default_config",
"goalrepresent.models.PCAModel",
"autodisc.systems.lenia.LeniaStatistics.center_of_mass"
] |
[((384, 396), 'numpy.zeros', 'np.zeros', (['(17)'], {}), '(17)\n', (392, 396), True, 'import numpy as np\n'), ((680, 704), 'numpy.array', 'np.array', (['[mid_y, mid_x]'], {}), '([mid_y, mid_x])\n', (688, 704), True, 'import numpy as np\n'), ((1038, 1098), 'autodisc.helper.statistics.calc_image_moments', 'ad.helper.statistics.calc_image_moments', (['centered_activation'], {}), '(centered_activation)\n', (1077, 1098), True, 'import autodisc as ad\n'), ((1416, 1440), 'numpy.sum', 'np.sum', (['(activation > EPS)'], {}), '(activation > EPS)\n', (1422, 1440), True, 'import numpy as np\n'), ((1889, 1965), 'autodisc.systems.lenia.LeniaStatistics.calc_distance_matrix', 'LeniaStatistics.calc_distance_matrix', (['final_obs.shape[0]', 'final_obs.shape[1]'], {}), '(final_obs.shape[0], final_obs.shape[1])\n', (1925, 1965), False, 'from autodisc.systems.lenia import LeniaStatistics\n'), ((4095, 4106), 'goalrepresent.helper.randomhelper.set_seed', 'set_seed', (['(0)'], {}), '(0)\n', (4103, 4106), False, 'from goalrepresent.helper.randomhelper import set_seed\n'), ((4174, 4203), 'goalrepresent.datasets.LENIADataset.default_config', 'LENIADataset.default_config', ([], {}), '()\n', (4201, 4203), False, 'from goalrepresent.datasets import LENIADataset\n'), ((4343, 4378), 'goalrepresent.datasets.LENIADataset', 'LENIADataset', ([], {'config': 'dataset_config'}), '(config=dataset_config)\n', (4355, 4378), False, 'from goalrepresent.datasets import LENIADataset\n'), ((4452, 4494), 'numpy.zeros', 'np.zeros', (['(dataset.n_images, n_statistics)'], {}), '((dataset.n_images, n_statistics))\n', (4460, 4494), True, 'import numpy as np\n'), ((4764, 4822), 'goalrepresent.models.PCAModel', 'PCAModel', ([], {'n_features': 'n_statistics', 'n_latents': 'n_features_BC'}), '(n_features=n_statistics, n_latents=n_features_BC)\n', (4772, 4822), False, 'from goalrepresent.models import PCAModel\n'), ((5422, 5604), 'numpy.savez', 'np.savez', (['"""reference_dataset_pca_lenia_statistics_descriptors_statistics.npz"""'], {'descriptors': 'coefficients', 'pca_explained_variance': 'pca_model.algorithm.explained_variance_ratio_'}), "('reference_dataset_pca_lenia_statistics_descriptors_statistics.npz',\n descriptors=coefficients, pca_explained_variance=pca_model.algorithm.\n explained_variance_ratio_)\n", (5430, 5604), True, 'import numpy as np\n'), ((5615, 5702), 'numpy.savez', 'np.savez', (['"""reference_dataset_pca_lenia_statistics_descriptors_values.npz"""'], {'z': 'z_all'}), "('reference_dataset_pca_lenia_statistics_descriptors_values.npz', z\n =z_all)\n", (5623, 5702), True, 'import numpy as np\n'), ((747, 788), 'autodisc.systems.lenia.LeniaStatistics.center_of_mass', 'LeniaStatistics.center_of_mass', (['final_obs'], {}), '(final_obs)\n', (777, 788), False, 'from autodisc.systems.lenia import LeniaStatistics\n'), ((2155, 2207), 'numpy.sum', 'np.sum', (['(distance_weight_matrix * centered_activation)'], {}), '(distance_weight_matrix * centered_activation)\n', (2161, 2207), True, 'import numpy as np\n'), ((2210, 2237), 'numpy.sum', 'np.sum', (['centered_activation'], {}), '(centered_activation)\n', (2216, 2237), True, 'import numpy as np\n'), ((5108, 5142), 'numpy.percentile', 'np.percentile', (['z_all', '(0.01)'], {'axis': '(0)'}), '(z_all, 0.01, axis=0)\n', (5121, 5142), True, 'import numpy as np\n'), ((5149, 5183), 'numpy.percentile', 'np.percentile', (['z_all', '(99.9)'], {'axis': '(0)'}), '(z_all, 99.9, axis=0)\n', (5162, 5183), True, 'import numpy as np\n'), ((5344, 5378), 'numpy.percentile', 'np.percentile', (['z_all', 
'(0.01)'], {'axis': '(0)'}), '(z_all, 0.01, axis=0)\n', (5357, 5378), True, 'import numpy as np\n'), ((5380, 5414), 'numpy.percentile', 'np.percentile', (['z_all', '(99.9)'], {'axis': '(0)'}), '(z_all, 99.9, axis=0)\n', (5393, 5414), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 10 11:31:48 2019
@author: <NAME>
"""
import numpy as np
from .stagData import StagData, StagCartesianGeometry, StagYinYangGeometry
from .stagData import SliceData, CartesianSliceData, YinYangSliceData
from .stagData import InterpolatedSliceData
from .stagError import GridInterpolationError
from scipy.interpolate import griddata
from time import time
def im(textMessage,pName,verbose):
"""Print verbose internal message. This function depends on the
argument of self.verbose. If self.verbose == True then the message
will be displayed on the terminal.
<i> : textMessage = str, message to display
pName = str, name of the subprogram
verbose = bool, condition for the verbose output
"""
if verbose == True:
print('>> '+pName+'| '+textMessage)
def regularSphericalGrid(radius,spacing=1):
"""Return a regular spherical grid for a single depth. The regularity is
guaranted on longitude and latitude. e.g: For a spacing parameter in
input of 1, the regular grid produced in return will have 1 point per
deg in lon and in lat and the same radius to the center of the sphere.
<i> : spacing = int, number of degree in longitude and latitude between
each point of the grid
<o> : ((x,y,z),(R,Lon,Lat)) where (x,y,z) car the cartesian coordinates
of points in the new grid and (R,Lon,Lat) the spherical coordinates
of points in the new grid."""
#First, creation of point in spherical coordinates
nbinLon = int(361/spacing)
nbinLat = int((181)/spacing)
nbinR = 1
lon = np.linspace(0,360,nbinLon)*np.pi/180
lat = np.linspace(-90,90,nbinLat)*np.pi/180
r = [radius]*nbinR
#2. Mesh grid
(Lon,Lat,R) = np.meshgrid(lon,lat,r,indexing='ij')
#3. Projection on cartesian coordinates
x = R*np.cos(Lat)*np.cos(Lon)
y = R*np.cos(Lat)*np.sin(Lon)
z = R*np.sin(Lat)
return ((x,y,z),(R,Lon,Lat))
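# Quick shape check (added for illustration; not part of the original module):
# with spacing=1 the grid carries one point per degree in lon and lat at a single radius.
#
#     (x, y, z), (R, Lon, Lat) = regularSphericalGrid(radius=1.0, spacing=1)
#     x.shape   # (361, 181, 1) = (nbinLon, nbinLat, nbinR)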
def sliceInterpolator(sliceData,interpGeom='rgS',spacing=1,interpMethod='nearest',deg=False,verbose=True):
"""
    Interpolates a stagData.YinYangSliceData object onto another grid.
    <i> : sliceData = stagData.YinYangSliceData object
          interpGeom = str, indicates the type of geometry used for the new
                       grid. (in {rgS})
          spacing = int, parameter of the interpGeom
          interpMethod = str, method used for the interpolation. In ('nearest',
                         'linear', 'cubic'). Default = 'nearest'
          deg = bool, for interpGeom == 'rgS' only ! if deg is True, then the
                x,y,z on output will be lon,lat,r respectively
          verbose = bool, controls the display of internal messages
    <o> : Return a stagData.InterpolatedSliceData object
"""
time0 = time() #Init time for log message
if verbose:
print()
pName = 'sliceInterpolator'
#1. Creation of the new grid
im('Creation of interpGeom Grid',pName,verbose)
if interpGeom == 'rgS':
#Regular Grid - Spherical 1:
((x,y,z),(r,lon,lat)) = regularSphericalGrid(radius=sliceData.r[0],spacing=spacing)
npx, npy, npz = x.shape[0], x.shape[1], x.shape[2]
Xrg = x.reshape(x.shape[0]*x.shape[1]*x.shape[2])
Yrg = y.reshape(y.shape[0]*y.shape[1]*y.shape[2])
Zrg = z.reshape(z.shape[0]*z.shape[1]*z.shape[2])
else:
raise GridInterpolationError(interpGeom)
im(' - Spacing for grid : '+str(spacing),pName,verbose)
im(' - Number of Points : '+str(len(Xrg)),pName,verbose)
#2. Preparation of the interpolation:
im('Interpolation of the slice:',pName,verbose)
X = sliceData.x
Y = sliceData.y
Z = sliceData.z
im(' - Slice layer index : '+str(sliceData.layer),pName,verbose)
im(' - Corresponding depth : '+str(sliceData.depth),pName,verbose)
im(' - Number of Points in the slice: '+str(len(X)),pName,verbose)
points = np.array([(X[i],Y[i],Z[i]) for i in range(len(X))])
# Stores all in an stagData.InterpolatedSliceData object
isd = InterpolatedSliceData()
isd.sliceInheritance(sliceData)
isd.nxi, isd.nyi, isd.nzi = npx, npy, npz
isd.interpGeom = interpGeom
isd.spacing = spacing
isd.interpMethod = interpMethod
isd.deg = deg
# Scalar or Vectorial
if sliceData.fieldNature == 'Scalar':
        im(' - Interpolation of a Scalar field',pName,verbose)
values = np.array(sliceData.v)
isd.v = griddata(points, values, (Xrg, Yrg, Zrg), method=interpMethod)
im('Interpolation done for the slice !',pName,verbose)
im(' - Duration of interpolation: '+str(time()-time0)[0:5]+' s',pName,verbose)
if deg:
y = lat*180/np.pi
x = lon*180/np.pi
z = r
Xrg = x.reshape(x.shape[0]*x.shape[1]*x.shape[2])
Yrg = y.reshape(y.shape[0]*y.shape[1]*y.shape[2])
Zrg = z.reshape(z.shape[0]*z.shape[1]*z.shape[2])
else: #Vectorial field
im(' - Interpolation of a Vectorial field: can take time',pName,verbose)
values_vx = np.array(sliceData.vx)
values_vy = np.array(sliceData.vy)
values_vz = np.array(sliceData.vz)
values_P = np.array(sliceData.P)
values_vtheta = np.array(sliceData.vtheta)
values_vphi = np.array(sliceData.vphi)
values_vr = np.array(sliceData.vr)
values_v = np.array(sliceData.v)
isd.vx = griddata(points, values_vx, (Xrg, Yrg, Zrg), method=interpMethod)
isd.vy = griddata(points, values_vy, (Xrg, Yrg, Zrg), method=interpMethod)
isd.vz = griddata(points, values_vz, (Xrg, Yrg, Zrg), method=interpMethod)
isd.P = griddata(points, values_P, (Xrg, Yrg, Zrg), method=interpMethod)
isd.vtheta = griddata(points, values_vtheta, (Xrg, Yrg, Zrg), method=interpMethod)
isd.vphi = griddata(points, values_vphi, (Xrg, Yrg, Zrg), method=interpMethod)
isd.vr = griddata(points, values_vr, (Xrg, Yrg, Zrg), method=interpMethod)
isd.v = griddata(points, values_v, (Xrg, Yrg, Zrg), method=interpMethod)
im('Interpolation done for the slice !',pName,verbose)
im(' - Duration of interpolation: '+str(time()-time0)[0:5]+' s',pName,verbose)
if deg:
im('Requested: Conversion of the grid into degree',pName,verbose)
y = lat*180/np.pi
x = lon*180/np.pi
z = r
Xrg = x.reshape(x.shape[0]*x.shape[1]*x.shape[2])
Yrg = y.reshape(y.shape[0]*y.shape[1]*y.shape[2])
Zrg = z.reshape(z.shape[0]*z.shape[1]*z.shape[2])
isd.x = Xrg
isd.y = Yrg
isd.z = Zrg
return isd
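# --- Usage sketch (added for illustration; not part of the original module) ---
# Typical call, assuming `sd` is an already-built stagData.YinYangSliceData object:
#
#     isd = sliceInterpolator(sd, interpGeom='rgS', spacing=2,
#                             interpMethod='nearest', deg=True, verbose=True)
#     # With deg=True, isd.x, isd.y, isd.z hold lon, lat, r of the regular grid,
#     # and isd.v (plus vx, vy, vz, ... for vectorial fields) holds the
#     # interpolated values.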
|
[
"numpy.meshgrid",
"scipy.interpolate.griddata",
"time.time",
"numpy.sin",
"numpy.array",
"numpy.cos",
"numpy.linspace"
] |
[((1801, 1840), 'numpy.meshgrid', 'np.meshgrid', (['lon', 'lat', 'r'], {'indexing': '"""ij"""'}), "(lon, lat, r, indexing='ij')\n", (1812, 1840), True, 'import numpy as np\n'), ((2853, 2859), 'time.time', 'time', ([], {}), '()\n', (2857, 2859), False, 'from time import time\n'), ((1904, 1915), 'numpy.cos', 'np.cos', (['Lon'], {}), '(Lon)\n', (1910, 1915), True, 'import numpy as np\n'), ((1938, 1949), 'numpy.sin', 'np.sin', (['Lon'], {}), '(Lon)\n', (1944, 1949), True, 'import numpy as np\n'), ((1960, 1971), 'numpy.sin', 'np.sin', (['Lat'], {}), '(Lat)\n', (1966, 1971), True, 'import numpy as np\n'), ((4526, 4547), 'numpy.array', 'np.array', (['sliceData.v'], {}), '(sliceData.v)\n', (4534, 4547), True, 'import numpy as np\n'), ((4564, 4626), 'scipy.interpolate.griddata', 'griddata', (['points', 'values', '(Xrg, Yrg, Zrg)'], {'method': 'interpMethod'}), '(points, values, (Xrg, Yrg, Zrg), method=interpMethod)\n', (4572, 4626), False, 'from scipy.interpolate import griddata\n'), ((5191, 5213), 'numpy.array', 'np.array', (['sliceData.vx'], {}), '(sliceData.vx)\n', (5199, 5213), True, 'import numpy as np\n'), ((5234, 5256), 'numpy.array', 'np.array', (['sliceData.vy'], {}), '(sliceData.vy)\n', (5242, 5256), True, 'import numpy as np\n'), ((5277, 5299), 'numpy.array', 'np.array', (['sliceData.vz'], {}), '(sliceData.vz)\n', (5285, 5299), True, 'import numpy as np\n'), ((5319, 5340), 'numpy.array', 'np.array', (['sliceData.P'], {}), '(sliceData.P)\n', (5327, 5340), True, 'import numpy as np\n'), ((5365, 5391), 'numpy.array', 'np.array', (['sliceData.vtheta'], {}), '(sliceData.vtheta)\n', (5373, 5391), True, 'import numpy as np\n'), ((5414, 5438), 'numpy.array', 'np.array', (['sliceData.vphi'], {}), '(sliceData.vphi)\n', (5422, 5438), True, 'import numpy as np\n'), ((5459, 5481), 'numpy.array', 'np.array', (['sliceData.vr'], {}), '(sliceData.vr)\n', (5467, 5481), True, 'import numpy as np\n'), ((5502, 5523), 'numpy.array', 'np.array', (['sliceData.v'], {}), '(sliceData.v)\n', (5510, 5523), True, 'import numpy as np\n'), ((5541, 5606), 'scipy.interpolate.griddata', 'griddata', (['points', 'values_vx', '(Xrg, Yrg, Zrg)'], {'method': 'interpMethod'}), '(points, values_vx, (Xrg, Yrg, Zrg), method=interpMethod)\n', (5549, 5606), False, 'from scipy.interpolate import griddata\n'), ((5624, 5689), 'scipy.interpolate.griddata', 'griddata', (['points', 'values_vy', '(Xrg, Yrg, Zrg)'], {'method': 'interpMethod'}), '(points, values_vy, (Xrg, Yrg, Zrg), method=interpMethod)\n', (5632, 5689), False, 'from scipy.interpolate import griddata\n'), ((5707, 5772), 'scipy.interpolate.griddata', 'griddata', (['points', 'values_vz', '(Xrg, Yrg, Zrg)'], {'method': 'interpMethod'}), '(points, values_vz, (Xrg, Yrg, Zrg), method=interpMethod)\n', (5715, 5772), False, 'from scipy.interpolate import griddata\n'), ((5789, 5853), 'scipy.interpolate.griddata', 'griddata', (['points', 'values_P', '(Xrg, Yrg, Zrg)'], {'method': 'interpMethod'}), '(points, values_P, (Xrg, Yrg, Zrg), method=interpMethod)\n', (5797, 5853), False, 'from scipy.interpolate import griddata\n'), ((5875, 5944), 'scipy.interpolate.griddata', 'griddata', (['points', 'values_vtheta', '(Xrg, Yrg, Zrg)'], {'method': 'interpMethod'}), '(points, values_vtheta, (Xrg, Yrg, Zrg), method=interpMethod)\n', (5883, 5944), False, 'from scipy.interpolate import griddata\n'), ((5964, 6031), 'scipy.interpolate.griddata', 'griddata', (['points', 'values_vphi', '(Xrg, Yrg, Zrg)'], {'method': 'interpMethod'}), '(points, values_vphi, (Xrg, Yrg, Zrg), method=interpMethod)\n', 
(5972, 6031), False, 'from scipy.interpolate import griddata\n'), ((6049, 6114), 'scipy.interpolate.griddata', 'griddata', (['points', 'values_vr', '(Xrg, Yrg, Zrg)'], {'method': 'interpMethod'}), '(points, values_vr, (Xrg, Yrg, Zrg), method=interpMethod)\n', (6057, 6114), False, 'from scipy.interpolate import griddata\n'), ((6131, 6195), 'scipy.interpolate.griddata', 'griddata', (['points', 'values_v', '(Xrg, Yrg, Zrg)'], {'method': 'interpMethod'}), '(points, values_v, (Xrg, Yrg, Zrg), method=interpMethod)\n', (6139, 6195), False, 'from scipy.interpolate import griddata\n'), ((1655, 1683), 'numpy.linspace', 'np.linspace', (['(0)', '(360)', 'nbinLon'], {}), '(0, 360, nbinLon)\n', (1666, 1683), True, 'import numpy as np\n'), ((1702, 1731), 'numpy.linspace', 'np.linspace', (['(-90)', '(90)', 'nbinLat'], {}), '(-90, 90, nbinLat)\n', (1713, 1731), True, 'import numpy as np\n'), ((1892, 1903), 'numpy.cos', 'np.cos', (['Lat'], {}), '(Lat)\n', (1898, 1903), True, 'import numpy as np\n'), ((1926, 1937), 'numpy.cos', 'np.cos', (['Lat'], {}), '(Lat)\n', (1932, 1937), True, 'import numpy as np\n'), ((4741, 4747), 'time.time', 'time', ([], {}), '()\n', (4745, 4747), False, 'from time import time\n'), ((6310, 6316), 'time.time', 'time', ([], {}), '()\n', (6314, 6316), False, 'from time import time\n')]
|
"""
Created on Mon Mar 14 09:30:44 2016
@author: rmc84
<NAME>
Purpose: calculates the GMPE values for a given IM & earthquake
Based on CompareGMPEs_.m (version 1.0 8 March 2010, Brendon Bradley)
All variable and function names have been retained wherever possible
All the redundant parts of the code to plot the GMPE on the IM plot have been removed.
This function mostly just copies the variables from parms into the classes (matlab structures) siteprop and faultprop.
Other GMPEs can be called from this function with the same format.
Issues:
Verification:
Matches matlab code to 9 sig. fig. (IM_Rscaling, Rrupval, sigma_IM_Rscaling)
"""
import numpy as np
#from Bradley_2010_Sa import Bradley_2010_Sa
from geoNet.gmpe.Bradley_2010_Sa import Bradley_2010_Sa
class siteProperties: #Class of site properties. initialize all attributes to None
period=None # '(-1),(0),(real variable)' period of vibration =-1->PGV; =0->PGA; >0->SA
Rrup=None # closest distance coseismic rupture (km)
Rjb=None # ???
Rx=None #distance measured perpendicular to fault strike from surface projection of updip edge of the fault rupture (+ve in downdip dir) (km)
Rtvz=None # source-to-site distance in the Taupo volcanic zone (TVZ) (km)
V30measured=None # yes =1 (i.e. from Vs tests); no=0 (i.e. estimated from geology)
V30=None # shear wave velocity at 30m depth (m/s)
Z1pt0=None # depth to the 1.0km/s shear wave velocity horizon (optional, uses default relationship otherwise
g=None # gravity (cm s^-2)
class faultProperties(): #Class of fault properties. initialize all attributes to None
Mw=None # moment tensor magnitude
rake=None # rake angle (degrees)
dip=None # dip angle (degrees)
Ztor=None # depth to top of coseismic rupture (km)
class set_faultprop(object):
def __init__(self,Mw=None, rake=None, dip=None, Ztor=None):
"""
In one line set the fault parameters
"""
#self.faultprop=faultProperties()
#self.faultprop.Mw=Mw
#self.faultprop.rake=rake
#self.faultprop.dip=dip
#self.faultprop.Ztor=Ztor
self.Mw=Mw
self.rake=rake
self.dip=dip
self.Ztor=Ztor
return
class set_siteprop(object):
def __init__(self,faultprop,
period=None, Rrup=None, Rjb=None, Rx=None, Rtvz=None, V30measured=None,
V30=None, Z1pt0=None, g=981):
"""
g in cm/s^2
Period = -1 for PGV, 0 for PGA, actual numerical value for PSA
        The default choice used in Richard's code is V30=250. #site 'D' classification
"""
# self.siteprop=siteProperties()
# self.siteprop.period=period
# self.siteprop.g=g
# self.siteprop.Rtvz=Rtvz
# self.siteprop.V30measured=V30measured
# #self.siteprop.V30=250. #site 'D' classification
# self.siteprop.V30=V30
# self.siteprop.Rrup=Rrup
# #Definition used below by Richard does not match Rjb definition
# self.siteprop.Rjb=np.sqrt(np.max((0,Rrup**2-faultprop.Ztor**2)))
# #We leave Rjb unchanged to match Ricard's code
# #self.siteprop.Rjb=Rjb
# self.siteprop.Rx=-self.siteprop.Rjb
#
self.period=period
self.Z1pt0=Z1pt0
self.g=g
self.Rtvz=Rtvz
self.V30measured=V30measured
#self.V30=250. #site 'D' classification
self.V30=V30
self.Rrup=Rrup
#Definition used below by Richard does not match Rjb definition
self.Rjb=np.sqrt(np.max((0,Rrup**2-faultprop.Ztor**2)))
        #We leave Rjb unchanged to match Richard's code
#self.Rjb=Rjb
self.Rx=-self.Rjb
return
def calculateGMPE(parms,ImName,period): ##need to pass the ImName and the period as we loop over these
    #Do some error checking to make sure that the model is valid
    if parms.plotGMPE.model!=1:
        print('Error: The selected GMPE (model %d) is not supported. Only Model=1 Bradley_2010 is supported.' % parms.plotGMPE.model)
        print('Not calculating the GMPE. Returning from function')
        raise ValueError
    #For the model, check that the IM is supported
    if not(ImName=='PGA' or ImName=='PGV' or ImName=='pSA'):
        print('Error: The selected GMPE (model %d) does not support IM name %s' % (parms.plotGMPE.model, ImName))
        print('Not calculating the GMPE for %s. Returning from function' % ImName)
        raise ValueError
#set up the class faultprop. Same structure as the matlab code
faultprop=faultProperties()
faultprop.Mw=parms.plotGMPE.Mw
faultprop.rake=parms.plotGMPE.rake
faultprop.dip=parms.plotGMPE.dip
faultprop.Ztor=parms.plotGMPE.Ztor
#set up the class siteprop. Same structure as the matlab code
siteprop=siteProperties()
if ImName=='PGA':
siteprop.period=0
elif ImName=='PGV':
siteprop.period=-1
elif ImName=='pSA':
siteprop.period=period
siteprop.g=parms.calcIM.g
siteprop.Rtvz=parms.plotGMPE.Rtvz
siteprop.V30measured=parms.plotGMPE.V30measured
#Rrup values used for the GMPE calculation
Rrupval=np.exp(np.linspace(np.log(parms.plotGMPE.DistMin),np.log(parms.plotGMPE.DistMax),parms.plotGMPE.nVal))
Vs30=[] #The list of Vs30 values we are going to calculate the GMPE for
for i in range(len(parms.plotGMPE.SiteClassForPrediction)):
if parms.plotGMPE.SiteClassForPrediction[i]=='A':
Vs30.append(parms.plotGMPE.Vs30[0])
elif parms.plotGMPE.SiteClassForPrediction[i]=='B':
Vs30.append(parms.plotGMPE.Vs30[1])
elif parms.plotGMPE.SiteClassForPrediction[i]=='C':
Vs30.append(parms.plotGMPE.Vs30[2])
elif parms.plotGMPE.SiteClassForPrediction[i]=='D':
Vs30.append(parms.plotGMPE.Vs30[3])
elif parms.plotGMPE.SiteClassForPrediction[i]=='E':
Vs30.append(parms.plotGMPE.Vs30[4])
else:
            print (parms.plotGMPE.SiteClassForPrediction[i], 'is an invalid siteClass for Prediction. Must be A, B, C, D, E.')
#initialise the outputs as arrays of zeros
IM_Rscaling=np.zeros((len(Vs30),parms.plotGMPE.nVal))
sigma_IM_Rscaling=np.zeros((len(Vs30),parms.plotGMPE.nVal,3))
for j in range(len(Vs30)): #loop over the different Vs30 values chosen
siteprop.V30=Vs30[j]
for i in range(len(Rrupval)): #loop over the different rupture distances
siteprop.Rrup=Rrupval[i]
siteprop.Rjb=np.sqrt(np.max((0,Rrupval[i]**2-faultprop.Ztor**2)))
siteprop.Rx=-siteprop.Rjb
if parms.plotGMPE.model==1: #Bradley2010 NZ-specific. No error checking here as we check at start of file
IM_Rscaling[j,i], sigma_IM_Rscaling[j,i,:]=Bradley_2010_Sa(siteprop,faultprop)
return Rrupval, Vs30, IM_Rscaling, sigma_IM_Rscaling
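# --- Usage sketch (added for illustration; not part of the original module) ---
# calculateGMPE only reads attributes from `parms`, so any object exposing the
# fields accessed above works:
#   parms.plotGMPE: model, Mw, rake, dip, Ztor, Rtvz, V30measured,
#                   DistMin, DistMax, nVal, SiteClassForPrediction, Vs30
#   parms.calcIM:   g
#
#     Rrupval, Vs30, IM, sigma_IM = calculateGMPE(parms, 'pSA', period=1.0)
#     # IM has shape (len(Vs30), parms.plotGMPE.nVal); sigma_IM adds a trailing
#     # dimension of 3 (the three sigma components returned by Bradley_2010_Sa).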
|
[
"numpy.max",
"numpy.log",
"geoNet.gmpe.Bradley_2010_Sa.Bradley_2010_Sa"
] |
[((3754, 3798), 'numpy.max', 'np.max', (['(0, Rrup ** 2 - faultprop.Ztor ** 2)'], {}), '((0, Rrup ** 2 - faultprop.Ztor ** 2))\n', (3760, 3798), True, 'import numpy as np\n'), ((5427, 5457), 'numpy.log', 'np.log', (['parms.plotGMPE.DistMin'], {}), '(parms.plotGMPE.DistMin)\n', (5433, 5457), True, 'import numpy as np\n'), ((5458, 5488), 'numpy.log', 'np.log', (['parms.plotGMPE.DistMax'], {}), '(parms.plotGMPE.DistMax)\n', (5464, 5488), True, 'import numpy as np\n'), ((6776, 6826), 'numpy.max', 'np.max', (['(0, Rrupval[i] ** 2 - faultprop.Ztor ** 2)'], {}), '((0, Rrupval[i] ** 2 - faultprop.Ztor ** 2))\n', (6782, 6826), True, 'import numpy as np\n'), ((7068, 7104), 'geoNet.gmpe.Bradley_2010_Sa.Bradley_2010_Sa', 'Bradley_2010_Sa', (['siteprop', 'faultprop'], {}), '(siteprop, faultprop)\n', (7083, 7104), False, 'from geoNet.gmpe.Bradley_2010_Sa import Bradley_2010_Sa\n')]
|
"""
PTC
---
Data handling for turn-by-turn measurement files from the ``PTC`` code, which can be obtained by performing
particle tracking of your machine through the ``MAD-X PTC`` interface. The files are very close in
structure to **TFS** files, with the difference that the data part is split into "segments", each
containing data for a given observation point.
"""
import copy
import logging
from collections import namedtuple
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Sequence, Tuple, Union
import numpy as np
import pandas as pd
from dateutil import tz
from turn_by_turn.constants import (
COLPARTICLE,
COLTURN,
COLX,
COLY,
DATE,
HEADER,
NAMES,
PLANES,
SEGMENT_MARKER,
SEGMENTS,
TIME,
TIME_FORMAT,
TYPES,
)
from turn_by_turn.errors import PTCFormatError
from turn_by_turn.structures import TbtData, TransverseData
LOGGER = logging.getLogger()
Segment = namedtuple("Segment", ["number", "turns", "particles", "element", "name"])
def read_tbt(file_path: Union[str, Path]) -> TbtData:
"""
Reads turn-by-turn data from the ``PTC`` **trackone** format file.
Args:
file_path (Union[str, Path]): path to the turn-by-turn measurement file.
Returns:
A ``TbTData`` object with the loaded data.
"""
file_path = Path(file_path)
LOGGER.debug(f"Reading PTC trackone file at path: '{file_path.absolute()}'")
lines: List[str] = file_path.read_text().splitlines()
LOGGER.debug("Reading header from file")
date, header_length = _read_header(lines)
lines = lines[header_length:]
# parameters
bpms, particles, column_indices, n_turns, n_particles = _read_from_first_turn(lines)
# read into dict first for speed then convert to DFs
matrices = [{p: {bpm: np.zeros(n_turns) for bpm in bpms} for p in PLANES} for _ in range(n_particles)]
matrices = _read_data(lines, matrices, column_indices)
for bunch in range(n_particles):
matrices[bunch] = TransverseData(
X=pd.DataFrame(matrices[bunch]["X"]).transpose(),
Y=pd.DataFrame(matrices[bunch]["Y"]).transpose(),
)
LOGGER.debug(f"Read Tbt matrices from: '{file_path.absolute()}'")
return TbtData(matrices=matrices, date=date, bunch_ids=particles, nturns=n_turns)
def _read_header(lines: Sequence[str]) -> Tuple[datetime, int]:
"""Reads header length and datetime from header."""
idx_line = 0
date_str = {k: None for k in [DATE, TIME]}
for idx_line, line in enumerate(lines):
parts = line.strip().split()
if len(parts) == 0:
continue
if parts[0] != HEADER:
break
if parts[1] in date_str.keys():
date_str[parts[1]] = parts[-1].strip("'\" ")
if any(datestring is None for datestring in date_str.values()):
LOGGER.warning("No date found in file, defaulting to today")
return datetime.today().replace(tzinfo=tz.tzutc()), idx_line
return datetime.strptime(f"{date_str[DATE]} {date_str[TIME]}", TIME_FORMAT), idx_line
def _read_from_first_turn(
lines: Sequence[str],
) -> Tuple[List[str], List[int], Dict[Any, Any], int, int]:
"""
Reads the BPMs, particles, column indices and number of turns and particles from the matrices of
the first turn.
"""
LOGGER.debug("Reading first turn to define boundary parameters.")
bpms = []
particles = []
column_indices = None
n_turns = 0
n_particles = 0
first_segment = True
for line in lines:
parts = line.strip().split()
if len(parts) == 0 or parts[0] in [HEADER, TYPES]:
continue
if parts[0] == NAMES: # read column names
if column_indices is not None:
raise KeyError(f"{NAMES} are defined twice in tbt file!")
column_indices = _parse_column_names_to_indices(parts[1:])
continue
if parts[0] == SEGMENTS: # read segments, append to bunch_id
segment = Segment(*parts[1:])
if segment.name == SEGMENT_MARKER[0]: # start of first segment
n_turns = int(segment.turns) - 1
n_particles = int(segment.particles)
elif segment.name == SEGMENT_MARKER[1]: # end of first segment
break
else:
first_segment = False
bpms.append(segment.name)
elif first_segment:
if column_indices is None:
LOGGER.error("Columns not defined in Tbt file")
raise PTCFormatError
new_data = _parse_data(column_indices, parts)
particle = int(float(new_data[COLPARTICLE]))
particles.append(particle)
if len(particles) == 0:
LOGGER.error("No matrices found in TbT file")
raise PTCFormatError
return bpms, particles, column_indices, n_turns, n_particles
def _read_data(
    lines: Sequence[str], matrices: List[Dict[str, Dict[str, np.ndarray]]], column_indices: dict
) -> List[Dict[str, Dict[str, np.ndarray]]]:
    """Read the data lines into the per-particle matrices."""
LOGGER.debug("Reading matrices.")
matrices = copy.deepcopy(matrices)
segment = None
column_map = {"X": COLX, "Y": COLY}
for line in lines:
parts = line.strip().split()
if len(parts) == 0 or parts[0] in (HEADER, TYPES, NAMES):
continue
if parts[0] == SEGMENTS: # start of a new segment
segment = Segment(*parts[1:])
continue
if segment is None:
LOGGER.error("Data written before Segment definition")
raise PTCFormatError
if segment.name in SEGMENT_MARKER:
continue
data = _parse_data(column_indices, parts)
part_id = int(float(data[COLPARTICLE])) - 1
turn_nr = int(float(data[COLTURN])) - 1
for plane in PLANES:
matrices[part_id][plane][segment.name][turn_nr] = float(data[column_map[plane]])
return matrices
def _parse_data(column_indices, parts: Sequence[str]) -> dict:
"""
Converts the ``parts`` (split elements of a data line) into a dictionary based on the indices in
``column_indices``.
"""
return {col: parts[col_idx] for col, col_idx in column_indices.items()}
def _parse_column_names_to_indices(parts: Sequence[str]) -> dict:
"""Parses the column names from the line into a dictionary with indices."""
col_idx = {k: None for k in [COLX, COLY, COLTURN, COLPARTICLE]}
LOGGER.debug("Setting column names.")
for idx, column_name in enumerate(parts):
if column_name not in col_idx:
LOGGER.debug(f"Column '{column_name}' will be ignored.")
continue
if col_idx[column_name] is not None:
raise KeyError(f"'{column_name}' is defined twice.")
col_idx[column_name] = idx
    missing = [c for c, idx in col_idx.items() if idx is None]
    if missing:
        raise ValueError(f"The following columns are missing in ptc file: '{str(missing)}'")
return col_idx
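# --- Usage sketch (added for illustration; not part of the original module) ---
# Reading a PTC trackone file and inspecting the result, based on the TbtData
# and TransverseData fields used above:
#
#     tbt = read_tbt("trackone")    # path to a MAD-X PTC trackone output file
#     tbt.nturns, tbt.bunch_ids     # number of turns, particle ids
#     tbt.matrices[0].X             # pandas DataFrame (BPMs x turns) for plane X
#     tbt.matrices[0].Y             # same for plane Y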
|
[
"pandas.DataFrame",
"copy.deepcopy",
"datetime.datetime.today",
"numpy.zeros",
"dateutil.tz.tzutc",
"pathlib.Path",
"turn_by_turn.structures.TbtData",
"datetime.datetime.strptime",
"collections.namedtuple",
"logging.getLogger"
] |
[((936, 955), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (953, 955), False, 'import logging\n'), ((966, 1040), 'collections.namedtuple', 'namedtuple', (['"""Segment"""', "['number', 'turns', 'particles', 'element', 'name']"], {}), "('Segment', ['number', 'turns', 'particles', 'element', 'name'])\n", (976, 1040), False, 'from collections import namedtuple\n'), ((1357, 1372), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (1361, 1372), False, 'from pathlib import Path\n'), ((2264, 2338), 'turn_by_turn.structures.TbtData', 'TbtData', ([], {'matrices': 'matrices', 'date': 'date', 'bunch_ids': 'particles', 'nturns': 'n_turns'}), '(matrices=matrices, date=date, bunch_ids=particles, nturns=n_turns)\n', (2271, 2338), False, 'from turn_by_turn.structures import TbtData, TransverseData\n'), ((5183, 5206), 'copy.deepcopy', 'copy.deepcopy', (['matrices'], {}), '(matrices)\n', (5196, 5206), False, 'import copy\n'), ((3022, 3090), 'datetime.datetime.strptime', 'datetime.strptime', (['f"""{date_str[DATE]} {date_str[TIME]}"""', 'TIME_FORMAT'], {}), "(f'{date_str[DATE]} {date_str[TIME]}', TIME_FORMAT)\n", (3039, 3090), False, 'from datetime import datetime\n'), ((1829, 1846), 'numpy.zeros', 'np.zeros', (['n_turns'], {}), '(n_turns)\n', (1837, 1846), True, 'import numpy as np\n'), ((2956, 2972), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (2970, 2972), False, 'from datetime import datetime\n'), ((2988, 2998), 'dateutil.tz.tzutc', 'tz.tzutc', ([], {}), '()\n', (2996, 2998), False, 'from dateutil import tz\n'), ((2062, 2096), 'pandas.DataFrame', 'pd.DataFrame', (["matrices[bunch]['X']"], {}), "(matrices[bunch]['X'])\n", (2074, 2096), True, 'import pandas as pd\n'), ((2124, 2158), 'pandas.DataFrame', 'pd.DataFrame', (["matrices[bunch]['Y']"], {}), "(matrices[bunch]['Y'])\n", (2136, 2158), True, 'import pandas as pd\n')]
|
import torchvision
import skimage
import torch
from torchvision import transforms
import numpy as np
from PIL import Image
IMG_MEAN = (0.4914, 0.4822, 0.4465)
IMG_STD = (0.2023, 0.1994, 0.2010)
NORM = [transforms.ToTensor(),
transforms.Normalize(IMG_MEAN, IMG_STD)]
class MapTransform(object):
def __init__(self, transforms, pil_convert=True):
self.transforms = transforms
self.pil_convert = pil_convert
def __call__(self, vid):
if isinstance(vid, Image.Image):
return np.stack([self.transforms(vid)])
if isinstance(vid, torch.Tensor):
vid = vid.numpy()
if self.pil_convert:
x = np.stack([np.asarray(self.transforms(Image.fromarray(v))) for v in vid])
return x
else:
return np.stack([self.transforms(v) for v in vid])
def n_patches(x, n, transform, shape=(64, 64, 3), scale=[0.2, 0.8]):
''' unused '''
if shape[-1] == 0:
shape = np.random.uniform(64, 128)
shape = (shape, shape, 3)
crop = transforms.Compose([
lambda x: Image.fromarray(x) if not 'PIL' in str(type(x)) else x,
transforms.RandomResizedCrop(shape[0], scale=scale)
])
if torch.is_tensor(x):
x = x.numpy().transpose(1,2, 0)
P = []
for _ in range(n):
xx = transform(crop(x))
P.append(xx)
return torch.cat(P, dim=0)
def patch_grid(transform, shape=(64, 64, 3), stride=[0.5, 0.5]):
stride = np.random.random() * (stride[1] - stride[0]) + stride[0]
stride = [int(shape[0]*stride), int(shape[1]*stride), shape[2]]
spatial_jitter = transforms.Compose([
lambda x: Image.fromarray(x),
transforms.RandomResizedCrop(shape[0], scale=(0.7, 0.9))
])
def aug(x):
if torch.is_tensor(x):
x = x.numpy().transpose(1, 2, 0)
elif 'PIL' in str(type(x)):
x = np.array(x)#.transpose(2, 0, 1)
winds = skimage.util.view_as_windows(x, shape, step=stride)
winds = winds.reshape(-1, *winds.shape[-3:])
P = [transform(spatial_jitter(w)) for w in winds]
return torch.cat(P, dim=0)
return aug
def get_frame_aug(frame_aug, patch_size):
train_transform = []
if 'cj' in frame_aug:
_cj = 0.1
train_transform += [
#transforms.RandomGrayscale(p=0.2),
transforms.ColorJitter(_cj, _cj, _cj, 0),
]
if 'flip' in frame_aug:
train_transform += [transforms.RandomHorizontalFlip()]
train_transform += NORM
train_transform = transforms.Compose(train_transform)
print('Frame augs:', train_transform, frame_aug)
if 'grid' in frame_aug:
aug = patch_grid(train_transform, shape=np.array(patch_size))
else:
aug = train_transform
return aug
def get_frame_transform(frame_transform_str, img_size):
tt = []
fts = frame_transform_str
norm_size = torchvision.transforms.Resize((img_size, img_size))
if 'crop' in fts:
tt.append(torchvision.transforms.RandomResizedCrop(
img_size, scale=(0.8, 0.95), ratio=(0.7, 1.3), interpolation=2),)
else:
tt.append(norm_size)
if 'cj' in fts:
_cj = 0.1
# tt += [#transforms.RandomGrayscale(p=0.2),]
tt += [transforms.ColorJitter(_cj, _cj, _cj, 0),]
if 'flip' in fts:
tt.append(torchvision.transforms.RandomHorizontalFlip())
print('Frame transforms:', tt, fts)
return tt
def get_train_transforms(args):
norm_size = torchvision.transforms.Resize((args.img_size, args.img_size))
frame_transform = get_frame_transform(args.frame_transforms, args.img_size)
frame_aug = get_frame_aug(args.frame_aug, args.patch_size)
frame_aug = [frame_aug] if args.frame_aug != '' else NORM
transform = frame_transform + frame_aug
train_transform = MapTransform(
torchvision.transforms.Compose(transform)
)
plain = torchvision.transforms.Compose([
torchvision.transforms.ToPILImage(),
norm_size,
*NORM,
])
def with_orig(x):
x = train_transform(x), \
plain(x[0]) if 'numpy' in str(type(x[0])) else plain(x[0].permute(2, 0, 1))
return x
return with_orig
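# --- Usage sketch (added for illustration; not part of the original module) ---
# get_train_transforms only needs an args object with the attributes read above
# (img_size, frame_transforms, frame_aug, patch_size), e.g.:
#
#     from argparse import Namespace
#     args = Namespace(img_size=256, frame_transforms='crop flip',
#                      frame_aug='grid', patch_size=(64, 64, 3))
#     transform = get_train_transforms(args)
#     # transform(video) returns (augmented clip, plain first frame), where
#     # `video` is a sequence of HWC uint8 frames (numpy array or tensor).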
|
[
"torchvision.transforms.ColorJitter",
"numpy.random.uniform",
"torchvision.transforms.RandomHorizontalFlip",
"torchvision.transforms.Resize",
"skimage.util.view_as_windows",
"torchvision.transforms.ToPILImage",
"torch.cat",
"torchvision.transforms.RandomResizedCrop",
"torchvision.transforms.Compose",
"numpy.random.random",
"numpy.array",
"PIL.Image.fromarray",
"torchvision.transforms.Normalize",
"torch.is_tensor",
"torchvision.transforms.ToTensor"
] |
[((206, 227), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (225, 227), False, 'from torchvision import transforms\n'), ((238, 277), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['IMG_MEAN', 'IMG_STD'], {}), '(IMG_MEAN, IMG_STD)\n', (258, 277), False, 'from torchvision import transforms\n'), ((1239, 1257), 'torch.is_tensor', 'torch.is_tensor', (['x'], {}), '(x)\n', (1254, 1257), False, 'import torch\n'), ((1403, 1422), 'torch.cat', 'torch.cat', (['P'], {'dim': '(0)'}), '(P, dim=0)\n', (1412, 1422), False, 'import torch\n'), ((2599, 2634), 'torchvision.transforms.Compose', 'transforms.Compose', (['train_transform'], {}), '(train_transform)\n', (2617, 2634), False, 'from torchvision import transforms\n'), ((2960, 3011), 'torchvision.transforms.Resize', 'torchvision.transforms.Resize', (['(img_size, img_size)'], {}), '((img_size, img_size))\n', (2989, 3011), False, 'import torchvision\n'), ((3556, 3617), 'torchvision.transforms.Resize', 'torchvision.transforms.Resize', (['(args.img_size, args.img_size)'], {}), '((args.img_size, args.img_size))\n', (3585, 3617), False, 'import torchvision\n'), ((992, 1018), 'numpy.random.uniform', 'np.random.uniform', (['(64)', '(128)'], {}), '(64, 128)\n', (1009, 1018), True, 'import numpy as np\n'), ((1813, 1831), 'torch.is_tensor', 'torch.is_tensor', (['x'], {}), '(x)\n', (1828, 1831), False, 'import torch\n'), ((1987, 2038), 'skimage.util.view_as_windows', 'skimage.util.view_as_windows', (['x', 'shape'], {'step': 'stride'}), '(x, shape, step=stride)\n', (2015, 2038), False, 'import skimage\n'), ((2166, 2185), 'torch.cat', 'torch.cat', (['P'], {'dim': '(0)'}), '(P, dim=0)\n', (2175, 2185), False, 'import torch\n'), ((3922, 3963), 'torchvision.transforms.Compose', 'torchvision.transforms.Compose', (['transform'], {}), '(transform)\n', (3952, 3963), False, 'import torchvision\n'), ((1168, 1219), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['shape[0]'], {'scale': 'scale'}), '(shape[0], scale=scale)\n', (1196, 1219), False, 'from torchvision import transforms\n'), ((1503, 1521), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1519, 1521), True, 'import numpy as np\n'), ((1721, 1777), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['shape[0]'], {'scale': '(0.7, 0.9)'}), '(shape[0], scale=(0.7, 0.9))\n', (1749, 1777), False, 'from torchvision import transforms\n'), ((2405, 2445), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', (['_cj', '_cj', '_cj', '(0)'], {}), '(_cj, _cj, _cj, 0)\n', (2427, 2445), False, 'from torchvision import transforms\n'), ((2513, 2546), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (2544, 2546), False, 'from torchvision import transforms\n'), ((3053, 3162), 'torchvision.transforms.RandomResizedCrop', 'torchvision.transforms.RandomResizedCrop', (['img_size'], {'scale': '(0.8, 0.95)', 'ratio': '(0.7, 1.3)', 'interpolation': '(2)'}), '(img_size, scale=(0.8, 0.95), ratio\n =(0.7, 1.3), interpolation=2)\n', (3093, 3162), False, 'import torchvision\n'), ((3320, 3360), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', (['_cj', '_cj', '_cj', '(0)'], {}), '(_cj, _cj, _cj, 0)\n', (3342, 3360), False, 'from torchvision import transforms\n'), ((3404, 3449), 'torchvision.transforms.RandomHorizontalFlip', 'torchvision.transforms.RandomHorizontalFlip', ([], {}), '()\n', (3447, 3449), False, 'import torchvision\n'), ((4028, 4063), 
'torchvision.transforms.ToPILImage', 'torchvision.transforms.ToPILImage', ([], {}), '()\n', (4061, 4063), False, 'import torchvision\n'), ((1693, 1711), 'PIL.Image.fromarray', 'Image.fromarray', (['x'], {}), '(x)\n', (1708, 1711), False, 'from PIL import Image\n'), ((1930, 1941), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1938, 1941), True, 'import numpy as np\n'), ((2766, 2786), 'numpy.array', 'np.array', (['patch_size'], {}), '(patch_size)\n', (2774, 2786), True, 'import numpy as np\n'), ((1104, 1122), 'PIL.Image.fromarray', 'Image.fromarray', (['x'], {}), '(x)\n', (1119, 1122), False, 'from PIL import Image\n'), ((726, 744), 'PIL.Image.fromarray', 'Image.fromarray', (['v'], {}), '(v)\n', (741, 744), False, 'from PIL import Image\n')]
|
import logging
import pytest
import uuid
from pygame.math import Vector2
import pygame
import random
import numpy as np
import cv2
import time
from .base_render import BaseRender
from gobigger.utils import Colors, BLACK, YELLOW, GREEN
from gobigger.utils import PLAYER_COLORS, FOOD_COLOR, THORNS_COLOR, SPORE_COLOR
from gobigger.utils import precision_algorithm, Border
class EnvRender(BaseRender):
    '''
    Overview:
        Render without opening a new window, producing both a global view and
        the partial view visible to each player.
    '''
def __init__(self, width, height, background=(255,255,255), padding=(0,0), cell_size=10,
scale_up_ratio=1.5, vision_x_min=100, vision_y_min=100, only_render=True, use_spatial=True):
super(EnvRender, self).__init__(width, height, background=background, padding=padding,
cell_size=cell_size, only_render=only_render)
self.scale_up_ratio = scale_up_ratio
self.vision_x_min = vision_x_min
self.vision_y_min = vision_y_min
self.use_spatial = use_spatial
    def fill_all(self, screen, food_balls, thorns_balls, spore_balls, players):
font = pygame.font.SysFont('Menlo', 12, True)
# render all balls
for ball in food_balls:
pygame.draw.circle(screen, FOOD_COLOR, ball.position, ball.radius)
for ball in thorns_balls:
pygame.draw.circle(screen, THORNS_COLOR, ball.position, ball.radius)
for ball in spore_balls:
pygame.draw.circle(screen, SPORE_COLOR, ball.position, ball.radius)
for index, player in enumerate(players):
for ball in player.get_balls():
pygame.draw.circle(screen, PLAYER_COLORS[int(ball.owner)], ball.position, ball.radius)
screen_data = pygame.surfarray.array3d(screen)
return screen_data
def get_clip_screen(self, screen_data, rectangle):
if len(screen_data.shape) == 3:
screen_data_clip = screen_data[rectangle[0]:rectangle[2],
rectangle[1]:rectangle[3], :]
else:
screen_data_clip = screen_data[rectangle[0]:rectangle[2],
rectangle[1]:rectangle[3]]
return screen_data_clip
def get_rectangle_by_player(self, player):
        '''
        Return the player's view rectangle: a scaled-up multiple of the
        bounding rectangle of the player's balls around their centroid,
        clamped to the map borders.
        '''
centroid = player.cal_centroid()
xs_max = 0
ys_max = 0
for ball in player.get_balls():
direction_center = centroid - ball.position
if abs(direction_center.x) + ball.radius > xs_max:
xs_max = abs(direction_center.x) + ball.radius
if abs(direction_center.y) + ball.radius > ys_max:
ys_max = abs(direction_center.y) + ball.radius
xs_max = max(xs_max, self.vision_x_min)
ys_max = max(ys_max, self.vision_y_min)
scale_up_len = max(xs_max, ys_max)
left_top_x = min(max(int(centroid.x - scale_up_len * self.scale_up_ratio), 0),
max(int(self.width_full - scale_up_len * self.scale_up_ratio * 2), 0))
left_top_y = min(max(int(centroid.y - scale_up_len * self.scale_up_ratio), 0),
max(int(self.height_full - scale_up_len * self.scale_up_ratio * 2), 0))
right_bottom_x = min(int(left_top_x + scale_up_len * self.scale_up_ratio * 2), self.width_full)
right_bottom_y = min(int(left_top_y + scale_up_len * self.scale_up_ratio * 2), self.height_full)
rectangle = (left_top_x, left_top_y, right_bottom_x, right_bottom_y)
return rectangle
def get_overlap(self, rectangle, food_balls, thorns_balls, spore_balls, player):
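        # each returned entry is a generator that lazily yields only the balls
        # lying inside the given view rectangle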
def food_generator(rectangle, food_balls):
for ball in food_balls:
if ball.judge_in_rectangle(rectangle):
yield({'position': tuple(ball.position), 'radius': ball.radius})
def thorns_generator(rectangle, thorns_balls):
for ball in thorns_balls:
if ball.judge_in_rectangle(rectangle):
yield({'position': tuple(ball.position), 'radius': ball.radius})
def spore_generator(rectangle, spore_balls):
for ball in spore_balls:
if ball.judge_in_rectangle(rectangle):
yield({'position': tuple(ball.position), 'radius': ball.radius})
def player_generator(rectangle, player):
for ball in player.get_balls():
if ball.judge_in_rectangle(rectangle):
yield({'position': tuple(ball.position), 'radius': ball.radius,
'player': player.name, 'team': player.team_name})
return {'food': food_generator(rectangle, food_balls), 'thorns': thorns_generator(rectangle, thorns_balls),
'spore': spore_generator(rectangle, spore_balls), 'clone': player_generator(rectangle, player)}
def update_all(self, food_balls, thorns_balls, spore_balls, players):
screen_data_all = None
feature_layers = None
if self.use_spatial:
screen_all = pygame.Surface((self.width, self.height))
screen_all.fill(self.background)
screen_data_all = self.fill_all(screen_all, food_balls, thorns_balls, spore_balls, players)
screen_data_players = {}
# food_balls = precision_algorithm(Border(0, 0, 1000, 1000), food_balls)
for player in players:
rectangle = self.get_rectangle_by_player(player)
if self.use_spatial:
screen_data_player = self.get_clip_screen(screen_data_all, rectangle=rectangle)
screen_data_player = cv2.cvtColor(screen_data_player, cv2.COLOR_RGB2BGR)
screen_data_player = np.fliplr(screen_data_player)
screen_data_player = np.rot90(screen_data_player)
feature_layers = self.transfer_rgb_to_features(screen_data_player, player_num=len(players))
overlap = self.get_overlap(rectangle, food_balls, thorns_balls, spore_balls, player)
            # overlap = self.get_overlap(rectangle, food_balls.solve(rectangle[0], rectangle[1], rectangle[2], rectangle[3]), thorns_balls, spore_balls, player)
screen_data_players[player.name] = {
'feature_layers': feature_layers,
'rectangle': rectangle,
'overlap': overlap,
'team_name': player.team_name,
}
return screen_data_all, screen_data_players
def get_tick_all_colorful(self, food_balls, thorns_balls, spore_balls, players, partial_size=300):
screen_all = pygame.Surface((self.width, self.height))
screen_all.fill(self.background)
font = pygame.font.SysFont('Menlo', 12, True)
# render all balls
for ball in food_balls:
pygame.draw.circle(screen_all, BLACK, ball.position, ball.radius)
for ball in thorns_balls:
pygame.draw.circle(screen_all, GREEN, ball.position, ball.radius)
for ball in spore_balls:
pygame.draw.circle(screen_all, YELLOW, ball.position, ball.radius)
for index, player in enumerate(players):
for ball in player.get_balls():
pygame.draw.circle(screen_all, Colors[int(ball.owner)], ball.position, ball.radius)
screen_data_all = pygame.surfarray.array3d(screen_all)
screen_data_players = {}
for player in players:
rectangle = self.get_rectangle_by_player(player)
screen_data_player = self.get_clip_screen(screen_data_all, rectangle=rectangle)
screen_data_player = np.resize(np.rot90(np.fliplr(cv2.cvtColor(screen_data_player, cv2.COLOR_RGB2BGR))), (partial_size, partial_size, 3))
screen_data_players[player.name] = screen_data_player
screen_data_all = np.rot90(np.fliplr(cv2.cvtColor(screen_data_all, cv2.COLOR_RGB2BGR)))
return screen_data_all, screen_data_players
def transfer_rgb_to_features(self, rgb, player_num=12):
        '''
        Overview:
            Split the RGB observation into binary feature layers: one layer per
            player (player_num in total), plus food, spore and thorns layers,
            matched on the red channel of each entity's colour.
        '''
features = []
h, w = rgb.shape[0:2]
tmp_rgb = rgb[:,:,0]
assert len(tmp_rgb.shape) == 2
for i in range(player_num):
features.append((tmp_rgb==PLAYER_COLORS[i][0]).astype(int))
features.append((tmp_rgb==FOOD_COLOR[0]).astype(int))
features.append((tmp_rgb==SPORE_COLOR[0]).astype(int))
features.append((tmp_rgb==THORNS_COLOR[0]).astype(int))
return features
def show(self):
raise NotImplementedError
def close(self):
pygame.quit()
|
[
"pygame.quit",
"pygame.draw.circle",
"pygame.Surface",
"pygame.font.SysFont",
"cv2.cvtColor",
"numpy.fliplr",
"numpy.rot90",
"pygame.surfarray.array3d"
] |
[((1177, 1215), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""Menlo"""', '(12)', '(True)'], {}), "('Menlo', 12, True)\n", (1196, 1215), False, 'import pygame\n'), ((1800, 1832), 'pygame.surfarray.array3d', 'pygame.surfarray.array3d', (['screen'], {}), '(screen)\n', (1824, 1832), False, 'import pygame\n'), ((6726, 6767), 'pygame.Surface', 'pygame.Surface', (['(self.width, self.height)'], {}), '((self.width, self.height))\n', (6740, 6767), False, 'import pygame\n'), ((6824, 6862), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""Menlo"""', '(12)', '(True)'], {}), "('Menlo', 12, True)\n", (6843, 6862), False, 'import pygame\n'), ((7443, 7479), 'pygame.surfarray.array3d', 'pygame.surfarray.array3d', (['screen_all'], {}), '(screen_all)\n', (7467, 7479), False, 'import pygame\n'), ((8736, 8749), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (8747, 8749), False, 'import pygame\n'), ((1287, 1353), 'pygame.draw.circle', 'pygame.draw.circle', (['screen', 'FOOD_COLOR', 'ball.position', 'ball.radius'], {}), '(screen, FOOD_COLOR, ball.position, ball.radius)\n', (1305, 1353), False, 'import pygame\n'), ((1400, 1468), 'pygame.draw.circle', 'pygame.draw.circle', (['screen', 'THORNS_COLOR', 'ball.position', 'ball.radius'], {}), '(screen, THORNS_COLOR, ball.position, ball.radius)\n', (1418, 1468), False, 'import pygame\n'), ((1514, 1581), 'pygame.draw.circle', 'pygame.draw.circle', (['screen', 'SPORE_COLOR', 'ball.position', 'ball.radius'], {}), '(screen, SPORE_COLOR, ball.position, ball.radius)\n', (1532, 1581), False, 'import pygame\n'), ((5198, 5239), 'pygame.Surface', 'pygame.Surface', (['(self.width, self.height)'], {}), '((self.width, self.height))\n', (5212, 5239), False, 'import pygame\n'), ((6934, 6999), 'pygame.draw.circle', 'pygame.draw.circle', (['screen_all', 'BLACK', 'ball.position', 'ball.radius'], {}), '(screen_all, BLACK, ball.position, ball.radius)\n', (6952, 6999), False, 'import pygame\n'), ((7046, 7111), 'pygame.draw.circle', 'pygame.draw.circle', (['screen_all', 'GREEN', 'ball.position', 'ball.radius'], {}), '(screen_all, GREEN, ball.position, ball.radius)\n', (7064, 7111), False, 'import pygame\n'), ((7157, 7223), 'pygame.draw.circle', 'pygame.draw.circle', (['screen_all', 'YELLOW', 'ball.position', 'ball.radius'], {}), '(screen_all, YELLOW, ball.position, ball.radius)\n', (7175, 7223), False, 'import pygame\n'), ((5762, 5813), 'cv2.cvtColor', 'cv2.cvtColor', (['screen_data_player', 'cv2.COLOR_RGB2BGR'], {}), '(screen_data_player, cv2.COLOR_RGB2BGR)\n', (5774, 5813), False, 'import cv2\n'), ((5851, 5880), 'numpy.fliplr', 'np.fliplr', (['screen_data_player'], {}), '(screen_data_player)\n', (5860, 5880), True, 'import numpy as np\n'), ((5918, 5946), 'numpy.rot90', 'np.rot90', (['screen_data_player'], {}), '(screen_data_player)\n', (5926, 5946), True, 'import numpy as np\n'), ((7958, 8006), 'cv2.cvtColor', 'cv2.cvtColor', (['screen_data_all', 'cv2.COLOR_RGB2BGR'], {}), '(screen_data_all, cv2.COLOR_RGB2BGR)\n', (7970, 8006), False, 'import cv2\n'), ((7759, 7810), 'cv2.cvtColor', 'cv2.cvtColor', (['screen_data_player', 'cv2.COLOR_RGB2BGR'], {}), '(screen_data_player, cv2.COLOR_RGB2BGR)\n', (7771, 7810), False, 'import cv2\n')]
|
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
# A script to produce a finishing ability metric based on the *concept* of post-shot xG, derived from the physical properties
# of a shot once taken
# see https://www.opengoalapp.com/finishing-ability for full write-up of the method
import pandas as pd
from pandas.io.json import json_normalize
import numpy as np
from GetMatchDates import GetMatchDates
from PrepareIO import PrepareIO
from sklearn.model_selection import train_test_split
from sklearn.calibration import calibration_curve
from sklearn.metrics import log_loss
import xgboost as xgb
import matplotlib.pyplot as plt
# load in StatsBomb shot data from a snapshot file - you can also load in from the API as well with split=True
# then just combine the shot dataframes from each match into a single frame afterwards e.g:
#-------------
#grouped_event_list = []
#for match_id in match_ids:
#grouped_events = sb.events(match_id=match_id, split=True)
#grouped_event_list.append(grouped_events)
#select the event group required and a list of dataframes will be returned
#shots_frames = [i["shots"] for i in grouped_event_list]
#-------------
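# A hedged, illustrative continuation of the sketch above (it assumes the
# `shots_frames` list built in the commented loop); the per-match frames would
# then be stacked into the single `shots` frame that is instead loaded from a
# snapshot file below:
#shots = pd.concat(shots_frames, ignore_index=True)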
shots = pd.read_pickle('data\shot_data.pkl')
##############################################################################################
# PRE-PROCESSING #
##############################################################################################
#expand out the nested json values within shots into new dataframe
expanded_shots = json_normalize(shots['shot']) # expanded shot is produced for each shot in order, so can reset index and append shots.id onto expanded_shots
shots = shots.reset_index(drop=True)
expanded_shots = expanded_shots.reset_index(drop=True)
#concat along the columns axis as data is in order
expanded_shots = pd.concat([shots, expanded_shots], axis = 1)
#expand location data into their own columns
expanded_shots[['loc_x','loc_y']] = pd.DataFrame(expanded_shots.location.tolist(), index= expanded_shots.index)
expanded_shots[['endloc_x', 'endloc_y', 'endloc_z']] = pd.DataFrame(expanded_shots.end_location.tolist(), index= expanded_shots.index)
#lookup the date of the match for each shot in the set
match_date = GetMatchDates()
expanded_shots = expanded_shots.merge(match_date) # get date of all shots
#select some players for evaluation - note for this set there is only a handful of players with >100 shots.
player_list = ['<NAME>', '<NAME>', '<NAME>']
#create a dictionary for the shot data of the evaluation players
eval_dict = {}
for player in player_list:
eval_dict[player] = expanded_shots.loc[expanded_shots['player'].isin([player])].sort_values(by = ['match_date', 'index']) # strip out eval players shots and sort into order shots were taken
#drop this data from the main set to be used for model training and test
expanded_shots = expanded_shots.loc[~expanded_shots['player'].isin(player_list)]
#generate IO for models - the output is common for both
main_psxg_input, main_xg_input, main_output = PrepareIO(expanded_shots)
#get eval player data into format that can be plugged into model as well
eval_data = {}
for player in eval_dict:
eval_data[player] = PrepareIO(eval_dict[player])
##############################################################################################
# MODELS #
##############################################################################################
#----------psxG model----------------------------------------------
#generate train and test split for single run
X_train, X_test, y_train, y_test = train_test_split(main_psxg_input, main_output, test_size=0.3, random_state=42)
#define xgboost model - params have been chosen following a small amount of experimentation i.e. NOT optimised
psxg_model = xgb.XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1,
colsample_bynode=1, colsample_bytree=1, gamma=0,
learning_rate=0.1, max_delta_step=0, max_depth=5,
min_child_weight=1, missing=None, n_estimators=180, n_jobs=1,
nthread=None, objective='binary:logistic', random_state=0,
reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None,
silent=None, subsample=1, verbosity=1)
#fit model
psxg_model.fit(X_train, y_train)
#generate predictions from test data
y_pred = psxg_model.predict_proba(X_test)
#calculate log loss on test data, using p(Goal = 1) i.e. the second value in the array
ll_model = log_loss(y_test, y_pred[:,1])
#plot calibration curve
ccurve = calibration_curve(y_test, y_pred[:,1], n_bins = 15) # returns true proportion [0] and average predicted prob [1]
plt.scatter(ccurve[1],ccurve[0])
plt.title('psxG Calibration Curve')
plt.xlabel('Average of model predicted psxG')
plt.ylabel('Average of actual goal outcome')
x = [0,1]
y = [0,1]
plt.plot(x,y, '--')
plt.show()
#------------------------------------------------------------------
#----------xG model----------------------------------------------
#generate train and test split for single run
X_train, X_test, y_train, y_test = train_test_split(main_xg_input, main_output, test_size=0.3, random_state=42)
#define xgboost model - params have been chosen following a small amount of experimentation i.e. NOT optimised
xg_model = xgb.XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1,
colsample_bynode=1, colsample_bytree=1, gamma=0,
learning_rate=0.1, max_delta_step=0, max_depth=5,
min_child_weight=1, missing=None, n_estimators=180, n_jobs=1,
nthread=None, objective='binary:logistic', random_state=0,
reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None,
silent=None, subsample=1, verbosity=1)
#fit model
xg_model.fit(X_train, y_train)
#generate predictions from test data
y_pred = xg_model.predict_proba(X_test)
#calculate log loss on test data, using p(Goal = 1) i.e. the second value in the array
ll_model = log_loss(y_test, y_pred[:,1])
#plot calibration curve
ccurve = calibration_curve(y_test, y_pred[:,1], n_bins = 15) # returns true proportion [0] and average predicted prob [1]
plt.scatter(ccurve[1],ccurve[0])
plt.title('xG Calibration Curve')
plt.xlabel('Average of model predicted xG')
plt.ylabel('Average of actual goal outcome')
x = [0,1]
y = [0,1]
plt.plot(x,y, '--')
plt.show()
##############################################################################################
# EVALUATION #
##############################################################################################
# Generate confidence intervals on eval player set
window = 30 # number of shots to use as window for rolling average
#initialise an empty dictionary for player results to go in
player_eval = {}
for player in player_list:
player_eval[player] = {}
player_eval[player]['psxG_preds'] = []
player_eval[player]['xG_preds'] = []
num_sims = 1000 # set number of runs of model fitting to perform - EACH SIM TAKES 2 SECONDS ON MY MID-SPEC MACHINE = 2000 SECS TOTAL
#run the model fits and add prediction results to dictionary
for i in range(num_sims):
X_train_psxG, X_test_psxG, y_train, y_test = train_test_split(main_psxg_input, main_output, test_size=0.3)
X_train_xG = X_train_psxG.drop(['angle', 'elev', 'vel'], axis = 1)
X_test_xG = X_test_psxG.drop(['angle', 'elev', 'vel'], axis = 1)
psxg_model.fit(X_train_psxG, y_train)
psxg_pred = psxg_model.predict_proba(X_test_psxG)
xg_model.fit(X_train_xG, y_train)
xg_pred = xg_model.predict_proba(X_test_xG)
ll_psxg = log_loss(y_test, psxg_pred[:,1])
ll_xg = log_loss(y_test, xg_pred[:,1])
print('psxG = '+ str(ll_psxg)) # print progress LL for each run as we go to show it is doing something more than anything
print('xG = '+ str(ll_xg))
for player in player_list:
player_eval[player]['psxG_preds'].append(psxg_model.predict_proba(eval_data[player][0])[:,1])
player_eval[player]['xG_preds'].append(xg_model.predict_proba(eval_data[player][1])[:,1])
#manipulate and perform relative % calculation
for player in player_eval:
player_eval[player]['p'] = np.array(player_eval[player]['psxG_preds'])
player_eval[player]['q'] = np.array(player_eval[player]['xG_preds'])
p = player_eval[player]['p']
q = player_eval[player]['q']
player_eval[player]['overperf_mult'] = (p-q) / q
player_eval[player]['overperf_mult'] = player_eval[player]['overperf_mult'].T
player_eval[player]['overperf_mult'] = pd.DataFrame(player_eval[player]['overperf_mult'])
player_eval[player]['ma_overperf_mult'] = player_eval[player]['overperf_mult'].rolling(window = window).mean()
    #calculate relevant percentiles for mean and CI of choice - here we have 95% CI
player_eval[player]['ma_overperf_mult_97'] = np.percentile(player_eval[player]['ma_overperf_mult'], q = 97.5, axis = 1)
player_eval[player]['ma_overperf_mult_2'] = np.percentile(player_eval[player]['ma_overperf_mult'], q = 2.5, axis = 1)
player_eval[player]['ma_overperf_mult_50'] = np.percentile(player_eval[player]['ma_overperf_mult'], q = 50, axis = 1)
##############################################################################################
# PLOTTING #
##############################################################################################
player = '<NAME>'
plt.figure(figsize=(12,6))
plt.plot(player_eval[player]['ma_overperf_mult_50']*100, label = 'psxG - xG', color = 'purple', linewidth = 5)
plt.plot(player_eval[player]['ma_overperf_mult_97']*100, '--', color = 'purple', label = '95% CI')
plt.plot(player_eval[player]['ma_overperf_mult_2']*100, '--', color = 'purple')
date_labels = eval_dict[player]['match_date']
plt.title('Relative psxG overperformance - '+str(window)+ ' shot rolling average'+'\n'+ player)
plt.xlabel('Date')
plt.ylabel('Relative overperformance %')
plt.legend(loc="upper right")
ticks = np.floor(np.linspace(0,len(date_labels)-1,num = 7))
plt.xticks(ticks = ticks, labels = date_labels.take(ticks))
plt.grid(b=True)
plt.xlim(window, len(date_labels))
plt.ylim(-40,80)
plt.fill_between(x = list(range(0,len(date_labels))), y1 = player_eval[player]['ma_overperf_mult_97']*100,
y2 = player_eval[player]['ma_overperf_mult_2']*100,
alpha = 0.3,
color = 'purple')
plt.show()
|
[
"matplotlib.pyplot.title",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.figure",
"GetMatchDates.GetMatchDates",
"pandas.DataFrame",
"sklearn.metrics.log_loss",
"xgboost.XGBClassifier",
"pandas.concat",
"sklearn.calibration.calibration_curve",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"pandas.io.json.json_normalize",
"matplotlib.pyplot.legend",
"PrepareIO.PrepareIO",
"numpy.percentile",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"numpy.array",
"pandas.read_pickle",
"matplotlib.pyplot.xlabel"
] |
[((1194, 1231), 'pandas.read_pickle', 'pd.read_pickle', (['"""data\\\\shot_data.pkl"""'], {}), "('data\\\\shot_data.pkl')\n", (1208, 1231), True, 'import pandas as pd\n'), ((1573, 1602), 'pandas.io.json.json_normalize', 'json_normalize', (["shots['shot']"], {}), "(shots['shot'])\n", (1587, 1602), False, 'from pandas.io.json import json_normalize\n'), ((1876, 1918), 'pandas.concat', 'pd.concat', (['[shots, expanded_shots]'], {'axis': '(1)'}), '([shots, expanded_shots], axis=1)\n', (1885, 1918), True, 'import pandas as pd\n'), ((2277, 2292), 'GetMatchDates.GetMatchDates', 'GetMatchDates', ([], {}), '()\n', (2290, 2292), False, 'from GetMatchDates import GetMatchDates\n'), ((3086, 3111), 'PrepareIO.PrepareIO', 'PrepareIO', (['expanded_shots'], {}), '(expanded_shots)\n', (3095, 3111), False, 'from PrepareIO import PrepareIO\n'), ((3685, 3763), 'sklearn.model_selection.train_test_split', 'train_test_split', (['main_psxg_input', 'main_output'], {'test_size': '(0.3)', 'random_state': '(42)'}), '(main_psxg_input, main_output, test_size=0.3, random_state=42)\n', (3701, 3763), False, 'from sklearn.model_selection import train_test_split\n'), ((3910, 4320), 'xgboost.XGBClassifier', 'xgb.XGBClassifier', ([], {'base_score': '(0.5)', 'booster': '"""gbtree"""', 'colsample_bylevel': '(1)', 'colsample_bynode': '(1)', 'colsample_bytree': '(1)', 'gamma': '(0)', 'learning_rate': '(0.1)', 'max_delta_step': '(0)', 'max_depth': '(5)', 'min_child_weight': '(1)', 'missing': 'None', 'n_estimators': '(180)', 'n_jobs': '(1)', 'nthread': 'None', 'objective': '"""binary:logistic"""', 'random_state': '(0)', 'reg_alpha': '(0)', 'reg_lambda': '(1)', 'scale_pos_weight': '(1)', 'seed': 'None', 'silent': 'None', 'subsample': '(1)', 'verbosity': '(1)'}), "(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n colsample_bynode=1, colsample_bytree=1, gamma=0, learning_rate=0.1,\n max_delta_step=0, max_depth=5, min_child_weight=1, missing=None,\n n_estimators=180, n_jobs=1, nthread=None, objective='binary:logistic',\n random_state=0, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=\n None, silent=None, subsample=1, verbosity=1)\n", (3927, 4320), True, 'import xgboost as xgb\n'), ((4636, 4666), 'sklearn.metrics.log_loss', 'log_loss', (['y_test', 'y_pred[:, 1]'], {}), '(y_test, y_pred[:, 1])\n', (4644, 4666), False, 'from sklearn.metrics import log_loss\n'), ((4701, 4751), 'sklearn.calibration.calibration_curve', 'calibration_curve', (['y_test', 'y_pred[:, 1]'], {'n_bins': '(15)'}), '(y_test, y_pred[:, 1], n_bins=15)\n', (4718, 4751), False, 'from sklearn.calibration import calibration_curve\n'), ((4814, 4847), 'matplotlib.pyplot.scatter', 'plt.scatter', (['ccurve[1]', 'ccurve[0]'], {}), '(ccurve[1], ccurve[0])\n', (4825, 4847), True, 'import matplotlib.pyplot as plt\n'), ((4847, 4882), 'matplotlib.pyplot.title', 'plt.title', (['"""psxG Calibration Curve"""'], {}), "('psxG Calibration Curve')\n", (4856, 4882), True, 'import matplotlib.pyplot as plt\n'), ((4883, 4928), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Average of model predicted psxG"""'], {}), "('Average of model predicted psxG')\n", (4893, 4928), True, 'import matplotlib.pyplot as plt\n'), ((4929, 4973), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Average of actual goal outcome"""'], {}), "('Average of actual goal outcome')\n", (4939, 4973), True, 'import matplotlib.pyplot as plt\n'), ((4994, 5014), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""--"""'], {}), "(x, y, '--')\n", (5002, 5014), True, 'import matplotlib.pyplot as plt\n'), ((5014, 5024), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5022, 5024), True, 'import matplotlib.pyplot as plt\n'), ((5242, 5318), 'sklearn.model_selection.train_test_split', 'train_test_split', (['main_xg_input', 'main_output'], {'test_size': '(0.3)', 'random_state': '(42)'}), '(main_xg_input, main_output, test_size=0.3, random_state=42)\n', (5258, 5318), False, 'from sklearn.model_selection import train_test_split\n'), ((5442, 5852), 'xgboost.XGBClassifier', 'xgb.XGBClassifier', ([], {'base_score': '(0.5)', 'booster': '"""gbtree"""', 'colsample_bylevel': '(1)', 'colsample_bynode': '(1)', 'colsample_bytree': '(1)', 'gamma': '(0)', 'learning_rate': '(0.1)', 'max_delta_step': '(0)', 'max_depth': '(5)', 'min_child_weight': '(1)', 'missing': 'None', 'n_estimators': '(180)', 'n_jobs': '(1)', 'nthread': 'None', 'objective': '"""binary:logistic"""', 'random_state': '(0)', 'reg_alpha': '(0)', 'reg_lambda': '(1)', 'scale_pos_weight': '(1)', 'seed': 'None', 'silent': 'None', 'subsample': '(1)', 'verbosity': '(1)'}), "(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n colsample_bynode=1, colsample_bytree=1, gamma=0, learning_rate=0.1,\n max_delta_step=0, max_depth=5, min_child_weight=1, missing=None,\n n_estimators=180, n_jobs=1, nthread=None, objective='binary:logistic',\n random_state=0, reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=\n None, silent=None, subsample=1, verbosity=1)\n", (5459, 5852), True, 'import xgboost as xgb\n'), ((6138, 6168), 'sklearn.metrics.log_loss', 'log_loss', (['y_test', 'y_pred[:, 1]'], {}), '(y_test, y_pred[:, 1])\n', (6146, 6168), False, 'from sklearn.metrics import log_loss\n'), ((6206, 6256), 'sklearn.calibration.calibration_curve', 'calibration_curve', (['y_test', 'y_pred[:, 1]'], {'n_bins': '(15)'}), '(y_test, y_pred[:, 1], n_bins=15)\n', (6223, 6256), False, 'from sklearn.calibration import calibration_curve\n'), ((6319, 6352), 'matplotlib.pyplot.scatter', 'plt.scatter', (['ccurve[1]', 'ccurve[0]'], {}), '(ccurve[1], ccurve[0])\n', (6330, 6352), True, 'import matplotlib.pyplot as plt\n'), ((6352, 6385), 'matplotlib.pyplot.title', 'plt.title', (['"""xG Calibration Curve"""'], {}), "('xG Calibration Curve')\n", (6361, 6385), True, 'import matplotlib.pyplot as plt\n'), ((6386, 6429), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Average of model predicted xG"""'], {}), "('Average of model predicted xG')\n", (6396, 6429), True, 'import matplotlib.pyplot as plt\n'), ((6430, 6474), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Average of actual goal outcome"""'], {}), "('Average of actual goal outcome')\n", (6440, 6474), True, 'import matplotlib.pyplot as plt\n'), ((6495, 6515), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""--"""'], {}), "(x, y, '--')\n", (6503, 6515), True, 'import matplotlib.pyplot as plt\n'), ((6515, 6525), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6523, 6525), True, 'import matplotlib.pyplot as plt\n'), ((9651, 9678), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (9661, 9678), True, 'import matplotlib.pyplot as plt\n'), ((9678, 9789), 'matplotlib.pyplot.plot', 'plt.plot', (["(player_eval[player]['ma_overperf_mult_50'] * 100)"], {'label': '"""psxG - xG"""', 'color': '"""purple"""', 'linewidth': '(5)'}), "(player_eval[player]['ma_overperf_mult_50'] * 100, label=\n 'psxG - xG', color='purple', linewidth=5)\n", (9686, 9789), True, 'import matplotlib.pyplot as plt\n'), ((9789, 9890), 'matplotlib.pyplot.plot', 'plt.plot', (["(player_eval[player]['ma_overperf_mult_97'] * 
100)", '"""--"""'], {'color': '"""purple"""', 'label': '"""95% CI"""'}), "(player_eval[player]['ma_overperf_mult_97'] * 100, '--', color=\n 'purple', label='95% CI')\n", (9797, 9890), True, 'import matplotlib.pyplot as plt\n'), ((9888, 9967), 'matplotlib.pyplot.plot', 'plt.plot', (["(player_eval[player]['ma_overperf_mult_2'] * 100)", '"""--"""'], {'color': '"""purple"""'}), "(player_eval[player]['ma_overperf_mult_2'] * 100, '--', color='purple')\n", (9896, 9967), True, 'import matplotlib.pyplot as plt\n'), ((10111, 10129), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (10121, 10129), True, 'import matplotlib.pyplot as plt\n'), ((10130, 10170), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Relative overperformance %"""'], {}), "('Relative overperformance %')\n", (10140, 10170), True, 'import matplotlib.pyplot as plt\n'), ((10171, 10200), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (10181, 10200), True, 'import matplotlib.pyplot as plt\n'), ((10321, 10337), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)'}), '(b=True)\n', (10329, 10337), True, 'import matplotlib.pyplot as plt\n'), ((10373, 10390), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-40)', '(80)'], {}), '(-40, 80)\n', (10381, 10390), True, 'import matplotlib.pyplot as plt\n'), ((10632, 10642), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10640, 10642), True, 'import matplotlib.pyplot as plt\n'), ((3250, 3278), 'PrepareIO.PrepareIO', 'PrepareIO', (['eval_dict[player]'], {}), '(eval_dict[player])\n', (3259, 3278), False, 'from PrepareIO import PrepareIO\n'), ((7374, 7435), 'sklearn.model_selection.train_test_split', 'train_test_split', (['main_psxg_input', 'main_output'], {'test_size': '(0.3)'}), '(main_psxg_input, main_output, test_size=0.3)\n', (7390, 7435), False, 'from sklearn.model_selection import train_test_split\n'), ((7789, 7822), 'sklearn.metrics.log_loss', 'log_loss', (['y_test', 'psxg_pred[:, 1]'], {}), '(y_test, psxg_pred[:, 1])\n', (7797, 7822), False, 'from sklearn.metrics import log_loss\n'), ((7835, 7866), 'sklearn.metrics.log_loss', 'log_loss', (['y_test', 'xg_pred[:, 1]'], {}), '(y_test, xg_pred[:, 1])\n', (7843, 7866), False, 'from sklearn.metrics import log_loss\n'), ((8380, 8423), 'numpy.array', 'np.array', (["player_eval[player]['psxG_preds']"], {}), "(player_eval[player]['psxG_preds'])\n", (8388, 8423), True, 'import numpy as np\n'), ((8455, 8496), 'numpy.array', 'np.array', (["player_eval[player]['xG_preds']"], {}), "(player_eval[player]['xG_preds'])\n", (8463, 8496), True, 'import numpy as np\n'), ((8751, 8801), 'pandas.DataFrame', 'pd.DataFrame', (["player_eval[player]['overperf_mult']"], {}), "(player_eval[player]['overperf_mult'])\n", (8763, 8801), True, 'import pandas as pd\n'), ((9061, 9131), 'numpy.percentile', 'np.percentile', (["player_eval[player]['ma_overperf_mult']"], {'q': '(97.5)', 'axis': '(1)'}), "(player_eval[player]['ma_overperf_mult'], q=97.5, axis=1)\n", (9074, 9131), True, 'import numpy as np\n'), ((9184, 9253), 'numpy.percentile', 'np.percentile', (["player_eval[player]['ma_overperf_mult']"], {'q': '(2.5)', 'axis': '(1)'}), "(player_eval[player]['ma_overperf_mult'], q=2.5, axis=1)\n", (9197, 9253), True, 'import numpy as np\n'), ((9307, 9375), 'numpy.percentile', 'np.percentile', (["player_eval[player]['ma_overperf_mult']"], {'q': '(50)', 'axis': '(1)'}), "(player_eval[player]['ma_overperf_mult'], q=50, axis=1)\n", (9320, 9375), True, 'import numpy as np\n')]
|
import numpy as np
from .dt import DecisionTree
from .losses import MSELoss, CrossEntropyLoss
def to_one_hot(labels, n_classes=None):
if labels.ndim > 1:
raise ValueError("labels must have dimension 1, but got {}".format(labels.ndim))
N = labels.size
n_cols = np.max(labels) + 1 if n_classes is None else n_classes
one_hot = np.zeros((N, n_cols))
one_hot[np.arange(N), labels] = 1.0
return one_hot
class GradientBoostedDecisionTree:
def __init__(
self,
n_iter,
max_depth=None,
classifier=True,
learning_rate=1,
loss="crossentropy",
step_size="constant",
):
"""
A gradient boosted ensemble of decision trees.
Notes
-----
Gradient boosted machines (GBMs) fit an ensemble of `m` weak learners such that:
.. math::
f_m(X) = b(X) + \eta w_1 g_1 + \ldots + \eta w_m g_m
where `b` is a fixed initial estimate for the targets, :math:`\eta` is
a learning rate parameter, and :math:`w_{\cdot}` and :math:`g_{\cdot}`
denote the weights and learner predictions for subsequent fits.
We fit each `w` and `g` iteratively using a greedy strategy so that at each
iteration `i`,
.. math::
w_i, g_i = \\arg \min_{w_i, g_i} L(Y, f_{i-1}(X) + w_i g_i)
On each iteration we fit a new weak learner to predict the negative
gradient of the loss with respect to the previous prediction, :math:`f_{i-1}(X)`.
We then use the element-wise product of the predictions of this weak
learner, :math:`g_i`, with a weight, :math:`w_i`, to compute the amount to
adjust the predictions of our model at the previous iteration, :math:`f_{i-1}(X)`:
.. math::
f_i(X) := f_{i-1}(X) + w_i g_i
Parameters
----------
n_iter : int
The number of iterations / weak estimators to use when fitting each
dimension / class of `Y`.
max_depth : int
The maximum depth of each decision tree weak estimator. Default is
None.
classifier : bool
Whether `Y` contains class labels or real-valued targets. Default
is True.
learning_rate : float
Value in [0, 1] controlling the amount each weak estimator
contributes to the overall model prediction. Sometimes known as the
`shrinkage parameter` in the GBM literature. Default is 1.
loss : {'crossentropy', 'mse'}
The loss to optimize for the GBM. Default is 'crossentropy'.
step_size : {"constant", "adaptive"}
How to choose the weight for each weak learner. If "constant", use
a fixed weight of 1 for each learner. If "adaptive", use a step
size computed via line-search on the current iteration's loss.
Default is 'constant'.
"""
self.loss = loss
self.weights = None
self.learners = None
self.out_dims = None
self.n_iter = n_iter
self.base_estimator = None
self.max_depth = max_depth
self.step_size = step_size
self.classifier = classifier
self.learning_rate = learning_rate
def fit(self, X, Y):
"""
Fit the gradient boosted decision trees on a dataset.
Parameters
----------
X : :py:class:`ndarray <numpy.ndarray>` of shape (N, M)
The training data of `N` examples, each with `M` features
Y : :py:class:`ndarray <numpy.ndarray>` of shape (N,)
An array of integer class labels for each example in `X` if
``self.classifier = True``, otherwise the set of target values for
each example in `X`.
"""
if self.loss == "mse":
loss = MSELoss()
elif self.loss == "crossentropy":
loss = CrossEntropyLoss()
# convert Y to one_hot if not already
if self.classifier:
Y = to_one_hot(Y.flatten())
else:
Y = Y.reshape(-1, 1) if len(Y.shape) == 1 else Y
N, M = X.shape
self.out_dims = Y.shape[1]
self.learners = np.empty((self.n_iter, self.out_dims), dtype=object)
self.weights = np.ones((self.n_iter, self.out_dims))
self.weights[1:, :] *= self.learning_rate
# fit the base estimator
Y_pred = np.zeros((N, self.out_dims))
for k in range(self.out_dims):
t = loss.base_estimator()
t.fit(X, Y[:, k])
Y_pred[:, k] += t.predict(X)
self.learners[0, k] = t
# incrementally fit each learner on the negative gradient of the loss
# wrt the previous fit (pseudo-residuals)
for i in range(1, self.n_iter):
for k in range(self.out_dims):
y, y_pred = Y[:, k], Y_pred[:, k]
neg_grad = -1 * loss.grad(y, y_pred)
# use MSE as the surrogate loss when fitting to negative gradients
t = DecisionTree(
classifier=False, max_depth=self.max_depth, criterion="mse"
)
# fit current learner to negative gradients
t.fit(X, neg_grad)
self.learners[i, k] = t
# compute step size and weight for the current learner
step = 1.0
h_pred = t.predict(X)
if self.step_size == "adaptive":
step = loss.line_search(y, y_pred, h_pred)
# update weights and our overall prediction for Y
self.weights[i, k] *= step
Y_pred[:, k] += self.weights[i, k] * h_pred
def predict(self, X):
"""
Use the trained model to classify or predict the examples in `X`.
Parameters
----------
X : :py:class:`ndarray <numpy.ndarray>` of shape `(N, M)`
The training data of `N` examples, each with `M` features
Returns
-------
preds : :py:class:`ndarray <numpy.ndarray>` of shape `(N,)`
The integer class labels predicted for each example in `X` if
``self.classifier = True``, otherwise the predicted target values.
"""
Y_pred = np.zeros((X.shape[0], self.out_dims))
for i in range(self.n_iter):
for k in range(self.out_dims):
Y_pred[:, k] += self.weights[i, k] * self.learners[i, k].predict(X)
if self.classifier:
Y_pred = Y_pred.argmax(axis=1)
return Y_pred
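# A minimal usage sketch (illustrative only, not part of the library): it assumes
# the module is executed as part of its package so the relative imports above
# resolve, and it uses scikit-learn purely to generate a toy dataset.
if __name__ == "__main__":
    from sklearn.datasets import make_classification

    X, y = make_classification(n_samples=200, n_features=10, random_state=0)
    gbm = GradientBoostedDecisionTree(
        n_iter=10,
        max_depth=2,
        classifier=True,
        learning_rate=0.5,
        loss="crossentropy",
        step_size="constant",
    )
    gbm.fit(X, y)
    print("train accuracy:", (gbm.predict(X) == y).mean())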
|
[
"numpy.empty",
"numpy.zeros",
"numpy.ones",
"numpy.max",
"numpy.arange"
] |
[((353, 374), 'numpy.zeros', 'np.zeros', (['(N, n_cols)'], {}), '((N, n_cols))\n', (361, 374), True, 'import numpy as np\n'), ((4220, 4272), 'numpy.empty', 'np.empty', (['(self.n_iter, self.out_dims)'], {'dtype': 'object'}), '((self.n_iter, self.out_dims), dtype=object)\n', (4228, 4272), True, 'import numpy as np\n'), ((4296, 4333), 'numpy.ones', 'np.ones', (['(self.n_iter, self.out_dims)'], {}), '((self.n_iter, self.out_dims))\n', (4303, 4333), True, 'import numpy as np\n'), ((4435, 4463), 'numpy.zeros', 'np.zeros', (['(N, self.out_dims)'], {}), '((N, self.out_dims))\n', (4443, 4463), True, 'import numpy as np\n'), ((6305, 6342), 'numpy.zeros', 'np.zeros', (['(X.shape[0], self.out_dims)'], {}), '((X.shape[0], self.out_dims))\n', (6313, 6342), True, 'import numpy as np\n'), ((284, 298), 'numpy.max', 'np.max', (['labels'], {}), '(labels)\n', (290, 298), True, 'import numpy as np\n'), ((387, 399), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (396, 399), True, 'import numpy as np\n')]
|
# Import modules
import matplotlib.pyplot as plt
import numpy as np
from scripts.dwt_windowed import do_transform
def py_closest(data, value):
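    """Index of the element of `data` closest to `value` (assumes a numpy array)."""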
return np.argmin(np.abs(data - value))
def py_cumvar_n(data):
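    """Normalised cumulative share of the signal's energy (squared values) up to each sample."""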
return np.cumsum(data**2) / np.sum(data**2)
## RANDOM INPUT
# Generate test signal
sig = np.random.random([2**16])
plt.figure();
plt.plot(sig)
# Apply DWT
dwt = do_transform(sig)
cols= dwt.columns
FIG = plt.figure()
ax1 = FIG.add_subplot(611); ax1.plot(dwt[cols[0]])
ax2 = FIG.add_subplot(612); ax2.plot(dwt[cols[1]])
ax3 = FIG.add_subplot(613); ax3.plot(dwt[cols[2]])
ax4 = FIG.add_subplot(614); ax4.plot(dwt[cols[3]])
ax5 = FIG.add_subplot(615); ax5.plot(dwt[cols[4]])
ax6 = FIG.add_subplot(616); ax6.plot(dwt[cols[5]])
## TRAILING PULSE
# Generate test signal
sig = np.zeros([2**16])
sig[-1] = 1
plt.figure();
plt.plot(sig)
# Apply DWT
nlv = 10
dwt = do_transform(sig, wtype='sym2', nlevels=nlv)
cols= dwt.columns
FIG = plt.figure()
AX = []
for il in range(nlv):
if nlv<6:
AX += [FIG.add_subplot(nlv+1, 1, il+1)];
AX[il].plot(dwt[cols[il]]);
AX[il].set_xlim([2**16-120, 2**16])
AX[il].set_xticks([])
else:
AX += [FIG.add_subplot(5, 2, il+1)];
AX[il].plot(dwt[cols[il]]);
AX[il].set_xlim([2**16-240, 2 ** 16]);
AX[il].set_title(cols[il])
AX[il].set_xticks([])
AX[-2].set_xticks([2**16-240, 2**16-120, 2**16])
AX[-2].set_xticklabels(['-4', '-2', '0'])
AX[-2].set_xlabel('Time (h)')
AX[-1].set_xticks([2**16-240, 2**16-120, 2**16])
AX[-1].set_xticklabels(['-4', '-2', '0'])
AX[-1].set_xlabel('Time (h)')
# Check variance threshold
thresh = [0.05, 0.1, 0.15, 0.2]
scale = list( range(10) )
TABLE = np.zeros([10, len(thresh)])
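# TABLE[scale, i]: distance (in samples) from the end of the record to the point
# where the cumulative variance of that scale's coefficients is closest to
# thresh[i], i.e. an estimate of the edge-effect duration at that scale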
for ix, it in enumerate(thresh):
for isc in scale:
TABLE[isc, ix] = 2**16 - py_closest( py_cumvar_n(dwt[cols[isc]]), it )
p1 = plt.bar(range(10), TABLE[:,0], 0.35)
p2 = plt.bar(range(10), TABLE[:,1]-TABLE[:,0], 0.35, bottom=TABLE[:,0])
p3 = plt.bar(range(10), TABLE[:,2]-TABLE[:,1], 0.35, bottom=TABLE[:,1])
p4 = plt.bar(range(10), TABLE[:,3]-TABLE[:,2], 0.35, bottom=TABLE[:,2])
plt.ylabel('edge effect duration (min)')
plt.title('Effects of wavelet scale and variance threshold')
plt.xticks(range(10), cols[:-1])
plt.legend( (p1[0], p2[0], p3[0], p4[0]), ('5% threshold', '10%', '15%', '20%') )
plt.show()
plt.plot([-0.5, 9.5], [30, 30], '--r')
plt.plot([-0.5, 9.5], [60, 60], '--r')
plt.xlim([-0.5, 9.5])
plt.text(1, 32, '30 minutes', color='r')
plt.text(2, 62, '60 minutes', color='r')
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"numpy.abs",
"matplotlib.pyplot.plot",
"numpy.sum",
"matplotlib.pyplot.legend",
"numpy.zeros",
"scripts.dwt_windowed.do_transform",
"matplotlib.pyplot.text",
"matplotlib.pyplot.figure",
"numpy.random.random",
"numpy.cumsum",
"matplotlib.pyplot.ylabel"
] |
[((309, 336), 'numpy.random.random', 'np.random.random', (['[2 ** 16]'], {}), '([2 ** 16])\n', (325, 336), True, 'import numpy as np\n'), ((335, 347), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (345, 347), True, 'import matplotlib.pyplot as plt\n'), ((349, 362), 'matplotlib.pyplot.plot', 'plt.plot', (['sig'], {}), '(sig)\n', (357, 362), True, 'import matplotlib.pyplot as plt\n'), ((384, 401), 'scripts.dwt_windowed.do_transform', 'do_transform', (['sig'], {}), '(sig)\n', (396, 401), False, 'from scripts.dwt_windowed import do_transform\n'), ((430, 442), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (440, 442), True, 'import matplotlib.pyplot as plt\n'), ((811, 830), 'numpy.zeros', 'np.zeros', (['[2 ** 16]'], {}), '([2 ** 16])\n', (819, 830), True, 'import numpy as np\n'), ((841, 853), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (851, 853), True, 'import matplotlib.pyplot as plt\n'), ((855, 868), 'matplotlib.pyplot.plot', 'plt.plot', (['sig'], {}), '(sig)\n', (863, 868), True, 'import matplotlib.pyplot as plt\n'), ((901, 945), 'scripts.dwt_windowed.do_transform', 'do_transform', (['sig'], {'wtype': '"""sym2"""', 'nlevels': 'nlv'}), "(sig, wtype='sym2', nlevels=nlv)\n", (913, 945), False, 'from scripts.dwt_windowed import do_transform\n'), ((974, 986), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (984, 986), True, 'import matplotlib.pyplot as plt\n'), ((2185, 2225), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""edge effect duration (min)"""'], {}), "('edge effect duration (min)')\n", (2195, 2225), True, 'import matplotlib.pyplot as plt\n'), ((2226, 2286), 'matplotlib.pyplot.title', 'plt.title', (['"""Effects of wavelet scale and variance threshold"""'], {}), "('Effects of wavelet scale and variance threshold')\n", (2235, 2286), True, 'import matplotlib.pyplot as plt\n'), ((2320, 2399), 'matplotlib.pyplot.legend', 'plt.legend', (['(p1[0], p2[0], p3[0], p4[0])', "('5% threshold', '10%', '15%', '20%')"], {}), "((p1[0], p2[0], p3[0], p4[0]), ('5% threshold', '10%', '15%', '20%'))\n", (2330, 2399), True, 'import matplotlib.pyplot as plt\n'), ((2402, 2412), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2410, 2412), True, 'import matplotlib.pyplot as plt\n'), ((2413, 2451), 'matplotlib.pyplot.plot', 'plt.plot', (['[-0.5, 9.5]', '[30, 30]', '"""--r"""'], {}), "([-0.5, 9.5], [30, 30], '--r')\n", (2421, 2451), True, 'import matplotlib.pyplot as plt\n'), ((2452, 2490), 'matplotlib.pyplot.plot', 'plt.plot', (['[-0.5, 9.5]', '[60, 60]', '"""--r"""'], {}), "([-0.5, 9.5], [60, 60], '--r')\n", (2460, 2490), True, 'import matplotlib.pyplot as plt\n'), ((2491, 2512), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-0.5, 9.5]'], {}), '([-0.5, 9.5])\n', (2499, 2512), True, 'import matplotlib.pyplot as plt\n'), ((2513, 2553), 'matplotlib.pyplot.text', 'plt.text', (['(1)', '(32)', '"""30 minutes"""'], {'color': '"""r"""'}), "(1, 32, '30 minutes', color='r')\n", (2521, 2553), True, 'import matplotlib.pyplot as plt\n'), ((2554, 2594), 'matplotlib.pyplot.text', 'plt.text', (['(2)', '(62)', '"""60 minutes"""'], {'color': '"""r"""'}), "(2, 62, '60 minutes', color='r')\n", (2562, 2594), True, 'import matplotlib.pyplot as plt\n'), ((166, 186), 'numpy.abs', 'np.abs', (['(data - value)'], {}), '(data - value)\n', (172, 186), True, 'import numpy as np\n'), ((224, 244), 'numpy.cumsum', 'np.cumsum', (['(data ** 2)'], {}), '(data ** 2)\n', (233, 244), True, 'import numpy as np\n'), ((245, 262), 'numpy.sum', 'np.sum', (['(data ** 2)'], {}), '(data ** 2)\n', 
(251, 262), True, 'import numpy as np\n')]
|
# ------------------------------------------------------------------------
# MIT License
#
# Copyright (c) [2021] [<NAME>]
#
# This code is part of the library PyDL <https://github.com/nash911/PyDL>
# This code is licensed under MIT license (see LICENSE.txt for details)
# ------------------------------------------------------------------------
import numpy as np
from pydl.nn.layers import FC
from pydl.nn.rnn import RNN
from pydl.nn.nn import NN
from pydl.training.sgd import SGD
from pydl import conf
np.random.seed(11421111)
def get_data(file_path, seq_len):
data = open(file_path, 'r').read()
unique_chars = list(set(data))
K = len(unique_chars)
X = np.zeros((1, K), dtype=conf.dtype)
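    # X is an all-zero (1, K) placeholder; presumably it only serves to let the
    # RNN layer below infer the input dimensionality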
return data, X, K
def main():
seq_len = 50
weight_scale = 1e-2
data, X, K = get_data('data/paulgraham_essays.txt', seq_len)
print("X.shape: ", X.shape)
print("K: ", K)
l1 = RNN(X, num_neurons=200, bias=True, seq_len=seq_len, weight_scale=weight_scale, xavier=True,
activation_fn='Sigmoid', tune_internal_states=True, name="RNN-1")
l2 = FC(l1, num_neurons=K, bias=True, weight_scale=weight_scale, xavier=True,
activation_fn='SoftMax', name="Output-Layer")
layers = [l1, l2]
nn = NN(None, layers)
sgd = SGD(nn, step_size=1e-1, reg_lambda=0, train_size=90, test_size=10)
sgd.train_recurrent(data, batch_size=seq_len, epochs=10000, sample_length=1000, temperature=0.5,
log_freq=1, plot='Character-RNN - SGD - Tune Hidden')
input("Press Enter to continue...")
if __name__ == '__main__':
main()
|
[
"pydl.nn.nn.NN",
"numpy.random.seed",
"pydl.nn.rnn.RNN",
"pydl.training.sgd.SGD",
"numpy.zeros",
"pydl.nn.layers.FC"
] |
[((508, 532), 'numpy.random.seed', 'np.random.seed', (['(11421111)'], {}), '(11421111)\n', (522, 532), True, 'import numpy as np\n'), ((677, 711), 'numpy.zeros', 'np.zeros', (['(1, K)'], {'dtype': 'conf.dtype'}), '((1, K), dtype=conf.dtype)\n', (685, 711), True, 'import numpy as np\n'), ((919, 1085), 'pydl.nn.rnn.RNN', 'RNN', (['X'], {'num_neurons': '(200)', 'bias': '(True)', 'seq_len': 'seq_len', 'weight_scale': 'weight_scale', 'xavier': '(True)', 'activation_fn': '"""Sigmoid"""', 'tune_internal_states': '(True)', 'name': '"""RNN-1"""'}), "(X, num_neurons=200, bias=True, seq_len=seq_len, weight_scale=\n weight_scale, xavier=True, activation_fn='Sigmoid',\n tune_internal_states=True, name='RNN-1')\n", (922, 1085), False, 'from pydl.nn.rnn import RNN\n'), ((1099, 1221), 'pydl.nn.layers.FC', 'FC', (['l1'], {'num_neurons': 'K', 'bias': '(True)', 'weight_scale': 'weight_scale', 'xavier': '(True)', 'activation_fn': '"""SoftMax"""', 'name': '"""Output-Layer"""'}), "(l1, num_neurons=K, bias=True, weight_scale=weight_scale, xavier=True,\n activation_fn='SoftMax', name='Output-Layer')\n", (1101, 1221), False, 'from pydl.nn.layers import FC\n'), ((1262, 1278), 'pydl.nn.nn.NN', 'NN', (['None', 'layers'], {}), '(None, layers)\n', (1264, 1278), False, 'from pydl.nn.nn import NN\n'), ((1290, 1355), 'pydl.training.sgd.SGD', 'SGD', (['nn'], {'step_size': '(0.1)', 'reg_lambda': '(0)', 'train_size': '(90)', 'test_size': '(10)'}), '(nn, step_size=0.1, reg_lambda=0, train_size=90, test_size=10)\n', (1293, 1355), False, 'from pydl.training.sgd import SGD\n')]
|
"""
FinetuneTransformer
===================
"""
from ..utils.misc import suppress_stdout, get_file_md5
from .base_model import BaseModel
from ..utils.transformers_helpers import mask_tokens, rotate_checkpoints, set_seed, download_vocab_files_for_tokenizer
from torch.utils.data import DataLoader, IterableDataset
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import Dataset
from tqdm import tqdm, trange
import pandas as pd
import logging
import os
import numpy as np
import torch
from transformers import (
AdamW,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
get_linear_schedule_with_warmup,
)
from tokenizers import BertWordPieceTokenizer
import urllib
import itertools
logger = logging.getLogger(__name__)
class FinetuneTransformer(BaseModel):
def __init__(self):
super().__init__()
def init(self, config):
# Paths
self.name = config.name
self.train_data = config.train_data
self.test_data = config.get('test_data', None)
self.other_path = config.other_path
self.output_path = config.output_path
self.tmp_path = config.tmp_path
self.model_name = config.get('model', 'bert')
self.model_type = config.get('model_type', 'bert-base-uncased')
self.model_path = os.path.join(self.other_path, self.model_name)
self.overwrite = config.get('overwrite', False)
self.load_data_into_memory = config.get('load_data_into_memory', False)
self.num_workers_batch_loading = config.get('num_workers_batch_loading', 50) # only used when load_data_into_memory is False
self.evaluate_during_training = config.get('evaluate_during_training', True)
# config
self.mlm = self.model_name in ["bert", "roberta", "distilbert", "camembert"] # is masked LM
self.mlm_probability = config.get('mlm_probability', 0.15)
self.save_steps = config.get('save_steps', 500) # save every n steps
self.num_checkpoints = config.get('num_checkpoints', 1) # save n last checkpoints (set to 1 due to large model files)
# hyperparams
self.max_seq_length = config.get('max_seq_length', 128)
self.train_batch_size = config.get('train_batch_size', 16)
self.test_batch_size = config.get('test_batch_size', 16)
self.learning_rate = config.get('learning_rate', 5e-5)
self.num_epochs = config.get('num_epochs', 1)
self.warmup_steps = config.get('warmup_steps', 100)
self.max_train_steps = config.get('max_train_steps', None)
self.no_cuda = config.get('no_cuda', False)
self.on_memory = config.get('on_memory', True)
self.do_lower_case = 'uncased' in self.model_type
self.local_rank = config.get('local_rank', -1)
self.seed = config.get('seed', np.random.randint(1e4))
self.gradient_accumulation_steps = config.get('gradient_accumulation_steps', 1)
self.fp16 = config.get('fp16', False)
self.loss_scale = config.get('loss_scale', 0.0)
# set seed
set_seed(self.seed, no_cuda=self.no_cuda)
# GPU config
if self.local_rank == -1 or self.no_cuda:
self.device = torch.device("cuda" if torch.cuda.is_available() and not self.no_cuda else "cpu")
self.n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(self.local_rank)
self.device = torch.device("cuda", self.local_rank)
self.n_gpu = 1
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
if self.no_cuda:
self.n_gpu = 0
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(self.device, self.n_gpu, bool(self.local_rank != -1), self.fp16))
if self.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(self.gradient_accumulation_steps))
self.train_batch_size = self.train_batch_size // self.gradient_accumulation_steps
def train(self):
# Model initialization
tokenizer = AutoTokenizer.from_pretrained(self.model_type, cache_dir=self.model_path)
vocab_files = download_vocab_files_for_tokenizer(tokenizer, self.model_type, self.output_path)
fast_tokenizer = BertWordPieceTokenizer(vocab_files.get('vocab_file'), vocab_files.get('merges_file'), lowercase=self.do_lower_case)
fast_tokenizer.enable_padding(max_length=self.max_seq_length)
num_train_optimization_steps = None
logger.debug(f'Loading Train Dataset {self.train_data}...')
if self.load_data_into_memory:
train_dataset = TextDataset(self.train_data, fast_tokenizer, max_seq_length=self.max_seq_length)
else:
train_dataset = TextIterableDataset(self.train_data, fast_tokenizer, max_seq_length=self.max_seq_length)
logger.info(f'Loaded {len(train_dataset):,} examples...')
num_train_optimization_steps = int(len(train_dataset) / self.train_batch_size / self.gradient_accumulation_steps) * self.num_epochs
if self.local_rank != -1:
num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
config = AutoConfig.from_pretrained(self.model_type, cache_dir=self.model_path)
model = AutoModelWithLMHead.from_pretrained(
self.model_type,
from_tf=bool(".ckpt" in self.model_type),
config=config,
cache_dir=self.model_path)
if self.fp16:
model.half()
model.to(self.device)
if self.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model = DDP(model)
elif self.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if self.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=self.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if self.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=self.loss_scale)
else:
optimizer = AdamW(optimizer_grouped_parameters, lr=self.learning_rate)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=self.warmup_steps, num_training_steps=num_train_optimization_steps)
# Run training
global_step = 0
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Batch size = %d", self.train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
if self.load_data_into_memory:
if self.local_rank == -1:
train_sampler = RandomSampler(train_dataset)
else:
train_sampler = DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=self.train_batch_size)
total_batches = len(train_dataloader)
else:
# no sampling supported for iterative data loading
train_dataloader = DataLoader(train_dataset, batch_size=self.train_batch_size, num_workers=self.num_workers_batch_loading)
# len doesn't work for iterator datasets
total_batches = int(len(train_dataloader)/self.train_batch_size)
set_seed(self.seed, no_cuda=self.no_cuda) # Added here for reproducibility
for _ in trange(int(self.num_epochs), desc="Epoch"):
model.train()
tr_loss = 0
epoch_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
pbar = tqdm(train_dataloader, total=total_batches)
for step, batch in enumerate(pbar):
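                # for masked-LM architectures, randomly mask a fraction of the input
                # tokens and predict them; otherwise the batch itself is used as
                # labels (the causal-LM head shifts targets internally)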
inputs, labels = mask_tokens(batch, tokenizer, mlm_probability=self.mlm_probability) if self.mlm else (batch, batch)
inputs = inputs.to(self.device)
labels = labels.to(self.device)
model.train()
outputs = model(inputs, masked_lm_labels=labels) if self.mlm else model(inputs, labels=labels)
loss = outputs[0]
if self.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if self.gradient_accumulation_steps > 1:
loss = loss / self.gradient_accumulation_steps
if self.fp16:
optimizer.backward(loss)
else:
loss.backward()
tr_loss += loss.item()
nb_tr_examples += labels.size(0)
nb_tr_steps += 1
epoch_loss += loss
if step > 0:
pbar.set_description("Loss: {:8.4f} | Average loss/it: {:8.4f}".format(tr_loss, epoch_loss/step))
if (step + 1) % self.gradient_accumulation_steps == 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
scheduler.step()
optimizer.zero_grad()
global_step += 1
if self.save_steps > 0 and global_step % self.save_steps == 0:
output_dir = os.path.join(self.output_path, f'checkpoint-{global_step}')
os.makedirs(output_dir, exist_ok=True)
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
logger.info(f'Saving model checkpoint to {output_dir}')
rotate_checkpoints(self.output_path, save_total_limit=self.num_checkpoints)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info(f'Saving optimizer and scheduler states to {output_dir}')
if self.evaluate_during_training:
logger.info('Evaluate...')
self.test(model=model, tokenizer=tokenizer, fast_tokenizer=fast_tokenizer, output_dir=output_dir)
if self.max_train_steps is not None and step > self.max_train_steps:
logger.info(f'Reached max number of training steps {self.max_train_steps:,}')
break
if step >= total_batches:
# finished epoch
break
if self.max_train_steps is not None and step > self.max_train_steps:
break
# Save a trained model
logger.info("** ** * Saving fine - tuned model ** ** * ")
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
model_to_save.save_pretrained(self.output_path)
tokenizer.save_pretrained(self.output_path)
self.add_to_config(self.output_path, vars(self))
def test(self, model=None, tokenizer=None, fast_tokenizer=None, output_dir=None):
# TODO: Rewrite with a single config arg
if self.test_data is None:
logger.warning('No test data provided. Aborting.')
return
if output_dir is None:
output_dir = self.output_path
if tokenizer is None:
tokenizer = AutoTokenizer.from_pretrained(output_dir)
if fast_tokenizer is None:
vocab_files = self.download_vocab_files_for_tokenizer(tokenizer)
fast_tokenizer = BertWordPieceTokenizer(vocab_files.get('vocab_file'), vocab_files.get('merges_file'), lowercase=self.do_lower_case)
fast_tokenizer.enable_padding(max_length=self.max_seq_length)
logger.debug(f'Loading test dataset {self.test_data}...')
if self.load_data_into_memory:
            test_dataset = TextDataset(self.test_data, fast_tokenizer, max_seq_length=self.max_seq_length)
else:
test_dataset = TextIterableDataset(self.test_data, fast_tokenizer, max_seq_length=self.max_seq_length)
logger.info(f'Loaded {len(test_dataset):,} examples...')
if model is None:
config = AutoConfig.from_pretrained(output_dir)
model = AutoModelWithLMHead.from_pretrained(output_dir, config=config)
model.to(self.device)
if self.n_gpu > 1:
model = torch.nn.DataParallel(model)
# evaluate
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(test_dataset))
logger.info(" Batch size = %d", self.test_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
if self.load_data_into_memory:
eval_sampler = SequentialSampler(test_dataset)
test_dataloader = DataLoader(test_dataset, sampler=eval_sampler, batch_size=self.test_batch_size)
total_batches = len(test_dataloader)
else:
# no sampling supported for iterative data loading
test_dataloader = DataLoader(test_dataset, batch_size=self.test_batch_size, num_workers=self.num_workers_batch_loading)
# len doesn't work for iterator datasets
total_batches = int(len(test_dataloader)/self.test_batch_size)
for batch in tqdm(test_dataloader, desc="Evaluating", total=total_batches):
inputs, labels = mask_tokens(batch, tokenizer, mlm_probability=self.mlm_probability) if self.mlm else (batch, batch)
inputs = inputs.to(self.device)
labels = labels.to(self.device)
with torch.no_grad():
outputs = model(inputs, masked_lm_labels=labels) if self.mlm else model(inputs, labels=labels)
lm_loss = outputs[0]
eval_loss += lm_loss.mean().item()
nb_eval_steps += 1
if nb_eval_steps >= total_batches:
break
eval_loss = eval_loss / nb_eval_steps
perplexity = float(torch.exp(torch.tensor(eval_loss)))
logger.info(f'Eval perplexity: {perplexity}')
return {'perplexity': perplexity}
def prepare_input_data(self, min_tokens=3):
"""DEPRECATED: method used for NSP/SOP tasks"""
import en_core_web_sm
logger.info('Generating file hash...')
file_hash = get_file_md5(self.train_data)
input_data_path = os.path.join(self.tmp_path, 'fine_tune', 'finetune_transformer', f'{file_hash}.txt')
if os.path.exists(input_data_path):
logger.info('Found pre-existing input data file.')
return input_data_path
if not os.path.isdir(os.path.dirname(input_data_path)):
os.makedirs(os.path.dirname(input_data_path))
logger.info('Reading input data...')
df = pd.read_csv(self.train_data, usecols=['text'])
nlp = en_core_web_sm.load()
logger.info('Generating input data...')
with open(input_data_path, 'w') as f:
for i, text in tqdm(enumerate(df['text']), total=len(df)):
sentence_was_found = False
doc = nlp(text, disable=['entity', 'tagger'])
sentences = [sent.string.strip() for sent in doc.sents]
for sentence in sentences:
try:
num_tokens = len(doc)
except:
logger.error('error with sentence: "{}"'.format(sentence))
continue
if num_tokens > min_tokens:
f.write(sentence + '\n')
sentence_was_found = True
if sentence_was_found:
# add new line after sentences
f.write('\n')
return input_data_path
class TextDataset(Dataset):
"""Load dataset in memory"""
def __init__(self, file_path, tokenizer, max_seq_length=512):
assert os.path.isfile(file_path)
logger.info('Reading file...')
with open(file_path, encoding="utf-8") as f:
lines = [line for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
logger.info('Tokenizing...')
lines = tokenizer.encode_batch(lines)
self.examples = [l.ids[:max_seq_length] for l in lines]
logger.info('... done')
def __len__(self):
return len(self.examples)
def __getitem__(self, i):
return torch.tensor(self.examples[i], dtype=torch.long)
class TextIterableDataset(IterableDataset):
"""Load dataset iteratively and tokenize with tokenizer library on the fly"""
def __init__(self, file_path, tokenizer, max_seq_length=512):
assert os.path.isfile(file_path)
self.f_name = file_path
self.tokenizer = tokenizer
self.max_seq_length = max_seq_length
self.num_lines = sum(1 for line in open(self.f_name, 'r') if len(line.strip()) > 0)
def parse_and_tokenize(self):
worker_info = torch.utils.data.get_worker_info()
if worker_info is None:
# single-process data loading
with open(self.f_name, 'r') as f:
for line in f:
if len(line.strip()) > 0:
encoding = self.tokenizer.encode(line)
encoding.truncate(self.max_seq_length)
yield torch.tensor(encoding.ids, dtype=torch.long)
else:
per_worker = int(self.num_lines/float(worker_info.num_workers))
worker_id = worker_info.id
iter_start = worker_id * per_worker
iter_end = min(iter_start + per_worker, self.num_lines)
logger.info(f'Start batching worker {worker_id}, start: {iter_start:,}, end: {iter_end:,} (of total {self.num_lines:,} lines)')
with open(self.f_name, 'r') as f:
for i, line in enumerate(f):
                    if iter_start <= i < iter_end:  # include the first line of each worker's shard
if len(line.strip()) > 0:
encoding = self.tokenizer.encode(line)
encoding.truncate(self.max_seq_length)
yield torch.tensor(encoding.ids, dtype=torch.long)
def __iter__(self):
return itertools.cycle(self.parse_and_tokenize())
def __len__(self):
return self.num_lines
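# Illustrative sketch (not part of the original module): parse_and_tokenize above
# shards the input file by giving each DataLoader worker a contiguous block of
# roughly num_lines / num_workers lines. The hypothetical helper below reproduces
# that arithmetic in plain Python so the sharding can be sanity-checked without
# torch; it is defined for documentation only and never called.
def _demo_worker_shards(num_lines=10, num_workers=3):
    per_worker = int(num_lines / float(num_workers))
    shards = []
    for worker_id in range(num_workers):
        iter_start = worker_id * per_worker
        iter_end = min(iter_start + per_worker, num_lines)
        shards.append((iter_start, iter_end))
    return shards
# _demo_worker_shards(10, 3) -> [(0, 3), (3, 6), (6, 9)]: the remainder lines past
# num_workers * per_worker are simply dropped, mirroring the class above.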
|
[
"pandas.read_csv",
"logging.getLogger",
"torch.cuda.device_count",
"os.path.isfile",
"numpy.random.randint",
"torch.distributed.get_world_size",
"torch.device",
"torch.no_grad",
"os.path.join",
"torch.utils.data.DataLoader",
"os.path.dirname",
"os.path.exists",
"apex.optimizers.FusedAdam",
"apex.optimizers.FP16_Optimizer",
"torch.utils.data.distributed.DistributedSampler",
"torch.cuda.set_device",
"tqdm.tqdm",
"torch.utils.data.get_worker_info",
"transformers.AutoTokenizer.from_pretrained",
"torch.utils.data.sampler.RandomSampler",
"transformers.get_linear_schedule_with_warmup",
"transformers.AdamW",
"apex.parallel.DistributedDataParallel",
"torch.cuda.is_available",
"en_core_web_sm.load",
"transformers.AutoConfig.from_pretrained",
"torch.distributed.init_process_group",
"os.makedirs",
"transformers.AutoModelWithLMHead.from_pretrained",
"torch.utils.data.sampler.SequentialSampler",
"torch.nn.DataParallel",
"torch.tensor"
] |
[((812, 839), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (829, 839), False, 'import logging\n'), ((1389, 1435), 'os.path.join', 'os.path.join', (['self.other_path', 'self.model_name'], {}), '(self.other_path, self.model_name)\n', (1401, 1435), False, 'import os\n'), ((4304, 4377), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['self.model_type'], {'cache_dir': 'self.model_path'}), '(self.model_type, cache_dir=self.model_path)\n', (4333, 4377), False, 'from transformers import AdamW, AutoConfig, AutoModelWithLMHead, AutoTokenizer, get_linear_schedule_with_warmup\n'), ((5450, 5520), 'transformers.AutoConfig.from_pretrained', 'AutoConfig.from_pretrained', (['self.model_type'], {'cache_dir': 'self.model_path'}), '(self.model_type, cache_dir=self.model_path)\n', (5476, 5520), False, 'from transformers import AdamW, AutoConfig, AutoModelWithLMHead, AutoTokenizer, get_linear_schedule_with_warmup\n'), ((14793, 14854), 'tqdm.tqdm', 'tqdm', (['test_dataloader'], {'desc': '"""Evaluating"""', 'total': 'total_batches'}), "(test_dataloader, desc='Evaluating', total=total_batches)\n", (14797, 14854), False, 'from tqdm import tqdm, trange\n'), ((15869, 15957), 'os.path.join', 'os.path.join', (['self.tmp_path', '"""fine_tune"""', '"""finetune_transformer"""', 'f"""{file_hash}.txt"""'], {}), "(self.tmp_path, 'fine_tune', 'finetune_transformer',\n f'{file_hash}.txt')\n", (15881, 15957), False, 'import os\n'), ((15965, 15996), 'os.path.exists', 'os.path.exists', (['input_data_path'], {}), '(input_data_path)\n', (15979, 15996), False, 'import os\n'), ((16276, 16322), 'pandas.read_csv', 'pd.read_csv', (['self.train_data'], {'usecols': "['text']"}), "(self.train_data, usecols=['text'])\n", (16287, 16322), True, 'import pandas as pd\n'), ((16337, 16358), 'en_core_web_sm.load', 'en_core_web_sm.load', ([], {}), '()\n', (16356, 16358), False, 'import en_core_web_sm\n'), ((17404, 17429), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (17418, 17429), False, 'import os\n'), ((17908, 17956), 'torch.tensor', 'torch.tensor', (['self.examples[i]'], {'dtype': 'torch.long'}), '(self.examples[i], dtype=torch.long)\n', (17920, 17956), False, 'import torch\n'), ((18165, 18190), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (18179, 18190), False, 'import os\n'), ((18453, 18487), 'torch.utils.data.get_worker_info', 'torch.utils.data.get_worker_info', ([], {}), '()\n', (18485, 18487), False, 'import torch\n'), ((2903, 2929), 'numpy.random.randint', 'np.random.randint', (['(10000.0)'], {}), '(10000.0)\n', (2920, 2929), True, 'import numpy as np\n'), ((3392, 3417), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (3415, 3417), False, 'import torch\n'), ((3444, 3482), 'torch.cuda.set_device', 'torch.cuda.set_device', (['self.local_rank'], {}), '(self.local_rank)\n', (3465, 3482), False, 'import torch\n'), ((3509, 3546), 'torch.device', 'torch.device', (['"""cuda"""', 'self.local_rank'], {}), "('cuda', self.local_rank)\n", (3521, 3546), False, 'import torch\n'), ((3684, 3736), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': '"""nccl"""'}), "(backend='nccl')\n", (3720, 3736), False, 'import torch\n'), ((6112, 6122), 'apex.parallel.DistributedDataParallel', 'DDP', (['model'], {}), '(model)\n', (6115, 6122), True, 'from apex.parallel import DistributedDataParallel as DDP\n'), ((6981, 7089), 'apex.optimizers.FusedAdam', 'FusedAdam', 
(['optimizer_grouped_parameters'], {'lr': 'self.learning_rate', 'bias_correction': '(False)', 'max_grad_norm': '(1.0)'}), '(optimizer_grouped_parameters, lr=self.learning_rate,\n bias_correction=False, max_grad_norm=1.0)\n', (6990, 7089), False, 'from apex.optimizers import FusedAdam\n'), ((7449, 7507), 'transformers.AdamW', 'AdamW', (['optimizer_grouped_parameters'], {'lr': 'self.learning_rate'}), '(optimizer_grouped_parameters, lr=self.learning_rate)\n', (7454, 7507), False, 'from transformers import AdamW, AutoConfig, AutoModelWithLMHead, AutoTokenizer, get_linear_schedule_with_warmup\n'), ((7532, 7664), 'transformers.get_linear_schedule_with_warmup', 'get_linear_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': 'self.warmup_steps', 'num_training_steps': 'num_train_optimization_steps'}), '(optimizer, num_warmup_steps=self.\n warmup_steps, num_training_steps=num_train_optimization_steps)\n', (7563, 7664), False, 'from transformers import AdamW, AutoConfig, AutoModelWithLMHead, AutoTokenizer, get_linear_schedule_with_warmup\n'), ((8210, 8297), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'sampler': 'train_sampler', 'batch_size': 'self.train_batch_size'}), '(train_dataset, sampler=train_sampler, batch_size=self.\n train_batch_size)\n', (8220, 8297), False, 'from torch.utils.data import DataLoader, IterableDataset\n'), ((8451, 8559), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'self.train_batch_size', 'num_workers': 'self.num_workers_batch_loading'}), '(train_dataset, batch_size=self.train_batch_size, num_workers=\n self.num_workers_batch_loading)\n', (8461, 8559), False, 'from torch.utils.data import DataLoader, IterableDataset\n'), ((8972, 9015), 'tqdm.tqdm', 'tqdm', (['train_dataloader'], {'total': 'total_batches'}), '(train_dataloader, total=total_batches)\n', (8976, 9015), False, 'from tqdm import tqdm, trange\n'), ((12850, 12891), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['output_dir'], {}), '(output_dir)\n', (12879, 12891), False, 'from transformers import AdamW, AutoConfig, AutoModelWithLMHead, AutoTokenizer, get_linear_schedule_with_warmup\n'), ((13676, 13714), 'transformers.AutoConfig.from_pretrained', 'AutoConfig.from_pretrained', (['output_dir'], {}), '(output_dir)\n', (13702, 13714), False, 'from transformers import AdamW, AutoConfig, AutoModelWithLMHead, AutoTokenizer, get_linear_schedule_with_warmup\n'), ((13735, 13797), 'transformers.AutoModelWithLMHead.from_pretrained', 'AutoModelWithLMHead.from_pretrained', (['output_dir'], {'config': 'config'}), '(output_dir, config=config)\n', (13770, 13797), False, 'from transformers import AdamW, AutoConfig, AutoModelWithLMHead, AutoTokenizer, get_linear_schedule_with_warmup\n'), ((13880, 13908), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (13901, 13908), False, 'import torch\n'), ((14244, 14275), 'torch.utils.data.sampler.SequentialSampler', 'SequentialSampler', (['test_dataset'], {}), '(test_dataset)\n', (14261, 14275), False, 'from torch.utils.data.sampler import RandomSampler, SequentialSampler\n'), ((14306, 14385), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'sampler': 'eval_sampler', 'batch_size': 'self.test_batch_size'}), '(test_dataset, sampler=eval_sampler, batch_size=self.test_batch_size)\n', (14316, 14385), False, 'from torch.utils.data import DataLoader, IterableDataset\n'), ((14542, 14648), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], 
{'batch_size': 'self.test_batch_size', 'num_workers': 'self.num_workers_batch_loading'}), '(test_dataset, batch_size=self.test_batch_size, num_workers=self.\n num_workers_batch_loading)\n', (14552, 14648), False, 'from torch.utils.data import DataLoader, IterableDataset\n'), ((5398, 5432), 'torch.distributed.get_world_size', 'torch.distributed.get_world_size', ([], {}), '()\n', (5430, 5432), False, 'import torch\n'), ((6172, 6200), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (6193, 6200), False, 'import torch\n'), ((7253, 7303), 'apex.optimizers.FP16_Optimizer', 'FP16_Optimizer', (['optimizer'], {'dynamic_loss_scale': '(True)'}), '(optimizer, dynamic_loss_scale=True)\n', (7267, 7303), False, 'from apex.optimizers import FP16_Optimizer\n'), ((7350, 7410), 'apex.optimizers.FP16_Optimizer', 'FP16_Optimizer', (['optimizer'], {'static_loss_scale': 'self.loss_scale'}), '(optimizer, static_loss_scale=self.loss_scale)\n', (7364, 7410), False, 'from apex.optimizers import FP16_Optimizer\n'), ((8066, 8094), 'torch.utils.data.sampler.RandomSampler', 'RandomSampler', (['train_dataset'], {}), '(train_dataset)\n', (8079, 8094), False, 'from torch.utils.data.sampler import RandomSampler, SequentialSampler\n'), ((8145, 8178), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['train_dataset'], {}), '(train_dataset)\n', (8163, 8178), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((15090, 15105), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15103, 15105), False, 'import torch\n'), ((15489, 15512), 'torch.tensor', 'torch.tensor', (['eval_loss'], {}), '(eval_loss)\n', (15501, 15512), False, 'import torch\n'), ((16125, 16157), 'os.path.dirname', 'os.path.dirname', (['input_data_path'], {}), '(input_data_path)\n', (16140, 16157), False, 'import os\n'), ((16184, 16216), 'os.path.dirname', 'os.path.dirname', (['input_data_path'], {}), '(input_data_path)\n', (16199, 16216), False, 'import os\n'), ((3308, 3333), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3331, 3333), False, 'import torch\n'), ((10556, 10615), 'os.path.join', 'os.path.join', (['self.output_path', 'f"""checkpoint-{global_step}"""'], {}), "(self.output_path, f'checkpoint-{global_step}')\n", (10568, 10615), False, 'import os\n'), ((10640, 10678), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (10651, 10678), False, 'import os\n'), ((11207, 11247), 'os.path.join', 'os.path.join', (['output_dir', '"""optimizer.pt"""'], {}), "(output_dir, 'optimizer.pt')\n", (11219, 11247), False, 'import os\n'), ((11308, 11348), 'os.path.join', 'os.path.join', (['output_dir', '"""scheduler.pt"""'], {}), "(output_dir, 'scheduler.pt')\n", (11320, 11348), False, 'import os\n'), ((18841, 18885), 'torch.tensor', 'torch.tensor', (['encoding.ids'], {'dtype': 'torch.long'}), '(encoding.ids, dtype=torch.long)\n', (18853, 18885), False, 'import torch\n'), ((19636, 19680), 'torch.tensor', 'torch.tensor', (['encoding.ids'], {'dtype': 'torch.long'}), '(encoding.ids, dtype=torch.long)\n', (19648, 19680), False, 'import torch\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
data_full = [[ 0., 10., 20., 30., 40., 50., 60., 70., 80., 90., 100., 110., 120., 130.,
140., 150., 160., 170., 180., 190., 200., 210., 220., 230., 240., 250., 260., 270.,
280., 290., 300., 310., 320., 330., 340., 350., 360., 370., 380., 390., 400., 410.,
420., 430., 440., 450., 460., 470., 480., 490., 500., 510., 520., 530., 540., 550.,
560., 570., 580., 590., 600., 610., 620., 630., 640., 650., 660., 670., 680., 690.,
700., 710., 720., 730., 740., 750., 760., 770., 780., 790., 800.],
[ 0., 3., 7., 10., 13., 16., 19., 23., 26., 29., 32., 35., 38., 41.,
43., 46., 49., 52., 55., 58., 60., 63., 66., 68., 71., 74., 76., 79.,
81., 84., 86., 89., 91., 94., 96., 99., 101., 103., 106., 108., 110., 112.,
115., 117., 119., 121., 124., 126., 128., 130., 132., 134., 136., 138., 140., 142.,
144., 146., 148., 150., 152., 154., 156., 158., 160., 162., 164., 165., 167., 169.,
171., 173., 174., 176., 178., 180., 181., 255., 255., 255., 255.]]
data = np.array(data_full)
limt = 76
def linear_reg(arr):
x = np.expand_dims(arr[0,:],axis=1)# 81,1
#x = [email protected]([1,2])
y= np.expand_dims(arr[1,:],axis=1)
linreg = linear_model.Lasso()
model = linreg.fit(x,y)
y_ = model.predict(x)
MSE = np.abs(y[:,0]-y_).mean()
print('MSE = {}'.format(MSE))
print(model.coef_)
print(linreg.intercept_)
plt.plot(x[:,0],y,'r*')
plt.plot(x[:,0],y_,'b-')
plt.show()
def linear_reg2(arr):
x0 = np.expand_dims(arr[0,:],axis=1)# 81,1
x1 = x0*x0
#x2 = x1*x0
x = np.concatenate([x0,x1],axis=1)
#x = [email protected]([1,2])
y= np.expand_dims(arr[1,:],axis=1)
linreg = linear_model.Lasso()
model = linreg.fit(x,y)
y_ = model.predict(x)
    y_2 = x@model.coef_.T + model.intercept_
MSE = np.abs(y[:,0]-y_)
print(y_ )
print(y_2)
print('MSE = {}'.format(MSE.mean()))
print(model.coef_)
print(linreg.intercept_)
plt.plot(data[0, :], data[1, :], 'b+',label = 'control points')
plt.plot(x[:,0],y_,'r-',label = 'fitted')
plt.ylabel('gray(1)')
plt.xlabel('distances(m)')
plt.legend()
if __name__ == '__main__':
linear_reg2(data[:,:limt])
plt.show()
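# Illustrative sketch (not part of the original script): linear_reg2 fits a Lasso
# model on the polynomial features [x, x**2] and then rebuilds the prediction by
# hand as x @ coef_ + intercept_. The hypothetical helper below checks that
# identity on synthetic data; it is defined for documentation only and not called.
def _demo_manual_predict():
    rng = np.random.default_rng(0)
    x0 = rng.uniform(0, 10, size=(50, 1))
    x = np.concatenate([x0, x0 * x0], axis=1)
    y = 2.0 + 1.5 * x0[:, 0] - 0.05 * x0[:, 0] ** 2 + rng.normal(0, 0.1, 50)
    model = linear_model.Lasso(alpha=0.01).fit(x, y)
    manual = x @ model.coef_ + model.intercept_  # same quantity model.predict(x) returns
    assert np.allclose(manual, model.predict(x))
    # Note: np.abs(y - manual).mean(), printed above under the name MSE, is
    # actually the mean absolute error.
    return np.abs(y - manual).mean()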
|
[
"matplotlib.pyplot.show",
"numpy.concatenate",
"matplotlib.pyplot.plot",
"numpy.abs",
"matplotlib.pyplot.legend",
"numpy.expand_dims",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"sklearn.linear_model.Lasso"
] |
[((1223, 1242), 'numpy.array', 'np.array', (['data_full'], {}), '(data_full)\n', (1231, 1242), True, 'import numpy as np\n'), ((1290, 1323), 'numpy.expand_dims', 'np.expand_dims', (['arr[0, :]'], {'axis': '(1)'}), '(arr[0, :], axis=1)\n', (1304, 1323), True, 'import numpy as np\n'), ((1371, 1404), 'numpy.expand_dims', 'np.expand_dims', (['arr[1, :]'], {'axis': '(1)'}), '(arr[1, :], axis=1)\n', (1385, 1404), True, 'import numpy as np\n'), ((1420, 1440), 'sklearn.linear_model.Lasso', 'linear_model.Lasso', ([], {}), '()\n', (1438, 1440), False, 'from sklearn import linear_model\n'), ((1654, 1680), 'matplotlib.pyplot.plot', 'plt.plot', (['x[:, 0]', 'y', '"""r*"""'], {}), "(x[:, 0], y, 'r*')\n", (1662, 1680), True, 'import matplotlib.pyplot as plt\n'), ((1686, 1713), 'matplotlib.pyplot.plot', 'plt.plot', (['x[:, 0]', 'y_', '"""b-"""'], {}), "(x[:, 0], y_, 'b-')\n", (1694, 1713), True, 'import matplotlib.pyplot as plt\n'), ((1719, 1729), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1727, 1729), True, 'import matplotlib.pyplot as plt\n'), ((1770, 1803), 'numpy.expand_dims', 'np.expand_dims', (['arr[0, :]'], {'axis': '(1)'}), '(arr[0, :], axis=1)\n', (1784, 1803), True, 'import numpy as np\n'), ((1859, 1891), 'numpy.concatenate', 'np.concatenate', (['[x0, x1]'], {'axis': '(1)'}), '([x0, x1], axis=1)\n', (1873, 1891), True, 'import numpy as np\n'), ((1933, 1966), 'numpy.expand_dims', 'np.expand_dims', (['arr[1, :]'], {'axis': '(1)'}), '(arr[1, :], axis=1)\n', (1947, 1966), True, 'import numpy as np\n'), ((1982, 2002), 'sklearn.linear_model.Lasso', 'linear_model.Lasso', ([], {}), '()\n', (2000, 2002), False, 'from sklearn import linear_model\n'), ((2132, 2152), 'numpy.abs', 'np.abs', (['(y[:, 0] - y_)'], {}), '(y[:, 0] - y_)\n', (2138, 2152), True, 'import numpy as np\n'), ((2301, 2363), 'matplotlib.pyplot.plot', 'plt.plot', (['data[0, :]', 'data[1, :]', '"""b+"""'], {'label': '"""control points"""'}), "(data[0, :], data[1, :], 'b+', label='control points')\n", (2309, 2363), True, 'import matplotlib.pyplot as plt\n'), ((2375, 2418), 'matplotlib.pyplot.plot', 'plt.plot', (['x[:, 0]', 'y_', '"""r-"""'], {'label': '"""fitted"""'}), "(x[:, 0], y_, 'r-', label='fitted')\n", (2383, 2418), True, 'import matplotlib.pyplot as plt\n'), ((2425, 2446), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""gray(1)"""'], {}), "('gray(1)')\n", (2435, 2446), True, 'import matplotlib.pyplot as plt\n'), ((2455, 2481), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""distances(m)"""'], {}), "('distances(m)')\n", (2465, 2481), True, 'import matplotlib.pyplot as plt\n'), ((2490, 2502), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2500, 2502), True, 'import matplotlib.pyplot as plt\n'), ((2570, 2580), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2578, 2580), True, 'import matplotlib.pyplot as plt\n'), ((1521, 1541), 'numpy.abs', 'np.abs', (['(y[:, 0] - y_)'], {}), '(y[:, 0] - y_)\n', (1527, 1541), True, 'import numpy as np\n')]
|
import numpy as np
def im2col(input_data, filter_h, filter_w, stride, pad):
"""
(function) im2col
-----------------
- Convert the shape of the data from image to column
Parameter
---------
- input_data : input data
- filter_h : filter height
- filter_w : filter width
- stride : sliding interval
- pad : boundary padding length
Return
------
- reshaped column data
"""
# Calculate result resolution information
N, C, H, W = input_data.shape
out_h = (H + 2 * pad - filter_h) // stride + 1
out_w = (W + 2 * pad - filter_w) // stride + 1
# Do padding on the input data
img = np.pad(input_data, [(0, 0), (0, 0), (pad, pad), (pad, pad)], 'constant')
col = np.zeros((N, C, filter_h, filter_w, out_h, out_w))
# Generate the column data
for y in range(filter_h):
y_max = y + stride * out_h
for x in range(filter_w):
x_max = x + stride * out_w
col[:, :, y, x, :, :] = img[:, :, y:y_max:stride, x:x_max:stride]
col = col.transpose(0, 4, 5, 1, 2, 3).reshape(N * out_h * out_w, -1)
return col
# Convert column to image
def col2im(col, input_shape, filter_h, filter_w, stride, pad):
"""
(function) col2im
-----------------
- Convert the shape of the data from column to image
Parameter
---------
- col : column data
- input_shape : original shape on the input
- filter_h : filter height
- filter_w : filter width
- stride : sliding interval
- pad : boundary padding length
Return
------
- reshaped image data
"""
# Calculate result resolution information
N, C, H, W = input_shape
out_h = (H + 2 * pad - filter_h) // stride + 1
out_w = (W + 2 * pad - filter_w) // stride + 1
col = col.reshape(N, out_h, out_w, C, filter_h, filter_w).transpose(0, 3, 4, 5, 1, 2)
# Generate the image data
img = np.zeros((N, C, H + 2 * pad + stride - 1, W + 2 * pad + stride - 1))
for y in range(filter_h):
y_max = y + stride * out_h
for x in range(filter_w):
x_max = x + stride * out_w
img[:, :, y:y_max:stride, x:x_max:stride] += col[:, :, y, x, :, :]
return img[:, :, pad:H + pad, pad:W + pad]
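# Illustrative sketch (not part of the original module): a quick shape and
# round-trip check. With non-overlapping windows (stride equal to the filter size
# and no padding) every input pixel lands in exactly one column, so
# col2im(im2col(x)) reproduces x; with overlapping windows col2im instead sums
# the overlapping contributions. The helper name is hypothetical and the function
# is never called in this module.
def _demo_im2col_roundtrip():
    x = np.arange(2 * 3 * 4 * 4, dtype=np.float64).reshape(2, 3, 4, 4)
    col = im2col(x, filter_h=2, filter_w=2, stride=2, pad=0)
    assert col.shape == (2 * 2 * 2, 3 * 2 * 2)  # (N*out_h*out_w, C*filter_h*filter_w)
    back = col2im(col, x.shape, filter_h=2, filter_w=2, stride=2, pad=0)
    assert np.allclose(back, x)
    return col.shape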
|
[
"numpy.pad",
"numpy.zeros"
] |
[((660, 732), 'numpy.pad', 'np.pad', (['input_data', '[(0, 0), (0, 0), (pad, pad), (pad, pad)]', '"""constant"""'], {}), "(input_data, [(0, 0), (0, 0), (pad, pad), (pad, pad)], 'constant')\n", (666, 732), True, 'import numpy as np\n'), ((743, 793), 'numpy.zeros', 'np.zeros', (['(N, C, filter_h, filter_w, out_h, out_w)'], {}), '((N, C, filter_h, filter_w, out_h, out_w))\n', (751, 793), True, 'import numpy as np\n'), ((1929, 1997), 'numpy.zeros', 'np.zeros', (['(N, C, H + 2 * pad + stride - 1, W + 2 * pad + stride - 1)'], {}), '((N, C, H + 2 * pad + stride - 1, W + 2 * pad + stride - 1))\n', (1937, 1997), True, 'import numpy as np\n')]
|
"""
Source: https://github.com/yanxinzju/CSS-VQA/blob/0e2bfa68232f346adc9ad61e90e97ee38ad59f96/language_model.py#L31
"""
import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable
from torch.nn.utils.weight_norm import weight_norm
__all__ = ['WordEmbedding', 'QuestionEmbedding', 'FCNet', 'SimpleClassifier']
class WordEmbedding(nn.Module):
"""Word Embedding
The ntoken-th dim is used for padding_idx, which agrees *implicitly*
with the definition in Dictionary.
"""
def __init__(self, ntoken, emb_dim, dropout):
super().__init__()
self.emb = nn.Embedding(ntoken+1, emb_dim, padding_idx=ntoken)
self.dropout = nn.Dropout(dropout)
self.ntoken = ntoken
self.emb_dim = emb_dim
def init_embedding(self, np_file):
weight_init = torch.from_numpy(np.load(np_file))
assert weight_init.shape == (self.ntoken, self.emb_dim)
self.emb.weight.data[:self.ntoken] = weight_init
def forward(self, x):
emb = self.emb(x)
emb = self.dropout(emb)
return emb
class QuestionEmbedding(nn.Module):
def __init__(self,
in_dim,
num_hid,
nlayers,
bidirect,
dropout,
rnn_type='GRU'
) -> None:
"""Module for question embedding
"""
super().__init__()
assert rnn_type == 'LSTM' or rnn_type == 'GRU'
rnn_cls = nn.LSTM if rnn_type == 'LSTM' else nn.GRU
self.rnn = rnn_cls(
in_dim, num_hid, nlayers,
bidirectional=bidirect,
dropout=dropout,
batch_first=True)
self.in_dim = in_dim
self.num_hid = num_hid
self.nlayers = nlayers
self.rnn_type = rnn_type
self.ndirections = 1 + int(bidirect)
def init_hidden(self, batch):
# just to get the type of tensor
weight = next(self.parameters()).data
hid_shape = (self.nlayers * self.ndirections, batch, self.num_hid)
if self.rnn_type == 'LSTM':
return (Variable(weight.new(*hid_shape).zero_()),
Variable(weight.new(*hid_shape).zero_()))
else:
return Variable(weight.new(*hid_shape).zero_())
def forward(self, x):
# x: [batch, sequence, in_dim]
batch = x.size(0)
hidden = self.init_hidden(batch)
self.rnn.flatten_parameters()
output, hidden = self.rnn(x, hidden)
if self.ndirections == 1:
return output[:, -1]
forward_ = output[:, -1, :self.num_hid]
backward = output[:, 0, self.num_hid:]
return torch.cat((forward_, backward), dim=1)
def forward_all(self, x):
# x: [batch, sequence, in_dim]
batch = x.size(0)
hidden = self.init_hidden(batch)
self.rnn.flatten_parameters()
output, hidden = self.rnn(x, hidden)
return output
class FCNet(nn.Module):
"""Simple class for non-linear fully connect network
"""
def __init__(self, dims):
super(FCNet, self).__init__()
layers = []
for i in range(len(dims)-2):
in_dim = dims[i]
out_dim = dims[i+1]
layers.append(weight_norm(nn.Linear(in_dim, out_dim), dim=None))
layers.append(nn.ReLU())
layers.append(weight_norm(nn.Linear(dims[-2], dims[-1]), dim=None))
layers.append(nn.ReLU())
self.main = nn.Sequential(*layers)
def forward(self, x):
return self.main(x)
class SimpleClassifier(nn.Module):
def __init__(self, in_dim, hid_dim, out_dim, dropout):
super(SimpleClassifier, self).__init__()
layers = [
weight_norm(nn.Linear(in_dim, hid_dim), dim=None),
nn.ReLU(),
nn.Dropout(dropout),
weight_norm(nn.Linear(hid_dim, out_dim), dim=None)
]
self.main = nn.Sequential(*layers)
def forward(self, x):
logits = self.main(x)
return logits
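# Illustrative sketch (not part of the original module): minimal usage of the
# blocks above with random tensors, just to document the expected shapes. The
# batch size, sequence length, and hidden sizes are arbitrary assumptions.
def _demo_shapes():
    q_emb = QuestionEmbedding(in_dim=300, num_hid=512, nlayers=1,
                              bidirect=False, dropout=0.0, rnn_type='GRU')
    fc = FCNet([512, 256])
    clf = SimpleClassifier(in_dim=256, hid_dim=128, out_dim=10, dropout=0.5)
    x = torch.randn(4, 14, 300)   # [batch, sequence, in_dim]
    hidden = q_emb(x)              # [4, 512], last hidden state of the GRU
    logits = clf(fc(hidden))       # [4, 10]
    return logits.shape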
|
[
"torch.nn.Dropout",
"numpy.load",
"torch.nn.ReLU",
"torch.nn.Sequential",
"torch.nn.Embedding",
"torch.cat",
"torch.nn.Linear"
] |
[((615, 668), 'torch.nn.Embedding', 'nn.Embedding', (['(ntoken + 1)', 'emb_dim'], {'padding_idx': 'ntoken'}), '(ntoken + 1, emb_dim, padding_idx=ntoken)\n', (627, 668), True, 'import torch.nn as nn\n'), ((690, 709), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (700, 709), True, 'import torch.nn as nn\n'), ((2696, 2734), 'torch.cat', 'torch.cat', (['(forward_, backward)'], {'dim': '(1)'}), '((forward_, backward), dim=1)\n', (2705, 2734), False, 'import torch\n'), ((3498, 3520), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (3511, 3520), True, 'import torch.nn as nn\n'), ((3951, 3973), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (3964, 3973), True, 'import torch.nn as nn\n'), ((849, 865), 'numpy.load', 'np.load', (['np_file'], {}), '(np_file)\n', (856, 865), True, 'import numpy as np\n'), ((3466, 3475), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3473, 3475), True, 'import torch.nn as nn\n'), ((3814, 3823), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3821, 3823), True, 'import torch.nn as nn\n'), ((3837, 3856), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (3847, 3856), True, 'import torch.nn as nn\n'), ((3357, 3366), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3364, 3366), True, 'import torch.nn as nn\n'), ((3402, 3431), 'torch.nn.Linear', 'nn.Linear', (['dims[-2]', 'dims[-1]'], {}), '(dims[-2], dims[-1])\n', (3411, 3431), True, 'import torch.nn as nn\n'), ((3763, 3789), 'torch.nn.Linear', 'nn.Linear', (['in_dim', 'hid_dim'], {}), '(in_dim, hid_dim)\n', (3772, 3789), True, 'import torch.nn as nn\n'), ((3882, 3909), 'torch.nn.Linear', 'nn.Linear', (['hid_dim', 'out_dim'], {}), '(hid_dim, out_dim)\n', (3891, 3909), True, 'import torch.nn as nn\n'), ((3292, 3318), 'torch.nn.Linear', 'nn.Linear', (['in_dim', 'out_dim'], {}), '(in_dim, out_dim)\n', (3301, 3318), True, 'import torch.nn as nn\n')]
|
#!/usr/bin/env python
# -*- coding:UTF-8 -*-
# File Name : preprocess.py
# Purpose :
# Creation Date : 10-12-2017
# Last Modified : Thu 18 Jan 2018 05:34:42 PM CST
# Created By : <NAME> [jeasinema[at]gmail[dot]com]
import os
import multiprocessing
import numpy as np
from config import cfg
data_dir = 'velodyne'
def process_pointcloud(point_cloud, cls=cfg.DETECT_OBJ):
# Input:
# (N, 4)
# Output:
# voxel_dict
if cls == 'Car':
scene_size = np.array([4, 80, 70.4], dtype=np.float32)
voxel_size = np.array([0.4, 0.2, 0.2], dtype=np.float32)
grid_size = np.array([10, 400, 352], dtype=np.int64)
lidar_coord = np.array([0, 40, 3], dtype=np.float32)
max_point_number = 35
else:
scene_size = np.array([4, 40, 48], dtype=np.float32)
voxel_size = np.array([0.4, 0.2, 0.2], dtype=np.float32)
grid_size = np.array([10, 200, 240], dtype=np.int64)
lidar_coord = np.array([0, 20, 3], dtype=np.float32)
max_point_number = 45
np.random.shuffle(point_cloud)
shifted_coord = point_cloud[:, :3] + lidar_coord
# reverse the point cloud coordinate (X, Y, Z) -> (Z, Y, X)
voxel_index = np.floor(
        shifted_coord[:, ::-1] / voxel_size).astype(np.int64)
bound_x = np.logical_and(
voxel_index[:, 2] >= 0, voxel_index[:, 2] < grid_size[2])
bound_y = np.logical_and(
voxel_index[:, 1] >= 0, voxel_index[:, 1] < grid_size[1])
bound_z = np.logical_and(
voxel_index[:, 0] >= 0, voxel_index[:, 0] < grid_size[0])
bound_box = np.logical_and(np.logical_and(bound_x, bound_y), bound_z)
point_cloud = point_cloud[bound_box]
voxel_index = voxel_index[bound_box]
# [K, 3] coordinate buffer as described in the paper
coordinate_buffer = np.unique(voxel_index, axis=0)
K = len(coordinate_buffer)
T = max_point_number
# [K, 1] store number of points in each voxel grid
number_buffer = np.zeros(shape=(K), dtype=np.int64)
# [K, T, 7] feature buffer as described in the paper
feature_buffer = np.zeros(shape=(K, T, 7), dtype=np.float32)
# build a reverse index for coordinate buffer
index_buffer = {}
for i in range(K):
index_buffer[tuple(coordinate_buffer[i])] = i
for voxel, point in zip(voxel_index, point_cloud):
index = index_buffer[tuple(voxel)]
number = number_buffer[index]
if number < T:
feature_buffer[index, number, :4] = point
number_buffer[index] += 1
feature_buffer[:, :, -3:] = feature_buffer[:, :, :3] - \
feature_buffer[:, :, :3].sum(axis=1, keepdims=True)/number_buffer.reshape(K, 1, 1)
voxel_dict = {'feature_buffer': feature_buffer,
'coordinate_buffer': coordinate_buffer,
'number_buffer': number_buffer}
return voxel_dict
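# Illustrative sketch (not part of the original script): run the voxelizer on a
# random point cloud and report the buffer shapes. The point count and coordinate
# ranges are arbitrary assumptions; the helper is defined for documentation only
# and is not called by the multiprocessing pipeline below.
def _demo_process_pointcloud(n_points=1000):
    # x in [0, 70), y in [-40, 40), z in [-3, 1), reflectance in [0, 1)
    pc = np.random.uniform(low=[0, -40, -3, 0], high=[70, 40, 1, 1],
                           size=(n_points, 4)).astype(np.float32)
    voxel_dict = process_pointcloud(pc, cls='Car')
    # feature_buffer: (K, 35, 7), coordinate_buffer: (K, 3), number_buffer: (K,)
    return voxel_dict['feature_buffer'].shape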
def worker(filelist):
for file in filelist:
point_cloud = np.fromfile(
os.path.join(data_dir, file), dtype=np.float32).reshape(-1, 4)
name, extension = os.path.splitext(file)
voxel_dict = process_pointcloud(point_cloud)
output_dir = 'voxel' if cfg.DETECT_OBJ == 'Car' else 'voxel_ped'
np.savez_compressed(os.path.join(output_dir, name), **voxel_dict)
if __name__ == '__main__':
filelist = [f for f in os.listdir(data_dir) if f.endswith('bin')]
num_worker = 8
for sublist in np.array_split(filelist, num_worker):
p = multiprocessing.Process(target=worker, args=(sublist,))
p.start()
|
[
"numpy.random.shuffle",
"numpy.logical_and",
"numpy.floor",
"numpy.zeros",
"numpy.array",
"os.path.splitext",
"numpy.array_split",
"multiprocessing.Process",
"os.path.join",
"os.listdir",
"numpy.unique"
] |
[((1029, 1059), 'numpy.random.shuffle', 'np.random.shuffle', (['point_cloud'], {}), '(point_cloud)\n', (1046, 1059), True, 'import numpy as np\n'), ((1281, 1353), 'numpy.logical_and', 'np.logical_and', (['(voxel_index[:, 2] >= 0)', '(voxel_index[:, 2] < grid_size[2])'], {}), '(voxel_index[:, 2] >= 0, voxel_index[:, 2] < grid_size[2])\n', (1295, 1353), True, 'import numpy as np\n'), ((1377, 1449), 'numpy.logical_and', 'np.logical_and', (['(voxel_index[:, 1] >= 0)', '(voxel_index[:, 1] < grid_size[1])'], {}), '(voxel_index[:, 1] >= 0, voxel_index[:, 1] < grid_size[1])\n', (1391, 1449), True, 'import numpy as np\n'), ((1473, 1545), 'numpy.logical_and', 'np.logical_and', (['(voxel_index[:, 0] >= 0)', '(voxel_index[:, 0] < grid_size[0])'], {}), '(voxel_index[:, 0] >= 0, voxel_index[:, 0] < grid_size[0])\n', (1487, 1545), True, 'import numpy as np\n'), ((1795, 1825), 'numpy.unique', 'np.unique', (['voxel_index'], {'axis': '(0)'}), '(voxel_index, axis=0)\n', (1804, 1825), True, 'import numpy as np\n'), ((1959, 1992), 'numpy.zeros', 'np.zeros', ([], {'shape': 'K', 'dtype': 'np.int64'}), '(shape=K, dtype=np.int64)\n', (1967, 1992), True, 'import numpy as np\n'), ((2074, 2117), 'numpy.zeros', 'np.zeros', ([], {'shape': '(K, T, 7)', 'dtype': 'np.float32'}), '(shape=(K, T, 7), dtype=np.float32)\n', (2082, 2117), True, 'import numpy as np\n'), ((3403, 3439), 'numpy.array_split', 'np.array_split', (['filelist', 'num_worker'], {}), '(filelist, num_worker)\n', (3417, 3439), True, 'import numpy as np\n'), ((477, 518), 'numpy.array', 'np.array', (['[4, 80, 70.4]'], {'dtype': 'np.float32'}), '([4, 80, 70.4], dtype=np.float32)\n', (485, 518), True, 'import numpy as np\n'), ((540, 583), 'numpy.array', 'np.array', (['[0.4, 0.2, 0.2]'], {'dtype': 'np.float32'}), '([0.4, 0.2, 0.2], dtype=np.float32)\n', (548, 583), True, 'import numpy as np\n'), ((604, 644), 'numpy.array', 'np.array', (['[10, 400, 352]'], {'dtype': 'np.int64'}), '([10, 400, 352], dtype=np.int64)\n', (612, 644), True, 'import numpy as np\n'), ((667, 705), 'numpy.array', 'np.array', (['[0, 40, 3]'], {'dtype': 'np.float32'}), '([0, 40, 3], dtype=np.float32)\n', (675, 705), True, 'import numpy as np\n'), ((767, 806), 'numpy.array', 'np.array', (['[4, 40, 48]'], {'dtype': 'np.float32'}), '([4, 40, 48], dtype=np.float32)\n', (775, 806), True, 'import numpy as np\n'), ((828, 871), 'numpy.array', 'np.array', (['[0.4, 0.2, 0.2]'], {'dtype': 'np.float32'}), '([0.4, 0.2, 0.2], dtype=np.float32)\n', (836, 871), True, 'import numpy as np\n'), ((892, 932), 'numpy.array', 'np.array', (['[10, 200, 240]'], {'dtype': 'np.int64'}), '([10, 200, 240], dtype=np.int64)\n', (900, 932), True, 'import numpy as np\n'), ((955, 993), 'numpy.array', 'np.array', (['[0, 20, 3]'], {'dtype': 'np.float32'}), '([0, 20, 3], dtype=np.float32)\n', (963, 993), True, 'import numpy as np\n'), ((1587, 1619), 'numpy.logical_and', 'np.logical_and', (['bound_x', 'bound_y'], {}), '(bound_x, bound_y)\n', (1601, 1619), True, 'import numpy as np\n'), ((3043, 3065), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (3059, 3065), False, 'import os\n'), ((3453, 3508), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'worker', 'args': '(sublist,)'}), '(target=worker, args=(sublist,))\n', (3476, 3508), False, 'import multiprocessing\n'), ((1196, 1241), 'numpy.floor', 'np.floor', (['(shifted_coord[:, ::-1] / voxel_size)'], {}), '(shifted_coord[:, ::-1] / voxel_size)\n', (1204, 1241), True, 'import numpy as np\n'), ((3220, 3250), 'os.path.join', 'os.path.join', 
(['output_dir', 'name'], {}), '(output_dir, name)\n', (3232, 3250), False, 'import os\n'), ((3322, 3342), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (3332, 3342), False, 'import os\n'), ((2953, 2981), 'os.path.join', 'os.path.join', (['data_dir', 'file'], {}), '(data_dir, file)\n', (2965, 2981), False, 'import os\n')]
|
from typing import Dict
import numpy as np
import math
import cv2
from nxs_types.model import NxsModel
class AttrDict(dict):
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
class Config:
search_size = 255
exemplar_size = 127
base_size = 8
stride = 8
score_size = (search_size - exemplar_size) // stride + base_size + 1
penalty_k = 0.04
window_influence = 0.4
lr = 1.0
windowing = "cosine"
context_amount = 0.5
# Anchors.
anchor = AttrDict(
{
"stride": 8,
"ratios": [0.33, 0.5, 1, 2, 3],
"scales": [8],
"num_anchors": 5,
}
)
def generate_anchors(cfg):
anchors = np.zeros((cfg.num_anchors, 4), dtype=np.float32)
size = cfg.stride * cfg.stride
count = 0
for r in cfg.ratios:
ws = int(math.sqrt(size * 1.0 / r))
hs = int(ws * r)
for s in cfg.scales:
w = ws * s
h = hs * s
anchors[count] = 0.5 * np.array([-w, -h, w, h])
count += 1
return anchors
def prepare_anchors(anchor, cfg):
x1, y1, x2, y2 = anchor[:, 0], anchor[:, 1], anchor[:, 2], anchor[:, 3]
anchor = np.stack([(x1 + x2) * 0.5, (y1 + y2) * 0.5, x2 - x1, y2 - y1], 1)
total_stride = cfg.stride
anchor_num = anchor.shape[0]
anchor = np.tile(anchor, cfg.score_size * cfg.score_size).reshape((-1, 4))
b = -(cfg.score_size // 2) * total_stride
xx, yy = np.meshgrid(
[b + total_stride * dx for dx in range(cfg.score_size)],
[b + total_stride * dy for dy in range(cfg.score_size)],
)
xx, yy = (
np.tile(xx.flatten(), (anchor_num, 1)).flatten(),
np.tile(yy.flatten(), (anchor_num, 1)).flatten(),
)
anchor[:, 0], anchor[:, 1] = xx.astype(np.float32), yy.astype(np.float32)
return anchor
def init_anchors(cfg):
anchors = generate_anchors(cfg.anchor)
anchors = prepare_anchors(anchors, cfg)
return anchors
def tracking_init(cfg):
anchors = init_anchors(cfg)
window = np.outer(np.hanning(cfg.score_size), np.hanning(cfg.score_size))
window = np.tile(window.flatten(), cfg.anchor.num_anchors)
return anchors, window
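# Illustrative sketch (not part of the original module): the anchor grid holds one
# row per (anchor shape, spatial position) pair and the cosine window is flattened
# to the same length, so both can be indexed by the same best-score index later in
# update_bounding_box. Hypothetical helper, defined for documentation only.
def _demo_anchor_shapes():
    demo_cfg = Config()
    demo_anchors, demo_window = tracking_init(demo_cfg)
    n = demo_cfg.anchor.num_anchors * demo_cfg.score_size * demo_cfg.score_size
    assert demo_anchors.shape == (n, 4)   # n = 5 * 25 * 25 = 3125 with the defaults
    assert demo_window.shape == (n,)
    return n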
def _create_polygon(loc, size):
loc = np.array(
[
[loc[0] - size[0] // 2, loc[1] - size[1] // 2],
[loc[0] - size[0] // 2, loc[1] + size[1] // 2],
[loc[0] + size[0] // 2, loc[1] + size[1] // 2],
[loc[0] + size[0] // 2, loc[1] - size[1] // 2],
],
dtype=np.int32,
)
loc = loc.reshape(-1, 1, 2)
return loc
def softmax(x):
adjusted_x = x - np.amax(x, axis=-1, keepdims=-1)
numerator = np.exp(adjusted_x)
denominator = np.sum(numerator, axis=-1, keepdims=-1)
return numerator / denominator
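# Illustrative note (not part of the original module): subtracting the row-wise
# maximum before exponentiating keeps np.exp from overflowing for large scores
# while leaving the result unchanged, since softmax(x) == softmax(x - c) for any
# constant c (keepdims=-1 above behaves like keepdims=True because -1 is truthy).
# A tiny check, defined but never called:
def _demo_softmax_stable():
    x = np.array([[1000.0, 1001.0], [0.0, 1.0]])
    out = softmax(x)
    assert np.all(np.isfinite(out))        # no overflow to inf/nan
    assert np.allclose(out.sum(axis=-1), 1.0)  # each row sums to one
    return out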
def update_bounding_box(
image,
scores,
bboxes,
anchors,
window,
target_pos,
target_size,
search_scale,
cfg,
):
bboxes = np.transpose(bboxes, [3, 1, 2, 0]).reshape(4, -1)
scores = softmax(np.transpose(scores, [3, 1, 2, 0]).reshape(2, -1).T)[:, 1]
bboxes[0, :] = bboxes[0, :] * anchors[:, 2] + anchors[:, 0]
bboxes[1, :] = bboxes[1, :] * anchors[:, 3] + anchors[:, 1]
bboxes[2, :] = np.exp(bboxes[2, :]) * anchors[:, 2]
bboxes[3, :] = np.exp(bboxes[3, :]) * anchors[:, 3]
def change(r):
return np.maximum(r, 1.0 / r)
def sz(w, h):
pad = (w + h) * 0.5
sz2 = (w + pad) * (h + pad)
return np.sqrt(sz2)
def sz_wh(wh):
pad = (wh[0] + wh[1]) * 0.5
sz2 = (wh[0] + pad) * (wh[1] + pad)
return np.sqrt(sz2)
# size penalty
target_sz_in_crop = target_size * search_scale
s_c = change(sz(bboxes[2, :], bboxes[3, :]) / (sz_wh(target_sz_in_crop)))
r_c = change(
(target_sz_in_crop[0] / target_sz_in_crop[1])
/ (bboxes[2, :] / bboxes[3, :])
)
penalty = np.exp(-(r_c * s_c - 1) * cfg.penalty_k)
pscore = penalty * scores
# cos window (motion model)
pscore = (
pscore * (1 - cfg.window_influence) + window * cfg.window_influence
)
best_pscore_id = np.argmax(pscore)
pred_in_crop = bboxes[:, best_pscore_id] / search_scale
lr = penalty[best_pscore_id] * scores[best_pscore_id] * cfg.lr
# print(lr, pred_in_crop)
res_x = pred_in_crop[0] + target_pos[0]
res_y = pred_in_crop[1] + target_pos[1]
res_w = target_size[0] * (1 - lr) + pred_in_crop[2] * lr
res_h = target_size[1] * (1 - lr) + pred_in_crop[3] * lr
target_pos = np.array([res_x, res_y])
target_size = np.array([res_w, res_h])
h, w, _ = image.shape if len(image.shape) == 3 else image[0].shape
target_pos[0] = max(0, min(w, target_pos[0]))
target_pos[1] = max(0, min(h, target_pos[1]))
target_size[0] = max(10, min(w, target_size[0]))
target_size[1] = max(10, min(h, target_size[1]))
return target_pos, target_size, np.max(pscore)
# create global vars
cfg = Config()
anchors, window = tracking_init(cfg)
def postprocessing(
data, postproc_params, component_model: NxsModel, metadata: Dict = {}
):
bbox = data["BBox/Head/conv2d_1/BiasAdd"]
score = data["Score/Head/conv2d_1/BiasAdd"]
cur_img = metadata.pop("cur_img")
h, w = cur_img.shape[:2]
target_pos1 = metadata.pop("target_pos1")
target_size1 = metadata.pop("target_size1")
search1_scale = metadata.pop("search1_scale")
target_pos1, target_size1, best_score1 = update_bounding_box(
cur_img,
np.expand_dims(score, axis=0),
np.expand_dims(np.array(bbox), axis=0),
anchors,
window,
target_pos1,
target_size1,
search1_scale,
cfg,
)
polygon = _create_polygon(target_pos1, target_size1)
polygon = polygon.reshape((-1, 2))
left, top, right, bottom = (
polygon[0][0],
polygon[0][1],
polygon[2][0],
polygon[2][1],
)
left = max(0, left)
top = max(0, top)
right = max(0, right)
bottom = max(0, bottom)
return {
"detections": [
{
"class_name": "track",
"class_id": int(0),
"score": float(best_score1),
"rel_bbox": {
"left": float(left) / w,
"top": float(top) / h,
"right": float(right) / w,
"bottom": float(bottom) / h,
},
"bbox": {
"left": left,
"top": top,
"right": right,
"bottom": bottom,
},
}
]
}
|
[
"numpy.stack",
"numpy.sum",
"numpy.maximum",
"math.sqrt",
"numpy.argmax",
"numpy.zeros",
"numpy.expand_dims",
"numpy.transpose",
"numpy.amax",
"numpy.max",
"numpy.array",
"numpy.exp",
"numpy.tile",
"numpy.hanning",
"numpy.sqrt"
] |
[((712, 760), 'numpy.zeros', 'np.zeros', (['(cfg.num_anchors, 4)'], {'dtype': 'np.float32'}), '((cfg.num_anchors, 4), dtype=np.float32)\n', (720, 760), True, 'import numpy as np\n'), ((1211, 1276), 'numpy.stack', 'np.stack', (['[(x1 + x2) * 0.5, (y1 + y2) * 0.5, x2 - x1, y2 - y1]', '(1)'], {}), '([(x1 + x2) * 0.5, (y1 + y2) * 0.5, x2 - x1, y2 - y1], 1)\n', (1219, 1276), True, 'import numpy as np\n'), ((2263, 2490), 'numpy.array', 'np.array', (['[[loc[0] - size[0] // 2, loc[1] - size[1] // 2], [loc[0] - size[0] // 2, \n loc[1] + size[1] // 2], [loc[0] + size[0] // 2, loc[1] + size[1] // 2],\n [loc[0] + size[0] // 2, loc[1] - size[1] // 2]]'], {'dtype': 'np.int32'}), '([[loc[0] - size[0] // 2, loc[1] - size[1] // 2], [loc[0] - size[0] //\n 2, loc[1] + size[1] // 2], [loc[0] + size[0] // 2, loc[1] + size[1] // \n 2], [loc[0] + size[0] // 2, loc[1] - size[1] // 2]], dtype=np.int32)\n', (2271, 2490), True, 'import numpy as np\n'), ((2699, 2717), 'numpy.exp', 'np.exp', (['adjusted_x'], {}), '(adjusted_x)\n', (2705, 2717), True, 'import numpy as np\n'), ((2736, 2775), 'numpy.sum', 'np.sum', (['numerator'], {'axis': '(-1)', 'keepdims': '(-1)'}), '(numerator, axis=-1, keepdims=-1)\n', (2742, 2775), True, 'import numpy as np\n'), ((3925, 3965), 'numpy.exp', 'np.exp', (['(-(r_c * s_c - 1) * cfg.penalty_k)'], {}), '(-(r_c * s_c - 1) * cfg.penalty_k)\n', (3931, 3965), True, 'import numpy as np\n'), ((4147, 4164), 'numpy.argmax', 'np.argmax', (['pscore'], {}), '(pscore)\n', (4156, 4164), True, 'import numpy as np\n'), ((4552, 4576), 'numpy.array', 'np.array', (['[res_x, res_y]'], {}), '([res_x, res_y])\n', (4560, 4576), True, 'import numpy as np\n'), ((4595, 4619), 'numpy.array', 'np.array', (['[res_w, res_h]'], {}), '([res_w, res_h])\n', (4603, 4619), True, 'import numpy as np\n'), ((2073, 2099), 'numpy.hanning', 'np.hanning', (['cfg.score_size'], {}), '(cfg.score_size)\n', (2083, 2099), True, 'import numpy as np\n'), ((2101, 2127), 'numpy.hanning', 'np.hanning', (['cfg.score_size'], {}), '(cfg.score_size)\n', (2111, 2127), True, 'import numpy as np\n'), ((2650, 2682), 'numpy.amax', 'np.amax', (['x'], {'axis': '(-1)', 'keepdims': '(-1)'}), '(x, axis=-1, keepdims=-1)\n', (2657, 2682), True, 'import numpy as np\n'), ((3252, 3272), 'numpy.exp', 'np.exp', (['bboxes[2, :]'], {}), '(bboxes[2, :])\n', (3258, 3272), True, 'import numpy as np\n'), ((3308, 3328), 'numpy.exp', 'np.exp', (['bboxes[3, :]'], {}), '(bboxes[3, :])\n', (3314, 3328), True, 'import numpy as np\n'), ((3380, 3402), 'numpy.maximum', 'np.maximum', (['r', '(1.0 / r)'], {}), '(r, 1.0 / r)\n', (3390, 3402), True, 'import numpy as np\n'), ((3501, 3513), 'numpy.sqrt', 'np.sqrt', (['sz2'], {}), '(sz2)\n', (3508, 3513), True, 'import numpy as np\n'), ((3629, 3641), 'numpy.sqrt', 'np.sqrt', (['sz2'], {}), '(sz2)\n', (3636, 3641), True, 'import numpy as np\n'), ((4936, 4950), 'numpy.max', 'np.max', (['pscore'], {}), '(pscore)\n', (4942, 4950), True, 'import numpy as np\n'), ((5523, 5552), 'numpy.expand_dims', 'np.expand_dims', (['score'], {'axis': '(0)'}), '(score, axis=0)\n', (5537, 5552), True, 'import numpy as np\n'), ((854, 879), 'math.sqrt', 'math.sqrt', (['(size * 1.0 / r)'], {}), '(size * 1.0 / r)\n', (863, 879), False, 'import math\n'), ((1355, 1403), 'numpy.tile', 'np.tile', (['anchor', '(cfg.score_size * cfg.score_size)'], {}), '(anchor, cfg.score_size * cfg.score_size)\n', (1362, 1403), True, 'import numpy as np\n'), ((2974, 3008), 'numpy.transpose', 'np.transpose', (['bboxes', '[3, 1, 2, 0]'], {}), '(bboxes, [3, 1, 2, 0])\n', (2986, 
3008), True, 'import numpy as np\n'), ((5577, 5591), 'numpy.array', 'np.array', (['bbox'], {}), '(bbox)\n', (5585, 5591), True, 'import numpy as np\n'), ((1018, 1042), 'numpy.array', 'np.array', (['[-w, -h, w, h]'], {}), '([-w, -h, w, h])\n', (1026, 1042), True, 'import numpy as np\n'), ((3045, 3079), 'numpy.transpose', 'np.transpose', (['scores', '[3, 1, 2, 0]'], {}), '(scores, [3, 1, 2, 0])\n', (3057, 3079), True, 'import numpy as np\n')]
|
from torchfm.dataset.criteo import CriteoDataset
dataset = CriteoDataset()
import json
import numpy as np
n_items = 4096
total_items = 500
input_data = []
for i in range(3):
x = dataset[i][0]
tiled_x = np.tile(x,n_items)
tiled_x = tiled_x.reshape(n_items,-1)
tiled_x[:,-1] = np.random.choice(range(total_items), n_items, replace=True)
inp_x = tiled_x.flatten().tolist()
input_data.append({'INPUT0__0':inp_x})
test_data = {'data':input_data}
out_file = 'data_'+str(n_items)+'.json'
with open(out_file, 'w') as f:
json.dump(test_data, f)
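# Illustrative sketch (not part of the original script): read the file back and
# confirm each request carries n_items flattened rows of the original feature
# width. Hypothetical helper; call it manually after the dump above if needed.
def _check_output(path=out_file):
    with open(path) as fh:
        payload = json.load(fh)
    n_fields = len(dataset[0][0])
    for row in payload['data']:
        assert len(row['INPUT0__0']) == n_items * n_fields
    return len(payload['data'])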
|
[
"json.dump",
"numpy.tile",
"torchfm.dataset.criteo.CriteoDataset"
] |
[((59, 74), 'torchfm.dataset.criteo.CriteoDataset', 'CriteoDataset', ([], {}), '()\n', (72, 74), False, 'from torchfm.dataset.criteo import CriteoDataset\n'), ((212, 231), 'numpy.tile', 'np.tile', (['x', 'n_items'], {}), '(x, n_items)\n', (219, 231), True, 'import numpy as np\n'), ((543, 566), 'json.dump', 'json.dump', (['test_data', 'f'], {}), '(test_data, f)\n', (552, 566), False, 'import json\n')]
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import os
import cv2
import numpy as np
from tqdm import tqdm
import glob
def calculate_mean_std(path):
# folder = os.listdir(path)
folder = glob.glob(path, recursive=False)
mean = []
std = []
R_mean = 0.0
G_mean = 0.0
B_mean = 0.0
# for img_name in tqdm(folder, desc="calculate_mean_std"):
# image = cv2.imread(os.path.join(path+img_name))/255.
for img_name in tqdm(folder, desc="calculate_mean_std"):
image = cv2.imread(img_name)/255.
R_mean += np.mean(image[:,:,2])
G_mean += np.mean(image[:,:,1])
B_mean += np.mean(image[:,:,0])
R_mean = R_mean / len(folder)
G_mean = G_mean / len(folder)
B_mean = B_mean / len(folder)
mean.extend([R_mean, G_mean, B_mean])
#print(mean)
R_std = 0.0
G_std = 0.0
B_std = 0.0
# for img_name in tqdm(folder, desc="calculate_mean_std"):
# image = cv2.imread(os.path.join(path+img_name))/255.
for img_name in tqdm(folder, desc="calculate_mean_std"):
image = cv2.imread(img_name)/255.
image_size = image.shape[0]*image.shape[1]
R_std += np.sum(np.power(image[:,:,2] - R_mean, 2)) / image_size
G_std += np.sum(np.power(image[:,:,1] - G_mean, 2)) / image_size
B_std += np.sum(np.power(image[:,:,0] - B_mean, 2)) / image_size
R_std = np.sqrt(R_std / len(folder))
G_std = np.sqrt(G_std / len(folder))
B_std = np.sqrt(B_std / len(folder))
std.extend([R_std, G_std, B_std])
#print(std)
return mean, std
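# Illustrative sketch (not part of the original script): the loop above averages
# per-image statistics, which matches a single vectorized computation whenever all
# images share the same resolution (otherwise smaller images are weighted the same
# as larger ones). The hypothetical helper below shows the vectorized equivalent
# on random data; it is not called by the script.
def _demo_vectorized_mean_std():
    images = np.stack([np.random.rand(8, 8, 3) for _ in range(4)])  # (N, H, W, 3), BGR as in cv2
    mean_bgr = images.mean(axis=(0, 1, 2))
    std_bgr = images.std(axis=(0, 1, 2))
    return mean_bgr[::-1].tolist(), std_bgr[::-1].tolist()  # reordered to [R, G, B]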
if __name__ == "__main__":
image_path = "./dataset/carla/data*/CameraRGB/*.png" # glob.iglob(img_path)>>dataABC,dataD
mean, std = calculate_mean_std(path=image_path)
print('mean={}\nstd={}\n#value=[R,G,B]'.format(mean,std))
# mean=[0.35245398366625774, 0.3581336873774504, 0.35212403845792456]
# std=[0.26050246625763057, 0.2570952503100638, 0.2654258674345489]
# value=[R,G,B]
|
[
"tqdm.tqdm",
"numpy.power",
"cv2.imread",
"numpy.mean",
"glob.glob"
] |
[((196, 228), 'glob.glob', 'glob.glob', (['path'], {'recursive': '(False)'}), '(path, recursive=False)\n', (205, 228), False, 'import glob\n'), ((455, 494), 'tqdm.tqdm', 'tqdm', (['folder'], {'desc': '"""calculate_mean_std"""'}), "(folder, desc='calculate_mean_std')\n", (459, 494), False, 'from tqdm import tqdm\n'), ((1014, 1053), 'tqdm.tqdm', 'tqdm', (['folder'], {'desc': '"""calculate_mean_std"""'}), "(folder, desc='calculate_mean_std')\n", (1018, 1053), False, 'from tqdm import tqdm\n'), ((556, 579), 'numpy.mean', 'np.mean', (['image[:, :, 2]'], {}), '(image[:, :, 2])\n', (563, 579), True, 'import numpy as np\n'), ((596, 619), 'numpy.mean', 'np.mean', (['image[:, :, 1]'], {}), '(image[:, :, 1])\n', (603, 619), True, 'import numpy as np\n'), ((636, 659), 'numpy.mean', 'np.mean', (['image[:, :, 0]'], {}), '(image[:, :, 0])\n', (643, 659), True, 'import numpy as np\n'), ((512, 532), 'cv2.imread', 'cv2.imread', (['img_name'], {}), '(img_name)\n', (522, 532), False, 'import cv2\n'), ((1071, 1091), 'cv2.imread', 'cv2.imread', (['img_name'], {}), '(img_name)\n', (1081, 1091), False, 'import cv2\n'), ((1172, 1208), 'numpy.power', 'np.power', (['(image[:, :, 2] - R_mean)', '(2)'], {}), '(image[:, :, 2] - R_mean, 2)\n', (1180, 1208), True, 'import numpy as np\n'), ((1245, 1281), 'numpy.power', 'np.power', (['(image[:, :, 1] - G_mean)', '(2)'], {}), '(image[:, :, 1] - G_mean, 2)\n', (1253, 1281), True, 'import numpy as np\n'), ((1318, 1354), 'numpy.power', 'np.power', (['(image[:, :, 0] - B_mean)', '(2)'], {}), '(image[:, :, 0] - B_mean, 2)\n', (1326, 1354), True, 'import numpy as np\n')]
|
import numpy as np
from napari.layers.utils.color_manager_utils import (
guess_continuous,
is_color_mapped,
)
def test_guess_continuous():
continuous_annotation = np.array([1, 2, 3], dtype=np.float32)
assert guess_continuous(continuous_annotation)
categorical_annotation_1 = np.array([True, False], dtype=bool)
assert not guess_continuous(categorical_annotation_1)
categorical_annotation_2 = np.array([1, 2, 3], dtype=int)
assert not guess_continuous(categorical_annotation_2)
def test_is_colormapped_string():
color = 'hello'
properties = {
'hello': np.array([1, 1, 1, 1]),
'hi': np.array([1, 0, 0, 1]),
}
assert is_color_mapped(color, properties)
assert not is_color_mapped('red', properties)
def test_is_colormapped_dict():
"""Colors passed as dicts are treated as colormapped"""
color = {0: np.array([1, 1, 1, 1]), 1: np.array([1, 1, 0, 1])}
properties = {
'hello': np.array([1, 1, 1, 1]),
'hi': np.array([1, 0, 0, 1]),
}
assert is_color_mapped(color, properties)
def test_is_colormapped_array():
"""Colors passed as list/array are treated as not colormapped"""
color_list = [[1, 1, 1, 1], [1, 1, 0, 1]]
properties = {
'hello': np.array([1, 1, 1, 1]),
'hi': np.array([1, 0, 0, 1]),
}
assert not is_color_mapped(color_list, properties)
color_array = np.array(color_list)
assert not is_color_mapped(color_array, properties)
|
[
"napari.layers.utils.color_manager_utils.guess_continuous",
"numpy.array",
"napari.layers.utils.color_manager_utils.is_color_mapped"
] |
[((178, 215), 'numpy.array', 'np.array', (['[1, 2, 3]'], {'dtype': 'np.float32'}), '([1, 2, 3], dtype=np.float32)\n', (186, 215), True, 'import numpy as np\n'), ((227, 266), 'napari.layers.utils.color_manager_utils.guess_continuous', 'guess_continuous', (['continuous_annotation'], {}), '(continuous_annotation)\n', (243, 266), False, 'from napari.layers.utils.color_manager_utils import guess_continuous, is_color_mapped\n'), ((299, 334), 'numpy.array', 'np.array', (['[True, False]'], {'dtype': 'bool'}), '([True, False], dtype=bool)\n', (307, 334), True, 'import numpy as np\n'), ((425, 455), 'numpy.array', 'np.array', (['[1, 2, 3]'], {'dtype': 'int'}), '([1, 2, 3], dtype=int)\n', (433, 455), True, 'import numpy as np\n'), ((685, 719), 'napari.layers.utils.color_manager_utils.is_color_mapped', 'is_color_mapped', (['color', 'properties'], {}), '(color, properties)\n', (700, 719), False, 'from napari.layers.utils.color_manager_utils import guess_continuous, is_color_mapped\n'), ((1046, 1080), 'napari.layers.utils.color_manager_utils.is_color_mapped', 'is_color_mapped', (['color', 'properties'], {}), '(color, properties)\n', (1061, 1080), False, 'from napari.layers.utils.color_manager_utils import guess_continuous, is_color_mapped\n'), ((1409, 1429), 'numpy.array', 'np.array', (['color_list'], {}), '(color_list)\n', (1417, 1429), True, 'import numpy as np\n'), ((350, 392), 'napari.layers.utils.color_manager_utils.guess_continuous', 'guess_continuous', (['categorical_annotation_1'], {}), '(categorical_annotation_1)\n', (366, 392), False, 'from napari.layers.utils.color_manager_utils import guess_continuous, is_color_mapped\n'), ((471, 513), 'napari.layers.utils.color_manager_utils.guess_continuous', 'guess_continuous', (['categorical_annotation_2'], {}), '(categorical_annotation_2)\n', (487, 513), False, 'from napari.layers.utils.color_manager_utils import guess_continuous, is_color_mapped\n'), ((606, 628), 'numpy.array', 'np.array', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (614, 628), True, 'import numpy as np\n'), ((644, 666), 'numpy.array', 'np.array', (['[1, 0, 0, 1]'], {}), '([1, 0, 0, 1])\n', (652, 666), True, 'import numpy as np\n'), ((735, 769), 'napari.layers.utils.color_manager_utils.is_color_mapped', 'is_color_mapped', (['"""red"""', 'properties'], {}), "('red', properties)\n", (750, 769), False, 'from napari.layers.utils.color_manager_utils import guess_continuous, is_color_mapped\n'), ((880, 902), 'numpy.array', 'np.array', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (888, 902), True, 'import numpy as np\n'), ((907, 929), 'numpy.array', 'np.array', (['[1, 1, 0, 1]'], {}), '([1, 1, 0, 1])\n', (915, 929), True, 'import numpy as np\n'), ((967, 989), 'numpy.array', 'np.array', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (975, 989), True, 'import numpy as np\n'), ((1005, 1027), 'numpy.array', 'np.array', (['[1, 0, 0, 1]'], {}), '([1, 0, 0, 1])\n', (1013, 1027), True, 'import numpy as np\n'), ((1267, 1289), 'numpy.array', 'np.array', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (1275, 1289), True, 'import numpy as np\n'), ((1305, 1327), 'numpy.array', 'np.array', (['[1, 0, 0, 1]'], {}), '([1, 0, 0, 1])\n', (1313, 1327), True, 'import numpy as np\n'), ((1350, 1389), 'napari.layers.utils.color_manager_utils.is_color_mapped', 'is_color_mapped', (['color_list', 'properties'], {}), '(color_list, properties)\n', (1365, 1389), False, 'from napari.layers.utils.color_manager_utils import guess_continuous, is_color_mapped\n'), ((1445, 1485), 
'napari.layers.utils.color_manager_utils.is_color_mapped', 'is_color_mapped', (['color_array', 'properties'], {}), '(color_array, properties)\n', (1460, 1485), False, 'from napari.layers.utils.color_manager_utils import guess_continuous, is_color_mapped\n')]
|
# DIRECTORY: ~/kpyreb/eSims/MSims/integrate.py
#
# Integrates using WHFast by default. The integrator can be set by the user as an optional
# keyword. The timestep is automatically calculated to be 1/1000 of the shortest orbit
# (Rein & Tamayo 2015). A symplectic corrector can be used if set by the user.
#
# This is the worker of the simulation. It runs the simulation and records the
# output in simResults.
#
# ARGUMENTS:
# sim = Rebound simulation structure that is run/integrated. Global variable
# astroInputs = Astronomical parameters such as particle masses and mirror orbit configurations
# rebInputs = REBOUND parameters that are the settings for the simulation,
#             for example, which symplectic corrector and which integrator to use.
# simResults = The results of the simulation are saved to this. This includes
#              particle coordinates, velocities, and accelerations. These results
#              are plotted in plotSim.py and output to .csv files in outputSim.py.
# energy = Recording of the total energy of the simulation at each timestep.
# Total energy is calculated using REBOUND's energy method.
# plotTypes = See plotSim.py to see what each plotType is. This is used to
# determine if energy should be returned.
#
# Called by runSim.py
# Author KCT
#
# HISTORY:
# 3 Jul 2017 Created
# 6 Jul 2017 Cleaned up
# 5 Mar 2017   Exact finish time and output points changeable in INFILE.py
# 10 Sep 2017 Added functionality to break if the mirror hits the planet
# or reaches escape velocity
# 18 Jan 2018  Changed exact_finish_time to 1 <---- to correct time issue with ias15
# save the suggested end time and integrated end time to simResults
# 19 Feb 2018 Changed 'x2', 'y2' to 'RRFx', 'RRFy'. Altered input for rotTransform.
# 21 Mar 2018 Cleaned up with more comments
# 28 Mar 2018 Integrate saves the point where it escapes. (Prevents 0 size array errors)
# 18 Jun 2018 Fixed bug where if there was no star integrate would crash
# 20 Jun 2018 Fixed bug where timesteps were inconsistent with the step
# size we wanted to suggest. Don't compare any
# tests prior to this fix to tests made after.
# returns energy (if specified in plotTypes) over time and the number of timesteps
# 20 Jan 2020 SS implemented collision detection using Rebound features
# 9 Feb 2020 SJF added megno calculation (but it doesn't do what we want)
# 18 Feb 2020 SS fixed RRF z coordinate output
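# --------------------------------------------------------------------------
# Illustration only: a minimal sketch of the timestep rule described above
# (dt = 1/1000 of the shortest orbital period, after Rein & Tamayo 2015).
# The real timestep is configured elsewhere before integrate() is called;
# the helper name and its argument below are hypothetical and are not used
# by the code in this file.
def _suggested_timestep(shortest_orbit_period, fraction=1.0 / 1000.0):
    """Return a suggested integrator timestep for the given shortest orbit."""
    return shortest_orbit_period * fraction
# Example (made-up numbers): _suggested_timestep(5400.0) -> 5.4 seconds
# --------------------------------------------------------------------------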
def integrate(sim, astroInputs, rebInputs, simResults, energy, plotTypes, file_dir):
# Import packages and methods from other classes
import numpy as np
import rebound
#import matplotlib.pyplot as plt
import math
from .Energy import Energy
from .rotTransform import rotTransform
from .outputSim import outputSim, outputSim_new
# Assign the sim particles to variable p.
p = sim.particles
#fresult=simResults.SimResults_new
# The amount of steps in the simulation.
Noutputs = int(rebInputs.outputPoints*rebInputs.orbits)
# The length of the simulation in seconds.
simlength = rebInputs.orbits*simResults.torbMirror
# Creates all the time steps of the simulation. Used mainly for plotting in plotSims.py.
times = np.linspace(0, simlength, Noutputs + 1) # Fixed bug here, used to not have + 1
#debug
#print(f"Times: length = {len(times)}")
#return times
# Record at what point the simulation ends. Also, records the timestamp for each
# integration loop.
integrateEndTime=0
# Record the timesteps of the simulation to properly index the recorded data
# in simResults.py.
ts = 0
megno=-1
#quick test of
#sim.N_active=2
# Define a minimum distance for collision. Used for collision detection.
minDist = astroInputs.planetRadius + astroInputs.atmos
sim.exit_min_distance = minDist
# Creates a loop that iterates Noutputs + 1's amount for integrating.
print('last input time : ',times[-1])
sim.status()
# SIMARCHIVE
# REQUIRES REBOUND v3.12.X
#archivefilename = logfile.replace("output-", "archive-").replace(".log", ".bin")
#archive_step_interval = 100*int(math.sqrt(Noutputs))#int(Noutputs/10)
#narchives_total = int(Noutputs/archive_step_interval)
#narchives_total = 20
##sim.automateSimulationArchive(archivefilename, step=archive_step_interval
#archive_counter = 0
#archive_steps = np.linspace(0, simlength, narchives_total + 1)
# save interval [timesteps]
save_interval = 100 # number of timesteps between each save
current_index = 0
nsaves = 0
withRRF = False
for i,time in enumerate(times):
#if (True): # Uncomment to remove crash detection
# Do the integration. Uses exactFinishTime parameter specified in
# the infile.
try:
sim.integrate( time, exact_finish_time = rebInputs.exactFinishTime )
except rebound.Encounter as error:
print("ENCOUNTER OCCURS")
print(error)
# SimArchive
#if time >= archive_steps[archive_counter]:
# sim.simulationarchive_snapshot(archivefilename)
# archive_counter += 1
print(f"energy values: {energy}")
integrateEndTime=sim.t # Update integration time.
if rebInputs.outputMegno:
megno=sim.calculate_megno()
simResults.newResults.append(p,sim.dt,sim.t,time,megno)
if astroInputs.starType == None: # If there is no star, update only mirror and planet coord & vel
coordTempPlanet = [p[0].x, p[0].y, p[0].z]
velTempPlanet = [p[0].vx, p[0].vy, p[0].vz]
accelTempPlanet = [p[0].ax, p[0].ay, p[0].az]
# Need both if statements because we created a dummy particle
# thus the indexes are off.
# TODO Perhaps add the dummy particle first so we can
# keep the indexes the same and we just need one if statement
# adding the star and then outside the if statement we
# add the planet and mirror coordinates because the indexes
# will be the same regardless of if there is a star or not.
coordTempMirror = [p[2].x, p[2].y, p[2].z]
velTempMirror = [p[2].vx, p[2].vy, p[2].vz]
accelTempMirror = [p[2].ax, p[2].ay, p[2].az]
if astroInputs.starType != None: # If there is a star, update all three particles.
coordTempStar = [p[0].x, p[0].y, p[0].z]
velTempStar = [p[0].vx, p[0].vy, p[0].vz]
accelTempStar = [p[0].ax, p[0].ay, p[0].az]
coordTempPlanet = [p[1].x, p[1].y, p[1].z]
velTempPlanet = [p[1].vx, p[1].vy, p[1].vz]
accelTempPlanet = [p[1].ax, p[1].ay, p[1].az]
coordTempMirror = [p[2].x, p[2].y, p[2].z]
velTempMirror = [p[2].vx, p[2].vy, p[2].vz]
accelTempMirror = [p[2].ax, p[2].ay, p[2].az]
# Calculate and save the current simulation energy.
energyTemp = sim.calculate_energy()
energy.saveEnergy(energyTemp)
# Update the number of timesteps.
ts = ts + 1
# Saves particle conditions
if astroInputs.starType == None: # If there is no star, only record the planet/mirror info
simResults.saveData(None, None, None,
coordTempPlanet, velTempPlanet, accelTempPlanet,
coordTempMirror, velTempMirror, accelTempMirror,
time,integrateEndTime, sim.dt)
#if astroInputs.starType != None: # If there is a star, record the star info too.
else:
simResults.saveData(coordTempStar, velTempStar, accelTempStar,
coordTempPlanet, velTempPlanet, accelTempPlanet,
coordTempMirror, velTempMirror, accelTempMirror,
time,integrateEndTime, sim.dt)
# EDIT: 10/21/2019 Moved this block from before integrating to here to fix bug where
# an extra point was output after a crash.
# time is equivalent to times[i]
# If the mirror gets within the radius of the planet, stop.
if astroInputs.starType != None:
dist = math.sqrt((p[2].x-p[1].x)**2 + (p[2].y-p[1].y)**2 + (p[2].z-p[1].z)**2)
mirrorVel = math.sqrt((p[2].vx-p[1].vx)**2 + (p[2].vy-p[1].vy)**2 + (p[2].vz-p[1].vz)**2)
if astroInputs.starType == None:
dist = math.sqrt((p[2].x-p[0].x)**2 + (p[2].y-p[0].y)**2 + (p[2].z-p[0].z)**2)
mirrorVel = math.sqrt((p[2].vx-p[0].vx)**2 + (p[2].vy-p[0].vy)**2 + (p[2].vz-p[0].vz)**2)
        # Calculate the mirror's escape velocity
escVel = math.sqrt((2*sim.G*astroInputs.planetMass)/dist)
###
# debugging
#print("-"*len("no. suggested end times: "))
#print(f"iteration no. {i}")
#print(f"no. actual end times: {len(simResults.actualEndTime)}")
#print(f"no. suggested end times: {len(simResults.suggestedEndTime)}")
#print(f"no. mirror coords: {len(simResults.coordMirror)}")
#### ENERGY ###
## Extracting object velocities from sim results for KE calculations.
## It sets the velocities for the planet, mirror, and star by iterating
## through simResult's coordinates.
#pVX = np.array([x[0] for x in simResults.velPlanet]) # Planet
#pVY = np.array([y[1] for y in simResults.velPlanet])
#pVZ = np.array([z[2] for z in simResults.velPlanet])
#mVX = np.array([x[0] for x in simResults.velMirror]) # Mirror
#mVY = np.array([y[1] for y in simResults.velMirror])
#mVZ = np.array([z[2] for z in simResults.velMirror])
#if astroInputs.starType != None: # If there's a star, grab its vels too
# sVX = np.array([x[0] for x in simResults.velStar]) # Star
# sVY = np.array([y[1] for y in simResults.velStar])
# sVZ = np.array([z[2] for z in simResults.velStar])
## Calculating distances between objects to be used in GPE calculations.
#energy.mDistP = np.sqrt((mX-pX)**2 + (mY-pY)**2 + (mZ-pZ)**2) # Mirror distance from planet.
#if astroInputs.starType != None:
# energy.pDistS = np.sqrt((sX-pX)**2 + (sY-pY)**2 + (sZ-pZ)**2) # Distance between planet and star.
# energy.mDistS = np.sqrt((sX-mX)**2 + (sY-mY)**2 + (sZ-mZ)**2) # Distance between mirror and star.
## Calculating total velocities of objects
#velP = np.sqrt((pVX)**2 + (pVY)**2 + (pVZ)**2) # Resultant velocity of planet.
#velM = np.sqrt((mVX)**2 + (mVY)**2 + (mVZ)**2) # Resultant velocity of mirror.
#velMToP = np.sqrt((mVX-pVX)**2 + (mVY-pVY)**2 + (mVZ-pVZ)**2) # Resultant velocity relative to planet of mirror.
#if astroInputs.starType != None: # If there is a star...
# velS = np.sqrt((sVX)**2 + (sVY)**2 + (sVZ)**2) # Resultant velocity of star.
        ## Calculate the KE of the mirror and planet (do these first in case there is
        ## no star)
## KE of planet & mirror = .5mv**2
#energy.mirrorKE = .5*astroInputs.mirrorMass*velM**2 # Output in individualEnergiesDF.csv
#energy.mirrorKEToP = .5*astroInputs.mirrorMass*velMToP**2 # Output in individualEnergiesDF.csv
#energy.planetKE = .5*astroInputs.planetMass*velP**2 # Output in individualEnergiesDF.csv
#
## Calculate the GPE of the mirror and planet
## GPE = GMm/r (for planet & mirror)
#energy.planetMirrorGPE = -(sim.G*astroInputs.planetMass*astroInputs.mirrorMass)/energy.mDistP # Output in individualEnergiesDF.csv
#
#if astroInputs.starType != None: # Calculating energies that involve the star
# # KE
# energy.starKE = .5*astroInputs.starMass*velS**2 # Output in individualEnergiesDF.csv
# energy.totalKE = energy.starKE + energy.planetKE + energy.mirrorKE # Output in totalEnergy.csv
# # GPE
# energy.starPlanetGPE = -(sim.G*astroInputs.starMass*astroInputs.planetMass)/energy.pDistS
# energy.starMirrorGPE = -(sim.G*astroInputs.starMass*astroInputs.mirrorMass)/energy.mDistS
# # Total Energies (Output in totalEnergy.csv)
# energy.totalGPE = energy.starPlanetGPE + energy.planetMirrorGPE + energy.starMirrorGPE
# energy.mirrorEnergy = energy.mirrorKE + energy.planetMirrorGPE + energy.starMirrorGPE
# # Energy of the mirror relative to the planet. Should be constant for sims with no
# # additional forces.
# energy.mirrorEnergyToP = energy.mirrorKEToP + energy.planetMirrorGPE + energy.starMirrorGPE
###
if i % save_interval == 0 and i != 0 and withRRF:
# ---Transform the Coordinates to a Rotating Reference Frame---
# Create arrays for new rotating reference frame coordinates.
# Adjusted to transform for one timestep
planetRRFx = np.zeros(save_interval+1)#np.zeros(1)#ts) #np.zeros(Noutputs + 1)
planetRRFy = np.zeros(save_interval+1)# np.zeros(1)#ts) #np.zeros(Noutputs + 1)
mirrorRRFx = np.zeros(save_interval+1)#(1)#ts) #np.zeros(Noutputs + 1)
mirrorRRFy = np.zeros(save_interval+1)#(1)#ts) #np.zeros(Noutputs + 1)
# Finding XY coordinates. Don't need Z because the planet orbits in the XY plane.
pX = np.array([x[0] for x in simResults.coordPlanet[current_index:current_index + save_interval+1]])#[simResults.coordPlanet[-1][0]])#np.array([x[0] for x in simResults.coordPlanet])
pY = np.array([y[1] for y in simResults.coordPlanet[current_index:current_index + save_interval+1]])#[simResults.coordPlanet[-1][1]])#np.array([y[1] for y in simResults.coordPlanet])
pZ = np.array([z[2] for z in simResults.coordPlanet[current_index:current_index + save_interval+1]])#[simResults.coordPlanet[-1][2]])#np.array([z[2] for z in simResults.coordPlanet]) #added z info
mX = np.array([x[0] for x in simResults.coordMirror[current_index:current_index+save_interval+1]])#[simResults.coordMirror[-1][0]])#np.array([x[0] for x in simResults.coordMirror])
mY = np.array([y[1] for y in simResults.coordMirror[current_index:current_index+save_interval+1]])#[simResults.coordMirror[-1][1]])#np.array([y[1] for y in simResults.coordMirror])
mZ = np.array([z[2] for z in simResults.coordMirror[current_index:current_index+save_interval+1]])#[simResults.coordMirror[-1][2]])#np.array([z[2] for z in simResults.coordMirror]) #added z info
print(f"no. pX entries: {len(pX)}")
print(f"no. pY entries: {len(pY)}")
print(f"no. pZ entries: {len(pZ)}")
print(f"no. mX entries: {len(mX)}")
print(f"no. mY entries: {len(mY)}")
print(f"no. mZ entries: {len(mZ)}")
if astroInputs.starType != None: # If there is a star, calculate the star coordinates too.
sX = np.array([x[0] for x in simResults.coordStar[current_index:current_index+save_interval+1]])#[simResults.coordStar[-1][0]])#np.array([x[0] for x in simResults.coordStar])
sY = np.array([y[1] for y in simResults.coordStar[current_index:current_index+save_interval+1]])#[simResults.coordStar[-1][1]])#np.array([y[1] for y in simResults.coordStar])
sZ = np.array([z[2] for z in simResults.coordStar[current_index:current_index+save_interval+1]])#[simResults.coordStar[-1][2]])#np.array([z[2] for z in simResults.coordStar]) #added z info
# Finding theta (angle of Earth in its orbit).
print(f"no. sX entries: {len(sX)}")
print(f"no. sY entries: {len(sY)}")
print(f"no. sZ entries: {len(sZ)}")
theta = np.arctan2(pY-sY,pX-sX) # Translate the planet because the star may move.
print(f"no. theta values: {len(theta)}")
for t in range(len(theta)):
#for t in range(save_interval + 1):#current_index, current_index + save_interval + 1):#save_interval+1):#+1):#current_index,current_index + save_interval + 1):
# Do the transformation and save the rotating reference frame (RRF) coord.
planetxRRFy = rotTransform(pX[t]-sX[t],pY[t]-sY[t], theta[t])
planetRRFx[t] = planetxRRFy[0]
planetRRFy[t] = planetxRRFy[1]
mirrorxRRFy = rotTransform(mX[t]-sX[t],mY[t]-sY[t],theta[t])
mirrorRRFx[t] = mirrorxRRFy[0]
mirrorRRFy[t] = mirrorxRRFy[1]
coordRRFTempPlanet = [planetRRFx[t], planetRRFy[t], pZ[t]-sZ[t]]
coordRRFTempMirror = [mirrorRRFx[t], mirrorRRFy[t], mZ[t]-sZ[t]]
coordRRFTempStar = [0, 0, 0] # 14 June 2018 changed x,y from None to 0.
# Save the transformed coordinates to the simResults object to be used
# in plotSim.py for graphing.
simResults.saveTransform(coordRRFTempStar, coordRRFTempPlanet, coordRRFTempMirror)
#t = -1#ts - 1#-1 # use to index the last element of the array
#planetxRRFy = rotTransform(pX[t] - sX[t], pY[t] - sY[t], theta[t])
#planetRRFx[t] = planetxRRFy[0]
#planetRRFy[t] = planetxRRFy[1]
#mirrorxRRFy = rotTransform(mX[t] - sX[t], mY[t] - sY[t], theta[t])
#mirrorRRFx[t] = mirrorxRRFy[0]
#mirrorRRFy[t] = mirrorxRRFy[1]
#coordRRFTempPlanet = [planetRRFx[t], planetRRFy[t], pZ[t] - sZ[t]]
#coordRRFTempMirror = [mirrorRRFx[t], mirrorRRFy[t], mZ[t] - sZ[t]]
#coordRRFTempStar = [0, 0, 0]
#simResults.saveTransform(coordRRFTempStar, coordRRFTempPlanet, coordRRFTempMirror)
#for t in range(0,ts):
# # Do the transformation and save the rotating reference frame (RRF) coord.
# planetxRRFy = rotTransform(pX[t]-sX[t],pY[t]-sY[t], theta[t])
# planetRRFx[t] = planetxRRFy[0]
# planetRRFy[t] = planetxRRFy[1]
# mirrorxRRFy = rotTransform(mX[t]-sX[t],mY[t]-sY[t],theta[t])
# mirrorRRFx[t] = mirrorxRRFy[0]
# mirrorRRFy[t] = mirrorxRRFy[1]
# coordRRFTempPlanet = [planetRRFx[t], planetRRFy[t], pZ[t]-sZ[t]]
# coordRRFTempMirror = [mirrorRRFx[t], mirrorRRFy[t], mZ[t]-sZ[t]]
# coordRRFTempStar = [0, 0, 0] # 14 June 2018 changed x,y from None to 0.
# # Save the transformed coordinates to the simResults object to be used
# # in plotSim.py for graphing.
# simResults.saveTransform(coordRRFTempStar, coordRRFTempPlanet, coordRRFTempMirror)
else:
theta = np.arctan2(pY,pX) # No need to translate the planet if it's at the origin
print(f"no. theta values: {len(theta)}")
for t in range(len(theta)):
#for t in range(save_interval + 1):#current_index, current_index + save_interval + 1):#save_interval+1):# + 1):#0,ts):
# Do the transformation and save the rotating reference frame (RRF) coord.
planetxRRFy = rotTransform(pX[t],pY[t], theta[t])
planetRRFx[t] = planetxRRFy[0]
planetRRFy[t] = planetxRRFy[1]
mirrorxRRFy = rotTransform(mX[t],mY[t],theta[t])
mirrorRRFx[t] = mirrorxRRFy[0]
mirrorRRFy[t] = mirrorxRRFy[1]
coordRRFTempPlanet = [planetRRFx[t], planetRRFy[t], 0]
coordRRFTempMirror = [mirrorRRFx[t], mirrorRRFy[t], 0]
coordRRFTempStar = [0, 0, 0] # 14 June 2018 changed x,y from None to 0.
# Save the transformed coordinates to the simResults object to be used
# in plotSim.py for graphing.
simResults.saveTransform(coordRRFTempStar, coordRRFTempPlanet, coordRRFTempMirror)
#t = -1 # use to index the last element of the array
#planetxRRFy = rotTransform(pX[t], pY[t], theta[t])
#planetRRFx[t] = planetxRRFy[0]
#planetRRFy[t] = planetxRRFy[1]
#mirrorxRRFy = rotTransform(mX[t], mY[t], theta[t])
#mirrorRRFx[t] = mirrorRRFy[0]
#mirrorRRFy[t] = mirrorxRRFy[1]
#coordRRFTempPlanet = [planetRRFx[t], planetRRFy[t], 0]
#coordRRFTempMirror = [mirrorRRFx[t], mirrorRRFy[t], 0]
#coordRRFTempStar = [0, 0, 0]
#simResults.saveTransform(coordRRFTempStar, coordRRFTempPlanet, coordRRFTempMirror)
#for t in range(0,ts):
# # Do the transformation and save the rotating reference frame (RRF) coord.
# planetxRRFy = rotTransform(pX[t],pY[t], theta[t])
# planetRRFx[t] = planetxRRFy[0]
# planetRRFy[t] = planetxRRFy[1]
# mirrorxRRFy = rotTransform(mX[t],mY[t],theta[t])
# mirrorRRFx[t] = mirrorxRRFy[0]
# mirrorRRFy[t] = mirrorxRRFy[1]
# coordRRFTempPlanet = [planetRRFx[t], planetRRFy[t], 0]
# coordRRFTempMirror = [mirrorRRFx[t], mirrorRRFy[t], 0]
# coordRRFTempStar = [0, 0, 0] # 14 June 2018 changed x,y from None to 0.
# # Save the transformed coordinates to the simResults object to be used
# # in plotSim.py for graphing.
# simResults.saveTransform(coordRRFTempStar, coordRRFTempPlanet, coordRRFTempMirror)
# debug
print(f"\nno. of RRF coordinate entries: {len(simResults.coordRRFMirror)}\n")
print(f"attempting save no. {nsaves}")
outputSim(astroInputs, simResults, energy, file_dir, plotTypes)
nsaves += 1
current_index += save_interval #+ 1
elif i % save_interval == 0 and i != 0 and not withRRF:
outputSim(astroInputs, simResults, energy, file_dir, plotTypes, RRF=False)
nsaves += 1
else:
outputSim(astroInputs, simResults, energy, file_dir, plotTypes, RRF=False)
nsaves += 1
# If the mirror crashed or escaped orbit, stop the simulation.
# Considered a collision if within a certain distance of planet surface
if (dist < minDist or mirrorVel > escVel):
if (dist <= astroInputs.planetRadius + astroInputs.atmos):
print("Collision with planet.")
if (mirrorVel >= escVel):
print("Mirror reached escape velocity.")
# If the simulation stopped for any other reason, tell the user
# the current stats.
print("Sim stopped before specified orbit.")
print("Distance from planet (m) - Planet Radius + Atmosphere (m): ")
print(" ", dist, " - ", astroInputs.planetRadius + astroInputs.atmos)
print("Mirror Vel (m/s) - Mirror Escape Vel (m/s): ")
print(" ", mirrorVel, " - ", escVel)
# Breaks the integration.
break
#outputSim_new(astroInputs, simResults, file_dir)
print ('simulation end time - ',sim.t)
#########################################
# ATTENTION #
# #
# The below code is now deprecated #
# since all transforms are now done #
# iteratively each timestep #
# #
#########################################
# ---Transform the Coordinates to a Rotating Reference Frame---
# Create arrays for new rotating reference frame coordinates.
planetRRFx = np.zeros(Noutputs + 1)
planetRRFy = np.zeros(Noutputs + 1)
mirrorRRFx = np.zeros(Noutputs + 1)
mirrorRRFy = np.zeros(Noutputs + 1)
# Finding XY coordinates. Don't need Z because the planet orbits in the XY plane.
pX = np.array([x[0] for x in simResults.coordPlanet])
pY = np.array([y[1] for y in simResults.coordPlanet])
pZ = np.array([z[2] for z in simResults.coordPlanet]) #added z info
mX = np.array([x[0] for x in simResults.coordMirror])
mY = np.array([y[1] for y in simResults.coordMirror])
mZ = np.array([z[2] for z in simResults.coordMirror]) #added z info
if astroInputs.starType != None: # If there is a star, calculate the star coordinates too.
sX = np.array([x[0] for x in simResults.coordStar])
sY = np.array([y[1] for y in simResults.coordStar])
sZ = np.array([z[2] for z in simResults.coordStar]) #added z info
# Finding theta (angle of Earth in its orbit).
theta = np.arctan2(pY-sY,pX-sX) # Translate the planet because the star may move.
for t in range(0,ts):
# Do the transformation and save the rotating reference frame (RRF) coord.
planetxRRFy = rotTransform(pX[t]-sX[t],pY[t]-sY[t], theta[t])
planetRRFx[t] = planetxRRFy[0]
planetRRFy[t] = planetxRRFy[1]
mirrorxRRFy = rotTransform(mX[t]-sX[t],mY[t]-sY[t],theta[t])
mirrorRRFx[t] = mirrorxRRFy[0]
mirrorRRFy[t] = mirrorxRRFy[1]
coordRRFTempPlanet = [planetRRFx[t], planetRRFy[t], pZ[t]-sZ[t]]
coordRRFTempMirror = [mirrorRRFx[t], mirrorRRFy[t], mZ[t]-sZ[t]]
# coordRRFTempPlanet = [planetRRFx[t], planetRRFy[t], 0]
# coordRRFTempMirror = [mirrorRRFx[t], mirrorRRFy[t], 0]
coordRRFTempStar = [0, 0, 0] # 14 June 2018 changed x,y from None to 0.
# Save the transformed coordinates to the simResults object to be used
# in plotSim.py for graphing.
simResults.saveTransform(coordRRFTempStar, coordRRFTempPlanet, coordRRFTempMirror)
else:
theta = np.arctan2(pY,pX) # No need to translate the planet if it's at the origin
for t in range(0,ts):
# Do the transformation and save the rotating reference frame (RRF) coord.
planetxRRFy = rotTransform(pX[t],pY[t], theta[t])
planetRRFx[t] = planetxRRFy[0]
planetRRFy[t] = planetxRRFy[1]
mirrorxRRFy = rotTransform(mX[t],mY[t],theta[t])
mirrorRRFx[t] = mirrorxRRFy[0]
mirrorRRFy[t] = mirrorxRRFy[1]
coordRRFTempPlanet = [planetRRFx[t], planetRRFy[t], 0]
coordRRFTempMirror = [mirrorRRFx[t], mirrorRRFy[t], 0]
coordRRFTempStar = [0, 0, 0] # 14 June 2018 changed x,y from None to 0.
# Save the transformed coordinates to the simResults object to be used
# in plotSim.py for graphing.
simResults.saveTransform(coordRRFTempStar, coordRRFTempPlanet, coordRRFTempMirror)
if withRRF:
# save any remaining unsaved data points
nremaining = len(simResults.coordMirror) - nsaves
remaining_indices = []
for idx in range(nremaining):
remaining_indices.append(nsaves + idx)
#for idx in remaining_indices:
# ---Transform the Coordinates to a Rotating Reference Frame---
# Create arrays for new rotating reference frame coordinates.
# Adjusted to transform for one timestep
planetRRFx = np.zeros(save_interval)#np.zeros(1)#ts) #np.zeros(Noutputs + 1)
planetRRFy = np.zeros(save_interval)# np.zeros(1)#ts) #np.zeros(Noutputs + 1)
mirrorRRFx = np.zeros(save_interval)#(1)#ts) #np.zeros(Noutputs + 1)
mirrorRRFy = np.zeros(save_interval)#(1)#ts) #np.zeros(Noutputs + 1)
# Finding XY coordinates. Don't need Z because the planet orbits in the XY plane.
pX = np.array([x[0] for x in simResults.coordPlanet[remaining_indices[0]:]])#current_index:current_index + save_interval]])#[simResults.coordPlanet[-1][0]])#np.array([x[0] for x in simResults.coordPlanet])
pY = np.array([y[1] for y in simResults.coordPlanet[remaining_indices[0]:]])#current_index:current_index + save_interval]])#[simResults.coordPlanet[-1][1]])#np.array([y[1] for y in simResults.coordPlanet])
pZ = np.array([z[2] for z in simResults.coordPlanet[remaining_indices[0]:]])#current_index:current_index + save_interval]])#[simResults.coordPlanet[-1][2]])#np.array([z[2] for z in simResults.coordPlanet]) #added z info
mX = np.array([x[0] for x in simResults.coordMirror[remaining_indices[0]:]])#current_index:current_index+save_interval]])#[simResults.coordMirror[-1][0]])#np.array([x[0] for x in simResults.coordMirror])
mY = np.array([y[1] for y in simResults.coordMirror[remaining_indices[0]:]])#current_index:current_index+save_interval]])#[simResults.coordMirror[-1][1]])#np.array([y[1] for y in simResults.coordMirror])
mZ = np.array([z[2] for z in simResults.coordMirror[remaining_indices[0]:]])#current_index:current_index+save_interval]])#[simResults.coordMirror[-1][2]])#np.array([z[2] for z in simResults.coordMirror]) #added z info
if astroInputs.starType != None: # If there is a star, calculate the star coordinates too.
sX = np.array([x[0] for x in simResults.coordStar[remaining_indices[0]:]])#current_index:current_index+save_interval]])#[simResults.coordStar[-1][0]])#np.array([x[0] for x in simResults.coordStar])
sY = np.array([y[1] for y in simResults.coordStar[remaining_indices[0]:]])#current_index:current_index+save_interval]])#[simResults.coordStar[-1][1]])#np.array([y[1] for y in simResults.coordStar])
sZ = np.array([z[2] for z in simResults.coordStar[remaining_indices[0]:]])#current_index:current_index+save_interval]])#[simResults.coordStar[-1][2]])#np.array([z[2] for z in simResults.coordStar]) #added z info
# Finding theta (angle of Earth in its orbit).
theta = np.arctan2(pY-sY,pX-sX) # Translate the planet because the star may move.
for t in range(len(remaining_indices)):#current_index,current_index + save_interval + 1):
# Do the transformation and save the rotating reference frame (RRF) coord.
planetxRRFy = rotTransform(pX[t]-sX[t],pY[t]-sY[t], theta[t])
planetRRFx[t] = planetxRRFy[0]
planetRRFy[t] = planetxRRFy[1]
mirrorxRRFy = rotTransform(mX[t]-sX[t],mY[t]-sY[t],theta[t])
mirrorRRFx[t] = mirrorxRRFy[0]
mirrorRRFy[t] = mirrorxRRFy[1]
coordRRFTempPlanet = [planetRRFx[t], planetRRFy[t], pZ[t]-sZ[t]]
coordRRFTempMirror = [mirrorRRFx[t], mirrorRRFy[t], mZ[t]-sZ[t]]
coordRRFTempStar = [0, 0, 0] # 14 June 2018 changed x,y from None to 0.
# Save the transformed coordinates to the simResults object to be used
# in plotSim.py for graphing.
simResults.saveTransform(coordRRFTempStar, coordRRFTempPlanet, coordRRFTempMirror)
#t = -1#ts - 1#-1 # use to index the last element of the array
#planetxRRFy = rotTransform(pX[t] - sX[t], pY[t] - sY[t], theta[t])
#planetRRFx[t] = planetxRRFy[0]
#planetRRFy[t] = planetxRRFy[1]
#mirrorxRRFy = rotTransform(mX[t] - sX[t], mY[t] - sY[t], theta[t])
#mirrorRRFx[t] = mirrorxRRFy[0]
#mirrorRRFy[t] = mirrorxRRFy[1]
#coordRRFTempPlanet = [planetRRFx[t], planetRRFy[t], pZ[t] - sZ[t]]
#coordRRFTempMirror = [mirrorRRFx[t], mirrorRRFy[t], mZ[t] - sZ[t]]
#coordRRFTempStar = [0, 0, 0]
#simResults.saveTransform(coordRRFTempStar, coordRRFTempPlanet, coordRRFTempMirror)
#for t in range(0,ts):
# # Do the transformation and save the rotating reference frame (RRF) coord.
# planetxRRFy = rotTransform(pX[t]-sX[t],pY[t]-sY[t], theta[t])
# planetRRFx[t] = planetxRRFy[0]
# planetRRFy[t] = planetxRRFy[1]
# mirrorxRRFy = rotTransform(mX[t]-sX[t],mY[t]-sY[t],theta[t])
# mirrorRRFx[t] = mirrorxRRFy[0]
# mirrorRRFy[t] = mirrorxRRFy[1]
# coordRRFTempPlanet = [planetRRFx[t], planetRRFy[t], pZ[t]-sZ[t]]
# coordRRFTempMirror = [mirrorRRFx[t], mirrorRRFy[t], mZ[t]-sZ[t]]
# coordRRFTempStar = [0, 0, 0] # 14 June 2018 changed x,y from None to 0.
# # Save the transformed coordinates to the simResults object to be used
# # in plotSim.py for graphing.
# simResults.saveTransform(coordRRFTempStar, coordRRFTempPlanet, coordRRFTempMirror)
else:
theta = np.arctan2(pY,pX) # No need to translate the planet if it's at the origin
for t in range(len(remaining_indices)):#save_interval)#0,ts):
# Do the transformation and save the rotating reference frame (RRF) coord.
planetxRRFy = rotTransform(pX[t],pY[t], theta[t])
planetRRFx[t] = planetxRRFy[0]
planetRRFy[t] = planetxRRFy[1]
mirrorxRRFy = rotTransform(mX[t],mY[t],theta[t])
mirrorRRFx[t] = mirrorxRRFy[0]
mirrorRRFy[t] = mirrorxRRFy[1]
coordRRFTempPlanet = [planetRRFx[t], planetRRFy[t], 0]
coordRRFTempMirror = [mirrorRRFx[t], mirrorRRFy[t], 0]
coordRRFTempStar = [0, 0, 0] # 14 June 2018 changed x,y from None to 0.
# Save the transformed coordinates to the simResults object to be used
# in plotSim.py for graphing.
simResults.saveTransform(coordRRFTempStar, coordRRFTempPlanet, coordRRFTempMirror)
#t = -1 # use to index the last element of the array
#planetxRRFy = rotTransform(pX[t], pY[t], theta[t])
#planetRRFx[t] = planetxRRFy[0]
#planetRRFy[t] = planetxRRFy[1]
#mirrorxRRFy = rotTransform(mX[t], mY[t], theta[t])
#mirrorRRFx[t] = mirrorRRFy[0]
#mirrorRRFy[t] = mirrorxRRFy[1]
#coordRRFTempPlanet = [planetRRFx[t], planetRRFy[t], 0]
#coordRRFTempMirror = [mirrorRRFx[t], mirrorRRFy[t], 0]
#coordRRFTempStar = [0, 0, 0]
#simResults.saveTransform(coordRRFTempStar, coordRRFTempPlanet, coordRRFTempMirror)
#for t in range(0,ts):
# # Do the transformation and save the rotating reference frame (RRF) coord.
# planetxRRFy = rotTransform(pX[t],pY[t], theta[t])
# planetRRFx[t] = planetxRRFy[0]
# planetRRFy[t] = planetxRRFy[1]
# mirrorxRRFy = rotTransform(mX[t],mY[t],theta[t])
# mirrorRRFx[t] = mirrorxRRFy[0]
# mirrorRRFy[t] = mirrorxRRFy[1]
# coordRRFTempPlanet = [planetRRFx[t], planetRRFy[t], 0]
# coordRRFTempMirror = [mirrorRRFx[t], mirrorRRFy[t], 0]
# coordRRFTempStar = [0, 0, 0] # 14 June 2018 changed x,y from None to 0.
# # Save the transformed coordinates to the simResults object to be used
# # in plotSim.py for graphing.
# simResults.saveTransform(coordRRFTempStar, coordRRFTempPlanet, coordRRFTempMirror)
outputSim(astroInputs, simResults, energy, file_dir, plotTypes)
if 'energy' in plotTypes: # If we care about energy, return it.
return [energy, ts]
else: # If we don't care about energy, just return the number of timesteps.
return ts
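
# Illustration only: rotTransform() used above is imported from .rotTransform.
# A plausible sketch of such a rotating-reference-frame transform is shown here
# for reference; it assumes the transform rotates (x, y) by -theta so that the
# planet stays on the +x axis. The real implementation in rotTransform.py may
# differ, and this helper is not called anywhere in this file.
def _example_rot_transform(x, y, theta):
    import math
    x_rot = x * math.cos(-theta) - y * math.sin(-theta)
    y_rot = x * math.sin(-theta) + y * math.cos(-theta)
    return [x_rot, y_rot]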
|
[
"numpy.arctan2",
"math.sqrt",
"numpy.zeros",
"numpy.array",
"numpy.linspace"
] |
[((3395, 3434), 'numpy.linspace', 'np.linspace', (['(0)', 'simlength', '(Noutputs + 1)'], {}), '(0, simlength, Noutputs + 1)\n', (3406, 3434), True, 'import numpy as np\n'), ((24428, 24450), 'numpy.zeros', 'np.zeros', (['(Noutputs + 1)'], {}), '(Noutputs + 1)\n', (24436, 24450), True, 'import numpy as np\n'), ((24468, 24490), 'numpy.zeros', 'np.zeros', (['(Noutputs + 1)'], {}), '(Noutputs + 1)\n', (24476, 24490), True, 'import numpy as np\n'), ((24508, 24530), 'numpy.zeros', 'np.zeros', (['(Noutputs + 1)'], {}), '(Noutputs + 1)\n', (24516, 24530), True, 'import numpy as np\n'), ((24548, 24570), 'numpy.zeros', 'np.zeros', (['(Noutputs + 1)'], {}), '(Noutputs + 1)\n', (24556, 24570), True, 'import numpy as np\n'), ((24666, 24714), 'numpy.array', 'np.array', (['[x[0] for x in simResults.coordPlanet]'], {}), '([x[0] for x in simResults.coordPlanet])\n', (24674, 24714), True, 'import numpy as np\n'), ((24724, 24772), 'numpy.array', 'np.array', (['[y[1] for y in simResults.coordPlanet]'], {}), '([y[1] for y in simResults.coordPlanet])\n', (24732, 24772), True, 'import numpy as np\n'), ((24782, 24830), 'numpy.array', 'np.array', (['[z[2] for z in simResults.coordPlanet]'], {}), '([z[2] for z in simResults.coordPlanet])\n', (24790, 24830), True, 'import numpy as np\n'), ((24855, 24903), 'numpy.array', 'np.array', (['[x[0] for x in simResults.coordMirror]'], {}), '([x[0] for x in simResults.coordMirror])\n', (24863, 24903), True, 'import numpy as np\n'), ((24913, 24961), 'numpy.array', 'np.array', (['[y[1] for y in simResults.coordMirror]'], {}), '([y[1] for y in simResults.coordMirror])\n', (24921, 24961), True, 'import numpy as np\n'), ((24971, 25019), 'numpy.array', 'np.array', (['[z[2] for z in simResults.coordMirror]'], {}), '([z[2] for z in simResults.coordMirror])\n', (24979, 25019), True, 'import numpy as np\n'), ((9005, 9057), 'math.sqrt', 'math.sqrt', (['(2 * sim.G * astroInputs.planetMass / dist)'], {}), '(2 * sim.G * astroInputs.planetMass / dist)\n', (9014, 9057), False, 'import math\n'), ((25143, 25189), 'numpy.array', 'np.array', (['[x[0] for x in simResults.coordStar]'], {}), '([x[0] for x in simResults.coordStar])\n', (25151, 25189), True, 'import numpy as np\n'), ((25203, 25249), 'numpy.array', 'np.array', (['[y[1] for y in simResults.coordStar]'], {}), '([y[1] for y in simResults.coordStar])\n', (25211, 25249), True, 'import numpy as np\n'), ((25263, 25309), 'numpy.array', 'np.array', (['[z[2] for z in simResults.coordStar]'], {}), '([z[2] for z in simResults.coordStar])\n', (25271, 25309), True, 'import numpy as np\n'), ((25396, 25424), 'numpy.arctan2', 'np.arctan2', (['(pY - sY)', '(pX - sX)'], {}), '(pY - sY, pX - sX)\n', (25406, 25424), True, 'import numpy as np\n'), ((26527, 26545), 'numpy.arctan2', 'np.arctan2', (['pY', 'pX'], {}), '(pY, pX)\n', (26537, 26545), True, 'import numpy as np\n'), ((28016, 28039), 'numpy.zeros', 'np.zeros', (['save_interval'], {}), '(save_interval)\n', (28024, 28039), True, 'import numpy as np\n'), ((28103, 28126), 'numpy.zeros', 'np.zeros', (['save_interval'], {}), '(save_interval)\n', (28111, 28126), True, 'import numpy as np\n'), ((28191, 28214), 'numpy.zeros', 'np.zeros', (['save_interval'], {}), '(save_interval)\n', (28199, 28214), True, 'import numpy as np\n'), ((28270, 28293), 'numpy.zeros', 'np.zeros', (['save_interval'], {}), '(save_interval)\n', (28278, 28293), True, 'import numpy as np\n'), ((28431, 28502), 'numpy.array', 'np.array', (['[x[0] for x in simResults.coordPlanet[remaining_indices[0]:]]'], {}), '([x[0] for x in 
simResults.coordPlanet[remaining_indices[0]:]])\n', (28439, 28502), True, 'import numpy as np\n'), ((28645, 28716), 'numpy.array', 'np.array', (['[y[1] for y in simResults.coordPlanet[remaining_indices[0]:]]'], {}), '([y[1] for y in simResults.coordPlanet[remaining_indices[0]:]])\n', (28653, 28716), True, 'import numpy as np\n'), ((28859, 28930), 'numpy.array', 'np.array', (['[z[2] for z in simResults.coordPlanet[remaining_indices[0]:]]'], {}), '([z[2] for z in simResults.coordPlanet[remaining_indices[0]:]])\n', (28867, 28930), True, 'import numpy as np\n'), ((29088, 29159), 'numpy.array', 'np.array', (['[x[0] for x in simResults.coordMirror[remaining_indices[0]:]]'], {}), '([x[0] for x in simResults.coordMirror[remaining_indices[0]:]])\n', (29096, 29159), True, 'import numpy as np\n'), ((29300, 29371), 'numpy.array', 'np.array', (['[y[1] for y in simResults.coordMirror[remaining_indices[0]:]]'], {}), '([y[1] for y in simResults.coordMirror[remaining_indices[0]:]])\n', (29308, 29371), True, 'import numpy as np\n'), ((29512, 29583), 'numpy.array', 'np.array', (['[z[2] for z in simResults.coordMirror[remaining_indices[0]:]]'], {}), '([z[2] for z in simResults.coordMirror[remaining_indices[0]:]])\n', (29520, 29583), True, 'import numpy as np\n'), ((8532, 8620), 'math.sqrt', 'math.sqrt', (['((p[2].x - p[1].x) ** 2 + (p[2].y - p[1].y) ** 2 + (p[2].z - p[1].z) ** 2)'], {}), '((p[2].x - p[1].x) ** 2 + (p[2].y - p[1].y) ** 2 + (p[2].z - p[1].\n z) ** 2)\n', (8541, 8620), False, 'import math\n'), ((8628, 8721), 'math.sqrt', 'math.sqrt', (['((p[2].vx - p[1].vx) ** 2 + (p[2].vy - p[1].vy) ** 2 + (p[2].vz - p[1].vz) ** 2\n )'], {}), '((p[2].vx - p[1].vx) ** 2 + (p[2].vy - p[1].vy) ** 2 + (p[2].vz -\n p[1].vz) ** 2)\n', (8637, 8721), False, 'import math\n'), ((8766, 8854), 'math.sqrt', 'math.sqrt', (['((p[2].x - p[0].x) ** 2 + (p[2].y - p[0].y) ** 2 + (p[2].z - p[0].z) ** 2)'], {}), '((p[2].x - p[0].x) ** 2 + (p[2].y - p[0].y) ** 2 + (p[2].z - p[0].\n z) ** 2)\n', (8775, 8854), False, 'import math\n'), ((8862, 8955), 'math.sqrt', 'math.sqrt', (['((p[2].vx - p[0].vx) ** 2 + (p[2].vy - p[0].vy) ** 2 + (p[2].vz - p[0].vz) ** 2\n )'], {}), '((p[2].vx - p[0].vx) ** 2 + (p[2].vy - p[0].vy) ** 2 + (p[2].vz -\n p[0].vz) ** 2)\n', (8871, 8955), False, 'import math\n'), ((13315, 13342), 'numpy.zeros', 'np.zeros', (['(save_interval + 1)'], {}), '(save_interval + 1)\n', (13323, 13342), True, 'import numpy as np\n'), ((13408, 13435), 'numpy.zeros', 'np.zeros', (['(save_interval + 1)'], {}), '(save_interval + 1)\n', (13416, 13435), True, 'import numpy as np\n'), ((13502, 13529), 'numpy.zeros', 'np.zeros', (['(save_interval + 1)'], {}), '(save_interval + 1)\n', (13510, 13529), True, 'import numpy as np\n'), ((13587, 13614), 'numpy.zeros', 'np.zeros', (['(save_interval + 1)'], {}), '(save_interval + 1)\n', (13595, 13614), True, 'import numpy as np\n'), ((13758, 13859), 'numpy.array', 'np.array', (['[x[0] for x in simResults.coordPlanet[current_index:current_index +\n save_interval + 1]]'], {}), '([x[0] for x in simResults.coordPlanet[current_index:current_index +\n save_interval + 1]])\n', (13766, 13859), True, 'import numpy as np\n'), ((13953, 14054), 'numpy.array', 'np.array', (['[y[1] for y in simResults.coordPlanet[current_index:current_index +\n save_interval + 1]]'], {}), '([y[1] for y in simResults.coordPlanet[current_index:current_index +\n save_interval + 1]])\n', (13961, 14054), True, 'import numpy as np\n'), ((14148, 14249), 'numpy.array', 'np.array', (['[z[2] for z in 
simResults.coordPlanet[current_index:current_index +\n save_interval + 1]]'], {}), '([z[2] for z in simResults.coordPlanet[current_index:current_index +\n save_interval + 1]])\n', (14156, 14249), True, 'import numpy as np\n'), ((14358, 14459), 'numpy.array', 'np.array', (['[x[0] for x in simResults.coordMirror[current_index:current_index +\n save_interval + 1]]'], {}), '([x[0] for x in simResults.coordMirror[current_index:current_index +\n save_interval + 1]])\n', (14366, 14459), True, 'import numpy as np\n'), ((14551, 14652), 'numpy.array', 'np.array', (['[y[1] for y in simResults.coordMirror[current_index:current_index +\n save_interval + 1]]'], {}), '([y[1] for y in simResults.coordMirror[current_index:current_index +\n save_interval + 1]])\n', (14559, 14652), True, 'import numpy as np\n'), ((14744, 14845), 'numpy.array', 'np.array', (['[z[2] for z in simResults.coordMirror[current_index:current_index +\n save_interval + 1]]'], {}), '([z[2] for z in simResults.coordMirror[current_index:current_index +\n save_interval + 1]])\n', (14752, 14845), True, 'import numpy as np\n'), ((29842, 29911), 'numpy.array', 'np.array', (['[x[0] for x in simResults.coordStar[remaining_indices[0]:]]'], {}), '([x[0] for x in simResults.coordStar[remaining_indices[0]:]])\n', (29850, 29911), True, 'import numpy as np\n'), ((30052, 30121), 'numpy.array', 'np.array', (['[y[1] for y in simResults.coordStar[remaining_indices[0]:]]'], {}), '([y[1] for y in simResults.coordStar[remaining_indices[0]:]])\n', (30060, 30121), True, 'import numpy as np\n'), ((30262, 30331), 'numpy.array', 'np.array', (['[z[2] for z in simResults.coordStar[remaining_indices[0]:]]'], {}), '([z[2] for z in simResults.coordStar[remaining_indices[0]:]])\n', (30270, 30331), True, 'import numpy as np\n'), ((30549, 30577), 'numpy.arctan2', 'np.arctan2', (['(pY - sY)', '(pX - sX)'], {}), '(pY - sY, pX - sX)\n', (30559, 30577), True, 'import numpy as np\n'), ((33363, 33381), 'numpy.arctan2', 'np.arctan2', (['pY', 'pX'], {}), '(pY, pX)\n', (33373, 33381), True, 'import numpy as np\n'), ((15347, 15446), 'numpy.array', 'np.array', (['[x[0] for x in simResults.coordStar[current_index:current_index +\n save_interval + 1]]'], {}), '([x[0] for x in simResults.coordStar[current_index:current_index +\n save_interval + 1]])\n', (15355, 15446), True, 'import numpy as np\n'), ((15538, 15637), 'numpy.array', 'np.array', (['[y[1] for y in simResults.coordStar[current_index:current_index +\n save_interval + 1]]'], {}), '([y[1] for y in simResults.coordStar[current_index:current_index +\n save_interval + 1]])\n', (15546, 15637), True, 'import numpy as np\n'), ((15729, 15828), 'numpy.array', 'np.array', (['[z[2] for z in simResults.coordStar[current_index:current_index +\n save_interval + 1]]'], {}), '([z[2] for z in simResults.coordStar[current_index:current_index +\n save_interval + 1]])\n', (15737, 15828), True, 'import numpy as np\n'), ((16157, 16185), 'numpy.arctan2', 'np.arctan2', (['(pY - sY)', '(pX - sX)'], {}), '(pY - sY, pX - sX)\n', (16167, 16185), True, 'import numpy as np\n'), ((19310, 19328), 'numpy.arctan2', 'np.arctan2', (['pY', 'pX'], {}), '(pY, pX)\n', (19320, 19328), True, 'import numpy as np\n')]
|
from functools import lru_cache
import torch
import numpy as np
from nltk.tokenize import word_tokenize
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
class Evaluator:
def __init__(self, corpus, n_ref, sample_params=None,
blue_span=(2, 5), blue_smooth='epsilon'):
self.corpus = corpus
self.n_ref = n_ref
self.sample_params = sample_params or {}
# BLEU
self.blue_weights = [
(i, np.array([1 / i] * i + [0] * (blue_span[1] - i)))
for i in range(blue_span[0], blue_span[1] + 1)
]
if blue_smooth == 'epsilon':
# Adds epsilon to zero counts
self.blue_smooth = SmoothingFunction().method1
else:
self.blue_smooth = SmoothingFunction().method0
        # Preload the reference splits; this may take some time...
for mode in ('train', 'val', 'test'):
self._get_reference(mode)
def bleu(self, model, n_hypot, split):
"""Calculating similarity metric, higher is better"""
references = self._get_reference(split)
hypotheses = self._get_hypotheses(model, n_hypot)
result = {}
for i, w in self.blue_weights:
result[f'{i}-gram'] = np.mean([
sentence_bleu(references, h,
weights=w, smoothing_function=self.blue_smooth)
for h in hypotheses
])
return result
def self_bleu(self, model=None, n_hypot=None, split=None):
"""Calculating diversity metric, lower is better"""
if model is not None and n_hypot is not None:
hypotheses = self._get_hypotheses(model, n_hypot or self.n_ref)
else:
hypotheses = self._get_reference(split)
result = {}
for i, w in self.blue_weights:
result[f'{i}-gram'] = np.mean([
sentence_bleu(hypotheses[:j] + hypotheses[j + 1:],
hypotheses[j],
weights=w, smoothing_function=self.blue_smooth)
for j in range(len(hypotheses))
])
return result
def perplexity(self, model, split):
ppl = []
batcher = self.corpus.batcher(split, 'unlabeled')
for x in batcher:
ppl.append(model.perplexity(x, use_c_prior=True))
return torch.stack(ppl).mean().item()
@lru_cache(maxsize=None)
def _get_reference(self, split):
batcher = self.corpus.batcher(split, 'unlabeled',
n_batch=1, device=torch.device('cpu'))
vocab = self.corpus.vocab('x')
result = []
for x in batcher:
if len(result) == self.n_ref:
break
result.append([vocab.itos[i] for i in x[0]
if i != vocab.stoi['<pad>']])
return result
def _get_hypotheses(self, model, n_hypot):
vocab = self.corpus.vocab('x')
hypotheses = [
[vocab.itos[i] for i in sent if i != vocab.stoi['<pad>']]
for sent in model.sample_sentence(n_hypot,
**self.sample_params)[2]
]
return hypotheses
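
# Illustration only: a small, self-contained example of the BLEU weighting
# scheme used by Evaluator above (uniform weights over the first i n-grams,
# padded with zeros, plus epsilon smoothing). The sentences are made up and
# this helper is not used by the class.
def _bleu_weighting_example():
    references = [['the', 'cat', 'sat', 'on', 'the', 'mat']]
    hypothesis = ['the', 'cat', 'is', 'on', 'the', 'mat']
    # 2-gram score with weights (1/2, 1/2, 0, 0), as built in Evaluator.blue_weights
    return sentence_bleu(references, hypothesis,
                         weights=(0.5, 0.5, 0.0, 0.0),
                         smoothing_function=SmoothingFunction().method1)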
|
[
"torch.stack",
"nltk.translate.bleu_score.sentence_bleu",
"numpy.array",
"torch.device",
"nltk.translate.bleu_score.SmoothingFunction",
"functools.lru_cache"
] |
[((2420, 2443), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': 'None'}), '(maxsize=None)\n', (2429, 2443), False, 'from functools import lru_cache\n'), ((479, 527), 'numpy.array', 'np.array', (['([1 / i] * i + [0] * (blue_span[1] - i))'], {}), '([1 / i] * i + [0] * (blue_span[1] - i))\n', (487, 527), True, 'import numpy as np\n'), ((708, 727), 'nltk.translate.bleu_score.SmoothingFunction', 'SmoothingFunction', ([], {}), '()\n', (725, 727), False, 'from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction\n'), ((781, 800), 'nltk.translate.bleu_score.SmoothingFunction', 'SmoothingFunction', ([], {}), '()\n', (798, 800), False, 'from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction\n'), ((2595, 2614), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2607, 2614), False, 'import torch\n'), ((1284, 1360), 'nltk.translate.bleu_score.sentence_bleu', 'sentence_bleu', (['references', 'h'], {'weights': 'w', 'smoothing_function': 'self.blue_smooth'}), '(references, h, weights=w, smoothing_function=self.blue_smooth)\n', (1297, 1360), False, 'from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction\n'), ((1905, 2022), 'nltk.translate.bleu_score.sentence_bleu', 'sentence_bleu', (['(hypotheses[:j] + hypotheses[j + 1:])', 'hypotheses[j]'], {'weights': 'w', 'smoothing_function': 'self.blue_smooth'}), '(hypotheses[:j] + hypotheses[j + 1:], hypotheses[j], weights=w,\n smoothing_function=self.blue_smooth)\n', (1918, 2022), False, 'from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction\n'), ((2383, 2399), 'torch.stack', 'torch.stack', (['ppl'], {}), '(ppl)\n', (2394, 2399), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 3 21:22:35 2019
@author: Reuben
The persist module helps the user save and load Box instances. It can be
extended for use with any handler.
The module instantiates a Manager class and binds its load and save methods
at module level for easier use by client code.
"""
import json
import zlib
import numpy as np
from .box import Box
from . import adapters, variable, constants
def serialise_vars(box):
''' Turn all the variables in a Box into a list of strings '''
keys = box.keys(dependent=True, independent=True)
lst = [k.to_str() for k in keys if isinstance(k, variable.Variable)]
lst.sort()
return lst
def deserialise_vars(lst):
''' Turn a list of Variable strings into a dictionary of Variables '''
variables = [variable.Variable.from_str(s) for s in lst]
return {v.key: v for v in variables}
def make_pack(box):
    ''' Prepare a box for persistence by including Variable data '''
return {'data': list(box),
'vars': serialise_vars(box)}
def unpack(pack):
    ''' Recreate a box from persistence, including Variables as keys '''
box_data = pack['data']
if 'vars' not in pack:
pack['vars'] = []
var_dict = deserialise_vars(pack['vars'])
aliases = variable.Aliases(var_dict)
return aliases.translate(box_data)
class Manager():
''' The manager looks after different classes that can process Box data.
It provides two key methods - load and save.
'''
default_handler = 'cbox'
def __init__(self, default_enabled=True):
self.handlers = {'box': JSON(),
'cbox': CJSON(),
'npz': NPZ()}
self.specified = None
        self.default_enabled = default_enabled
def add_handler(self, key, handler):
''' Add a handler
Args:
key (str): The unique name of the handler.
handler (Handler): A Handler subclass.
'''
self.handlers[key] = handler
def del_handler(self, key):
''' Delete a handler
Args:
key (str): The unique name of the handler
        '''
        self.handlers.pop(key, None)
def specify(self, key):
''' Specify the handler to use
Args:
key (str): The unique name of the handler.
'''
self.specified = key
def _load(self, source, handler=None, **kwargs):
h = handler if handler is not None else None
if h is None and self.specified is not None:
h = self.specified
if h is None:
for name, handler in self.handlers.items():
if handler.suitable(source, **kwargs):
h = name
if h is None and self.default_enabled:
h = self.default_handler
source += '.cbox'
if h is None:
raise ValueError('No valid handler found for source: ' +
str(source))
return self.handlers[h].load(source, **kwargs)
def load(self, source, handler=None, as_box=True, **kwargs):
''' Return a new Box by reading the source
Args:
source (str): The source to load from
handler (str): Optional key specifying which handler to use
as_box (bool): If true (the default), return a Box, otherwise
return just the raw data.
kwargs: Other keyword arguments passed to the handler.
'''
pack = self._load(source, handler, **kwargs)
ret = unpack(pack)
if as_box:
return Box(ret)
else:
return ret
def save(self, box, target, handler=None, **kwargs):
''' Save the Box data to the target
Args:
box (Box): The Box instance.
target (str): A string specifying the target
handler (str): Optional key specifying which handler to use
kwargs: Other keyword arguments passed to the handler.
'''
pack = make_pack(box)
h = handler if handler is not None else None
if h is None and self.specified is not None:
h = self.specified
if h is None:
for name, handler in self.handlers.items():
if handler.suitable(target, **kwargs):
h = name
if h is None and self.default_enabled:
h = self.default_handler
target += '.cbox'
if h is None:
raise ValueError('No valid handler found for target: ' +
str(target))
return self.handlers[h].save(pack, target, **kwargs)
class Handler():
''' A handler that can load or save Box data '''
def suitable(self, s, **kwargs):
''' Return true the handler suits the target/source string, s '''
raise NotImplementedError
def save(self, box, fname, **kwargs):
''' Save the box '''
raise NotImplementedError
def load(self, fname, **kwargs):
''' Load a box
Returns:
list: The raw data for the Box
'''
raise NotImplementedError
class JSONEncoder(json.JSONEncoder):
''' A custom encoder to encode numpy arrays '''
def default(self, obj):
if isinstance(obj, np.ndarray):
return {'__ndarray__': obj.tolist(), 'dtype': str(obj.dtype)}
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj)
def np_decode(dct):
''' A custom decoder to handle numpy arrays '''
if '__ndarray__' in dct:
return np.array(dct['__ndarray__'], dtype=dct['dtype'])
return dct
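
# Illustration only: a round trip through the numpy-aware encoder/decoder
# defined above. An ndarray is serialised to a dict tagged '__ndarray__' by
# JSONEncoder and restored with the same dtype by np_decode on load.
def _json_roundtrip_example():
    arr = np.array([1.0, 2.0, 3.0])
    jsn = json.dumps({'a': arr}, cls=JSONEncoder)
    restored = json.loads(jsn, object_hook=np_decode)
    return restored['a']  # numpy array equal to arr, dtype float64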
class JSON(Handler):
''' Load and save in JSON format '''
def suitable(self, fname, **kwargs):
if fname.endswith('.box'):
return True
def save(self, box, fname, **kwargs):
''' Save a Box to a file
Args:
box (Box): The Box instance.
fname (str): A string specifying the full filename
'''
jsn = json.dumps(box, cls=JSONEncoder)
with open(fname, mode='w') as f:
f.write(jsn)
def load(self, fname, **kwargs):
''' Return box data by reading the source
Args:
fname (str): The file to load from
Returns:
list: A list of box data
'''
with open(fname, mode='r') as f:
jsn = f.read()
lst = json.loads(jsn, object_hook=np_decode)
return lst
class CJSON(Handler):
''' Load and save in compressed JSON format '''
def suitable(self, fname, **kwargs):
if fname.endswith('.cbox'):
return True
def save(self, box, fname, **kwargs):
''' Save a Box to a file
Args:
box (Box): The Box instance.
fname (str): A string specifying the full filename
'''
jsn = json.dumps(box, cls=JSONEncoder)
compressed = zlib.compress(jsn.encode(), level=9)
with open(fname, mode='wb') as f:
f.write(compressed)
def load(self, fname, **kwargs):
''' Return box data by reading the source
Args:
fname (str): The file to load from
Returns:
list: A list of box data
'''
with open(fname, mode='rb') as f:
compressed = f.read()
jsn = zlib.decompress(compressed).decode()
lst = json.loads(jsn, object_hook=np_decode)
return lst
class NPZ(Handler):
    ''' Load and save in compressed numpy .npz format '''
def suitable(self, fname, **kwargs):
if fname.endswith('.npz'):
return True
def save(self, box, fname, **kwargs):
''' Save a Box to a file
Args:
box (Box): The Box instance.
fname (str): A string specifying the full filename
'''
flat = adapters.flat_dict(box)
np.savez_compressed(fname, **flat, **kwargs)
def load(self, fname, **kwargs):
''' Return box data by reading the source
Args:
fname (str): The file to load from
Returns:
list: A list of box data
'''
def itemise_scalars(obj):
if obj.ndim==0:
return obj.item()
return obj
flat = {}
with np.load(fname, **kwargs) as data:
for key in list(data.keys()):
flat[key] = itemise_scalars(data[key])
composed = adapters.compose(flat)
for row in composed['data']:
for k in [constants.DEP, constants.INDEP]:
if k not in row:
row[k] = {}
return composed
manager = Manager()
load = manager.load
save = manager.save
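
# Illustration only: typical use of the module-level helpers bound above.
# `example_box` stands for an existing Box instance and the filename is made
# up; the handler is chosen from the extension ('.box', '.cbox' or '.npz'),
# falling back to the default compressed handler when none matches.
def _persist_roundtrip_example(example_box, fname='example.cbox'):
    save(example_box, fname)   # CJSON handler selected from the '.cbox' extension
    return load(fname)         # returns a new Box built from the saved data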
|
[
"numpy.load",
"json.loads",
"json.dumps",
"numpy.savez_compressed",
"numpy.array",
"zlib.decompress",
"json.JSONEncoder.default"
] |
[((5526, 5561), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (5550, 5561), False, 'import json\n'), ((5679, 5727), 'numpy.array', 'np.array', (["dct['__ndarray__']"], {'dtype': "dct['dtype']"}), "(dct['__ndarray__'], dtype=dct['dtype'])\n", (5687, 5727), True, 'import numpy as np\n'), ((6149, 6181), 'json.dumps', 'json.dumps', (['box'], {'cls': 'JSONEncoder'}), '(box, cls=JSONEncoder)\n', (6159, 6181), False, 'import json\n'), ((6572, 6610), 'json.loads', 'json.loads', (['jsn'], {'object_hook': 'np_decode'}), '(jsn, object_hook=np_decode)\n', (6582, 6610), False, 'import json\n'), ((7045, 7077), 'json.dumps', 'json.dumps', (['box'], {'cls': 'JSONEncoder'}), '(box, cls=JSONEncoder)\n', (7055, 7077), False, 'import json\n'), ((7593, 7631), 'json.loads', 'json.loads', (['jsn'], {'object_hook': 'np_decode'}), '(jsn, object_hook=np_decode)\n', (7603, 7631), False, 'import json\n'), ((8096, 8140), 'numpy.savez_compressed', 'np.savez_compressed', (['fname'], {}), '(fname, **flat, **kwargs)\n', (8115, 8140), True, 'import numpy as np\n'), ((8542, 8566), 'numpy.load', 'np.load', (['fname'], {}), '(fname, **kwargs)\n', (8549, 8566), True, 'import numpy as np\n'), ((7542, 7569), 'zlib.decompress', 'zlib.decompress', (['compressed'], {}), '(compressed)\n', (7557, 7569), False, 'import zlib\n')]
|
import torch
from model.base_model import BaseModel
from model.networks import base_function, external_function
import model.networks as network
from util import task, util
import itertools
import data as Dataset
import numpy as np
from itertools import islice
import random
import os
import matplotlib.pyplot as plt
from collections import OrderedDict
import glob
import cv2
class Face(BaseModel):
"""
Face Image Animation using edge image
"""
def name(self):
return "Face Image Animation"
@staticmethod
def modify_options(parser, is_train=True):
"""Add new options and rewrite default values for existing options"""
parser.add_argument('--attn_layer', action=util.StoreList, metavar="VAL1,VAL2...")
parser.add_argument('--kernel_size', action=util.StoreDictKeyPair, metavar="KEY1=VAL1,KEY2=VAL2...")
parser.add_argument('--layers', type=int, default=3, help='number of layers in G')
parser.add_argument('--netG', type=str, default='face', help='The name of net Generator')
parser.add_argument('--netD', type=str, default='res', help='The name of net Discriminator')
parser.add_argument('--netD_V', type=str, default='res', help='The name of net Discriminator')
parser.add_argument('--init_type', type=str, default='orthogonal', help='Initial type')
# if is_train:
parser.add_argument('--ratio_g2d', type=float, default=0.1, help='learning rate ratio G to D')
parser.add_argument('--lambda_rec', type=float, default=5.0, help='weight for image reconstruction loss')
parser.add_argument('--lambda_g', type=float, default=2.0, help='weight for generation loss')
parser.add_argument('--lambda_correct', type=float, default=5.0, help='weight for Sampling Correctness loss')
parser.add_argument('--lambda_style', type=float, default=500.0, help='weight for the VGG19 style loss')
parser.add_argument('--lambda_content', type=float, default=0.5, help='weight for the VGG19 content loss')
parser.add_argument('--lambda_regularization', type=float, default=0.0025, help='weight for the affine regularization loss')
parser.add_argument('--frames_D_V', type=int, default=3, help='number of frames of D_V')
parser.add_argument('--use_spect_g', action='store_false')
parser.add_argument('--use_spect_d', action='store_false')
parser.set_defaults(use_spect_g=False)
parser.set_defaults(use_spect_d=True)
parser.set_defaults(display_freq=100)
parser.set_defaults(eval_iters_freq=1000)
parser.set_defaults(print_freq=100)
parser.set_defaults(save_latest_freq=1000)
parser.set_defaults(save_iters_freq=10000)
return parser
def __init__(self, opt):
BaseModel.__init__(self, opt)
self.loss_names = ['app_gen','correctness_p', 'correctness_r','content_gen','style_gen',
'regularization_p', 'regularization_r',
'ad_gen','dis_img_gen',
'ad_gen_v', 'dis_img_gen_v']
self.visual_names = ['P_reference','BP_reference', 'P_frame_step','BP_frame_step','img_gen', 'flow_fields', 'masks']
self.model_names = ['G','D','D_V']
self.FloatTensor = torch.cuda.FloatTensor if len(self.gpu_ids)>0 \
else torch.FloatTensor
self.ByteTensor = torch.cuda.ByteTensor if len(self.gpu_ids)>0 \
else torch.ByteTensor
# define the Animation model
self.net_G = network.define_g(opt, image_nc=opt.image_nc, structure_nc=opt.structure_nc, ngf=64, img_f=512,
layers=opt.layers, num_blocks=2, use_spect=opt.use_spect_g, attn_layer=opt.attn_layer,
norm='instance', activation='LeakyReLU', extractor_kz=opt.kernel_size)
if len(opt.gpu_ids) > 1:
self.net_G = torch.nn.DataParallel(self.net_G, device_ids=self.gpu_ids)
self.flow2color = util.flow2color()
self.net_D = network.define_d(opt, ndf=32, img_f=128, layers=4, use_spect=opt.use_spect_d)
if len(opt.gpu_ids) > 1:
self.net_D = torch.nn.DataParallel(self.net_D, device_ids=self.gpu_ids)
input_nc = (opt.frames_D_V-1) * opt.image_nc
self.net_D_V = network.define_d(opt, input_nc=input_nc, ndf=32, img_f=128, layers=4, use_spect=opt.use_spect_d)
if len(opt.gpu_ids) > 1:
self.net_D_V = torch.nn.DataParallel(self.net_D_V, device_ids=self.gpu_ids)
if self.isTrain:
# define the loss functions
self.GANloss = external_function.AdversarialLoss(opt.gan_mode).to(opt.device)
self.L1loss = torch.nn.L1Loss()
self.L2loss = torch.nn.MSELoss()
self.Correctness = external_function.PerceptualCorrectness().to(opt.device)
self.Regularization = external_function.MultiAffineRegularizationLoss(kz_dic=opt.kernel_size).to(opt.device)
self.Vggloss = external_function.VGGLoss().to(opt.device)
# define the optimizer
self.optimizer_G = torch.optim.Adam(itertools.chain(
filter(lambda p: p.requires_grad, self.net_G.parameters())),
lr=opt.lr, betas=(0.0, 0.999))
self.optimizers.append(self.optimizer_G)
self.optimizer_D = torch.optim.Adam(itertools.chain(
filter(lambda p: p.requires_grad, self.net_D.parameters()),
filter(lambda p: p.requires_grad, self.net_D_V.parameters())),
lr=opt.lr*opt.ratio_g2d, betas=(0.0, 0.999))
self.optimizers.append(self.optimizer_D)
else:
self.results_dir_base = self.opt.results_dir
self.setup(opt)
def set_input(self, data):
# move to GPU and change data types
opt = self.opt
_, n_frames_total, self.height, self.width = data['P'].size() #
self.n_frames_total = n_frames_total // opt.image_nc
n_frames_load = opt.max_frames_per_gpu # number of total frames loaded into GPU at a time for each batch
self.n_frames_load = min(n_frames_load, self.n_frames_total)
if self.isTrain:
self.P_reference = data['P'][:,:opt.image_nc, ...].cuda()
self.BP_reference = data['BP'][:, :opt.structure_nc, ...].cuda()
self.P_previous = None
self.BP_previous = None
self.P_images = data['P']
self.BP_structures = data['BP']
self.image_paths = [path[0] for path in data['P_path']]
self.change_seq=data['change_seq']
if not self.isTrain:
assert self.opt.batchSize == 1
if data['frame_idx'] == self.opt.start_frame + self.opt.n_frames_pre_load_test:
self.P_previous = None
self.BP_previous = None
self.P_reference = data['P'][:,:opt.image_nc, ...].cuda()
self.BP_reference = data['BP'][:, :opt.structure_nc, ...].cuda()
# else:
# self.P_previous = self.test_generated
# self.BP_previous = self.test_BP_previous
self.opt.results_dir = os.path.join(self.results_dir_base,
self.image_paths[0].split('/')[-2])
def write2video(self, name_list):
images=[]
for name in name_list:
images.append(sorted(glob.glob(self.opt.results_dir+'/*_'+name+'.png')))
image_array=[]
for i in range(len(images[0])):
cat_im=None
for image_list in images:
im = cv2.imread(image_list[i])
if cat_im is not None:
cat_im = np.concatenate((cat_im, im), axis=1)
else:
cat_im = im
image_array.append(cat_im)
res=''
for name in name_list:
res += (name +'_')
out_name = self.opt.results_dir+'_'+res+'.mp4'
print('write video %s'%out_name)
height, width, layers = cat_im.shape
size = (width,height)
out = cv2.VideoWriter(out_name, cv2.VideoWriter_fourcc(*'mp4v'), 15, size)
for i in range(len(image_array)):
out.write(image_array[i])
out.release()
def get_current_visuals(self):
"""Return visualization images"""
visual_ret = OrderedDict()
for name in self.visual_names:
if isinstance(name, str):
value = getattr(self, name)
if 'frame_step' in name:
value = value[0]
list_value=[]
for i in range(value.size(0)):
list_value.append(value[i].unsqueeze(0))
value=list_value
if 'flow_field' in name or 'masks' in name:
list_value = [item for sub_list in value for item in sub_list]
value = list_value
if isinstance(value, list):
                    # visualize multi-scale outputs
for i in range(len(value)):
visual_ret[name + str(i)] = self.convert2im(value[i], name)
# visual_ret[name] = util.tensor2im(value[-1].data)
else:
visual_ret[name] =self.convert2im(value, name)
return visual_ret
def test(self, save_features=False, save_all=False, generate_edge=True):
"""Forward function used in test time"""
# img_gen, flow_fields, masks = self.net_G(self.input_P1, self.input_BP1, self.input_BP2)
height, width = self.height, self.width
image_nc, structure_nc = self.opt.image_nc, self.opt.structure_nc
n_frames_pre_load = self.opt.n_frames_pre_load_test
self.BP_frame_step = self.BP_structures.view(-1, n_frames_pre_load, structure_nc, height, width).cuda()
self.test_generated, self.flow_fields, self.masks, _ = self.net_G(self.BP_frame_step,
self.P_reference,
self.BP_reference,
self.P_previous,
self.BP_previous)
self.P_previous = self.test_generated[-1]
self.BP_previous = self.BP_frame_step[:,-1,... ]
self.test_generated = torch.cat(self.test_generated, 0)
self.save_results(self.test_generated, data_name='vis', data_ext='png')
if generate_edge:
value = self.BP_frame_step[:,:,0:1,...][0]
value = (1-value)*2-1
self.save_results(value, data_name='edge', data_ext='png')
if self.change_seq:
name_list=[] if not generate_edge else ['edge']
name_list.append('vis')
print(self.opt.results_dir)
self.write2video(name_list)
def update(self):
"""Run forward processing to get the inputs"""
image_nc, structure_nc = self.opt.image_nc, self.opt.structure_nc
n_frames_total, n_frames_load = self.n_frames_total, self.n_frames_load
height, width = self.height, self.width
for i in range(0, n_frames_total, n_frames_load):
self.P_frame_step = self.P_images[:, i*image_nc:(i+n_frames_load)*image_nc].cuda()
self.BP_frame_step = self.BP_structures[:, i*structure_nc:(i+n_frames_load)*structure_nc].cuda()
self.P_frame_step = self.P_frame_step.view(-1, n_frames_load, image_nc, height, width)
self.BP_frame_step = self.BP_frame_step.view(-1, n_frames_load, structure_nc, height, width)
self.img_gen, self.flow_fields, self.masks, self.P_previous_recoder = self.net_G(self.BP_frame_step,
self.P_reference,
self.BP_reference,
self.P_previous,
self.BP_previous)
self.optimizer_D.zero_grad()
self.backward_D()
self.optimizer_D.step()
self.optimizer_G.zero_grad()
self.backward_G()
self.optimizer_G.step()
self.P_previous = self.img_gen[-1].detach()
self.BP_previous = self.BP_frame_step[:,-1,...].detach()
def backward_D_basic(self, netD, real, fake):
"""Calculate GAN loss for the discriminator"""
# Real
D_real = netD(real)
D_real_loss = self.GANloss(D_real, True, True)
# fake
D_fake = netD(fake.detach())
D_fake_loss = self.GANloss(D_fake, False, True)
# loss for discriminator
D_loss = (D_real_loss + D_fake_loss) * 0.5
# if print_loss:
# print(D_real_loss)
# print(D_fake_loss)
# gradient penalty for wgan-gp
if self.opt.gan_mode == 'wgangp':
gradient_penalty, gradients = external_function.cal_gradient_penalty(netD, real, fake.detach())
D_loss += gradient_penalty
D_loss.backward()
return D_loss
def backward_D(self):
"""Calculate the GAN loss for the discriminators"""
base_function._unfreeze(self.net_D)
i = np.random.randint(len(self.img_gen))
fake = self.img_gen[i]
real = self.P_frame_step[:,i,...]
self.loss_dis_img_gen = self.backward_D_basic(self.net_D, real, fake)
base_function._unfreeze(self.net_D_V)
i = np.random.randint(len(self.img_gen)-self.opt.frames_D_V+1)
# fake = [self.img_gen[i]]
# real = [self.P_frame_step[:,i,...]]
fake = []
real = []
for frame in range(self.opt.frames_D_V-1):
fake.append(self.img_gen[i+frame]-self.img_gen[i+frame+1])
real.append(self.P_frame_step[:,i+frame,...]
-self.P_frame_step[:,i+frame+1,...])
fake = torch.cat(fake, dim=1)
real = torch.cat(real, dim=1)
self.loss_dis_img_gen_v = self.backward_D_basic(self.net_D_V, real, fake)
def backward_G(self):
"""Calculate training loss for the generator"""
# gen_tensor = torch.cat([v.unsqueeze(1) for v in self.img_gen], 1)
loss_style_gen, loss_content_gen, loss_app_gen=0,0,0
for i in range(len(self.img_gen)):
gen = self.img_gen[i]
gt = self.P_frame_step[:,i,...]
loss_app_gen += self.L1loss(gen, gt)
content_gen, style_gen = self.Vggloss(gen, gt)
loss_style_gen += style_gen
loss_content_gen += content_gen
self.loss_style_gen = loss_style_gen * self.opt.lambda_style
self.loss_content_gen = loss_content_gen * self.opt.lambda_content
self.loss_app_gen = loss_app_gen * self.opt.lambda_rec
loss_correctness_p, loss_regularization_p=0, 0
loss_correctness_r, loss_regularization_r=0, 0
for i in range(len(self.flow_fields)):
flow_field_i = self.flow_fields[i]
flow_p, flow_r=[],[]
for j in range(0, len(flow_field_i), 2):
flow_p.append(flow_field_i[j])
flow_r.append(flow_field_i[j+1])
correctness_r = self.Correctness(self.P_frame_step[:,i,...], self.P_reference,
flow_r, self.opt.attn_layer)
correctness_p = self.Correctness(self.P_frame_step[:,i,...], self.P_previous_recoder[i].detach(),
flow_p, self.opt.attn_layer)
loss_correctness_p += correctness_p
loss_correctness_r += correctness_r
loss_regularization_p += self.Regularization(flow_p)
loss_regularization_r += self.Regularization(flow_r)
self.loss_correctness_p = loss_correctness_p * self.opt.lambda_correct
self.loss_correctness_r = loss_correctness_r * self.opt.lambda_correct
self.loss_regularization_p = loss_regularization_p * self.opt.lambda_regularization
self.loss_regularization_r = loss_regularization_r * self.opt.lambda_regularization
# rec loss fake
base_function._freeze(self.net_D)
i = np.random.randint(len(self.img_gen))
fake = self.img_gen[i]
D_fake = self.net_D(fake)
self.loss_ad_gen = self.GANloss(D_fake, True, False) * self.opt.lambda_g
##########################################################################
base_function._freeze(self.net_D_V)
i = np.random.randint(len(self.img_gen)-self.opt.frames_D_V+1)
# fake = [self.img_gen[i]]
fake = []
for frame in range(self.opt.frames_D_V-1):
fake.append(self.img_gen[i+frame]-self.img_gen[i+frame+1])
fake = torch.cat(fake, dim=1)
D_fake = self.net_D_V(fake)
self.loss_ad_gen_v = self.GANloss(D_fake, True, False) * self.opt.lambda_g
##########################################################################
total_loss = 0
for name in self.loss_names:
if name != 'dis_img_gen_v' and name != 'dis_img_gen':
total_loss += getattr(self, "loss_" + name)
total_loss.backward()
def optimize_parameters(self):
self.update()
|
[
"cv2.VideoWriter_fourcc",
"torch.cat",
"util.util.flow2color",
"model.networks.external_function.PerceptualCorrectness",
"model.networks.base_function._freeze",
"glob.glob",
"torch.nn.MSELoss",
"model.networks.define_g",
"model.networks.base_function._unfreeze",
"model.networks.external_function.AdversarialLoss",
"model.networks.external_function.MultiAffineRegularizationLoss",
"model.networks.external_function.VGGLoss",
"numpy.concatenate",
"model.networks.define_d",
"torch.nn.L1Loss",
"model.base_model.BaseModel.__init__",
"cv2.imread",
"collections.OrderedDict",
"torch.nn.DataParallel"
] |
[((2812, 2841), 'model.base_model.BaseModel.__init__', 'BaseModel.__init__', (['self', 'opt'], {}), '(self, opt)\n', (2830, 2841), False, 'from model.base_model import BaseModel\n'), ((3570, 3836), 'model.networks.define_g', 'network.define_g', (['opt'], {'image_nc': 'opt.image_nc', 'structure_nc': 'opt.structure_nc', 'ngf': '(64)', 'img_f': '(512)', 'layers': 'opt.layers', 'num_blocks': '(2)', 'use_spect': 'opt.use_spect_g', 'attn_layer': 'opt.attn_layer', 'norm': '"""instance"""', 'activation': '"""LeakyReLU"""', 'extractor_kz': 'opt.kernel_size'}), "(opt, image_nc=opt.image_nc, structure_nc=opt.structure_nc,\n ngf=64, img_f=512, layers=opt.layers, num_blocks=2, use_spect=opt.\n use_spect_g, attn_layer=opt.attn_layer, norm='instance', activation=\n 'LeakyReLU', extractor_kz=opt.kernel_size)\n", (3586, 3836), True, 'import model.networks as network\n'), ((4044, 4061), 'util.util.flow2color', 'util.flow2color', ([], {}), '()\n', (4059, 4061), False, 'from util import task, util\n'), ((4084, 4161), 'model.networks.define_d', 'network.define_d', (['opt'], {'ndf': '(32)', 'img_f': '(128)', 'layers': '(4)', 'use_spect': 'opt.use_spect_d'}), '(opt, ndf=32, img_f=128, layers=4, use_spect=opt.use_spect_d)\n', (4100, 4161), True, 'import model.networks as network\n'), ((4356, 4456), 'model.networks.define_d', 'network.define_d', (['opt'], {'input_nc': 'input_nc', 'ndf': '(32)', 'img_f': '(128)', 'layers': '(4)', 'use_spect': 'opt.use_spect_d'}), '(opt, input_nc=input_nc, ndf=32, img_f=128, layers=4,\n use_spect=opt.use_spect_d)\n', (4372, 4456), True, 'import model.networks as network\n'), ((8600, 8613), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8611, 8613), False, 'from collections import OrderedDict\n'), ((10706, 10739), 'torch.cat', 'torch.cat', (['self.test_generated', '(0)'], {}), '(self.test_generated, 0)\n', (10715, 10739), False, 'import torch\n'), ((13630, 13665), 'model.networks.base_function._unfreeze', 'base_function._unfreeze', (['self.net_D'], {}), '(self.net_D)\n', (13653, 13665), False, 'from model.networks import base_function, external_function\n'), ((13875, 13912), 'model.networks.base_function._unfreeze', 'base_function._unfreeze', (['self.net_D_V'], {}), '(self.net_D_V)\n', (13898, 13912), False, 'from model.networks import base_function, external_function\n'), ((14363, 14385), 'torch.cat', 'torch.cat', (['fake'], {'dim': '(1)'}), '(fake, dim=1)\n', (14372, 14385), False, 'import torch\n'), ((14401, 14423), 'torch.cat', 'torch.cat', (['real'], {'dim': '(1)'}), '(real, dim=1)\n', (14410, 14423), False, 'import torch\n'), ((16630, 16663), 'model.networks.base_function._freeze', 'base_function._freeze', (['self.net_D'], {}), '(self.net_D)\n', (16651, 16663), False, 'from model.networks import base_function, external_function\n'), ((16951, 16986), 'model.networks.base_function._freeze', 'base_function._freeze', (['self.net_D_V'], {}), '(self.net_D_V)\n', (16972, 16986), False, 'from model.networks import base_function, external_function\n'), ((17248, 17270), 'torch.cat', 'torch.cat', (['fake'], {'dim': '(1)'}), '(fake, dim=1)\n', (17257, 17270), False, 'import torch\n'), ((3958, 4016), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['self.net_G'], {'device_ids': 'self.gpu_ids'}), '(self.net_G, device_ids=self.gpu_ids)\n', (3979, 4016), False, 'import torch\n'), ((4220, 4278), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['self.net_D'], {'device_ids': 'self.gpu_ids'}), '(self.net_D, device_ids=self.gpu_ids)\n', (4241, 4278), False, 'import torch\n'), 
((4513, 4573), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['self.net_D_V'], {'device_ids': 'self.gpu_ids'}), '(self.net_D_V, device_ids=self.gpu_ids)\n', (4534, 4573), False, 'import torch\n'), ((4772, 4789), 'torch.nn.L1Loss', 'torch.nn.L1Loss', ([], {}), '()\n', (4787, 4789), False, 'import torch\n'), ((4816, 4834), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (4832, 4834), False, 'import torch\n'), ((8354, 8385), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'mp4v'"], {}), "(*'mp4v')\n", (8376, 8385), False, 'import cv2\n'), ((7837, 7862), 'cv2.imread', 'cv2.imread', (['image_list[i]'], {}), '(image_list[i])\n', (7847, 7862), False, 'import cv2\n'), ((4683, 4730), 'model.networks.external_function.AdversarialLoss', 'external_function.AdversarialLoss', (['opt.gan_mode'], {}), '(opt.gan_mode)\n', (4716, 4730), False, 'from model.networks import base_function, external_function\n'), ((4866, 4907), 'model.networks.external_function.PerceptualCorrectness', 'external_function.PerceptualCorrectness', ([], {}), '()\n', (4905, 4907), False, 'from model.networks import base_function, external_function\n'), ((4957, 5028), 'model.networks.external_function.MultiAffineRegularizationLoss', 'external_function.MultiAffineRegularizationLoss', ([], {'kz_dic': 'opt.kernel_size'}), '(kz_dic=opt.kernel_size)\n', (5004, 5028), False, 'from model.networks import base_function, external_function\n'), ((5071, 5098), 'model.networks.external_function.VGGLoss', 'external_function.VGGLoss', ([], {}), '()\n', (5096, 5098), False, 'from model.networks import base_function, external_function\n'), ((7638, 7693), 'glob.glob', 'glob.glob', (["(self.opt.results_dir + '/*_' + name + '.png')"], {}), "(self.opt.results_dir + '/*_' + name + '.png')\n", (7647, 7693), False, 'import glob\n'), ((7931, 7967), 'numpy.concatenate', 'np.concatenate', (['(cat_im, im)'], {'axis': '(1)'}), '((cat_im, im), axis=1)\n', (7945, 7967), True, 'import numpy as np\n')]
|
import numpy as np
import time
import argparse
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, WeightedRandomSampler
from torch.utils.data.sampler import SubsetRandomSampler
from torchvision import transforms
import csv
from sklearn.metrics import roc_auc_score
import copy
import cv2
import gc
import heapq
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import cm
from matplotlib.backends.backend_pdf import PdfPages
from datetime import datetime
from utils import set_seed
from utils import str2bool
from utils import choose_optimizer
from dataset import trainset
from dataset import testset
from scorecam import scorecam
class baseCNN(nn.Module): # Inherit from `nn.Module`, define `__init__` & `forward`
def __init__(self,args,device,mz_range=18000,num_neuron=64,kernel_size=5,drop_p1=0.5,drop_p2=0.5,poolingSize=5):
# Always call the init function of the parent class `nn.Module`
# so that magics can be set up.
super().__init__()
self.device=device
channels=args.channels
ReLUFlag=args.ReLUFlag
poolingFlag=args.poolingFlag
self.conv1 = nn.Conv1d(in_channels=1, out_channels=channels, kernel_size=kernel_size)
self.conv2 = nn.Conv1d(in_channels=channels, out_channels=channels, kernel_size=kernel_size)
self.conv3 = nn.Conv1d(in_channels=channels, out_channels=channels, kernel_size=kernel_size)
self.conv4 = nn.Conv1d(in_channels=channels, out_channels=channels, kernel_size=kernel_size)
self.globalpooling = nn.AdaptiveMaxPool1d(1)
self.poolingFlag=poolingFlag
self.ReLUFlag=ReLUFlag
self.drop1=nn.Dropout(p=drop_p1)
self.drop2=nn.Dropout(p=drop_p2)
pooling=nn.AvgPool1d(poolingSize,stride=2,padding=poolingSize//2)
self.pooling=nn.AvgPool1d(poolingSize,stride=2,padding=poolingSize//2)
if ReLUFlag:
activation=nn.ReLU()
else:
activation=nn.Tanh()
self.conv_layers = nn.Sequential(
#self.in1,
self.conv1,
nn.BatchNorm1d(channels),
activation,
pooling,
self.conv2,
nn.BatchNorm1d(channels),
activation,
pooling,
self.conv3,
nn.BatchNorm1d(channels),
activation,
pooling,
)
x = torch.randn(1,mz_range).view(-1,1,mz_range)
self._to_linear = None
self.convs(x)
self.fc1 = nn.Linear(self._to_linear, num_neuron) #flattening.
self.classifier = nn.Sequential(
#nn.Linear(256, 32),
nn.Linear(num_neuron, 1),
nn.Sigmoid(),
)
    # The architecture is declared layer-by-layer above; `convs` below also
    # determines the flattened feature size (`self._to_linear`) on its first call.
def convs(self,x):
if self.poolingFlag == True:
x=self.pooling(x)
x=self.conv_layers(x)
if self._to_linear is None:
self._to_linear = x[0].shape[0]*x[0].shape[1]
return x
def forward(self, x):
# Define the network architecture.
# This is achieved by defining how the network forward propagates your inputs
        # Input: a batch of 1D spectra shaped (batch, 1, mz_range),
        # e.g. (32, 1, 18000) with the default settings.
        #print(x.size())
        # (batch, 1, mz_range) -> optional AvgPool1d -> conv blocks -> flatten -> fc1 -> classifier
x = self.convs(x)
x = x.view(-1, self._to_linear) # .view is reshape ... this flattens X before
x=self.drop1(x)
x = F.relu(self.fc1(x))
#x=self.fc1(x)
x=self.drop2(x)
#print(x.size())
#x = self.fc2(x) # bc this is our output layer. No activation here.
x = self.classifier(x)
#print(x.size())
return x
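# Quick sanity check (illustrative sketch): baseCNN only reads `channels`,
# `ReLUFlag` and `poolingFlag` from `args`, so a minimal argparse.Namespace is
# enough to instantiate it and push a dummy batch through the network. The
# names below are for illustration only:
#     dummy_args = argparse.Namespace(channels=64, ReLUFlag=True, poolingFlag=True)
#     net = baseCNN(dummy_args, device='cpu')
#     out = net(torch.randn(4, 1, 18000))   # sigmoid probabilities, shape (4, 1)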
def train(model, train_loader, optimizer, criterion, epoch):
print("Epoch {:d}".format(epoch))
model.train()
correct = 0
all_label = []
all_pred = []
for (data, target) in train_loader:
data=data.view(-1, 1,data.shape[-1]).float()
data, target = data.to(model.device), target.to(model.device)
target = target.float()
optimizer.zero_grad()
output = model(data)
pred = output>0.5
correct += pred.eq(target.view_as(pred)).sum().item()
all_label.extend(target.reshape(-1).tolist())
all_pred.extend((output[:]).reshape(-1).tolist())
output = output.reshape(-1)
loss = criterion(output, target)
loss.backward()
optimizer.step()
# print("Epoch %d\nTraining acc: %.2f"%(epoch, 100. * correct/len(train_loader.dataset))+"%")
print("Train AUC score: {:.4f}".format(roc_auc_score(np.array(all_label), np.array(all_pred))))
def test(model, test_loader, criterion,predPath=None, data_type='Test', arch=None):
model.eval()
correct = 0
tp, tn, fp, fn = 0, 0, 0, 0
all_label = []
all_pred = []
for batch_idx, (data, target) in enumerate(test_loader):
data=data.view(-1, 1,data.shape[-1]).float()
data, target = data.to(model.device), target.to(model.device)
target = target.float()
output = model(data)
pred = output>0.5
correct += pred.eq(target.view_as(pred)).sum().item()
for p, t in zip(pred, target.view_as(pred)):
if p.eq(t) and p.item()==1:
tp += 1
elif p.eq(t) and p.item()==0:
tn += 1
elif p.item()==1:
fp += 1
else:
fn += 1
all_label.extend(target.reshape(-1).tolist())
all_pred.extend((output[:]).reshape(-1).tolist())
accuracy=0
Specificity=0
Sensitivity=0
if data_type=='Test':
accuracy=100. * (tp+tn) / len(all_label)
Specificity=100. * tn / (tn+fp)
Sensitivity=100. * tp / (tp+fn)
print("false negatives: {} ({:.2f}%)".format(fn, 100. * fn / len(all_label)))
print("false positives: {} ({:.2f}%)".format(fp, 100. * fp / len(all_label)))
print("true positives: {} ({:.2f}%)".format(tp, 100. * tp / len(all_label)))
print("true negatives: {} ({:.2f}%) \n".format(tn, 100. * tn / len(all_label)))
print("accuracy: ({:.2f}%)".format( accuracy))
print("Specificity: ({:.2f}%)".format( Specificity))
print("Sensitivity: ({:.2f}%)".format( Sensitivity))
pred_res = np.concatenate((np.array(all_label), np.array(all_pred)),axis=0).reshape(2,-1).T
if predPath:
np.savetxt(predPath, pred_res, delimiter=",",fmt='%.6f')
print("{} AUC score: {:.4f}".format(data_type, roc_auc_score(np.array(all_label), np.array(all_pred))))
return roc_auc_score(np.array(all_label), np.array(all_pred)),accuracy,Specificity,Sensitivity
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--trainData', default='/volume/tsungting/MALDI-TOF/MALDI-TOF/20210414/Linkou_EF_Data_round_off_dim_18000.npy', type=str,help="training data path")
parser.add_argument('--trainLabel', default='/volume/tsungting/MALDI-TOF/MALDI-TOF/20210414/Linkou_EF_Data_labels.csv', type=str,help="training label path")
parser.add_argument('--testData', default='/volume/tsungting/MALDI-TOF/MALDI-TOF/20210414/Kaohsiung_EF_Data_round_off_dim_18000.npy', type=str,help="testing data path")
parser.add_argument('--testLabel', default='/volume/tsungting/MALDI-TOF/MALDI-TOF/20210414/Kaohsiung_EF_Data_labels.csv', type=str,help="testing label path")
parser.add_argument('--savePath', help='Model path to save')
parser.add_argument('--predPath', help="prediction result in test data to save")
parser.add_argument('--batch_size', type=int, default=32, help='batch size')
parser.add_argument('--optimizer', type=str, default='adam', choices=['sgd', 'adam', 'adagrad'], help='choose optimizer: [sgd, adam, adagrad]')
parser.add_argument('--seed',help='seed number',type=int,default=0)
    parser.add_argument('--poolingFlag', default=True, type=str2bool, help="whether to add a pooling layer as the first layer of the model architecture")
    parser.add_argument('--ReLUFlag', default=True, type=str2bool, help="choose the activation function: True => ReLU(), False => Tanh()")
    parser.add_argument('--showPosImportance', default=True, type=str2bool, help="if True, show the importance of each m/z range on the test data")
parser.add_argument('--channels',default=64,type=int,help="channels")
parser.add_argument('--cuda', type=int, default=0,help="cuda")
parser.add_argument('--learning_rate', type=float, default=0.0001, help='learning rate')
parser.add_argument('--epochs', type=int, default=30, help='num of training epochs')
    parser.add_argument('--splitRatio', type=float, default=0.2, help='fraction of the training data split off as a validation set to determine the training parameters (e.g. the best epoch)')
args = parser.parse_args()
print(args)
seed_num=args.seed
set_seed(seed_num)
train_data = trainset(args.trainData,args.trainLabel)
test_data = testset(args.testData,args.testLabel)
dataset_size = len(train_data)
print(dataset_size)
indices = list(range(dataset_size))
split = int(np.floor(args.splitRatio * dataset_size))
np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(val_indices)
batch_size = args.batch_size
epochs = args.epochs
learning_rate = args.learning_rate
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, sampler=train_sampler, pin_memory=True, num_workers=2)
valid_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, sampler=valid_sampler, pin_memory=True, num_workers=2)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=2)
device = 'cpu' if args.cuda==-1 else 'cuda:%d'%args.cuda
model = baseCNN(args,device=device)
model = model.to(device)
print(model)
print("model size: %d"%sum(p.numel() for p in model.parameters()))
criterion = nn.BCELoss()
criterion = criterion.to(device)
#optimizer = torch.optim.SGD(model.parameters(), lr= learning_rate)
optimizer = choose_optimizer(args.optimizer, model, learning_rate)
#epochs=1000
best_auc=0
best_epoch=0
model_check_epoch = copy.deepcopy(model)
#optimizer_check_epoch = torch.optim.SGD(model_check_epoch.parameters(), lr= learning_rate)
optimizer_check_epoch = choose_optimizer(args.optimizer, model_check_epoch, learning_rate)
criterion_check = nn.BCELoss()
criterion_check = criterion_check.to(device)
for epoch in range(1, epochs):
train(model_check_epoch, train_loader, optimizer_check_epoch, criterion_check, epoch)
auc,accuracy,Specificity,Sensitivity=test(model_check_epoch, valid_loader, criterion_check,args.predPath, data_type='valid')
if auc > best_auc:
best_auc=auc
best_epoch=epoch
print("best_epoch",best_epoch)
print("valid_auc:",best_auc)
if best_epoch==1:
best_epoch=2
for epoch in range(1, best_epoch):
train(model, train_loader, optimizer, criterion, epoch)
train(model, valid_loader, optimizer, criterion, epoch)
auc,accuracy,Specificity,Sensitivity=test(model, test_loader, criterion,args.predPath)
print(auc)
if args.savePath:
torch.save(model,args.savePath)
if args.showPosImportance:
out_mask=scorecam(model,model.pooling,test_data)
        np.save('model_avgpool_score_cam_{}.npy'.format(seed_num), out_mask)
return auc,accuracy,Specificity,Sensitivity
if __name__ == '__main__':
start=datetime.now()
auc,accuracy,Specificity,Sensitivity=main()
end=datetime.now()
print("total seconds:",end-start)
print("{} {} {} {}".format(auc,accuracy,Specificity,Sensitivity))
|
[
"torch.nn.Dropout",
"argparse.ArgumentParser",
"utils.set_seed",
"numpy.floor",
"torch.nn.AdaptiveMaxPool1d",
"torch.randn",
"dataset.testset",
"dataset.trainset",
"torch.nn.BCELoss",
"torch.utils.data.DataLoader",
"torch.nn.Conv1d",
"numpy.savetxt",
"torch.nn.Linear",
"torch.nn.AvgPool1d",
"datetime.datetime.now",
"numpy.random.shuffle",
"copy.deepcopy",
"torch.nn.Tanh",
"torch.nn.BatchNorm1d",
"scorecam.scorecam",
"utils.choose_optimizer",
"torch.nn.Sigmoid",
"torch.utils.data.sampler.SubsetRandomSampler",
"torch.nn.ReLU",
"torch.save",
"numpy.array"
] |
[((7018, 7043), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7041, 7043), False, 'import argparse\n'), ((9163, 9181), 'utils.set_seed', 'set_seed', (['seed_num'], {}), '(seed_num)\n', (9171, 9181), False, 'from utils import set_seed\n'), ((9204, 9245), 'dataset.trainset', 'trainset', (['args.trainData', 'args.trainLabel'], {}), '(args.trainData, args.trainLabel)\n', (9212, 9245), False, 'from dataset import trainset\n'), ((9261, 9299), 'dataset.testset', 'testset', (['args.testData', 'args.testLabel'], {}), '(args.testData, args.testLabel)\n', (9268, 9299), False, 'from dataset import testset\n'), ((9461, 9487), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (9478, 9487), True, 'import numpy as np\n'), ((9576, 9610), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['train_indices'], {}), '(train_indices)\n', (9595, 9610), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((9631, 9663), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['val_indices'], {}), '(val_indices)\n', (9650, 9663), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((9781, 9903), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_data'], {'batch_size': 'batch_size', 'sampler': 'train_sampler', 'pin_memory': '(True)', 'num_workers': '(2)'}), '(train_data, batch_size=batch_size, sampler=\n train_sampler, pin_memory=True, num_workers=2)\n', (9808, 9903), False, 'import torch\n'), ((9918, 10040), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_data'], {'batch_size': 'batch_size', 'sampler': 'valid_sampler', 'pin_memory': '(True)', 'num_workers': '(2)'}), '(train_data, batch_size=batch_size, sampler=\n valid_sampler, pin_memory=True, num_workers=2)\n', (9945, 10040), False, 'import torch\n'), ((10054, 10130), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_data'], {'batch_size': 'batch_size', 'num_workers': '(2)'}), '(test_data, batch_size=batch_size, num_workers=2)\n', (10081, 10130), False, 'import torch\n'), ((10370, 10382), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (10380, 10382), True, 'import torch.nn as nn\n'), ((10508, 10562), 'utils.choose_optimizer', 'choose_optimizer', (['args.optimizer', 'model', 'learning_rate'], {}), '(args.optimizer, model, learning_rate)\n', (10524, 10562), False, 'from utils import choose_optimizer\n'), ((10638, 10658), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (10651, 10658), False, 'import copy\n'), ((10783, 10849), 'utils.choose_optimizer', 'choose_optimizer', (['args.optimizer', 'model_check_epoch', 'learning_rate'], {}), '(args.optimizer, model_check_epoch, learning_rate)\n', (10799, 10849), False, 'from utils import choose_optimizer\n'), ((10872, 10884), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (10882, 10884), True, 'import torch.nn as nn\n'), ((11982, 11996), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (11994, 11996), False, 'from datetime import datetime\n'), ((12053, 12067), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (12065, 12067), False, 'from datetime import datetime\n'), ((1222, 1294), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': '(1)', 'out_channels': 'channels', 'kernel_size': 'kernel_size'}), '(in_channels=1, out_channels=channels, kernel_size=kernel_size)\n', (1231, 1294), True, 'import torch.nn as nn\n'), ((1316, 1395), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'channels', 
'out_channels': 'channels', 'kernel_size': 'kernel_size'}), '(in_channels=channels, out_channels=channels, kernel_size=kernel_size)\n', (1325, 1395), True, 'import torch.nn as nn\n'), ((1417, 1496), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'channels', 'out_channels': 'channels', 'kernel_size': 'kernel_size'}), '(in_channels=channels, out_channels=channels, kernel_size=kernel_size)\n', (1426, 1496), True, 'import torch.nn as nn\n'), ((1518, 1597), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'channels', 'out_channels': 'channels', 'kernel_size': 'kernel_size'}), '(in_channels=channels, out_channels=channels, kernel_size=kernel_size)\n', (1527, 1597), True, 'import torch.nn as nn\n'), ((1636, 1659), 'torch.nn.AdaptiveMaxPool1d', 'nn.AdaptiveMaxPool1d', (['(1)'], {}), '(1)\n', (1656, 1659), True, 'import torch.nn as nn\n'), ((1749, 1770), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'drop_p1'}), '(p=drop_p1)\n', (1759, 1770), True, 'import torch.nn as nn\n'), ((1790, 1811), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'drop_p2'}), '(p=drop_p2)\n', (1800, 1811), True, 'import torch.nn as nn\n'), ((1829, 1890), 'torch.nn.AvgPool1d', 'nn.AvgPool1d', (['poolingSize'], {'stride': '(2)', 'padding': '(poolingSize // 2)'}), '(poolingSize, stride=2, padding=poolingSize // 2)\n', (1841, 1890), True, 'import torch.nn as nn\n'), ((1908, 1969), 'torch.nn.AvgPool1d', 'nn.AvgPool1d', (['poolingSize'], {'stride': '(2)', 'padding': '(poolingSize // 2)'}), '(poolingSize, stride=2, padding=poolingSize // 2)\n', (1920, 1969), True, 'import torch.nn as nn\n'), ((2613, 2651), 'torch.nn.Linear', 'nn.Linear', (['self._to_linear', 'num_neuron'], {}), '(self._to_linear, num_neuron)\n', (2622, 2651), True, 'import torch.nn as nn\n'), ((9415, 9455), 'numpy.floor', 'np.floor', (['(args.splitRatio * dataset_size)'], {}), '(args.splitRatio * dataset_size)\n', (9423, 9455), True, 'import numpy as np\n'), ((11699, 11731), 'torch.save', 'torch.save', (['model', 'args.savePath'], {}), '(model, args.savePath)\n', (11709, 11731), False, 'import torch\n'), ((11780, 11821), 'scorecam.scorecam', 'scorecam', (['model', 'model.pooling', 'test_data'], {}), '(model, model.pooling, test_data)\n', (11788, 11821), False, 'from scorecam import scorecam\n'), ((2020, 2029), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2027, 2029), True, 'import torch.nn as nn\n'), ((2067, 2076), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (2074, 2076), True, 'import torch.nn as nn\n'), ((2179, 2203), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['channels'], {}), '(channels)\n', (2193, 2203), True, 'import torch.nn as nn\n'), ((2286, 2310), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['channels'], {}), '(channels)\n', (2300, 2310), True, 'import torch.nn as nn\n'), ((2393, 2417), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['channels'], {}), '(channels)\n', (2407, 2417), True, 'import torch.nn as nn\n'), ((2742, 2766), 'torch.nn.Linear', 'nn.Linear', (['num_neuron', '(1)'], {}), '(num_neuron, 1)\n', (2751, 2766), True, 'import torch.nn as nn\n'), ((2771, 2783), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (2781, 2783), True, 'import torch.nn as nn\n'), ((6719, 6776), 'numpy.savetxt', 'np.savetxt', (['predPath', 'pred_res'], {'delimiter': '""","""', 'fmt': '"""%.6f"""'}), "(predPath, pred_res, delimiter=',', fmt='%.6f')\n", (6729, 6776), True, 'import numpy as np\n'), ((6915, 6934), 'numpy.array', 'np.array', (['all_label'], {}), '(all_label)\n', (6923, 6934), True, 'import numpy as np\n'), ((6936, 6954), 'numpy.array', 'np.array', 
(['all_pred'], {}), '(all_pred)\n', (6944, 6954), True, 'import numpy as np\n'), ((2487, 2511), 'torch.randn', 'torch.randn', (['(1)', 'mz_range'], {}), '(1, mz_range)\n', (2498, 2511), False, 'import torch\n'), ((4881, 4900), 'numpy.array', 'np.array', (['all_label'], {}), '(all_label)\n', (4889, 4900), True, 'import numpy as np\n'), ((4902, 4920), 'numpy.array', 'np.array', (['all_pred'], {}), '(all_pred)\n', (4910, 4920), True, 'import numpy as np\n'), ((6847, 6866), 'numpy.array', 'np.array', (['all_label'], {}), '(all_label)\n', (6855, 6866), True, 'import numpy as np\n'), ((6868, 6886), 'numpy.array', 'np.array', (['all_pred'], {}), '(all_pred)\n', (6876, 6886), True, 'import numpy as np\n'), ((6621, 6640), 'numpy.array', 'np.array', (['all_label'], {}), '(all_label)\n', (6629, 6640), True, 'import numpy as np\n'), ((6642, 6660), 'numpy.array', 'np.array', (['all_pred'], {}), '(all_pred)\n', (6650, 6660), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 12 14:22:31 2016
@author: <NAME>
"""
import collections.abc
import matplotlib.pyplot as plt
import numpy as np
from abc import ABCMeta, abstractmethod
from .. import gui
from .. import helpers as hp
from .. import traces as tc
from ..evaluate import signal as sn
from ..graph import GraphMember
from ..picklable import InteractiveAttributes
class GraphicalMod(object):
"""
This class's subclasses should implement `_figure()` and `_update_fig()`,
which return and update a matplotlib figure, respectively. The figure can
be accessed by `self.figure`.
Parameters
----------
    modification : Modification, optional
        The `Modification` whose parameters are visualized and adjusted by
        this figure.
"""
def __init__(self, modification=None, **kwargs):
# Register the modification which should be graphically adjusted
self.modification = modification
# Initialize figure to None, which effectively disables
# `self.update_fig()` and Co. and prevent them from throwing an error
self._fig = None
def _set_plot_params(self, plot_params=None):
if plot_params is None:
plot_params = {}
gui.set_plot_params(plot_params=plot_params)
def display(self, plot_params=None):
self.init_fig(plot_params=plot_params)
def init_fig(self, show=True, plot_params=None):
"""
        This method calls `self._figure()` to create an interactive figure
        that lets the user determine the parameters necessary to calculate
        the modification (see `self._recalculate()`), and `self._close_fig()`
        to release all references to the actors of the figure.
        `self._figure()` and `self._close_fig()` should be (over)written by
        subclasses.
"""
# Only create a figure, if the function `self._figure()` is implemented
if not hasattr(self, '_figure'):
return
# close the figure
# nbagg backend needs to have the figure closed and recreated
# whenever the code of the cell displaying the figure is executed.
# A simple update of the figure would let it disappear. Even a
# self.figure.show() wouldn't work anymore.
        # For other backends this just means a bit of extra calculation.
# Therefore, close the figure first before replotting it.
self.close_fig()
# set default plot parameters, can be recalled / overwritten in
# `self._figure()`
self._set_plot_params(plot_params=plot_params)
# create the figure
self.figure = self._figure()
# update the figure
self.update_fig()
# show the figure
if show:
self.figure.show()
def update(self, **kwargs):
self.update_fig(**kwargs)
def update_fig(self, **kwargs):
if self._fig is not None:
self._update_fig(**kwargs)
self._figure_canvas_draw()
def _update_fig(self, **kwargs):
pass
def close_fig(self):
if self._fig is not None:
self._pre_close_fig()
self._close_fig()
self._post_close_fig()
def _pre_close_fig(self):
"""
Method to be overwritten by subclasses.
"""
pass
def _close_fig(self):
# force redraw of the figure
self._figure_canvas_draw()
# close the figure
plt.close(self.figure)
# release memory
self.figure = None
def _post_close_fig(self):
"""
Method to be overwritten by subclasses.
"""
pass
def _figure_canvas_draw(self):
# Some matplotlib backends will throw an error when trying to draw the
        # canvas. Simply ignoring the error that could happen here prevents
        # the figure from being left open and blocking the next figure from
        # being drawn. Even though the "except: pass" clause is
# considered bad, here the worst thing that could happen is that the
# figure produced by the matplotlib backend upon closing is not
# updated. Therefore, "except: pass" should be considered as an
# acceptable workaround for this case.
try:
# redraw the figure, before closing it
self.figure.canvas.draw()
except:
pass
@property
def figure(self):
"""
The matplotlib figure that represents and/or adjusts the parameters of
`self.modification`.
"""
# Automatically initialize a figure
if self._fig is None:
self.init_fig(show=False)
# Return a previously initialized figure
return self._fig
@figure.setter
def figure(self, figure):
self._fig = figure
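# Illustrative sketch of the subclass pattern described in the `GraphicalMod`
# docstring: `_figure()` builds the figure and `_update_fig()` refreshes it with
# the current parameters of the attached modification. The class and attribute
# names below are examples only, not part of the library.
class ExampleGraphicalMod(GraphicalMod):
    """Minimal sketch of a graphical adjustment figure."""
    def _figure(self):
        figure, self._ax = plt.subplots()
        self._line, = self._ax.plot([], [])
        return figure

    def _update_fig(self, **kwargs):
        # Refresh the plot from the parameters of `self.modification`, e.g.
        # values stored in `self.modification.iattributes`.
        self._line.set_data([0, 1], [0, 1])
        self._ax.relim()
        self._ax.autoscale_view()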
class Modification(GraphMember, metaclass=ABCMeta):
"""
Modification is an abstract class, that implements methods to modify the
data of a `View` (`view_apply`) and adjust the parameters which control the
behaviour of the modifications applied.
Whenever one of the parameters needed to calculate the modification is
changed, the view, this modification is applied to, is informed.
    `self.set_changed()` has to be called upon any change of the modification
that influences the behaviour of `self.modify()`. In essence, these are all
parameters that are used to determine the modification. Therefore, this
should be called by all setters of the parameters/attributes.
Every subclass of Modification has to implement a constructor method
`self.__init__(self, **kwargs)`, which calls the superclasses' constructor
and sets the traces, the modification is applied to with the keyword
parameter `traces_apply`. An example could be:
super().__init__(traces_apply=['psdX', 'psdZ'], **kwargs)
"""
# set a graphical modification, which will, per default, do nothing
GRAPHICALMOD = GraphicalMod
def __init__(self, traces_apply=None, view_apply=None, view_based=None,
automatic_switch=False, datapoints=-1, **kwargs):
# Call the constructor of the superclass `GraphMember` and set the
        # maximum allowed number of parents (`view_based`) and children
# (`view_apply`) to one.
super().__init__(max_children=1, max_parents=1, **kwargs)
# A `Modification` has to be applied to a `View`!
if view_apply is None:
raise TypeError("Modification missing required positional argument"
" `view_apply`.")
# Set the view, from where the parameters for the modification are
# calculated from
if view_based is not None:
self.view_based = view_based
# Set the view, whose data is going to be modified
self.view_apply = view_apply
# Set the traces, which are modified by this `Modification`
self.traces_apply = traces_apply
# Initialize InteractiveAttributes object, which will hold all the
# parameters that the user should interact with.
self.iattributes = InteractiveAttributes()
# A checkbox to switch on/off the automatic determination of the
# parameters that are used to calculate the modification in the method
# `self.recalculate()`. The attribute `self.automatic` is checked in
# the method `self.recalculate()`. If `automatic` is True, the
# parameters are recalculated, otherwise the parameters are left
# unchanged. Whenever `automatic` is changed (by the user or
# automatically), `self.evaluate()` is called.
if automatic_switch:
self.add_iattribute('automatic', description='Automatic mode',
value=True, unset_automatic=False,
set_changed=False,
callback_functions=[self.evaluate])
# A checkbox to de-/activate this `Modification`. This attribute gets
# evaluated by `self.modify()`. If the `Modification` is active, it
# modifies data, otherwise not, i.e. modify() returns modified or
# unmodified original data, respectively.
desc = "".join((self.__class__.__name__, " active"))
self.add_iattribute('active', description=desc, value=True,
unset_automatic=False)
# Datapoints is used to calculate and/or present modification. The
# attribute `datapoints` is used to calculate a decimating factor and
# speed up the calculations and/or plot commands.
if datapoints > 0:
desc = "Datapoints to calculate/visualize modification"
self.add_iattribute('datapoints', description=desc,
value=datapoints, unset_automatic=False)
# Add a Button to manually call the method `self.evaluate()`.
self.add_iattribute('evaluate', description='Evaluate',
unset_automatic=False, set_changed=False,
callback_functions=[self.evaluate])
def add_iattribute(self, key, description=None, value=None,
unset_automatic=True, set_changed=True,
callback_functions=None, **kwargs):
"""
        Add an interactive attribute (widget) and wire up its callbacks.
        Attributes registered with `unset_automatic=True` (the default) switch
        off the automatic mode whenever their value is changed; pass
        `unset_automatic=False` to leave the automatic mode untouched.
        Attributes registered with `set_changed=True` (the default) trigger a
        call of `self.set_changed()` upon a change of their value.
"""
if callback_functions is None:
callback_functions = []
if unset_automatic:
callback_functions.append(self._unset_automatic)
if set_changed:
callback_functions.append(self.set_changed)
self.iattributes.add(key, description=description, value=value,
callback_functions=callback_functions, **kwargs)
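    # Example (sketch; 'threshold' is a hypothetical parameter name):
    #     self.add_iattribute('threshold', description='Threshold', value=0.5)
    # adds a float parameter that, when edited, switches off the automatic mode
    # and calls self.set_changed(), since both flags default to True.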
def _unset_automatic(self, leave_automatic=False, **kwargs):
"""
Add the logic for the automatic checkbox. If the value of an attribute
is changed and the attribute was created with `unset_automatic=True`,
deactivate the automatic mode (see `self.add_iattribute()`). To
temporarily leave the automatic mode status untouched when changing the
value of an attribute, i.e. not unset the automatic mode, set the value
of the attribute with the keyword argument `leave_automatic=True`
(see method `self.iattributes.set_value()`)
"""
if not leave_automatic:
self.iattributes.set_value('automatic', False, callback=False)
def evaluate(self):
"""
Implement the (re)calculation for the values necessary to calculate the
modification in the subclass and call recalculate() of the superclass
(this class).
"""
if self.updated:
# This method makes sure the modification is calculated with the
# current values of the View this modification is based on. It is
# called by self.modify().
# When a View requests data, it calls modify(), which in turn calls
# recalculate(). Recalculate(), if necessary, calls
# get_data_modified() from the View it is based on, which again
            # triggers a call of modify() and a subsequent recalculate() of all
            # modifications associated with this View.
            # The modification needs an update because the view this mod is
            # based on was changed.
# self._view_based.evaluate()is not needed, it is called via:
# recalculate() -> get_data_based() -> _view_based.get_data() ->
# get_modified_data() -> super().evaluate()
return
# Recalculate and print info of recalculated values if in automatic
# mode
if self.recalculate():
self.print_info()
# Update figure after recalculation has taken place
self.graphicalmod.update()
def recalculate(self):
# Check if recalculation of parameters is necessary
if self.updated:
return False
# Check the attribute self.automatic, whether the parameters needed for
# the calculation of the modification should be determined
# automatically or not. If values are set manually, no recalculation is
# necessary, and `self` is therefore up to date.
if not self.automatic:
self.updated = True
return True
# Recalculate the parameters, inform the view this `Modification`
# is applied to about the change, and set `self` to be updated.
self._recalculate()
self.set_changed(updated=True)
return True
def _recalculate(self):
"""
This method should be overwritten by subclasses and perform the
recalculation necessary to determine the parameters used by this
Modification to modify the data in `self._modify()`.
"""
pass
def print_info(self):
print("Values for Modification of class %s:"
% self.__class__.__name__)
if not self.automatic:
print(" Parameters set manually!")
for key, widget in self.iattributes._widgets.items():
if hasattr(widget, 'value'):
if isinstance(widget.value, float):
print(" %s: %.5f" % (widget.description, widget.value))
                if isinstance(widget.value, collections.abc.Iterable):
print(" %s: %s" % (widget.description, widget.value))
self._print_info()
def _print_info(self):
"""
        This method should be overwritten by subclasses that want to print
        extra info in addition to the info about the calculated parameters.
"""
pass
def modify(self, data, samples, traces_idx):
"""
Modifies data and returns the modified array.
Parameters
----------
data : 2D numpy.ndarray of type float
`data` holds the data to be modified
samples : index array or slice
`samples` is the index of the samples that was used to get the
`data`
        traces_idx : index array or slice
            `traces_idx` is the index of the traces that was used to get the
            `data`
"""
# Modification is active.
if self.active:
# Check if traces contained in data are modified by this
# modification.
data_traces = self.view_apply.idx_to_traces(traces_idx)
mod_traces = self.traces_apply
# Calculate the indices of traces contained in data and
# modification. First, calculate indices of modification traces.
mod_index = hp.overlap_index(mod_traces, data_traces)
if len(mod_index) > 0:
# At least one trace exists in both data and modification.
# Therefore, the data needs to be modified...
mod_index = hp.slicify(mod_index)
# Calculate indices of traces of the data in such a way that
# `data[:, data_index]` indexes the same traces as
# `self.traces_apply[mod_index]`
data_index = np.array([data_traces.index(trace)
for trace
in np.array(mod_traces)[mod_index]])
data_index = hp.slicify(data_index)
# Trigger a recalculation of the parameters for the
# modification (if necessary) before modifying the data.
self.evaluate()
# Modify and return the modified data
return self._modify(data=data,
samples=samples,
data_traces=data_traces,
data_index=data_index,
mod_index=mod_index)
# Return unmodified data
return data
@abstractmethod
def _modify(self, data, samples, data_traces, data_index, mod_index):
"""
Is called by self.modify() whenever data is requested and needs to be
modified.
Parameters
----------
data : 2D numpy.array()
Contains the data, indexed by samples and data_traces
samples : slice or 1D numpy.array()
Is the index of the samples contained in data, which was
given/asked by the user/process who called _get_data().
data_traces : list of str
Contains a list of traces (str) existent in data, which
was given/asked by the user/process who called _get_data().
data_index : slice or 1D numpy.array()
data[:, data_index] gives the data, which is modified by
this modification
mod_index : slice or 1D numpy.array()
np.array(self.traces_apply)[mod_index] gives the traces,
            which are existent in data and also modified by this modification.
Returns
-------
2D numpy.array()
The modified data.
"""
# modify data here, like so:
# data[:,data_index] -= modification[:,mod_index]
return data
@property
def updated(self):
return self._updated
@updated.setter
def updated(self, value):
"""
Gets set to True, after all `Views`, this `Modification` is based on,
have been updated and after this `Modification` has been recalculated.
This is automatically taken care of by `self.evaluate()` ->
`self.recalculate()`.
Gets called by a `View`, this `Modification` is based on, whenever the
`View` (a `Modification` of the `View`) has been changed. It
automatically informs its own `View`, that there was a change, by
calling `self.set_changed()`.
"""
self._updated = value
def member_changed(self, ancestor=True, calledfromself=False,
index_shift=None, **kwargs):
# If a change of an ancestor View or a MultiRegion was triggered by an
# index_shift, the modification needs to recalculate itself, i.e.
        # the modification will alter its changing behaviour. Because an
# index_shift change is only transmitted to `level=1`, inform the
# descendants of the change itself. A change of descendants is ignored.
if index_shift is not None and not calledfromself and ancestor:
self.set_changed(includeself=False)
# Update update status
super().member_changed(ancestor=ancestor,
calledfromself=calledfromself, **kwargs)
def _get_data(self, based=True, samples=None, traces=None, window=False,
decimate=False, copy=True):
if based:
view = self.view_based
else:
view = self.view_apply
if not isinstance(window, bool) and isinstance(window, int):
window = window
elif window:
window = self.decimate
else:
window = 1
if not isinstance(decimate, bool) and isinstance(decimate, int):
decimate = decimate
elif decimate:
decimate = self.decimate
else:
decimate = 1
if not based:
old_active = self.iattributes.active
self.iattributes.set_value('active', False, callback=False)
data = view.get_data(traces=traces, samples=samples,
moving_filter='mean', window=window,
decimate=decimate, copy=copy)
if not based:
self.iattributes.set_value('active', old_active, callback=False)
return data
def _get_data_based(self, samples=None, traces=None, window=False,
decimate=False, copy=True):
"""
        decimate is False per default. If decimate is True, it only gets used
        if samples is set to None (step information in samples takes
        precedence over decimate).
"""
return self._get_data(based=True, samples=samples, traces=traces,
window=window, decimate=decimate, copy=copy)
def _get_data_apply(self, samples=None, traces=None, window=False,
decimate=False, copy=True):
"""
Get data of view apply with all modifications applied, except self.
This is achieved by setting the self.__active flag to False.
self.__active is intentionally set directly by accessing the attribute
and not using the property/set_active() method, to prevent firing the
self.set_changed() method within the set_active() method.
        decimate is False per default. If decimate is True, it only gets used
        if samples is set to None (step information in samples takes
        precedence over decimate).
"""
return self._get_data(based=False, samples=samples, traces=traces,
window=window, decimate=decimate, copy=copy)
def calculate_bin_means(self, data=None, traces=None, bins=None,
datapoints_per_bin=None, sorttrace=0):
"""
Calculates binned means based on the data to be fitted. The binned
means are usually used by data fitting routines.
Parameters
----------
data : 2D numpy.ndarray of type float, optional
Defaults to `self._get_data_based(traces=traces, decimate=True)`.
traces : str or list of str, optional
Defaults to `self.traces_apply`.
bins : int, optional
Number of bins that contain the datapoints to be averaged. If
possible, it defaults to (`self.iattributes.datapoints` /
`datapoints_per_bin`), otherwise bins defaults to
(`self.view_based.datapoints` / `datapoints_per_bin`).
datapoints_per_bin : int, optional
Average number of datapoints to be averaged in one bin. Defaults to
25.
sorttrace : int, optional
Trace (column) of `data` that acts as sorting index upon binning
for the rest of the data. Defaults to the first trace of the data.
Returns
-------
1D numpy.ndarray of type float
The averaged bin values.
float
The size of one bin.
"""
# Bin data and average bins to prevent arbitrary weighting of bins with
# more datapoints
if bins is None:
bins = self._bins(datapoints_per_bin=datapoints_per_bin)
# get the traces to retrieve data from
if traces is None:
traces = self.traces_apply
# get the data to bin
if data is None:
data = self._get_data_based(traces=traces, decimate=True)
# create the bins based on one trace of the data
minimum = np.min(data[:, sorttrace])
maximum = np.max(data[:, sorttrace])
edges = np.linspace(minimum, maximum, bins + 1)
# Get the indices of the bins to which each value in input array
# belongs.
bin_idx = np.digitize(data[:, sorttrace], edges)
# Find which points are on the rightmost edge.
on_edge = data[:, sorttrace] == edges[-1]
# Shift these points one bin to the left.
bin_idx[on_edge] -= 1
# fill the bins with the means of the data contained in each bin
bin_means = np.array([data[bin_idx == i].mean(axis=0)
for i in range(1, bins + 1)
if np.any(bin_idx == i)])
bin_width = edges[1] - edges[0]
return bin_means, bin_width
def _bins(self, datapoints_per_bin=None):
# On average 25 datapoints per bin
datapoints_per_bin = datapoints_per_bin or 25
if 'datapoints' in self.iattributes:
bins = self.iattributes.datapoints / datapoints_per_bin
else:
bins = self.view_based.datapoints / datapoints_per_bin
bins = max(1, int(np.round(bins)))
return bins
_NAME = {
'position': ['positionX', 'positionY'],
'psd': ['psdX', 'psdY'],
'axis': ['X', 'Y']
}
def _excited(self, traces=None):
traces = traces or ['positionX', 'positionY']
data = self._get_data_based(traces=traces, copy=False)
return sn.get_excited_signal(data)
def interact(self):
self.recalculate()
self.iattributes.display()
self.graphicalmod.display()
@property
def graphicalmod(self):
# ZODB volatile
if not hasattr(self, '_v_graphicalmod'):
self._v_graphicalmod \
= self.__class__.GRAPHICALMOD(modification=self)
return self._v_graphicalmod
@property
def active(self):
active = False
if 'active' in self.iattributes:
active = self.iattributes.active
return active
@active.setter
def active(self, active=True):
if 'active' in self.iattributes:
self.iattributes.active = active
@property
def automatic(self):
# Does the modification automatically calculate its parameters
automatic = True
if 'automatic' in self.iattributes:
automatic = self.iattributes.automatic
return automatic
@property
def datapoints(self):
if 'datapoints' in self.iattributes:
return self.iattributes.datapoints
else:
return self.view_based.datapoints
@property
def decimate(self):
if 'datapoints' in self.iattributes:
return max(1, int(np.round(self.view_based.datapoints
/ self.datapoints)))
else:
return 1
@property
def view_based(self):
return self.parent
@property
def view_apply(self):
return self.child
@view_based.setter
def view_based(self, view):
self.set_parent(view)
@view_apply.setter
def view_apply(self, view):
self.set_child(view)
def lia(self, trace):
"""
Return the local index of trace in traces_apply
"""
return self.traces_apply.index(trace)
@property
def traces_apply(self):
# return a copy to protect local copy
return self._traces_apply.copy()
@traces_apply.setter
def traces_apply(self, traces):
if traces is None:
traces_apply = []
else:
traces_apply = tc.normalize(traces)
self._traces_apply = traces_apply
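# Illustrative sketch of the subclass pattern described in the `Modification`
# docstring: the constructor passes `traces_apply` to the superclass,
# `_recalculate()` determines the parameters (here a hypothetical 'offset'
# attribute), and `_modify()` applies them to the requested traces. The class
# and parameter names are examples only, not part of the library.
class ExampleOffsetMod(Modification):
    """Sketch: subtract a constant offset from the traces it is applied to."""
    def __init__(self, **kwargs):
        super().__init__(traces_apply=['psdX', 'psdZ'], automatic_switch=True,
                         datapoints=10000, **kwargs)
        # 'offset' is a hypothetical interactive parameter used by _modify()
        self.add_iattribute('offset', description='Offset', value=0.0)

    def _recalculate(self):
        # Determine the offset from the data of the view this mod is based on,
        # without switching off the automatic mode.
        data = self._get_data_based(traces=self.traces_apply, decimate=True)
        self.iattributes.set_value('offset', data.mean(), leave_automatic=True)

    def _modify(self, data, samples, data_traces, data_index, mod_index):
        data[:, data_index] -= self.iattributes.offset
        return data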
|
[
"matplotlib.pyplot.close",
"numpy.any",
"numpy.min",
"numpy.max",
"numpy.array",
"numpy.linspace",
"numpy.digitize",
"numpy.round"
] |
[((3383, 3405), 'matplotlib.pyplot.close', 'plt.close', (['self.figure'], {}), '(self.figure)\n', (3392, 3405), True, 'import matplotlib.pyplot as plt\n'), ((23139, 23165), 'numpy.min', 'np.min', (['data[:, sorttrace]'], {}), '(data[:, sorttrace])\n', (23145, 23165), True, 'import numpy as np\n'), ((23184, 23210), 'numpy.max', 'np.max', (['data[:, sorttrace]'], {}), '(data[:, sorttrace])\n', (23190, 23210), True, 'import numpy as np\n'), ((23227, 23266), 'numpy.linspace', 'np.linspace', (['minimum', 'maximum', '(bins + 1)'], {}), '(minimum, maximum, bins + 1)\n', (23238, 23266), True, 'import numpy as np\n'), ((23378, 23416), 'numpy.digitize', 'np.digitize', (['data[:, sorttrace]', 'edges'], {}), '(data[:, sorttrace], edges)\n', (23389, 23416), True, 'import numpy as np\n'), ((24295, 24309), 'numpy.round', 'np.round', (['bins'], {}), '(bins)\n', (24303, 24309), True, 'import numpy as np\n'), ((23830, 23850), 'numpy.any', 'np.any', (['(bin_idx == i)'], {}), '(bin_idx == i)\n', (23836, 23850), True, 'import numpy as np\n'), ((25906, 25960), 'numpy.round', 'np.round', (['(self.view_based.datapoints / self.datapoints)'], {}), '(self.view_based.datapoints / self.datapoints)\n', (25914, 25960), True, 'import numpy as np\n'), ((15533, 15553), 'numpy.array', 'np.array', (['mod_traces'], {}), '(mod_traces)\n', (15541, 15553), True, 'import numpy as np\n')]
|
# example 5-1 Modeling CSV data with multilayer perceptron networks
import tensorflow.python.platform
import tensorflow as tf
import pandas as pd
import numpy as np
import os
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Input, Dense
from matplotlib import pyplot
print("Example 5.1 with TensorFlow version: {}".format(tf.__version__))
print("Eager execution: {}".format(tf.executing_eagerly()))
path_prefix = os.path.join("data", "classification-simdata")
# filenameTrain = os.path.join(path_prefix, "saturn_data_train.csv")
# filenameTest = os.path.join(path_prefix, "saturn_data_eval.csv")
filenameTrain = "saturn_data_train.csv"
filenameTest = "saturn_data_eval.csv"
# Data by Dr. <NAME> (http://www.jasonbaldridge.com) to test neural network frameworks.
# Read "https://github.com/jasonbaldridge/try-tf/tree/master/simdata" and copy
# to data/classification-simdata
if not os.path.isdir(path_prefix):
print("Missing Saturn simulation data!")
printf("Downloading from https://github.com/jasonbaldridge/try-tf/tree/master/simdata")
fd = open(os.path.join(path_prefix, filenameTrain))
for i in range(5):
print(fd.readline())
fd.close()
# Extract tf.data.Dataset representations of labels and features in CSV files
# given data in the format of label, feat[0], feat[1], feat[2], etc.
def get_dataset(file_path, plotDataset=False):
tf.keras.backend.set_floatx('float64')
# The raw data from the file is easily loaded as a Pandas DataFrame
df = pd.read_csv(file_path, header=None)
# The first column is the column of classification labels. Peel off the column of labels as a
# vector of 64 bit floating point values.
labels = df.pop(0).astype(np.float64)
dataset_length = len(labels)
# The remainder of the values are the features
feat = df.values
if plotDataset:
pyplot.figure()
# There are only two labels in this dataset 0 or 1
idx = labels > 0.5
pyplot.scatter(feat[idx, 0], feat[idx, 1], marker='+', c='#ff0000')
idx = labels <= 0.5
pyplot.scatter(feat[idx, 0], feat[idx, 1], marker='o', c='#00ff00')
pyplot.show()
    # Assuming that a value of zero is a label, the number of labels is the maximum integer in the array, plus 1
NUM_LABELS = np.max(labels) + 1
    # Convert the integer labels into a one-hot encoding matrix
labels_onehot = (np.arange(NUM_LABELS) == labels[:, None]).astype(np.float64)
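    # A tiny worked example of the broadcast trick above (values are illustrative):
    # with NUM_LABELS = 2 and labels = [0., 1., 1.], np.arange(2) == labels[:, None]
    # compares each label against [0, 1] row-wise, giving
    # [[True, False], [False, True], [False, True]], which casts to
    # [[1., 0.], [0., 1.], [0., 1.]] -- one one-hot row per sample.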
# A tf.data.Dataset represents a sequence of elements, where each element consists of the data and the data label.
# See: https://www.tensorflow.org/guide/data
# As one-hot encoded data...
dataset = tf.data.Dataset.from_tensor_slices((feat, labels_onehot))
    # The Dataset object is a Python iterable.
return dataset, dataset_length
# Load the training data set
raw_train_data, raw_train_data_length = get_dataset(os.path.join(path_prefix, filenameTrain), plotDataset=True)
print("\n\nTraining data set.")
for feat, targ in raw_train_data.take(5):
print ('Features: {}, Target: {}'.format(feat, targ))
# Load the test/evaluation data set
raw_test_data, raw_test_data_length = get_dataset(os.path.join(path_prefix, filenameTest))
print("\n\nTesting data set.")
for feat, targ in raw_test_data.take(5):
print ('Features: {}, Target: {}'.format(feat, targ))
print("\n\n")
seed = 123
LEARNING_RATE = 0.005
BATCH_SIZE = 50
NUM_EPOCHS = 30 # Number of epochs, full passes of the data
NUM_INPUTS = 2
NUM_OUTPUTS = 2
NUM_HIDDEN_NODES = 20
# Build the model. For this example, the model has two layers. The hidden layer is
# a dense (multilayer perceptron) layer with a ReLU activation function, and the
# output layer is a softmax layer trained with a negative log likelihood loss function.
#
# The weight initializer in the Deep Learning book is Xavier
model = Sequential([
tf.keras.layers.Dense(NUM_HIDDEN_NODES, activation='relu'),
tf.keras.layers.Dense(NUM_OUTPUTS, activation='softmax')
])
# For this example, we need to calculate the negative log likelihood of the model given the data.
# To do this with Keras, we need to create a class that inherits from the tf.keras.losses.Loss class
# and implement the following two methods:
#   __init__(self) - Accept parameters to pass during the call of your loss function
#   call(self, y_true, y_pred) - Use the targets (y_true) and the model predictions (y_pred) to compute the model's loss
#
# See:
class NegLogLikelihood(tf.keras.losses.Loss):
"""Loss class calcuates the negative log likelihood of the model, given the data.
Arguments:
model -- The Keras neural network model
reduction -- Type of tf.keras.losses.Reduction to apply to loss.
name -- Name of the loss function.
"""
    def __init__(self, model, reduction=tf.keras.losses.Reduction.AUTO, name='nll_gaussian'):
super().__init__(reduction=reduction, name=name)
self.model = model
"""Need to convert the loss function below to a
loss function suitable to the above input parameters.
Likelihood is the probability that the calculated parameters
produced the known data. Probability of the parameters (model)
given the data.
Likelihood:
L = Product i=1..N p(x(i) | theta)
NLL:
NLL = Sum i=1..N -log(p(x(i) | theta))
where, p(x(i) | theta) is the gausian probability density function
"""
def call(self, y_true, y_pred):
print("y_true:", y_true, ", y_pred:", y_pred)
        # keepdims=True keeps the shape (batch, 1) so these statistics broadcast
        # cleanly against y_true, which has shape (batch, num_outputs)
        y_pred_mean = tf.math.reduce_mean(y_pred, axis=-1, keepdims=True)
        y_pred_sd = tf.math.reduce_std(y_pred, axis=-1, keepdims=True)
print("mean:", y_pred_mean, ", sd:", y_pred_sd)
## element wise square
square = tf.square(y_pred_mean - y_true)## preserve the same shape as y_pred.shape
ms = tf.add(tf.divide(square,y_pred_sd), tf.math.log(y_pred_sd))
## axis = -1 means that we take mean across the last dimension
## the output keeps all but the last dimension
## ms = tf.reduce_mean(ms,axis=-1)
## return scalar
ms = tf.reduce_mean(ms)
print("ms:", ms)
return(ms)
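# A quick, commented-out sanity check of the custom loss on dummy tensors.
# The tensor values below are made up purely for illustration; uncomment to
# inspect the scalar loss that NegLogLikelihood produces.
# dummy_true = tf.constant([[1.0, 0.0], [0.0, 1.0]], dtype=tf.float64)
# dummy_pred = tf.constant([[0.8, 0.2], [0.3, 0.7]], dtype=tf.float64)
# print(NegLogLikelihood(model=None)(dummy_true, dummy_pred))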
# To train using the Dataset, we should shuffle and batch the data
training_batches = raw_train_data.shuffle(raw_train_data_length).batch(BATCH_SIZE)
# Optimizer is Adam, loss function is mean squared error
# model.compile(loss = tf.losses.MeanSquaredError(), optimizer = tf.optimizers.Adam(), metrics=['accuracy'])
# Optimizer is stochastic gradient descent (sgd), loss function is negative log likelihood
model.compile(optimizer='sgd', loss=NegLogLikelihood(model), metrics=['accuracy'])
history = model.fit(training_batches, epochs=NUM_EPOCHS, verbose=1)
model.summary()
# plot history
pyplot.plot(history.history['loss'], label='loss')
pyplot.plot(history.history['accuracy'], label='accuracy')
pyplot.title('Training loss and accuracy')
pyplot.legend()
pyplot.show()
# Run against the test set. Final evaluation of the model
testing_batches = raw_test_data.shuffle(raw_test_data_length).batch(BATCH_SIZE)
scores = model.evaluate(testing_batches, verbose=0)
print("Test set analysis accuracy: %.2f%%" % (scores[1]*100))
|
[
"matplotlib.pyplot.title",
"tensorflow.keras.layers.Dense",
"pandas.read_csv",
"tensorflow.math.reduce_std",
"tensorflow.executing_eagerly",
"matplotlib.pyplot.figure",
"tensorflow.divide",
"numpy.arange",
"os.path.join",
"tensorflow.math.log",
"numpy.max",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"tensorflow.reduce_mean",
"tensorflow.math.reduce_mean",
"matplotlib.pyplot.plot",
"os.path.isdir",
"matplotlib.pyplot.scatter",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.square",
"tensorflow.keras.backend.set_floatx"
] |
[((464, 510), 'os.path.join', 'os.path.join', (['"""data"""', '"""classification-simdata"""'], {}), "('data', 'classification-simdata')\n", (476, 510), False, 'import os\n'), ((6927, 6977), 'matplotlib.pyplot.plot', 'pyplot.plot', (["history.history['loss']"], {'label': '"""loss"""'}), "(history.history['loss'], label='loss')\n", (6938, 6977), False, 'from matplotlib import pyplot\n'), ((6979, 7037), 'matplotlib.pyplot.plot', 'pyplot.plot', (["history.history['accuracy']"], {'label': '"""accuracy"""'}), "(history.history['accuracy'], label='accuracy')\n", (6990, 7037), False, 'from matplotlib import pyplot\n'), ((7039, 7081), 'matplotlib.pyplot.title', 'pyplot.title', (['"""Training loss and accuracy"""'], {}), "('Training loss and accuracy')\n", (7051, 7081), False, 'from matplotlib import pyplot\n'), ((7083, 7098), 'matplotlib.pyplot.legend', 'pyplot.legend', ([], {}), '()\n', (7096, 7098), False, 'from matplotlib import pyplot\n'), ((7100, 7113), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (7111, 7113), False, 'from matplotlib import pyplot\n'), ((942, 968), 'os.path.isdir', 'os.path.isdir', (['path_prefix'], {}), '(path_prefix)\n', (955, 968), False, 'import os\n'), ((1124, 1164), 'os.path.join', 'os.path.join', (['path_prefix', 'filenameTrain'], {}), '(path_prefix, filenameTrain)\n', (1136, 1164), False, 'import os\n'), ((1429, 1467), 'tensorflow.keras.backend.set_floatx', 'tf.keras.backend.set_floatx', (['"""float64"""'], {}), "('float64')\n", (1456, 1467), True, 'import tensorflow as tf\n'), ((1553, 1588), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {'header': 'None'}), '(file_path, header=None)\n', (1564, 1588), True, 'import pandas as pd\n'), ((2763, 2820), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(feat, labels_onehot)'], {}), '((feat, labels_onehot))\n', (2797, 2820), True, 'import tensorflow as tf\n'), ((2992, 3032), 'os.path.join', 'os.path.join', (['path_prefix', 'filenameTrain'], {}), '(path_prefix, filenameTrain)\n', (3004, 3032), False, 'import os\n'), ((3279, 3318), 'os.path.join', 'os.path.join', (['path_prefix', 'filenameTest'], {}), '(path_prefix, filenameTest)\n', (3291, 3318), False, 'import os\n'), ((422, 444), 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (442, 444), True, 'import tensorflow as tf\n'), ((1925, 1940), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (1938, 1940), False, 'from matplotlib import pyplot\n'), ((2038, 2105), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['feat[idx, 0]', 'feat[idx, 1]'], {'marker': '"""+"""', 'c': '"""#ff0000"""'}), "(feat[idx, 0], feat[idx, 1], marker='+', c='#ff0000')\n", (2052, 2105), False, 'from matplotlib import pyplot\n'), ((2144, 2211), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['feat[idx, 0]', 'feat[idx, 1]'], {'marker': '"""o"""', 'c': '"""#00ff00"""'}), "(feat[idx, 0], feat[idx, 1], marker='o', c='#00ff00')\n", (2158, 2211), False, 'from matplotlib import pyplot\n'), ((2221, 2234), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (2232, 2234), False, 'from matplotlib import pyplot\n'), ((2369, 2383), 'numpy.max', 'np.max', (['labels'], {}), '(labels)\n', (2375, 2383), True, 'import numpy as np\n'), ((3999, 4057), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['NUM_HIDDEN_NODES'], {'activation': '"""relu"""'}), "(NUM_HIDDEN_NODES, activation='relu')\n", (4020, 4057), True, 'import tensorflow as tf\n'), ((4064, 4120), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', 
(['NUM_OUTPUTS'], {'activation': '"""softmax"""'}), "(NUM_OUTPUTS, activation='softmax')\n", (4085, 4120), True, 'import tensorflow as tf\n'), ((5687, 5723), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['y_pred'], {'axis': '(-1)'}), '(y_pred, axis=-1)\n', (5706, 5723), True, 'import tensorflow as tf\n'), ((5745, 5780), 'tensorflow.math.reduce_std', 'tf.math.reduce_std', (['y_pred'], {'axis': '(-1)'}), '(y_pred, axis=-1)\n', (5763, 5780), True, 'import tensorflow as tf\n'), ((5892, 5923), 'tensorflow.square', 'tf.square', (['(y_pred_mean - y_true)'], {}), '(y_pred_mean - y_true)\n', (5901, 5923), True, 'import tensorflow as tf\n'), ((6253, 6271), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['ms'], {}), '(ms)\n', (6267, 6271), True, 'import tensorflow as tf\n'), ((5987, 6015), 'tensorflow.divide', 'tf.divide', (['square', 'y_pred_sd'], {}), '(square, y_pred_sd)\n', (5996, 6015), True, 'import tensorflow as tf\n'), ((6016, 6038), 'tensorflow.math.log', 'tf.math.log', (['y_pred_sd'], {}), '(y_pred_sd)\n', (6027, 6038), True, 'import tensorflow as tf\n'), ((2477, 2498), 'numpy.arange', 'np.arange', (['NUM_LABELS'], {}), '(NUM_LABELS)\n', (2486, 2498), True, 'import numpy as np\n')]
|
"""Doomsday fuel.
"""
def solution(m):
import numpy as np
import fractions
if len(m) == 1:
return [1, 1]
n_states = len(m)
mask = [False if mi == [0] * n_states else True for mi in m]
idx = np.concatenate([np.arange(n_states)[mask], np.arange(n_states)[np.logical_not(mask)]])
M = np.array(m)
M = M[idx, :]
M = M[:, idx]
# Convert to probabilities
M = [np.array(mi)/np.sum(mi).astype(float) if np.sum(mi) > 0 else mi for mi in M]
    # The rows/columns were reordered above (via idx) so that transient states
    # come first and the absorbing (all-zero) states come last.
M = np.array(M)
    n_transient = sum(mask)  # Number of transient (non-terminal) states
Q = M[0:n_transient, 0:n_transient]
R = M[0:n_transient, n_transient:]
N = np.linalg.inv(np.eye(n_transient) - Q)
B = np.matmul(N, R)
# Convert the solution into a fraction
frac = [fractions.Fraction(si).limit_denominator(1000) for si in B[0, :]]
numerator = [f.numerator for f in frac]
denominator = [f.denominator for f in frac]
d = int(np.lcm.reduce(denominator))
numerator = [int(ni * d / di) for ni, di in zip(numerator, denominator)]
return numerator + [d]
if __name__ == '__main__':
m = [
[0, 1, 0, 0, 0, 1], # s0, the initial state, goes to s1 and s5 with equal probability
[0, 0, 0, 0, 0, 0], # s2 is terminal, and unreachable (never observed in practice)
[0, 0, 0, 0, 0, 0], # s3 is terminal
[0, 0, 0, 0, 0, 0], # s4 is terminal
[0, 0, 0, 0, 0, 0], # s5 is terminal
[4, 0, 0, 3, 2, 0], # s1 can become s0, s3, or s4, but with different probabilities
]
expected_result = [9, 0, 3, 2, 14]
result = solution(m)
print('\n')
print(expected_result)
print(result)
# print(f'Result: {result}, Expected: {expected_result}')
m = [
[0, 1, 0, 0, 0, 1], # s0, the initial state, goes to s1 and s5 with equal probability
[4, 0, 0, 3, 2, 0], # s1 can become s0, s3, or s4, but with different probabilities
[0, 0, 0, 0, 0, 0], # s2 is terminal, and unreachable (never observed in practice)
[0, 0, 0, 0, 0, 0], # s3 is terminal
[0, 0, 0, 0, 0, 0], # s4 is terminal
[0, 0, 0, 0, 0, 0], # s5 is terminal
]
expected_result = [0, 3, 2, 9, 14]
result = solution(m)
print('\n')
print(expected_result)
print(result)
m = [
[0, 2, 1, 0, 0],
[0, 0, 0, 3, 4],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]
]
expected_result = [7, 6, 8, 21]
result = solution(m)
print('\n')
print(expected_result)
print(result)
m = [
[1]
]
expected_result = [1, 1]
result = solution(m)
print('\n')
print(expected_result)
print(result)
|
[
"numpy.sum",
"numpy.logical_not",
"numpy.lcm.reduce",
"numpy.array",
"numpy.arange",
"numpy.matmul",
"numpy.eye",
"fractions.Fraction"
] |
[((323, 334), 'numpy.array', 'np.array', (['m'], {}), '(m)\n', (331, 334), True, 'import numpy as np\n'), ((633, 644), 'numpy.array', 'np.array', (['M'], {}), '(M)\n', (641, 644), True, 'import numpy as np\n'), ((839, 854), 'numpy.matmul', 'np.matmul', (['N', 'R'], {}), '(N, R)\n', (848, 854), True, 'import numpy as np\n'), ((1081, 1107), 'numpy.lcm.reduce', 'np.lcm.reduce', (['denominator'], {}), '(denominator)\n', (1094, 1107), True, 'import numpy as np\n'), ((806, 825), 'numpy.eye', 'np.eye', (['n_transient'], {}), '(n_transient)\n', (812, 825), True, 'import numpy as np\n'), ((244, 263), 'numpy.arange', 'np.arange', (['n_states'], {}), '(n_states)\n', (253, 263), True, 'import numpy as np\n'), ((271, 290), 'numpy.arange', 'np.arange', (['n_states'], {}), '(n_states)\n', (280, 290), True, 'import numpy as np\n'), ((291, 311), 'numpy.logical_not', 'np.logical_not', (['mask'], {}), '(mask)\n', (305, 311), True, 'import numpy as np\n'), ((453, 463), 'numpy.sum', 'np.sum', (['mi'], {}), '(mi)\n', (459, 463), True, 'import numpy as np\n'), ((412, 424), 'numpy.array', 'np.array', (['mi'], {}), '(mi)\n', (420, 424), True, 'import numpy as np\n'), ((911, 933), 'fractions.Fraction', 'fractions.Fraction', (['si'], {}), '(si)\n', (929, 933), False, 'import fractions\n'), ((425, 435), 'numpy.sum', 'np.sum', (['mi'], {}), '(mi)\n', (431, 435), True, 'import numpy as np\n')]
|
from os import cpu_count
from bfio import BioReader, BioWriter, OmeXml
import argparse, logging
import numpy as np
from pathlib import Path
from cellpose import dynamics, utils
import torch
from concurrent.futures import ThreadPoolExecutor, wait, Future
import typing
""" Plugin Constants """
TILE_SIZE = 2048 # Largest chunk of an image to process
TILE_OVERLAP = 256 # Amount of overlap between tiles
NITER = 200 # Number of iterations to run flow dynamics
# Use a gpu if it's available
USE_GPU = torch.cuda.is_available()
if USE_GPU:
DEV = torch.device("cuda")
else:
DEV = torch.device("cpu")
# Initialize the logger
logging.basicConfig(format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s',
datefmt='%d-%b-%y %H:%M:%S')
logger = logging.getLogger("main")
logger.setLevel(logging.INFO)
def overlap(previous_values: np.ndarray,
current_values: np.ndarray,
tile: np.ndarray
) -> typing.Tuple[np.ndarray,list,list]:
"""Resolve label values between tiles
This function takes a row/column from the previous tile and a row/column
    from the current tile and finds labels that likely match. If labels
in the current tile should be replaced with labels from the previous tile,
the pixels in the current tile are removed from ``tile`` and the label value
and pixel coordinates of the label are stored in ``labels`` and ``indices``
respectively.
Args:
previous_values (np.ndarray): Previous tile edge values
current_values (np.ndarray): Current tile edge values
tile (np.ndarray): Current tile pixel values, flattened
Returns:
typing.Tuple[np.ndarray,np.ndarray,np.ndarray]: Returns the modified
tile with overlapping labels removed, a list of new labels, and a
list of indices associated with the new labels.
"""
# Get a list of unique values in the previous and current tiles
previous_labels = np.unique(previous_values)
    if previous_labels[0] == 0:
        previous_labels = previous_labels[1:]
current_labels = np.unique(current_values)
if current_labels[0] == 0:
current_labels = current_labels[1:]
# Initialize outputs
indices = []
labels = []
if previous_labels.size != 0 and current_labels.size != 0:
# Find overlapping indices
for label in current_labels:
new_labels,counts = np.unique(previous_values[current_values==label],return_counts=True)
if new_labels.size == 0:
continue
if new_labels[0] == 0:
new_labels = new_labels[1:]
counts = counts[1:]
if new_labels.size == 0:
continue
            # Get the most frequently occurring overlapping label
labels.append(new_labels[np.argmax(counts)])
# Add indices to output, remove pixel values from the tile
indices.append(np.argwhere(tile==label))
tile[indices[-1]] = 0
return tile, labels, indices
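# Illustration of overlap() with made-up values: if the previous tile's edge is
# [0, 7, 7] and the current tile's edge is [0, 3, 3], then current label 3
# overlaps previous label 7, so overlap() returns labels == [7], indices
# pointing at every pixel of the flattened tile equal to 3, and those pixels
# zeroed out in the returned tile.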
def mask_thread(coords: typing.Tuple[int,int,int],
file_path: Path,
bw: BioWriter,
cellprob_threshold: float,
flow_threshold: float,
dependency1: typing.Optional[Future],
dependency2: typing.Optional[Future]
) -> typing.Tuple[np.ndarray,np.ndarray,np.uint32]:
"""[summary]
Args:
coords: x,y,z starting coordinates of the tile to process
file_path: Vector field file path
bw: Output file
cellprob_threshold: Cell probability threshold
flow_threshold: Flow field threshold
dependency1: Tile future to the left of the current tile
dependency2: Tile future above the current tile
Returns:
typing.Tuple[np.ndarray,np.ndarray,list]: Returns the right column of
the processed tile, bottom row of the processed tile, and largest
label value.
"""
    # Calculate indices for the tile
x,y,z = coords
with BioReader(file_path) as br:
image_X = br.X
image_Y = br.Y
x_min = max([0, x - TILE_OVERLAP])
x_max = min([image_X, x + TILE_SIZE + TILE_OVERLAP])
y_min = max([0, y - TILE_OVERLAP])
y_max = min([image_Y, y + TILE_SIZE + TILE_OVERLAP])
tile = br[y_min:y_max, x_min:x_max, z:z + 1, :3, 0]
logger.debug('Calculating flows and masks for tile [{}:{},{}:{},{}:{}]'.format(y, y_max, x,
x_max, z, z + 1))
# Get flows and probabilities
cellprob = tile[:,:,0,0].squeeze()
dP = tile[:,:,0,1:].squeeze().transpose(2,0,1)
# Compute flows for the tile
p = dynamics.follow_flows(-1 * dP * (cellprob > cellprob_threshold) / 5.,
niter=NITER, interp=True,
use_gpu=USE_GPU,device=DEV)
mask = dynamics.get_masks(p, iscell=(cellprob>cellprob_threshold),
flows=dP, threshold=flow_threshold,
use_gpu=USE_GPU,device=DEV)
mask = utils.fill_holes_and_remove_small_masks(mask, min_size=15)
# reshape mask based on tile
x_overlap = x - x_min
x_min = x
x_max = min([image_X, x + TILE_SIZE])
y_overlap = y - y_min
y_min = y
y_max = min([image_Y, y + TILE_SIZE])
mask = mask[y_overlap:y_max - y_min + y_overlap,
x_overlap:x_max - x_min + x_overlap,
np.newaxis, np.newaxis, np.newaxis].astype(np.uint32)
""" Fix tile conflicts if image is large enough to require tiling """
# Get previously processed tiles if they exist
dependency1 = None if dependency1 is None else dependency1.result()
dependency2 = None if dependency2 is None else dependency2.result()
# Get offset to make labels consistent between tiles
offset = 0 if dependency1 is None else dependency1[2]
current_x = mask[:,0].squeeze()
current_y = mask[0,:].squeeze()
shape = mask.shape
mask = mask.reshape(-1)
# Resolve label conflicts along the left border
if x > 0:
mask, labels_x, indices_x = overlap(dependency1[0].squeeze(),current_x,mask)
if y > 0:
mask, labels_y, indices_y = overlap(dependency2[1].squeeze(),current_y,mask)
_, image = np.unique(mask, return_inverse=True)
image = image.astype(np.uint32)
image[image>0] = image[image>0] + offset
if x > 0:
for label,ind in zip(labels_x,indices_x):
if ind.size==0:
continue
image[ind] = label
if y > 0:
for label,ind in zip(labels_y,indices_y):
if ind.size==0:
continue
image[ind] = label
image = image.reshape(shape)
bw[y_min:y_max, x_min:x_max, z:z + 1, 0, 0] = image
return image[:,-1],image[-1,:],image.max()
def close_thread(dependency: Future,
bw: BioWriter):
""" Close an image once the final tile is written
Args:
dependency (Future): The final tile thread
        bw (BioWriter): The BioWriter to close
Returns:
Returns True when completed
"""
dependency.result()
bw.close()
return True
def main(inpDir: Path,
cellprob_threshold: float,
flow_threshold: float,
outDir: Path
) -> None:
# Get the list of files in path
files = [p for p in Path(inpDir).iterdir() if p.name.endswith('_flow.ome.zarr')]
num_threads = max([cpu_count()//2,1])
logger.info(f'Processing tiles with {num_threads} threads using {DEV}')
if len(files) == 0:
logger.critical('No flow files detected.')
quit()
processes = []
with ThreadPoolExecutor(num_threads) as executor:
# Loop through files in inpDir image collection and process
for ind,fpath in enumerate(files):
br = BioReader(fpath)
threads = np.empty((br.shape[:3]),dtype=object)
logger.debug(
'Processing image ({}/{}): {}'.format(ind, len(files),
fpath))
# TODO: Hard coding to ome.tif for now, this should be changed later.
path = Path(outDir).joinpath(fpath.name.replace('_flow.ome.zarr','.ome.tif'))
bw = BioWriter(file_path=Path(path), metadata=br.metadata)
bw.dtype=np.dtype(np.uint32)
bw.C = 1
bw.channel_names = ['label']
for z in range(0, br.Z, 1):
y_ind = None
dependency1 = None
for y in range(0, br.Y, TILE_SIZE):
for x in range(0, br.X, TILE_SIZE):
dependency2 = None if y_ind is None else threads[y_ind,x//TILE_SIZE,z]
processes.append(executor.submit(mask_thread,
(x,y,z),
fpath,bw,
cellprob_threshold,flow_threshold,
dependency1,dependency2))
dependency1 = processes[-1]
threads[y//TILE_SIZE,x//TILE_SIZE,z] = dependency1
y_ind = y//TILE_SIZE
executor.submit(close_thread,dependency1,bw)
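    # Check the submitted futures and log progress; calling result() on the
    # completed futures re-raises any exception thrown inside a tile thread
    # instead of letting it fail silently.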
done, not_done = wait(processes, 0)
logger.info(f'Percent complete: {100 * len(done) / len(processes):6.3f}%')
while len(not_done) > 0:
for r in done:
r.result()
done, not_done = wait(processes, 15)
logger.info(f'Percent complete: {100 * len(done) / len(processes):6.3f}%')
if __name__ == '__main__':
''' Argument parsing '''
logger.info("Parsing arguments...")
parser = argparse.ArgumentParser(prog='main', description='Cellpose parameters')
# Input arguments
parser.add_argument('--inpDir', dest='inpDir', type=str,
help='Input image collection to be processed by this plugin', required=True)
parser.add_argument('--flowThreshold', required=False,
default=0.8, type=float, help='flow error threshold, 0 turns off this optional QC step')
parser.add_argument('--cellprobThreshold', required=False,
default=0.0, type=float, help='cell probability threshold, centered at 0.0')
# Output arguments
parser.add_argument('--outDir', dest='outDir', type=str,
help='Output collection', required=True)
# Parse the arguments
args = parser.parse_args()
inpDir = Path(args.inpDir)
logger.info('inpDir = {}'.format(inpDir))
outDir = args.outDir
logger.info('outDir = {}'.format(outDir))
cellprob_threshold = args.cellprobThreshold
logger.info('cellprobThreshold = {}'.format(cellprob_threshold))
flow_threshold= args.flowThreshold
logger.info('flowThreshold = {}'.format(flow_threshold))
main(inpDir,
cellprob_threshold,
flow_threshold,
outDir)
|
[
"argparse.ArgumentParser",
"logging.basicConfig",
"numpy.argmax",
"numpy.empty",
"numpy.dtype",
"logging.getLogger",
"numpy.argwhere",
"bfio.BioReader",
"os.cpu_count",
"cellpose.utils.fill_holes_and_remove_small_masks",
"pathlib.Path",
"torch.cuda.is_available",
"cellpose.dynamics.get_masks",
"torch.device",
"concurrent.futures.wait",
"concurrent.futures.ThreadPoolExecutor",
"cellpose.dynamics.follow_flows",
"numpy.unique"
] |
[((512, 537), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (535, 537), False, 'import torch\n'), ((642, 767), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s"""', 'datefmt': '"""%d-%b-%y %H:%M:%S"""'}), "(format=\n '%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s', datefmt=\n '%d-%b-%y %H:%M:%S')\n", (661, 767), False, 'import argparse, logging\n'), ((787, 812), 'logging.getLogger', 'logging.getLogger', (['"""main"""'], {}), "('main')\n", (804, 812), False, 'import argparse, logging\n'), ((560, 580), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (572, 580), False, 'import torch\n'), ((597, 616), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (609, 616), False, 'import torch\n'), ((1999, 2025), 'numpy.unique', 'np.unique', (['previous_values'], {}), '(previous_values)\n', (2008, 2025), True, 'import numpy as np\n'), ((2130, 2155), 'numpy.unique', 'np.unique', (['current_values'], {}), '(current_values)\n', (2139, 2155), True, 'import numpy as np\n'), ((4882, 5011), 'cellpose.dynamics.follow_flows', 'dynamics.follow_flows', (['(-1 * dP * (cellprob > cellprob_threshold) / 5.0)'], {'niter': 'NITER', 'interp': '(True)', 'use_gpu': 'USE_GPU', 'device': 'DEV'}), '(-1 * dP * (cellprob > cellprob_threshold) / 5.0,\n niter=NITER, interp=True, use_gpu=USE_GPU, device=DEV)\n', (4903, 5011), False, 'from cellpose import dynamics, utils\n'), ((5082, 5210), 'cellpose.dynamics.get_masks', 'dynamics.get_masks', (['p'], {'iscell': '(cellprob > cellprob_threshold)', 'flows': 'dP', 'threshold': 'flow_threshold', 'use_gpu': 'USE_GPU', 'device': 'DEV'}), '(p, iscell=cellprob > cellprob_threshold, flows=dP,\n threshold=flow_threshold, use_gpu=USE_GPU, device=DEV)\n', (5100, 5210), False, 'from cellpose import dynamics, utils\n'), ((5277, 5335), 'cellpose.utils.fill_holes_and_remove_small_masks', 'utils.fill_holes_and_remove_small_masks', (['mask'], {'min_size': '(15)'}), '(mask, min_size=15)\n', (5316, 5335), False, 'from cellpose import dynamics, utils\n'), ((6536, 6572), 'numpy.unique', 'np.unique', (['mask'], {'return_inverse': '(True)'}), '(mask, return_inverse=True)\n', (6545, 6572), True, 'import numpy as np\n'), ((10282, 10353), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""main"""', 'description': '"""Cellpose parameters"""'}), "(prog='main', description='Cellpose parameters')\n", (10305, 10353), False, 'import argparse, logging\n'), ((11108, 11125), 'pathlib.Path', 'Path', (['args.inpDir'], {}), '(args.inpDir)\n', (11112, 11125), False, 'from pathlib import Path\n'), ((4220, 4240), 'bfio.BioReader', 'BioReader', (['file_path'], {}), '(file_path)\n', (4229, 4240), False, 'from bfio import BioReader, BioWriter, OmeXml\n'), ((7987, 8018), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', (['num_threads'], {}), '(num_threads)\n', (8005, 8018), False, 'from concurrent.futures import ThreadPoolExecutor, wait, Future\n'), ((9840, 9858), 'concurrent.futures.wait', 'wait', (['processes', '(0)'], {}), '(processes, 0)\n', (9844, 9858), False, 'from concurrent.futures import ThreadPoolExecutor, wait, Future\n'), ((2484, 2555), 'numpy.unique', 'np.unique', (['previous_values[current_values == label]'], {'return_counts': '(True)'}), '(previous_values[current_values == label], return_counts=True)\n', (2493, 2555), True, 'import numpy as np\n'), ((8170, 8186), 'bfio.BioReader', 'BioReader', (['fpath'], {}), '(fpath)\n', (8179, 
8186), False, 'from bfio import BioReader, BioWriter, OmeXml\n'), ((8226, 8262), 'numpy.empty', 'np.empty', (['br.shape[:3]'], {'dtype': 'object'}), '(br.shape[:3], dtype=object)\n', (8234, 8262), True, 'import numpy as np\n'), ((8718, 8737), 'numpy.dtype', 'np.dtype', (['np.uint32'], {}), '(np.uint32)\n', (8726, 8737), True, 'import numpy as np\n'), ((10060, 10079), 'concurrent.futures.wait', 'wait', (['processes', '(15)'], {}), '(processes, 15)\n', (10064, 10079), False, 'from concurrent.futures import ThreadPoolExecutor, wait, Future\n'), ((3085, 3111), 'numpy.argwhere', 'np.argwhere', (['(tile == label)'], {}), '(tile == label)\n', (3096, 3111), True, 'import numpy as np\n'), ((7764, 7775), 'os.cpu_count', 'cpu_count', ([], {}), '()\n', (7773, 7775), False, 'from os import cpu_count\n'), ((2954, 2971), 'numpy.argmax', 'np.argmax', (['counts'], {}), '(counts)\n', (2963, 2971), True, 'import numpy as np\n'), ((7675, 7687), 'pathlib.Path', 'Path', (['inpDir'], {}), '(inpDir)\n', (7679, 7687), False, 'from pathlib import Path\n'), ((8554, 8566), 'pathlib.Path', 'Path', (['outDir'], {}), '(outDir)\n', (8558, 8566), False, 'from pathlib import Path\n'), ((8663, 8673), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (8667, 8673), False, 'from pathlib import Path\n')]
|
import cv2
from numpy import pi, cos, sin
from pprint import pprint
from statistics import median
from sys import exit
def hough_transform(edges, img=None, thresh=110):
"""Get contour lines of the receipt in polar coords(r, t)
using a thresh for the hough accumulator and calculate cartesian coords(x, y).
"""
    lines = cv2.HoughLines(edges, 1, pi/180, thresh)
    if lines is None:
        # cv2.HoughLines returns None when no lines pass the threshold
        return []
    for x in range(0, len(lines)):
for rho, theta in lines[x]:
a = cos(theta)
b = sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b))
y1 = int(y0 + 1000*(a))
x2 = int(x0 - 1000*(-b))
y2 = int(y0 - 1000*(a))
# Draw the line if the img parameter is given
if img is not None:
cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
return lines
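# Example usage (illustrative only -- the file name and Canny thresholds are placeholders):
#   img = cv2.imread('receipt.jpg')
#   gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#   edges = cv2.Canny(gray, 50, 150)
#   lines = hough_transform(edges, img, thresh=110)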
def get_median_lines(intersections, width, height, img):
"""Calculate the median lines from intersections lines
sorting the intersections list in 4 groups for x(x1,..,x4)
and 4 groups for y(y1,..,y4).
"""
jmp_val = 50 # value used to differentiate groups
first = second = first_p = second_p = third_p = forth_p = None
intersections.sort(key = lambda x: x[1])
pprint(intersections)
for i, x in enumerate(intersections):
if i+1<len(intersections):
if intersections[i][0]>width or intersections[i][1]>height:
continue
if (intersections[i][1] + jmp_val) <= intersections[i+1][1]:
first = intersections[:i+1]
second = intersections[i+1:]
if first is None or second is None:
print('Error: must adjust jmp_val or thresh from hough_transform')
exit()
first.sort(key = lambda x: x[0])
for i, x in enumerate(first):
if i+1<len(first):
if (first[i][0] + jmp_val) <= first[i+1][0]:
first_p = first[:i+1]
second_p = first[i+1:]
second.sort(key = lambda x: x[0])
for i, x in enumerate(second):
if i+1<len(second):
if (second[i][0] + jmp_val) <= second[i+1][0]:
third_p = second[:i+1]
forth_p = second[i+1:]
    if not all([first_p, second_p, third_p, forth_p]):
print('Error: must adjust jmp_val or thresh from hough_transform')
exit()
x1 = int(median([p[0] for p in first_p]))
x2 = int(median([p[0] for p in second_p]))
x3 = int(median([p[0] for p in third_p]))
x4 = int(median([p[0] for p in forth_p]))
# print(x1, x2, x3, x4)
first.sort(key = lambda x: x[1])
for i, x in enumerate(first):
if i+1<len(first):
if (first[i][0] + jmp_val) <= first[i+1][0]:
first_p = first[:i+1]
second_p = first[i+1:]
second.sort(key = lambda x: x[1])
for i, x in enumerate(second):
if i+1<len(second):
if (second[i][1] + jmp_val) <= second[i+1][1]:
third_p = second[:i+1]
forth_p = second[i+1:]
y1 = int(median([p[1] for p in first_p]))
y2 = int(median([p[1] for p in second_p]))
y3 = int(median([p[1] for p in third_p]))
y4 = int(median([p[1] for p in forth_p]))
# print(y1, y2, y3, y4)
img[int(y1), int(x1)] = [0, 255, 0]
img[int(y2), int(x2)] = [0, 255, 0]
img[int(y3), int(x3)] = [0, 255, 0]
img[int(y4), int(x4)] = [0, 255, 0]
cv2.line(img, (x1, y1), (x2, y2), (255, 0, 0), 2)
cv2.line(img, (x1, y1), (x3, y3), (255, 0, 0), 2)
cv2.line(img, (x2, y2), (x4, y4), (255, 0, 0), 2)
cv2.line(img, (x3, y3), (x4, y4), (255, 0, 0), 2)
|
[
"cv2.line",
"statistics.median",
"numpy.sin",
"cv2.HoughLines",
"pprint.pprint",
"numpy.cos",
"sys.exit"
] |
[((337, 379), 'cv2.HoughLines', 'cv2.HoughLines', (['edges', '(1)', '(pi / 180)', 'thresh'], {}), '(edges, 1, pi / 180, thresh)\n', (351, 379), False, 'import cv2\n'), ((1266, 1287), 'pprint.pprint', 'pprint', (['intersections'], {}), '(intersections)\n', (1272, 1287), False, 'from pprint import pprint\n'), ((3448, 3497), 'cv2.line', 'cv2.line', (['img', '(x1, y1)', '(x2, y2)', '(255, 0, 0)', '(2)'], {}), '(img, (x1, y1), (x2, y2), (255, 0, 0), 2)\n', (3456, 3497), False, 'import cv2\n'), ((3502, 3551), 'cv2.line', 'cv2.line', (['img', '(x1, y1)', '(x3, y3)', '(255, 0, 0)', '(2)'], {}), '(img, (x1, y1), (x3, y3), (255, 0, 0), 2)\n', (3510, 3551), False, 'import cv2\n'), ((3556, 3605), 'cv2.line', 'cv2.line', (['img', '(x2, y2)', '(x4, y4)', '(255, 0, 0)', '(2)'], {}), '(img, (x2, y2), (x4, y4), (255, 0, 0), 2)\n', (3564, 3605), False, 'import cv2\n'), ((3610, 3659), 'cv2.line', 'cv2.line', (['img', '(x3, y3)', '(x4, y4)', '(255, 0, 0)', '(2)'], {}), '(img, (x3, y3), (x4, y4), (255, 0, 0), 2)\n', (3618, 3659), False, 'import cv2\n'), ((1749, 1755), 'sys.exit', 'exit', ([], {}), '()\n', (1753, 1755), False, 'from sys import exit\n'), ((2372, 2378), 'sys.exit', 'exit', ([], {}), '()\n', (2376, 2378), False, 'from sys import exit\n'), ((2393, 2424), 'statistics.median', 'median', (['[p[0] for p in first_p]'], {}), '([p[0] for p in first_p])\n', (2399, 2424), False, 'from statistics import median\n'), ((2439, 2471), 'statistics.median', 'median', (['[p[0] for p in second_p]'], {}), '([p[0] for p in second_p])\n', (2445, 2471), False, 'from statistics import median\n'), ((2486, 2517), 'statistics.median', 'median', (['[p[0] for p in third_p]'], {}), '([p[0] for p in third_p])\n', (2492, 2517), False, 'from statistics import median\n'), ((2532, 2563), 'statistics.median', 'median', (['[p[0] for p in forth_p]'], {}), '([p[0] for p in forth_p])\n', (2538, 2563), False, 'from statistics import median\n'), ((3080, 3111), 'statistics.median', 'median', (['[p[1] for p in first_p]'], {}), '([p[1] for p in first_p])\n', (3086, 3111), False, 'from statistics import median\n'), ((3126, 3158), 'statistics.median', 'median', (['[p[1] for p in second_p]'], {}), '([p[1] for p in second_p])\n', (3132, 3158), False, 'from statistics import median\n'), ((3173, 3204), 'statistics.median', 'median', (['[p[1] for p in third_p]'], {}), '([p[1] for p in third_p])\n', (3179, 3204), False, 'from statistics import median\n'), ((3219, 3250), 'statistics.median', 'median', (['[p[1] for p in forth_p]'], {}), '([p[1] for p in forth_p])\n', (3225, 3250), False, 'from statistics import median\n'), ((466, 476), 'numpy.cos', 'cos', (['theta'], {}), '(theta)\n', (469, 476), False, 'from numpy import pi, cos, sin\n'), ((493, 503), 'numpy.sin', 'sin', (['theta'], {}), '(theta)\n', (496, 503), False, 'from numpy import pi, cos, sin\n'), ((803, 852), 'cv2.line', 'cv2.line', (['img', '(x1, y1)', '(x2, y2)', '(0, 0, 255)', '(2)'], {}), '(img, (x1, y1), (x2, y2), (0, 0, 255), 2)\n', (811, 852), False, 'import cv2\n')]
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from sklearn.model_selection import train_test_split
def create_validation_split(X, y, grouplabels, test_size, random_seed=45):
"""
:param X: Features matrix
:param y: label matrix (column vector)
:param grouplabels: numpy array denoting a groups index for each sample point
:param test_size: proportion of each groups (and thus overall data) to be witheld for validation
:param random_seed: random state for sklearns train/test split
:return: X_train, X_test, y_train, y_test, grouplabels_train, grouplabels_test
To create the validation data, we need an even split from each of the groups. We will create an
individual matrix of features and labels (X and y) for each of the groups individually and perform a train/test
split on them. After we have the train/test component of each groups's data, we can simply concatenate
(vertically stack) the feature matrices for each groups and the label matrices for each groups giving us a balanced
train/test split across the entire dataset. We also recompute groups labels array to match reconcatened split.
"""
num_group_types = grouplabels.shape[0]
    # Default, single group type case
if num_group_types == 1:
grouplabels = grouplabels[0]
numgroups = np.size(np.unique(grouplabels))
        # Each of these 'pieces' is the training or testing portion of a specific group, to be combined later
X_train_pieces = []
y_train_pieces = []
X_test_pieces = []
y_test_pieces = []
grouplabels_train = []
grouplabels_test = []
        # Create an array to store the index arrays for each group so we do not have to continually recompute them
index = [np.array([]) for _ in range(numgroups)]
for g in range(0, numgroups):
index[g] = np.where(grouplabels == g)
for g in range(numgroups):
            # Perform the train test split of the desired size on this particular group
X_train_curr, X_test_curr, y_train_curr, y_test_curr = \
train_test_split(X[index[g]], y[index[g]], test_size=test_size, random_state=random_seed)
            # Append the matrix portions for this group onto the appropriate python lists
X_train_pieces.append(X_train_curr)
X_test_pieces.append(X_test_curr)
y_train_pieces.append(y_train_curr)
y_test_pieces.append(y_test_curr)
# Assert that we have the same number of rows for X and y
assert X_train_curr.shape[0] == y_train_curr.shape[0]
assert X_test_curr.shape[0] == y_test_curr.shape[0]
# Add the appropriate grouplabels in preparation for the matrices that will be constructed (groups in order)
grouplabels_train.extend([g] * X_train_curr.shape[0]) # python short-hand to add many `g`s to the list
grouplabels_test.extend([g] * X_test_curr.shape[0]) # python short-hand to add many `g`s to the list
# Once we have all the pieces off the 4 matrices, all we have left to do is vertically stack them
X_train = np.concatenate(X_train_pieces, axis=0)
X_test = np.concatenate(X_test_pieces, axis=0)
y_train = np.concatenate(y_train_pieces, axis=0)
y_test = np.concatenate(y_test_pieces, axis=0)
# Assert that we still have the same number of features
assert X_train.shape[1] == X.shape[1]
assert X_test.shape[1] == X.shape[1]
grouplabels_train = np.expand_dims(np.array(grouplabels_train), axis=0)
grouplabels_test = np.expand_dims(np.array(grouplabels_test), axis=0)
return X_train, X_test, y_train, y_test, grouplabels_train, grouplabels_test
# Multi groups split
else:
# Return a random split over the training data
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=test_size, random_state=random_seed)
grouplabels_train_T, grouplabels_test_T = \
train_test_split(grouplabels.T, test_size=test_size, random_state=random_seed)
# print(grouplabels_train_T.T)
# print(grouplabels_train_T.T.shape)
# print(grouplabels_test_T.T)
# print(grouplabels_test_T.T.shape)
# Ensure that taking the transpose worked out fine
assert grouplabels_train_T.T.shape[0] == grouplabels_test_T.T.shape[0] == grouplabels.shape[0]
assert grouplabels_train_T.T.shape[1] + grouplabels_test_T.T.shape[1] == grouplabels.shape[1]
return X_train, X_test, y_train, y_test, grouplabels_train_T.T, grouplabels_test_T.T
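# Example usage (illustrative shapes and values only):
#   X = np.random.rand(100, 5)
#   y = np.random.randint(0, 2, size=(100, 1))
#   grouplabels = np.random.randint(0, 3, size=(1, 100))  # one group type, 3 groups
#   X_tr, X_te, y_tr, y_te, g_tr, g_te = create_validation_split(
#       X, y, grouplabels, test_size=0.2)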
|
[
"numpy.concatenate",
"sklearn.model_selection.train_test_split",
"numpy.where",
"numpy.array",
"numpy.unique"
] |
[((3235, 3273), 'numpy.concatenate', 'np.concatenate', (['X_train_pieces'], {'axis': '(0)'}), '(X_train_pieces, axis=0)\n', (3249, 3273), True, 'import numpy as np\n'), ((3291, 3328), 'numpy.concatenate', 'np.concatenate', (['X_test_pieces'], {'axis': '(0)'}), '(X_test_pieces, axis=0)\n', (3305, 3328), True, 'import numpy as np\n'), ((3347, 3385), 'numpy.concatenate', 'np.concatenate', (['y_train_pieces'], {'axis': '(0)'}), '(y_train_pieces, axis=0)\n', (3361, 3385), True, 'import numpy as np\n'), ((3403, 3440), 'numpy.concatenate', 'np.concatenate', (['y_test_pieces'], {'axis': '(0)'}), '(y_test_pieces, axis=0)\n', (3417, 3440), True, 'import numpy as np\n'), ((3990, 4059), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'test_size', 'random_state': 'random_seed'}), '(X, y, test_size=test_size, random_state=random_seed)\n', (4006, 4059), False, 'from sklearn.model_selection import train_test_split\n'), ((4124, 4202), 'sklearn.model_selection.train_test_split', 'train_test_split', (['grouplabels.T'], {'test_size': 'test_size', 'random_state': 'random_seed'}), '(grouplabels.T, test_size=test_size, random_state=random_seed)\n', (4140, 4202), False, 'from sklearn.model_selection import train_test_split\n'), ((1417, 1439), 'numpy.unique', 'np.unique', (['grouplabels'], {}), '(grouplabels)\n', (1426, 1439), True, 'import numpy as np\n'), ((1851, 1863), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1859, 1863), True, 'import numpy as np\n'), ((1952, 1978), 'numpy.where', 'np.where', (['(grouplabels == g)'], {}), '(grouplabels == g)\n', (1960, 1978), True, 'import numpy as np\n'), ((2189, 2282), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X[index[g]]', 'y[index[g]]'], {'test_size': 'test_size', 'random_state': 'random_seed'}), '(X[index[g]], y[index[g]], test_size=test_size,\n random_state=random_seed)\n', (2205, 2282), False, 'from sklearn.model_selection import train_test_split\n'), ((3641, 3668), 'numpy.array', 'np.array', (['grouplabels_train'], {}), '(grouplabels_train)\n', (3649, 3668), True, 'import numpy as np\n'), ((3720, 3746), 'numpy.array', 'np.array', (['grouplabels_test'], {}), '(grouplabels_test)\n', (3728, 3746), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# encoding: utf-8
"""An asymmetric SOM. This type of SOM doesn't use a grid, but the nodes are
freely positioned on a plane.
"""
from collections import UserList
from math import exp
import random
from random import choice
from scipy.spatial import Voronoi, voronoi_plot_2d
from .som import SOM, Topology, Node, normalize
import numpy as np
import matplotlib.pyplot as plt
class ASOM(SOM):
def __init__(self, data, width, height, topology_kwargs={}, **kwargs):
"""Initializes a new ASOM object.
:data: should be a list of numerical vectors
:width and :height: should be the dimensions of the initial grid
:init_variance: the initial variance of the gaussian distribution of
the neighbourhood function.
:toroidal: whether to use a torus or a plane
For other parameters, check the SOM documentation.
"""
toroidal = topology_kwargs.pop('toroidal', False)
codebook_class = Torus if toroidal else Plane
codebook = codebook_class(data, width, height, **topology_kwargs)
super().__init__(data, codebook, **kwargs)
def voronoi_plot(self):
"""Shows a representation of the SOM where the location of the nodes
are marked and a Voronoi tesselation is made with this points."""
centroids, voronoi = self.codebook.voronoi
voronoi_plot_2d(voronoi)
for node in self.codebook:
plt.text(node.x, node.y, '%.1f,%.1f,%.1f' % tuple(node.vector),
horizontalalignment='center', verticalalignment='center')
plt.title('Voronoi plot')
plt.show()
def color_plot(self):
"""Same as voronoi_plot, but assuming that the data is 3-dimensional,
gives the regions the color corresponding to the weight vectors.
"""
assert self.data_vector_size == 3
centroids, vor = self.codebook.voronoi
regions, vertices = voronoi_finite_polygons(vor)
for node, region in zip(self.codebook, regions):
polygon = vertices[region]
plt.fill(*zip(*polygon), color=node.vector)
plt.plot([x[0] for x in centroids], [x[1] for x in centroids], 'ko')
plt.axis('equal')
plt.xlim(vor.min_bound[0] - 0.1, vor.max_bound[0] + 0.1)
plt.ylim(vor.min_bound[1] - 0.1, vor.max_bound[1] + 0.1)
plt.title('Color plot')
plt.show()
def label_plot(self):
"""Some as voronoi plot, but if there are class labels available for
the data, we plot the SOM with labels on the nodes where this class is
the most frequent.
"""
assert self.labels is not None
centroids, vor = self.codebook.voronoi
regions, vertices = voronoi_finite_polygons(vor)
normalized_codebook = normalize(node.vector for node in self.codebook)
for codebook_vector, region in zip(normalized_codebook, regions):
polygon = vertices[region]
plt.fill(*zip(*polygon), color=codebook_vector[:3] + [.6])
xs, ys = zip(*centroids)
plt.plot(xs, ys, 'ko', ms=1)
plt.axis('equal')
plt.xlim(vor.min_bound[0] - 0.1, vor.max_bound[0] + 0.1)
plt.ylim(vor.min_bound[1] - 0.1, vor.max_bound[1] + 0.1)
for label in set(self.labels) - set([None]):
class_node = max(self.codebook, key=lambda node: node.labels[label])
plt.text(class_node.x, class_node.y, label,
horizontalalignment='center', verticalalignment='center')
plt.title('Voronoi label plot')
plt.show()
class PlaneNode(Node):
"""A node on the plane."""
def __init__(self, x, y, vector, push=.2, inhibition=15, **kwargs):
super().__init__(vector, **kwargs)
self.x, self.y = x, y
self.push = push
self.inhibition = inhibition
def update(self, learning_rate, influence, input_vector, bmu):
"""The update rule is extended to also influence the position of the
node on the plane. We only want this after a certain while though.
"""
super().update(learning_rate, influence, input_vector, bmu)
self.update_position(learning_rate, influence, bmu)
def update_position(self, learning_rate, influence, bmu):
"""Updates the position of the node on the plane."""
factor = learning_rate * (influence - self.push) / self.inhibition
self.x = self.x + factor * (bmu.x - self.x)
self.y = self.y + factor * (bmu.y - self.y)
def location(self):
"""Returns the coordinates of the node on the plane as a tuple."""
return (self.x, self.y)
def __repr__(self):
"""Representation: 'x,y (vector)'"""
return '%f,%f (%s)' % (self.x, self.y, self.vector)
class Plane(Topology, UserList):
"""A 2D topology where the coordinates of the nodes are not bound to a
grid.
"""
NODE_CLASS = PlaneNode
def __init__(self, data, width, height, init_variance=.5, **kwargs):
"""`width * height` nodes are generated."""
def new_node(x, y):
return self.NODE_CLASS(x, y, choice(data), **kwargs)
real_list = list(self._generate_nodes(width * height, new_node))
super().__init__(real_list)
self.init_variance = init_variance
self._voronoi = None
def _generate_nodes(self, n, new_node):
"""Generates nodes randomly distributed in a circle with radius .5
around (.5, .5).
"""
i = 0
while i < n:
x, y = random.random(), random.random()
if (x - .5) ** 2 + (y - .5) ** 2 < .5 ** 2:
yield new_node(x, y)
i += 1
def __iter__(self):
return iter(self.data)
def neighbourhood(self, node1, node2, t):
"""Calculates the neighbourhood influence using a Gaussian
distribution.
"""
return self._gaussian(node1, node2, t)
# M-SOM
# return max(0, 1 - self.plane_distance_squared(node1, node2))
def _gaussian(self, node1, node2, t):
"""Calculates the neighbourhood influence using a Gaussian
distribution.
"""
dist_sq = self.plane_distance_squared(node1, node2)
variance = self._gaussian_variance(t)
return exp(-dist_sq / (2 * variance * variance))
def _gaussian_variance(self, t):
"""A decreasing function for the variance of the gaussian distribution.
"""
# return self.init_variance * (1 - t)
return self.init_variance / (1 + t)
# return self.init_variance ** (-t + 1)
# return self.init_variance * (.001 / self.init_variance) ** t
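        # With the default init_variance of .5 this gives a variance of .5 at
        # t = 0 and .25 at t = 1, so the neighbourhood shrinks as training
        # progresses.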
def plane_distance_squared(self, node1, node2):
"""Calculates the distance between two nodes on the plane."""
return (node1.x - node2.x) ** 2 + (node1.y - node2.y) ** 2
@property
def voronoi(self):
"""Wrapper around self._voronoi to make sure we don't calculate it
multiple times unnecessarily.
"""
if self._voronoi is None:
self._calculate_voronoi()
return self._voronoi[:2]
def _calculate_voronoi(self):
"""Calculates the Voronoi tesselation of the nodes."""
centroids = [node.location() for node in self]
vor = Voronoi(centroids)
self._voronoi = (centroids, vor, voronoi_finite_polygons(vor))
def are_neighbours(self, node1, node2):
"""Checks whether two nodes are neighbouring in the Voronoi
tesselation.
"""
# Calculate the finite regions of the nodes in the voronoi tesselation
self._calculate_voronoi()
regions, _ = self._voronoi[2]
nodes = list(self)
region1 = regions[nodes.index(node1)]
region2 = regions[nodes.index(node2)]
# Check if regions have borders in common
def ridges(region):
n = len(region)
ridges = {(region[i], region[(i + 1) % n]) for i in range(n)}
return ridges | {(y, x) for x, y in ridges}
ridges_in_common = ridges(region1) & ridges(region2)
return len(ridges_in_common) > 0
class TorusNode(PlaneNode):
"""A Node implementation for the Torus topology."""
def update_position(self, learning_rate, influence, bmu):
"""Updates the position of the node on the torus."""
factor = learning_rate * (influence - self.push) / self.inhibition
self.x = self.lin_comb(self.x, bmu.x, factor)
self.y = self.lin_comb(self.y, bmu.y, factor)
@staticmethod
def lin_comb(a, b, factor):
if b < a:
a, b = b, a
factor = 1 - factor
if abs(a - b) > abs(a + 1 - b):
a += 1
c = ((1 - factor) * a + factor * b) % 1
return c
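    # Worked examples of lin_comb: lin_comb(0.2, 0.4, 0.5) gives roughly 0.3
    # (no wrap), while lin_comb(0.9, 0.1, 0.5) gives roughly 0.0, i.e. the
    # midpoint taken across the wrap-around of the unit torus rather than
    # through the interior.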
class Torus(Plane):
"""An extension of the plane using a torus."""
NODE_CLASS = TorusNode
def plane_distance_squared(self, node1, node2):
dx = abs(node1.x - node2.x)
dy = abs(node1.y - node2.y)
return min(dx, 1 - dx) ** 2 + min(dy, 1 - dy) ** 2
# Adapted from common code of <NAME>
# (https://gist.github.com/pv/8036995)
def voronoi_finite_polygons(vor, radius=None):
"""Reconstruct infinite voronoi regions in a 2D diagram to finite regions.
:vor: Voronoi diagram
:radius: (optional) distance to 'points at infinity'
Returns
:regions: Indices of vertices in each revised Voronoi regions
:vertices: Coordinates for revised Voronoi vertices. Same as coordinates of
input vertices, with 'points at infinity' appended to the end.
"""
if vor.points.shape[1] != 2:
raise ValueError("Requires 2D input")
new_regions = []
new_vertices = vor.vertices.tolist()
center = vor.points.mean(axis=0)
if radius is None:
radius = vor.points.ptp().max() * 2
# Construct a map containing all ridges for a given point
all_ridges = {}
for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices):
all_ridges.setdefault(p1, []).append((p2, v1, v2))
all_ridges.setdefault(p2, []).append((p1, v1, v2))
# Reconstruct infinite regions
for p1, region in enumerate(vor.point_region):
vertices = vor.regions[region]
if all(v >= 0 for v in vertices):
# Finite region
new_regions.append(vertices)
continue
# Reconstruct a non-finite region
ridges = all_ridges[p1]
new_region = [v for v in vertices if v >= 0]
for p2, v1, v2 in ridges:
if v2 < 0:
v1, v2 = v2, v1
if v1 >= 0:
# Finite ridge: already in the region
continue
# Compute the missing endpoint of an infinite ridge
t = vor.points[p2] - vor.points[p1] # tangent
t /= np.linalg.norm(t)
n = np.array([-t[1], t[0]]) # normal
midpoint = vor.points[[p1, p2]].mean(axis=0)
direction = np.sign(np.dot(midpoint - center, n)) * n
far_point = vor.vertices[v2] + direction * radius
new_region.append(len(new_vertices))
new_vertices.append(far_point.tolist())
# Sort region counterclockwise
vs = np.asarray([new_vertices[v] for v in new_region])
c = vs.mean(axis=0)
angles = np.arctan2(vs[:, 1] - c[1], vs[:, 0] - c[0])
new_region = np.array(new_region)[np.argsort(angles)]
# Finish
new_regions.append(new_region.tolist())
return new_regions, np.asarray(new_vertices)
|
[
"matplotlib.pyplot.title",
"scipy.spatial.voronoi_plot_2d",
"matplotlib.pyplot.xlim",
"math.exp",
"matplotlib.pyplot.show",
"numpy.arctan2",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"numpy.asarray",
"matplotlib.pyplot.axis",
"scipy.spatial.Voronoi",
"random.choice",
"matplotlib.pyplot.text",
"numpy.argsort",
"random.random",
"numpy.linalg.norm",
"numpy.array",
"numpy.dot"
] |
[((1379, 1403), 'scipy.spatial.voronoi_plot_2d', 'voronoi_plot_2d', (['voronoi'], {}), '(voronoi)\n', (1394, 1403), False, 'from scipy.spatial import Voronoi, voronoi_plot_2d\n'), ((1604, 1629), 'matplotlib.pyplot.title', 'plt.title', (['"""Voronoi plot"""'], {}), "('Voronoi plot')\n", (1613, 1629), True, 'import matplotlib.pyplot as plt\n'), ((1638, 1648), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1646, 1648), True, 'import matplotlib.pyplot as plt\n'), ((2147, 2215), 'matplotlib.pyplot.plot', 'plt.plot', (['[x[0] for x in centroids]', '[x[1] for x in centroids]', '"""ko"""'], {}), "([x[0] for x in centroids], [x[1] for x in centroids], 'ko')\n", (2155, 2215), True, 'import matplotlib.pyplot as plt\n'), ((2224, 2241), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (2232, 2241), True, 'import matplotlib.pyplot as plt\n'), ((2250, 2306), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(vor.min_bound[0] - 0.1)', '(vor.max_bound[0] + 0.1)'], {}), '(vor.min_bound[0] - 0.1, vor.max_bound[0] + 0.1)\n', (2258, 2306), True, 'import matplotlib.pyplot as plt\n'), ((2315, 2371), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(vor.min_bound[1] - 0.1)', '(vor.max_bound[1] + 0.1)'], {}), '(vor.min_bound[1] - 0.1, vor.max_bound[1] + 0.1)\n', (2323, 2371), True, 'import matplotlib.pyplot as plt\n'), ((2381, 2404), 'matplotlib.pyplot.title', 'plt.title', (['"""Color plot"""'], {}), "('Color plot')\n", (2390, 2404), True, 'import matplotlib.pyplot as plt\n'), ((2413, 2423), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2421, 2423), True, 'import matplotlib.pyplot as plt\n'), ((3095, 3123), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'ys', '"""ko"""'], {'ms': '(1)'}), "(xs, ys, 'ko', ms=1)\n", (3103, 3123), True, 'import matplotlib.pyplot as plt\n'), ((3132, 3149), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (3140, 3149), True, 'import matplotlib.pyplot as plt\n'), ((3158, 3214), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(vor.min_bound[0] - 0.1)', '(vor.max_bound[0] + 0.1)'], {}), '(vor.min_bound[0] - 0.1, vor.max_bound[0] + 0.1)\n', (3166, 3214), True, 'import matplotlib.pyplot as plt\n'), ((3223, 3279), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(vor.min_bound[1] - 0.1)', '(vor.max_bound[1] + 0.1)'], {}), '(vor.min_bound[1] - 0.1, vor.max_bound[1] + 0.1)\n', (3231, 3279), True, 'import matplotlib.pyplot as plt\n'), ((3559, 3590), 'matplotlib.pyplot.title', 'plt.title', (['"""Voronoi label plot"""'], {}), "('Voronoi label plot')\n", (3568, 3590), True, 'import matplotlib.pyplot as plt\n'), ((3599, 3609), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3607, 3609), True, 'import matplotlib.pyplot as plt\n'), ((6318, 6359), 'math.exp', 'exp', (['(-dist_sq / (2 * variance * variance))'], {}), '(-dist_sq / (2 * variance * variance))\n', (6321, 6359), False, 'from math import exp\n'), ((7324, 7342), 'scipy.spatial.Voronoi', 'Voronoi', (['centroids'], {}), '(centroids)\n', (7331, 7342), False, 'from scipy.spatial import Voronoi, voronoi_plot_2d\n'), ((11280, 11329), 'numpy.asarray', 'np.asarray', (['[new_vertices[v] for v in new_region]'], {}), '([new_vertices[v] for v in new_region])\n', (11290, 11329), True, 'import numpy as np\n'), ((11375, 11419), 'numpy.arctan2', 'np.arctan2', (['(vs[:, 1] - c[1])', '(vs[:, 0] - c[0])'], {}), '(vs[:, 1] - c[1], vs[:, 0] - c[0])\n', (11385, 11419), True, 'import numpy as np\n'), ((11573, 11597), 'numpy.asarray', 'np.asarray', (['new_vertices'], {}), '(new_vertices)\n', (11583, 11597), True, 
'import numpy as np\n'), ((3427, 3532), 'matplotlib.pyplot.text', 'plt.text', (['class_node.x', 'class_node.y', 'label'], {'horizontalalignment': '"""center"""', 'verticalalignment': '"""center"""'}), "(class_node.x, class_node.y, label, horizontalalignment='center',\n verticalalignment='center')\n", (3435, 3532), True, 'import matplotlib.pyplot as plt\n'), ((10871, 10888), 'numpy.linalg.norm', 'np.linalg.norm', (['t'], {}), '(t)\n', (10885, 10888), True, 'import numpy as np\n'), ((10905, 10928), 'numpy.array', 'np.array', (['[-t[1], t[0]]'], {}), '([-t[1], t[0]])\n', (10913, 10928), True, 'import numpy as np\n'), ((11441, 11461), 'numpy.array', 'np.array', (['new_region'], {}), '(new_region)\n', (11449, 11461), True, 'import numpy as np\n'), ((11462, 11480), 'numpy.argsort', 'np.argsort', (['angles'], {}), '(angles)\n', (11472, 11480), True, 'import numpy as np\n'), ((5150, 5162), 'random.choice', 'choice', (['data'], {}), '(data)\n', (5156, 5162), False, 'from random import choice\n'), ((5566, 5581), 'random.random', 'random.random', ([], {}), '()\n', (5579, 5581), False, 'import random\n'), ((5583, 5598), 'random.random', 'random.random', ([], {}), '()\n', (5596, 5598), False, 'import random\n'), ((11029, 11057), 'numpy.dot', 'np.dot', (['(midpoint - center)', 'n'], {}), '(midpoint - center, n)\n', (11035, 11057), True, 'import numpy as np\n')]
|
import collections
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.rnn import dynamic_rnn
from tensorflow.contrib.rnn import BasicLSTMCell
from helpers import FileLogger
from ml_utils import create_adam_optimizer
from ml_utils import create_weight_variable
from phased_lstm import PhasedLSTMCell
from sanitycheck.constants import *
from sanitycheck.data_reader import next_batch
def get_placeholders():
return tf.placeholder('float32', [BATCH_SIZE, SEQUENCE_LENGTH, 2 if ADD_TIME_INPUTS else 1]), tf.placeholder(
'float32', [BATCH_SIZE, 1])
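# run_experiment trains a single recurrent cell (PhasedLSTMCell when time is fed
# as an extra input, otherwise BasicLSTMCell) to regress a scalar target per
# sequence, logging the running training loss next to the benchmark loss of
# always predicting 0.5.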
def run_experiment(init_session=None, placeholder_def_func=get_placeholders):
batch_size = BATCH_SIZE
hidden_size = HIDDEN_STATES
learning_rate = 3e-4
momentum = 0.9
file_logger = FileLogger('log.tsv', ['step', 'training_loss', 'benchmark_loss'])
x, y = placeholder_def_func()
if ADD_TIME_INPUTS:
lstm = PhasedLSTMCell(hidden_size)
print('Using PhasedLSTMCell impl.')
else:
lstm = BasicLSTMCell(hidden_size)
print('Using BasicLSTMCell impl.')
initial_state = (tf.random_normal([batch_size, hidden_size], stddev=0.1),
tf.random_normal([batch_size, hidden_size], stddev=0.1))
outputs, state = dynamic_rnn(lstm, x, initial_state=initial_state, dtype=tf.float32)
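    # Keep only the output of the last time step: shape (batch_size, hidden_size).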
rnn_out = tf.squeeze(tf.slice(outputs, begin=[0, tf.shape(outputs)[1] - 1, 0], size=[-1, -1, -1]))
# _, final_hidden = state
fc0_w = create_weight_variable('fc0_w', [hidden_size, 1])
fc0_b = tf.get_variable('fc0_b', [1])
out = tf.matmul(rnn_out, fc0_w) + fc0_b
loss = tf.reduce_mean(tf.square(out - y))
optimizer = create_adam_optimizer(learning_rate, momentum)
trainable = tf.trainable_variables()
grad_update = optimizer.minimize(loss, var_list=trainable)
if init_session is not None:
sess = init_session
else:
sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
init = tf.global_variables_initializer()
sess.run(init)
# lstm.__call__(x[:, 0, :], initial_state, scope=None)
d = collections.deque(maxlen=10)
benchmark_d = collections.deque(maxlen=10)
max_steps = int(1e6)
for step in range(1, max_steps):
if step % 10 == 0:
print('step {}/{}'.format(step, max_steps))
x_s, y_s = next_batch(batch_size)
loss_value, _, pred_value = sess.run([loss, grad_update, out], feed_dict={x: x_s, y: y_s})
# The mean converges to 0.5 for IID U(0,1) random variables. Good benchmark.
benchmark_d.append(np.mean(np.square(0.5 - y_s)))
d.append(loss_value)
mean_loss = np.mean(d)
benchmark_mean_loss = np.mean(benchmark_d)
file_logger.write([step, mean_loss, benchmark_mean_loss])
file_logger.close()
if __name__ == '__main__':
run_experiment()
|
[
"tensorflow.trainable_variables",
"phased_lstm.PhasedLSTMCell",
"tensorflow.matmul",
"tensorflow.ConfigProto",
"numpy.mean",
"collections.deque",
"tensorflow.get_variable",
"tensorflow.placeholder",
"ml_utils.create_weight_variable",
"ml_utils.create_adam_optimizer",
"tensorflow.global_variables_initializer",
"numpy.square",
"tensorflow.contrib.rnn.BasicLSTMCell",
"sanitycheck.data_reader.next_batch",
"tensorflow.random_normal",
"helpers.FileLogger",
"tensorflow.python.ops.rnn.dynamic_rnn",
"tensorflow.shape",
"tensorflow.square"
] |
[((781, 847), 'helpers.FileLogger', 'FileLogger', (['"""log.tsv"""', "['step', 'training_loss', 'benchmark_loss']"], {}), "('log.tsv', ['step', 'training_loss', 'benchmark_loss'])\n", (791, 847), False, 'from helpers import FileLogger\n'), ((1268, 1335), 'tensorflow.python.ops.rnn.dynamic_rnn', 'dynamic_rnn', (['lstm', 'x'], {'initial_state': 'initial_state', 'dtype': 'tf.float32'}), '(lstm, x, initial_state=initial_state, dtype=tf.float32)\n', (1279, 1335), False, 'from tensorflow.python.ops.rnn import dynamic_rnn\n'), ((1482, 1531), 'ml_utils.create_weight_variable', 'create_weight_variable', (['"""fc0_w"""', '[hidden_size, 1]'], {}), "('fc0_w', [hidden_size, 1])\n", (1504, 1531), False, 'from ml_utils import create_weight_variable\n'), ((1544, 1573), 'tensorflow.get_variable', 'tf.get_variable', (['"""fc0_b"""', '[1]'], {}), "('fc0_b', [1])\n", (1559, 1573), True, 'import tensorflow as tf\n'), ((1681, 1727), 'ml_utils.create_adam_optimizer', 'create_adam_optimizer', (['learning_rate', 'momentum'], {}), '(learning_rate, momentum)\n', (1702, 1727), False, 'from ml_utils import create_adam_optimizer\n'), ((1744, 1768), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (1766, 1768), True, 'import tensorflow as tf\n'), ((1992, 2025), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2023, 2025), True, 'import tensorflow as tf\n'), ((2114, 2142), 'collections.deque', 'collections.deque', ([], {'maxlen': '(10)'}), '(maxlen=10)\n', (2131, 2142), False, 'import collections\n'), ((2161, 2189), 'collections.deque', 'collections.deque', ([], {'maxlen': '(10)'}), '(maxlen=10)\n', (2178, 2189), False, 'import collections\n'), ((439, 528), 'tensorflow.placeholder', 'tf.placeholder', (['"""float32"""', '[BATCH_SIZE, SEQUENCE_LENGTH, 2 if ADD_TIME_INPUTS else 1]'], {}), "('float32', [BATCH_SIZE, SEQUENCE_LENGTH, 2 if\n ADD_TIME_INPUTS else 1])\n", (453, 528), True, 'import tensorflow as tf\n'), ((526, 568), 'tensorflow.placeholder', 'tf.placeholder', (['"""float32"""', '[BATCH_SIZE, 1]'], {}), "('float32', [BATCH_SIZE, 1])\n", (540, 568), True, 'import tensorflow as tf\n'), ((922, 949), 'phased_lstm.PhasedLSTMCell', 'PhasedLSTMCell', (['hidden_size'], {}), '(hidden_size)\n', (936, 949), False, 'from phased_lstm import PhasedLSTMCell\n'), ((1019, 1045), 'tensorflow.contrib.rnn.BasicLSTMCell', 'BasicLSTMCell', (['hidden_size'], {}), '(hidden_size)\n', (1032, 1045), False, 'from tensorflow.contrib.rnn import BasicLSTMCell\n'), ((1111, 1166), 'tensorflow.random_normal', 'tf.random_normal', (['[batch_size, hidden_size]'], {'stddev': '(0.1)'}), '([batch_size, hidden_size], stddev=0.1)\n', (1127, 1166), True, 'import tensorflow as tf\n'), ((1189, 1244), 'tensorflow.random_normal', 'tf.random_normal', (['[batch_size, hidden_size]'], {'stddev': '(0.1)'}), '([batch_size, hidden_size], stddev=0.1)\n', (1205, 1244), True, 'import tensorflow as tf\n'), ((1584, 1609), 'tensorflow.matmul', 'tf.matmul', (['rnn_out', 'fc0_w'], {}), '(rnn_out, fc0_w)\n', (1593, 1609), True, 'import tensorflow as tf\n'), ((1645, 1663), 'tensorflow.square', 'tf.square', (['(out - y)'], {}), '(out - y)\n', (1654, 1663), True, 'import tensorflow as tf\n'), ((2357, 2379), 'sanitycheck.data_reader.next_batch', 'next_batch', (['batch_size'], {}), '(batch_size)\n', (2367, 2379), False, 'from sanitycheck.data_reader import next_batch\n'), ((2671, 2681), 'numpy.mean', 'np.mean', (['d'], {}), '(d)\n', (2678, 2681), True, 'import numpy as np\n'), ((2712, 2732), 'numpy.mean', 
'np.mean', (['benchmark_d'], {}), '(benchmark_d)\n', (2719, 2732), True, 'import numpy as np\n'), ((1937, 1979), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'log_device_placement': '(False)'}), '(log_device_placement=False)\n', (1951, 1979), True, 'import tensorflow as tf\n'), ((2599, 2619), 'numpy.square', 'np.square', (['(0.5 - y_s)'], {}), '(0.5 - y_s)\n', (2608, 2619), True, 'import numpy as np\n'), ((1389, 1406), 'tensorflow.shape', 'tf.shape', (['outputs'], {}), '(outputs)\n', (1397, 1406), True, 'import tensorflow as tf\n')]
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: token_features
Author : <NAME>
date: 2019/8/20
-------------------------------------------------
"""
__author__ = '<NAME>'
# Extract token features, i.e. a vector for every character; the [CLS] vector can serve as the sentence vector, or the per-character vectors can be used directly
import os
import sys
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
print(sys.path)
import tensorflow as tf
import tokenization
import modeling
import numpy as np
import h5py
# Configuration
# data_root holds the model files; either the pre-trained model or one fine-tuned on a classification task can be used
data_root = '../chinese_wwm_ext_L-12_H-768_A-12/'
bert_config_file = data_root + 'bert_config.json'
bert_config = modeling.BertConfig.from_json_file(bert_config_file)
# init_checkpoint = data_root + 'bert_model.ckpt'
# This way, a model fine-tuned on the specific task is used to produce the word vectors
# init_checkpoint = '../model/legal_fine_tune/model.ckpt-4153'
init_checkpoint = '../model/cnews_fine_tune/model.ckpt-18674'
bert_vocab_file = data_root + 'vocab.txt'
# Paths of the preprocessed input files
# file_input_x_c_train = '../data/legal_domain/train_x_c.txt'
# file_input_x_c_val = '../data/legal_domain/val_x_c.txt'
# file_input_x_c_test = '../data/legal_domain/test_x_c.txt'
# Path where the embeddings are stored
# emb_file_dir = '../data/legal_domain/emb_fine_tune.h5'
# graph
input_ids = tf.placeholder(tf.int32, shape=[None, None], name='input_ids')
input_mask = tf.placeholder(tf.int32, shape=[None, None], name='input_masks')
segment_ids = tf.placeholder(tf.int32, shape=[None, None], name='segment_ids')
BATCH_SIZE = 16
SEQ_LEN = 510
def batch_iter(x, batch_size=64, shuffle=False):
"""生成批次数据,一个batch一个batch地产生句子向量"""
data_len = len(x)
num_batch = int((data_len - 1) / batch_size) + 1
if shuffle:
indices = np.random.permutation(np.arange(data_len))
x_shuffle = np.array(x)[indices]
else:
x_shuffle = x[:]
word_mask = [[1] * (SEQ_LEN + 2) for i in range(data_len)]
word_segment_ids = [[0] * (SEQ_LEN + 2) for i in range(data_len)]
for i in range(num_batch):
start_id = i * batch_size
end_id = min((i + 1) * batch_size, data_len)
yield x_shuffle[start_id:end_id], word_mask[start_id:end_id], word_segment_ids[start_id:end_id]
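# read_input tokenizes each line of the input file, pads or truncates it to
# SEQ_LEN tokens, wraps it in [CLS]/[SEP] and converts the tokens to vocabulary ids.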
def read_input(file_dir):
    # Read all sentences that need converting from the file
    # Every sentence is padded/truncated to a uniform length of 510 here
# input_list = []
with open(file_dir, 'r', encoding='utf-8') as f:
input_list = f.readlines()
    # input_list is the list of inputs; each element is a str holding one input text
    # Now convert it into lists of token ids
word_id_list = []
for query in input_list:
quert_str = ''.join(query.strip().split())
split_tokens = token.tokenize(quert_str)
if len(split_tokens) > SEQ_LEN:
split_tokens = split_tokens[:SEQ_LEN]
else:
while len(split_tokens) < SEQ_LEN:
split_tokens.append('[PAD]')
# ****************************************************
        # This is the method to use when the sentence vector is needed
        # Prepend a [CLS] token and append a [SEP] token
tokens = []
tokens.append("[CLS]")
for i_token in split_tokens:
tokens.append(i_token)
tokens.append("[SEP]")
# ****************************************************
word_ids = token.convert_tokens_to_ids(tokens)
word_id_list.append(word_ids)
return word_id_list
# Initialize BERT
model = modeling.BertModel(
config=bert_config,
is_training=False,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=False
)
# Load the BERT checkpoint
tvars = tf.trainable_variables()
(assignment, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment)
# Get the last and second-to-last encoder layers
encoder_last_layer = model.get_sequence_output()
encoder_last2_layer = model.all_encoder_layers[-2]
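# Note: the second-to-last layer is commonly preferred for feature extraction,
# since the final layer is more strongly tied to the pre-training objectives.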
# Read the data
token = tokenization.FullTokenizer(vocab_file=bert_vocab_file)
# input_train_data = read_input(file_dir='../data/legal_domain/train_x_c.txt')
input_train_data = read_input(file_dir='../data/cnews/train_x.txt')
# input_val_data = read_input(file_dir='../data/legal_domain/val_x_c.txt')
input_val_data = read_input(file_dir='../data/cnews/val_x.txt')
# input_test_data = read_input(file_dir='../data/legal_domain/test_x_c.txt')
input_test_data = read_input(file_dir='../data/cnews/test_x.txt')
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
save_file = h5py.File('../downstream/emb_fine_tune_cnews.h5', 'w')
emb_train = []
train_batches = batch_iter(input_train_data, batch_size=BATCH_SIZE, shuffle=False)
for word_id, mask, segment in train_batches:
feed_data = {input_ids: word_id, input_mask: mask, segment_ids: segment}
last2 = sess.run(encoder_last2_layer, feed_dict=feed_data)
# print(last2.shape)
for sub_array in last2:
emb_train.append(sub_array)
    # Now save it
emb_train_array = np.asarray(emb_train)
save_file.create_dataset('train', data=emb_train_array)
# val
emb_val = []
val_batches = batch_iter(input_val_data, batch_size=BATCH_SIZE, shuffle=False)
for word_id, mask, segment in val_batches:
feed_data = {input_ids: word_id, input_mask: mask, segment_ids: segment}
last2 = sess.run(encoder_last2_layer, feed_dict=feed_data)
# print(last2.shape)
for sub_array in last2:
emb_val.append(sub_array)
    # Now save it
emb_val_array = np.asarray(emb_val)
save_file.create_dataset('val', data=emb_val_array)
# test
emb_test = []
test_batches = batch_iter(input_test_data, batch_size=BATCH_SIZE, shuffle=False)
for word_id, mask, segment in test_batches:
feed_data = {input_ids: word_id, input_mask: mask, segment_ids: segment}
last2 = sess.run(encoder_last2_layer, feed_dict=feed_data)
# print(last2.shape)
for sub_array in last2:
emb_test.append(sub_array)
    # Now save it
emb_test_array = np.asarray(emb_test)
save_file.create_dataset('test', data=emb_test_array)
save_file.close()
print(emb_train_array.shape)
print(emb_val_array.shape)
print(emb_test_array.shape)
# The goal here is a downstream CNN task, so the 768-dim embeddings of all tokens are written out first
# The written shape is (N, max_seq_len + 2, 768)
# Downstream, drop the head and tail tokens when convolving, or use just the head ([CLS]) vector for a fully connected layer
# max_seq_len is fixed to 510 here; adding [cls] and [sep] gives 512
# An (n, 512, 768) ndarray is written to file and read back when needed, so the embedding layer can simply be dropped
|
[
"sys.path.append",
"h5py.File",
"tensorflow.trainable_variables",
"modeling.BertModel",
"tensorflow.global_variables_initializer",
"tokenization.FullTokenizer",
"os.path.dirname",
"tensorflow.Session",
"numpy.asarray",
"tensorflow.placeholder",
"tensorflow.train.init_from_checkpoint",
"numpy.arange",
"modeling.BertConfig.from_json_file",
"numpy.array",
"os.path.split",
"modeling.get_assignment_map_from_checkpoint"
] |
[((408, 433), 'sys.path.append', 'sys.path.append', (['rootPath'], {}), '(rootPath)\n', (423, 433), False, 'import sys\n'), ((707, 759), 'modeling.BertConfig.from_json_file', 'modeling.BertConfig.from_json_file', (['bert_config_file'], {}), '(bert_config_file)\n', (741, 759), False, 'import modeling\n'), ((1296, 1358), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None]', 'name': '"""input_ids"""'}), "(tf.int32, shape=[None, None], name='input_ids')\n", (1310, 1358), True, 'import tensorflow as tf\n'), ((1372, 1436), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None]', 'name': '"""input_masks"""'}), "(tf.int32, shape=[None, None], name='input_masks')\n", (1386, 1436), True, 'import tensorflow as tf\n'), ((1451, 1515), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None]', 'name': '"""segment_ids"""'}), "(tf.int32, shape=[None, None], name='segment_ids')\n", (1465, 1515), True, 'import tensorflow as tf\n'), ((3285, 3453), 'modeling.BertModel', 'modeling.BertModel', ([], {'config': 'bert_config', 'is_training': '(False)', 'input_ids': 'input_ids', 'input_mask': 'input_mask', 'token_type_ids': 'segment_ids', 'use_one_hot_embeddings': '(False)'}), '(config=bert_config, is_training=False, input_ids=\n input_ids, input_mask=input_mask, token_type_ids=segment_ids,\n use_one_hot_embeddings=False)\n', (3303, 3453), False, 'import modeling\n'), ((3491, 3515), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (3513, 3515), True, 'import tensorflow as tf\n'), ((3559, 3626), 'modeling.get_assignment_map_from_checkpoint', 'modeling.get_assignment_map_from_checkpoint', (['tvars', 'init_checkpoint'], {}), '(tvars, init_checkpoint)\n', (3602, 3626), False, 'import modeling\n'), ((3627, 3685), 'tensorflow.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (['init_checkpoint', 'assignment'], {}), '(init_checkpoint, assignment)\n', (3656, 3685), True, 'import tensorflow as tf\n'), ((3817, 3871), 'tokenization.FullTokenizer', 'tokenization.FullTokenizer', ([], {'vocab_file': 'bert_vocab_file'}), '(vocab_file=bert_vocab_file)\n', (3843, 3871), False, 'import tokenization\n'), ((344, 369), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (359, 369), False, 'import os\n'), ((382, 404), 'os.path.split', 'os.path.split', (['curPath'], {}), '(curPath)\n', (395, 404), False, 'import os\n'), ((4308, 4320), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4318, 4320), True, 'import tensorflow as tf\n'), ((4394, 4448), 'h5py.File', 'h5py.File', (['"""../downstream/emb_fine_tune_cnews.h5"""', '"""w"""'], {}), "('../downstream/emb_fine_tune_cnews.h5', 'w')\n", (4403, 4448), False, 'import h5py\n'), ((4887, 4908), 'numpy.asarray', 'np.asarray', (['emb_train'], {}), '(emb_train)\n', (4897, 4908), True, 'import numpy as np\n'), ((5406, 5425), 'numpy.asarray', 'np.asarray', (['emb_val'], {}), '(emb_val)\n', (5416, 5425), True, 'import numpy as np\n'), ((5926, 5946), 'numpy.asarray', 'np.asarray', (['emb_test'], {}), '(emb_test)\n', (5936, 5946), True, 'import numpy as np\n'), ((4343, 4376), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4374, 4376), True, 'import tensorflow as tf\n'), ((1769, 1788), 'numpy.arange', 'np.arange', (['data_len'], {}), '(data_len)\n', (1778, 1788), True, 'import numpy as np\n'), ((1810, 1821), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1818, 1821), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# inst: university of bristol
# auth: <NAME>
# mail: <EMAIL> / <EMAIL>
import sys
import subprocess
import configparser
import getopt
import numpy as np
import pandas as pd
import gdalutils
from lfptools import shapefile
from lfptools import misc_utils
from osgeo import osr
def getwidths_shell(argv):
myhelp = '''
LFPtools v0.1
Name
----
getwidths
Description
-----------
Retrieve river widths from a data set
Usage
-----
>> lfp-getwidths -i config.txt
Content in config.txt
---------------------
[getwidths]
thresh = Searching window threshold in same units as input data set
output = Shapefile output file path
recf = `Rec` file path
netf = Target mask file path
proj = Output projection in Proj4 format
fwidth = Source width file path GDAL format
'''
try:
opts, args = getopt.getopt(argv, "i:")
for o, a in opts:
if o == "-i":
inifile = a
except:
print(myhelp)
sys.exit(0)
config = configparser.SafeConfigParser()
config.read(inifile)
recf = str(config.get('getwidths', 'recf'))
netf = str(config.get('getwidths', 'netf'))
proj = str(config.get('getwidths', 'proj'))
fwidth = str(config.get('getwidths', 'fwidth'))
output = str(config.get('getwidths', 'output'))
thresh = np.float64(config.get('getwidths', 'thresh'))
getwidths(recf, netf, proj, fwidth, output, thresh)
def getwidths(recf, netf, proj, fwidth, output, thresh):
print(" running getwidths.py...")
w = shapefile.Writer(shapefile.POINT)
w.field('x')
w.field('y')
w.field('width')
# Reading XXX_rec.csv file
rec = pd.read_csv(recf)
# Get nearest width from datasource
# Uses Euclidean distance to find nearest point in source
    # `try` is included since the width database may not contain
    # data in the basin; if that is the case, all values are assigned
    # a 30 m width
width = []
for x, y in zip(rec['lon'], rec['lat']):
xmin = x - thresh
ymin = y - thresh
xmax = x + thresh
ymax = y + thresh
dat, geo = gdalutils.clip_raster(fwidth, xmin, ymin, xmax, ymax)
iy, ix = np.where(dat > 30)
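        # As used below, geo[8] and geo[9] are the x and y coordinate arrays of the
        # clipped raster; the indices above select candidate cells wider than 30 m.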
xdat = geo[8][ix]
ydat = geo[9][iy]
try:
dis, ind = misc_utils.near_euc(xdat, ydat, (x, y))
val = dat[iy[ind], ix[ind]]
width.append(val)
except ValueError:
width.append(np.nan)
rec['width'] = width
# Group river network per link
    # If there are more NaNs than real values, all values in the link are set to 30
# Otherwise, interpolate real values to fill NaNs
def check_width(a):
b = a.copy()
c = b.isnull()
falses = c.sum()
trues = c.count() - falses
if trues >= falses:
return a.interpolate(limit_direction='both')
else:
b.loc[:] = 30
return b
rec.loc[:, 'width'] = rec.groupby('link').width.apply(check_width)
# Writing .shp resulting file
for x, y, width in zip(rec['lon'], rec['lat'], rec['width']):
w.point(x, y)
w.record(x, y, width)
w.save("%s.shp" % output)
# write .prj file
prj = open("%s.prj" % output, "w")
srs = osr.SpatialReference()
srs.ImportFromProj4(proj)
prj.write(srs.ExportToWkt())
prj.close()
geo = gdalutils.get_geo(netf)
fmt = "GTiff"
nodata = -9999
name1 = output+".shp"
name2 = output+".tif"
subprocess.call(["gdal_rasterize", "-a_nodata", str(nodata), "-of", fmt, "-tr", str(geo[6]), str(geo[7]),
"-a", "width", "-a_srs", proj, "-te", str(geo[0]), str(geo[1]), str(geo[2]), str(geo[3]), name1, name2])
if __name__ == '__main__':
getwidths_shell(sys.argv[1:])
|
[
"gdalutils.get_geo",
"lfptools.misc_utils.near_euc",
"getopt.getopt",
"pandas.read_csv",
"lfptools.shapefile.Writer",
"numpy.where",
"configparser.SafeConfigParser",
"sys.exit",
"gdalutils.clip_raster",
"osgeo.osr.SpatialReference"
] |
[((1001, 1032), 'configparser.SafeConfigParser', 'configparser.SafeConfigParser', ([], {}), '()\n', (1030, 1032), False, 'import configparser\n'), ((1533, 1566), 'lfptools.shapefile.Writer', 'shapefile.Writer', (['shapefile.POINT'], {}), '(shapefile.POINT)\n', (1549, 1566), False, 'from lfptools import shapefile\n'), ((1664, 1681), 'pandas.read_csv', 'pd.read_csv', (['recf'], {}), '(recf)\n', (1675, 1681), True, 'import pandas as pd\n'), ((3284, 3306), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', (3304, 3306), False, 'from osgeo import osr\n'), ((3397, 3420), 'gdalutils.get_geo', 'gdalutils.get_geo', (['netf'], {}), '(netf)\n', (3414, 3420), False, 'import gdalutils\n'), ((827, 852), 'getopt.getopt', 'getopt.getopt', (['argv', '"""i:"""'], {}), "(argv, 'i:')\n", (840, 852), False, 'import getopt\n'), ((2139, 2192), 'gdalutils.clip_raster', 'gdalutils.clip_raster', (['fwidth', 'xmin', 'ymin', 'xmax', 'ymax'], {}), '(fwidth, xmin, ymin, xmax, ymax)\n', (2160, 2192), False, 'import gdalutils\n'), ((2210, 2228), 'numpy.where', 'np.where', (['(dat > 30)'], {}), '(dat > 30)\n', (2218, 2228), True, 'import numpy as np\n'), ((975, 986), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (983, 986), False, 'import sys\n'), ((2318, 2357), 'lfptools.misc_utils.near_euc', 'misc_utils.near_euc', (['xdat', 'ydat', '(x, y)'], {}), '(xdat, ydat, (x, y))\n', (2337, 2357), False, 'from lfptools import misc_utils\n')]
|
from .experiment import Experiment
import numpy as np
class XenonSimple(Experiment):
detector_name = 'Xe_simple'
target_material = 'Xe'
exposure_tonne_year = 5
energy_threshold_kev = 10
cut_efficiency = 0.8
detection_efficiency = 0.5
interaction_type = 'SI'
location = 'XENON'
def __init__(self, n_energy_bins=10, e_min_kev=0, e_max_kev=100, ):
super().__init__(n_energy_bins=n_energy_bins, e_min_kev=e_min_kev, e_max_kev=e_max_kev)
def resolution(self, energies_in_kev):
"""Simple square root dependency of the energy resolution"""
return 0.6 * np.sqrt(energies_in_kev)
def background_function(self, energies_in_kev):
"""Assume background free detector"""
return np.zeros(len(energies_in_kev))
class GermaniumSimple(Experiment):
detector_name = 'Ge_simple'
target_material = 'Ge'
exposure_tonne_year = 3
energy_threshold_kev = 10
cut_efficiency = 0.8
detection_efficiency = 0.9
interaction_type = 'SI'
location = 'SUF'
def __init__(self, n_energy_bins=10, e_min_kev=0, e_max_kev=100, ):
super().__init__(n_energy_bins=n_energy_bins, e_min_kev=e_min_kev, e_max_kev=e_max_kev)
def resolution(self, energies_in_kev):
"""Simple resolution model"""
return np.sqrt(0.3 ** 2 + (0.06 ** 2) * energies_in_kev)
def background_function(self, energies_in_kev):
"""Assume background free detector"""
return np.zeros(len(energies_in_kev))
class ArgonSimple(Experiment):
detector_name = 'Ar_simple'
target_material = 'Ar'
exposure_tonne_year = 10
energy_threshold_kev = 30
cut_efficiency = 0.8
detection_efficiency = 0.8
interaction_type = 'SI'
location = 'XENON' # Assume also located at LNGS
def __init__(self, n_energy_bins=10, e_min_kev=0, e_max_kev=100, ):
super().__init__(n_energy_bins=n_energy_bins, e_min_kev=e_min_kev, e_max_kev=e_max_kev)
def resolution(self, energies_in_kev):
"""Simple square root dependency of the energy resolution"""
return 0.7 * np.sqrt(energies_in_kev)
def background_function(self, energies_in_kev):
"""Assume background free detector"""
return np.zeros(len(energies_in_kev))
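# Usage sketch (hypothetical, assumes the Experiment base class needs no further setup):
#   det = XenonSimple()
#   det.resolution(np.array([10.0, 50.0]))  # 0.6 * sqrt(E) resolution in keV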
|
[
"numpy.sqrt"
] |
[((1309, 1356), 'numpy.sqrt', 'np.sqrt', (['(0.3 ** 2 + 0.06 ** 2 * energies_in_kev)'], {}), '(0.3 ** 2 + 0.06 ** 2 * energies_in_kev)\n', (1316, 1356), True, 'import numpy as np\n'), ((614, 638), 'numpy.sqrt', 'np.sqrt', (['energies_in_kev'], {}), '(energies_in_kev)\n', (621, 638), True, 'import numpy as np\n'), ((2096, 2120), 'numpy.sqrt', 'np.sqrt', (['energies_in_kev'], {}), '(energies_in_kev)\n', (2103, 2120), True, 'import numpy as np\n')]
|
import os
import unittest
import cv2
import sys
import numpy as np
from src.models.storage.frame import Frame
from src.models.storage.batch import FrameBatch
from src.udfs.depth_estimation.depth_estimator import DepthEstimator
from src.utils.frame_filter_util import FrameFilter
class DepthEstimatorTest(unittest.TestCase):
"""
unit test class for depth estimation model
Arguments:
unittest.TestCase
"""
def __init__(self, *args, **kwargs):
"""
method to initialize the class object
Arguments:
args
kwargs
"""
super().__init__(*args, **kwargs)
self.base_path = os.path.dirname(os.path.abspath(__file__))
def _load_image(self, path):
"""
method to load the image from a given input path
Arguments:
path : path where image file is located
"""
img = cv2.imread(path)
return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
def test_should_return_batches_equivalent_to_number_of_frames(self):
"""
Unit test method which creates a batch of frames, sends it for
model prediction.
It then checks if the returned object size is as expected.
"""
# create two frames from kitti car dataset
frame_1 = Frame(1, self._load_image(
os.path.join(self.base_path, 'data', 'kitti_car_1.png')), None)
frame_2 = Frame(1, self._load_image(
os.path.join(self.base_path, 'data', 'kitti_car_2.png')), None)
# create a batch of 2 frames
frame_batch = FrameBatch([frame_1, frame_2], None)
# process the batch frames for depth and segmentation prediction
estimator = DepthEstimator('ExpKITTI_joint.ckpt')
result = estimator.classify(frame_batch)
# assert if result size is same as the batch size
self.assertEqual(len(result), 2)
# assert if frame in result object is same as original frame
self.assertTrue(np.array_equal(result[0].frame.data, frame_1.data))
self.assertTrue(np.array_equal(result[1].frame.data, frame_2.data))
# assert that depth and segmentation results should not be null
assert result[0].depth is not None
assert result[0].segm is not None
assert result[1].depth is not None
assert result[1].segm is not None
@unittest.skip("need correction in depth mask initialization")
def test_frame_filtering_for_depth_estimation(self):
"""
        Unit test method to test the frame filtering functionality.
        It loops over the frames and passes them to the frame filter
        object's apply_filter method.
        Finally it verifies that the depth mask is applied to every frame
        except each fifth one.
"""
# create two frames from kitti car dataset
frame_1 = Frame(1, self._load_image(
os.path.join(self.base_path, 'data', 'kitti_car_1.png')), None)
frame_2 = Frame(1, self._load_image(
os.path.join(self.base_path, 'data', 'kitti_car_2.png')), None)
# create a batch of 2 frames
frame_batch = FrameBatch([frame_1, frame_2], None)
frames = frame_batch.frames_as_numpy_array()
# initialize the frame filtering class object
frame_filter = FrameFilter()
# create a random depth mask array
depth_mask = np.random.rand(
frames[0].shape[0],
frames[0].shape[1],
frames[0].shape[2])
# iterate over frames in the batch
for i, img in enumerate(frames):
# apply frame filter on each frame
img = frame_filter.apply_filter(img, depth_mask)
# For every fifth frame the mask should not be applied. Hence, the
# frame returned by apply_filter method should be same as original
# frame
if i % 5 == 0:
self.assertTrue(np.array_equal(img, frames[0]))
else:
# Every other frame should be transformed after applying depth
# mask
self.assertTrue(np.array_equal(
img, frames[i] * depth_mask[:, :, None]))
|
[
"os.path.abspath",
"numpy.array_equal",
"src.utils.frame_filter_util.FrameFilter",
"cv2.cvtColor",
"numpy.random.rand",
"unittest.skip",
"cv2.imread",
"src.udfs.depth_estimation.depth_estimator.DepthEstimator",
"os.path.join",
"src.models.storage.batch.FrameBatch"
] |
[((2565, 2626), 'unittest.skip', 'unittest.skip', (['"""need correction in depth mask initialization"""'], {}), "('need correction in depth mask initialization')\n", (2578, 2626), False, 'import unittest\n'), ((1029, 1045), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (1039, 1045), False, 'import cv2\n'), ((1062, 1098), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (1074, 1098), False, 'import cv2\n'), ((1757, 1793), 'src.models.storage.batch.FrameBatch', 'FrameBatch', (['[frame_1, frame_2]', 'None'], {}), '([frame_1, frame_2], None)\n', (1767, 1793), False, 'from src.models.storage.batch import FrameBatch\n'), ((1891, 1928), 'src.udfs.depth_estimation.depth_estimator.DepthEstimator', 'DepthEstimator', (['"""ExpKITTI_joint.ckpt"""'], {}), "('ExpKITTI_joint.ckpt')\n", (1905, 1928), False, 'from src.udfs.depth_estimation.depth_estimator import DepthEstimator\n'), ((3372, 3408), 'src.models.storage.batch.FrameBatch', 'FrameBatch', (['[frame_1, frame_2]', 'None'], {}), '([frame_1, frame_2], None)\n', (3382, 3408), False, 'from src.models.storage.batch import FrameBatch\n'), ((3546, 3559), 'src.utils.frame_filter_util.FrameFilter', 'FrameFilter', ([], {}), '()\n', (3557, 3559), False, 'from src.utils.frame_filter_util import FrameFilter\n'), ((3628, 3702), 'numpy.random.rand', 'np.random.rand', (['frames[0].shape[0]', 'frames[0].shape[1]', 'frames[0].shape[2]'], {}), '(frames[0].shape[0], frames[0].shape[1], frames[0].shape[2])\n', (3642, 3702), True, 'import numpy as np\n'), ((773, 798), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (788, 798), False, 'import os\n'), ((2179, 2229), 'numpy.array_equal', 'np.array_equal', (['result[0].frame.data', 'frame_1.data'], {}), '(result[0].frame.data, frame_1.data)\n', (2193, 2229), True, 'import numpy as np\n'), ((2256, 2306), 'numpy.array_equal', 'np.array_equal', (['result[1].frame.data', 'frame_2.data'], {}), '(result[1].frame.data, frame_2.data)\n', (2270, 2306), True, 'import numpy as np\n'), ((1507, 1562), 'os.path.join', 'os.path.join', (['self.base_path', '"""data"""', '"""kitti_car_1.png"""'], {}), "(self.base_path, 'data', 'kitti_car_1.png')\n", (1519, 1562), False, 'import os\n'), ((1630, 1685), 'os.path.join', 'os.path.join', (['self.base_path', '"""data"""', '"""kitti_car_2.png"""'], {}), "(self.base_path, 'data', 'kitti_car_2.png')\n", (1642, 1685), False, 'import os\n'), ((3122, 3177), 'os.path.join', 'os.path.join', (['self.base_path', '"""data"""', '"""kitti_car_1.png"""'], {}), "(self.base_path, 'data', 'kitti_car_1.png')\n", (3134, 3177), False, 'import os\n'), ((3245, 3300), 'os.path.join', 'os.path.join', (['self.base_path', '"""data"""', '"""kitti_car_2.png"""'], {}), "(self.base_path, 'data', 'kitti_car_2.png')\n", (3257, 3300), False, 'import os\n'), ((4187, 4217), 'numpy.array_equal', 'np.array_equal', (['img', 'frames[0]'], {}), '(img, frames[0])\n', (4201, 4217), True, 'import numpy as np\n'), ((4375, 4430), 'numpy.array_equal', 'np.array_equal', (['img', '(frames[i] * depth_mask[:, :, None])'], {}), '(img, frames[i] * depth_mask[:, :, None])\n', (4389, 4430), True, 'import numpy as np\n')]
|
from __future__ import print_function, division
import os
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
import Options
import cv2
import shutil
config = Options.Config()
def find_classes(dir, config=config):
classes = [str(d) for d in range(config.label_size)]
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
def make_dataset(dir, class_to_idx, mode, config=config):
videos = []
dir = os.path.expanduser(dir)
classes = [str(d) for d in range(config.label_size)]
for target in classes:
d = os.path.join(dir, target)
if not os.path.isdir(d):
continue
listd = sorted(os.listdir(d))
#if mode == 'val':
#listd = random.sample(listd, 10)
for fnames in listd:
path = os.path.join(d, fnames)
if os.path.isdir(os.path.join(d, fnames)):
if os.path.exists(path):
item = (path, class_to_idx[target])
videos.append(item)
return videos
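# lip_reading_loader reads one sample directory and returns a dict holding 25 RGB
# face crops ('video'), the matching 20x12 MFCC blocks ('mfcc20'), a per-frame
# eye-blink window ('blinkdata') and the image paths ('A_path'/'B_path').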
def lip_reading_loader(path, config=config, mode='train', random_crop=True,
ini='fan'):
loader = {}
pair = np.arange(2, 27)
im_pth = []
video_block = np.zeros((25,
config.image_size,
config.image_size,
config.image_channel_size))
mfcc_block = np.zeros(( 25, 1,
config.mfcc_length,
config.mfcc_width,
))
blinkdata_block = np.zeros((25,
config.blinkdata_width))
if os.path.isdir(path):
for block in (os.listdir(path)):
block_dir = os.path.join(path, block)
crop_x = 2
crop_y = 2
if mode == 'train':
flip = np.random.randint(0, 2)
if random_crop:
crop_x = np.random.randint(0, 5)
crop_y = np.random.randint(0, 5)
else:
flip = 0
if os.path.isdir(block_dir):
if block == config.image_block_name:
k1 = 0
for image_num in pair:
image_path = os.path.join(block_dir, str(image_num) + '.jpg')
im_pth.append(image_path)
if os.path.exists(image_path):
image = cv2.imread(image_path)
if flip == 1:
image = np.fliplr(image)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if ini == 'fan':
image = image / 255
video_block[k1] = image[crop_x:crop_x + config.image_size, crop_y:crop_y + config.image_size]
else:
print("video_block = 0")
shutil.rmtree(path)
break
k1 += 1
if block == 'mfcc20':
if config.require_audio:
k4 = 0
for mfcc_num in pair:
# for s in range(-1,2):
mfcc_path = os.path.join(block_dir, str(mfcc_num) + '.bin')
if os.path.exists(mfcc_path):
mfcc = np.fromfile(mfcc_path)
mfcc = mfcc.reshape(20, 12)
mfcc_block[k4, 0, :, :] = mfcc
k4 += 1
else:
raise ("mfccs = 0")
if block == config.blink_block_name:
blinkdata_path = os.path.join(block_dir, 'd.txt')
blinkdatas = np.loadtxt(blinkdata_path)
k3hjq = 0
for b_num in pair:
if (config.blinkdata_width-1) % 2 != 0:
print("WIDTH ERROR INIT BLINKDATA!!! Not and odd number. This may cause errors. HJQERR")
b_expand = config.blinkdata_width // 2
blinkdata_block[k3hjq] = blinkdatas[b_num - b_expand:b_num + b_expand + 1]
k3hjq += 1
video_block = video_block.transpose((0, 3, 1, 2))
loader['video'] = video_block
loader['mfcc20'] = mfcc_block
loader['blinkdata'] = blinkdata_block
loader['A_path'] = im_pth[0]
loader['B_path'] = im_pth[1:]
# loader['label_map'] = label_map_block[:, 1:, :, :]
if not np.abs(np.mean(mfcc_block)) < 1e5:
print(np.mean(mfcc_block))
print(im_pth)
#shutil.rmtree(path)
return loader
class VideoFolder(Dataset):
def __init__(self, root, config=config, transform=None, target_transform=None,
loader=lip_reading_loader, mode='train'):
classes, class_to_idx = find_classes(root,config=config)
videos = make_dataset(root, class_to_idx, mode, config=config)
if len(videos) == 0:
raise(RuntimeError("Found 0 images in subfolders of: " + root + "\n"))
self.root = root
self.vids = videos
self.classes = classes
self.class_to_idx = class_to_idx
self.transform = transform
self.target_transform = target_transform
self.loader = loader
self.config = config
self.mode = mode
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
path, target = self.vids[index]
vid = self.loader(path, config=self.config, mode=self.mode)
return vid, target
def __len__(self):
return len(self.vids)
|
[
"os.path.join",
"Options.Config",
"os.path.isdir",
"cv2.cvtColor",
"numpy.fromfile",
"numpy.zeros",
"os.path.exists",
"cv2.imread",
"numpy.fliplr",
"numpy.random.randint",
"numpy.arange",
"numpy.mean",
"numpy.loadtxt",
"shutil.rmtree",
"os.path.expanduser",
"os.listdir"
] |
[((188, 204), 'Options.Config', 'Options.Config', ([], {}), '()\n', (202, 204), False, 'import Options\n'), ((485, 508), 'os.path.expanduser', 'os.path.expanduser', (['dir'], {}), '(dir)\n', (503, 508), False, 'import os\n'), ((1219, 1235), 'numpy.arange', 'np.arange', (['(2)', '(27)'], {}), '(2, 27)\n', (1228, 1235), True, 'import numpy as np\n'), ((1270, 1349), 'numpy.zeros', 'np.zeros', (['(25, config.image_size, config.image_size, config.image_channel_size)'], {}), '((25, config.image_size, config.image_size, config.image_channel_size))\n', (1278, 1349), True, 'import numpy as np\n'), ((1451, 1507), 'numpy.zeros', 'np.zeros', (['(25, 1, config.mfcc_length, config.mfcc_width)'], {}), '((25, 1, config.mfcc_length, config.mfcc_width))\n', (1459, 1507), True, 'import numpy as np\n'), ((1617, 1655), 'numpy.zeros', 'np.zeros', (['(25, config.blinkdata_width)'], {}), '((25, config.blinkdata_width))\n', (1625, 1655), True, 'import numpy as np\n'), ((1692, 1711), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (1705, 1711), False, 'import os\n'), ((605, 630), 'os.path.join', 'os.path.join', (['dir', 'target'], {}), '(dir, target)\n', (617, 630), False, 'import os\n'), ((1735, 1751), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1745, 1751), False, 'import os\n'), ((647, 663), 'os.path.isdir', 'os.path.isdir', (['d'], {}), '(d)\n', (660, 663), False, 'import os\n'), ((709, 722), 'os.listdir', 'os.listdir', (['d'], {}), '(d)\n', (719, 722), False, 'import os\n'), ((845, 868), 'os.path.join', 'os.path.join', (['d', 'fnames'], {}), '(d, fnames)\n', (857, 868), False, 'import os\n'), ((1778, 1803), 'os.path.join', 'os.path.join', (['path', 'block'], {}), '(path, block)\n', (1790, 1803), False, 'import os\n'), ((2126, 2150), 'os.path.isdir', 'os.path.isdir', (['block_dir'], {}), '(block_dir)\n', (2139, 2150), False, 'import os\n'), ((898, 921), 'os.path.join', 'os.path.join', (['d', 'fnames'], {}), '(d, fnames)\n', (910, 921), False, 'import os\n'), ((943, 963), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (957, 963), False, 'import os\n'), ((1905, 1928), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (1922, 1928), True, 'import numpy as np\n'), ((4806, 4825), 'numpy.mean', 'np.mean', (['mfcc_block'], {}), '(mfcc_block)\n', (4813, 4825), True, 'import numpy as np\n'), ((1990, 2013), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (2007, 2013), True, 'import numpy as np\n'), ((2043, 2066), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (2060, 2066), True, 'import numpy as np\n'), ((3880, 3912), 'os.path.join', 'os.path.join', (['block_dir', '"""d.txt"""'], {}), "(block_dir, 'd.txt')\n", (3892, 3912), False, 'import os\n'), ((3946, 3972), 'numpy.loadtxt', 'np.loadtxt', (['blinkdata_path'], {}), '(blinkdata_path)\n', (3956, 3972), True, 'import numpy as np\n'), ((4760, 4779), 'numpy.mean', 'np.mean', (['mfcc_block'], {}), '(mfcc_block)\n', (4767, 4779), True, 'import numpy as np\n'), ((2438, 2464), 'os.path.exists', 'os.path.exists', (['image_path'], {}), '(image_path)\n', (2452, 2464), False, 'import os\n'), ((2502, 2524), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (2512, 2524), False, 'import cv2\n'), ((2661, 2699), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (2673, 2699), False, 'import cv2\n'), ((3031, 3050), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (3044, 
3050), False, 'import shutil\n'), ((3449, 3474), 'os.path.exists', 'os.path.exists', (['mfcc_path'], {}), '(mfcc_path)\n', (3463, 3474), False, 'import os\n'), ((2607, 2623), 'numpy.fliplr', 'np.fliplr', (['image'], {}), '(image)\n', (2616, 2623), True, 'import numpy as np\n'), ((3515, 3537), 'numpy.fromfile', 'np.fromfile', (['mfcc_path'], {}), '(mfcc_path)\n', (3526, 3537), True, 'import numpy as np\n')]
|
from concurrent import futures
from copy import deepcopy
import nifty.tools as nt
import numpy as np
import torch
from tqdm import tqdm
from ..transform.raw import standardize
def _load_block(input_, offset, block_shape, halo, padding_mode="reflect", with_channels=False):
shape = input_.shape
if with_channels:
shape = shape[1:]
starts = [off - ha for off, ha in zip(offset, halo)]
stops = [off + bs + ha for off, bs, ha in zip(offset, block_shape, halo)]
# we pad the input volume if necessary
pad_left = None
pad_right = None
# check for padding to the left
if any(start < 0 for start in starts):
pad_left = tuple(abs(start) if start < 0 else 0 for start in starts)
starts = [max(0, start) for start in starts]
# check for padding to the right
if any(stop > shape[i] for i, stop in enumerate(stops)):
pad_right = tuple(stop - shape[i] if stop > shape[i] else 0 for i, stop in enumerate(stops))
stops = [min(shape[i], stop) for i, stop in enumerate(stops)]
bb = tuple(slice(start, stop) for start, stop in zip(starts, stops))
if with_channels:
data = input_[(slice(None),) + bb]
else:
data = input_[bb]
ndim = len(shape)
# pad if necessary
if pad_left is not None or pad_right is not None:
pad_left = (0,) * ndim if pad_left is None else pad_left
pad_right = (0,) * ndim if pad_right is None else pad_right
pad_width = tuple((pl, pr) for pl, pr in zip(pad_left, pad_right))
if with_channels:
pad_width = ((0, 0),) + pad_width
data = np.pad(data, pad_width, mode=padding_mode)
# extend the bounding box for downstream
bb = tuple(
slice(b.start - pl, b.stop + pr)
for b, pl, pr in zip(bb, pad_left, pad_right)
)
return data, bb
# TODO half precision prediction
def predict_with_halo(
input_,
model,
gpu_ids,
block_shape,
halo,
output=None,
preprocess=standardize,
postprocess=None,
with_channels=False,
skip_block=None,
):
""" Run block-wise network prediction with halo.
Arguments:
input_ [arraylike] - the input data, can be a numpy array, a hdf5/zarr/z5py dataset or similar
model [nn.Module] - the network
gpu_ids [list[int or string]] - list of gpus id used for prediction
block_shape [tuple] - shape of inner block used for prediction
halo [tuple] - shape of halo used for prediction
output [arraylike or list[tuple[arraylike, slice]]] - output data, will be allocated if None is passed.
Instead of a single output, this can also be a list of outputs and the corresponding channels.
(default: None)
preprocess [callable] - function to preprocess input data before passing it to the network.
(default: standardize)
postprocess [callable] - function to postprocess the network predictions (default: None)
with_channels [bool] - whether the input has a channel axis (default: False)
        skip_block [callable] - function to evaluate whether a given input block should be skipped (default: None)
"""
devices = [torch.device(gpu) for gpu in gpu_ids]
models = [
(model if next(model.parameters()).device == device else deepcopy(model).to(device), device)
for device in devices
]
n_workers = len(gpu_ids)
shape = input_.shape
if with_channels:
shape = shape[1:]
ndim = len(shape)
assert len(block_shape) == len(halo) == ndim
blocking = nt.blocking([0] * ndim, shape, block_shape)
if output is None:
n_out = models[0][0].out_channels
output = np.zeros((n_out,) + shape, dtype="float32")
def predict_block(block_id):
worker_id = block_id % n_workers
net, device = models[worker_id]
with torch.no_grad():
block = blocking.getBlock(block_id)
offset = [beg for beg in block.begin]
inp, _ = _load_block(input_, offset, block_shape, halo, with_channels=with_channels)
if skip_block is not None and skip_block(inp):
return
if preprocess is not None:
inp = preprocess(inp)
# add (channel) and batch axis
expand_dims = np.s_[None] if with_channels else np.s_[None, None]
inp = torch.from_numpy(inp[expand_dims]).to(device)
prediction = net(inp)
# allow for list of tensors
try:
prediction = prediction.cpu().numpy().squeeze(0)
except AttributeError:
prediction = prediction[0]
prediction = prediction.cpu().numpy().squeeze(0)
if postprocess is not None:
prediction = postprocess(prediction)
inner_bb = tuple(slice(ha, ha + bs) for ha, bs in zip(halo, block.shape))
if prediction.ndim == ndim + 1:
inner_bb = (slice(None),) + inner_bb
prediction = prediction[inner_bb]
bb = tuple(slice(beg, end) for beg, end in zip(block.begin, block.end))
if isinstance(output, list): # we have multiple outputs and split the prediction channels
for out, channel_slice in output:
this_bb = bb if out.ndim == ndim else (slice(None),) + bb
out[this_bb] = prediction[channel_slice]
else: # we only have a single output array
if output.ndim == ndim + 1:
bb = (slice(None),) + bb
output[bb] = prediction
n_blocks = blocking.numberOfBlocks
with futures.ThreadPoolExecutor(n_workers) as tp:
list(tqdm(tp.map(predict_block, range(n_blocks)), total=n_blocks))
return output
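# Usage sketch (hypothetical names and shapes): for a 3D numpy volume `vol` and a
# trained segmentation network `net`,
#   pred = predict_with_halo(vol, net, gpu_ids=[0], block_shape=(64, 64, 64), halo=(16, 16, 16))
# returns a float32 array of shape (net.out_channels,) + vol.shape.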
|
[
"numpy.pad",
"copy.deepcopy",
"numpy.zeros",
"nifty.tools.blocking",
"torch.device",
"concurrent.futures.ThreadPoolExecutor",
"torch.no_grad",
"torch.from_numpy"
] |
[((3601, 3644), 'nifty.tools.blocking', 'nt.blocking', (['([0] * ndim)', 'shape', 'block_shape'], {}), '([0] * ndim, shape, block_shape)\n', (3612, 3644), True, 'import nifty.tools as nt\n'), ((1621, 1663), 'numpy.pad', 'np.pad', (['data', 'pad_width'], {'mode': 'padding_mode'}), '(data, pad_width, mode=padding_mode)\n', (1627, 1663), True, 'import numpy as np\n'), ((3222, 3239), 'torch.device', 'torch.device', (['gpu'], {}), '(gpu)\n', (3234, 3239), False, 'import torch\n'), ((3728, 3771), 'numpy.zeros', 'np.zeros', (['((n_out,) + shape)'], {'dtype': '"""float32"""'}), "((n_out,) + shape, dtype='float32')\n", (3736, 3771), True, 'import numpy as np\n'), ((5695, 5732), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', (['n_workers'], {}), '(n_workers)\n', (5721, 5732), False, 'from concurrent import futures\n'), ((3901, 3916), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3914, 3916), False, 'import torch\n'), ((4414, 4448), 'torch.from_numpy', 'torch.from_numpy', (['inp[expand_dims]'], {}), '(inp[expand_dims])\n', (4430, 4448), False, 'import torch\n'), ((3340, 3355), 'copy.deepcopy', 'deepcopy', (['model'], {}), '(model)\n', (3348, 3355), False, 'from copy import deepcopy\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 1 19:11:52 2017
@author: mariapanteli
"""
import pytest
import numpy as np
import os
import scripts.OPMellin as OPMellin
opm = OPMellin.OPMellin()
TEST_AUDIO_FILE = os.path.join(os.path.dirname(__file__), 'data', 'mel_1_2_1.wav')
def test_load_audiofile():
audiofile = TEST_AUDIO_FILE
opm.load_audiofile(audiofile, segment=False)
assert opm.y is not None and opm.sr is not None
def test_mel_spectrogram():
audiofile = TEST_AUDIO_FILE
opm.load_audiofile(audiofile, segment=False)
opm.mel_spectrogram(y=opm.y, sr=opm.sr)
# assume 40 mel bands
assert opm.melspec.shape[0] == 40
def test_post_process_spec():
audiofile = TEST_AUDIO_FILE
opm.load_audiofile(audiofile, segment=False)
opm.mel_spectrogram(y=opm.y, sr=opm.sr)
melspec = opm.melspec
opm.post_process_spec(melspec=melspec)
proc_melspec = opm.melspec
assert melspec.shape == proc_melspec.shape
def test_onset_patterns_n_frames():
audiofile = TEST_AUDIO_FILE
opm.load_audiofile(audiofile, segment=False)
opm.mel_spectrogram(y=opm.y, sr=opm.sr)
opm.onset_patterns(melspec=opm.melspec, melsr=opm.melsr)
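    # The expected frame count (duration - win2sec) * 2 implies a win2sec-second
    # analysis window hopped every 0.5 s over the mel spectrogram.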
assert opm.op.shape[2] == np.round(((opm.melspec.shape[1] / opm.melsr) - opm.win2sec) * 2.)
def test_onset_patterns_n_bins():
audiofile = TEST_AUDIO_FILE
opm.load_audiofile(audiofile, segment=False)
opm.mel_spectrogram(y=opm.y, sr=opm.sr)
opm.onset_patterns(melspec=opm.melspec, melsr=opm.melsr)
assert opm.op.shape[0] == 40
def test_post_process_op():
audiofile = TEST_AUDIO_FILE
opm.load_audiofile(audiofile, segment=False)
opm.mel_spectrogram(y=opm.y, sr=opm.sr)
opm.onset_patterns(melspec=opm.melspec, melsr=opm.melsr)
op = opm.op
opm.post_process_op()
proc_op = opm.op
assert op.shape == proc_op.shape
|
[
"os.path.dirname",
"numpy.round",
"scripts.OPMellin.OPMellin"
] |
[((181, 200), 'scripts.OPMellin.OPMellin', 'OPMellin.OPMellin', ([], {}), '()\n', (198, 200), True, 'import scripts.OPMellin as OPMellin\n'), ((232, 257), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (247, 257), False, 'import os\n'), ((1222, 1286), 'numpy.round', 'np.round', (['((opm.melspec.shape[1] / opm.melsr - opm.win2sec) * 2.0)'], {}), '((opm.melspec.shape[1] / opm.melsr - opm.win2sec) * 2.0)\n', (1230, 1286), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021-2021 the DerivX authors
# All rights reserved.
#
# The project sponsor and lead author is <NAME>.
# E-mail: <EMAIL>, QQ: 277195007, WeChat: xrd_ustc
# See the contributors file for names of other contributors.
#
# Commercial use of this code in source and binary forms is
# governed by a LGPL v3 license. You may get a copy from the
# root directory. Or else you should get a specific written
# permission from the project author.
#
# Individual and educational use of this code in source and
# binary forms is governed by a 3-clause BSD license. You may
# get a copy from the root directory. Certainly welcome you
# to contribute code of all sorts.
#
# Be sure to retain the above copyright notice and conditions.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import derivx
g_uoc_dop = 1 # up-and-out call, down-and-out put: the double shark fin
class Config(object):
def __init__(self):
        self.rand_rows = 0 # number of rows of random data # InitRand
        self.rand_cols = 0 # number of columns of random data # InitRand
        self.rand_quasi = False # type of random data # InitRand # quasi random data currently runs on a single core only
        self.rand_seed = np.array([]) # random seeds # InitRand # non-negative integers, no more entries than logical processors; for quasi only the first entry is used
        self.dual_smooth = True # antithetic (dual) smoothed paths # InitPath
        self.runs_size = 0 # number of simulated paths # InitPath
        self.runs_step = 0 # number of price steps # InitPath
        self.year_days = 0 # trading days per year # InitPath
        self.sigma = 0.0 # volatility # InitPath
        self.risk_free_rate = 0.0 # risk-free rate # InitPath
        self.basis_rate = 0.0 # dividend yield or basis # InitPath
        self.price_limit_ratio = 0.0 # daily price limit ratio # InitPath
        self.price_limit_style = 0 # price limit style: 0 no limit, 1 carry the excess to the next day, 2 clip the excess # InitPath
        self.s = 0.0 # underlying price
        self.h_l = 0.0 # barrier level, low
        self.h_h = 0.0 # barrier level, high
        self.k_l = 0.0 # strike price, low
        self.k_h = 0.0 # strike price, high
        self.x = 0.0 # amount paid after knock-out
        self.v = 0.0 # volatility # unused for the double shark fin
        self.r = 0.0 # risk-free rate # unused for the double shark fin
        self.q = 0.0 # annualized dividend rate # unused for the double shark fin
        self.t = 0.0 # annualized time to maturity # unused for the double shark fin
        self.p = 0.0 # participation rate, the client's share of the payoff if no knock-out occurs
        self.is_kop_delay = False # pay the knock-out amount immediately or defer it to maturity
        self.barrier_type = 0 # barrier type
        self.trade_long = True # trade direction
        self.price_rate = 0.0 # price rate
        self.calc_price = np.array([]) # price grid used for valuation
        self.run_from = 0 # start day, the first day is zero
        self.run_days = 0 # number of days to run
def ToArgs(self):
return self.__dict__
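# Plot a computed result (e.g. a Greek surface) over time step and underlying
# price as a 3D surface.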
def FigureResult(config, result):
figure = plt.figure()
ax = Axes3D(figure)
#ax = Axes3D(figure, auto_add_to_figure = False)
#figure.add_axes(ax)
x = np.arange(0, config.runs_step, 1)
y = config.calc_price
X, Y = np.meshgrid(x, y)
ax.plot_surface(X, Y, result, rstride = 1, cstride = 1, cmap = plt.get_cmap("rainbow"))
plt.show()
def ExportResult(config, result, file_path):
export_days = config.run_days
    if export_days > 255: # Excel allows at most 256 columns; the first column shows the price, leaving 255 for data
        export_days = 255
        print("Note: Excel allows at most 256 columns; %d columns of data were not exported!" % (config.run_days - 255))
    df_result = pd.DataFrame(result[:, config.run_from : (config.run_from + export_days)]).iloc[::-1] # reverse the row order (top to bottom)
df_result.index = config.calc_price[::-1]
df_result.columns = ["day_%d" % (days + 1) for days in np.arange(config.run_from, config.run_from + export_days, 1)]
df_result.to_excel(file_path, sheet_name = "result")
print("导出结果:%s" % file_path)
def Test_Barrier_Double():
config = Config()
    config.rand_rows = 50000 # number of rows of random data # InitRand
    config.rand_cols = 250 # number of columns of random data # InitRand
    config.rand_quasi = False # type of random data # InitRand # quasi random data currently runs on a single core only
    config.rand_seed = np.array([0, 1, 2, 3, 4, 5, 6, 7]) # random seeds # InitRand # non-negative integers, no more entries than logical processors; for quasi only the first entry is used
    config.dual_smooth = True # antithetic (dual) smoothed paths # InitPath
    config.runs_size = 100000 # number of simulated paths # InitPath
    config.runs_step = 244 # number of price steps # InitPath
    config.year_days = 244 # trading days per year # InitPath
    config.sigma = 0.16 # volatility # InitPath
    config.risk_free_rate = 0.03 # risk-free rate # InitPath
    config.basis_rate = 0.06 # dividend yield or basis # InitPath
    config.price_limit_ratio = 0.1 # daily price limit ratio # InitPath
    config.price_limit_style = 0 # price limit style: 0 no limit, 1 carry the excess to the next day, 2 clip the excess # InitPath
    config.s = 100.0 # underlying price
    config.h_l = 95.0 # barrier level, low
    config.h_h = 105.0 # barrier level, high
    config.k_l = 99.0 # strike price, low
    config.k_h = 101.0 # strike price, high
    config.x = 3.5 # amount paid after knock-out
    # config.v = 0.16 # volatility # unused for the double shark fin
    # config.r = 0.03 # risk-free rate # unused for the double shark fin
    # config.q = 0.06 # annualized dividend rate # unused for the double shark fin
    # config.t = 1.0 # annualized time to maturity # unused for the double shark fin
    config.p = 1.0 # participation rate, the client's share of the payoff if no knock-out occurs
    config.is_kop_delay = True # pay the knock-out amount immediately or defer it to maturity
    config.barrier_type = g_uoc_dop # barrier type
    config.trade_long = False # trade direction
    config.price_rate = 0.035 # price rate
    calc_price_u = 110.0 # upper bound of the price grid
    calc_price_d = 90.0 # lower bound of the price grid
    calc_price_g = 1.0 # spacing of the price grid
    #config.calc_price = np.array([65.0, 70.0, 75.0, 80.0, 85.0, 90.0, 95.0, 100.0, 105.0]) # price grid used for valuation
    config.calc_price = np.arange(calc_price_d, calc_price_u + calc_price_g, calc_price_g) # bounds of the price grid included
    config.run_from = 0 # start day, the first day is zero
    config.run_days = 1 # number of days to run
ret_cols = config.runs_step
ret_rows = len(config.calc_price)
barrier = derivx.Barrier("Double")
if barrier.InitArgs(config.ToArgs()) < 0:
print(barrier.GetError())
return
if barrier.InitRand() < 0:
print(barrier.GetError())
return
    # Unless the machine is fairly slow, using SaveRand() and LoadRand() is no longer recommended
    # It is best to include every parameter that affects the random data in the file name, to avoid loading random data that is inconsistent with the configured parameters
#rand_file = "./rand_data_%d_%d_%d.rand" % (config.rand_rows, config.rand_cols, config.rand_seed[0])
#if barrier.SaveRand(rand_file) < 0:
# print(barrier.GetError())
# return
#if barrier.LoadRand(rand_file) < 0:
# print(barrier.GetError())
# return
if barrier.InitPath() < 0:
print(barrier.GetError())
return
    # Unless the machine is fairly slow, using SavePath() and LoadPath() is no longer recommended
    # It is best to include every parameter that affects the path data in the file name, to avoid loading path data that is inconsistent with the configured parameters
#path_file = "./path_data_%d_%d_%d_%d_%.3f_%.3f_%.3f_%.3f_%d.path" % \
# (config.dual_smooth, config.runs_size, config.runs_step, config.year_days,
# config.sigma, config.risk_free_rate, config.basis_rate, config.price_limit_ratio, config.price_limit_style)
#if barrier.SavePath(path_file) < 0:
# print(barrier.GetError())
# return
#if barrier.LoadPath(path_file) < 0:
# print(barrier.GetError())
# return
#print("price:", barrier.CalcPrice())
#print("payoff:", barrier.CalcPayoff())
greek_flags = {"delta":"d"}
#greek_flags = {"delta":"d", "gamma":"g", "vega":"v", "theta":"t", "rho":"r"}
for name, flag in greek_flags.items():
result = np.zeros((ret_rows, ret_cols))
barrier.CalcGreeks(flag, result)
FigureResult(config, result)
ExportResult(config, result, "/export_greeks_%s.xls" % name)
if __name__ == "__main__":
Test_Barrier_Double()
|
[
"pandas.DataFrame",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"mpl_toolkits.mplot3d.Axes3D",
"matplotlib.pyplot.get_cmap",
"numpy.zeros",
"derivx.Barrier",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.array"
] |
[((2501, 2513), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2511, 2513), True, 'import matplotlib.pyplot as plt\n'), ((2523, 2537), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['figure'], {}), '(figure)\n', (2529, 2537), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((2624, 2657), 'numpy.arange', 'np.arange', (['(0)', 'config.runs_step', '(1)'], {}), '(0, config.runs_step, 1)\n', (2633, 2657), True, 'import numpy as np\n'), ((2695, 2712), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (2706, 2712), True, 'import numpy as np\n'), ((2809, 2819), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2817, 2819), True, 'import matplotlib.pyplot as plt\n'), ((3674, 3708), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7])\n', (3682, 3708), True, 'import numpy as np\n'), ((5013, 5079), 'numpy.arange', 'np.arange', (['calc_price_d', '(calc_price_u + calc_price_g)', 'calc_price_g'], {}), '(calc_price_d, calc_price_u + calc_price_g, calc_price_g)\n', (5022, 5079), True, 'import numpy as np\n'), ((5257, 5281), 'derivx.Barrier', 'derivx.Barrier', (['"""Double"""'], {}), "('Double')\n", (5271, 5281), False, 'import derivx\n'), ((1166, 1178), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1174, 1178), True, 'import numpy as np\n'), ((2307, 2319), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2315, 2319), True, 'import numpy as np\n'), ((6761, 6791), 'numpy.zeros', 'np.zeros', (['(ret_rows, ret_cols)'], {}), '((ret_rows, ret_cols))\n', (6769, 6791), True, 'import numpy as np\n'), ((2780, 2803), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""rainbow"""'], {}), "('rainbow')\n", (2792, 2803), True, 'import matplotlib.pyplot as plt\n'), ((3079, 3149), 'pandas.DataFrame', 'pd.DataFrame', (['result[:, config.run_from:config.run_from + export_days]'], {}), '(result[:, config.run_from:config.run_from + export_days])\n', (3091, 3149), True, 'import pandas as pd\n'), ((3279, 3339), 'numpy.arange', 'np.arange', (['config.run_from', '(config.run_from + export_days)', '(1)'], {}), '(config.run_from, config.run_from + export_days, 1)\n', (3288, 3339), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import cv_bridge
from jsk_topic_tools import ConnectionBasedTransport
import rospy
from sensor_msgs.msg import Image
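# Converts each incoming bgr8 image into a constant label image (all ones,
# encoded as 32SC1) with the same size and header, and republishes it.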
class ImageToLabel(ConnectionBasedTransport):
def __init__(self):
super(ImageToLabel, self).__init__()
self._pub = self.advertise('~output', Image, queue_size=1)
def subscribe(self):
self._sub = rospy.Subscriber('~input', Image, self._convert)
def unsubscribe(self):
self._sub.unregister()
def _convert(self, msg):
bridge = cv_bridge.CvBridge()
img = bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
label = np.ones(img.shape[:2], dtype=np.int32)
label_msg = bridge.cv2_to_imgmsg(label, encoding='32SC1')
label_msg.header = msg.header
self._pub.publish(label_msg)
if __name__ == '__main__':
rospy.init_node('image_to_label')
img2label = ImageToLabel()
rospy.spin()
|
[
"cv_bridge.CvBridge",
"rospy.Subscriber",
"numpy.ones",
"rospy.init_node",
"rospy.spin"
] |
[((884, 917), 'rospy.init_node', 'rospy.init_node', (['"""image_to_label"""'], {}), "('image_to_label')\n", (899, 917), False, 'import rospy\n'), ((953, 965), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (963, 965), False, 'import rospy\n'), ((414, 462), 'rospy.Subscriber', 'rospy.Subscriber', (['"""~input"""', 'Image', 'self._convert'], {}), "('~input', Image, self._convert)\n", (430, 462), False, 'import rospy\n'), ((569, 589), 'cv_bridge.CvBridge', 'cv_bridge.CvBridge', ([], {}), '()\n', (587, 589), False, 'import cv_bridge\n'), ((671, 709), 'numpy.ones', 'np.ones', (['img.shape[:2]'], {'dtype': 'np.int32'}), '(img.shape[:2], dtype=np.int32)\n', (678, 709), True, 'import numpy as np\n')]
|
import numpy as np
from sgmrfmix import sGMRFmix
def check_model() -> None:
m = sGMRFmix(K=5, rho=0.8)
train = np.genfromtxt('../Examples/Data/train.csv', delimiter=',', skip_header=True)[:, 1:]
test = np.genfromtxt('../Examples/Data/test.csv', delimiter=',', skip_header=True)[:, 1:]
print(m)
# def test_model(self):
print(train.shape, test.shape)
m.fit(train)
m.show_model_params()
results = m.compute_anomaly(test)
print("Anomaly score:")
print(results)
m.save('test_model.pkl')
m.load('test_model.pkl')
results2 = m.compute_anomaly(test)
print(np.allclose(results, results2))
# # print([r.shape for r in results])
# plt.plot(results[:,0])
# plt.show()
check_model()
|
[
"numpy.allclose",
"numpy.genfromtxt",
"sgmrfmix.sGMRFmix"
] |
[((86, 108), 'sgmrfmix.sGMRFmix', 'sGMRFmix', ([], {'K': '(5)', 'rho': '(0.8)'}), '(K=5, rho=0.8)\n', (94, 108), False, 'from sgmrfmix import sGMRFmix\n'), ((121, 197), 'numpy.genfromtxt', 'np.genfromtxt', (['"""../Examples/Data/train.csv"""'], {'delimiter': '""","""', 'skip_header': '(True)'}), "('../Examples/Data/train.csv', delimiter=',', skip_header=True)\n", (134, 197), True, 'import numpy as np\n'), ((216, 291), 'numpy.genfromtxt', 'np.genfromtxt', (['"""../Examples/Data/test.csv"""'], {'delimiter': '""","""', 'skip_header': '(True)'}), "('../Examples/Data/test.csv', delimiter=',', skip_header=True)\n", (229, 291), True, 'import numpy as np\n'), ((609, 639), 'numpy.allclose', 'np.allclose', (['results', 'results2'], {}), '(results, results2)\n', (620, 639), True, 'import numpy as np\n')]
|
# The MIT License (MIT)
#
# Copyright (c) snkas
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from networkload import *
import unittest
import numpy as np
class TestBaseTopology(unittest.TestCase):
def test_construction_valid(self):
plus_grid(11, 3)
leaf_spine(6, 7)
fat_tree_asymmetric(6)
fat_tree_symmetric(4)
extend_tors_with_servers(plus_grid(11, 3), 2)
extend_tors_with_servers(leaf_spine(5, 5), 3)
extend_tors_with_servers(fat_tree_asymmetric(6), 1)
extend_tors_with_servers(fat_tree_symmetric(4), 70)
def test_str(self):
self.assertEqual(
str(extend_tors_with_servers(leaf_spine(5, 7), 11)),
"Topology(#nodes=67 (12 switches (of which 5 are ToRs), 55 servers), #edges=90)"
)
def test_construction_invalid(self):
Topology(
4,
[(0, 1), (1, 2), (1, 3)],
[0, 1, 2],
[1],
[3]
)
# Duplicate edge I
try:
Topology(
4,
[(0, 1), (1, 2), (1, 3), (1, 3)],
[0, 1, 2],
[1],
[3]
)
self.assertTrue(False)
except ValueError:
self.assertTrue(True)
# Duplicate edge II
try:
Topology(
4,
[(0, 1), (1, 2), (1, 3), (2, 1)],
[0, 1, 2],
[1],
[3]
)
self.assertTrue(False)
except ValueError:
self.assertTrue(True)
# Invalid endpoints I
try:
Topology(
4,
[(0, 1), (1, 2), (1, 4)],
[0, 1, 2],
[1],
[3]
)
self.assertTrue(False)
except ValueError:
self.assertTrue(True)
# Invalid endpoints II
try:
Topology(
4,
[(0, 1), (1, 2), (-1, 3)],
[0, 1, 2],
[1],
[3]
)
self.assertTrue(False)
except ValueError:
self.assertTrue(True)
# Duplicate switches
try:
Topology(
4,
[(0, 1), (1, 2), (1, 3)],
[0, 1, 2, 1],
[1],
[3]
)
self.assertTrue(False)
except ValueError:
self.assertTrue(True)
# Duplicate switches which are ToRs
try:
Topology(
4,
[(0, 1), (1, 2), (1, 3)],
[0, 1, 2],
[1, 1],
[3]
)
self.assertTrue(False)
except ValueError:
self.assertTrue(True)
# Duplicate servers
try:
Topology(
4,
[(0, 1), (1, 2), (1, 3)],
[0, 1, 2],
[1],
[3, 3]
)
self.assertTrue(False)
except ValueError:
self.assertTrue(True)
# Not valid switch id
try:
Topology(
4,
[(0, 1), (1, 2), (1, 3)],
[0, 1, 2, -1],
[1],
[3]
)
self.assertTrue(False)
except ValueError:
self.assertTrue(True)
# Not valid ToR id
try:
Topology(
4,
[(0, 1), (1, 2), (1, 3)],
[0, 1, 2],
[1, -5],
[3]
)
self.assertTrue(False)
except ValueError:
self.assertTrue(True)
# Not valid server id
try:
Topology(
4,
[(0, 1), (1, 2), (1, 3)],
[0, 1, 2],
[1],
[3, -1]
)
self.assertTrue(False)
except ValueError:
self.assertTrue(True)
# Servers and switches not distinct
try:
Topology(
4,
[(0, 1), (1, 2), (1, 3)],
[0, 1, 2],
[1],
[3, 0]
)
self.assertTrue(False)
except ValueError:
self.assertTrue(True)
# Servers and switches do not cover all nodes
try:
Topology(
5,
[(0, 1), (1, 2), (1, 3)],
[0, 1, 2],
[1],
[3]
)
self.assertTrue(False)
except ValueError:
self.assertTrue(True)
# Server is a ToR
try:
Topology(
4,
[(0, 1), (1, 2), (1, 3)],
[0, 1, 2],
[1, 3],
[3]
)
self.assertTrue(False)
except ValueError:
self.assertTrue(True)
# Server is connected to two ToRs
try:
Topology(
4,
[(0, 1), (1, 2), (1, 3), (3, 0)],
[0, 1, 2],
[0, 1],
[3]
)
self.assertTrue(False)
except ValueError:
self.assertTrue(True)
# Server edge to non-ToR
try:
Topology(
4,
[(0, 1), (1, 2), (3, 0)],
[0, 1, 2],
[1],
[3]
)
self.assertTrue(False)
except ValueError:
self.assertTrue(True)
# Server edge to non-ToR
try:
Topology(
4,
[(1, 2), (2, 3), (0, 3)],
[1, 2, 3],
[2],
[0]
)
self.assertTrue(False)
except ValueError:
self.assertTrue(True)
def test_matrix_to_list(self):
# Good matrix
a = np.zeros((3, 3))
a[0][1] = 1
a[1][0] = 1
self.assertEqual([(0, 1)], adjacency_matrix_to_undirected_edges_list(3, a))
# Not bi-directional
try:
a = np.zeros((3, 3))
a[0][1] = 1
adjacency_matrix_to_undirected_edges_list(3, a)
self.assertTrue(False)
except ValueError:
self.assertTrue(True)
# Self-edge
try:
a = np.zeros((3, 3))
a[1][1] = 1
adjacency_matrix_to_undirected_edges_list(3, a)
self.assertTrue(False)
except ValueError:
self.assertTrue(True)
# Wrong shape
try:
a = np.zeros((3, 3))
a[0][1] = 1
a[1][0] = 1
adjacency_matrix_to_undirected_edges_list(4, a)
self.assertTrue(False)
except ValueError:
self.assertTrue(True)
|
[
"numpy.zeros"
] |
[((7051, 7067), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (7059, 7067), True, 'import numpy as np\n'), ((7251, 7267), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (7259, 7267), True, 'import numpy as np\n'), ((7498, 7514), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (7506, 7514), True, 'import numpy as np\n'), ((7747, 7763), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (7755, 7763), True, 'import numpy as np\n')]
|
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import nnabla.functions as F
from nbla_test_utils import list_context
ctxs = list_context('INQAffine')
def quantize(x, max_absval, num_bits):
y = x
# get maximum/minimum exponent
n1 = np.floor(np.log2(max_absval)) + (np.log2(max_absval) -
np.floor(np.log2(max_absval)) >= np.log2(1.5))
n2 = n1 + 1 - 2 ** (num_bits - 2)
pruning_threshold = 2 ** (n2 - 1)
# prune all small values
y[np.abs(x) < pruning_threshold] = 0.0
# quantize remaining values to powers of two
_i = y != 0
_s = np.sign(y[_i])
_b = np.log2(np.abs(y[_i]))
_d = np.log2(1.5) # quantization threshold
# _d = 0.5 use geometric mean
# _d = np.log2(1.5) use arithmetic mean
_e = np.floor(_b) + (_b - np.floor(_b) >= _d)
_e = np.maximum(n2, np.minimum(n1, _e))
y[_i] = _s * 2 ** (_e)
return y
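# Illustrative note (not from the original test): a minimal worked example of
# the power-of-two quantizer above, assuming max_absval=1.0 and num_bits=4,
# which gives n1=0, n2=-3 and a pruning threshold of 2**-4 = 0.0625:
#
#     quantize(np.array([0.05, 0.3, 0.4, -0.9]), 1.0, 4)
#     # -> array([ 0.  ,  0.25,  0.5 , -1.  ])
#
# Values below the pruning threshold are zeroed and the rest snap to a power
# of two under the log2(1.5) (arithmetic-mean) rounding rule. Note that
# `y = x` aliases the input, so quantize also modifies its argument in place.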
def ref_inq_affine(x, w, i, b, base_axis, num_bits,
inq_iterations, selection_algorithm, seed):
if inq_iterations[-1] == 0:
# last element in `inq_iterations`, quantize all weights
i = np.ones_like(i)
elif 0 in inq_iterations:
# only `largest_abs` is deterministic and currently tested
assert(selection_algorithm == 'largest_abs')
idx_var = np.flatnonzero(i == 0)
idx_newfix = idx_var[np.argsort(
np.abs(w.ravel()[idx_var]))[-(len(idx_var) // 2):]]
i.ravel()[idx_newfix] = 1
shape = list(x.shape[:base_axis])
shape += [-1]
out_shape = w.shape[1:]
# quantize weights (0 ... learnable, 1 ... fixed)
wq = np.copy(w)
if np.any(i == 1):
wq[i == 1] = quantize(w[i == 1], np.max(np.abs(w)), num_bits)
wq = wq.reshape(w.shape[0], -1)
y = np.dot(x.reshape(*shape), wq)
if b is not None:
y += b.reshape((1,) * (len(shape) - 1) + (-1,))
return y.reshape(tuple(shape[:-1]) + tuple(out_shape))
def ref_grad_inq_affine(x, w, i, b, dy, base_axis, num_bits,
inq_iterations, selection_algorithm, seed):
shape = list(x.shape[:base_axis])
if inq_iterations[-1] == 0:
# last element in `inq_iterations`, quantize all weights
i = np.ones_like(i)
elif 0 in inq_iterations:
# only `largest_abs` is deterministic
assert(selection_algorithm == 'largest_abs')
idx_var = np.flatnonzero(i == 0)
idx_newfix = idx_var[np.argsort(
np.abs(w.ravel()[idx_var]))[-(len(idx_var) // 2):]]
i.ravel()[idx_newfix] = 1
x_ = x.reshape(np.prod(shape), -1)
wq_ = np.copy(w)
if np.any(i == 1):
wq_[i == 1] = quantize(w[i == 1], np.max(np.abs(w)), num_bits)
wq_ = wq_.reshape(w.shape[0], -1)
dy_ = dy.reshape(np.prod(shape), -1)
dx = np.dot(dy_, np.transpose(wq_))
dw = np.dot(np.transpose(x_), dy_)
if b is not None:
db = np.sum(dy_, 0)
else:
db = np.empty(0)
return np.concatenate([dx.flatten(),
dw.flatten(),
db.flatten()])
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("base_axis, weight_shape, num_bits",
[(1, (12, 2, 3), 2), (2, (4, 4), 4)])
@pytest.mark.parametrize("bias", [True, False])
@pytest.mark.parametrize("inq_iterations", [(10, 20), (0,), (0, 10)])
def test_inq_affine_forward_backward(seed, base_axis, weight_shape, num_bits,
bias, inq_iterations, ctx, func_name):
from nbla_test_utils import function_tester
rng = np.random.RandomState(seed)
# Input
inputs = [rng.randn(2, 3, 4).astype(np.float32)]
# Weights
inputs += [rng.randn(*weight_shape).astype(np.float32)]
# Indices
inputs += [np.random.randint(2, size=weight_shape)]
# Bias
if bias:
inputs += [rng.randn(*weight_shape[1:]).astype(np.float32)]
else:
inputs += [None]
selection_algorithm = 'largest_abs'
function_tester(rng, F.inq_affine, ref_inq_affine, inputs,
func_args=[base_axis, num_bits,
inq_iterations, selection_algorithm, seed],
atol_b=1e-2, backward=[True, True, False, True], ctx=ctx, func_name=func_name,
ref_grad=ref_grad_inq_affine)
|
[
"numpy.minimum",
"numpy.abs",
"numpy.ones_like",
"numpy.copy",
"numpy.sum",
"numpy.log2",
"numpy.floor",
"numpy.empty",
"numpy.transpose",
"numpy.flatnonzero",
"numpy.random.RandomState",
"numpy.any",
"nbla_test_utils.list_context",
"nbla_test_utils.function_tester",
"numpy.random.randint",
"numpy.sign",
"pytest.mark.parametrize",
"numpy.prod"
] |
[((718, 743), 'nbla_test_utils.list_context', 'list_context', (['"""INQAffine"""'], {}), "('INQAffine')\n", (730, 743), False, 'from nbla_test_utils import list_context\n'), ((3687, 3734), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ctx, func_name"""', 'ctxs'], {}), "('ctx, func_name', ctxs)\n", (3710, 3734), False, 'import pytest\n'), ((3736, 3774), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""seed"""', '[313]'], {}), "('seed', [313])\n", (3759, 3774), False, 'import pytest\n'), ((3776, 3879), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""base_axis, weight_shape, num_bits"""', '[(1, (12, 2, 3), 2), (2, (4, 4), 4)]'], {}), "('base_axis, weight_shape, num_bits', [(1, (12, 2, 3\n ), 2), (2, (4, 4), 4)])\n", (3799, 3879), False, 'import pytest\n'), ((3901, 3947), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""bias"""', '[True, False]'], {}), "('bias', [True, False])\n", (3924, 3947), False, 'import pytest\n'), ((3949, 4017), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inq_iterations"""', '[(10, 20), (0,), (0, 10)]'], {}), "('inq_iterations', [(10, 20), (0,), (0, 10)])\n", (3972, 4017), False, 'import pytest\n'), ((1209, 1223), 'numpy.sign', 'np.sign', (['y[_i]'], {}), '(y[_i])\n', (1216, 1223), True, 'import numpy as np\n'), ((1265, 1277), 'numpy.log2', 'np.log2', (['(1.5)'], {}), '(1.5)\n', (1272, 1277), True, 'import numpy as np\n'), ((2237, 2247), 'numpy.copy', 'np.copy', (['w'], {}), '(w)\n', (2244, 2247), True, 'import numpy as np\n'), ((2255, 2269), 'numpy.any', 'np.any', (['(i == 1)'], {}), '(i == 1)\n', (2261, 2269), True, 'import numpy as np\n'), ((3208, 3218), 'numpy.copy', 'np.copy', (['w'], {}), '(w)\n', (3215, 3218), True, 'import numpy as np\n'), ((3226, 3240), 'numpy.any', 'np.any', (['(i == 1)'], {}), '(i == 1)\n', (3232, 3240), True, 'import numpy as np\n'), ((4230, 4257), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (4251, 4257), True, 'import numpy as np\n'), ((4640, 4897), 'nbla_test_utils.function_tester', 'function_tester', (['rng', 'F.inq_affine', 'ref_inq_affine', 'inputs'], {'func_args': '[base_axis, num_bits, inq_iterations, selection_algorithm, seed]', 'atol_b': '(0.01)', 'backward': '[True, True, False, True]', 'ctx': 'ctx', 'func_name': 'func_name', 'ref_grad': 'ref_grad_inq_affine'}), '(rng, F.inq_affine, ref_inq_affine, inputs, func_args=[\n base_axis, num_bits, inq_iterations, selection_algorithm, seed], atol_b\n =0.01, backward=[True, True, False, True], ctx=ctx, func_name=func_name,\n ref_grad=ref_grad_inq_affine)\n', (4655, 4897), False, 'from nbla_test_utils import function_tester\n'), ((1241, 1254), 'numpy.abs', 'np.abs', (['y[_i]'], {}), '(y[_i])\n', (1247, 1254), True, 'import numpy as np\n'), ((1391, 1403), 'numpy.floor', 'np.floor', (['_b'], {}), '(_b)\n', (1399, 1403), True, 'import numpy as np\n'), ((1456, 1474), 'numpy.minimum', 'np.minimum', (['n1', '_e'], {}), '(n1, _e)\n', (1466, 1474), True, 'import numpy as np\n'), ((1742, 1757), 'numpy.ones_like', 'np.ones_like', (['i'], {}), '(i)\n', (1754, 1757), True, 'import numpy as np\n'), ((2832, 2847), 'numpy.ones_like', 'np.ones_like', (['i'], {}), '(i)\n', (2844, 2847), True, 'import numpy as np\n'), ((3177, 3191), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (3184, 3191), True, 'import numpy as np\n'), ((3373, 3387), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (3380, 3387), True, 'import numpy as np\n'), ((3415, 3432), 'numpy.transpose', 'np.transpose', (['wq_'], {}), 
'(wq_)\n', (3427, 3432), True, 'import numpy as np\n'), ((3450, 3466), 'numpy.transpose', 'np.transpose', (['x_'], {}), '(x_)\n', (3462, 3466), True, 'import numpy as np\n'), ((3509, 3523), 'numpy.sum', 'np.sum', (['dy_', '(0)'], {}), '(dy_, 0)\n', (3515, 3523), True, 'import numpy as np\n'), ((3547, 3558), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (3555, 3558), True, 'import numpy as np\n'), ((4426, 4465), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': 'weight_shape'}), '(2, size=weight_shape)\n', (4443, 4465), True, 'import numpy as np\n'), ((849, 868), 'numpy.log2', 'np.log2', (['max_absval'], {}), '(max_absval)\n', (856, 868), True, 'import numpy as np\n'), ((970, 982), 'numpy.log2', 'np.log2', (['(1.5)'], {}), '(1.5)\n', (977, 982), True, 'import numpy as np\n'), ((1097, 1106), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (1103, 1106), True, 'import numpy as np\n'), ((1926, 1948), 'numpy.flatnonzero', 'np.flatnonzero', (['(i == 0)'], {}), '(i == 0)\n', (1940, 1948), True, 'import numpy as np\n'), ((2995, 3017), 'numpy.flatnonzero', 'np.flatnonzero', (['(i == 0)'], {}), '(i == 0)\n', (3009, 3017), True, 'import numpy as np\n'), ((873, 892), 'numpy.log2', 'np.log2', (['max_absval'], {}), '(max_absval)\n', (880, 892), True, 'import numpy as np\n'), ((1412, 1424), 'numpy.floor', 'np.floor', (['_b'], {}), '(_b)\n', (1420, 1424), True, 'import numpy as np\n'), ((2319, 2328), 'numpy.abs', 'np.abs', (['w'], {}), '(w)\n', (2325, 2328), True, 'import numpy as np\n'), ((3291, 3300), 'numpy.abs', 'np.abs', (['w'], {}), '(w)\n', (3297, 3300), True, 'import numpy as np\n'), ((946, 965), 'numpy.log2', 'np.log2', (['max_absval'], {}), '(max_absval)\n', (953, 965), True, 'import numpy as np\n')]
|
# Created by <NAME> (<EMAIL>)
from collections.abc import Iterable
import numpy as np
from .cost import Cost
class SumCost(Cost):
def __init__(self, system, costs):
"""
A cost which is the sum of other cost terms. It can be created by combining
other Cost objects with the `+` operator
Parameters
----------
system : System
System for the cost object.
costs : List of Costs
Cost objects to be summed.
"""
super().__init__(system)
self._costs = costs
@property
def costs(self):
return self._costs[:]
def get_cost_matrices(self):
if self.is_quad:
Q = np.zeros((self.system.obs_dim, self.system.obs_dim))
F = np.zeros((self.system.obs_dim, self.system.obs_dim))
R = np.zeros((self.system.ctrl_dim, self.system.ctrl_dim))
for cost in self._costs:
Q_, R_, F_ = cost.get_cost_matrices()
Q += Q_
R += R_
F += F_
return Q, R, F
else:
raise NotImplementedError
def get_goal(self):
if self.has_goal:
            return self.costs[0].get_goal()
def _sum_results(self, arg, attr):
results = [getattr(cost, attr)(arg) for cost in self.costs]
if isinstance(results[0], Iterable):
return [sum(vals) for vals in zip(*results)]
else:
return sum(results)
def eval_obs_cost(self, obs):
return self._sum_results(obs, "eval_obs_cost")
def eval_obs_cost_diff(self, obs):
return self._sum_results(obs, "eval_obs_cost_diff")
def eval_obs_cost_hess(self, obs):
return self._sum_results(obs, "eval_obs_cost_hess")
def eval_ctrl_cost(self, ctrl):
return self._sum_results(ctrl, "eval_ctrl_cost")
def eval_ctrl_cost_diff(self, ctrl):
return self._sum_results(ctrl, "eval_ctrl_cost_diff")
def eval_ctrl_cost_hess(self, ctrl):
return self._sum_results(ctrl, "eval_ctrl_cost_hess")
def eval_term_obs_cost(self, obs):
return self._sum_results(obs, "eval_term_obs_cost")
def eval_term_obs_cost_diff(self, obs):
return self._sum_results(obs, "eval_term_obs_cost_diff")
def eval_term_obs_cost_hess(self, obs):
return self._sum_results(obs, "eval_term_obs_cost_hess")
@property
def is_quad(self):
if not self.costs[0].is_quad:
return False
goal = self.costs[0].get_goal()
for cost in self.costs[1:]:
if not cost.is_quad:
return False
if not (goal == cost.get_goal()).all():
return False
return True
@property
def is_convex(self):
for cost in self.costs:
if not cost.is_convex:
return False
return True
@property
def is_diff(self):
for cost in self.costs:
if not cost.is_diff:
return False
return True
@property
def is_twice_diff(self):
for cost in self.costs:
            if not cost.is_twice_diff:
return False
return True
@property
def has_goal(self):
if not self.costs[0].has_goal:
return False
goal = self.costs[0].get_goal()
for cost in self.costs[1:]:
if not cost.has_goal:
return False
if not (goal == cost.get_goal()).all():
return False
return True
def __add__(self, other):
if isinstance(other, SumCost):
return SumCost(self.system, [*self.costs, *other.costs])
else:
return SumCost(self.system, [*self.costs, other])
def __radd__(self, other):
if isinstance(other, SumCost):
return SumCost(self.system, [*other.costs, *self.costs])
else:
return SumCost(self.system, [other, *self.costs])
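# Illustrative sketch (not from the original source): SumCost is normally
# produced implicitly by `+` on Cost objects, assuming `cost_a` and `cost_b`
# are existing Cost instances for the same system:
#
#     total = cost_a + cost_b               # SumCost over [cost_a, cost_b]
#     c = total.eval_obs_cost(obs)          # term-wise sum of the two costs
#     if total.is_quad:
#         Q, R, F = total.get_cost_matrices()   # element-wise sums of Q, R, F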
|
[
"numpy.zeros"
] |
[((705, 757), 'numpy.zeros', 'np.zeros', (['(self.system.obs_dim, self.system.obs_dim)'], {}), '((self.system.obs_dim, self.system.obs_dim))\n', (713, 757), True, 'import numpy as np\n'), ((774, 826), 'numpy.zeros', 'np.zeros', (['(self.system.obs_dim, self.system.obs_dim)'], {}), '((self.system.obs_dim, self.system.obs_dim))\n', (782, 826), True, 'import numpy as np\n'), ((843, 897), 'numpy.zeros', 'np.zeros', (['(self.system.ctrl_dim, self.system.ctrl_dim)'], {}), '((self.system.ctrl_dim, self.system.ctrl_dim))\n', (851, 897), True, 'import numpy as np\n')]
|
# ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
# MIT License
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
import logging
from typing import Callable
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Union
import numpy as np
import pytorch_lightning as pl
import torch
# TODO: wandb and matplotlib are not in requirements
import matplotlib.pyplot as plt
import wandb
import disent.util.strings.colors as c
from disent.dataset import DisentDataset
from disent.dataset.data import GroundTruthData
from disent.frameworks.ae import Ae
from disent.frameworks.vae import Vae
from disent.util.function import wrapped_partial
from disent.util.iters import chunked
from disent.util.lightning.callbacks._callbacks_base import BaseCallbackPeriodic
from disent.util.lightning.callbacks._helper import _get_dataset_and_ae_like
from disent.util.lightning.logger_util import wb_log_metrics
from disent.util.profiling import Timer
from disent.util.seeds import TempNumpySeed
from disent.util.visualize.plot import plt_subplots_imshow
log = logging.getLogger(__name__)
# ========================================================================= #
# Helper Functions #
# ========================================================================= #
# helper
def _to_dmat(
size: int,
i_a: np.ndarray,
i_b: np.ndarray,
dists: Union[torch.Tensor, np.ndarray],
) -> np.ndarray:
if isinstance(dists, torch.Tensor):
dists = dists.detach().cpu().numpy()
# checks
assert i_a.ndim == 1
assert i_a.shape == i_b.shape
assert i_a.shape == dists.shape
# compute
dmat = np.zeros([size, size], dtype='float32')
dmat[i_a, i_b] = dists
dmat[i_b, i_a] = dists
return dmat
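# Illustrative example (not from the original source): _to_dmat scatters
# pairwise distances given for the upper triangle into a full symmetric
# matrix with a zero diagonal, e.g.
#
#     i_a, i_b = np.triu_indices(3, k=1)    # pairs (0,1), (0,2), (1,2)
#     _to_dmat(3, i_a, i_b, np.array([1., 2., 3.], dtype='float32'))
#     # -> [[0., 1., 2.],
#     #     [1., 0., 3.],
#     #     [2., 3., 0.]]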
_AE_DIST_NAMES = ('x', 'z', 'x_recon')
_VAE_DIST_NAMES = ('x', 'z', 'kl', 'x_recon')
@torch.no_grad()
def _get_dists_ae(ae: Ae, x_a: torch.Tensor, x_b: torch.Tensor):
    # feed forward
z_a, z_b = ae.encode(x_a), ae.encode(x_b)
r_a, r_b = ae.decode(z_a), ae.decode(z_b)
# distances
return [
ae.recon_handler.compute_pairwise_loss(x_a, x_b),
torch.norm(z_a - z_b, p=2, dim=-1), # l2 dist
ae.recon_handler.compute_pairwise_loss(r_a, r_b),
]
@torch.no_grad()
def _get_dists_vae(vae: Vae, x_a: torch.Tensor, x_b: torch.Tensor):
from torch.distributions import kl_divergence
# feed forward
(z_post_a, z_prior_a), (z_post_b, z_prior_b) = vae.encode_dists(x_a), vae.encode_dists(x_b)
z_a, z_b = z_post_a.mean, z_post_b.mean
r_a, r_b = vae.decode(z_a), vae.decode(z_b)
# dists
kl_ab = 0.5 * kl_divergence(z_post_a, z_post_b) + 0.5 * kl_divergence(z_post_b, z_post_a)
# distances
return [
vae.recon_handler.compute_pairwise_loss(x_a, x_b),
torch.norm(z_a - z_b, p=2, dim=-1), # l2 dist
vae.recon_handler._pairwise_reduce(kl_ab),
vae.recon_handler.compute_pairwise_loss(r_a, r_b),
]
def _get_dists_fn(model: Ae) -> Tuple[Optional[Tuple[str, ...]], Optional[Callable[[object, object], Sequence[Sequence[float]]]]]:
# get aggregate function
if isinstance(model, Vae):
dists_names, dists_fn = _VAE_DIST_NAMES, wrapped_partial(_get_dists_vae, model)
elif isinstance(model, Ae):
dists_names, dists_fn = _AE_DIST_NAMES, wrapped_partial(_get_dists_ae, model)
else:
dists_names, dists_fn = None, None
return dists_names, dists_fn
@torch.no_grad()
def _collect_dists_subbatches(dists_fn: Callable[[object, object], Sequence[Sequence[float]]], batch: torch.Tensor, i_a: np.ndarray, i_b: np.ndarray, batch_size: int = 64):
# feed forward
results = []
for idxs in chunked(np.stack([i_a, i_b], axis=-1), chunk_size=batch_size):
ia, ib = idxs.T
x_a, x_b = batch[ia], batch[ib]
# feed forward
data = dists_fn(x_a, x_b)
results.append(data)
return [torch.cat(r, dim=0) for r in zip(*results)]
def _compute_and_collect_dists(
dataset: DisentDataset,
dists_fn,
dists_names: Sequence[str],
traversal_repeats: int = 100,
batch_size: int = 32,
include_gt_factor_dists: bool = True,
transform_batch: Callable[[object], object] = None,
data_mode: str = 'input',
) -> Tuple[Tuple[str, ...], List[List[np.ndarray]]]:
assert traversal_repeats > 0
gt_data = dataset.gt_data
# generate
f_grid = []
# generate
for f_idx, f_size in enumerate(gt_data.factor_sizes):
# save for the current factor (traversal_repeats, len(names), len(i_a))
f_dists = []
# upper triangle excluding diagonal
i_a, i_b = np.triu_indices(f_size, k=1)
# repeat over random traversals
for i in range(traversal_repeats):
# get random factor traversal
factors = gt_data.sample_random_factor_traversal(f_idx=f_idx)
indices = gt_data.pos_to_idx(factors)
# load data
batch = dataset.dataset_batch_from_indices(indices, data_mode)
if transform_batch is not None:
batch = transform_batch(batch)
# feed forward & compute dists -- (len(names), len(i_a))
dists = _collect_dists_subbatches(dists_fn=dists_fn, batch=batch, i_a=i_a, i_b=i_b, batch_size=batch_size)
assert len(dists) == len(dists_names)
# distances
f_dists.append(dists)
# aggregate all dists into distances matrices for current factor
f_dmats = [
_to_dmat(size=f_size, i_a=i_a, i_b=i_b, dists=torch.stack(dists, dim=0).mean(dim=0))
for dists in zip(*f_dists)
]
# handle factors
if include_gt_factor_dists:
i_dmat = _to_dmat(size=f_size, i_a=i_a, i_b=i_b, dists=np.abs(factors[i_a] - factors[i_b]).sum(axis=-1))
f_dmats = [i_dmat, *f_dmats]
# append data
f_grid.append(f_dmats)
# handle factors
if include_gt_factor_dists:
dists_names = ('factors', *dists_names)
# done
return tuple(dists_names), f_grid
def compute_factor_distances(
dataset: DisentDataset,
dists_fn,
dists_names: Sequence[str],
traversal_repeats: int = 100,
batch_size: int = 32,
include_gt_factor_dists: bool = True,
transform_batch: Callable[[object], object] = None,
seed: Optional[int] = 777,
data_mode: str = 'input',
) -> Tuple[Tuple[str, ...], List[List[np.ndarray]]]:
# log this callback
gt_data = dataset.gt_data
log.info(f'| {gt_data.name} - computing factor distances...')
# compute various distances matrices for each factor
with Timer() as timer, TempNumpySeed(seed):
dists_names, f_grid = _compute_and_collect_dists(
dataset=dataset,
dists_fn=dists_fn,
dists_names=dists_names,
traversal_repeats=traversal_repeats,
batch_size=batch_size,
include_gt_factor_dists=include_gt_factor_dists,
transform_batch=transform_batch,
data_mode=data_mode,
)
# log this callback!
log.info(f'| {gt_data.name} - computed factor distances! time{c.GRY}={c.lYLW}{timer.pretty:<9}{c.RST}')
return dists_names, f_grid
def plt_factor_distances(
gt_data: GroundTruthData,
f_grid: List[List[np.ndarray]],
dists_names: Sequence[str],
title: str,
plt_block_size: float = 1.25,
plt_transpose: bool = False,
plt_cmap='Blues',
):
# plot information
imshow_kwargs = dict(cmap=plt_cmap)
figsize = (plt_block_size*len(f_grid[0]), plt_block_size * gt_data.num_factors)
# plot!
if not plt_transpose:
fig, axs = plt_subplots_imshow(grid=f_grid, col_labels=dists_names, row_labels=gt_data.factor_names, figsize=figsize, title=title, imshow_kwargs=imshow_kwargs)
else:
fig, axs = plt_subplots_imshow(grid=list(zip(*f_grid)), col_labels=gt_data.factor_names, row_labels=dists_names, figsize=figsize[::-1], title=title, imshow_kwargs=imshow_kwargs)
# done
return fig, axs
# ========================================================================= #
# Data Dists Visualisation Callback #
# ========================================================================= #
class VaeGtDistsLoggingCallback(BaseCallbackPeriodic):
def __init__(
self,
seed: Optional[int] = 7777,
every_n_steps: Optional[int] = None,
traversal_repeats: int = 100,
begin_first_step: bool = False,
plt_block_size: float = 1.25,
plt_show: bool = False,
plt_transpose: bool = False,
log_wandb: bool = True, # TODO: detect this automatically?
batch_size: int = 128,
include_factor_dists: bool = True,
):
assert traversal_repeats > 0
self._traversal_repeats = traversal_repeats
self._seed = seed
self._plt_block_size = plt_block_size
self._plt_show = plt_show
self._log_wandb = log_wandb
self._include_gt_factor_dists = include_factor_dists
self._transpose_plot = plt_transpose
self._batch_size = batch_size
super().__init__(every_n_steps, begin_first_step)
@torch.no_grad()
def do_step(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
# exit early
if not (self._plt_show or self._log_wandb):
            log.warning(f'skipping {self.__class__.__name__}: neither `plt_show` nor `log_wandb` is `True`!')
return
# get dataset and vae framework from trainer and module
dataset, vae = _get_dataset_and_ae_like(trainer, pl_module, unwrap_groundtruth=True)
# exit early
if not dataset.is_ground_truth:
log.warning(f'cannot run {self.__class__.__name__} over non-ground-truth data, skipping!')
return
# get aggregate function
dists_names, dists_fn = _get_dists_fn(vae)
if (dists_names is None) or (dists_fn is None):
log.warning(f'cannot run {self.__class__.__name__}, unsupported model type: {type(vae)}, must be {Ae.__name__} or {Vae.__name__}')
return
# compute various distances matrices for each factor
dists_names, f_grid = compute_factor_distances(
dataset=dataset,
dists_fn=dists_fn,
dists_names=dists_names,
traversal_repeats=self._traversal_repeats,
batch_size=self._batch_size,
include_gt_factor_dists=self._include_gt_factor_dists,
transform_batch=lambda batch: batch.to(vae.device),
seed=self._seed,
data_mode='input',
)
# plot these results
fig, axs = plt_factor_distances(
gt_data=dataset.gt_data,
f_grid=f_grid,
dists_names=dists_names,
title=f'{vae.__class__.__name__}: {dataset.gt_data.name.capitalize()} Distances',
plt_block_size=self._plt_block_size,
plt_transpose=self._transpose_plot,
plt_cmap='Blues',
)
# show the plot
if self._plt_show:
plt.show()
# log the plot to wandb
if self._log_wandb:
wb_log_metrics(trainer.logger, {
'factor_distances': wandb.Image(fig)
})
# ========================================================================= #
# END #
# ========================================================================= #
|
[
"numpy.stack",
"matplotlib.pyplot.show",
"torch.stack",
"numpy.abs",
"disent.util.lightning.callbacks._helper._get_dataset_and_ae_like",
"torch.norm",
"disent.util.visualize.plot.plt_subplots_imshow",
"numpy.zeros",
"torch.cat",
"numpy.triu_indices",
"torch.distributions.kl_divergence",
"disent.util.profiling.Timer",
"wandb.Image",
"disent.util.seeds.TempNumpySeed",
"disent.util.function.wrapped_partial",
"torch.no_grad",
"logging.getLogger"
] |
[((2277, 2304), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2294, 2304), False, 'import logging\n'), ((3102, 3117), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3115, 3117), False, 'import torch\n'), ((3503, 3518), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3516, 3518), False, 'import torch\n'), ((4697, 4712), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4710, 4712), False, 'import torch\n'), ((2902, 2941), 'numpy.zeros', 'np.zeros', (['[size, size]'], {'dtype': '"""float32"""'}), "([size, size], dtype='float32')\n", (2910, 2941), True, 'import numpy as np\n'), ((10503, 10518), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10516, 10518), False, 'import torch\n'), ((3389, 3423), 'torch.norm', 'torch.norm', (['(z_a - z_b)'], {'p': '(2)', 'dim': '(-1)'}), '(z_a - z_b, p=2, dim=-1)\n', (3399, 3423), False, 'import torch\n'), ((4046, 4080), 'torch.norm', 'torch.norm', (['(z_a - z_b)'], {'p': '(2)', 'dim': '(-1)'}), '(z_a - z_b, p=2, dim=-1)\n', (4056, 4080), False, 'import torch\n'), ((4946, 4975), 'numpy.stack', 'np.stack', (['[i_a, i_b]'], {'axis': '(-1)'}), '([i_a, i_b], axis=-1)\n', (4954, 4975), True, 'import numpy as np\n'), ((5163, 5182), 'torch.cat', 'torch.cat', (['r'], {'dim': '(0)'}), '(r, dim=0)\n', (5172, 5182), False, 'import torch\n'), ((5887, 5915), 'numpy.triu_indices', 'np.triu_indices', (['f_size'], {'k': '(1)'}), '(f_size, k=1)\n', (5902, 5915), True, 'import numpy as np\n'), ((7876, 7883), 'disent.util.profiling.Timer', 'Timer', ([], {}), '()\n', (7881, 7883), False, 'from disent.util.profiling import Timer\n'), ((7894, 7913), 'disent.util.seeds.TempNumpySeed', 'TempNumpySeed', (['seed'], {}), '(seed)\n', (7907, 7913), False, 'from disent.util.seeds import TempNumpySeed\n'), ((8911, 9064), 'disent.util.visualize.plot.plt_subplots_imshow', 'plt_subplots_imshow', ([], {'grid': 'f_grid', 'col_labels': 'dists_names', 'row_labels': 'gt_data.factor_names', 'figsize': 'figsize', 'title': 'title', 'imshow_kwargs': 'imshow_kwargs'}), '(grid=f_grid, col_labels=dists_names, row_labels=gt_data\n .factor_names, figsize=figsize, title=title, imshow_kwargs=imshow_kwargs)\n', (8930, 9064), False, 'from disent.util.visualize.plot import plt_subplots_imshow\n'), ((10881, 10950), 'disent.util.lightning.callbacks._helper._get_dataset_and_ae_like', '_get_dataset_and_ae_like', (['trainer', 'pl_module'], {'unwrap_groundtruth': '(True)'}), '(trainer, pl_module, unwrap_groundtruth=True)\n', (10905, 10950), False, 'from disent.util.lightning.callbacks._helper import _get_dataset_and_ae_like\n'), ((3874, 3907), 'torch.distributions.kl_divergence', 'kl_divergence', (['z_post_a', 'z_post_b'], {}), '(z_post_a, z_post_b)\n', (3887, 3907), False, 'from torch.distributions import kl_divergence\n'), ((3916, 3949), 'torch.distributions.kl_divergence', 'kl_divergence', (['z_post_b', 'z_post_a'], {}), '(z_post_b, z_post_a)\n', (3929, 3949), False, 'from torch.distributions import kl_divergence\n'), ((4451, 4489), 'disent.util.function.wrapped_partial', 'wrapped_partial', (['_get_dists_vae', 'model'], {}), '(_get_dists_vae, model)\n', (4466, 4489), False, 'from disent.util.function import wrapped_partial\n'), ((12412, 12422), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12420, 12422), True, 'import matplotlib.pyplot as plt\n'), ((4570, 4607), 'disent.util.function.wrapped_partial', 'wrapped_partial', (['_get_dists_ae', 'model'], {}), '(_get_dists_ae, model)\n', (4585, 4607), False, 'from disent.util.function import 
wrapped_partial\n'), ((12564, 12580), 'wandb.Image', 'wandb.Image', (['fig'], {}), '(fig)\n', (12575, 12580), False, 'import wandb\n'), ((6802, 6827), 'torch.stack', 'torch.stack', (['dists'], {'dim': '(0)'}), '(dists, dim=0)\n', (6813, 6827), False, 'import torch\n'), ((7018, 7053), 'numpy.abs', 'np.abs', (['(factors[i_a] - factors[i_b])'], {}), '(factors[i_a] - factors[i_b])\n', (7024, 7053), True, 'import numpy as np\n')]
|
import numpy as np
from analysisdatalink import datalink
from collections import defaultdict
class AnalysisDataLinkExt(datalink.AnalysisDataLink):
def __init__(self, dataset_name, materialization_version=None,
sqlalchemy_database_uri=None, verbose=True,
annotation_endpoint=None):
super().__init__(dataset_name, materialization_version,
sqlalchemy_database_uri, verbose=verbose,
annotation_endpoint=annotation_endpoint)
def query_synapses(self, synapse_table, pre_ids=None, post_ids=None,
compartment_include_filter=None,
include_autapses=False,
compartment_table=None, return_sql=False,
fix_wkb=True, fix_decimal=True, import_via_buffer=True,
n_threads=None):
"""Query a synapse table and return a dataframe
Parameters
----------
synapse_table : str
Table name with a synapse schema
pre_ids : collection of ints, optional
Object ids for presynaptic neurons, by default None
post_ids : collection of ints, optional
Object ids for postsynaptic neurons, by default None
compartment_include_filter : None, optional
Not currently implemented. By default None
include_autapses : bool, optional
Include synapses whose pre- and post-synaptic objects are the same, by default False
compartment_table : str, optional
Not currently implemented. Would be a table name for synapse compartments. By default None
return_sql : bool, optional
Return the sqlalchemy query object instead of the data itself, by default False
fix_wkb : bool, optional
Convert wkb-formatted spatial location columns to numpy 3-vectors. Setting to False
can be much faster, but spatial information is not easy to parse. These columns can be
parsed after the fact with analysisdatalink.fix_wkb_column. Optional, by default True
fix_decimal : bool, optional
Convert Decimal columns to ints. Not used if import_via_buffer is True. By default True
import_via_buffer : bool, optional
Flag to determine whether to use a fast csv and tempfile based SQL import (if True) or the pandas
native read_sql import (if False). If column formatting is odd, try setting to False. Optional, by default True.
n_threads : int or None, optional
Number of threads to use when parsing columns to convert wkb. Unused if fix_wkb is False. If set to 1,
multiprocessing is not used and slower numpy vectorization is used instead.
If None, uses the number of cpus available on the device. By default None
Returns
-------
pandas.DataFrame
DataFrame representation of the query results.
"""
filter_in_dict = defaultdict(dict)
filter_equal_dict = defaultdict(dict)
if pre_ids is not None:
filter_in_dict[synapse_table]["pre_pt_root_id"] = [int(pid) for pid in pre_ids]
if post_ids is not None:
filter_in_dict[synapse_table]["post_pt_root_id"] = [int(pid) for pid in post_ids]
if not include_autapses:
filter_equal_dict[synapse_table]["valid"] = True
if compartment_table is not None:
tables = [[synapse_table, "id"],
[compartment_table, "synapse_id"]]
if compartment_include_filter is not None:
filter_in_dict[compartment_table]['label'] = compartment_include_filter
else:
tables = [synapse_table]
df = self.specific_query(tables,
filter_in_dict=filter_in_dict,
filter_equal_dict=filter_equal_dict,
return_sql=return_sql,
fix_wkb=fix_wkb, fix_decimal=fix_decimal,
import_via_buffer=import_via_buffer,
n_threads=n_threads)
return df
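    # Illustrative usage sketch (not from the original source; the dataset
    # name, table name and root id below are hypothetical):
    #
    #     dl = AnalysisDataLinkExt(dataset_name='my_dataset',
    #                              materialization_version=1)
    #     syn_df = dl.query_synapses('my_synapse_table',
    #                                pre_ids=[648518346349539896],
    #                                include_autapses=False)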
def query_cell_types(self, cell_type_table, cell_type_include_filter=None,
cell_type_exclude_filter=None, return_only_ids=False,
exclude_zero_root_ids=False, fix_wkb=True, fix_decimal=True,
return_sql=False, import_via_buffer=True, n_threads=None):
"""Query a synapse table and return a dataframe
Parameters
----------
cell_type_table : str
Table name with a cell_type schema
cell_type_include_filter : collection of str, optional
Cell types to include
cell_type_exclude_filter : collection of str, optional
Cell types to exclude
return_only_ids : bool, optional
            If True, return only the matching pt_root_id values.
exclude_zero_root_ids : bool, optional
            Filter out points with a null segmentation id.
return_sql : bool, optional
Return the sqlalchemy query object instead of the data itself, by default False
fix_wkb : bool, optional
Convert wkb-formatted spatial location columns to numpy 3-vectors. Setting to False
can be much faster, but spatial information is not easy to parse. These columns can be
parsed after the fact with analysisdatalink.fix_wkb_column. Optional, by default True
fix_decimal : bool, optional
Convert Decimal columns to ints. Not used if import_via_buffer is True. By default True
import_via_buffer : bool, optional
Flag to determine whether to use a fast csv and tempfile based SQL import (if True) or the pandas
native read_sql import (if False). If column formatting is odd, try setting to False. Optional, by default True.
n_threads : int or None, optional
Number of threads to use when parsing columns to convert wkb. Unused if fix_wkb is False. If set to 1,
multiprocessing is not used and slower numpy vectorization is used instead.
If None, uses the number of cpus available on the device. By default None
Returns
-------
pandas.DataFrame
DataFrame representation of the query results.
"""
filter_in_dict = defaultdict(dict)
if cell_type_include_filter is not None:
filter_in_dict[cell_type_table]["cell_type"] = cell_type_include_filter
filter_notin_dict = defaultdict(dict)
if exclude_zero_root_ids:
filter_notin_dict[cell_type_table]["pt_root_id"] = [0]
if cell_type_exclude_filter is not None:
filter_notin_dict[cell_type_table]['cell_type'] = cell_type_exclude_filter
if return_only_ids:
select_columns = ["pt_root_id"]
else:
select_columns = None
df = self.specific_query(tables=[cell_type_table],
filter_in_dict=filter_in_dict,
filter_notin_dict=filter_notin_dict,
select_columns=select_columns,
fix_wkb=fix_wkb,
fix_decimal=fix_decimal,
return_sql=return_sql,
import_via_buffer=import_via_buffer,
n_threads=n_threads)
if return_only_ids:
return np.array(df, dtype = np.uint64).squeeze()
else:
return df
def query_cell_ids(self, cell_id_table, cell_id_filter=None,
cell_id_exclude_filter=None, return_only_ids=False,
exclude_zero_root_ids=False, fix_wkb=True, fix_decimal=True,
return_sql=False, import_via_buffer=True, n_threads=None):
""" Query cell id tables
Parameters
----------
cell_id_table : str
Table name for a microns_functional_coregistration table
cell_id_filter : list of uint64s, optional
List of root ids to include. Default is None.
cell_id_exclude_filter : list of uint64s, optional
List of root ids to exclude. Default is None.
return_only_ids : bool, optional
            If True, return only the matching pt_root_id values. Default is False.
exclude_zero_root_ids : bool, optional
            Filter out points with a null segmentation id. Default is False.
return_sql : bool, optional
Return the sqlalchemy query object instead of the data itself, by default False
fix_wkb : bool, optional
Convert wkb-formatted spatial location columns to numpy 3-vectors. Setting to False
can be much faster, but spatial information is not easy to parse. These columns can be
parsed after the fact with analysisdatalink.fix_wkb_column. Optional, by default True
fix_decimal : bool, optional
Convert Decimal columns to ints. Not used if import_via_buffer is True. By default True
import_via_buffer : bool, optional
Flag to determine whether to use a fast csv and tempfile based SQL import (if True) or the pandas
native read_sql import (if False). If column formatting is odd, try setting to False. Optional, by default True.
n_threads : int or None, optional
Number of threads to use when parsing columns to convert wkb. Unused if fix_wkb is False. If set to 1,
multiprocessing is not used and slower numpy vectorization is used instead.
If None, uses the number of cpus available on the device. By default None
Returns
-------
pandas.DataFrame
DataFrame representation of the query results.
"""
filter_in_dict = defaultdict(dict)
if cell_id_filter is not None:
filter_in_dict[cell_id_table]['func_id'] = [int(pid) for pid in cell_id_filter]
filter_notin_dict = defaultdict(dict)
if cell_id_exclude_filter is not None:
filter_notin_dict[cell_id_table]['func_id'] = [int(pid) for pid in cell_id_exclude_filter]
if exclude_zero_root_ids is not None:
filter_notin_dict[cell_id_table]['pt_root_id'] = [0]
if return_only_ids:
select_columns = ['pt_root_id']
else:
select_columns = None
df = self.specific_query(tables=[cell_id_table],
filter_in_dict=filter_in_dict,
filter_notin_dict=filter_notin_dict,
select_columns=select_columns,
fix_wkb=fix_wkb,
fix_decimal=fix_decimal,
return_sql=return_sql,
import_via_buffer=import_via_buffer, n_threads=n_threads)
if return_only_ids:
return np.array(df, dtype=np.uint64).squeeze()
else:
return df
def query_coreg(self, coreg_table, cell_id_filter=None,
cell_id_exclude_filter=None, return_only_mapping=False,
exclude_zero_root_ids=False,
fix_wkb=True, fix_decimal=True,
return_sql=False, import_via_buffer=True, n_threads=None):
""" Query cell id tables
Parameters
----------
coreg_table : str
Table name for a microns_functional_coregistration table
cell_id_filter : list of uint64s, optional
List of root ids to include. Default is None.
cell_id_exclude_filter : list of uint64s, optional
List of root ids to exclude. Default is None.
        return_only_mapping : bool, optional
            If True, return only the pt_root_id / func_id mapping. Default is False.
exclude_zero_root_ids : bool, optional
            Filter out points with a null segmentation id. Default is False.
return_sql : bool, optional
Return the sqlalchemy query object instead of the data itself, by default False
fix_wkb : bool, optional
Convert wkb-formatted spatial location columns to numpy 3-vectors. Setting to False
can be much faster, but spatial information is not easy to parse. These columns can be
parsed after the fact with analysisdatalink.fix_wkb_column. Optional, by default True
fix_decimal : bool, optional
Convert Decimal columns to ints. Not used if import_via_buffer is True. By default True
import_via_buffer : bool, optional
Flag to determine whether to use a fast csv and tempfile based SQL import (if True) or the pandas
native read_sql import (if False). If column formatting is odd, try setting to False. Optional, by default True.
n_threads : int or None, optional
Number of threads to use when parsing columns to convert wkb. Unused if fix_wkb is False. If set to 1,
multiprocessing is not used and slower numpy vectorization is used instead.
If None, uses the number of cpus available on the device. By default None
Returns
-------
pandas.DataFrame
DataFrame representation of the query results.
"""
filter_in_dict = defaultdict(dict)
if cell_id_filter is not None:
filter_in_dict[coreg_table]['func_id'] = [int(pid) for pid in cell_id_filter]
filter_notin_dict = defaultdict(dict)
if cell_id_exclude_filter is not None:
filter_notin_dict[coreg_table]['func_id'] = [int(pid) for pid in cell_id_exclude_filter]
if exclude_zero_root_ids is not None:
filter_notin_dict[coreg_table]['pt_root_id'] = [0]
if return_only_mapping:
select_columns = ['pt_root_id', 'func_id']
else:
select_columns = None
df = self.specific_query(tables=[coreg_table],
filter_in_dict=filter_in_dict,
filter_notin_dict=filter_notin_dict,
select_columns=select_columns,
fix_wkb=fix_wkb,
fix_decimal=fix_decimal,
return_sql=return_sql,
import_via_buffer=import_via_buffer, n_threads=n_threads)
if return_only_mapping:
return np.array(df, dtype=np.uint64).squeeze()
else:
return df
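    # Illustrative usage sketch (not from the original source; the table and
    # cell type names are hypothetical), continuing the `dl` example above:
    #
    #     ct_df = dl.query_cell_types('my_cell_type_table',
    #                                 cell_type_include_filter=['pyramidal'],
    #                                 exclude_zero_root_ids=True)
    #     root_ids = dl.query_cell_types('my_cell_type_table',
    #                                    return_only_ids=True)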
|
[
"collections.defaultdict",
"numpy.array"
] |
[((3052, 3069), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (3063, 3069), False, 'from collections import defaultdict\n'), ((3098, 3115), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (3109, 3115), False, 'from collections import defaultdict\n'), ((6529, 6546), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (6540, 6546), False, 'from collections import defaultdict\n'), ((6709, 6726), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (6720, 6726), False, 'from collections import defaultdict\n'), ((10087, 10104), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (10098, 10104), False, 'from collections import defaultdict\n'), ((10265, 10282), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (10276, 10282), False, 'from collections import defaultdict\n'), ((13628, 13645), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (13639, 13645), False, 'from collections import defaultdict\n'), ((13804, 13821), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (13815, 13821), False, 'from collections import defaultdict\n'), ((7688, 7717), 'numpy.array', 'np.array', (['df'], {'dtype': 'np.uint64'}), '(df, dtype=np.uint64)\n', (7696, 7717), True, 'import numpy as np\n'), ((11224, 11253), 'numpy.array', 'np.array', (['df'], {'dtype': 'np.uint64'}), '(df, dtype=np.uint64)\n', (11232, 11253), True, 'import numpy as np\n'), ((14776, 14805), 'numpy.array', 'np.array', (['df'], {'dtype': 'np.uint64'}), '(df, dtype=np.uint64)\n', (14784, 14805), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
import imutils
from text_recognition import text_name
import pytesseract
def auto_canny(image, sigma=0.55):
# compute the median of the single channel pixel intensities
v = np.median(image)
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(image, lower, upper)
# return the edged image
return edged
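# Illustrative note (not from the original script): auto_canny derives both
# Canny thresholds from the image median, so for a grayscale frame whose
# median intensity is 100 and the default sigma=0.55 it is equivalent to
# cv2.Canny(gray, 45, 155):
#
#     gray = cv2.cvtColor(cv2.imread('some_image.jpg'), cv2.COLOR_BGR2GRAY)
#     edges = auto_canny(gray)
#
# ('some_image.jpg' is a placeholder path.)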
'''
def id_text():
img2 = cv2.imread("images/Name007.png", -1)
cv2.imshow('',img2)
text=pytesseract.image_to_string(dark, config="-l tessdata/spa --oem 1 --psm 13")
print("Detected Number is:",text)
'''
def id_detection():
cap = cv2.VideoCapture(0)
while(True):
ret, img = cap.read()
#img = cv2.imread("card_06.jpeg", -1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.medianBlur(gray,13)
#ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_TRIANGLE)
thresh=cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv2.THRESH_BINARY,11,2)
kernel = np.ones((3,3),np.uint8)
opening = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel)
edged = auto_canny(opening)
cv2.imshow('edge',edged)
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:30]
number_plate = []
peri = cv2.arcLength(cnts[0], True)
approx = cv2.approxPolyDP(cnts[0], 0.018 * peri, True)
number_plate.append(approx)
if len(approx) == 4:
# compute the bounding box of the contour and use the
# bounding box to compute the aspect ratio
(x, y, w, h) = cv2.boundingRect(approx)
ar = w / float(h)
if ar>1.4 and ar<1.6:
                print('rectangle', str(ar))
                print('approx', str(approx))
cv2.drawContours(img, number_plate, -1, (0,255,0), 3)
cv2.imshow('square',img)
point1=approx[0][0][0]
print('point'+str(point1))
x1=approx[0][0][0]
y1=approx[0][0][1]
x2 = approx[1][0][0]
y2=approx[1][0][1]
x3=approx[2][0][0]
y3= approx[2][0][1]
x4 =approx[3][0][0]
y4= approx[3][0][1]
top_left_x = min(x1,x2,x3,x4)
top_left_y = min([y1,y2,y3,y4])
bot_right_x = max([x1,x2,x3,x4])
bot_right_y = max([y1,y2,y3,y4])
crop=img[top_left_y:bot_right_y+1, top_left_x:bot_right_x+1]
cv2.imshow('crop',crop)
name=crop[75:160,120:270]
#cv2.imshow('name',name)
dark= cv2.cvtColor(name, cv2.COLOR_BGR2GRAY)
thresh=cv2.adaptiveThreshold(dark,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv2.THRESH_BINARY,11,2)
kernel = np.ones((3,3),np.uint8)
dark = cv2.morphologyEx(dark, cv2.MORPH_OPEN, kernel)
dark = auto_canny(dark)
#cv2.imshow('dark',dark)
string_name=text_name()
print(string_name)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
return string_name,crop
|
[
"cv2.boundingRect",
"cv2.Canny",
"text_recognition.text_name",
"cv2.medianBlur",
"numpy.median",
"cv2.cvtColor",
"cv2.morphologyEx",
"cv2.arcLength",
"cv2.imshow",
"numpy.ones",
"cv2.adaptiveThreshold",
"cv2.approxPolyDP",
"cv2.VideoCapture",
"cv2.waitKey",
"imutils.grab_contours",
"cv2.drawContours",
"cv2.destroyAllWindows"
] |
[((212, 228), 'numpy.median', 'np.median', (['image'], {}), '(image)\n', (221, 228), True, 'import numpy as np\n'), ((400, 430), 'cv2.Canny', 'cv2.Canny', (['image', 'lower', 'upper'], {}), '(image, lower, upper)\n', (409, 430), False, 'import cv2\n'), ((729, 748), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (745, 748), False, 'import cv2\n'), ((3560, 3583), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3581, 3583), False, 'import cv2\n'), ((859, 896), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (871, 896), False, 'import cv2\n'), ((913, 937), 'cv2.medianBlur', 'cv2.medianBlur', (['gray', '(13)'], {}), '(gray, 13)\n', (927, 937), False, 'import cv2\n'), ((1022, 1117), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['gray', '(255)', 'cv2.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv2.THRESH_BINARY', '(11)', '(2)'], {}), '(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.\n THRESH_BINARY, 11, 2)\n', (1043, 1117), False, 'import cv2\n'), ((1148, 1173), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (1155, 1173), True, 'import numpy as np\n'), ((1190, 1236), 'cv2.morphologyEx', 'cv2.morphologyEx', (['gray', 'cv2.MORPH_OPEN', 'kernel'], {}), '(gray, cv2.MORPH_OPEN, kernel)\n', (1206, 1236), False, 'import cv2\n'), ((1291, 1316), 'cv2.imshow', 'cv2.imshow', (['"""edge"""', 'edged'], {}), "('edge', edged)\n", (1301, 1316), False, 'import cv2\n'), ((1423, 1450), 'imutils.grab_contours', 'imutils.grab_contours', (['cnts'], {}), '(cnts)\n', (1444, 1450), False, 'import imutils\n'), ((1561, 1589), 'cv2.arcLength', 'cv2.arcLength', (['cnts[0]', '(True)'], {}), '(cnts[0], True)\n', (1574, 1589), False, 'import cv2\n'), ((1607, 1652), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['cnts[0]', '(0.018 * peri)', '(True)'], {}), '(cnts[0], 0.018 * peri, True)\n', (1623, 1652), False, 'import cv2\n'), ((1867, 1891), 'cv2.boundingRect', 'cv2.boundingRect', (['approx'], {}), '(approx)\n', (1883, 1891), False, 'import cv2\n'), ((2068, 2123), 'cv2.drawContours', 'cv2.drawContours', (['img', 'number_plate', '(-1)', '(0, 255, 0)', '(3)'], {}), '(img, number_plate, -1, (0, 255, 0), 3)\n', (2084, 2123), False, 'import cv2\n'), ((2139, 2164), 'cv2.imshow', 'cv2.imshow', (['"""square"""', 'img'], {}), "('square', img)\n", (2149, 2164), False, 'import cv2\n'), ((2835, 2859), 'cv2.imshow', 'cv2.imshow', (['"""crop"""', 'crop'], {}), "('crop', crop)\n", (2845, 2859), False, 'import cv2\n'), ((2966, 3004), 'cv2.cvtColor', 'cv2.cvtColor', (['name', 'cv2.COLOR_BGR2GRAY'], {}), '(name, cv2.COLOR_BGR2GRAY)\n', (2978, 3004), False, 'import cv2\n'), ((3029, 3124), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['dark', '(255)', 'cv2.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv2.THRESH_BINARY', '(11)', '(2)'], {}), '(dark, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.\n THRESH_BINARY, 11, 2)\n', (3050, 3124), False, 'import cv2\n'), ((3163, 3188), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (3170, 3188), True, 'import numpy as np\n'), ((3210, 3256), 'cv2.morphologyEx', 'cv2.morphologyEx', (['dark', 'cv2.MORPH_OPEN', 'kernel'], {}), '(dark, cv2.MORPH_OPEN, kernel)\n', (3226, 3256), False, 'import cv2\n'), ((3376, 3387), 'text_recognition.text_name', 'text_name', ([], {}), '()\n', (3385, 3387), False, 'from text_recognition import text_name\n'), ((3436, 3450), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3447, 3450), False, 'import cv2\n')]
|
__all__ = ['checkEquivalentApprox']
import copy
import logging
import numpy as np
from LevelSetPy.Utilities import *
logger = logging.getLogger(__name__)
def checkEquivalentApprox(approx1, approx2,bound):
"""
checkEquivalentApprox: Checks two derivative approximations for equivalence.
[ relError, absError ] = checkEquivalentApprox(approx1, approx2, bound)
Checks two derivative approximations for equivalence.
A warning is generated if either of these conditions holds:
1) The approximation magnitude > bound
and the maximum relative error > bound.
2) The approximation magnitude < bound
and the maximum absolute error > bound.
Normally, the return values are ignored
(the whole point is the warning checks).
parameters:
approx1 An array containing one approximation.
approx2 An array containing the other approximation.
bound The bound above which warnings are generated.
relError The relative error at each point in the array
where the magnitude > bound (NaN otherwise).
absError The absolute error at each point in the array.
Copyright 2004 <NAME> (<EMAIL>).
This software is used, copied and distributed under the licensing
agreement contained in the file LICENSE in the top directory of
the distribution.
<NAME>, 1/23/04
"""
# Approximate magnitude of the solution
magnitude = 0.5 * np.abs(approx1 + approx2)
# Which nodes deserve relative treatment, and which absolute treatment?
useRelative = np.nonzero(magnitude > bound)
useAbsolute = np.nonzero(magnitude <= bound)
absError = np.abs(approx1 - approx2)
# Be careful not to divide by too small a number.
relError = ones(size(absError))
relError.fill(np.nan)
relError[useRelative] = np.divide(absError[useRelative], magnitude[useRelative])
# Check that bounds are respected.
if(max(relError[useRelative]) > bound):
        logger.warning('exceeded relative bound. Error in supposedly '
                'equivalent derivative approximations: '
                f'{max(relError[useRelative])}, {bound}')
if(max(absError[useAbsolute]) > bound):
        logger.warning('exceeded absolute bound. Error in supposedly '
                'equivalent derivative approximations: '
                f'{max(absError[useAbsolute])}, {bound}')
return relError, absError
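# Hedged usage sketch (not part of the original module): the two arrays and the
# bound below are invented purely for illustration; any pair of nearby
# derivative approximations exercises the relative branch (large magnitudes)
# and the absolute branch (near-zero magnitudes) of the check.
if __name__ == "__main__":
    approx_a = np.array([1.0, 2.0, 0.0001, 5.0])
    approx_b = np.array([1.0001, 2.0002, 0.00011, 5.0005])
    rel_err, abs_err = checkEquivalentApprox(approx_a, approx_b, bound=0.01)
    print("relative error:", rel_err)
    print("absolute error:", abs_err)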
|
[
"numpy.nonzero",
"numpy.divide",
"numpy.abs",
"logging.getLogger"
] |
[((127, 154), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (144, 154), False, 'import logging\n'), ((1616, 1645), 'numpy.nonzero', 'np.nonzero', (['(magnitude > bound)'], {}), '(magnitude > bound)\n', (1626, 1645), True, 'import numpy as np\n'), ((1664, 1694), 'numpy.nonzero', 'np.nonzero', (['(magnitude <= bound)'], {}), '(magnitude <= bound)\n', (1674, 1694), True, 'import numpy as np\n'), ((1711, 1736), 'numpy.abs', 'np.abs', (['(approx1 - approx2)'], {}), '(approx1 - approx2)\n', (1717, 1736), True, 'import numpy as np\n'), ((1882, 1938), 'numpy.divide', 'np.divide', (['absError[useRelative]', 'magnitude[useRelative]'], {}), '(absError[useRelative], magnitude[useRelative])\n', (1891, 1938), True, 'import numpy as np\n'), ((1495, 1520), 'numpy.abs', 'np.abs', (['(approx1 + approx2)'], {}), '(approx1 + approx2)\n', (1501, 1520), True, 'import numpy as np\n')]
|
import torch
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
import json
import os
from random import shuffle
import math
PATH = os.path.join(os.getcwd(), "data4.0")
SPACEGROUP_FILE = {
0: "Triclinic.txt",
1: "Monoclinic.txt",
2: "Orthorhombic.txt",
3: "Tetragonal.txt",
4: "Trigonal.txt",
5: "Hexagonal.txt",
6: "Cubic.txt"
}
SPACEGROUP_LABEL_OFFSET = {
0: 1,
1: 3,
2: 16,
3: 75,
4: 143,
5: 168,
6: 195
}
SPACEGROUP_SHAPE = {
0: 2,
1: 13,
2: 59,
3: 68,
4: 25,
5: 27,
6: 36
}
class Cs2Sg(Dataset):
def __init__(self, crystal_system, valid_size):
print("Preparing dataset")
data_input_valid = []
data_label_valid = []
data_input_train = []
data_label_train = []
with open(SPACEGROUP_FILE[crystal_system], "r") as f:
path_list = f.readlines()
shuffle(path_list)
valid_len = math.floor(len(path_list) * valid_size)
for i, path in enumerate(path_list):
data_path = os.path.join(PATH, path.rstrip())
with open(data_path, "r") as f:
data = json.load(f)
data_input = np.array(data["bands"]).T
data_label = np.array([data["number"]]) - SPACEGROUP_LABEL_OFFSET[crystal_system]
if i < valid_len:
data_input_valid.append(torch.from_numpy(data_input).float())
data_label_valid.append(torch.from_numpy(data_label).long())
else:
data_input_train.append(torch.from_numpy(data_input).float())
data_label_train.append(torch.from_numpy(data_label).long())
print("valid length:", len(data_input_valid))
print("train length:", len(data_input_train))
self.data_inputs = data_input_valid + data_input_train
self.data_labels = data_label_valid + data_label_train
self.length = len(self.data_labels)
self.valid_size = valid_len
def __len__(self):
return self.length
def __getitem__(self, item):
return self.data_inputs[item], self.data_labels[item]
def get_valid_train_loader(dataset, batch_size):
num = len(dataset)
indices = [i for i in range(num)]
split = dataset.valid_size
valid_idx, train_idx = indices[:split], indices[split:]
valid_sampler = SubsetRandomSampler(valid_idx)
train_sampler = SubsetRandomSampler(train_idx)
valid_loader = DataLoader(dataset, batch_size=batch_size, sampler=valid_sampler)
train_loader = DataLoader(dataset, batch_size=batch_size, sampler=train_sampler)
return train_loader, valid_loader
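# Hedged usage sketch (not part of the original file): it assumes the
# SPACEGROUP_FILE listings (e.g. Monoclinic.txt) and the JSON files under the
# data4.0 directory referenced above are actually present on disk; the batch
# size and the 10% validation split are arbitrary choices for illustration.
if __name__ == "__main__":
    dataset = Cs2Sg(crystal_system=1, valid_size=0.1)  # 1 -> Monoclinic.txt
    train_loader, valid_loader = get_valid_train_loader(dataset, batch_size=32)
    for bands, label in train_loader:
        print(bands.shape, label.shape)
        break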
|
[
"torch.utils.data.sampler.SubsetRandomSampler",
"json.load",
"torch.utils.data.DataLoader",
"os.getcwd",
"random.shuffle",
"numpy.array",
"torch.from_numpy"
] |
[((231, 242), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (240, 242), False, 'import os\n'), ((2619, 2649), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['valid_idx'], {}), '(valid_idx)\n', (2638, 2649), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((2671, 2701), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['train_idx'], {}), '(train_idx)\n', (2690, 2701), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((2724, 2789), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'sampler': 'valid_sampler'}), '(dataset, batch_size=batch_size, sampler=valid_sampler)\n', (2734, 2789), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((2810, 2875), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'sampler': 'train_sampler'}), '(dataset, batch_size=batch_size, sampler=train_sampler)\n', (2820, 2875), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((1091, 1109), 'random.shuffle', 'shuffle', (['path_list'], {}), '(path_list)\n', (1098, 1109), False, 'from random import shuffle\n'), ((1347, 1359), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1356, 1359), False, 'import json\n'), ((1390, 1413), 'numpy.array', 'np.array', (["data['bands']"], {}), "(data['bands'])\n", (1398, 1413), True, 'import numpy as np\n'), ((1446, 1472), 'numpy.array', 'np.array', (["[data['number']]"], {}), "([data['number']])\n", (1454, 1472), True, 'import numpy as np\n'), ((1597, 1625), 'torch.from_numpy', 'torch.from_numpy', (['data_input'], {}), '(data_input)\n', (1613, 1625), False, 'import torch\n'), ((1680, 1708), 'torch.from_numpy', 'torch.from_numpy', (['data_label'], {}), '(data_label)\n', (1696, 1708), False, 'import torch\n'), ((1787, 1815), 'torch.from_numpy', 'torch.from_numpy', (['data_input'], {}), '(data_input)\n', (1803, 1815), False, 'import torch\n'), ((1870, 1898), 'torch.from_numpy', 'torch.from_numpy', (['data_label'], {}), '(data_label)\n', (1886, 1898), False, 'import torch\n')]
|
import os, sys
import time
import argparse
import shutil
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
# import video_transforms
# import models
# import datasets
import traceback
import logging
from functools import partial
import distiller
import distiller.apputils as apputils
import parser
import os
import numpy as np
from ptq_lapq import image_classifier_ptq_lapq
import image_classifier as classifier
# Logger handle
msglogger = logging.getLogger()
def main():
# Parse arguments
args = parser.add_cmdline_args(classifier.init_classifier_compression_arg_parser(True)).parse_args()
app = ActionRecognizerCompressor(args, script_dir=os.path.dirname(__file__))
if app.handle_subapps():
return
# init_knowledge_distillation(app.args, app.model, app.compression_scheduler)
app.run_training_loop()
# Finally run results on the test set
return app.test()
def handle_subapps(model, criterion, optimizer, compression_scheduler, pylogger, args):
def load_test_data(args):
test_loader = classifier.load_data(args, load_train=False, load_val=False, load_test=True)
return test_loader
do_exit = False
if args.greedy:
greedy(model, criterion, optimizer, pylogger, args)
do_exit = True
elif args.summary:
# This sample application can be invoked to produce various summary reports
for summary in args.summary:
distiller.model_summary(model, summary, args.dataset)
do_exit = True
elif args.export_onnx is not None:
distiller.export_img_classifier_to_onnx(model,
os.path.join(msglogger.logdir, args.export_onnx),
args.dataset, add_softmax=True, verbose=False)
do_exit = True
elif args.qe_calibration and not (args.evaluate and args.quantize_eval):
classifier.acts_quant_stats_collection(model, criterion, pylogger, args, save_to_file=True)
do_exit = True
elif args.activation_histograms:
classifier.acts_histogram_collection(model, criterion, pylogger, args)
do_exit = True
elif args.sensitivity is not None:
test_loader = load_test_data(args)
sensitivities = np.arange(*args.sensitivity_range)
sensitivity_analysis(model, criterion, test_loader, pylogger, args, sensitivities)
do_exit = True
elif args.evaluate:
if args.quantize_eval and args.qe_lapq:
image_classifier_ptq_lapq(model, criterion, pylogger, args)
else:
test_loader = load_test_data(args)
classifier.evaluate_model(test_loader, model, criterion, pylogger,
classifier.create_activation_stats_collectors(model, *args.activation_stats),
args, scheduler=compression_scheduler)
do_exit = True
elif args.thinnify:
assert args.resumed_checkpoint_path is not None, \
"You must use --resume-from to provide a checkpoint file to thinnify"
distiller.contract_model(model, compression_scheduler.zeros_mask_dict, args.arch, args.dataset, optimizer=None)
apputils.save_checkpoint(0, args.arch, model, optimizer=None, scheduler=compression_scheduler,
name="{}_thinned".format(args.resumed_checkpoint_path.replace(".pth.tar", "")),
dir=msglogger.logdir)
msglogger.info("Note: if your model collapsed to random inference, you may want to fine-tune")
do_exit = True
return do_exit
# def init_knowledge_distillation(args, model, compression_scheduler):
# args.kd_policy = None
# if args.kd_teacher:
# teacher = create_model(args.kd_pretrained, args.dataset, args.kd_teacher, device_ids=args.gpus)
# if args.kd_resume:
# teacher = apputils.load_lean_checkpoint(teacher, args.kd_resume)
# dlw = distiller.DistillationLossWeights(args.kd_distill_wt, args.kd_student_wt, args.kd_teacher_wt)
# args.kd_policy = distiller.KnowledgeDistillationPolicy(model, teacher, args.kd_temp, dlw)
# compression_scheduler.add_policy(args.kd_policy, starting_epoch=args.kd_start_epoch, ending_epoch=args.epochs,
# frequency=1)
# msglogger.info('\nStudent-Teacher knowledge distillation enabled:')
# msglogger.info('\tTeacher Model: %s', args.kd_teacher)
# msglogger.info('\tTemperature: %s', args.kd_temp)
# msglogger.info('\tLoss Weights (distillation | student | teacher): %s',
# ' | '.join(['{:.2f}'.format(val) for val in dlw]))
# msglogger.info('\tStarting from Epoch: %s', args.kd_start_epoch)
# def early_exit_init(args):
# if not args.earlyexit_thresholds:
# return
# args.num_exits = len(args.earlyexit_thresholds) + 1
# args.loss_exits = [0] * args.num_exits
# args.losses_exits = []
# args.exiterrors = []
# msglogger.info('=> using early-exit threshold values of %s', args.earlyexit_thresholds)
class ActionRecognizerCompressor(classifier.ClassifierCompressor):
def __init__(self, args, script_dir):
super().__init__(args, script_dir)
# early_exit_init(self.args)
# Save the randomly-initialized model before training (useful for lottery-ticket method)
if args.save_untrained_model:
ckpt_name = '_'.join((self.args.name or "", "untrained"))
apputils.save_checkpoint(0, self.args.arch, self.model,
name=ckpt_name, dir=msglogger.logdir)
def handle_subapps(self):
return handle_subapps(self.model, self.criterion, self.optimizer,
self.compression_scheduler, self.pylogger, self.args)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print("\n-- KeyboardInterrupt --")
except Exception as e:
if msglogger is not None:
# We catch unhandled exceptions here in order to log them to the log file
# However, using the msglogger as-is to do that means we get the trace twice in stdout - once from the
# logging operation and once from re-raising the exception. So we remove the stdout logging handler
# before logging the exception
handlers_bak = msglogger.handlers
msglogger.handlers = [h for h in msglogger.handlers if type(h) != logging.StreamHandler]
msglogger.error(traceback.format_exc())
msglogger.handlers = handlers_bak
raise
finally:
if msglogger is not None and hasattr(msglogger, 'log_filename'):
msglogger.info('')
msglogger.info('Log file for this run: ' + os.path.realpath(msglogger.log_filename))
|
[
"ptq_lapq.image_classifier_ptq_lapq",
"image_classifier.init_classifier_compression_arg_parser",
"image_classifier.create_activation_stats_collectors",
"image_classifier.load_data",
"os.path.dirname",
"os.path.realpath",
"logging.getLogger",
"distiller.model_summary",
"numpy.arange",
"traceback.format_exc",
"image_classifier.acts_histogram_collection",
"distiller.contract_model",
"os.path.join",
"distiller.apputils.save_checkpoint",
"image_classifier.acts_quant_stats_collection"
] |
[((554, 573), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (571, 573), False, 'import logging\n'), ((1154, 1230), 'image_classifier.load_data', 'classifier.load_data', (['args'], {'load_train': '(False)', 'load_val': '(False)', 'load_test': '(True)'}), '(args, load_train=False, load_val=False, load_test=True)\n', (1174, 1230), True, 'import image_classifier as classifier\n'), ((768, 793), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (783, 793), False, 'import os\n'), ((5589, 5687), 'distiller.apputils.save_checkpoint', 'apputils.save_checkpoint', (['(0)', 'self.args.arch', 'self.model'], {'name': 'ckpt_name', 'dir': 'msglogger.logdir'}), '(0, self.args.arch, self.model, name=ckpt_name, dir\n =msglogger.logdir)\n', (5613, 5687), True, 'import distiller.apputils as apputils\n'), ((644, 699), 'image_classifier.init_classifier_compression_arg_parser', 'classifier.init_classifier_compression_arg_parser', (['(True)'], {}), '(True)\n', (693, 699), True, 'import image_classifier as classifier\n'), ((1538, 1591), 'distiller.model_summary', 'distiller.model_summary', (['model', 'summary', 'args.dataset'], {}), '(model, summary, args.dataset)\n', (1561, 1591), False, 'import distiller\n'), ((1757, 1805), 'os.path.join', 'os.path.join', (['msglogger.logdir', 'args.export_onnx'], {}), '(msglogger.logdir, args.export_onnx)\n', (1769, 1805), False, 'import os\n'), ((2010, 2105), 'image_classifier.acts_quant_stats_collection', 'classifier.acts_quant_stats_collection', (['model', 'criterion', 'pylogger', 'args'], {'save_to_file': '(True)'}), '(model, criterion, pylogger, args,\n save_to_file=True)\n', (2048, 2105), True, 'import image_classifier as classifier\n'), ((6628, 6650), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (6648, 6650), False, 'import traceback\n'), ((6884, 6924), 'os.path.realpath', 'os.path.realpath', (['msglogger.log_filename'], {}), '(msglogger.log_filename)\n', (6900, 6924), False, 'import os\n'), ((2170, 2240), 'image_classifier.acts_histogram_collection', 'classifier.acts_histogram_collection', (['model', 'criterion', 'pylogger', 'args'], {}), '(model, criterion, pylogger, args)\n', (2206, 2240), True, 'import image_classifier as classifier\n'), ((2370, 2404), 'numpy.arange', 'np.arange', (['*args.sensitivity_range'], {}), '(*args.sensitivity_range)\n', (2379, 2404), True, 'import numpy as np\n'), ((2603, 2662), 'ptq_lapq.image_classifier_ptq_lapq', 'image_classifier_ptq_lapq', (['model', 'criterion', 'pylogger', 'args'], {}), '(model, criterion, pylogger, args)\n', (2628, 2662), False, 'from ptq_lapq import image_classifier_ptq_lapq\n'), ((3148, 3264), 'distiller.contract_model', 'distiller.contract_model', (['model', 'compression_scheduler.zeros_mask_dict', 'args.arch', 'args.dataset'], {'optimizer': 'None'}), '(model, compression_scheduler.zeros_mask_dict, args\n .arch, args.dataset, optimizer=None)\n', (3172, 3264), False, 'import distiller\n'), ((2819, 2895), 'image_classifier.create_activation_stats_collectors', 'classifier.create_activation_stats_collectors', (['model', '*args.activation_stats'], {}), '(model, *args.activation_stats)\n', (2864, 2895), True, 'import image_classifier as classifier\n')]
|
import policy as policy_module
import env_rna
import torch
import numpy as np
import torch.nn.functional as F
import matplotlib.pyplot as mlp
import arc_diagram
from torch.distributions import Categorical
env = env_rna.EnvRNA()
class Reinforce:
running_reward = 0
MAX_ITER = 1000
exploration_eps = 10
N = 10
alpha = .90
correct_predictions = 0
sum_iterations_done = 0
number_episodes = 100;
policy = policy_module.Policy(N)
optimizer = torch.optim.Adam(policy.parameters(), lr=1e-6)
weights_dir = "./Weights/"
def select_action(self,state, policy):
probs = policy.forward(state)
m = Categorical(probs)
action = m.sample()
policy.saved_probs.append(m.log_prob(action))
item = action.item()
return (int(item/self.N),item%self.N)
class MonteCarloReinforceTrainer(Reinforce):
def train(self):
for i_episode in range(self.number_episodes):
if i_episode%10 == 0 :
print("Episode : ", i_episode)
seq = generate_random_sequence(self.N)
env.reset(seq)
state = env.rna.structure_representation
ep_reward = 0
rewards = []
bestState = env.rna.structure_representation_dot.copy()
bestReward = 0
for t in range(self.MAX_ITER):
exploration = np.random.uniform(0,1)
if exploration > 0.3 or i_episode > self.exploration_eps or t == 0:#.9 * 1/(i_episode+1):
action = self.select_action(convert_to_tensor(state, seq), self.policy)
state, reward, done, _ = env.step(action,self.N)
rewards.append(reward)
else:
action = np.random.randint(0,self.N,2)
action = (action[0],action[1])
state, reward, done, _ = env.step(action, self.N)
rewards.append(reward)
ep_reward += reward
if ep_reward > bestReward:
bestState = env.rna.structure_representation_dot.copy()
bestReward = ep_reward
if done:
if i_episode > self.exploration_eps:
self.correct_predictions +=1
self.sum_iterations_done += t
print("Done ", i_episode, " iteration ", t)
title = str(i_episode) + " Done at iteration " + str(t)
if i_episode >= 2090:
mlp.show(arc_diagram.arc_diagram(
arc_diagram.phrantheses_to_pairing_list(env.rna.structure_representation_dot),seq, title))
break
if (t+1)%100 == 0 and i_episode >= 2090:
mlp.show(arc_diagram.arc_diagram(arc_diagram.phrantheses_to_pairing_list(env.rna.structure_representation_dot),seq,i_episode))
self.running_reward = self.running_reward * self.alpha + ep_reward * (1-self.alpha)
if i_episode >= 2000:
mlp.show(
arc_diagram.arc_diagram(arc_diagram.phrantheses_to_pairing_list(bestState),
seq, "Best State Achieved"))
self.finish_episode(rewards)
self.policy.save_weights(self.weights_dir+ "monte_carlo_reinforce"+str(self.number_episodes))
def finish_episode(self, rewards):
R = 0
DISCOUNT_FACTOR = 0.99
policy_loss = []
returns = []
for r in rewards[::-1]:
R = r+DISCOUNT_FACTOR*R
returns.insert(0, R)
returns = torch.tensor(returns)
returns = (returns - returns.mean())
for log_prob, R in zip(self.policy.saved_probs, returns):
loss = -log_prob * R
policy_loss.append(loss.unsqueeze(0))
self.optimizer.zero_grad()
policy_loss = torch.stack(policy_loss).sum()
policy_loss.backward()
self.optimizer.step()
del self.policy.saved_probs[:]
class TemporalDifferenceReinforceTrainer(Reinforce):
def train(self):
for i_episode in range(self.number_episodes):
seq = generate_random_sequence(self.N)
env.reset(seq)
state = env.rna.structure_representation
ep_reward = 0
rewards = []
bestState = env.rna.structure_representation_dot.copy()
bestReward = 0
for t in range(self.MAX_ITER):
exploration = np.random.uniform(0,1)
if exploration > 0.3 or i_episode > 100 or t == 0:
action = self.select_action(convert_to_tensor(state, seq), self.policy)
state, reward, done, _ = env.step(action,self.N)
rewards.append(reward)
self.update_policy(reward)
else:
action = np.random.randint(0,self.N,2)
action = (action[0],action[1])
state, reward, done, _ = env.step(action, self.N)
rewards.append(reward)
ep_reward += reward
if ep_reward > bestReward:
bestState = env.rna.structure_representation_dot.copy()
bestReward = ep_reward
if done:
if i_episode > self.exploration_eps:
self.correct_predictions +=1
self.sum_iterations_done += t
print("Done ", i_episode, " iteration ", t)
title = str(i_episode) + " Done at iteration " + str(t)
if i_episode >= 3000:
mlp.show(arc_diagram.arc_diagram(
arc_diagram.phrantheses_to_pairing_list(env.rna.structure_representation_dot), seq, title))
break
if (t + 1) % 100 == 0 and i_episode >= 3000:
mlp.show(arc_diagram.arc_diagram(
arc_diagram.phrantheses_to_pairing_list(env.rna.structure_representation_dot), seq, i_episode))
self.running_reward = self.running_reward * self.alpha + ep_reward * (1 - self.alpha)
if i_episode >= 3000:
mlp.show(
arc_diagram.arc_diagram(arc_diagram.phrantheses_to_pairing_list(bestState),
seq, "Best State Achieved"))
self.finish_episode()
self.policy.save_weights(self.weights_dir+"td_reinforce"+str(self.number_episodes))
def update_policy(self,reward):
R = 0
DISCOUNT_FACTOR = 0.99
policy_loss = []
returns = []
R = reward
loss = -self.policy.saved_probs[-1] * R
policy_loss.append(loss.unsqueeze(0))
self.optimizer.zero_grad()
policy_loss = F.smooth_l1_loss(2,loss)
policy_loss.backward()
self.optimizer.step()
def finish_episode(self):
del self.policy.saved_probs[:]
def convert_to_tensor(list,sequence):
#TODO Change to NxN tensor
n = len(sequence)
tensor = torch.tensor(np.zeros((1,1,8,n)),dtype=torch.double)
base_index = 0
for base in sequence:
position = 0
if base == 'A': position = 0
elif base == 'U': position = 2
elif base == 'G' : position = 4
else : position = 6
if len([ (x,y) for x, y in list if x == base_index or y == base_index ]) != 0:
tensor[0][0][position+1][base_index] = 1
else:
tensor[0][0][position][base_index] = 1
base_index +=1
return tensor
def generate_random_sequence(N):
sequence = ""
for i in range (N):
epsilon = np.random.uniform(0,1)
if epsilon <= 0.25:
sequence += "A"
elif epsilon <= 0.5:
sequence += "U"
elif epsilon <= 0.75:
sequence+= "G"
else:
sequence+= "C"
return sequence
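# Hedged usage sketch (not part of the original file): running it starts a full
# Monte-Carlo REINFORCE training run with the hyper-parameters hard-coded in
# the Reinforce base class, and it assumes the local policy, env_rna and
# arc_diagram modules are importable.
if __name__ == "__main__":
    trainer = MonteCarloReinforceTrainer()
    trainer.train()
    print("episodes solved:", trainer.correct_predictions)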
|
[
"numpy.random.uniform",
"torch.distributions.Categorical",
"torch.stack",
"numpy.zeros",
"env_rna.EnvRNA",
"numpy.random.randint",
"policy.Policy",
"arc_diagram.phrantheses_to_pairing_list",
"torch.nn.functional.smooth_l1_loss",
"torch.tensor"
] |
[((212, 228), 'env_rna.EnvRNA', 'env_rna.EnvRNA', ([], {}), '()\n', (226, 228), False, 'import env_rna\n'), ((440, 463), 'policy.Policy', 'policy_module.Policy', (['N'], {}), '(N)\n', (460, 463), True, 'import policy as policy_module\n'), ((651, 669), 'torch.distributions.Categorical', 'Categorical', (['probs'], {}), '(probs)\n', (662, 669), False, 'from torch.distributions import Categorical\n'), ((3659, 3680), 'torch.tensor', 'torch.tensor', (['returns'], {}), '(returns)\n', (3671, 3680), False, 'import torch\n'), ((6904, 6929), 'torch.nn.functional.smooth_l1_loss', 'F.smooth_l1_loss', (['(2)', 'loss'], {}), '(2, loss)\n', (6920, 6929), True, 'import torch.nn.functional as F\n'), ((7181, 7203), 'numpy.zeros', 'np.zeros', (['(1, 1, 8, n)'], {}), '((1, 1, 8, n))\n', (7189, 7203), True, 'import numpy as np\n'), ((7775, 7798), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (7792, 7798), True, 'import numpy as np\n'), ((1383, 1406), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1400, 1406), True, 'import numpy as np\n'), ((3932, 3956), 'torch.stack', 'torch.stack', (['policy_loss'], {}), '(policy_loss)\n', (3943, 3956), False, 'import torch\n'), ((4549, 4572), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (4566, 4572), True, 'import numpy as np\n'), ((1767, 1798), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.N', '(2)'], {}), '(0, self.N, 2)\n', (1784, 1798), True, 'import numpy as np\n'), ((4941, 4972), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.N', '(2)'], {}), '(0, self.N, 2)\n', (4958, 4972), True, 'import numpy as np\n'), ((3139, 3189), 'arc_diagram.phrantheses_to_pairing_list', 'arc_diagram.phrantheses_to_pairing_list', (['bestState'], {}), '(bestState)\n', (3178, 3189), False, 'import arc_diagram\n'), ((6354, 6404), 'arc_diagram.phrantheses_to_pairing_list', 'arc_diagram.phrantheses_to_pairing_list', (['bestState'], {}), '(bestState)\n', (6393, 6404), False, 'import arc_diagram\n'), ((2844, 2921), 'arc_diagram.phrantheses_to_pairing_list', 'arc_diagram.phrantheses_to_pairing_list', (['env.rna.structure_representation_dot'], {}), '(env.rna.structure_representation_dot)\n', (2883, 2921), False, 'import arc_diagram\n'), ((6051, 6128), 'arc_diagram.phrantheses_to_pairing_list', 'arc_diagram.phrantheses_to_pairing_list', (['env.rna.structure_representation_dot'], {}), '(env.rna.structure_representation_dot)\n', (6090, 6128), False, 'import arc_diagram\n'), ((2617, 2694), 'arc_diagram.phrantheses_to_pairing_list', 'arc_diagram.phrantheses_to_pairing_list', (['env.rna.structure_representation_dot'], {}), '(env.rna.structure_representation_dot)\n', (2656, 2694), False, 'import arc_diagram\n'), ((5792, 5869), 'arc_diagram.phrantheses_to_pairing_list', 'arc_diagram.phrantheses_to_pairing_list', (['env.rna.structure_representation_dot'], {}), '(env.rna.structure_representation_dot)\n', (5831, 5869), False, 'import arc_diagram\n')]
|
import numpy as np
from astropy.io import fits
def calc_erro(arquivo_1, arquivo_2, arquivo_3): #Define the function that computes the quadratic sum of the errors from the 3 files
    hdul_1 = fits.open(arquivo_1) #Open the file as a Header Data Unit List, made up of a header and a data part
hdul_2 = fits.open(arquivo_2)
hdul_3 = fits.open(arquivo_3)
    error_1 = hdul_1[0].data #Select the data part and turn it into a numpy array
error_2 = hdul_2[0].data
error_3 = hdul_3[0].data
    erro_lcg = np.sqrt(error_1**2 + error_2**2 + error_3**2) / 3 #Compute the error
    return(erro_lcg) #Return the result
grupo = int(input("Group ID: "))
extensoes = int(input("Number of extensions: "))
for i in range(2,extensoes+1):
arquivo_1 = str('error_LCG' + str(grupo) + '_1_' + str(i) + '.fits')
arquivo_2 = str('error_LCG' + str(grupo) + '_2_' + str(i) + '.fits')
arquivo_3 = str('error_LCG' + str(grupo) + '_3_' + str(i) + '.fits')
    erro_lcg = calc_erro(arquivo_1, arquivo_2, arquivo_3) #Call the function
    hdu = fits.PrimaryHDU(erro_lcg) #Convert the numpy array back into an HDU
output = str('ERROR_LCG' + str(grupo) + '_' + str(i) + '.fits')
hdu.writeto(output)
|
[
"astropy.io.fits.PrimaryHDU",
"astropy.io.fits.open",
"numpy.sqrt"
] |
[((181, 201), 'astropy.io.fits.open', 'fits.open', (['arquivo_1'], {}), '(arquivo_1)\n', (190, 201), False, 'from astropy.io import fits\n'), ((286, 306), 'astropy.io.fits.open', 'fits.open', (['arquivo_2'], {}), '(arquivo_2)\n', (295, 306), False, 'from astropy.io import fits\n'), ((320, 340), 'astropy.io.fits.open', 'fits.open', (['arquivo_3'], {}), '(arquivo_3)\n', (329, 340), False, 'from astropy.io import fits\n'), ((1044, 1069), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['erro_lcg'], {}), '(erro_lcg)\n', (1059, 1069), False, 'from astropy.io import fits\n'), ((503, 554), 'numpy.sqrt', 'np.sqrt', (['(error_1 ** 2 + error_2 ** 2 + error_3 ** 2)'], {}), '(error_1 ** 2 + error_2 ** 2 + error_3 ** 2)\n', (510, 554), True, 'import numpy as np\n')]
|
"""
Trains End 2 End VarNet
"""
import argparse
import os
from pathlib import Path
import random
import numpy as np
import torch
import torch.distributed as dist
from torch.optim import Adam, lr_scheduler
from torch.utils.data import DataLoader, DistributedSampler
from torch.utils import tensorboard
import fastmri
from fastmri.data.mri_data import SliceDataset
from fastmri.data.subsample import create_mask_for_mask_type
from fastmri.data.transforms import VarNetDataTransform
from fastmri.models import VarNet
def get_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(description="Train E2E VarNet")
parser.add_argument(
"--training-dir",
type=(
lambda x: x
if Path(x).is_dir()
else parser.error("Invalid training directory")
),
required=True,
help="Path to training directory",
)
parser.add_argument(
"--validation-dir",
type=(
lambda x: x
if Path(x).is_dir()
else parser.error("Invalid validation directory")
),
required=True,
help="Path to validation directory",
)
parser.add_argument(
"--challenge",
type=str,
choices=["singlecoil", "multicoil"],
default="multicoil",
help="One of singlecoil or multicoil",
)
parser.add_argument(
"--mask-type",
type=str,
choices=["equispaced", "equispaced_fraction", "magic", "magic_fraction"],
default="equispaced_fraction",
)
parser.add_argument(
"--center-fractions",
type=float,
nargs="+",
default=[0.08],
)
parser.add_argument(
"--accelerations",
type=int,
nargs="+",
default=[4],
)
parser.add_argument("--batch-size", type=int, default=1)
parser.add_argument("--num-workers", type=int, default=4)
parser.add_argument("--lr", type=float, default=3e-4, help="learning rate")
parser.add_argument("--weight-decay", type=float, default=0.0)
parser.add_argument("--lr-step-size", type=int, default=40)
parser.add_argument("--lr-gamma", type=float, default=0.1)
parser.add_argument("--epochs", type=int, default=50)
parser.add_argument("--seed", type=int, default=42, help="Random seed")
parser.add_argument(
"--init-method", default="tcp://127.0.0.1:3456", type=str, help=""
)
parser.add_argument("--dist-backend", default="gloo", type=str, help="")
parser.add_argument("--world-size", default=1, type=int, help="")
parser.add_argument("--distributed", action="store_true", help="")
parser.add_argument("--logdir", type=str, default="./log", help="")
parser.add_argument("--epochs-per-val", type=int, default=5)
return parser
def init_ddp_process_group(args: argparse.Namespace) -> int:
ngpus = torch.cuda.device_count()
local_rank = os.environ.get("SLURM_LOCALID")
node_id = os.environ.get("SLURM_NODEID")
assert local_rank is not None and node_id is not None
rank = int(node_id) * ngpus + int(local_rank)
current_device = int(local_rank)
torch.cuda.set_device(current_device)
dist.init_process_group(
backend=args.dist_backend,
init_method=args.init_method,
world_size=args.world_size,
rank=rank,
)
return current_device
def main():
args = get_parser().parse_args()
current_device = init_ddp_process_group(args)
    log_dir = args.logdir
os.makedirs(log_dir, exist_ok=True)
writer = tensorboard.writer.SummaryWriter(log_dir=log_dir)
for seed in (torch.manual_seed, np.random.seed, random.seed):
seed(args.seed)
mask = create_mask_for_mask_type(
mask_type_str=args.mask_type,
center_fractions=args.center_fractions,
accelerations=args.accelerations,
)
train_transform = VarNetDataTransform(mask_func=mask, use_seed=False)
val_transform = VarNetDataTransform(mask_func=mask)
train_dataset = SliceDataset(
root=args.training_dir,
challenge=args.challenge,
transform=train_transform,
use_dataset_cache=True,
)
val_dataset = SliceDataset(
root=args.validation_dir,
challenge=args.challenge,
transform=val_transform,
use_dataset_cache=True,
)
model = VarNet().cuda()
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[current_device]
)
train_sampler = DistributedSampler(train_dataset)
val_sampler = DistributedSampler(val_dataset)
train_loader = DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=(train_sampler is None),
num_workers=args.num_workers,
sampler=train_sampler,
)
val_loader = DataLoader(
val_dataset,
batch_size=1,
shuffle=False,
num_workers=args.num_workers,
sampler=val_sampler
)
criterion = fastmri.SSIMLoss()
optimizer = Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
scheduler = lr_scheduler.StepLR(optimizer, args.lr_step_size, args.lr_gamma)
    for epoch in range(args.epochs):
        epoch_loss = []
        for x in train_loader:
            optimizer.zero_grad()
            reconstructed_image = model(x)
            loss = criterion(x, reconstructed_image)
            loss.backward()
            optimizer.step()
            epoch_loss.append(loss.item())
        scheduler.step()
        writer.add_scalar("Train loss", np.mean(epoch_loss), epoch)
        if epoch % args.epochs_per_val == 0:
            model.eval()
            with torch.no_grad():
                val_loss = []
                for x in val_loader:
                    reconstructed_image = model(x)
                    loss = criterion(x, reconstructed_image)
                    val_loss.append(loss.item())
            writer.add_scalar("Validation loss", np.mean(val_loss), epoch)
            model.train()


if __name__ == "__main__":
    main()
|
[
"torch.optim.lr_scheduler.StepLR",
"argparse.ArgumentParser",
"fastmri.models.VarNet",
"fastmri.data.transforms.VarNetDataTransform",
"torch.cuda.device_count",
"pathlib.Path",
"numpy.mean",
"fastmri.data.mri_data.SliceDataset",
"torch.no_grad",
"torch.utils.data.DataLoader",
"torch.nn.parallel.DistributedDataParallel",
"torch.utils.tensorboard.writer.SummaryWriter",
"fastmri.SSIMLoss",
"torch.cuda.set_device",
"fastmri.data.subsample.create_mask_for_mask_type",
"torch.distributed.init_process_group",
"os.makedirs",
"os.environ.get",
"torch.utils.data.DistributedSampler"
] |
[((576, 631), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train E2E VarNet"""'}), "(description='Train E2E VarNet')\n", (599, 631), False, 'import argparse\n'), ((2888, 2913), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (2911, 2913), False, 'import torch\n'), ((2932, 2963), 'os.environ.get', 'os.environ.get', (['"""SLURM_LOCALID"""'], {}), "('SLURM_LOCALID')\n", (2946, 2963), False, 'import os\n'), ((2978, 3008), 'os.environ.get', 'os.environ.get', (['"""SLURM_NODEID"""'], {}), "('SLURM_NODEID')\n", (2992, 3008), False, 'import os\n'), ((3159, 3196), 'torch.cuda.set_device', 'torch.cuda.set_device', (['current_device'], {}), '(current_device)\n', (3180, 3196), False, 'import torch\n'), ((3202, 3326), 'torch.distributed.init_process_group', 'dist.init_process_group', ([], {'backend': 'args.dist_backend', 'init_method': 'args.init_method', 'world_size': 'args.world_size', 'rank': 'rank'}), '(backend=args.dist_backend, init_method=args.\n init_method, world_size=args.world_size, rank=rank)\n', (3225, 3326), True, 'import torch.distributed as dist\n'), ((3519, 3554), 'os.makedirs', 'os.makedirs', (['log_dir'], {'exist_ok': '(True)'}), '(log_dir, exist_ok=True)\n', (3530, 3554), False, 'import os\n'), ((3568, 3617), 'torch.utils.tensorboard.writer.SummaryWriter', 'tensorboard.writer.SummaryWriter', ([], {'log_dir': 'log_dir'}), '(log_dir=log_dir)\n', (3600, 3617), False, 'from torch.utils import tensorboard\n'), ((3721, 3855), 'fastmri.data.subsample.create_mask_for_mask_type', 'create_mask_for_mask_type', ([], {'mask_type_str': 'args.mask_type', 'center_fractions': 'args.center_fractions', 'accelerations': 'args.accelerations'}), '(mask_type_str=args.mask_type, center_fractions=\n args.center_fractions, accelerations=args.accelerations)\n', (3746, 3855), False, 'from fastmri.data.subsample import create_mask_for_mask_type\n'), ((3905, 3956), 'fastmri.data.transforms.VarNetDataTransform', 'VarNetDataTransform', ([], {'mask_func': 'mask', 'use_seed': '(False)'}), '(mask_func=mask, use_seed=False)\n', (3924, 3956), False, 'from fastmri.data.transforms import VarNetDataTransform\n'), ((3977, 4012), 'fastmri.data.transforms.VarNetDataTransform', 'VarNetDataTransform', ([], {'mask_func': 'mask'}), '(mask_func=mask)\n', (3996, 4012), False, 'from fastmri.data.transforms import VarNetDataTransform\n'), ((4034, 4152), 'fastmri.data.mri_data.SliceDataset', 'SliceDataset', ([], {'root': 'args.training_dir', 'challenge': 'args.challenge', 'transform': 'train_transform', 'use_dataset_cache': '(True)'}), '(root=args.training_dir, challenge=args.challenge, transform=\n train_transform, use_dataset_cache=True)\n', (4046, 4152), False, 'from fastmri.data.mri_data import SliceDataset\n'), ((4206, 4324), 'fastmri.data.mri_data.SliceDataset', 'SliceDataset', ([], {'root': 'args.validation_dir', 'challenge': 'args.challenge', 'transform': 'val_transform', 'use_dataset_cache': '(True)'}), '(root=args.validation_dir, challenge=args.challenge, transform=\n val_transform, use_dataset_cache=True)\n', (4218, 4324), False, 'from fastmri.data.mri_data import SliceDataset\n'), ((4400, 4477), 'torch.nn.parallel.DistributedDataParallel', 'torch.nn.parallel.DistributedDataParallel', (['model'], {'device_ids': '[current_device]'}), '(model, device_ids=[current_device])\n', (4441, 4477), False, 'import torch\n'), ((4513, 4546), 'torch.utils.data.DistributedSampler', 'DistributedSampler', (['train_dataset'], {}), '(train_dataset)\n', (4531, 4546), False, 'from torch.utils.data import DataLoader, DistributedSampler\n'), ((4565, 4596), 'torch.utils.data.DistributedSampler', 'DistributedSampler', (['val_dataset'], {}), '(val_dataset)\n', (4583, 4596), False, 'from torch.utils.data import DataLoader, DistributedSampler\n'), ((4617, 4758), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(train_sampler is None)', 'num_workers': 'args.num_workers', 'sampler': 'train_sampler'}), '(train_dataset, batch_size=args.batch_size, shuffle=train_sampler is\n None, num_workers=args.num_workers, sampler=train_sampler)\n', (4627, 4758), False, 'from torch.utils.data import DataLoader, DistributedSampler\n'), ((4822, 4930), 'torch.utils.data.DataLoader', 'DataLoader', (['val_dataset'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': 'args.num_workers', 'sampler': 'val_sampler'}), '(val_dataset, batch_size=1, shuffle=False, num_workers=args.\n num_workers, sampler=val_sampler)\n', (4832, 4930), False, 'from torch.utils.data import DataLoader, DistributedSampler\n'), ((4989, 5007), 'fastmri.SSIMLoss', 'fastmri.SSIMLoss', ([], {}), '()\n', (5005, 5007), False, 'import fastmri\n'), ((5109, 5173), 'torch.optim.lr_scheduler.StepLR', 'lr_scheduler.StepLR', (['optimizer', 'args.lr_step_size', 'args.lr_gamma'], {}), '(optimizer, args.lr_step_size, args.lr_gamma)\n', (5128, 5173), False, 'from torch.optim import Adam, lr_scheduler\n'), ((4372, 4380), 'fastmri.models.VarNet', 'VarNet', ([], {}), '()\n', (4378, 4380), False, 'from fastmri.models import VarNet\n'), ((5522, 5541), 'numpy.mean', 'np.mean', (['epoch_loss'], {}), '(epoch_loss)\n', (5529, 5541), True, 'import numpy as np\n'), ((5631, 5646), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5644, 5646), False, 'import torch\n'), ((5965, 5982), 'numpy.mean', 'np.mean', (['val_loss'], {}), '(val_loss)\n', (5972, 5982), True, 'import numpy as np\n'), ((738, 745), 'pathlib.Path', 'Path', (['x'], {}), '(x)\n', (742, 745), False, 'from pathlib import Path\n'), ((1006, 1013), 'pathlib.Path', 'Path', (['x'], {}), '(x)\n', (1010, 1013), False, 'from pathlib import Path\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import numpy as np
import cv2
import unittest
from random import randint
from ocr.classify import Classifier
from tempfile import NamedTemporaryFile
PARENT_DIR = os.path.dirname(__file__)
DATA_DIR = os.path.join(os.path.dirname(PARENT_DIR), "data")
SEED = 1
class TestDigits(unittest.TestCase):
"""Test case for classifying handwritten digits."""
    THRESH = 0.85
SAVE_FILE = "TestDigitsClassifier.p"
DIGITS_FILE = os.path.join(DATA_DIR, "digits.png")
@classmethod
def setUpClass(cls):
# Load digits
cls.__X, cls.__y = cls.load_digits()
# Train classifier
clf = Classifier(random_state=SEED)
clf.train(cls.__X, cls.__y)
cls.__clf = clf
def test_save(self):
"""Test save"""
results = self.__test_digits(self.__X, self.__y, self.__clf)
clf = self.__clf
clf.save(self.SAVE_FILE)
clf = Classifier.from_pickle(self.SAVE_FILE)
self.assertEqual(results, self.__test_digits(self.__X, self.__y, clf))
def __test_digits(self, X, y, clf):
"""Test that the digits are classified correctly by a classifier."""
self.assertEqual(len(X), len(y))
correct = 0
        for i in range(len(y)):
expected = y[i]
prediction = clf.classify([X[i]])[0]
if expected == prediction:
correct += 1
        self.assertGreaterEqual(correct, self.THRESH * len(y))
return correct
@staticmethod
def imgfile_to_grayscale(filename):
"""Load an image as grayscale."""
img = cv2.imread(filename)
return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
@classmethod
def load_digits(cls):
"""Load training data from digits.png"""
gray = cls.imgfile_to_grayscale(cls.DIGITS_FILE)
# Now we split the image to 5000 cells, each 20x20 size
cells = [np.hsplit(row, 100) for row in np.vsplit(gray, 50)]
# Make it into a Numpy array. It size will be (50,100,20,20)
x = np.array(cells)
# Training data
X = [np.reshape(x[y][x_], (400, )).astype(np.float32) / 256
             for x_ in range(100) for y in range(50)]
        # Expected
        y = [y for y in range(10) for x_ in range(len(X) // 10)]
assert len(X) == len(y)
return X, y
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"numpy.vsplit",
"ocr.classify.Classifier",
"ocr.classify.Classifier.from_pickle",
"cv2.cvtColor",
"os.path.dirname",
"numpy.hsplit",
"cv2.imread",
"numpy.array",
"numpy.reshape",
"os.path.join"
] |
[((261, 286), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (276, 286), False, 'import os\n'), ((311, 338), 'os.path.dirname', 'os.path.dirname', (['PARENT_DIR'], {}), '(PARENT_DIR)\n', (326, 338), False, 'import os\n'), ((530, 566), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""digits.png"""'], {}), "(DATA_DIR, 'digits.png')\n", (542, 566), False, 'import os\n'), ((2453, 2468), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2466, 2468), False, 'import unittest\n'), ((719, 748), 'ocr.classify.Classifier', 'Classifier', ([], {'random_state': 'SEED'}), '(random_state=SEED)\n', (729, 748), False, 'from ocr.classify import Classifier\n'), ((1000, 1038), 'ocr.classify.Classifier.from_pickle', 'Classifier.from_pickle', (['self.SAVE_FILE'], {}), '(self.SAVE_FILE)\n', (1022, 1038), False, 'from ocr.classify import Classifier\n'), ((1677, 1697), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (1687, 1697), False, 'import cv2\n'), ((1713, 1750), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1725, 1750), False, 'import cv2\n'), ((2117, 2132), 'numpy.array', 'np.array', (['cells'], {}), '(cells)\n', (2125, 2132), True, 'import numpy as np\n'), ((1983, 2002), 'numpy.hsplit', 'np.hsplit', (['row', '(100)'], {}), '(row, 100)\n', (1992, 2002), True, 'import numpy as np\n'), ((2014, 2033), 'numpy.vsplit', 'np.vsplit', (['gray', '(50)'], {}), '(gray, 50)\n', (2023, 2033), True, 'import numpy as np\n'), ((2171, 2199), 'numpy.reshape', 'np.reshape', (['x[y][x_]', '(400,)'], {}), '(x[y][x_], (400,))\n', (2181, 2199), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
def create_gratings(n_spf, n_ori, n_phase, input_shape, x_train_mean, plot=False,
output_path=''):
# create various grating stimuli
grating_all = np.zeros((n_spf * n_ori * n_phase, ) + input_shape[:-1] + (3, ))
for s in range(n_spf):
spf = np.pi / (input_shape[0] / 2 + 4) * (s + 1)
for o in range(n_ori):
ori = np.pi / n_ori * o
for p in range(n_phase):
phase = 2 * np.pi / n_phase * p
# create a gray-scale grating
gray_grating = create_grating(ori, phase, spf, input_shape[0], input_shape[1])
# standardize the grating into [0, 1]
gray_grating = (gray_grating - np.min(gray_grating)) / \
(np.max(gray_grating) - np.min(gray_grating))
grating = np.zeros(input_shape[:-1] + (3,))
for ch in range(3):
grating[:, :, ch] = gray_grating
grating_all[(s * n_ori + o) * n_phase + p, :, :, :] = grating - x_train_mean
# shuffled gratings
ctrl_grating_all = grating_all[np.random.permutation(len(grating_all))]
# plot
if plot:
for p in range(n_phase):
fig = plt.figure(figsize=(10, 10))
for s in range(n_spf):
for o in range(n_ori):
ax = plt.subplot(n_spf, n_ori, s * n_ori + o + 1)
img = grating_all[(s * n_ori + o) * n_phase + p]
ax.imshow((img - np.min(img)) / (np.max(img) - np.min(img)))
plt.axis('off')
plt.savefig(output_path + 'stims/grating_phase' + str(p) + '.png')
plt.savefig(output_path + 'stims/grating_phase' + str(p) + '.pdf')
plt.close()
for p in range(n_phase):
fig = plt.figure(figsize=(10, 10))
for s in range(n_spf):
for o in range(n_ori):
ax = plt.subplot(n_spf, n_ori, s * n_ori + o + 1)
img = ctrl_grating_all[(s * n_ori + o) * n_phase + p]
ax.imshow((img - np.min(img)) / (np.max(img) - np.min(img)))
plt.axis('off')
plt.savefig(output_path + 'stims/grating_shuffled_phase' + str(p) + '.png')
plt.savefig(output_path + 'stims/grating_shuffled_phase' + str(p) + '.pdf')
plt.close()
return grating_all, ctrl_grating_all
def create_grating(phi, tau, k, h, w):
"""
phi, tau, k: Gabor parameters (ori, phase, SPF)
h, w: shape parameters
"""
gx, gy = np.ogrid[0:h, 0:w]
gx -= h // 2
gy -= w // 2
rot = np.array([[np.cos(phi), -np.sin(phi)], [np.sin(phi), np.cos(phi)]])
k_rot = np.dot(np.transpose(rot), np.array([k, 0]))
grating = np.cos((k_rot[1] * gx + k_rot[0] * gy) + tau)
return grating
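# Hedged usage sketch (not part of the original module): renders a single
# 64x64 grating; the orientation, phase and spatial-frequency values below are
# arbitrary and only meant to show how create_grating is parameterised.
if __name__ == "__main__":
    demo = create_grating(phi=np.pi / 4, tau=0.0, k=0.2, h=64, w=64)
    plt.imshow(demo, cmap="gray")
    plt.axis("off")
    plt.show()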
|
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.close",
"numpy.transpose",
"numpy.zeros",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.array",
"numpy.min",
"numpy.cos",
"numpy.max"
] |
[((226, 288), 'numpy.zeros', 'np.zeros', (['((n_spf * n_ori * n_phase,) + input_shape[:-1] + (3,))'], {}), '((n_spf * n_ori * n_phase,) + input_shape[:-1] + (3,))\n', (234, 288), True, 'import numpy as np\n'), ((2844, 2887), 'numpy.cos', 'np.cos', (['(k_rot[1] * gx + k_rot[0] * gy + tau)'], {}), '(k_rot[1] * gx + k_rot[0] * gy + tau)\n', (2850, 2887), True, 'import numpy as np\n'), ((2793, 2810), 'numpy.transpose', 'np.transpose', (['rot'], {}), '(rot)\n', (2805, 2810), True, 'import numpy as np\n'), ((2812, 2828), 'numpy.array', 'np.array', (['[k, 0]'], {}), '([k, 0])\n', (2820, 2828), True, 'import numpy as np\n'), ((1294, 1322), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1304, 1322), True, 'import matplotlib.pyplot as plt\n'), ((1823, 1834), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1832, 1834), True, 'import matplotlib.pyplot as plt\n'), ((1887, 1915), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1897, 1915), True, 'import matplotlib.pyplot as plt\n'), ((2439, 2450), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2448, 2450), True, 'import matplotlib.pyplot as plt\n'), ((900, 933), 'numpy.zeros', 'np.zeros', (['(input_shape[:-1] + (3,))'], {}), '(input_shape[:-1] + (3,))\n', (908, 933), True, 'import numpy as np\n'), ((2717, 2728), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (2723, 2728), True, 'import numpy as np\n'), ((2746, 2757), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (2752, 2757), True, 'import numpy as np\n'), ((2759, 2770), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (2765, 2770), True, 'import numpy as np\n'), ((1422, 1466), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_spf', 'n_ori', '(s * n_ori + o + 1)'], {}), '(n_spf, n_ori, s * n_ori + o + 1)\n', (1433, 1466), True, 'import matplotlib.pyplot as plt\n'), ((1637, 1652), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1645, 1652), True, 'import matplotlib.pyplot as plt\n'), ((2015, 2059), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_spf', 'n_ori', '(s * n_ori + o + 1)'], {}), '(n_spf, n_ori, s * n_ori + o + 1)\n', (2026, 2059), True, 'import matplotlib.pyplot as plt\n'), ((2235, 2250), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2243, 2250), True, 'import matplotlib.pyplot as plt\n'), ((2731, 2742), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (2737, 2742), True, 'import numpy as np\n'), ((771, 791), 'numpy.min', 'np.min', (['gray_grating'], {}), '(gray_grating)\n', (777, 791), True, 'import numpy as np\n'), ((829, 849), 'numpy.max', 'np.max', (['gray_grating'], {}), '(gray_grating)\n', (835, 849), True, 'import numpy as np\n'), ((852, 872), 'numpy.min', 'np.min', (['gray_grating'], {}), '(gray_grating)\n', (858, 872), True, 'import numpy as np\n'), ((1573, 1584), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (1579, 1584), True, 'import numpy as np\n'), ((1589, 1600), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (1595, 1600), True, 'import numpy as np\n'), ((1603, 1614), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (1609, 1614), True, 'import numpy as np\n'), ((2171, 2182), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (2177, 2182), True, 'import numpy as np\n'), ((2187, 2198), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (2193, 2198), True, 'import numpy as np\n'), ((2201, 2212), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (2207, 2212), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
"""
import numpy as np
import pytest
from motmot._queue import Queue
pytestmark = pytest.mark.order(3)
def test():
self = Queue(10)
assert not self
for i in range(7):
self.append(i)
assert len(self) == 7
assert self
assert np.all(self.queue[:7] == np.arange(7))
assert np.all(self.in_waiting == np.arange(7))
assert self.consume() == 0
assert self.consume() == 1
assert self.consume() == 2
assert self.consume_index == 3
assert np.array_equal(self.in_waiting, np.arange(3, 7))
assert repr(self) == "Queue [ - - - 3 4 5 6 - - - ]"
assert len(self) == 4
self.appends(np.arange(7, 12) % self.max_size)
assert self.in_waiting.tolist() == [3, 4, 5, 6, 7, 8, 9, 0, 1]
assert repr(self) == "Queue [ 0 1 - 3 4 5 6 7 8 9 ]"
assert len(self) == 9
def test_add():
self = Queue(6)
self.add(4)
assert self.in_waiting.tolist() == [4]
self.add(4)
assert self.in_waiting.tolist() == [4]
self.add(5)
assert self.in_waiting.tolist() == [4, 5]
self.add(4)
assert self.in_waiting.tolist() == [4, 5]
self.consume()
assert self.in_waiting.tolist() == [5]
self.add(4)
assert self.in_waiting.tolist() == [5, 4]
|
[
"pytest.mark.order",
"motmot._queue.Queue",
"numpy.arange"
] |
[((113, 133), 'pytest.mark.order', 'pytest.mark.order', (['(3)'], {}), '(3)\n', (130, 133), False, 'import pytest\n'), ((159, 168), 'motmot._queue.Queue', 'Queue', (['(10)'], {}), '(10)\n', (164, 168), False, 'from motmot._queue import Queue\n'), ((885, 893), 'motmot._queue.Queue', 'Queue', (['(6)'], {}), '(6)\n', (890, 893), False, 'from motmot._queue import Queue\n'), ((553, 568), 'numpy.arange', 'np.arange', (['(3)', '(7)'], {}), '(3, 7)\n', (562, 568), True, 'import numpy as np\n'), ((315, 327), 'numpy.arange', 'np.arange', (['(7)'], {}), '(7)\n', (324, 327), True, 'import numpy as np\n'), ((366, 378), 'numpy.arange', 'np.arange', (['(7)'], {}), '(7)\n', (375, 378), True, 'import numpy as np\n'), ((671, 687), 'numpy.arange', 'np.arange', (['(7)', '(12)'], {}), '(7, 12)\n', (680, 687), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
#This is from https://github.com/weixsong vocoder implementation, please see LICENSE-weixsong
import librosa
import librosa.filters
import numpy as np
from scipy import signal
from params import hparams
def preemphasis(x):
return signal.lfilter([1, -hparams.preemphasis], [1], x)
def spectrogram(y):
D = _stft(preemphasis(y))
S = _amp_to_db(np.abs(D)) - hparams.ref_level_db
return _normalize(S)
def melspectrogram(y):
D = _stft(preemphasis(y))
S = _amp_to_db(_linear_to_mel(np.abs(D))) - hparams.ref_level_db
mel_spec = _normalize(S)
return mel_spec.T
def _stft(y):
n_fft, hop_length, win_length = _stft_parameters()
return librosa.stft(y=y, n_fft=n_fft, hop_length=hop_length, win_length=win_length)
def _stft_parameters():
n_fft = hparams.n_fft
hop_length = hparams.hop_length
win_length = hparams.win_length
return n_fft, hop_length, win_length
def _amp_to_db(x):
return 20 * np.log10(np.maximum(1e-5, x))
_mel_basis = None
def _linear_to_mel(spectrogram):
global _mel_basis
if _mel_basis is None:
_mel_basis = _build_mel_basis()
return np.dot(_mel_basis, spectrogram)
def _build_mel_basis():
n_fft = hparams.n_fft
return librosa.filters.mel(hparams.sample_rate, n_fft, n_mels=hparams.num_mels)
def _normalize(S):
return np.clip((S - hparams.min_level_db) / -hparams.min_level_db, 0, 1)
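# Hedged usage sketch (not part of the original module): assumes hparams
# provides sample_rate, n_fft, hop_length, win_length, num_mels, preemphasis,
# ref_level_db and min_level_db, and that "example.wav" is replaced with a
# real audio file path.
if __name__ == "__main__":
    wav, _ = librosa.load("example.wav", sr=hparams.sample_rate)
    mel = melspectrogram(wav)   # (frames, num_mels), normalized to [0, 1]
    lin = spectrogram(wav)      # (1 + n_fft // 2, frames), normalized to [0, 1]
    print(mel.shape, lin.shape)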
|
[
"numpy.abs",
"numpy.maximum",
"scipy.signal.lfilter",
"numpy.clip",
"librosa.filters.mel",
"numpy.dot",
"librosa.stft"
] |
[((262, 311), 'scipy.signal.lfilter', 'signal.lfilter', (['[1, -hparams.preemphasis]', '[1]', 'x'], {}), '([1, -hparams.preemphasis], [1], x)\n', (276, 311), False, 'from scipy import signal\n'), ((699, 775), 'librosa.stft', 'librosa.stft', ([], {'y': 'y', 'n_fft': 'n_fft', 'hop_length': 'hop_length', 'win_length': 'win_length'}), '(y=y, n_fft=n_fft, hop_length=hop_length, win_length=win_length)\n', (711, 775), False, 'import librosa\n'), ((1163, 1194), 'numpy.dot', 'np.dot', (['_mel_basis', 'spectrogram'], {}), '(_mel_basis, spectrogram)\n', (1169, 1194), True, 'import numpy as np\n'), ((1258, 1330), 'librosa.filters.mel', 'librosa.filters.mel', (['hparams.sample_rate', 'n_fft'], {'n_mels': 'hparams.num_mels'}), '(hparams.sample_rate, n_fft, n_mels=hparams.num_mels)\n', (1277, 1330), False, 'import librosa\n'), ((1363, 1428), 'numpy.clip', 'np.clip', (['((S - hparams.min_level_db) / -hparams.min_level_db)', '(0)', '(1)'], {}), '((S - hparams.min_level_db) / -hparams.min_level_db, 0, 1)\n', (1370, 1428), True, 'import numpy as np\n'), ((383, 392), 'numpy.abs', 'np.abs', (['D'], {}), '(D)\n', (389, 392), True, 'import numpy as np\n'), ((987, 1007), 'numpy.maximum', 'np.maximum', (['(1e-05)', 'x'], {}), '(1e-05, x)\n', (997, 1007), True, 'import numpy as np\n'), ((531, 540), 'numpy.abs', 'np.abs', (['D'], {}), '(D)\n', (537, 540), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
from tqdm import tqdm, trange
data = pd.read_csv("result.csv", encoding="latin1").fillna(method="ffill")
print(data.tail(10))
class SentenceGetter(object):
def __init__(self, data):
self.n_sent = 1
self.data = data
self.empty = False
agg_func = lambda s: [(w, p, t) for w, p, t in zip(s["Word"].values.tolist(),
s["POS"].values.tolist(),
s["Tag"].values.tolist())]
self.grouped = self.data.groupby("Sentence #").apply(agg_func)
self.sentences = [s for s in self.grouped]
def get_next(self):
try:
s = self.grouped["Sentence: {}".format(self.n_sent)]
self.n_sent += 1
return s
except:
return None
getter = SentenceGetter(data)
sentences = [[word[0] for word in sentence] for sentence in getter.sentences]
print(sentences[0])
labels = [[s[2] for s in sentence] for sentence in getter.sentences]
print(labels[0])
tag_values = list(set(data["Tag"].values))
tag_values.append("PAD")
tag_values.sort()
tag2idx = {t: i for i, t in enumerate(tag_values)}
print(tag_values)
print(tag2idx)
#Apply Bert
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from transformers import BertTokenizer, BertConfig, BertModel
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
print(torch.__version__)
MAX_LEN = 75
bs = 32
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
print(torch.cuda.get_device_name(0))
tokenizer = BertTokenizer.from_pretrained('bert-base-cased', do_lower_case=False)
def tokenize_and_preserve_labels(sentence, text_labels):
tokenized_sentence = []
labels = []
for word, label in zip(sentence, text_labels):
# Tokenize the word and count # of subwords the word is broken into
tokenized_word = tokenizer.tokenize(word)
n_subwords = len(tokenized_word)
# Add the tokenized word to the final tokenized word list
tokenized_sentence.extend(tokenized_word)
# Add the same label to the new list of labels `n_subwords` times
labels.extend([label] * n_subwords)
return tokenized_sentence, labels
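# Small illustration (not part of the original script): for a word that BERT's
# WordPiece vocabulary splits into several sub-tokens, the label is repeated
# once per sub-token so tokens and labels stay aligned. The sentence and tags
# below are invented for demonstration only.
example_tokens, example_labels = tokenize_and_preserve_labels(
    ["Nikola", "Tesla", "was", "born", "in", "Smiljan"],
    ["B-per", "I-per", "O", "O", "O", "B-geo"])
print(list(zip(example_tokens, example_labels)))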
tokenized_texts_and_labels = [
tokenize_and_preserve_labels(sent, labs)
for sent, labs in zip(sentences, labels)
]
tokenized_texts = [token_label_pair[0] for token_label_pair in tokenized_texts_and_labels]
labels = [token_label_pair[1] for token_label_pair in tokenized_texts_and_labels]
input_ids = pad_sequences([tokenizer.convert_tokens_to_ids(txt) for txt in tokenized_texts],
maxlen=MAX_LEN, dtype="long", value=0.0,
truncating="post", padding="post")
tags = pad_sequences([[tag2idx.get(l) for l in lab] for lab in labels],
maxlen=MAX_LEN, value=tag2idx["PAD"], padding="post",
dtype="long", truncating="post")
attention_masks = [[float(i != 0.0) for i in ii] for ii in input_ids]
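# The attention mask marks real tokens with 1.0 and padded positions (token id 0)
# with 0.0 so that BERT ignores the padding when attending over the sequence.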
tr_inputs, val_inputs, tr_tags, val_tags = train_test_split(input_ids, tags,
random_state=2018, test_size=0.1)
tr_masks, val_masks, _, _ = train_test_split(attention_masks, input_ids,
random_state=2018, test_size=0.1)
tr_inputs = torch.tensor(tr_inputs)
val_inputs = torch.tensor(val_inputs)
tr_tags = torch.tensor(tr_tags)
val_tags = torch.tensor(val_tags)
tr_masks = torch.tensor(tr_masks)
val_masks = torch.tensor(val_masks)
train_data = TensorDataset(tr_inputs, tr_masks, tr_tags)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=bs)
valid_data = TensorDataset(val_inputs, val_masks, val_tags)
valid_sampler = SequentialSampler(valid_data)
valid_dataloader = DataLoader(valid_data, sampler=valid_sampler, batch_size=bs)
import transformers
from transformers import BertForTokenClassification, AdamW
print(transformers.__version__)
model = BertForTokenClassification.from_pretrained(
"bert-base-cased",
num_labels=len(tag2idx),
output_attentions = False,
output_hidden_states = False
)
model.cuda();
FULL_FINETUNING = True
if FULL_FINETUNING:
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'gamma', 'beta']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'weight_decay_rate': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
'weight_decay_rate': 0.0}
]
else:
param_optimizer = list(model.classifier.named_parameters())
optimizer_grouped_parameters = [{"params": [p for n, p in param_optimizer]}]
optimizer = AdamW(
optimizer_grouped_parameters,
lr=3e-5,
eps=1e-8
)
from transformers import get_linear_schedule_with_warmup
epochs = 3
max_grad_norm = 1.0
# Total number of training steps is number of batches * number of epochs.
total_steps = len(train_dataloader) * epochs
# Create the learning rate scheduler.
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=0,
num_training_steps=total_steps
)
# Fit BERT for named entity recognition
from seqeval.metrics import f1_score, accuracy_score
## Store the average loss after each epoch so we can plot them.
loss_values, validation_loss_values = [], []
for _ in trange(epochs, desc="Epoch"):
# ========================================
# Training
# ========================================
# Perform one full pass over the training set.
# Put the model into training mode.
model.train()
# Reset the total loss for this epoch.
total_loss = 0
# Training loop
for step, batch in enumerate(train_dataloader):
# add batch to gpu
batch = tuple(t.to(device) for t in batch)
b_input_ids, b_input_mask, b_labels = batch
# Always clear any previously calculated gradients before performing a backward pass.
model.zero_grad()
# forward pass
# This will return the loss (rather than the model output)
# because we have provided the `labels`.
outputs = model(b_input_ids, token_type_ids=None,
attention_mask=b_input_mask, labels=b_labels)
# get the loss
loss = outputs[0]
# Perform a backward pass to calculate the gradients.
loss.backward()
# track train loss
total_loss += loss.item()
# Clip the norm of the gradient
# This is to help prevent the "exploding gradients" problem.
torch.nn.utils.clip_grad_norm_(parameters=model.parameters(), max_norm=max_grad_norm)
# update parameters
optimizer.step()
# Update the learning rate.
scheduler.step()
# Calculate the average loss over the training data.
avg_train_loss = total_loss / len(train_dataloader)
print("Average train loss: {}".format(avg_train_loss))
# Store the loss value for plotting the learning curve.
loss_values.append(avg_train_loss)
# ========================================
# Validation
# ========================================
# After the completion of each training epoch, measure our performance on
# our validation set.
# Put the model into evaluation mode
model.eval()
# Reset the validation loss for this epoch.
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
    predictions, true_labels = [], []
for batch in valid_dataloader:
batch = tuple(t.to(device) for t in batch)
b_input_ids, b_input_mask, b_labels = batch
# Telling the model not to compute or store gradients,
# saving memory and speeding up validation
with torch.no_grad():
# Forward pass, calculate logit predictions.
# This will return the logits rather than the loss because we have not provided labels.
outputs = model(b_input_ids, token_type_ids=None,
attention_mask=b_input_mask, labels=b_labels)
# Move logits and labels to CPU
logits = outputs[1].detach().cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
# Calculate the accuracy for this batch of test sentences.
eval_loss += outputs[0].mean().item()
predictions.extend([list(p) for p in np.argmax(logits, axis=2)])
true_labels.extend(label_ids)
eval_loss = eval_loss / len(valid_dataloader)
validation_loss_values.append(eval_loss)
print("Validation loss: {}".format(eval_loss))
pred_tags = [tag_values[p_i] for p, l in zip(predictions, true_labels)
for p_i, l_i in zip(p, l) if tag_values[l_i] != "PAD"]
valid_tags = [tag_values[l_i] for l in true_labels
for l_i in l if tag_values[l_i] != "PAD"]
print("Validation Accuracy: {}".format(accuracy_score(pred_tags, valid_tags)))
#print("Validation F1-Score: {}".format(f1_score(pred_tags, valid_tags)))
print()
# save the model to disk
import joblib
filename = 'finalized_model.sav'
joblib.dump(model, filename)
model = joblib.load(filename)
model.to(device)
test_sentence = """
Ousted WeWork founder <NAME> lists his Manhattan penthouse for $37.5 million.
"""
tokenized_sentence = tokenizer.encode(test_sentence)
input_ids = torch.tensor([tokenized_sentence]).cuda()
with torch.no_grad():
output = model(input_ids)
label_indices = np.argmax(output[0].to('cpu').numpy(), axis=2)
# join bpe split tokens
tokens = tokenizer.convert_ids_to_tokens(input_ids.to('cpu').numpy()[0])
new_tokens, new_labels = [], []
for token, label_idx in zip(tokens, label_indices[0]):
if token.startswith("##"):
new_tokens[-1] = new_tokens[-1] + token[2:]
else:
new_labels.append(tag_values[label_idx])
new_tokens.append(token)
for token, label in zip(new_tokens, new_labels):
print("{}\t{}".format(label, token))
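# Expected output: one "<tag>\t<token>" line per reconstructed word, e.g. "O\tOusted";
# the actual tags depend on the label set in result.csv and on the trained model.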
|
[
"seqeval.metrics.accuracy_score",
"torch.utils.data.RandomSampler",
"numpy.argmax",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"joblib.dump",
"torch.cuda.device_count",
"torch.utils.data.TensorDataset",
"torch.no_grad",
"torch.utils.data.DataLoader",
"torch.utils.data.SequentialSampler",
"torch.cuda.get_device_name",
"tqdm.trange",
"transformers.BertTokenizer.from_pretrained",
"transformers.AdamW",
"transformers.get_linear_schedule_with_warmup",
"torch.cuda.is_available",
"joblib.load",
"torch.tensor"
] |
[((1670, 1695), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1693, 1695), False, 'import torch\n'), ((1745, 1814), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['"""bert-base-cased"""'], {'do_lower_case': '(False)'}), "('bert-base-cased', do_lower_case=False)\n", (1774, 1814), False, 'from transformers import BertTokenizer, BertConfig, BertModel\n'), ((3242, 3309), 'sklearn.model_selection.train_test_split', 'train_test_split', (['input_ids', 'tags'], {'random_state': '(2018)', 'test_size': '(0.1)'}), '(input_ids, tags, random_state=2018, test_size=0.1)\n', (3258, 3309), False, 'from sklearn.model_selection import train_test_split\n'), ((3398, 3476), 'sklearn.model_selection.train_test_split', 'train_test_split', (['attention_masks', 'input_ids'], {'random_state': '(2018)', 'test_size': '(0.1)'}), '(attention_masks, input_ids, random_state=2018, test_size=0.1)\n', (3414, 3476), False, 'from sklearn.model_selection import train_test_split\n'), ((3534, 3557), 'torch.tensor', 'torch.tensor', (['tr_inputs'], {}), '(tr_inputs)\n', (3546, 3557), False, 'import torch\n'), ((3571, 3595), 'torch.tensor', 'torch.tensor', (['val_inputs'], {}), '(val_inputs)\n', (3583, 3595), False, 'import torch\n'), ((3606, 3627), 'torch.tensor', 'torch.tensor', (['tr_tags'], {}), '(tr_tags)\n', (3618, 3627), False, 'import torch\n'), ((3639, 3661), 'torch.tensor', 'torch.tensor', (['val_tags'], {}), '(val_tags)\n', (3651, 3661), False, 'import torch\n'), ((3673, 3695), 'torch.tensor', 'torch.tensor', (['tr_masks'], {}), '(tr_masks)\n', (3685, 3695), False, 'import torch\n'), ((3708, 3731), 'torch.tensor', 'torch.tensor', (['val_masks'], {}), '(val_masks)\n', (3720, 3731), False, 'import torch\n'), ((3745, 3788), 'torch.utils.data.TensorDataset', 'TensorDataset', (['tr_inputs', 'tr_masks', 'tr_tags'], {}), '(tr_inputs, tr_masks, tr_tags)\n', (3758, 3788), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((3805, 3830), 'torch.utils.data.RandomSampler', 'RandomSampler', (['train_data'], {}), '(train_data)\n', (3818, 3830), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((3850, 3910), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'sampler': 'train_sampler', 'batch_size': 'bs'}), '(train_data, sampler=train_sampler, batch_size=bs)\n', (3860, 3910), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((3925, 3971), 'torch.utils.data.TensorDataset', 'TensorDataset', (['val_inputs', 'val_masks', 'val_tags'], {}), '(val_inputs, val_masks, val_tags)\n', (3938, 3971), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((3988, 4017), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['valid_data'], {}), '(valid_data)\n', (4005, 4017), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((4037, 4097), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_data'], {'sampler': 'valid_sampler', 'batch_size': 'bs'}), '(valid_data, sampler=valid_sampler, batch_size=bs)\n', (4047, 4097), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n'), ((4988, 5044), 'transformers.AdamW', 'AdamW', (['optimizer_grouped_parameters'], {'lr': '(3e-05)', 'eps': '(1e-08)'}), '(optimizer_grouped_parameters, lr=3e-05, eps=1e-08)\n', (4993, 5044), False, 'from 
transformers import BertForTokenClassification, AdamW\n'), ((5317, 5415), 'transformers.get_linear_schedule_with_warmup', 'get_linear_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': '(0)', 'num_training_steps': 'total_steps'}), '(optimizer, num_warmup_steps=0,\n num_training_steps=total_steps)\n', (5348, 5415), False, 'from transformers import get_linear_schedule_with_warmup\n'), ((5638, 5666), 'tqdm.trange', 'trange', (['epochs'], {'desc': '"""Epoch"""'}), "(epochs, desc='Epoch')\n", (5644, 5666), False, 'from tqdm import tqdm, trange\n'), ((9420, 9448), 'joblib.dump', 'joblib.dump', (['model', 'filename'], {}), '(model, filename)\n', (9431, 9448), False, 'import joblib\n'), ((9457, 9478), 'joblib.load', 'joblib.load', (['filename'], {}), '(filename)\n', (9468, 9478), False, 'import joblib\n'), ((1702, 1731), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', (['(0)'], {}), '(0)\n', (1728, 1731), False, 'import torch\n'), ((9711, 9726), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9724, 9726), False, 'import torch\n'), ((77, 121), 'pandas.read_csv', 'pd.read_csv', (['"""result.csv"""'], {'encoding': '"""latin1"""'}), "('result.csv', encoding='latin1')\n", (88, 121), True, 'import pandas as pd\n'), ((1624, 1649), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1647, 1649), False, 'import torch\n'), ((9664, 9698), 'torch.tensor', 'torch.tensor', (['[tokenized_sentence]'], {}), '([tokenized_sentence])\n', (9676, 9698), False, 'import torch\n'), ((8061, 8076), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8074, 8076), False, 'import torch\n'), ((9218, 9255), 'seqeval.metrics.accuracy_score', 'accuracy_score', (['pred_tags', 'valid_tags'], {}), '(pred_tags, valid_tags)\n', (9232, 9255), False, 'from seqeval.metrics import f1_score, accuracy_score\n'), ((8668, 8693), 'numpy.argmax', 'np.argmax', (['logits'], {'axis': '(2)'}), '(logits, axis=2)\n', (8677, 8693), True, 'import numpy as np\n')]
|
import unittest
import subprocess
import sys
from jaxdax import core
from absl import logging
from absl.testing import absltest, parameterized
from jax._src import test_util as jtu
from jax._src.util import partial
import jax.numpy as jnp
import numpy as np
import jax
import builtins
def f(x, lib=core):
y = lib.sin(x) * 2.0
z = - y + x
return z
class BasicTest(jtu.JaxTestCase):
def check(self, f1, f2, *args, **kws):
jval = f1(*args, **kws)
nval = f2(*args, **kws)
logging.info('jval: %s nval: %s', jval, nval)
self.assertAllClose(jval, nval)
def test_basic(self):
logging.info('info')
self.assertEqual(1, 1)
fnp = partial(f, lib=jnp)
self.check(f, fnp, 3.0)
self.check(core.vmap(f, (0,)), jax.vmap(fnp, (0,)), np.arange(3))
class BackendsTest(jtu.JaxTestCase):
@unittest.skipIf(not sys.executable, "test requires sys.executable")
@jtu.skip_on_devices("gpu", "tpu")
def test_cpu_warning_suppression(self):
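    # Each snippet runs in a fresh subprocess so that jax initializes from scratch;
    # pinning jax_platform_name to "cpu" in the second snippet should suppress the
    # "No GPU/TPU found" warning that the first snippet is expected to emit.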
warning_expected = (
"import jax; "
"jax.numpy.arange(10)")
warning_not_expected = (
"import jax; "
"jax.config.update('jax_platform_name', 'cpu'); "
"jax.numpy.arange(10)")
result = subprocess.run([sys.executable, '-c', warning_expected],
check=True, capture_output=True)
assert "No GPU/TPU found" in result.stderr.decode()
result = subprocess.run([sys.executable, '-c', warning_not_expected],
check=True, capture_output=True)
assert "No GPU/TPU found" not in result.stderr.decode()
if __name__ == '__main__':
builtins.__stdout__ = sys.__stdout__
builtins.__stderr__ = sys.__stderr__
logging.set_verbosity('DEBUG')
#logging.get_absl_handler().python_handler.stream = sys.__stdout__
logging.use_absl_handler()
absltest.main(testLoader=jtu.JaxTestLoader())
|
[
"unittest.skipIf",
"subprocess.run",
"jax.vmap",
"absl.logging.use_absl_handler",
"jax._src.util.partial",
"jaxdax.core.vmap",
"absl.logging.info",
"numpy.arange",
"jax._src.test_util.JaxTestLoader",
"jax._src.test_util.skip_on_devices",
"absl.logging.set_verbosity"
] |
[((871, 938), 'unittest.skipIf', 'unittest.skipIf', (['(not sys.executable)', '"""test requires sys.executable"""'], {}), "(not sys.executable, 'test requires sys.executable')\n", (886, 938), False, 'import unittest\n'), ((942, 975), 'jax._src.test_util.skip_on_devices', 'jtu.skip_on_devices', (['"""gpu"""', '"""tpu"""'], {}), "('gpu', 'tpu')\n", (961, 975), True, 'from jax._src import test_util as jtu\n'), ((1729, 1759), 'absl.logging.set_verbosity', 'logging.set_verbosity', (['"""DEBUG"""'], {}), "('DEBUG')\n", (1750, 1759), False, 'from absl import logging\n'), ((1835, 1861), 'absl.logging.use_absl_handler', 'logging.use_absl_handler', ([], {}), '()\n', (1859, 1861), False, 'from absl import logging\n'), ((514, 559), 'absl.logging.info', 'logging.info', (['"""jval: %s nval: %s"""', 'jval', 'nval'], {}), "('jval: %s nval: %s', jval, nval)\n", (526, 559), False, 'from absl import logging\n'), ((635, 655), 'absl.logging.info', 'logging.info', (['"""info"""'], {}), "('info')\n", (647, 655), False, 'from absl import logging\n'), ((701, 720), 'jax._src.util.partial', 'partial', (['f'], {'lib': 'jnp'}), '(f, lib=jnp)\n', (708, 720), False, 'from jax._src.util import partial\n'), ((1244, 1337), 'subprocess.run', 'subprocess.run', (["[sys.executable, '-c', warning_expected]"], {'check': '(True)', 'capture_output': '(True)'}), "([sys.executable, '-c', warning_expected], check=True,\n capture_output=True)\n", (1258, 1337), False, 'import subprocess\n'), ((1432, 1529), 'subprocess.run', 'subprocess.run', (["[sys.executable, '-c', warning_not_expected]"], {'check': '(True)', 'capture_output': '(True)'}), "([sys.executable, '-c', warning_not_expected], check=True,\n capture_output=True)\n", (1446, 1529), False, 'import subprocess\n'), ((772, 790), 'jaxdax.core.vmap', 'core.vmap', (['f', '(0,)'], {}), '(f, (0,))\n', (781, 790), False, 'from jaxdax import core\n'), ((792, 811), 'jax.vmap', 'jax.vmap', (['fnp', '(0,)'], {}), '(fnp, (0,))\n', (800, 811), False, 'import jax\n'), ((813, 825), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (822, 825), True, 'import numpy as np\n'), ((1891, 1910), 'jax._src.test_util.JaxTestLoader', 'jtu.JaxTestLoader', ([], {}), '()\n', (1908, 1910), True, 'from jax._src import test_util as jtu\n')]
|
# -*- coding: utf-8 -*-
"""
#------------------------------------------------------------------------------#
# #
# Project Name : Atmosphere&Ocean #
# #
# File Name : data_read.py #
# #
# Version : 0.0.1 #
# #
# Contributor : D.CW #
# #
# Start Date : 2020-04-01 09:15:09 #
# #
# Last Update : 2020-07-17 23:08:57 #
# #
# Email : <EMAIL> #
# #
#------------------------------------------------------------------------------#
# Introduction: #
#     Provides data reading functions for linear grid netCDF files, HDF files  #
#     and WRF-output.                                                          #
# #
#-----------------------------------------------------------------------------*#
# Functions: #
#******************************* class: WrfData *******************************#
# #
# #
#******************************* class: NcData ********************************#
# set_rng -- Set the value range. #
# get_var -- Get the value of the specified variable. #
# #
#******************************* class: HdfData *******************************#
# set_rng -- Set the value range. #
# get_var -- Get the value of the specified variable. #
# #
#------------------------------------------------------------------------------#
"""
# Standard libraries
from __future__ import (absolute_import, division, print_function)
from datetime import datetime
from glob import glob
from os import remove
# Third-party libraries
from netCDF4 import Dataset, MFDataset, MFTime
from xarray import open_mfdataset, DataArray
import numpy as np
import pyresample
import wrf
# Local libraries
from .decorator import is_none
from .util import path2nc, extra_same_elem, get_dims, adjust_dim, \
    cnv_ls2slice, get_time
class WrfData(object):
def __init__(self):
pass
def read(self, wrf_fpaths, var_name, lev_seq, lat_seq, lon_seq,
interp_ena=False):
nc_ls = path2nc(wrf_fpaths)
var = wrf.getvar(nc_ls, var_name, method='join')
if interp_ena:
var = self._interp(wrf_fpaths, var, lev_seq, lat_seq, lon_seq)
return var
def pvo(self, wrf_fpaths, lev_seq, lat_seq, lon_seq, interp_ena=False):
nc_ls = path2nc(wrf_fpaths)
U = wrf.getvar(nc_ls, "U", method='join')
V = wrf.getvar(nc_ls, "V", method='join')
THETA = wrf.getvar(nc_ls, "T", method='join')
P = wrf.getvar(nc_ls, "P", method='join')
PB = wrf.getvar(nc_ls, "PB", method='join')
MSFU = wrf.getvar(nc_ls, "MAPFAC_U", method='join')
MSFV = wrf.getvar(nc_ls, "MAPFAC_V", method='join')
MSFM = wrf.getvar(nc_ls, "MAPFAC_M", method='join')
COR = wrf.getvar(nc_ls, "F", method='join')
DX = nc_ls[0].DX
DY = nc_ls[0].DY
        # Data processing
THETA = THETA + 300
P = P + PB
PV = wrf.pvo(U, V, THETA, P, MSFU, MSFV, MSFM, COR, DX, DY)
if interp_ena:
PV = self._interp(wrf_fpaths, PV, lev_seq, lat_seq, lon_seq)
return PV
def _interp(self, wrf_fpaths, var, lev_seq, lat_seq, lon_seq):
lev_num = np.size(lev_seq)
lat_num = np.size(lat_seq)
lon_num = np.size(lon_seq)
nc_ls = path2nc(wrf_fpaths)
lon_curv = wrf.getvar(nc_ls, "XLONG")
lat_curv = wrf.getvar(nc_ls, "XLAT")
p = wrf.getvar(nc_ls, "pressure", method='join')
orig_shp = np.shape(p)
wrf_var = np.ones([orig_shp[0], lev_num, lat_num, lon_num])
lon2d_inter, lat2d_inter = np.meshgrid(lon_seq, lat_seq)
orig_def = pyresample.geometry.SwathDefinition(
lons=lon_curv, lats=lat_curv)
targ_def = pyresample.geometry.SwathDefinition(
lons=lon2d_inter, lats=lat2d_inter)
var = self.eliminate_stagger(var)
for i in range(0, orig_shp[0]):
var_vert_interp = wrf.to_np(
wrf.interplevel(var[i], p[i], lev_seq, False))
for j in range(0, lev_num):
wrf_var[i, j, :, :] = pyresample.kd_tree.resample_nearest(
orig_def, var_vert_interp[j], targ_def,
radius_of_influence=500000, fill_value=None)
return wrf_var
@staticmethod
def eliminate_stagger(var: DataArray):
"""Interpolate stagger grid to regular grid
:param var: class:xarray.DataArray, The variable of wrf-output with
stagger dimension
:return: class:xarray.DataArray, The variable of wrf-output with
normal dimensions
------------------------------------------------------------------
Examples:
"""
dims = var.dims
        # Interpolate to the required levels/region; this also converts the domain coordinates to lat/lon coordinates
for i in range(np.size(dims)):
if "stag" in dims[i]:
var = wrf.destagger(var, i, meta=True)
return var
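# Minimal usage sketch for WrfData; the file path, variable name and target grids
# below are hypothetical placeholders:
#
#     reader = WrfData()
#     lev_seq = [1000, 850, 500]                    # target pressure levels (hPa)
#     lat_seq = np.arange(50.0, 56.0, 0.25)         # target latitude grid
#     lon_seq = np.arange(3.0, 8.0, 0.25)           # target longitude grid
#     t2 = reader.read(["wrfout_d01_example.nc"], "T2",
#                      lev_seq, lat_seq, lon_seq, interp_ena=False)
#     pv = reader.pvo(["wrfout_d01_example.nc"], lev_seq, lat_seq, lon_seq,
#                     interp_ena=True)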
class NcData(object):
"""Read linear grid file in netCDF
Created date: 2020-06-03 19:03:06
Last modified date: 2020-06-06 17:46:21
Contributor: D.CW
Email: <EMAIL>
"""
time = None
lev = None
lat = None
lon = None
_rng = None
dims = None
def __init__(self, fnames: str, engine: str = "netcdf",
group: str = None, concat_dim: str = "time", **kwargs):
engines = ["netcdf", "xarray"]
if engine not in engines:
raise ValueError(
"unrecognized engine for open_dataset: {}\n"
"must be one of: {}".format(engine, engines)
)
fname_ls = glob(fnames)
if engine == "xarray":
self.data_obj = open_mfdataset(fname_ls, group=group,
concat_dim=concat_dim,
combine='by_coords', **kwargs)
engine = 0
elif engine == "netcdf":
if len(fname_ls) == 1:
self.data_obj = Dataset(fname_ls[0], **kwargs)
else:
self.data_obj = MFDataset(fname_ls, aggdim=concat_dim,
**kwargs)
if hasattr(self.data_obj['time'], 'calendar'):
cal = self.data_obj['time'].calendar
else:
cal = 'standard'
self._orig_time = MFTime(self.data_obj['time'], calendar=cal)
engine = 1
self.engine = engine
def set_rng(self, time_rng: tuple = None, lev_rng: tuple = None,
lat_rng: tuple = None, lon_rng: tuple = None):
"""Set the value range.
:param time_rng: class:tuple, time range
:param lev_rng: class:tuple, level or height range
:param lat_rng: class:tuple, latitude range
:param lon_rng: class:tuple, longitude range
:return: class: list, valid limited range, which is prepared for
extracting the values of specified variable
------------------------------------------------------------------
Examples:
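        (hypothetical file pattern and ranges)
        >>> nc = NcData("era5_2020*.nc")
        >>> nc.set_rng(time_rng=("2020-01-01 00:00:00", "2020-01-31 18:00:00"),
        ...            lev_rng=(500, 850), lat_rng=(20, 60), lon_rng=(100, 140))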
"""
self._rng = []
self.dims = []
dims = get_dims(self.data_obj)
for dim in dims:
if dim.upper() in ['TIME', 'TIMES']:
orig_time_exist = hasattr(self, '_orig_time')
if orig_time_exist:
trgt_time_rng = cnv_ls2slice(
self._get_multi_rng(var=self._orig_time,
boundary=time_rng, pos=0))
self.time = self._engine_sel(
self._orig_time, trgt_time_rng)
else:
trgt_time_rng = cnv_ls2slice(
self._get_multi_rng(var=self.data_obj[dim],
boundary=time_rng, pos=0))
self.time = self._engine_sel(self.data_obj[dim],
trgt_time_rng)
self._rng.append(trgt_time_rng)
if self.engine:
if orig_time_exist:
time = get_time(self._orig_time)
else:
time = get_time(self.data_obj[dim])
self.time.values = time[trgt_time_rng]
self.dims.append(dim)
elif dim.upper() in ['LEV', 'LEVEL', 'LEVELS', 'EXPVER']:
lev_trgt_rng = cnv_ls2slice(
self._get_multi_rng(var=self.data_obj[dim],
boundary=lev_rng, pos=1))
self._rng.append(lev_trgt_rng)
self.lev = self._engine_sel(self.data_obj[dim], lev_trgt_rng)
self.dims.append(dim)
elif dim.upper() in ['LAT', 'LATITUDE', 'LATITUDES']:
lat_trgt_rng = cnv_ls2slice(
self._get_multi_rng(var=self.data_obj[dim],
boundary=lat_rng, pos=2))
self._rng.append(lat_trgt_rng)
self.lat = self._engine_sel(self.data_obj[dim], lat_trgt_rng)
self.dims.append(dim)
elif dim.upper() in ['LON', 'LONGITUDE', 'LONGITUDES']:
lon_trgt_rng = cnv_ls2slice(
self._get_multi_rng(var=self.data_obj[dim],
boundary=lon_rng, pos=3))
self._rng.append(lon_trgt_rng)
self.lon = self._engine_sel(self.data_obj[dim], lon_trgt_rng)
self.dims.append(dim)
return self._rng
def get_var(self, var_name: str):
"""Extract specified variable values in a limited area.
:param var_name: class:str, the name of variable
:return: class:xarray.Dataset, class:xarray.Variable, dataset with
attributes
------------------------------------------------------------------
Examples:
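        (hypothetical file and variable names; set_rng must be called first)
        >>> nc = NcData("era5_2020.nc")
        >>> nc.set_rng(lat_rng=(20, 60), lon_rng=(100, 140))
        >>> u = nc.get_var("u")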
"""
var = self.data_obj[var_name]
self._rng, self.dims = adjust_dim(self._rng, self.dims, get_dims(var))
rslt = self._engine_sel(var, tuple(self._rng))
return rslt.squeeze()
def _engine_sel(self, var_obj, rng: tuple or list):
if self.engine:
rslt = DataArray(data=var_obj[rng], dims=get_dims(var_obj),
attrs=var_obj.__dict__)
else:
rslt = var_obj[rng]
return rslt
@is_none
def _get_multi_rng(self, **kwargs):
rslt = []
if np.size(np.shape(kwargs['boundary'])) > 1:
for bdry in kwargs['boundary']:
if kwargs['pos'] == 1:
rslt.extend(
self._get_lev_rng(var=kwargs['var'], boundary=bdry))
elif kwargs['pos'] == 2:
rslt.extend(
self._get_lat_rng(var=kwargs['var'], boundary=bdry))
elif kwargs['pos'] == 3:
rslt.extend(
self._get_lon_rng(var=kwargs['var'], boundary=bdry))
else:
rslt.extend(
self._get_time_rng(var=kwargs['var'], boundary=bdry))
else:
if kwargs['pos'] == 1:
return self._get_lev_rng(var=kwargs['var'],
boundary=kwargs['boundary'])
elif kwargs['pos'] == 2:
return self._get_lat_rng(var=kwargs['var'],
boundary=kwargs['boundary'])
elif kwargs['pos'] == 3:
return self._get_lon_rng(var=kwargs['var'],
boundary=kwargs['boundary'])
else:
return self._get_time_rng(var=kwargs['var'],
boundary=kwargs['boundary'])
return rslt
def _get_lon_rng(self, **kwargs):
lon = kwargs['var'][:]
min_bdry = kwargs['boundary'][0]
max_bdry = kwargs['boundary'][-1]
if min_bdry < -180 or max_bdry > 180:
raise ValueError("Longitude range is [-180,180]")
if max_bdry < min_bdry:
raise ValueError("In the setting of longitude range, the value "
"on the left needs to be smaller than the value "
"on the right.")
if not len(np.where(lon < 0)[0]):
if min_bdry < 0:
min_bdry = 360 + min_bdry
if max_bdry < 0:
max_bdry = 360 + max_bdry
ind = np.where(lon >= min_bdry)[0]
ind2 = np.where(lon <= max_bdry)[0]
rslt = extra_same_elem(ind, ind2)
if not len(rslt):
raise ValueError("Due to the longitude resolution, the "
"longitude range you set is too fine, "
"please expand your setting range.")
return rslt
def _get_lat_rng(self, **kwargs):
lat = kwargs['var'][:]
min_bdry = kwargs['boundary'][0]
max_bdry = kwargs['boundary'][-1]
if min_bdry < -90 or max_bdry > 90:
            raise ValueError("Latitude range is [-90,90]")
if max_bdry < min_bdry:
raise ValueError("In the latitude range setting, the value "
"on the left needs to be smaller than "
"the value on the right.")
ind = np.where(lat >= min_bdry)[0]
ind2 = np.where(lat <= max_bdry)[0]
rslt = extra_same_elem(ind, ind2)
if not len(rslt):
raise ValueError("Due to latitude resolution, the "
"latitude range you set is too fine, "
"please expand your setting range.")
return rslt
def _get_lev_rng(self, **kwargs):
lev = kwargs['var'][:]
min_bdry = kwargs['boundary'][0]
max_bdry = kwargs['boundary'][-1]
if max_bdry < min_bdry:
raise ValueError("In the height range setting, the value "
"on the left needs to be smaller than "
"the value on the right.")
ind = np.where(lev >= min_bdry)[0]
ind2 = np.where(lev <= max_bdry)[0]
rslt = extra_same_elem(ind, ind2)
if not len(rslt):
raise ValueError("Due to the height resolution, the height range "
"you set is too fine, please expand your setting "
"range.")
return rslt
def _get_time_rng(self, **kwargs):
min_bdry = kwargs['boundary'][0]
max_bdry = kwargs['boundary'][-1]
var = kwargs['var']
min_bdry = datetime.strptime(min_bdry, '%Y-%m-%d %H:%M:%S')
max_bdry = datetime.strptime(max_bdry, '%Y-%m-%d %H:%M:%S')
if max_bdry < min_bdry:
            raise ValueError("In the time setting, the value on the left "
                             "must precede the value on the right.")
if self.engine:
time = get_time(var)
ind = np.where(time >= min_bdry)[0]
ind2 = np.where(time <= max_bdry)[0]
else:
min_bdry = np.datetime64(min_bdry)
max_bdry = np.datetime64(max_bdry)
ind = np.where(var >= min_bdry)[0]
ind2 = np.where(var <= max_bdry)[0]
rslt = extra_same_elem(ind, ind2)
if not len(rslt):
raise ValueError("Due to time resolution, the time range you set "
"is too fine, please expand your setting range.")
return rslt
class HdfData(object):
"""HDF file reading
Created date: 2020-06-05 12:54:13
Last modified date: 2020-06-05 16:09:26
Contributor: D.CW
Email: <EMAIL>
"""
time = None
lev = None
lat = None
lon = None
def __init__(self, fnames: str, group: str = "Merged",
concat_dim: str = "time", engine: str = 'xarray', **kwargs):
engines = ["netcdf", "xarray"]
if engine not in engines:
raise ValueError(
"unrecognized engine for open_dataset: {}\n"
"must be one of: {}".format(engine, engines)
)
if engine == "xarray":
self.nc_obj = NcData(fnames, group=group, concat_dim=concat_dim,
engine=engine, **kwargs)
elif engine == "netcdf":
fname_ls = glob(fnames)
xd = open_mfdataset(fname_ls, group=group,
concat_dim=concat_dim,
combine="by_coords", **kwargs)
self.tmp_fname = ''.join(
[datetime.now().strftime("%Y%m%d%H%M%S%f"), '.nc'])
xd.to_netcdf(self.tmp_fname)
self.nc_obj = NcData(self.tmp_fname, concat_dim=concat_dim,
**kwargs)
def set_rng(self, time_rng: tuple = None, lev_rng: tuple = None,
lat_rng: tuple = None, lon_rng: tuple = None):
"""Set the value range.
:param time_rng: class:tuple, time range
:param lev_rng: class:tuple, level or height range
:param lat_rng: class:tuple, latitude range
:param lon_rng: class:tuple, longitude range
:return: class: list, valid limited range, which is prepared for
extracting the values of specified variable
------------------------------------------------------------------
Examples:
"""
rng = self.nc_obj.set_rng(time_rng=time_rng, lev_rng=lev_rng,
lat_rng=lat_rng, lon_rng=lon_rng)
self.time = self.nc_obj.time
self.lev = self.nc_obj.lev
self.lat = self.nc_obj.lat
self.lon = self.nc_obj.lon
return rng
def get_var(self, var_name: str):
"""Extract specified variable values in a limited area.
:param var_name: class:str, the name of variable
:return: class:xarray.Dataset, class:xarray.Variable, dataset with
attributes
------------------------------------------------------------------
Examples:
"""
return self.nc_obj.get_var(var_name)
def __del__(self):
if self.nc_obj.engine:
self.nc_obj.data_obj.close()
remove(self.tmp_fname)
|
[
"os.remove",
"wrf.pvo",
"numpy.ones",
"numpy.shape",
"pyresample.geometry.SwathDefinition",
"glob.glob",
"netCDF4.Dataset",
"numpy.meshgrid",
"wrf.getvar",
"netCDF4.MFDataset",
"datetime.datetime.now",
"numpy.size",
"netCDF4.MFTime",
"datetime.datetime.strptime",
"wrf.interplevel",
"xarray.open_mfdataset",
"numpy.datetime64",
"pyresample.kd_tree.resample_nearest",
"numpy.where",
"wrf.destagger"
] |
[((3536, 3578), 'wrf.getvar', 'wrf.getvar', (['nc_ls', 'var_name'], {'method': '"""join"""'}), "(nc_ls, var_name, method='join')\n", (3546, 3578), False, 'import wrf\n'), ((3823, 3860), 'wrf.getvar', 'wrf.getvar', (['nc_ls', '"""U"""'], {'method': '"""join"""'}), "(nc_ls, 'U', method='join')\n", (3833, 3860), False, 'import wrf\n'), ((3873, 3910), 'wrf.getvar', 'wrf.getvar', (['nc_ls', '"""V"""'], {'method': '"""join"""'}), "(nc_ls, 'V', method='join')\n", (3883, 3910), False, 'import wrf\n'), ((3927, 3964), 'wrf.getvar', 'wrf.getvar', (['nc_ls', '"""T"""'], {'method': '"""join"""'}), "(nc_ls, 'T', method='join')\n", (3937, 3964), False, 'import wrf\n'), ((3977, 4014), 'wrf.getvar', 'wrf.getvar', (['nc_ls', '"""P"""'], {'method': '"""join"""'}), "(nc_ls, 'P', method='join')\n", (3987, 4014), False, 'import wrf\n'), ((4028, 4066), 'wrf.getvar', 'wrf.getvar', (['nc_ls', '"""PB"""'], {'method': '"""join"""'}), "(nc_ls, 'PB', method='join')\n", (4038, 4066), False, 'import wrf\n'), ((4082, 4126), 'wrf.getvar', 'wrf.getvar', (['nc_ls', '"""MAPFAC_U"""'], {'method': '"""join"""'}), "(nc_ls, 'MAPFAC_U', method='join')\n", (4092, 4126), False, 'import wrf\n'), ((4142, 4186), 'wrf.getvar', 'wrf.getvar', (['nc_ls', '"""MAPFAC_V"""'], {'method': '"""join"""'}), "(nc_ls, 'MAPFAC_V', method='join')\n", (4152, 4186), False, 'import wrf\n'), ((4202, 4246), 'wrf.getvar', 'wrf.getvar', (['nc_ls', '"""MAPFAC_M"""'], {'method': '"""join"""'}), "(nc_ls, 'MAPFAC_M', method='join')\n", (4212, 4246), False, 'import wrf\n'), ((4261, 4298), 'wrf.getvar', 'wrf.getvar', (['nc_ls', '"""F"""'], {'method': '"""join"""'}), "(nc_ls, 'F', method='join')\n", (4271, 4298), False, 'import wrf\n'), ((4424, 4478), 'wrf.pvo', 'wrf.pvo', (['U', 'V', 'THETA', 'P', 'MSFU', 'MSFV', 'MSFM', 'COR', 'DX', 'DY'], {}), '(U, V, THETA, P, MSFU, MSFV, MSFM, COR, DX, DY)\n', (4431, 4478), False, 'import wrf\n'), ((4680, 4696), 'numpy.size', 'np.size', (['lev_seq'], {}), '(lev_seq)\n', (4687, 4696), True, 'import numpy as np\n'), ((4715, 4731), 'numpy.size', 'np.size', (['lat_seq'], {}), '(lat_seq)\n', (4722, 4731), True, 'import numpy as np\n'), ((4750, 4766), 'numpy.size', 'np.size', (['lon_seq'], {}), '(lon_seq)\n', (4757, 4766), True, 'import numpy as np\n'), ((4822, 4848), 'wrf.getvar', 'wrf.getvar', (['nc_ls', '"""XLONG"""'], {}), "(nc_ls, 'XLONG')\n", (4832, 4848), False, 'import wrf\n'), ((4868, 4893), 'wrf.getvar', 'wrf.getvar', (['nc_ls', '"""XLAT"""'], {}), "(nc_ls, 'XLAT')\n", (4878, 4893), False, 'import wrf\n'), ((4906, 4950), 'wrf.getvar', 'wrf.getvar', (['nc_ls', '"""pressure"""'], {'method': '"""join"""'}), "(nc_ls, 'pressure', method='join')\n", (4916, 4950), False, 'import wrf\n'), ((4971, 4982), 'numpy.shape', 'np.shape', (['p'], {}), '(p)\n', (4979, 4982), True, 'import numpy as np\n'), ((5001, 5050), 'numpy.ones', 'np.ones', (['[orig_shp[0], lev_num, lat_num, lon_num]'], {}), '([orig_shp[0], lev_num, lat_num, lon_num])\n', (5008, 5050), True, 'import numpy as np\n'), ((5086, 5115), 'numpy.meshgrid', 'np.meshgrid', (['lon_seq', 'lat_seq'], {}), '(lon_seq, lat_seq)\n', (5097, 5115), True, 'import numpy as np\n'), ((5135, 5200), 'pyresample.geometry.SwathDefinition', 'pyresample.geometry.SwathDefinition', ([], {'lons': 'lon_curv', 'lats': 'lat_curv'}), '(lons=lon_curv, lats=lat_curv)\n', (5170, 5200), False, 'import pyresample\n'), ((5233, 5304), 'pyresample.geometry.SwathDefinition', 'pyresample.geometry.SwathDefinition', ([], {'lons': 'lon2d_inter', 'lats': 'lat2d_inter'}), '(lons=lon2d_inter, lats=lat2d_inter)\n', 
(5268, 5304), False, 'import pyresample\n'), ((7083, 7095), 'glob.glob', 'glob', (['fnames'], {}), '(fnames)\n', (7087, 7095), False, 'from glob import glob\n'), ((16167, 16215), 'datetime.datetime.strptime', 'datetime.strptime', (['min_bdry', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(min_bdry, '%Y-%m-%d %H:%M:%S')\n", (16184, 16215), False, 'from datetime import datetime\n'), ((16235, 16283), 'datetime.datetime.strptime', 'datetime.strptime', (['max_bdry', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(max_bdry, '%Y-%m-%d %H:%M:%S')\n", (16252, 16283), False, 'from datetime import datetime\n'), ((6275, 6288), 'numpy.size', 'np.size', (['dims'], {}), '(dims)\n', (6282, 6288), True, 'import numpy as np\n'), ((7155, 7251), 'xarray.open_mfdataset', 'open_mfdataset', (['fname_ls'], {'group': 'group', 'concat_dim': 'concat_dim', 'combine': '"""by_coords"""'}), "(fname_ls, group=group, concat_dim=concat_dim, combine=\n 'by_coords', **kwargs)\n", (7169, 7251), False, 'from xarray import open_mfdataset, DataArray\n'), ((14014, 14039), 'numpy.where', 'np.where', (['(lon >= min_bdry)'], {}), '(lon >= min_bdry)\n', (14022, 14039), True, 'import numpy as np\n'), ((14058, 14083), 'numpy.where', 'np.where', (['(lon <= max_bdry)'], {}), '(lon <= max_bdry)\n', (14066, 14083), True, 'import numpy as np\n'), ((14881, 14906), 'numpy.where', 'np.where', (['(lat >= min_bdry)'], {}), '(lat >= min_bdry)\n', (14889, 14906), True, 'import numpy as np\n'), ((14925, 14950), 'numpy.where', 'np.where', (['(lat <= max_bdry)'], {}), '(lat <= max_bdry)\n', (14933, 14950), True, 'import numpy as np\n'), ((15637, 15662), 'numpy.where', 'np.where', (['(lev >= min_bdry)'], {}), '(lev >= min_bdry)\n', (15645, 15662), True, 'import numpy as np\n'), ((15681, 15706), 'numpy.where', 'np.where', (['(lev <= max_bdry)'], {}), '(lev <= max_bdry)\n', (15689, 15706), True, 'import numpy as np\n'), ((16648, 16671), 'numpy.datetime64', 'np.datetime64', (['min_bdry'], {}), '(min_bdry)\n', (16661, 16671), True, 'import numpy as np\n'), ((16695, 16718), 'numpy.datetime64', 'np.datetime64', (['max_bdry'], {}), '(max_bdry)\n', (16708, 16718), True, 'import numpy as np\n'), ((19774, 19796), 'os.remove', 'remove', (['self.tmp_fname'], {}), '(self.tmp_fname)\n', (19780, 19796), False, 'from os import remove\n'), ((5458, 5503), 'wrf.interplevel', 'wrf.interplevel', (['var[i]', 'p[i]', 'lev_seq', '(False)'], {}), '(var[i], p[i], lev_seq, False)\n', (5473, 5503), False, 'import wrf\n'), ((5583, 5707), 'pyresample.kd_tree.resample_nearest', 'pyresample.kd_tree.resample_nearest', (['orig_def', 'var_vert_interp[j]', 'targ_def'], {'radius_of_influence': '(500000)', 'fill_value': 'None'}), '(orig_def, var_vert_interp[j], targ_def,\n radius_of_influence=500000, fill_value=None)\n', (5618, 5707), False, 'import pyresample\n'), ((6347, 6379), 'wrf.destagger', 'wrf.destagger', (['var', 'i'], {'meta': '(True)'}), '(var, i, meta=True)\n', (6360, 6379), False, 'import wrf\n'), ((11971, 11999), 'numpy.shape', 'np.shape', (["kwargs['boundary']"], {}), "(kwargs['boundary'])\n", (11979, 11999), True, 'import numpy as np\n'), ((16532, 16558), 'numpy.where', 'np.where', (['(time >= min_bdry)'], {}), '(time >= min_bdry)\n', (16540, 16558), True, 'import numpy as np\n'), ((16581, 16607), 'numpy.where', 'np.where', (['(time <= max_bdry)'], {}), '(time <= max_bdry)\n', (16589, 16607), True, 'import numpy as np\n'), ((16737, 16762), 'numpy.where', 'np.where', (['(var >= min_bdry)'], {}), '(var >= min_bdry)\n', (16745, 16762), True, 'import numpy as np\n'), ((16785, 16810), 'numpy.where', 
'np.where', (['(var <= max_bdry)'], {}), '(var <= max_bdry)\n', (16793, 16810), True, 'import numpy as np\n'), ((17903, 17915), 'glob.glob', 'glob', (['fnames'], {}), '(fnames)\n', (17907, 17915), False, 'from glob import glob\n'), ((17933, 18029), 'xarray.open_mfdataset', 'open_mfdataset', (['fname_ls'], {'group': 'group', 'concat_dim': 'concat_dim', 'combine': '"""by_coords"""'}), "(fname_ls, group=group, concat_dim=concat_dim, combine=\n 'by_coords', **kwargs)\n", (17947, 18029), False, 'from xarray import open_mfdataset, DataArray\n'), ((7456, 7486), 'netCDF4.Dataset', 'Dataset', (['fname_ls[0]'], {}), '(fname_ls[0], **kwargs)\n', (7463, 7486), False, 'from netCDF4 import Dataset, MFDataset, MFTime\n'), ((7537, 7585), 'netCDF4.MFDataset', 'MFDataset', (['fname_ls'], {'aggdim': 'concat_dim'}), '(fname_ls, aggdim=concat_dim, **kwargs)\n', (7546, 7585), False, 'from netCDF4 import Dataset, MFDataset, MFTime\n'), ((7841, 7884), 'netCDF4.MFTime', 'MFTime', (["self.data_obj['time']"], {'calendar': 'cal'}), "(self.data_obj['time'], calendar=cal)\n", (7847, 7884), False, 'from netCDF4 import Dataset, MFDataset, MFTime\n'), ((13834, 13851), 'numpy.where', 'np.where', (['(lon < 0)'], {}), '(lon < 0)\n', (13842, 13851), True, 'import numpy as np\n'), ((18144, 18158), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (18156, 18158), False, 'from datetime import datetime\n')]
|
"""
@author: <NAME>, UvA
Aim: apply Random Forest for classifying segments into given vegetation classes
Input: path of polygon with segment related features + label
Output: accuracy report, feature importance, classified shapefile
Example usage (from command line):
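    python <this_script>.py /path/to/data/ segments_with_features.shp
    (the script name, data directory and shapefile name above are placeholders;
     the two positional arguments correspond to `path` and `segments` below)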
ToDo:
1. automate feature_list definition
"""
import sys
import argparse
import numpy as np
import pandas as pd
import geopandas as gpd
from geopandas.tools import sjoin
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score,precision_score,recall_score
from sklearn.metrics import classification_report
from collections import Counter
from imblearn.under_sampling import RandomUnderSampler
import matplotlib.pyplot as plt
import seaborn as sns
def cohenkappa_calc(cm):
"""
Cohen Kappa calculator.
Input: confusion_matrix function from sklearn results
Output: cohen kappa
"""
import numpy as np
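    # Cohen's kappa is computed here in count form:
    # kappa = (observed agreement - agreement expected by chance)
    #         / (total count - agreement expected by chance),
    # which is equivalent to the usual (p_o - p_e) / (1 - p_e).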
sum_diag=sum(cm.diagonal())
sum_rows=np.ones((1,len(cm)))
sum_cols=np.ones((len(cm)+1,1))
bychance=np.ones((1,len(cm)))
for k in range(0,len(cm)):
sum_rows[0,k]=sum(cm[:,k])
for h in range(0,len(cm)):
sum_cols[h,0]=sum(cm[h,:])
sum_cols[len(cm),0]=sum(sum_cols)-1
for j in range(0,len(cm)):
bychance[0,j]=(sum_rows[0,j]/sum_cols[len(cm),0])*sum_cols[j,0]
sum_bychance=sum(bychance[0,:])
cohenkappa=(sum_diag-sum_bychance)/((sum_cols[len(cm),0])-sum_bychance)
sumsum=np.concatenate((cm, sum_rows), axis=0)
sumsum2=np.concatenate((sumsum, sum_cols), axis=1)
return cohenkappa
parser = argparse.ArgumentParser()
parser.add_argument('path', help='where the files are located')
parser.add_argument('segments', help='polygon shape file with features and classes')
args = parser.parse_args()
# Import and define feature and label
print("------ Import data and re-organize------ ")
segments = gpd.GeoDataFrame.from_file(args.path+args.segments)
#segments=segments[segments['Highestid']!='Open water']
#segments=segments[segments['Highestid']!='Bos']
segments['Highestid']=segments['Highestid'].replace(['Landriet, structuurarm', 'Landriet, structuurrijk','Waterriet'], 'Riet')
segments['Highestid']=segments['Highestid'].replace(['Riet','Struweel','Grasland'], 'Non-water')
# pre-organize the data
#feature_list=['mean_echo_','mean_Plana','mean_Curva','mean_kurto','mean_sigma','mean_media','mean_Spher']
#feature_list=['mean_echo_','mean_Plana','mean_Curva','mean_kurto','mean_sigma','mean_mean_','mean_media','std_echo_r','std_Planar','std_Curvat','std_kurto_','std_sigma_']
#feature_list=['mean_Plana','mean_Curva','mean_kurto','mean_Spher']
feature_list=segments.columns[7:35]
segments_whighprob=segments[(segments['Prob']>0.4)&(segments['poly_area']>0)]
feature=segments_whighprob[feature_list].values
feature_all=segments[feature_list].values
fea_list_forvis=np.array(feature_list)
label=segments_whighprob['Highestid'].values
# Under-sampling -- get equal number of samples per class + split training and testing
rus = RandomUnderSampler(random_state=0)
feature_resampled, label_resampled = rus.fit_sample(feature, label)
#print(sorted(Counter(label_resampled).items()))
mytrain, mytest, mytrainlabel, mytestlabel = train_test_split(feature_resampled, label_resampled,train_size = 0.6)
# Random Forest
print("------ Apply Random Forest ------ ")
n_estimators=30
criterion='gini'
max_depth=30
min_samples_split=5
min_samples_leaf=5
max_features='auto'
max_leaf_nodes=None
bootstrap=True
oob_score=True
n_jobs=1
random_state=None
verbose=0
class_weight='balanced'
forest = RandomForestClassifier(n_estimators=n_estimators, criterion=criterion, max_depth=max_depth,
min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf,
max_features=max_features, max_leaf_nodes=max_leaf_nodes, bootstrap=bootstrap, oob_score=oob_score,
n_jobs=n_jobs, random_state=random_state, verbose=verbose,class_weight=class_weight)
RF_classifier = forest.fit(mytrain, mytrainlabel)
# Validation
print("------ Validation ------ ")
mypredtest=RF_classifier.predict(mytest)
print(classification_report(mytestlabel, mypredtest))
print(confusion_matrix(mytestlabel, mypredtest))
mypred=RF_classifier.predict(feature_all)
segments['pred_class']=mypred
segments.to_file(args.path+args.segments+"_RFclass.shp", driver='ESRI Shapefile')
importances=RF_classifier.feature_importances_
indices = np.argsort(importances)[::-1]
# Plot the feature importances of the forest
print("------ Export ------ ")
plt.figure()
plt.title("Feature importances")
plt.bar(range(mytrain.shape[1]), importances[indices],
color="r", align="center")
plt.xticks(range(mytrain.shape[1]), fea_list_forvis[indices],rotation=45,horizontalalignment='right')
plt.xlim([-1, mytrain.shape[1]])
plt.tight_layout()
#plt.show()
plt.savefig(args.path+args.segments+"_RFclass_feaimp.png")
# Export classification report
with open(args.path+args.segments+"_RFclass_acc.txt", 'w') as f:
f.write(np.array2string(confusion_matrix(mytestlabel, mypredtest), separator=', '))
f.write(classification_report(mytestlabel, mypredtest))
f.write(np.array2string(cohenkappa_calc(confusion_matrix(mytestlabel, mypredtest))))
f.write(np.array2string(importances[indices]))
f.write(np.array2string(feature_list[indices]))
|
[
"imblearn.under_sampling.RandomUnderSampler",
"sklearn.cross_validation.train_test_split",
"sklearn.ensemble.RandomForestClassifier",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"argparse.ArgumentParser",
"numpy.concatenate",
"numpy.array2string",
"sklearn.metrics.classification_report",
"numpy.argsort",
"geopandas.GeoDataFrame.from_file",
"matplotlib.pyplot.figure",
"numpy.array",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig"
] |
[((1710, 1735), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1733, 1735), False, 'import argparse\n'), ((2015, 2068), 'geopandas.GeoDataFrame.from_file', 'gpd.GeoDataFrame.from_file', (['(args.path + args.segments)'], {}), '(args.path + args.segments)\n', (2041, 2068), True, 'import geopandas as gpd\n'), ((2994, 3016), 'numpy.array', 'np.array', (['feature_list'], {}), '(feature_list)\n', (3002, 3016), True, 'import numpy as np\n'), ((3158, 3192), 'imblearn.under_sampling.RandomUnderSampler', 'RandomUnderSampler', ([], {'random_state': '(0)'}), '(random_state=0)\n', (3176, 3192), False, 'from imblearn.under_sampling import RandomUnderSampler\n'), ((3356, 3424), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['feature_resampled', 'label_resampled'], {'train_size': '(0.6)'}), '(feature_resampled, label_resampled, train_size=0.6)\n', (3372, 3424), False, 'from sklearn.cross_validation import train_test_split\n'), ((3716, 4086), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': 'n_estimators', 'criterion': 'criterion', 'max_depth': 'max_depth', 'min_samples_split': 'min_samples_split', 'min_samples_leaf': 'min_samples_leaf', 'max_features': 'max_features', 'max_leaf_nodes': 'max_leaf_nodes', 'bootstrap': 'bootstrap', 'oob_score': 'oob_score', 'n_jobs': 'n_jobs', 'random_state': 'random_state', 'verbose': 'verbose', 'class_weight': 'class_weight'}), '(n_estimators=n_estimators, criterion=criterion,\n max_depth=max_depth, min_samples_split=min_samples_split,\n min_samples_leaf=min_samples_leaf, max_features=max_features,\n max_leaf_nodes=max_leaf_nodes, bootstrap=bootstrap, oob_score=oob_score,\n n_jobs=n_jobs, random_state=random_state, verbose=verbose, class_weight\n =class_weight)\n', (3738, 4086), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((4724, 4736), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4734, 4736), True, 'import matplotlib.pyplot as plt\n'), ((4737, 4769), 'matplotlib.pyplot.title', 'plt.title', (['"""Feature importances"""'], {}), "('Feature importances')\n", (4746, 4769), True, 'import matplotlib.pyplot as plt\n'), ((4961, 4993), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-1, mytrain.shape[1]]'], {}), '([-1, mytrain.shape[1]])\n', (4969, 4993), True, 'import matplotlib.pyplot as plt\n'), ((4994, 5012), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5010, 5012), True, 'import matplotlib.pyplot as plt\n'), ((5025, 5087), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(args.path + args.segments + '_RFclass_feaimp.png')"], {}), "(args.path + args.segments + '_RFclass_feaimp.png')\n", (5036, 5087), True, 'import matplotlib.pyplot as plt\n'), ((1583, 1621), 'numpy.concatenate', 'np.concatenate', (['(cm, sum_rows)'], {'axis': '(0)'}), '((cm, sum_rows), axis=0)\n', (1597, 1621), True, 'import numpy as np\n'), ((1634, 1676), 'numpy.concatenate', 'np.concatenate', (['(sumsum, sum_cols)'], {'axis': '(1)'}), '((sumsum, sum_cols), axis=1)\n', (1648, 1676), True, 'import numpy as np\n'), ((4302, 4348), 'sklearn.metrics.classification_report', 'classification_report', (['mytestlabel', 'mypredtest'], {}), '(mytestlabel, mypredtest)\n', (4323, 4348), False, 'from sklearn.metrics import classification_report\n'), ((4357, 4398), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['mytestlabel', 'mypredtest'], {}), '(mytestlabel, mypredtest)\n', (4373, 4398), False, 'from sklearn.metrics import confusion_matrix\n'), ((4615, 4638), 
'numpy.argsort', 'np.argsort', (['importances'], {}), '(importances)\n', (4625, 4638), True, 'import numpy as np\n'), ((5276, 5322), 'sklearn.metrics.classification_report', 'classification_report', (['mytestlabel', 'mypredtest'], {}), '(mytestlabel, mypredtest)\n', (5297, 5322), False, 'from sklearn.metrics import classification_report\n'), ((5419, 5456), 'numpy.array2string', 'np.array2string', (['importances[indices]'], {}), '(importances[indices])\n', (5434, 5456), True, 'import numpy as np\n'), ((5467, 5505), 'numpy.array2string', 'np.array2string', (['feature_list[indices]'], {}), '(feature_list[indices])\n', (5482, 5505), True, 'import numpy as np\n'), ((5207, 5248), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['mytestlabel', 'mypredtest'], {}), '(mytestlabel, mypredtest)\n', (5223, 5248), False, 'from sklearn.metrics import confusion_matrix\n'), ((5365, 5406), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['mytestlabel', 'mypredtest'], {}), '(mytestlabel, mypredtest)\n', (5381, 5406), False, 'from sklearn.metrics import confusion_matrix\n')]
|
import numpy as onp
import scipy.sparse
import scipy.sparse.linalg as spalg
from veros import logger, veros_kernel, veros_routine, distributed, runtime_state as rst
from veros.variables import allocate
from veros.core.operators import update, at, numpy as npx
from veros.core.external.solvers.base import LinearSolver
from veros.core.external.poisson_matrix import assemble_poisson_matrix
class SciPySolver(LinearSolver):
@veros_routine(
local_variables=(
"hu",
"hv",
"hvr",
"hur",
"dxu",
"dxt",
"dyu",
"dyt",
"cosu",
"cost",
"isle_boundary_mask",
"maskT",
),
dist_safe=False,
)
def __init__(self, state):
self._matrix, self._boundary_mask = self._assemble_poisson_matrix(state)
jacobi_precon = self._jacobi_preconditioner(state, self._matrix)
self._matrix = jacobi_precon * self._matrix
self._rhs_scale = jacobi_precon.diagonal()
self._extra_args = {}
logger.info("Computing ILU preconditioner...")
ilu_preconditioner = spalg.spilu(self._matrix.tocsc(), drop_tol=1e-6, fill_factor=100)
self._extra_args["M"] = spalg.LinearOperator(self._matrix.shape, ilu_preconditioner.solve)
def _scipy_solver(self, state, rhs, x0, boundary_val):
orig_shape = x0.shape
orig_dtype = x0.dtype
rhs = npx.where(self._boundary_mask, rhs, boundary_val) # set right hand side on boundaries
rhs = onp.asarray(rhs.reshape(-1) * self._rhs_scale, dtype="float64")
x0 = onp.asarray(x0.reshape(-1), dtype="float64")
linear_solution, info = spalg.bicgstab(
self._matrix,
rhs,
x0=x0,
atol=1e-8,
tol=0,
maxiter=1000,
**self._extra_args,
)
if info > 0:
logger.warning("Streamfunction solver did not converge after {} iterations", info)
return npx.asarray(linear_solution, dtype=orig_dtype).reshape(orig_shape)
def solve(self, state, rhs, x0, boundary_val=None):
"""
Main solver for streamfunction. Solves a 2D Poisson equation. Uses scipy.sparse.linalg
linear solvers.
Arguments:
rhs: Right-hand side vector
x0: Initial guess
boundary_val: Array containing values to set on boundary elements. Defaults to `x0`.
"""
rhs_global, x0_global, boundary_val = gather_variables(state, rhs, x0, boundary_val)
if rst.proc_rank == 0:
linear_solution = self._scipy_solver(state, rhs_global, x0_global, boundary_val=boundary_val)
else:
linear_solution = npx.empty_like(rhs)
return scatter_variables(state, linear_solution)
@staticmethod
def _jacobi_preconditioner(state, matrix):
"""
Construct a simple Jacobi preconditioner
"""
settings = state.settings
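        # Jacobi (diagonal) preconditioning: every matrix row and the matching
        # right-hand-side entry get scaled by 1 / A_ii (applied in __init__),
        # which normalizes the diagonal before the ILU-preconditioned BiCGSTAB solve.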
eps = 1e-20
precon = allocate(state.dimensions, ("xu", "yu"), fill=1, local=False)
diag = npx.reshape(matrix.diagonal().copy(), (settings.nx + 4, settings.ny + 4))[2:-2, 2:-2]
precon = update(precon, at[2:-2, 2:-2], npx.where(npx.abs(diag) > eps, 1.0 / (diag + eps), 1.0))
precon = onp.asarray(precon)
return scipy.sparse.dia_matrix((precon.reshape(-1), 0), shape=(precon.size, precon.size)).tocsr()
@staticmethod
def _assemble_poisson_matrix(state):
settings = state.settings
diags, offsets, boundary_mask = assemble_poisson_matrix(state)
# flatten offsets (as expected by scipy.sparse)
offsets = tuple(-dx * diags[0].shape[1] - dy for dx, dy in offsets)
if settings.enable_cyclic_x:
# add cyclic boundary conditions as additional matrix diagonals
# (only works in single-process mode)
wrap_diag_east, wrap_diag_west = (allocate(state.dimensions, ("xu", "yu"), local=False) for _ in range(2))
wrap_diag_east = update(wrap_diag_east, at[2, 2:-2], diags[2][2, 2:-2] * boundary_mask[2, 2:-2])
wrap_diag_west = update(wrap_diag_west, at[-3, 2:-2], diags[1][-3, 2:-2] * boundary_mask[-3, 2:-2])
diags[2] = update(diags[2], at[2, 2:-2], 0.0)
diags[1] = update(diags[1], at[-3, 2:-2], 0.0)
offsets += (-diags[0].shape[1] * (settings.nx - 1), diags[0].shape[1] * (settings.nx - 1))
diags += (wrap_diag_east, wrap_diag_west)
diags = tuple(onp.asarray(diag.reshape(-1)) for diag in (diags))
matrix = scipy.sparse.dia_matrix(
(diags, offsets),
shape=(diags[0].size, diags[0].size),
dtype="float64",
).T.tocsr()
return matrix, boundary_mask
@veros_kernel
def gather_variables(state, rhs, x0, boundary_val):
rhs_global = distributed.gather(rhs, state.dimensions, ("xt", "yt"))
x0_global = distributed.gather(x0, state.dimensions, ("xt", "yt"))
if boundary_val is None:
boundary_val = x0_global
else:
boundary_val = distributed.gather(boundary_val, state.dimensions, ("xt", "yt"))
return rhs_global, x0_global, boundary_val
@veros_kernel
def scatter_variables(state, linear_solution):
return distributed.scatter(linear_solution, state.dimensions, ("xt", "yt"))
|
[
"veros.variables.allocate",
"veros.core.operators.numpy.where",
"veros.distributed.scatter",
"veros.core.external.poisson_matrix.assemble_poisson_matrix",
"numpy.asarray",
"veros.core.operators.numpy.empty_like",
"scipy.sparse.linalg.bicgstab",
"veros.logger.warning",
"scipy.sparse.linalg.LinearOperator",
"veros.core.operators.numpy.asarray",
"veros.veros_routine",
"veros.core.operators.numpy.abs",
"veros.core.operators.update",
"veros.logger.info",
"veros.distributed.gather"
] |
[((430, 588), 'veros.veros_routine', 'veros_routine', ([], {'local_variables': "('hu', 'hv', 'hvr', 'hur', 'dxu', 'dxt', 'dyu', 'dyt', 'cosu', 'cost',\n 'isle_boundary_mask', 'maskT')", 'dist_safe': '(False)'}), "(local_variables=('hu', 'hv', 'hvr', 'hur', 'dxu', 'dxt',\n 'dyu', 'dyt', 'cosu', 'cost', 'isle_boundary_mask', 'maskT'), dist_safe\n =False)\n", (443, 588), False, 'from veros import logger, veros_kernel, veros_routine, distributed, runtime_state as rst\n'), ((4917, 4972), 'veros.distributed.gather', 'distributed.gather', (['rhs', 'state.dimensions', "('xt', 'yt')"], {}), "(rhs, state.dimensions, ('xt', 'yt'))\n", (4935, 4972), False, 'from veros import logger, veros_kernel, veros_routine, distributed, runtime_state as rst\n'), ((4989, 5043), 'veros.distributed.gather', 'distributed.gather', (['x0', 'state.dimensions', "('xt', 'yt')"], {}), "(x0, state.dimensions, ('xt', 'yt'))\n", (5007, 5043), False, 'from veros import logger, veros_kernel, veros_routine, distributed, runtime_state as rst\n'), ((5327, 5395), 'veros.distributed.scatter', 'distributed.scatter', (['linear_solution', 'state.dimensions', "('xt', 'yt')"], {}), "(linear_solution, state.dimensions, ('xt', 'yt'))\n", (5346, 5395), False, 'from veros import logger, veros_kernel, veros_routine, distributed, runtime_state as rst\n'), ((1086, 1132), 'veros.logger.info', 'logger.info', (['"""Computing ILU preconditioner..."""'], {}), "('Computing ILU preconditioner...')\n", (1097, 1132), False, 'from veros import logger, veros_kernel, veros_routine, distributed, runtime_state as rst\n'), ((1260, 1326), 'scipy.sparse.linalg.LinearOperator', 'spalg.LinearOperator', (['self._matrix.shape', 'ilu_preconditioner.solve'], {}), '(self._matrix.shape, ilu_preconditioner.solve)\n', (1280, 1326), True, 'import scipy.sparse.linalg as spalg\n'), ((1462, 1511), 'veros.core.operators.numpy.where', 'npx.where', (['self._boundary_mask', 'rhs', 'boundary_val'], {}), '(self._boundary_mask, rhs, boundary_val)\n', (1471, 1511), True, 'from veros.core.operators import update, at, numpy as npx\n'), ((1719, 1816), 'scipy.sparse.linalg.bicgstab', 'spalg.bicgstab', (['self._matrix', 'rhs'], {'x0': 'x0', 'atol': '(1e-08)', 'tol': '(0)', 'maxiter': '(1000)'}), '(self._matrix, rhs, x0=x0, atol=1e-08, tol=0, maxiter=1000,\n **self._extra_args)\n', (1733, 1816), True, 'import scipy.sparse.linalg as spalg\n'), ((3059, 3120), 'veros.variables.allocate', 'allocate', (['state.dimensions', "('xu', 'yu')"], {'fill': '(1)', 'local': '(False)'}), "(state.dimensions, ('xu', 'yu'), fill=1, local=False)\n", (3067, 3120), False, 'from veros.variables import allocate\n'), ((3344, 3363), 'numpy.asarray', 'onp.asarray', (['precon'], {}), '(precon)\n', (3355, 3363), True, 'import numpy as onp\n'), ((3605, 3635), 'veros.core.external.poisson_matrix.assemble_poisson_matrix', 'assemble_poisson_matrix', (['state'], {}), '(state)\n', (3628, 3635), False, 'from veros.core.external.poisson_matrix import assemble_poisson_matrix\n'), ((5140, 5204), 'veros.distributed.gather', 'distributed.gather', (['boundary_val', 'state.dimensions', "('xt', 'yt')"], {}), "(boundary_val, state.dimensions, ('xt', 'yt'))\n", (5158, 5204), False, 'from veros import logger, veros_kernel, veros_routine, distributed, runtime_state as rst\n'), ((1941, 2027), 'veros.logger.warning', 'logger.warning', (['"""Streamfunction solver did not converge after {} iterations"""', 'info'], {}), "('Streamfunction solver did not converge after {} iterations',\n info)\n", (1955, 2027), False, 'from veros import logger, 
veros_kernel, veros_routine, distributed, runtime_state as rst\n'), ((2770, 2789), 'veros.core.operators.numpy.empty_like', 'npx.empty_like', (['rhs'], {}), '(rhs)\n', (2784, 2789), True, 'from veros.core.operators import update, at, numpy as npx\n'), ((4081, 4160), 'veros.core.operators.update', 'update', (['wrap_diag_east', 'at[2, 2:-2]', '(diags[2][2, 2:-2] * boundary_mask[2, 2:-2])'], {}), '(wrap_diag_east, at[2, 2:-2], diags[2][2, 2:-2] * boundary_mask[2, 2:-2])\n', (4087, 4160), False, 'from veros.core.operators import update, at, numpy as npx\n'), ((4190, 4276), 'veros.core.operators.update', 'update', (['wrap_diag_west', 'at[-3, 2:-2]', '(diags[1][-3, 2:-2] * boundary_mask[-3, 2:-2])'], {}), '(wrap_diag_west, at[-3, 2:-2], diags[1][-3, 2:-2] * boundary_mask[-3,\n 2:-2])\n', (4196, 4276), False, 'from veros.core.operators import update, at, numpy as npx\n'), ((4296, 4330), 'veros.core.operators.update', 'update', (['diags[2]', 'at[2, 2:-2]', '(0.0)'], {}), '(diags[2], at[2, 2:-2], 0.0)\n', (4302, 4330), False, 'from veros.core.operators import update, at, numpy as npx\n'), ((4354, 4389), 'veros.core.operators.update', 'update', (['diags[1]', 'at[-3, 2:-2]', '(0.0)'], {}), '(diags[1], at[-3, 2:-2], 0.0)\n', (4360, 4389), False, 'from veros.core.operators import update, at, numpy as npx\n'), ((2040, 2086), 'veros.core.operators.numpy.asarray', 'npx.asarray', (['linear_solution'], {'dtype': 'orig_dtype'}), '(linear_solution, dtype=orig_dtype)\n', (2051, 2086), True, 'from veros.core.operators import update, at, numpy as npx\n'), ((3979, 4032), 'veros.variables.allocate', 'allocate', (['state.dimensions', "('xu', 'yu')"], {'local': '(False)'}), "(state.dimensions, ('xu', 'yu'), local=False)\n", (3987, 4032), False, 'from veros.variables import allocate\n'), ((3280, 3293), 'veros.core.operators.numpy.abs', 'npx.abs', (['diag'], {}), '(diag)\n', (3287, 3293), True, 'from veros.core.operators import update, at, numpy as npx\n')]
|
import glob
import os
import numpy as np
from PIL import Image
import torch
import torch.utils.data as data
from configuration.base_config import BaseConfig, DataMode
class SmartSegmentationLoader(data.Dataset):
def __init__(self, config, img_files, mask_files, transforms):
super().__init__()
self._img_files = img_files
self._mask_files = mask_files
self._config = config
self._transforms = transforms
def __len__(self):
assert len(self._img_files) == len(self._mask_files), "Num masks and num images should be equal"
return len(self._img_files)
def __getitem__(self, item):
img = self._img_files[item]
mask = self._mask_files[item]
img = np.array(Image.open(img))
mask = np.array(Image.open(mask))
mask = (mask < 128).astype(np.uint8)
image_crop, mask_crop = self._crop(img, mask)
image, mask, _ = self._transforms(image_crop, mask_crop.copy(), mask_crop.copy())
return image, mask
def _crop(self, image, mask):
rand_row, rand_col = self._get_random_crop(image.shape, self._config.crop_size)
image_crop = np.copy(image[rand_row: rand_row + self._config.crop_size[0],
rand_col: rand_col + self._config.crop_size[1], :])
mask_crop = np.copy(mask[rand_row: rand_row + self._config.crop_size[0],
rand_col: rand_col + self._config.crop_size[1]])
return image_crop, mask_crop
@staticmethod
def _get_random_crop(image_size, crop_size):
rand_row = torch.randint(low=0, high=image_size[0] - crop_size[0], size=[1])
rand_col = torch.randint(low=0, high=image_size[1] - crop_size[1], size=[1])
return rand_row.item(), rand_col.item()
def get_data_loaders(config: BaseConfig):
images = sorted(glob.glob(os.path.join(config.path[DataMode.train], "*")))
masks = sorted(glob.glob(os.path.join(config.mask_path[DataMode.train], "*")))
images_val = sorted(glob.glob(os.path.join(config.path[DataMode.eval], "*")))
masks_val = sorted(glob.glob(os.path.join(config.mask_path[DataMode.eval], "*")))
data_loader = data.DataLoader(
SmartSegmentationLoader(config=config, img_files=images, mask_files=masks,
transforms=config.augmentation),
batch_size=config.batch_size, num_workers=config.num_workers)
data_loader_val = data.DataLoader(
SmartSegmentationLoader(config=config, img_files=images_val, mask_files=masks_val,
transforms=config.val_augmentation),
batch_size=1, num_workers=2)
return {
DataMode.eval: data_loader_val,
DataMode.train: data_loader
}
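# Minimal usage sketch (illustrative only): `MyConfig` is a hypothetical
# BaseConfig subclass providing the fields referenced above
# (path, mask_path, crop_size, batch_size, num_workers, augmentation, ...).
#
# config = MyConfig()
# loaders = get_data_loaders(config)
# for image, mask in loaders[DataMode.train]:
#     ...  # image/mask batches ready for training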
|
[
"torch.randint",
"os.path.join",
"numpy.copy",
"PIL.Image.open"
] |
[((1169, 1285), 'numpy.copy', 'np.copy', (['image[rand_row:rand_row + self._config.crop_size[0], rand_col:rand_col +\n self._config.crop_size[1], :]'], {}), '(image[rand_row:rand_row + self._config.crop_size[0], rand_col:\n rand_col + self._config.crop_size[1], :])\n', (1176, 1285), True, 'import numpy as np\n'), ((1338, 1450), 'numpy.copy', 'np.copy', (['mask[rand_row:rand_row + self._config.crop_size[0], rand_col:rand_col +\n self._config.crop_size[1]]'], {}), '(mask[rand_row:rand_row + self._config.crop_size[0], rand_col:\n rand_col + self._config.crop_size[1]])\n', (1345, 1450), True, 'import numpy as np\n'), ((1607, 1672), 'torch.randint', 'torch.randint', ([], {'low': '(0)', 'high': '(image_size[0] - crop_size[0])', 'size': '[1]'}), '(low=0, high=image_size[0] - crop_size[0], size=[1])\n', (1620, 1672), False, 'import torch\n'), ((1692, 1757), 'torch.randint', 'torch.randint', ([], {'low': '(0)', 'high': '(image_size[1] - crop_size[1])', 'size': '[1]'}), '(low=0, high=image_size[1] - crop_size[1], size=[1])\n', (1705, 1757), False, 'import torch\n'), ((749, 764), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (759, 764), False, 'from PIL import Image\n'), ((790, 806), 'PIL.Image.open', 'Image.open', (['mask'], {}), '(mask)\n', (800, 806), False, 'from PIL import Image\n'), ((1880, 1926), 'os.path.join', 'os.path.join', (['config.path[DataMode.train]', '"""*"""'], {}), "(config.path[DataMode.train], '*')\n", (1892, 1926), False, 'import os\n'), ((1958, 2009), 'os.path.join', 'os.path.join', (['config.mask_path[DataMode.train]', '"""*"""'], {}), "(config.mask_path[DataMode.train], '*')\n", (1970, 2009), False, 'import os\n'), ((2046, 2091), 'os.path.join', 'os.path.join', (['config.path[DataMode.eval]', '"""*"""'], {}), "(config.path[DataMode.eval], '*')\n", (2058, 2091), False, 'import os\n'), ((2127, 2177), 'os.path.join', 'os.path.join', (['config.mask_path[DataMode.eval]', '"""*"""'], {}), "(config.mask_path[DataMode.eval], '*')\n", (2139, 2177), False, 'import os\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import sys, os
from scipy.special import erf
from scipy.optimize import minimize_scalar
from math import isnan
from math import isinf
from dispsol import Jpole8, Jpole12
from dispsol import ES1d
plt.rc('font', family='serif')
plt.rc('xtick', labelsize=7)
plt.rc('ytick', labelsize=7)
plt.rc('axes', labelsize=9)
fig = plt.figure(figsize=(3.54, 4.0)) #single column fig
#fig = plt.figure(figsize=(7.48, 4.0)) #two column figure
gs = plt.GridSpec(2, 1, hspace=0.15)
axs = []
axs.append( plt.subplot(gs[0,0]) )
axs.append( plt.subplot(gs[1,0]) )
for ax in axs:
ax.minorticks_on()
#ax.set_xlabel(r'$k \lambda_D$')
ax.set_xlabel(r'$\hat{k}$')
ax.set_xlim((0.0, 0.55))
axs[0].set_ylabel(r'Re{ $\omega/\omega_p$ }')
axs[1].set_ylabel(r'-Im{ $\omega/\omega_p$ }')
axs[0].set_ylim((0.9, 1.50))
#axs[1].set_ylim((0.02, -0.16))
#axs[1].set_ylim((-0.16, 0.02))
#logscale growth rates
axs[1].set_ylim((1.0e-6, 1.0e0))
axs[1].set_yscale('log')
#Langmuir wave Landau damping
qs = np.array([1.0])
ms = np.array([1.0])
ns0 = np.array([1.0])
Ts = np.array([1.0])
vs0 = np.array([0.0])
wps = np.sqrt(ns0 * qs**2.0 / ms) #plasma frequency
vts = np.sqrt(2.0*Ts/ms) #thermal speed
lDeb = np.sqrt(Ts / (ns0 * qs**2.0)) #Debye length
kDeb = 1.0/lDeb  #Debye wavenumber (1/lDeb)
print("vth= ", vts)
print("ldeB=", lDeb)
print("kDeb=", kDeb)
print("wps= ", wps)
print("vs0= ", vs0)
lDebTot = np.sqrt(1.0/np.sum(ns0/Ts))
print("lDebtot:", lDebTot)
print("vs/vth:", vs0/vts)
##################################################
# testing J-Pole expansion
#bj, cj = Jpole8()
bj, cj = Jpole12()
J = len(bj) #number of pole expansion
S = len(ns0) #number of species
print("sum bj : ", np.sum(bj))
print("sum bj*cj : ", np.sum(bj*cj))
print("sum bj*cj^2: ", np.sum(bj*cj**2.0))
# visualize Langmuir wave dispersion relation
params = {'vts': vts,
'vs0': vs0,
'lDeb': lDeb,
'S':S,
'J':J}
karr = np.linspace(0.01, 0.55, 100)
warr = np.zeros((len(karr), S*J), complex)  # builtin complex; the np.complex alias is removed in newer NumPy
for i,k in enumerate(karr):
w = ES1d(k, params)
warr[i,:] = w[:]
ms = 1.0  # marker size for plotting (re-uses the name of the species-mass array, which is no longer needed)
Nsol = 1
for nsol in range(Nsol):
axs[0].plot(karr, np.abs(np.real(warr[:,nsol])), 'k-', markersize=ms)
#axs[1].plot(karr, np.zeros(len(karr)), "r--")
for nsol in range(Nsol):
axs[1].plot(karr, -np.imag(warr[:,nsol]), 'k-', markersize=ms)
# for printing
karrp = [0.0496729413289805, #mode1
0.099345882657961, #2
0.1490188239869415, #3
0.198691765315922, #4
0.2483647066449025, #5
0.298037647973883, #6
0.3477105893028635, #7
0.397383530631844, #8
0.4470564719608245, #9
0.496729413289805, #10
]
for i,k in enumerate(karrp):
w = ES1d(k, params)
print("mode=",i+1)
print("khat=",k)
print("omeg=",w[1])
print()
wr = np.abs(np.real(w[0]))
wi = np.abs(np.imag(w[0]))
print(wi)
axs[0].plot( k, wr, "r.")
axs[1].plot( k, wi, "r.")
plt.subplots_adjust(left=0.18, bottom=0.09, right=0.98, top=0.95, wspace=0.0, hspace=0.0)
plt.savefig('landau.pdf')
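# Rough analytic cross-check (illustrative): in the weak-damping limit the
# textbook Landau rate for Langmuir waves is approximately
#   gamma ~ sqrt(pi/8) * wp / (k*lDeb)**3 * exp(-1/(2*(k*lDeb)**2) - 3/2),
# which the J-pole roots above should approach for small k*lDeb.
for k in karrp:
    klD = k * lDeb[0]
    gamma_est = np.sqrt(np.pi / 8.0) * wps[0] / klD**3 * np.exp(-1.0 / (2.0 * klD**2) - 1.5)
    print("khat= %.4f  analytic -Im(w) ~ %.3e" % (k, gamma_est))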
|
[
"matplotlib.pyplot.subplot",
"numpy.sum",
"matplotlib.pyplot.subplots_adjust",
"dispsol.ES1d",
"matplotlib.pyplot.figure",
"numpy.imag",
"numpy.array",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.GridSpec",
"dispsol.Jpole12",
"numpy.linspace",
"numpy.real",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((252, 282), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (258, 282), True, 'import matplotlib.pyplot as plt\n'), ((283, 311), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': '(7)'}), "('xtick', labelsize=7)\n", (289, 311), True, 'import matplotlib.pyplot as plt\n'), ((312, 340), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': '(7)'}), "('ytick', labelsize=7)\n", (318, 340), True, 'import matplotlib.pyplot as plt\n'), ((341, 368), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': '(9)'}), "('axes', labelsize=9)\n", (347, 368), True, 'import matplotlib.pyplot as plt\n'), ((376, 407), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(3.54, 4.0)'}), '(figsize=(3.54, 4.0))\n', (386, 407), True, 'import matplotlib.pyplot as plt\n'), ((491, 522), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(2)', '(1)'], {'hspace': '(0.15)'}), '(2, 1, hspace=0.15)\n', (503, 522), True, 'import matplotlib.pyplot as plt\n'), ((1051, 1066), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (1059, 1066), True, 'import numpy as np\n'), ((1073, 1088), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (1081, 1088), True, 'import numpy as np\n'), ((1095, 1110), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (1103, 1110), True, 'import numpy as np\n'), ((1117, 1132), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (1125, 1132), True, 'import numpy as np\n'), ((1139, 1154), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (1147, 1154), True, 'import numpy as np\n'), ((1164, 1193), 'numpy.sqrt', 'np.sqrt', (['(ns0 * qs ** 2.0 / ms)'], {}), '(ns0 * qs ** 2.0 / ms)\n', (1171, 1193), True, 'import numpy as np\n'), ((1219, 1241), 'numpy.sqrt', 'np.sqrt', (['(2.0 * Ts / ms)'], {}), '(2.0 * Ts / ms)\n', (1226, 1241), True, 'import numpy as np\n'), ((1271, 1302), 'numpy.sqrt', 'np.sqrt', (['(Ts / (ns0 * qs ** 2.0))'], {}), '(Ts / (ns0 * qs ** 2.0))\n', (1278, 1302), True, 'import numpy as np\n'), ((1671, 1680), 'dispsol.Jpole12', 'Jpole12', ([], {}), '()\n', (1678, 1680), False, 'from dispsol import Jpole8, Jpole12\n'), ((2033, 2061), 'numpy.linspace', 'np.linspace', (['(0.01)', '(0.55)', '(100)'], {}), '(0.01, 0.55, 100)\n', (2044, 2061), True, 'import numpy as np\n'), ((3071, 3165), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.18)', 'bottom': '(0.09)', 'right': '(0.98)', 'top': '(0.95)', 'wspace': '(0.0)', 'hspace': '(0.0)'}), '(left=0.18, bottom=0.09, right=0.98, top=0.95, wspace=\n 0.0, hspace=0.0)\n', (3090, 3165), True, 'import matplotlib.pyplot as plt\n'), ((3161, 3186), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""landau.pdf"""'], {}), "('landau.pdf')\n", (3172, 3186), True, 'import matplotlib.pyplot as plt\n'), ((546, 567), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, 0]'], {}), '(gs[0, 0])\n', (557, 567), True, 'import matplotlib.pyplot as plt\n'), ((581, 602), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1, 0]'], {}), '(gs[1, 0])\n', (592, 602), True, 'import matplotlib.pyplot as plt\n'), ((1777, 1787), 'numpy.sum', 'np.sum', (['bj'], {}), '(bj)\n', (1783, 1787), True, 'import numpy as np\n'), ((1812, 1827), 'numpy.sum', 'np.sum', (['(bj * cj)'], {}), '(bj * cj)\n', (1818, 1827), True, 'import numpy as np\n'), ((1850, 1872), 'numpy.sum', 'np.sum', (['(bj * cj ** 2.0)'], {}), '(bj * cj ** 2.0)\n', (1856, 1872), True, 'import numpy as np\n'), ((2145, 2160), 'dispsol.ES1d', 'ES1d', (['k', 'params'], 
{}), '(k, params)\n', (2149, 2160), False, 'from dispsol import ES1d\n'), ((2832, 2847), 'dispsol.ES1d', 'ES1d', (['k', 'params'], {}), '(k, params)\n', (2836, 2847), False, 'from dispsol import ES1d\n'), ((1493, 1509), 'numpy.sum', 'np.sum', (['(ns0 / Ts)'], {}), '(ns0 / Ts)\n', (1499, 1509), True, 'import numpy as np\n'), ((2947, 2960), 'numpy.real', 'np.real', (['w[0]'], {}), '(w[0])\n', (2954, 2960), True, 'import numpy as np\n'), ((2979, 2992), 'numpy.imag', 'np.imag', (['w[0]'], {}), '(w[0])\n', (2986, 2992), True, 'import numpy as np\n'), ((2256, 2278), 'numpy.real', 'np.real', (['warr[:, nsol]'], {}), '(warr[:, nsol])\n', (2263, 2278), True, 'import numpy as np\n'), ((2397, 2419), 'numpy.imag', 'np.imag', (['warr[:, nsol]'], {}), '(warr[:, nsol])\n', (2404, 2419), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Code for reading and working with calibration data.
Author <NAME>, 2019
Author <NAME>, 2019
"""
import cv2
import numpy as np
import os
from typing import Tuple, List
from enum import Enum
import yaml
import functools
from libartipy.dataset import Constants, get_logger
from libartipy.dataset import CameraType
logger = get_logger()
def deprecated(func):
"""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used."""
@functools.wraps(func)
def new_func(*args, **kwargs):
logger.warning('Call to deprecated function {}'.format(func.__name__))
return func(*args, **kwargs)
return new_func
class DistortionModel(Enum):
Pinhole = 'Pinhole'
Equidistant = 'Equidistant'
@staticmethod
def get_all_types() -> list:
return [DistortionModel.Pinhole, DistortionModel.Equidistant]
LEFT_CAM_PROJ_MAT = "P1"
RIGHT_CAM_PROJ_MAT = "P2"
LEFT_CAM_ROT_MAT = "R1"
RIGHT_CAM_ROT_MAT = "R2"
DISP_DEPTH_MAP_MAT = "Q"
class CameraCalibration(object):
"""
This class contains the information written in the calibration files.
"""
def __init__(self, distortion_model: DistortionModel, calib_mat: np.ndarray,
distortion_params: List[float], img_dims: List[int],
cropped_img_dims: List[int]):
"""
:param distortion_model:
:param calib_mat: 3x3
:param distortion_params: distortion parameters as list
:param img_dims:
:param cropped_img_dims:
"""
assert calib_mat.shape == (3, 3)
assert len(distortion_params) >= 4
assert len(img_dims) == 2
assert len(cropped_img_dims) == 2
try:
self.distortion_model = DistortionModel(distortion_model)
except:
logger.error('Distortion model not supported. Supported models: {}'.format(DistortionModel.get_all_types()))
# parse calibration file
self.calib_mat = calib_mat
self.distortion_params = distortion_params
self.img_dims = img_dims
self.cropped_img_dims = cropped_img_dims
class CalibrationFactory(object):
@staticmethod
def get_single_cam_info(curr_cam_yaml) -> Tuple[DistortionModel, np.ndarray, np.ndarray, tuple]:
"""
:param curr_cam_yaml:
:return:
"""
DIST_MODELS = {'equidistant': DistortionModel.Equidistant,
'pinhole': DistortionModel.Pinhole,
'none': DistortionModel.Pinhole}
# 1. parse distortion model
dist_model = DIST_MODELS[curr_cam_yaml['distortion_model'].lower()]
# 2. parse intrinsics
intrinsic_params = curr_cam_yaml['intrinsics']
assert len(intrinsic_params) >= 4
calib_mat = np.eye(3)
calib_mat[0, 0] = intrinsic_params[0]
calib_mat[1, 1] = intrinsic_params[1]
calib_mat[0, 2] = intrinsic_params[2]
calib_mat[1, 2] = intrinsic_params[3]
# 3. parse distortion coeffs
dist_params = curr_cam_yaml['distortion_coeffs']
assert len(dist_params) >= 4
dist_params = np.array(dist_params)
# 4. parse image info, resolution
resolution = curr_cam_yaml['resolution']
assert len(resolution) == 2
resolution = tuple(resolution)
return dist_model, calib_mat, dist_params, resolution
@staticmethod
def get_stereo_cam_info(curr_cam_yaml: dict) -> np.ndarray:
"""
Parse stereo transformation
:param curr_cam_yaml:
:return: 4x4
"""
STEREO_FIELD = 'T_cn_cnm1'
stereo_trans = []
for field in curr_cam_yaml[STEREO_FIELD]:
stereo_trans.append(field)
stereo_trans = np.array(stereo_trans)
assert stereo_trans.shape == (4, 4)
return stereo_trans
@staticmethod
@deprecated
def create_from_txt_files(calib_folder_path: str, distorted: bool, constants_calib: Constants) -> \
Tuple[CameraCalibration, CameraCalibration, np.ndarray]:
"""
Parse calibration from txt files.
:param calib_folder_path:
:param distorted:
:param constants_calib:
:return: Left camera calibration, right camera calibration and stereo calibration as transformation matrix
"""
if distorted:
calib_0_path = os.path.join(calib_folder_path, constants_calib.CALIBRATION_DIST_CAMERA_0)
calib_1_path = os.path.join(calib_folder_path, constants_calib.CALIBRATION_DIST_CAMERA_1)
calib_stereo_path = os.path.join(calib_folder_path, constants_calib.CALIBRATION_DIST_STEREO)
else:
calib_0_path = os.path.join(calib_folder_path, constants_calib.CALIBRATION_UNDIST_CAMERA_0)
calib_1_path = os.path.join(calib_folder_path, constants_calib.CALIBRATION_UNDIST_CAMERA_1)
calib_stereo_path = os.path.join(calib_folder_path, constants_calib.CALIBRATION_UNDIST_STEREO)
assert os.path.exists(calib_0_path), 'Calibration file for left camera {} does not exist.'.format(calib_0_path)
assert os.path.exists(calib_1_path), 'Calibration file for right camera{} does not exist.'.format(calib_1_path)
assert os.path.exists(calib_stereo_path), 'Calibration for stereo {} does not exist.'.format(calib_stereo_path)
calib_0 = CalibrationFactory.read_calibration_from_file(fpath=calib_0_path)
calib_1 = CalibrationFactory.read_calibration_from_file(fpath=calib_1_path)
calib_stereo_mat = np.loadtxt(fname=calib_stereo_path, delimiter=' ')
assert calib_stereo_mat.shape == (4, 4)
return calib_0, calib_1, calib_stereo_mat
@staticmethod
def create_from_yaml(calib_folder_path: str, distorted: bool, constants_calib: Constants) -> \
Tuple[CameraCalibration, CameraCalibration, np.ndarray]:
"""
Parse calibration from yaml files.
:param calib_folder_path:
:param distorted:
:param constants_calib:
:return: Left camera calibration, right camera calibration and stereo calibration as transformation matrix
"""
assert distorted, 'So far undistorted not supported'
yaml_file_name = os.path.join(calib_folder_path, constants_calib.CALIBRATION_YAML)
assert os.path.exists(yaml_file_name), '{}'.format(yaml_file_name)
with open(yaml_file_name, 'r') as yaml_file:
# contain 2 cameras and extrinsic calibration
yaml_entries = yaml.load(yaml_file, Loader=yaml.Loader)
# hardcode 2 cameras
assert 'cam0' in yaml_entries.keys() and 'cam1' in yaml_entries.keys(), '{}'.format(yaml_entries.keys())
calibrations = []
for ind in range(2):
dist_model, calib_mat, dist_params, resolution = CalibrationFactory.get_single_cam_info(
yaml_entries['cam' + str(ind)])
camera = CameraCalibration(dist_model, calib_mat, dist_params, resolution, cropped_img_dims=resolution)
calibrations.append(camera)
# TODO(Dmytro) parse extrinsics into dictionary and provide generic getters
        # Note: we do not parse extrinsics to mount right now
calib_stereo_mat = CalibrationFactory.get_stereo_cam_info(yaml_entries['cam1'])
return calibrations[0], calibrations[1], calib_stereo_mat
@staticmethod
def read_calibration_from_file(fpath: str) -> CameraCalibration:
"""
This method reads calibration files and extracts the relevant information.
:param fpath: filepath to calibration information
:return: camera calibration object
"""
with open(fpath, 'r') as calib_file:
lines = calib_file.readlines()
# parse first line containing distortion mode, calibration matrix and distortion parameter
distortion_model = lines[0].split()[0]
fx, fy, cx, cy, k1, k2, k3, k4 = list(map(float, lines[0].split(' ')[1:]))
# parse second line containing image dimensions
img_width, img_height = list(map(int, lines[1].split(' ')))
# parse fourth line containing image dimensions of cropped image
c_img_width, c_img_height = list(map(int, lines[3].split(' ')))
calib_mat = np.eye(3)
calib_mat[0, 0] = fx
calib_mat[1, 1] = fy
calib_mat[0, 2] = cx
calib_mat[1, 2] = cy
distortion_params = np.array([k1, k2, k3, k4])
img_dims = (img_width, img_height)
cropped_img_dims = (c_img_width, c_img_height)
return CameraCalibration(distortion_model, calib_mat, distortion_params, img_dims, cropped_img_dims)
class Calibration(object):
"""
This class contains all calibration data captured for one sensor kit.
"""
def __init__(self, calib_folder_path: str, distorted: bool = True):
self.constants_calib = Constants()
try:
self.calib_0, self.calib_1, self.calib_stereo_mat = CalibrationFactory.create_from_yaml(
calib_folder_path, distorted, self.constants_calib)
except Exception as e:
logger.warning("Did not succeed parsing from yaml file in {0} distorted {1} due to {2}".
format(calib_folder_path, distorted, str(e)))
self.calib_0, self.calib_1, self.calib_stereo_mat = CalibrationFactory.create_from_txt_files(calib_folder_path, distorted,
self.constants_calib)
assert self.calib_stereo_mat is not None
assert self.calib_0.distortion_model == self.calib_1.distortion_model, 'Camera distortion models differ!'
assert self.calib_0.img_dims == self.calib_1.img_dims, 'Image dimensions differ!'
self.img_dims = self.calib_0.img_dims
self.distortion_model = self.calib_0.distortion_model
assert self.distortion_model in [DistortionModel.Equidistant, DistortionModel.Pinhole], \
'Only {} model are implemented so far, not {}'.format([DistortionModel.Equidistant, DistortionModel.Pinhole],
self.distortion_model)
self.map0_x, self.map0_y = None, None
self.map1_x, self.map1_y = None, None
self.K0_optimized, self.K1_optimized = None, None
self.rect_trans = None
if self.distortion_model == DistortionModel.Equidistant:
self._get_undistortion_to_distortion_map()
elif self.distortion_model == DistortionModel.Pinhole:
assert (self.calib_0.distortion_params == np.zeros(4)).all(), \
'Calib1: Distortion parameters of Pinhole model are non-zero.'
assert (self.calib_1.distortion_params == np.zeros(4)).all(), \
'Calib2: Distortion parameters of Pinhole model are non-zero.'
self._adapt_pinhole_parameters_to_equidistant_output()
else:
raise AssertionError
def _get_undistortion_to_distortion_map(self) -> None:
"""
This method performs stereo rectification and undistortion and calculates optimized calibration data as well as
remaps that map from rectified to distorted image.
:return:
"""
# perform stereo rectification
# https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#stereorectify
# The function computes the rotation matrices for each camera that (virtually) make both camera image planes
# the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies the dense
# stereo correspondence problem. The function takes the matrices computed by stereoCalibrate() as input.
# As output, it provides two rotation matrices and also two projection matrices in the new coordinates.
        # The function distinguishes the following two cases: horizontal stereo and vertical stereo.
# CALIB_ZERO_DISPARITY: the function makes the principal points of each camera have
# the same pixel coordinates in the rectified views.
R1, R2, P1, P2, Q = cv2.fisheye.stereoRectify(K1=self.calib_0.calib_mat, D1=self.calib_0.distortion_params,
K2=self.calib_1.calib_mat, D2=self.calib_1.distortion_params,
imageSize=self.img_dims,
R=self.calib_stereo_mat[:3, :3],
tvec=self.calib_stereo_mat[:3, 3],
flags=cv2.CALIB_ZERO_DISPARITY)
self.rect_trans = {LEFT_CAM_ROT_MAT: R1, # Output 3x3 rectification transform (rotation matrix) for the first camera.
# Output 3x3 rectification transform (rotation matrix) for the second camera.
RIGHT_CAM_ROT_MAT: R2,
# Output 3x4 projection matrix in the new (rectified) coordinate systems for the first camera.
LEFT_CAM_PROJ_MAT: P1,
RIGHT_CAM_PROJ_MAT: P2,
DISP_DEPTH_MAP_MAT: Q} # Output \f$4 \times 4\f$ disparity-to-depth mapping matrix (see reprojectImageTo3D )
# generates maps from rectified image to distorted image
self.map0_x, self.map0_y = cv2.fisheye.initUndistortRectifyMap(K=self.calib_0.calib_mat,
D=self.calib_0.distortion_params,
R=R1, P=P1, size=self.img_dims,
m1type=cv2.CV_32FC1)
self.map1_x, self.map1_y = cv2.fisheye.initUndistortRectifyMap(K=self.calib_1.calib_mat,
D=self.calib_1.distortion_params,
R=R2, P=P2, size=self.img_dims,
m1type=cv2.CV_32FC1)
self.K0_optimized = P1[:3, :3]
self.K1_optimized = P2[:3, :3]
def _adapt_pinhole_parameters_to_equidistant_output(self):
self.rect_trans = dict()
self.rect_trans[LEFT_CAM_PROJ_MAT] = np.zeros((3, 4)) # projection matrix of first camera
# projection matrix of second camera (incl. baseline [in pixel] scaled by focal length)
self.rect_trans[RIGHT_CAM_PROJ_MAT] = np.zeros((3, 4))
        self.rect_trans[LEFT_CAM_ROT_MAT] = np.eye(3)  # identity, as no rectification is necessary
        self.rect_trans[RIGHT_CAM_ROT_MAT] = np.eye(3)  # identity, as no rectification is necessary
self.rect_trans[DISP_DEPTH_MAP_MAT] = np.eye(4) # disparity to depth mapping, maps from ucd1 to XYZ1
# update optimized K1 and K2
self.K0_optimized = self.calib_0.calib_mat
self.K1_optimized = self.calib_1.calib_mat
focal_length_x = self.K0_optimized[0, 0]
cx = self.K0_optimized[0, 2]
cy = self.K0_optimized[1, 2]
# update P1 and P2
baseline = self.calib_stereo_mat[0, 3]
self.rect_trans[LEFT_CAM_PROJ_MAT][:3, :3] = self.K0_optimized
self.rect_trans[RIGHT_CAM_PROJ_MAT][:3, :3] = self.K1_optimized
self.rect_trans[RIGHT_CAM_PROJ_MAT][0, 3] = self.K1_optimized[0, 0] * baseline
# update Q: mapping from [u, v, disp, 1] to [X, Y, Z, 1]
self.rect_trans[DISP_DEPTH_MAP_MAT][2, 2] = 0
self.rect_trans[DISP_DEPTH_MAP_MAT][3, 3] = 0
self.rect_trans[DISP_DEPTH_MAP_MAT][0, 3] = -cx
self.rect_trans[DISP_DEPTH_MAP_MAT][1, 3] = -cy
self.rect_trans[DISP_DEPTH_MAP_MAT][2, 3] = focal_length_x
self.rect_trans[DISP_DEPTH_MAP_MAT][3, 2] = 1 / np.abs(baseline)
# update maps 1 and 2 x and y
self.map0_x = None
self.map0_y = None
self.map1_x = None
self.map1_y = None
class DistortionMapper(object):
"""
    This class holds the calibration parameters and can transform images or pixel coordinates from rectified
to distorted space.
"""
def __init__(self, calib_data: Calibration):
self.calib = calib_data
assert self.calib.distortion_model == DistortionModel.Equidistant,\
            'DistortionMapper requires the Equidistant distortion model.'
def remap_rectified_image_to_distorted_image(self, rectified_image: np.ndarray,
camera_position: CameraType = CameraType.LEFT,
interpolation=None) -> (np.ndarray, np.ndarray, np.ndarray):
"""
This method remaps a rectified image to the corresponding distorted image.
:param rectified_image:
:param camera_position:
:param interpolation:
:return:
"""
assert interpolation is None, 'No Interpolation implemented'
# select correct image maps dependent on selected camera
map_column = self.calib.map0_x if camera_position == CameraType.LEFT else self.calib.map1_x
map_row = self.calib.map0_y if camera_position == CameraType.LEFT else self.calib.map1_y
assert rectified_image.shape == map_column.shape, 'Image size and x/column-map size do not match.'
assert rectified_image.shape == map_row.shape, 'Image size and y/row-map size do not match'
# initialize distorted image with zeros
distorted_image = np.zeros_like(rectified_image)
# use zero image to retrieve a list of all pixel indices
rectified_row_coords, rectified_column_coords = np.where(distorted_image == 0)
# generate positions in distorted image using the precomputed maps
rows, cols = self.remap_rectified_coordinates_to_distorted_coordinates(coords_row=rectified_row_coords,
coords_column=rectified_column_coords,
image_dims=rectified_image.shape,
camera_position=camera_position)
# check if transformed coordinates are still on canvas
on_canvas_mask = (0 < rows) & (rows < rectified_image.shape[0]) & \
(0 < cols) & (cols < rectified_image.shape[1])
rows, cols = rows[on_canvas_mask], cols[on_canvas_mask]
rectified_row_coords = rectified_row_coords[on_canvas_mask]
rectified_column_coords = rectified_column_coords[on_canvas_mask]
# transfer value of rectified image to respective position in distorted image
# TODO: integer casting of values not optimal. Implement interpolation mechanisms!
        distorted_image[np.floor(rows).astype(int), np.floor(cols).astype(int)] = \
rectified_image[rectified_row_coords, rectified_column_coords]
return distorted_image, rows, cols
# TODO: may use fisheye::distortPoints
def remap_rectified_coordinates_to_distorted_coordinates(self, coords_row: np.ndarray,
coords_column: np.ndarray,
image_dims: Tuple,
camera_position: CameraType = CameraType.LEFT) -> (np.ndarray, np.ndarray):
"""
This function distorts the rectified coordinates and generates the respective coordinates in the distorted image
:param coords_row: [N]
:param coords_column: [N]
:param image_dims: (H, W)
:param camera_position:
:return:
"""
assert coords_column.size == coords_row.size
# select correct image maps dependent on selected camera
map_row = self.calib.map0_y if camera_position == CameraType.LEFT else self.calib.map1_y
map_column = self.calib.map0_x if camera_position == CameraType.LEFT else self.calib.map1_x
assert image_dims == map_row.shape, 'Image size and y/rows-map size do not match'
assert image_dims == map_column.shape, 'Image size and x/column-map size do not match.'
# get corresponding coordinates in distorted image
result = map_row[coords_row, coords_column], map_column[coords_row, coords_column]
return result
def undistort_image(self, dist_image: np.ndarray,
camera_position: CameraType = CameraType.LEFT,
interpolation=cv2.INTER_LINEAR) -> np.ndarray:
"""
This method remaps a distorted image to the corresponding rectified image.
:param dist_image:
:param camera_position:
:param interpolation:
:return:
"""
map1 = self.calib.map0_x if camera_position == CameraType.LEFT else self.calib.map1_x
map2 = self.calib.map0_y if camera_position == CameraType.LEFT else self.calib.map1_y
undist_image = cv2.remap(dist_image, map1=map1, map2=map2, interpolation=interpolation)
return undist_image
def undistort_coordinates(self, dist_coords: np.ndarray,
camera_position: CameraType = CameraType.LEFT) -> np.ndarray:
"""
        This function rectifies the distorted coordinates and generates the respective coordinates in the rectified image
:param dist_coords: [N x 2]
:param camera_position:
        :return: [2 x N] array with row (v) coordinates first, then column (u) coordinates
"""
# select correct parameters for the left/right camera
camera_matrix = self.calib.calib_0.calib_mat if camera_position == CameraType.LEFT else self.calib.calib_1.calib_mat
dist_coeffs = self.calib.calib_0.distortion_params if camera_position == CameraType.LEFT else self.calib.calib_1.distortion_params
R = self.calib.rect_trans[LEFT_CAM_ROT_MAT] if camera_position == CameraType.LEFT else self.calib.rect_trans[RIGHT_CAM_ROT_MAT]
P = self.calib.rect_trans[LEFT_CAM_PROJ_MAT] if camera_position == CameraType.LEFT else self.calib.rect_trans[RIGHT_CAM_PROJ_MAT]
# transfer (row column) to (x, y) and match the input format
dist_coords = np.array([dist_coords[1], dist_coords[0]], dtype=np.float32)
dist_coords = np.transpose(dist_coords)
dist_coords = np.expand_dims(dist_coords, axis=1)
# calculate the coordinates
undist_coords = cv2.fisheye.undistortPoints(dist_coords, K=camera_matrix, D=dist_coeffs, R=R, P=P)
# transfer (x, y) to (row column) and match the output format
undist_coords = undist_coords.squeeze()
undist_coords = np.array([undist_coords[:, 1], undist_coords[:, 0]])
return np.array(undist_coords)
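# Minimal usage sketch (illustrative only): the folder path is hypothetical
# and must contain the calibration yaml referenced by Constants.CALIBRATION_YAML.
#
# calib = Calibration('/path/to/dataset/calibration', distorted=True)
# mapper = DistortionMapper(calib)  # requires the Equidistant model
# rectified = mapper.undistort_image(raw_left_image, CameraType.LEFT)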
|
[
"yaml.load",
"numpy.abs",
"numpy.floor",
"cv2.remap",
"os.path.join",
"numpy.zeros_like",
"os.path.exists",
"numpy.transpose",
"numpy.loadtxt",
"libartipy.dataset.Constants",
"functools.wraps",
"cv2.fisheye.stereoRectify",
"cv2.fisheye.initUndistortRectifyMap",
"libartipy.dataset.get_logger",
"numpy.zeros",
"numpy.expand_dims",
"numpy.where",
"numpy.array",
"cv2.fisheye.undistortPoints",
"numpy.eye"
] |
[((381, 393), 'libartipy.dataset.get_logger', 'get_logger', ([], {}), '()\n', (391, 393), False, 'from libartipy.dataset import Constants, get_logger\n'), ((581, 602), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (596, 602), False, 'import functools\n'), ((2896, 2905), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (2902, 2905), True, 'import numpy as np\n'), ((3244, 3265), 'numpy.array', 'np.array', (['dist_params'], {}), '(dist_params)\n', (3252, 3265), True, 'import numpy as np\n'), ((3865, 3887), 'numpy.array', 'np.array', (['stereo_trans'], {}), '(stereo_trans)\n', (3873, 3887), True, 'import numpy as np\n'), ((5118, 5146), 'os.path.exists', 'os.path.exists', (['calib_0_path'], {}), '(calib_0_path)\n', (5132, 5146), False, 'import os\n'), ((5238, 5266), 'os.path.exists', 'os.path.exists', (['calib_1_path'], {}), '(calib_1_path)\n', (5252, 5266), False, 'import os\n'), ((5358, 5391), 'os.path.exists', 'os.path.exists', (['calib_stereo_path'], {}), '(calib_stereo_path)\n', (5372, 5391), False, 'import os\n'), ((5659, 5709), 'numpy.loadtxt', 'np.loadtxt', ([], {'fname': 'calib_stereo_path', 'delimiter': '""" """'}), "(fname=calib_stereo_path, delimiter=' ')\n", (5669, 5709), True, 'import numpy as np\n'), ((6357, 6422), 'os.path.join', 'os.path.join', (['calib_folder_path', 'constants_calib.CALIBRATION_YAML'], {}), '(calib_folder_path, constants_calib.CALIBRATION_YAML)\n', (6369, 6422), False, 'import os\n'), ((6439, 6469), 'os.path.exists', 'os.path.exists', (['yaml_file_name'], {}), '(yaml_file_name)\n', (6453, 6469), False, 'import os\n'), ((8418, 8427), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (8424, 8427), True, 'import numpy as np\n'), ((8573, 8599), 'numpy.array', 'np.array', (['[k1, k2, k3, k4]'], {}), '([k1, k2, k3, k4])\n', (8581, 8599), True, 'import numpy as np\n'), ((9032, 9043), 'libartipy.dataset.Constants', 'Constants', ([], {}), '()\n', (9041, 9043), False, 'from libartipy.dataset import Constants, get_logger\n'), ((12333, 12621), 'cv2.fisheye.stereoRectify', 'cv2.fisheye.stereoRectify', ([], {'K1': 'self.calib_0.calib_mat', 'D1': 'self.calib_0.distortion_params', 'K2': 'self.calib_1.calib_mat', 'D2': 'self.calib_1.distortion_params', 'imageSize': 'self.img_dims', 'R': 'self.calib_stereo_mat[:3, :3]', 'tvec': 'self.calib_stereo_mat[:3, 3]', 'flags': 'cv2.CALIB_ZERO_DISPARITY'}), '(K1=self.calib_0.calib_mat, D1=self.calib_0.\n distortion_params, K2=self.calib_1.calib_mat, D2=self.calib_1.\n distortion_params, imageSize=self.img_dims, R=self.calib_stereo_mat[:3,\n :3], tvec=self.calib_stereo_mat[:3, 3], flags=cv2.CALIB_ZERO_DISPARITY)\n', (12358, 12621), False, 'import cv2\n'), ((13622, 13780), 'cv2.fisheye.initUndistortRectifyMap', 'cv2.fisheye.initUndistortRectifyMap', ([], {'K': 'self.calib_0.calib_mat', 'D': 'self.calib_0.distortion_params', 'R': 'R1', 'P': 'P1', 'size': 'self.img_dims', 'm1type': 'cv2.CV_32FC1'}), '(K=self.calib_0.calib_mat, D=self.\n calib_0.distortion_params, R=R1, P=P1, size=self.img_dims, m1type=cv2.\n CV_32FC1)\n', (13657, 13780), False, 'import cv2\n'), ((14020, 14178), 'cv2.fisheye.initUndistortRectifyMap', 'cv2.fisheye.initUndistortRectifyMap', ([], {'K': 'self.calib_1.calib_mat', 'D': 'self.calib_1.distortion_params', 'R': 'R2', 'P': 'P2', 'size': 'self.img_dims', 'm1type': 'cv2.CV_32FC1'}), '(K=self.calib_1.calib_mat, D=self.\n calib_1.distortion_params, R=R2, P=P2, size=self.img_dims, m1type=cv2.\n CV_32FC1)\n', (14055, 14178), False, 'import cv2\n'), ((14604, 14620), 'numpy.zeros', 'np.zeros', (['(3, 4)'], {}), 
'((3, 4))\n', (14612, 14620), True, 'import numpy as np\n'), ((14800, 14816), 'numpy.zeros', 'np.zeros', (['(3, 4)'], {}), '((3, 4))\n', (14808, 14816), True, 'import numpy as np\n'), ((14861, 14870), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (14867, 14870), True, 'import numpy as np\n'), ((14962, 14971), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (14968, 14971), True, 'import numpy as np\n'), ((15064, 15073), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (15070, 15073), True, 'import numpy as np\n'), ((17810, 17840), 'numpy.zeros_like', 'np.zeros_like', (['rectified_image'], {}), '(rectified_image)\n', (17823, 17840), True, 'import numpy as np\n'), ((17963, 17993), 'numpy.where', 'np.where', (['(distorted_image == 0)'], {}), '(distorted_image == 0)\n', (17971, 17993), True, 'import numpy as np\n'), ((21363, 21435), 'cv2.remap', 'cv2.remap', (['dist_image'], {'map1': 'map1', 'map2': 'map2', 'interpolation': 'interpolation'}), '(dist_image, map1=map1, map2=map2, interpolation=interpolation)\n', (21372, 21435), False, 'import cv2\n'), ((22551, 22611), 'numpy.array', 'np.array', (['[dist_coords[1], dist_coords[0]]'], {'dtype': 'np.float32'}), '([dist_coords[1], dist_coords[0]], dtype=np.float32)\n', (22559, 22611), True, 'import numpy as np\n'), ((22634, 22659), 'numpy.transpose', 'np.transpose', (['dist_coords'], {}), '(dist_coords)\n', (22646, 22659), True, 'import numpy as np\n'), ((22682, 22717), 'numpy.expand_dims', 'np.expand_dims', (['dist_coords'], {'axis': '(1)'}), '(dist_coords, axis=1)\n', (22696, 22717), True, 'import numpy as np\n'), ((22779, 22866), 'cv2.fisheye.undistortPoints', 'cv2.fisheye.undistortPoints', (['dist_coords'], {'K': 'camera_matrix', 'D': 'dist_coeffs', 'R': 'R', 'P': 'P'}), '(dist_coords, K=camera_matrix, D=dist_coeffs, R=\n R, P=P)\n', (22806, 22866), False, 'import cv2\n'), ((23005, 23057), 'numpy.array', 'np.array', (['[undist_coords[:, 1], undist_coords[:, 0]]'], {}), '([undist_coords[:, 1], undist_coords[:, 0]])\n', (23013, 23057), True, 'import numpy as np\n'), ((23074, 23097), 'numpy.array', 'np.array', (['undist_coords'], {}), '(undist_coords)\n', (23082, 23097), True, 'import numpy as np\n'), ((4491, 4565), 'os.path.join', 'os.path.join', (['calib_folder_path', 'constants_calib.CALIBRATION_DIST_CAMERA_0'], {}), '(calib_folder_path, constants_calib.CALIBRATION_DIST_CAMERA_0)\n', (4503, 4565), False, 'import os\n'), ((4593, 4667), 'os.path.join', 'os.path.join', (['calib_folder_path', 'constants_calib.CALIBRATION_DIST_CAMERA_1'], {}), '(calib_folder_path, constants_calib.CALIBRATION_DIST_CAMERA_1)\n', (4605, 4667), False, 'import os\n'), ((4700, 4772), 'os.path.join', 'os.path.join', (['calib_folder_path', 'constants_calib.CALIBRATION_DIST_STEREO'], {}), '(calib_folder_path, constants_calib.CALIBRATION_DIST_STEREO)\n', (4712, 4772), False, 'import os\n'), ((4814, 4890), 'os.path.join', 'os.path.join', (['calib_folder_path', 'constants_calib.CALIBRATION_UNDIST_CAMERA_0'], {}), '(calib_folder_path, constants_calib.CALIBRATION_UNDIST_CAMERA_0)\n', (4826, 4890), False, 'import os\n'), ((4918, 4994), 'os.path.join', 'os.path.join', (['calib_folder_path', 'constants_calib.CALIBRATION_UNDIST_CAMERA_1'], {}), '(calib_folder_path, constants_calib.CALIBRATION_UNDIST_CAMERA_1)\n', (4930, 4994), False, 'import os\n'), ((5027, 5101), 'os.path.join', 'os.path.join', (['calib_folder_path', 'constants_calib.CALIBRATION_UNDIST_STEREO'], {}), '(calib_folder_path, constants_calib.CALIBRATION_UNDIST_STEREO)\n', (5039, 5101), False, 'import os\n'), ((6638, 6678), 
'yaml.load', 'yaml.load', (['yaml_file'], {'Loader': 'yaml.Loader'}), '(yaml_file, Loader=yaml.Loader)\n', (6647, 6678), False, 'import yaml\n'), ((16105, 16121), 'numpy.abs', 'np.abs', (['baseline'], {}), '(baseline)\n', (16111, 16121), True, 'import numpy as np\n'), ((19146, 19160), 'numpy.floor', 'np.floor', (['rows'], {}), '(rows)\n', (19154, 19160), True, 'import numpy as np\n'), ((19177, 19191), 'numpy.floor', 'np.floor', (['cols'], {}), '(cols)\n', (19185, 19191), True, 'import numpy as np\n'), ((10788, 10799), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (10796, 10799), True, 'import numpy as np\n'), ((10943, 10954), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (10951, 10954), True, 'import numpy as np\n')]
|
"""
=========================
Fit exotic Hawkes kernels
=========================
This learner assumes Hawkes kernels are linear combinations of a given number
of basis kernels.
Here it is run on an exotic data set generated with mixtures of two cosine
functions. We observe that we can correctly retrieve the kernels and the two
cosine basis functions which have generated the kernels. This experiment
is run on toy datasets in the `original paper`_.
It could have been more precise if end_time or kernel_size was increased.
.. _original paper: http://jmlr.org/proceedings/papers/v28/zhou13.html
"""
import itertools
import numpy as np
import matplotlib.pyplot as plt
from tick.plot import plot_basis_kernels, plot_hawkes_kernels
from tick.hawkes import SimuHawkes, HawkesKernelTimeFunc, HawkesBasisKernels
end_time = 1e9
C = 1e-3
kernel_size = 40
max_iter = 100
# We first simulate a similar Hawkes process
def g1(t):
return np.cos(np.pi * t / 10) + 1.1
def g2(t):
return np.cos(np.pi * (t / 10 + 1)) + 1.1
t_values = np.linspace(0, 20, 1000)
u_values = [(0.007061, 0.001711),
(0.005445, 0.003645),
(0.003645, 0.005445),
(0.001790, 0.007390)]
hawkes = SimuHawkes(baseline=[1e-5, 1e-5], seed=1093, verbose=False)
for i, j in itertools.product(range(2), repeat=2):
u1, u2 = u_values[2 * i + j]
y_values = g1(t_values) * u1 + g2(t_values) * u2
kernel = HawkesKernelTimeFunc(t_values=t_values, y_values=y_values)
hawkes.set_kernel(i, j, kernel)
hawkes.end_time = end_time
hawkes.simulate()
ticks = hawkes.timestamps
# And then perform estimation with two basis kernels
kernel_support = 20
n_basis = 2
em = HawkesBasisKernels(kernel_support, n_basis=n_basis,
kernel_size=kernel_size, C=C,
n_threads=4, max_iter=max_iter,
verbose=False, ode_tol=1e-5)
em.fit(ticks)
fig = plot_hawkes_kernels(em, hawkes=hawkes, support=19.9, show=False)
for ax in fig.axes:
ax.set_ylim([0, 0.025])
fig = plot_basis_kernels(em, basis_kernels=[g2, g1], show=False)
for ax in fig.axes:
ax.set_ylim([0, 0.5])
plt.show()
|
[
"tick.hawkes.HawkesBasisKernels",
"tick.hawkes.SimuHawkes",
"matplotlib.pyplot.show",
"numpy.linspace",
"numpy.cos",
"tick.hawkes.HawkesKernelTimeFunc",
"tick.plot.plot_hawkes_kernels",
"tick.plot.plot_basis_kernels"
] |
[((1044, 1068), 'numpy.linspace', 'np.linspace', (['(0)', '(20)', '(1000)'], {}), '(0, 20, 1000)\n', (1055, 1068), True, 'import numpy as np\n'), ((1215, 1276), 'tick.hawkes.SimuHawkes', 'SimuHawkes', ([], {'baseline': '[1e-05, 1e-05]', 'seed': '(1093)', 'verbose': '(False)'}), '(baseline=[1e-05, 1e-05], seed=1093, verbose=False)\n', (1225, 1276), False, 'from tick.hawkes import SimuHawkes, HawkesKernelTimeFunc, HawkesBasisKernels\n'), ((1684, 1831), 'tick.hawkes.HawkesBasisKernels', 'HawkesBasisKernels', (['kernel_support'], {'n_basis': 'n_basis', 'kernel_size': 'kernel_size', 'C': 'C', 'n_threads': '(4)', 'max_iter': 'max_iter', 'verbose': '(False)', 'ode_tol': '(1e-05)'}), '(kernel_support, n_basis=n_basis, kernel_size=kernel_size,\n C=C, n_threads=4, max_iter=max_iter, verbose=False, ode_tol=1e-05)\n', (1702, 1831), False, 'from tick.hawkes import SimuHawkes, HawkesKernelTimeFunc, HawkesBasisKernels\n'), ((1920, 1984), 'tick.plot.plot_hawkes_kernels', 'plot_hawkes_kernels', (['em'], {'hawkes': 'hawkes', 'support': '(19.9)', 'show': '(False)'}), '(em, hawkes=hawkes, support=19.9, show=False)\n', (1939, 1984), False, 'from tick.plot import plot_basis_kernels, plot_hawkes_kernels\n'), ((2040, 2098), 'tick.plot.plot_basis_kernels', 'plot_basis_kernels', (['em'], {'basis_kernels': '[g2, g1]', 'show': '(False)'}), '(em, basis_kernels=[g2, g1], show=False)\n', (2058, 2098), False, 'from tick.plot import plot_basis_kernels, plot_hawkes_kernels\n'), ((2146, 2156), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2154, 2156), True, 'import matplotlib.pyplot as plt\n'), ((1425, 1483), 'tick.hawkes.HawkesKernelTimeFunc', 'HawkesKernelTimeFunc', ([], {'t_values': 't_values', 'y_values': 'y_values'}), '(t_values=t_values, y_values=y_values)\n', (1445, 1483), False, 'from tick.hawkes import SimuHawkes, HawkesKernelTimeFunc, HawkesBasisKernels\n'), ((943, 965), 'numpy.cos', 'np.cos', (['(np.pi * t / 10)'], {}), '(np.pi * t / 10)\n', (949, 965), True, 'import numpy as np\n'), ((996, 1024), 'numpy.cos', 'np.cos', (['(np.pi * (t / 10 + 1))'], {}), '(np.pi * (t / 10 + 1))\n', (1002, 1024), True, 'import numpy as np\n')]
|
import numpy as np
import scipy
from scipy import interpolate
from scipy.interpolate import interp1d
#configure paremeter
K = 64 # number of OFDM subcarriers
CP = K//4 # length of the cyclic prefix: 25% of the block
P = 8 # number of pilot carriers per OFDM block
pilotValue = 3+3j # The known value each pilot transmits
allCarriers = np.arange(K) # indices of all subcarriers ([0, 1, ... K-1])
paddingCarriers = allCarriers[::K//P] # pad empty for real channel
pilotCarriers = allCarriers[::K//P] # Pilots is every (K/P)th carrier.
# For convenience of channel estimation, let's make the last carriers also be a pilot
pilotCarriers = np.hstack([pilotCarriers, np.array([allCarriers[-1]])])
P = P+1
# data carriers are all remaining carriers
dataCarriers = np.delete(allCarriers, pilotCarriers)
mu = 4 # bits per symbol (i.e. 16QAM)
payloadBits_per_OFDM = len(dataCarriers)*mu # number of payload bits per OFDM symbol
mapping_table = {
(0,0,0,0) : -3-3j,
(0,0,0,1) : -3-1j,
(0,0,1,0) : -3+3j,
(0,0,1,1) : -3+1j,
(0,1,0,0) : -1-3j,
(0,1,0,1) : -1-1j,
(0,1,1,0) : -1+3j,
(0,1,1,1) : -1+1j,
(1,0,0,0) : 3-3j,
(1,0,0,1) : 3-1j,
(1,0,1,0) : 3+3j,
(1,0,1,1) : 3+1j,
(1,1,0,0) : 1-3j,
(1,1,0,1) : 1-1j,
(1,1,1,0) : 1+3j,
(1,1,1,1) : 1+1j
}
demapping_table = {v : k for k, v in mapping_table.items()}
channelResponse = np.array([1, 0, 0.3+0.3j]) # the impulse response of the wireless channel
H_exact = np.fft.fft(channelResponse, K)
SNRdb = 25 # signal to noise-ratio in dB at the receiver
def SP(bits):
return bits.reshape((len(dataCarriers), mu))
def Mapping(bits):
return np.array([mapping_table[tuple(b)] for b in bits])
def OFDM_symbol(QAM_payload):
symbol = np.zeros(K, dtype=complex) # the overall K subcarriers
symbol[pilotCarriers] = pilotValue # allocate the pilot subcarriers
    symbol[dataCarriers] = QAM_payload # allocate the data subcarriers
return symbol
def RealizeIDFT(OFDM_data):
conj_data = np.conjugate(OFDM_data)
rev_data = conj_data[::-1]
app_data = np.append([0.0+0.j], rev_data)
app_data = np.append(app_data, OFDM_data)
ifft_data = np.fft.ifft(app_data)
return ifft_data.real
def IDFT(OFDM_data):
return np.fft.ifft(OFDM_data)
def addCP(OFDM_time):
cp = OFDM_time[-CP:] # take the last CP samples ...
return np.hstack([cp, OFDM_time]) # ... and add them to the beginning
def channel(signal):
convolved = np.convolve(signal, channelResponse)
signal_power = np.mean(abs(convolved**2))
sigma2 = signal_power * 10**(-SNRdb/10) # calculate noise power based on signal power and SNR
print ("RX Signal power: %.4f. Noise power: %.4f" % (signal_power, sigma2))
# Generate complex noise with given variance
noise = np.sqrt(sigma2/2) * (np.random.randn(*convolved.shape)+1j*np.random.randn(*convolved.shape))
return convolved + noise
def removeCP(signal):
return signal[CP:(CP+2*K+1)]
def DFT(OFDM_RX):
return np.fft.fft(OFDM_RX)
def RealizeDFT(OFDM_RX):
fft_data = np.fft.fft(OFDM_RX)
app_data = fft_data[K+1:2*K+1]
return app_data
def channelEstimate(OFDM_demod):
pilots = OFDM_demod[pilotCarriers] # extract the pilot values from the RX signal
Hest_at_pilots = pilots / pilotValue # divide by the transmitted pilot values
# Perform interpolation between the pilot carriers to get an estimate
# of the channel in the data carriers. Here, we interpolate absolute value and phase
# separately
Hest_abs = scipy.interpolate.interp1d(pilotCarriers, abs(Hest_at_pilots), kind='linear')(allCarriers)
Hest_phase = scipy.interpolate.interp1d(pilotCarriers, np.angle(Hest_at_pilots), kind='linear')(allCarriers)
Hest = Hest_abs * np.exp(1j*Hest_phase)
return Hest, Hest_at_pilots
def equalize(OFDM_demod, Hest):
return OFDM_demod / Hest
def get_payload(equalized):
return equalized[dataCarriers]
def Demapping(QAM):
# array of possible constellation points
constellation = np.array([x for x in demapping_table.keys()])
# calculate distance of each RX point to each possible point
dists = abs(QAM.reshape((-1,1)) - constellation.reshape((1,-1)))
# for each element in QAM, choose the index in constellation
# that belongs to the nearest constellation point
const_index = dists.argmin(axis=1)
# get back the real constellation point
hardDecision = constellation[const_index]
# transform the constellation point into the bit groups
return np.vstack([demapping_table[C] for C in hardDecision]), hardDecision
def PS(bits):
return bits.reshape((-1,))
#Over Sample
def OverSample(data, rate = 1/2):
#y = data
y = np.hstack([data, np.array([0])])
x = np.arange(len(y))
f = interpolate.interp1d(x, y)
f2 = interpolate.interp1d(x, y, kind='cubic')
xOverSample = np.arange(0, len(y)-1, rate)
yOverSample = f(xOverSample) # use interpolation function returned by `interp1d`
return yOverSample
def Sample(data, rate = 1/2):
K = len(data)
P = int(K*rate)
allIndexs= np.arange(K)
sampleIndexs= allIndexs[::K//P]
ySample = data[sampleIndexs]
xSample = np.arange(len(ySample))
return ySample
# bits should have length payloadBits_per_OFDM
def ofdm_encode(bits):
#bits = np.random.binomial(n=1, p=0.5, size=(payloadBits_per_OFDM, ))
bitsLen = len(bits)
if(bitsLen >= payloadBits_per_OFDM):
fullBits = bits[:payloadBits_per_OFDM]
else:
bPadding = np.random.binomial(n=1, p=0.5, size=(payloadBits_per_OFDM - bitsLen, ))
fullBits = np.hstack([bits, bPadding])
bits_SP = SP(fullBits)
QAM = Mapping(bits_SP)
OFDM_data = OFDM_symbol(QAM)
OFDM_time = RealizeIDFT(OFDM_data)
OFDM_withCP = addCP(OFDM_time)
OFDM_TX = OverSample(OFDM_withCP)
#float to int16
symbol = (np.array(OFDM_TX)*0x3FFF).astype(np.int16)
return symbol
def ofdm_decode(symbol):
#sampling
OFDM_RX_Sampled = Sample(symbol)
#int16 to float
#OFDM_RX = np.array(OFDM_RX_Sampled)/0x3FFF
OFDM_RX= OFDM_RX_Sampled
OFDM_RX_noCP = removeCP(OFDM_RX)
OFDM_demod = RealizeDFT(OFDM_RX_noCP)
Hest, Hest_at_pilots = channelEstimate(OFDM_demod)
#plt.savefig("channelEstimate.png")
equalized_Hest = equalize(OFDM_demod, Hest)
QAM_est = get_payload(equalized_Hest)
PS_est, hardDecision = Demapping(QAM_est)
bits_est = PS(PS_est)
return bits_est
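if __name__ == "__main__":
    # Minimal round-trip sketch (illustrative only): encode a random payload and
    # decode it straight back.  The int16 scaling applied in ofdm_encode is
    # absorbed by the pilot-based channel estimate, so no explicit rescaling is
    # needed; channel() above can additionally be applied to the encoded symbol
    # to simulate multipath and AWGN at the configured SNRdb.
    tx_bits = np.random.binomial(n=1, p=0.5, size=(payloadBits_per_OFDM,))
    rx_bits = ofdm_decode(ofdm_encode(tx_bits))
    print("BER: %.4f" % np.mean(tx_bits != rx_bits))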
|
[
"numpy.fft.ifft",
"numpy.random.binomial",
"numpy.random.randn",
"numpy.fft.fft",
"numpy.angle",
"numpy.zeros",
"numpy.hstack",
"numpy.append",
"scipy.interpolate.interp1d",
"numpy.array",
"numpy.arange",
"numpy.exp",
"numpy.convolve",
"numpy.conjugate",
"numpy.delete",
"numpy.vstack",
"numpy.sqrt"
] |
[((338, 350), 'numpy.arange', 'np.arange', (['K'], {}), '(K)\n', (347, 350), True, 'import numpy as np\n'), ((764, 801), 'numpy.delete', 'np.delete', (['allCarriers', 'pilotCarriers'], {}), '(allCarriers, pilotCarriers)\n', (773, 801), True, 'import numpy as np\n'), ((1394, 1422), 'numpy.array', 'np.array', (['[1, 0, 0.3 + 0.3j]'], {}), '([1, 0, 0.3 + 0.3j])\n', (1402, 1422), True, 'import numpy as np\n'), ((1479, 1509), 'numpy.fft.fft', 'np.fft.fft', (['channelResponse', 'K'], {}), '(channelResponse, K)\n', (1489, 1509), True, 'import numpy as np\n'), ((1758, 1784), 'numpy.zeros', 'np.zeros', (['K'], {'dtype': 'complex'}), '(K, dtype=complex)\n', (1766, 1784), True, 'import numpy as np\n'), ((2022, 2045), 'numpy.conjugate', 'np.conjugate', (['OFDM_data'], {}), '(OFDM_data)\n', (2034, 2045), True, 'import numpy as np\n'), ((2092, 2125), 'numpy.append', 'np.append', (['[0.0 + 0.0j]', 'rev_data'], {}), '([0.0 + 0.0j], rev_data)\n', (2101, 2125), True, 'import numpy as np\n'), ((2138, 2168), 'numpy.append', 'np.append', (['app_data', 'OFDM_data'], {}), '(app_data, OFDM_data)\n', (2147, 2168), True, 'import numpy as np\n'), ((2185, 2206), 'numpy.fft.ifft', 'np.fft.ifft', (['app_data'], {}), '(app_data)\n', (2196, 2206), True, 'import numpy as np\n'), ((2266, 2288), 'numpy.fft.ifft', 'np.fft.ifft', (['OFDM_data'], {}), '(OFDM_data)\n', (2277, 2288), True, 'import numpy as np\n'), ((2394, 2420), 'numpy.hstack', 'np.hstack', (['[cp, OFDM_time]'], {}), '([cp, OFDM_time])\n', (2403, 2420), True, 'import numpy as np\n'), ((2496, 2532), 'numpy.convolve', 'np.convolve', (['signal', 'channelResponse'], {}), '(signal, channelResponse)\n', (2507, 2532), True, 'import numpy as np\n'), ((3037, 3056), 'numpy.fft.fft', 'np.fft.fft', (['OFDM_RX'], {}), '(OFDM_RX)\n', (3047, 3056), True, 'import numpy as np\n'), ((3098, 3117), 'numpy.fft.fft', 'np.fft.fft', (['OFDM_RX'], {}), '(OFDM_RX)\n', (3108, 3117), True, 'import numpy as np\n'), ((4817, 4843), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['x', 'y'], {}), '(x, y)\n', (4837, 4843), False, 'from scipy import interpolate\n'), ((4853, 4893), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['x', 'y'], {'kind': '"""cubic"""'}), "(x, y, kind='cubic')\n", (4873, 4893), False, 'from scipy import interpolate\n'), ((5138, 5150), 'numpy.arange', 'np.arange', (['K'], {}), '(K)\n', (5147, 5150), True, 'import numpy as np\n'), ((667, 694), 'numpy.array', 'np.array', (['[allCarriers[-1]]'], {}), '([allCarriers[-1]])\n', (675, 694), True, 'import numpy as np\n'), ((2829, 2848), 'numpy.sqrt', 'np.sqrt', (['(sigma2 / 2)'], {}), '(sigma2 / 2)\n', (2836, 2848), True, 'import numpy as np\n'), ((3797, 3822), 'numpy.exp', 'np.exp', (['(1.0j * Hest_phase)'], {}), '(1.0j * Hest_phase)\n', (3803, 3822), True, 'import numpy as np\n'), ((4566, 4619), 'numpy.vstack', 'np.vstack', (['[demapping_table[C] for C in hardDecision]'], {}), '([demapping_table[C] for C in hardDecision])\n', (4575, 4619), True, 'import numpy as np\n'), ((5565, 5635), 'numpy.random.binomial', 'np.random.binomial', ([], {'n': '(1)', 'p': '(0.5)', 'size': '(payloadBits_per_OFDM - bitsLen,)'}), '(n=1, p=0.5, size=(payloadBits_per_OFDM - bitsLen,))\n', (5583, 5635), True, 'import numpy as np\n'), ((5656, 5683), 'numpy.hstack', 'np.hstack', (['[bits, bPadding]'], {}), '([bits, bPadding])\n', (5665, 5683), True, 'import numpy as np\n'), ((2850, 2883), 'numpy.random.randn', 'np.random.randn', (['*convolved.shape'], {}), '(*convolved.shape)\n', (2865, 2883), True, 'import numpy as np\n'), ((3721, 3745), 
'numpy.angle', 'np.angle', (['Hest_at_pilots'], {}), '(Hest_at_pilots)\n', (3729, 3745), True, 'import numpy as np\n'), ((4767, 4780), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (4775, 4780), True, 'import numpy as np\n'), ((2887, 2920), 'numpy.random.randn', 'np.random.randn', (['*convolved.shape'], {}), '(*convolved.shape)\n', (2902, 2920), True, 'import numpy as np\n'), ((5928, 5945), 'numpy.array', 'np.array', (['OFDM_TX'], {}), '(OFDM_TX)\n', (5936, 5945), True, 'import numpy as np\n')]
|
import functools
import pickle
import torch
import numpy as np
def get_labels_stats(labels):
labels_set = torch.unique(labels).numpy().tolist()
num_labels = len(labels_set)
n_sample_per_label = labels.shape[0] // num_labels
return num_labels, n_sample_per_label
def data_subset(data, labels, n_way, ways=None):
# WARNING: ASSUME CONTIGUOUS LABELS IN EQUAL NUMBER
num_labels, n_sample_per_label = get_labels_stats(labels)
if ways is None:
ways = np.random.choice(range(num_labels), n_way, replace=False)
subset_data = []
subset_labels = []
for new_l, l in enumerate(ways):
start_sample, end_sample = l*n_sample_per_label, (l+1)*n_sample_per_label
subset_data.append(data[start_sample : end_sample])
subset_labels.append(torch.LongTensor([new_l]*n_sample_per_label))
subset_data = torch.cat(subset_data, dim=0)
subset_labels = torch.cat(subset_labels, dim=0)
return subset_data, subset_labels
def extract_from_slice(cur_data_slice, select_train, select_test, train_set, test_set):
cur_train = cur_data_slice[select_train]
cur_test = cur_data_slice[select_test]
train_set.append(cur_train)
test_set.append(cur_test)
def split_train_test(data, labels, n_shot, n_val):
num_labels, n_sample_per_label = get_labels_stats(labels)
assert n_shot + n_val <= n_sample_per_label
train_set, test_set = [], []
train_labels, test_labels = [], []
for l in range(num_labels):
select_elems = np.random.choice(range(n_sample_per_label), n_shot + n_val, replace=False)
select_train = select_elems[:n_shot]
select_test = select_elems[n_shot:]
start_sample = l * n_sample_per_label
end_sample = start_sample + n_sample_per_label
extract_from_slice(data[start_sample:end_sample], select_train, select_test, train_set, test_set)
extract_from_slice(labels[start_sample:end_sample], select_train, select_test, train_labels, test_labels)
train_set = torch.cat(train_set, dim=0)
test_set = torch.cat(test_set, dim=0)
train_labels = torch.cat(train_labels, dim=0)
test_labels = torch.cat(test_labels, dim=0)
return train_set, train_labels, test_set, test_labels
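# Load a pickle of {class_key: [feature vectors]} and flatten it into 'data'/'labels' tensors.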
def load_pickle(file):
with open(file, 'rb') as f:
data = pickle.load(f)
labels = [np.full(shape=len(data[key]), fill_value=key) for key in data]
data = [features for key in data for features in data[key]]
dataset = dict()
dataset['data'] = torch.FloatTensor(np.stack(data, axis=0))
dataset['labels'] = torch.LongTensor(np.concatenate(labels))
return dataset
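# Cache the loaded dataset(s) so repeated episode sampling does not re-read the files.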
@functools.lru_cache()
def get_dataset_from_datapath(data_path):
original_data = []
original_labels = []
paths = data_path.split('&')
assert len(paths) == 1 or 'densenet-t' not in data_path, 'Not available for TieredImageNet'
for path in paths:
if data_path.endswith('.plk') or data_path.endswith('.pkl'):
dataset = load_pickle(path)
elif data_path.endswith('.pt'):
dataset = torch.load(path)
else:
assert False
original_data.append(dataset['data'])
cur_num_labels = len(set(dataset['labels'].numpy().tolist()))
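        # Offset the labels of each split so class ids stay distinct after concatenation; the 64/16/20 class counts below look like the usual miniImageNet train/val/test splits (assumption).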
if len(paths) == 1 or cur_num_labels == 64:
delta = 0
elif cur_num_labels == 16:
delta += 64
elif cur_num_labels == 20:
delta += 16+64
else:
raise ValueError
original_labels.append(delta+dataset['labels'])
original_data = torch.cat(original_data)
original_labels = torch.cat(original_labels)
return original_data, original_labels
def get_train_test_datasets(data_path, n_way, n_shot, n_val):
original_data, original_labels = get_dataset_from_datapath(data_path)
data, labels = data_subset(original_data, original_labels, n_way)
return split_train_test(data, labels, n_shot, n_val)
def get_train_test_datasets_labels(data_path, n_way, n_shot, n_val, ways=None):
original_data, original_labels = get_dataset_from_datapath(data_path)
num_labels, n_sample_per_label = get_labels_stats(original_labels)
if ways is None:
ways = np.random.choice(range(num_labels), n_way, replace=False)
selected_labels = [int(original_labels[i*n_sample_per_label]) for i in ways]
data, labels = data_subset(original_data, original_labels, n_way, ways=ways)
return split_train_test(data, labels, n_shot, n_val), selected_labels
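# Generator over all binary (2-way) tasks; the first value yielded is the number of pairs (before any crop).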
def get_all_pairs_datasets(data_path, n_way, crop, parts=None):
assert n_way == 2
original_data, original_labels = get_dataset_from_datapath(data_path)
num_labels, n_sample_per_label = get_labels_stats(original_labels)
if parts is None:
start_i = 0
yield int(num_labels) * int(num_labels-1) // 2
else:
start_i = parts
yield parts * (num_labels - parts)
for i in range(start_i, num_labels):
if crop and i == 5:
break
end_j = parts if parts is not None else i
for j in range(0, end_j):
ways = np.array([i, j])
label_a = int(original_labels[i*n_sample_per_label])
label_b = int(original_labels[j*n_sample_per_label])
data, labels = data_subset(original_data, original_labels, n_way, ways)
yield data, labels, label_a, label_b, 0, 1
|
[
"numpy.stack",
"torch.unique",
"torch.LongTensor",
"torch.load",
"torch.cat",
"pickle.load",
"numpy.array",
"functools.lru_cache",
"numpy.concatenate"
] |
[((2656, 2677), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (2675, 2677), False, 'import functools\n'), ((858, 887), 'torch.cat', 'torch.cat', (['subset_data'], {'dim': '(0)'}), '(subset_data, dim=0)\n', (867, 887), False, 'import torch\n'), ((908, 939), 'torch.cat', 'torch.cat', (['subset_labels'], {'dim': '(0)'}), '(subset_labels, dim=0)\n', (917, 939), False, 'import torch\n'), ((2008, 2035), 'torch.cat', 'torch.cat', (['train_set'], {'dim': '(0)'}), '(train_set, dim=0)\n', (2017, 2035), False, 'import torch\n'), ((2051, 2077), 'torch.cat', 'torch.cat', (['test_set'], {'dim': '(0)'}), '(test_set, dim=0)\n', (2060, 2077), False, 'import torch\n'), ((2097, 2127), 'torch.cat', 'torch.cat', (['train_labels'], {'dim': '(0)'}), '(train_labels, dim=0)\n', (2106, 2127), False, 'import torch\n'), ((2146, 2175), 'torch.cat', 'torch.cat', (['test_labels'], {'dim': '(0)'}), '(test_labels, dim=0)\n', (2155, 2175), False, 'import torch\n'), ((3577, 3601), 'torch.cat', 'torch.cat', (['original_data'], {}), '(original_data)\n', (3586, 3601), False, 'import torch\n'), ((3624, 3650), 'torch.cat', 'torch.cat', (['original_labels'], {}), '(original_labels)\n', (3633, 3650), False, 'import torch\n'), ((2305, 2319), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2316, 2319), False, 'import pickle\n'), ((794, 840), 'torch.LongTensor', 'torch.LongTensor', (['([new_l] * n_sample_per_label)'], {}), '([new_l] * n_sample_per_label)\n', (810, 840), False, 'import torch\n'), ((2538, 2560), 'numpy.stack', 'np.stack', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (2546, 2560), True, 'import numpy as np\n'), ((2607, 2629), 'numpy.concatenate', 'np.concatenate', (['labels'], {}), '(labels)\n', (2621, 2629), True, 'import numpy as np\n'), ((5109, 5125), 'numpy.array', 'np.array', (['[i, j]'], {}), '([i, j])\n', (5117, 5125), True, 'import numpy as np\n'), ((3091, 3107), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (3101, 3107), False, 'import torch\n'), ((112, 132), 'torch.unique', 'torch.unique', (['labels'], {}), '(labels)\n', (124, 132), False, 'import torch\n')]
|
"""Image 3D to vector / scalar conv net"""
import numpy as np
from micro_dl.networks.base_image_to_vector_net import BaseImageToVectorNet
class Image3DToVectorNet(BaseImageToVectorNet):
"""Uses 3D images as input"""
def __init__(self, network_config, predict=False):
"""Init
:param dict network_config: dict with all network associated parameters
"""
super().__init__(network_config, predict)
if not predict and self.config['num_dims'] == 3 and \
'num_initial_filters' in self.config:
depth = self.config['depth']
            assert np.mod(np.log2(network_config['depth']), 1) == 0, \
                'Input image dimensions have to be powers of 2 as the ' \
                'receptive field is the entire image'
assert network_config['width'] == network_config['depth'], \
'Expecting an isotropic shape'
@property
def _get_input_shape(self):
"""Return shape of input"""
if self.config['data_format'] == 'channels_first':
shape = (self.config['num_input_channels'],
self.config['depth'],
self.config['height'],
self.config['width'])
else:
shape = (self.config['depth'],
self.config['height'],
self.config['width'],
self.config['num_input_channels'])
return shape
|
[
"numpy.log2"
] |
[((622, 654), 'numpy.log2', 'np.log2', (["network_config['depth']"], {}), "(network_config['depth'])\n", (629, 654), True, 'import numpy as np\n')]
|
"""Defines the class for OVR (one-versus-rest) classification."""
import functools
import numpy as np
from .predictors import Classifier
class OVRClassifier(Classifier):
"""Multiclass classification by solving a binary problem for each class.
"OVR" stands for "one-versus-rest", meaning that for each class label, the
binary classification problem of whether or not data belongs to that label
is solved. The label with the highest estimated probability of being the
true label is the one predicted for each sample.
"""
# List of Classifier estimators corresponding to each class label.
# For a particular class, the corresponding estimator estimates the
# probability that an input belongs to that class.
_estimators = None
def __init__(self, base: type, *args, **kwargs):
"""Initialize an OVR classifier by specifying how to create the
underlying binary classifier.
Parameters
----------
base : type
A subclass of Classifier. Used to create binary classifiers for each
class label.
args : sequence, optional
Positional arguments for the binary classifier constructor.
kwargs : dict, optional
Keyword arguments for the binary classifier constructor.
"""
if not issubclass(base, Classifier):
raise TypeError(
"Parameter 'base' must be a classifier type.")
self.base = functools.partial(base, *args, **kwargs)
def fit(self, x, y, *args, **kwargs):
"""Fit the OVR classifier.
Parameters
----------
x : array-like
Explanatory variable.
y : array-like
Categorical response variable vector.
args : sequence, optional
Positional arguments to pass to the underlying binary classifier's
fit() method.
kwargs : dict, optional
Keyword arguments to pass to the underlying binary classifier's
fit() method.
Returns
-------
This OVRClassifier instance.
"""
y = self._preprocess_classes(y, max_classes=None)
self._estimators = []
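        # Fit one binary classifier per class: positives are the samples of class i, negatives are everything else.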
for i in range(len(self.classes)):
clf = self.base()
clf.fit(x, (y == i), *args, **kwargs)
self._estimators.append(clf)
return self
def predict_prob(self, x, *args, **kwargs):
"""Predict probability of each class for each input.
        Note that the returned values are degenerate "probabilities": each row
        is a one-hot indicator (exactly 0 or 1) for the predicted class.
Parameters
----------
x : array-like
Explanatory variable.
args : sequence, optional
Positional arguments to pass to each class label estimator's
`predict_prob` method.
kwargs : dict, optional
Keyword arguments to pass to each class label estimator's
`predict_prob` method.
"""
q = self.predict(x, *args, **kwargs)
p = np.zeros((len(x), len(self.classes)))
for i in range(len(x)):
j = np.where(self.classes == q[i])[0]
p[i, j] = 1
return p
def predict(self, x, *args, **kwargs):
"""Classify input samples according to their probability estimates.
Parameters
----------
x : array-like
Explanatory variable.
args : sequence, optional
Positional arguments to pass to each class label estimator's
`predict_prob` method.
kwargs : dict, optional
Keyword arguments to pass to each class label estimator's
`predict_prob` method.
Returns
-------
Vector of predicted class labels.
"""
p = [c.predict_prob(x, *args, **kwargs)[:, 1] for c in self._estimators]
return self.classes[np.argmax(p, axis=0)]
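# Hypothetical usage sketch (the binary classifier name below is illustrative, not part of this module):
#     ovr = OVRClassifier(SomeBinaryClassifier, *clf_args).fit(x_train, y_train)
#     y_pred = ovr.predict(x_test)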
|
[
"functools.partial",
"numpy.where",
"numpy.argmax"
] |
[((1477, 1517), 'functools.partial', 'functools.partial', (['base', '*args'], {}), '(base, *args, **kwargs)\n', (1494, 1517), False, 'import functools\n'), ((3890, 3910), 'numpy.argmax', 'np.argmax', (['p'], {'axis': '(0)'}), '(p, axis=0)\n', (3899, 3910), True, 'import numpy as np\n'), ((3126, 3156), 'numpy.where', 'np.where', (['(self.classes == q[i])'], {}), '(self.classes == q[i])\n', (3134, 3156), True, 'import numpy as np\n')]
|
"""Tools helping with the TIMIT dataset.
Based on the version from:
https://www.kaggle.com/mfekadu/darpa-timit-acousticphonetic-continuous-speech
"""
import re
from os.path import join, splitext, dirname
from pathlib import Path
import numpy as np
import pandas as pd
import soundfile as sf
from audio_loader.ground_truth.challenge import Challenge
PHON = ['b', 'd', 'g', 'p', 't', 'k', 'dx', 'q', # Stops
'bcl', 'dcl', 'gcl', 'kcl', 'pcl', 'tcl', # Closure
'jh', 'ch', # Affricates
's', 'sh', 'z', 'zh', 'f', 'th', 'v', 'dh', # Fricatives
'm', 'n', 'ng', 'em', 'en', 'eng', 'nx', # Nasals
'l', 'r', 'w', 'y', 'hh', 'hv', 'el', # Semivowels and Glides
'iy', 'ih', 'eh', 'ey', 'ae', 'aa', 'aw', 'ay', # Vowels
'ah', 'ao', 'oy', 'ow', 'uh', 'uw', 'ux', 'er',
'ax', 'ix', 'axr', 'ax-h',
'pau', 'h#', 'epi' # Non-speech event
]
SILENCES = ['pau', 'epi', 'h#']
CLOSURES = ['bcl', 'vcl', 'dcl', 'gcl', 'kcl', 'pcl', 'tcl']
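# Mapping from the original TIMIT phone labels to coarser phone classes (columns phon_class1..3), loaded from the bundled CSV.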
DF_PHON = pd.read_csv(join(dirname(__file__), 'timit_map.csv'), names=["original", "phon_class1", "phon_class2", "phon_class3"])
class TimitGroundTruth(Challenge):
"""Ground truth getter for TIMIT like datasets."""
    def __init__(self, timit_like_root_folderpath, datapath="data", gtpath="data",
                 gt_grouped_file=None, with_silences=True, phon_class="original",
                 fuse_closures=True, return_original_gt=False):
"""Compatible with the TIMIT DARPA dataset available on kaggle.
To use the TIMIT DARPA dataset leave the default arguments as is.
"""
super().__init__(timit_like_root_folderpath, datapath, gtpath)
self.with_silences = with_silences
self.phon_class = phon_class
self.fuse_closures = fuse_closures
self.return_original_gt = return_original_gt
if gt_grouped_file is None:
df_train = pd.read_csv(join(self.root_folderpath, "train_data.csv"))
df_train = df_train[pd.notnull(df_train['path_from_data_dir'])]
df_test = pd.read_csv(join(self.root_folderpath, "test_data.csv"))
df_test = df_test[pd.notnull(df_test['path_from_data_dir'])]
self.df_all = df_train.append(df_test, ignore_index=True)
else:
self.df_all = pd.read_csv(join(self.root_folderpath, gt_grouped_file))
self.df_all = self.df_all[pd.notnull(self.df_all['path_from_data_dir'])]
# create the is_audio column if not present
if "is_audio" not in self.df_all.keys():
self.df_all["is_audio"] = self.df_all["path_from_data_dir"].str.match(
".*.wav",
flags=re.IGNORECASE
)
if "is_converted_audio" in self.df_all.keys():
self.df_all = self.df_all[np.logical_and(self.df_all["is_audio"],
self.df_all["is_converted_audio"])]
else:
self.df_all = self.df_all[self.df_all["is_audio"]]
if self.phon_class == "original":
self.phon2index = {phon:index for index, phon in enumerate(PHON)}
self.index2phn = PHON
self.silences = SILENCES
else:
self.index2phn = DF_PHON[self.phon_class].unique()
# put silence at last
self.index2phn = np.append(np.delete(self.index2phn, np.where(self.index2phn == "sil")), "sil")
tmp_phon2index = {phon:index for index, phon in enumerate(self.index2phn)}
# from original label to desired label
self.phon2index = {phon:tmp_phon2index[DF_PHON.loc[DF_PHON["original"] == phon][self.phon_class].values[0]] for phon in DF_PHON["original"].unique()}
self.silences = ["sil"]
self.index2speaker_id = pd.unique(self.df_all["speaker_id"])
self.speaker_id2index = {speaker_id:index for index, speaker_id in enumerate(self.index2speaker_id)}
self.dict_gt = get_dict_gt(join(self.root_folderpath, self.gtpath), self.df_all)
self.set_gt_format()
@property
def number_of_speakers(self):
"""Return the number of speakers in the Timit challenge."""
return len(self.index2speaker_id)
@property
def training_set(self):
"""Return array of filepaths from training test."""
return self.df_all[self.df_all["test_or_train"] == "TRAIN"]["path_from_data_dir"].values
@property
def testing_set(self):
"""Return array of filepaths from testing test."""
return self.df_all[self.df_all["test_or_train"] == "TEST"]["path_from_data_dir"].values
@property
def gt_size(self):
"""Return the size of the ground_truth."""
size = 0
if self.phonetic:
size += self.phon_size
if self.word:
raise Exception("Word not yet implemented.")
if self.speaker_id:
size += 1
return size
@property
def phon_size(self):
if self.with_silences:
return len(self.index2phn)
return len(self.index2phn) - len(self.silences)
@property
def speaker_id_size(self):
return 1
def get_phonem_from(self, index):
"""Return the phoneme corresponding to the given index."""
return self.index2phn[index]
def get_index_from(self, phn):
"""Return the index corresponding to the given phoneme."""
return self.phon2index[phn]
def set_gt_format(self, phonetic=True, word=False, speaker_id=False):
"""Select the ground truth to show"""
self.phonetic = phonetic
self.word = word
self.speaker_id = speaker_id
def get_samples_time_in(self, filepath):
"""Return a list of tuples corresponding to the start and end times of each sample.
Parameters:
-----------
filepath: str
Filepath of the audio file we want to get the ground truth times.
"""
audio_id = self.get_id(filepath)
df_file, speaker_id = self.dict_gt[audio_id]
res_list = []
for row in df_file.iterrows():
res_list.append((row[1][0], row[1][1]))
return res_list
def get_gt_for(self, filepath):
"""Get tuples corresponding to the start, end times of each sample and
the ground truth expected.
Parameters:
-----------
filepath: str
Filepath of the audio file we want to get the ground truth.
"""
audio_id = self.get_id(filepath)
df_file, speaker_id = self.dict_gt[audio_id]
ys = np.zeros((len(df_file.index), self.gt_size))
res_list = []
i = 0
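        # When fuse_closures is set, a closure segment (e.g. 'bcl') is merged with the stop that follows it (e.g. 'b') and emitted as a single segment.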
if self.fuse_closures:
previous_label = None
previous_sample_begin = None
for row in df_file.iterrows():
sample_begin, sample_end = row[1][0], row[1][1]
self._fill_output(audio_id, sample_begin, sample_end, ys[i])
# other way to get gt label
gt_label = row[1][2]
if gt_label in CLOSURES:
previous_label = gt_label
previous_sample_begin = sample_begin
else:
if previous_label is not None and previous_label[0] == gt_label:
sample_begin = previous_sample_begin
if self.with_silences or np.sum(ys[i]) > 0:
if self.return_original_gt:
res_list.append((sample_begin, sample_end, (ys[i], gt_label)))
else:
res_list.append((sample_begin, sample_end, ys[i]))
previous_label = None
previous_sample_begin = None
i += 1
else:
for row in df_file.iterrows():
sample_begin, sample_end = row[1][0], row[1][1]
self._fill_output(audio_id, sample_begin, sample_end, ys[i])
if self.with_silences or np.sum(ys[i]) > 0:
res_list.append((sample_begin, sample_end, ys[i]))
i += 1
return res_list
def _fill_output(self, id_audio, sample_begin, sample_end, output):
"""Tool to fill an output array.
Parameters
----------
id_audio: str
id of the audio file
sample_begin: integer > 0
sample_end: integer > 0
output: np.array
Array to fill with ground truth (supposed zeros).
"""
if self.phonetic:
self._fill_phon_output(id_audio, sample_begin, sample_end, output[:self.phon_size])
if self.word:
raise Exception("Word not yet implemented.")
if self.speaker_id:
output[-self.speaker_id_size] = self._get_speaker_id(id_audio)
def get_majority_gt_at_sample(self, filepath, sample_begin, sample_end):
"""Return an integer that represent the majority class for a specific sample."""
output = []
if self.phonetic:
output += self._phon_majority(self.get_id(filepath), sample_begin, sample_end)
if self.word:
raise Exception("Word not yet implemented.")
if self.speaker_id:
            output.append(self._get_speaker_id(self.get_id(filepath)))
return output
def get_output_description(self):
"""Return a list that describe the output."""
output = []
if self.phonetic:
output += PHON
if self.word:
raise Exception("Word not yet implemented.")
if self.speaker_id:
output += "Speaker Id"
return output
def _phon_majority(self, id_audio, sample_begin, sample_end):
df_file, speaker_id = self.dict_gt[id_audio]
df_corresponding_time = df_file[np.logical_and(df_file["start_time"] < sample_end,
df_file["end_time"] >= sample_begin)]
if len(df_corresponding_time) > 1:
raise Exception("phon majority does not handle multiple labels")
return df_corresponding_time["phn"].values
def _fill_phon_output(self, id_audio, sample_begin, sample_end, output):
"""Tool to fill an output array.
Parameters
----------
id_audio: str
Id of the audio file.
sample_begin: integer > 0
sample_end: integer > 0
output: np.array
Array to modify/fill with ground truth.
"""
df_file, speaker_id = self.dict_gt[id_audio]
df_corresponding_time = df_file[np.logical_and(df_file["start_time"] <= sample_end,
df_file["end_time"] >= sample_begin)]
total_samples = sample_end - sample_begin
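        # Accumulate, for each phone, the fraction of the requested window that it covers.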
for row in df_corresponding_time.iterrows():
start_frame = max(row[1][0], sample_begin)
end_frame = min(row[1][1], sample_end)
if self.with_silences or self.phon2index[row[1][2]] < len(output):
output[self.phon2index[row[1][2]]] += (end_frame - start_frame) / total_samples
def _get_speaker_id(self, id_audio):
"""Tool to fill an output array.
Parameters
----------
id_audio: str
Id of the audio file.
output: np.array
Array to modify/fill with ground truth.
"""
_, speaker_id = self.dict_gt[id_audio]
return self.speaker_id2index[speaker_id]
def get_dict_gt(gt_folderpath, df_data):
"""Get dataframe corresponding to the gt."""
if gt_folderpath[-1] != "/":
gt_folderpath += "/"
dic = {}
for filename in Path(gt_folderpath).glob('**/*.PHN'):
id_fn = splitext(str(filename).replace(gt_folderpath, ""))[0]
speaker_id = df_data[df_data["path_from_data_dir"].str.contains(id_fn, regex=False)]["speaker_id"].iloc[0]
df_file = pd.read_csv(filename, names=["start_time", "end_time", "phn"], delimiter=" ")
dic[id_fn] = df_file, speaker_id
return dic
|
[
"numpy.sum",
"numpy.logical_and",
"pandas.read_csv",
"os.path.dirname",
"pandas.unique",
"pandas.notnull",
"pathlib.Path",
"numpy.where",
"os.path.join"
] |
[((1131, 1148), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (1138, 1148), False, 'from os.path import join, splitext, dirname\n'), ((3867, 3903), 'pandas.unique', 'pd.unique', (["self.df_all['speaker_id']"], {}), "(self.df_all['speaker_id'])\n", (3876, 3903), True, 'import pandas as pd\n'), ((12022, 12099), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'names': "['start_time', 'end_time', 'phn']", 'delimiter': '""" """'}), "(filename, names=['start_time', 'end_time', 'phn'], delimiter=' ')\n", (12033, 12099), True, 'import pandas as pd\n'), ((4049, 4088), 'os.path.join', 'join', (['self.root_folderpath', 'self.gtpath'], {}), '(self.root_folderpath, self.gtpath)\n', (4053, 4088), False, 'from os.path import join, splitext, dirname\n'), ((9920, 10011), 'numpy.logical_and', 'np.logical_and', (["(df_file['start_time'] < sample_end)", "(df_file['end_time'] >= sample_begin)"], {}), "(df_file['start_time'] < sample_end, df_file['end_time'] >=\n sample_begin)\n", (9934, 10011), True, 'import numpy as np\n'), ((10701, 10793), 'numpy.logical_and', 'np.logical_and', (["(df_file['start_time'] <= sample_end)", "(df_file['end_time'] >= sample_begin)"], {}), "(df_file['start_time'] <= sample_end, df_file['end_time'] >=\n sample_begin)\n", (10715, 10793), True, 'import numpy as np\n'), ((11781, 11800), 'pathlib.Path', 'Path', (['gt_folderpath'], {}), '(gt_folderpath)\n', (11785, 11800), False, 'from pathlib import Path\n'), ((1999, 2043), 'os.path.join', 'join', (['self.root_folderpath', '"""train_data.csv"""'], {}), "(self.root_folderpath, 'train_data.csv')\n", (2003, 2043), False, 'from os.path import join, splitext, dirname\n'), ((2077, 2119), 'pandas.notnull', 'pd.notnull', (["df_train['path_from_data_dir']"], {}), "(df_train['path_from_data_dir'])\n", (2087, 2119), True, 'import pandas as pd\n'), ((2155, 2198), 'os.path.join', 'join', (['self.root_folderpath', '"""test_data.csv"""'], {}), "(self.root_folderpath, 'test_data.csv')\n", (2159, 2198), False, 'from os.path import join, splitext, dirname\n'), ((2230, 2271), 'pandas.notnull', 'pd.notnull', (["df_test['path_from_data_dir']"], {}), "(df_test['path_from_data_dir'])\n", (2240, 2271), True, 'import pandas as pd\n'), ((2395, 2438), 'os.path.join', 'join', (['self.root_folderpath', 'gt_grouped_file'], {}), '(self.root_folderpath, gt_grouped_file)\n', (2399, 2438), False, 'from os.path import join, splitext, dirname\n'), ((2478, 2523), 'pandas.notnull', 'pd.notnull', (["self.df_all['path_from_data_dir']"], {}), "(self.df_all['path_from_data_dir'])\n", (2488, 2523), True, 'import pandas as pd\n'), ((2880, 2954), 'numpy.logical_and', 'np.logical_and', (["self.df_all['is_audio']", "self.df_all['is_converted_audio']"], {}), "(self.df_all['is_audio'], self.df_all['is_converted_audio'])\n", (2894, 2954), True, 'import numpy as np\n'), ((3454, 3487), 'numpy.where', 'np.where', (["(self.index2phn == 'sil')"], {}), "(self.index2phn == 'sil')\n", (3462, 3487), True, 'import numpy as np\n'), ((8102, 8115), 'numpy.sum', 'np.sum', (['ys[i]'], {}), '(ys[i])\n', (8108, 8115), True, 'import numpy as np\n'), ((7475, 7488), 'numpy.sum', 'np.sum', (['ys[i]'], {}), '(ys[i])\n', (7481, 7488), True, 'import numpy as np\n')]
|
import os
from collections import defaultdict
import sqlite3
import numpy as np
import pandas as pd
import lsst.afw.table as afw_table
import lsst.daf.persistence as dp
import lsst.geom
import desc.sims_ci_pipe as scp
def make_SourceCatalog(df):
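    # Build an afw SourceCatalog from a truth-summary DataFrame (id, ra, dec and per-band fluxes in nJy).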
bands = 'ugrizy'
schema = afw_table.SourceTable.makeMinimalSchema()
for band in bands:
schema.addField(f'flux_{band}', type=float, doc=f'{band} flux in nJy')
src_cat = afw_table.SourceCatalog(schema)
for iloc in range(len(df)):
row = df.iloc[iloc]
new_rec = src_cat.addNew()
new_rec.set('id', int(row['id']))
new_rec.set('coord_ra', lsst.geom.Angle(row.ra, lsst.geom.degrees))
new_rec.set('coord_dec', lsst.geom.Angle(row.dec, lsst.geom.degrees))
for band in bands:
colname = f'flux_{band}'
new_rec.set(colname, row[colname])
return src_cat
def get_point_sources(butler, visit, flux_type='base_PsfFlux'):
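    # Collect calibrated point-source fluxes (in nJy) from every src catalog of the visit into one DataFrame.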
datarefs = butler.subset('src', visit=visit)
dfs = []
for dataref in list(datarefs):
print(dataref.dataId)
src = scp.get_point_sources(dataref.get('src'))
calexp = dataref.get('calexp')
calib = calexp.getPhotoCalib()
flux, fluxerr = calib.instFluxToNanojansky(src, flux_type).transpose()
dfs.append(pd.DataFrame(data=dict(ra=np.degrees(src.get('coord_ra')),
dec=np.degrees(src.get('coord_dec')),
flux=flux, fluxerr=fluxerr)))
return pd.concat(dfs)
def match_meas_fluxes(butler, visit, star_truth_summary_file,
flux_type='base_PsfFlux', max_offset=0.1):
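    # Match measured point sources against the star truth database within max_offset arcsec and record true vs. measured fluxes.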
flux_col = f'{flux_type}_instFlux'
conn = sqlite3.connect(star_truth_summary_file)
radius = lsst.geom.Angle(max_offset, lsst.geom.arcseconds)
dfs = []
datarefs = butler.subset('src', visit=visit)
for i, dataref in enumerate(list(datarefs)):
print(i)
calib = dataref.get('calexp').getPhotoCalib()
src = scp.get_point_sources(dataref.get('src'))
ras = np.degrees(src.get('coord_ra'))
decs = np.degrees(src.get('coord_dec'))
ra_min, ra_max = min(ras), max(ras)
dec_min, dec_max = min(decs), max(decs)
query = f'''select * from truth_summary where
{ra_min} <= ra and ra <= {ra_max} and
{dec_min} <= dec and dec <= {dec_max}'''
truth_df = pd.read_sql(query, conn)
truth_cat = make_SourceCatalog(truth_df)
matches = afw_table.matchRaDec(truth_cat, src, radius)
num_matches = len(matches)
ids = np.zeros(num_matches, dtype=np.int)
offsets = np.zeros(num_matches, dtype=np.float)
true_fluxes = np.zeros(num_matches, dtype=np.float)
meas_fluxes = np.zeros(num_matches, dtype=np.float)
meas_fluxerrs = np.zeros(num_matches, dtype=np.float)
for i, match in enumerate(matches):
ids[i] = match.first['id']
offsets[i] = np.degrees(match.distance)*3600*1000.
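            # NOTE: 'band' is not a parameter of this function; it is read from module scope (set in the __main__ block below).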
true_fluxes[i] = match.first[f'flux_{band}']
meas_fluxes[i] = calib.instFluxToNanojansky(match.second[flux_col])
meas_fluxerrs[i] \
= calib.instFluxToNanojansky(match.second[flux_col + 'Err'])
dfs.append(pd.DataFrame(data=dict(id=ids, offset=offsets,
true_flux=true_fluxes,
meas_flux=meas_fluxes,
meas_fluxerr=meas_fluxerrs)))
df = pd.concat(dfs)
return df
def compute_delta_fluxes(stars_db_file, ids, mjd):
import desc.sims_truthcatalog as stc
lc_factory = stc.StellarLightCurveFactory(stars_db_file)
mjds = [mjd]
delta_fluxes = defaultdict(dict)
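    # Convert the (m0, dm) magnitudes from the light-curve factory into flux differences in nJy via the AB zero point: flux/Jy = 10**((8.9 - m)/2.5).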
for obj_id in ids:
dm, m0 = lc_factory.create(obj_id, mjds)
for band in dm:
delta_fluxes[obj_id][band] \
= (10.**((8.9 - (m0[band] + dm[band]))/2.5)
- 10.**((8.9 - m0[band])/2.5))*1e9
return delta_fluxes
if __name__ == '__main__':
star_truth_summary_file = '/global/cscratch1/sd/descim/star_truth/star_truth_summary.db'
repo = 'repo_lensed_sne'
butler = dp.Butler(repo)
visit = 906935
band = 'i'
outfile = f'src_truth_match_v{visit}-{band}.pkl'
if not os.path.isfile(outfile):
df = match_meas_fluxes(butler, visit, star_truth_summary_file)
df.to_pickle(outfile)
else:
df = pd.read_pickle(outfile)
|
[
"lsst.afw.table.matchRaDec",
"lsst.daf.persistence.Butler",
"numpy.degrees",
"desc.sims_truthcatalog.StellarLightCurveFactory",
"lsst.afw.table.SourceTable.makeMinimalSchema",
"numpy.zeros",
"collections.defaultdict",
"os.path.isfile",
"sqlite3.connect",
"pandas.read_sql",
"pandas.read_pickle",
"lsst.afw.table.SourceCatalog",
"pandas.concat"
] |
[((282, 323), 'lsst.afw.table.SourceTable.makeMinimalSchema', 'afw_table.SourceTable.makeMinimalSchema', ([], {}), '()\n', (321, 323), True, 'import lsst.afw.table as afw_table\n'), ((440, 471), 'lsst.afw.table.SourceCatalog', 'afw_table.SourceCatalog', (['schema'], {}), '(schema)\n', (463, 471), True, 'import lsst.afw.table as afw_table\n'), ((1541, 1555), 'pandas.concat', 'pd.concat', (['dfs'], {}), '(dfs)\n', (1550, 1555), True, 'import pandas as pd\n'), ((1735, 1775), 'sqlite3.connect', 'sqlite3.connect', (['star_truth_summary_file'], {}), '(star_truth_summary_file)\n', (1750, 1775), False, 'import sqlite3\n'), ((3587, 3601), 'pandas.concat', 'pd.concat', (['dfs'], {}), '(dfs)\n', (3596, 3601), True, 'import pandas as pd\n'), ((3727, 3770), 'desc.sims_truthcatalog.StellarLightCurveFactory', 'stc.StellarLightCurveFactory', (['stars_db_file'], {}), '(stars_db_file)\n', (3755, 3770), True, 'import desc.sims_truthcatalog as stc\n'), ((3807, 3824), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (3818, 3824), False, 'from collections import defaultdict\n'), ((4265, 4280), 'lsst.daf.persistence.Butler', 'dp.Butler', (['repo'], {}), '(repo)\n', (4274, 4280), True, 'import lsst.daf.persistence as dp\n'), ((2455, 2479), 'pandas.read_sql', 'pd.read_sql', (['query', 'conn'], {}), '(query, conn)\n', (2466, 2479), True, 'import pandas as pd\n'), ((2548, 2592), 'lsst.afw.table.matchRaDec', 'afw_table.matchRaDec', (['truth_cat', 'src', 'radius'], {}), '(truth_cat, src, radius)\n', (2568, 2592), True, 'import lsst.afw.table as afw_table\n'), ((2643, 2678), 'numpy.zeros', 'np.zeros', (['num_matches'], {'dtype': 'np.int'}), '(num_matches, dtype=np.int)\n', (2651, 2678), True, 'import numpy as np\n'), ((2697, 2734), 'numpy.zeros', 'np.zeros', (['num_matches'], {'dtype': 'np.float'}), '(num_matches, dtype=np.float)\n', (2705, 2734), True, 'import numpy as np\n'), ((2757, 2794), 'numpy.zeros', 'np.zeros', (['num_matches'], {'dtype': 'np.float'}), '(num_matches, dtype=np.float)\n', (2765, 2794), True, 'import numpy as np\n'), ((2817, 2854), 'numpy.zeros', 'np.zeros', (['num_matches'], {'dtype': 'np.float'}), '(num_matches, dtype=np.float)\n', (2825, 2854), True, 'import numpy as np\n'), ((2879, 2916), 'numpy.zeros', 'np.zeros', (['num_matches'], {'dtype': 'np.float'}), '(num_matches, dtype=np.float)\n', (2887, 2916), True, 'import numpy as np\n'), ((4381, 4404), 'os.path.isfile', 'os.path.isfile', (['outfile'], {}), '(outfile)\n', (4395, 4404), False, 'import os\n'), ((4530, 4553), 'pandas.read_pickle', 'pd.read_pickle', (['outfile'], {}), '(outfile)\n', (4544, 4553), True, 'import pandas as pd\n'), ((3026, 3052), 'numpy.degrees', 'np.degrees', (['match.distance'], {}), '(match.distance)\n', (3036, 3052), True, 'import numpy as np\n')]
|
import math
import copy
import cv2
import numpy as np
from .connected_component import ConnectedComponentData
from typing import List
__sw_median_max_ratio = 2
__height_max_ratio = 1.5
__max_chain_height = 150
__max_distance_multiplier = 3
__min_chain_size = 3
__max_average_gray_diff = 3
__gray_variance_coefficient = 1.25
__max_char_width_to_heigh_ratio = 1
# Produce the final set of letter chains and get their bounding boxes
def run(cc_data_filtered: List[ConnectedComponentData]):
chains = populate_pairs(cc_data_filtered)
# Get rid of chains if component height ratio > 2
chains = remove_if_pair_area_too_different(chains)
chains = remove_if_heights_too_different(chains)
chains = remove_if_grays_dissimilar(chains)
chains = remove_if_stroke_widths_too_different(chains)
    # This is Daniel's idea, and it only works well for some images
# chains = filter_by_chain_gray_variance(chains)
if len(chains) > 0:
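        # NOTE: this assignment only binds a local name; the module-level __max_chain_height used by filter_chains_by_height below keeps its default value.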
__max_chain_height = max([chain.get_height() for chain in chains])
chains = lengthen_chains(chains)
chains = remove_short_chains(chains)
chains = filter_chains_by_height(chains)
chains = filter_height_to_width_ratio(chains)
# chains = filter_by_expected_width_given_height_and_num_components(chains)
return chains
# Check each pair of connected components and produce a tuple of sufficiently close letter candidates
def populate_pairs(cc_data_filtered: List[ConnectedComponentData]):
# Only need to check each pair of elements once
chains = []
for i in range(len(cc_data_filtered)):
for j in range(i + 1, len(cc_data_filtered)):
# If the two components are close enough together, add them together in a chain
if is_within_relative_distance(cc_data_filtered[i], cc_data_filtered[j]):
chains.append(build_chain(cc_data_filtered[i], cc_data_filtered[j]))
return chains
def is_within_relative_distance(cc_1: ConnectedComponentData, cc_2: ConnectedComponentData):
# Ensure one letter candidate is not floating above the other
if cc_1.row_min >= cc_2.row_max or cc_2.row_min >= cc_1.row_max:
return False
    # Compute the distance from the bottom-right corner of cc_1 to the bottom-left corner of cc_2, since this gap should not change much between neighbouring letters
# Euclidean distance
dist = math.sqrt((cc_2.row_max - cc_1.row_max) ** 2 + (cc_2.col_min - cc_1.col_max) ** 2)
largest_width = max(cc_1.col_max - cc_1.col_min, cc_2.col_max - cc_2.col_min)
return dist <= largest_width * __max_distance_multiplier
class Chain:
def __init__(self):
self.chain = []
self.row_min = -1
self.row_max = -1
self.col_min = -1
self.col_max = -1
# Returns the coordinates for the bounding box: [top-left, top-right, bottom-right, bottom-left]
def get_bounding_box(self):
return [
[self.row_min, self.col_min], # Top-left
[self.row_min, self.col_max], # Top-right
[self.row_max, self.col_max], # Bottom-right
[self.row_max, self.col_min] # Bottom-left
]
def get_height(self):
return self.row_max - self.row_min
def get_width(self):
return self.col_max - self.col_min
# python does not allow multiple constructors....
def build_chain(cc_1: ConnectedComponentData, cc_2: ConnectedComponentData):
chain = Chain()
chain.row_min = min(cc_1.row_min, cc_2.row_min)
chain.row_max = max(cc_1.row_max, cc_2.row_max)
chain.col_min = min(cc_1.col_min, cc_2.col_min)
chain.col_max = max(cc_1.col_max, cc_2.col_max)
chain.chain = [cc_1, cc_2]
return chain
def build_chain_from_merge(row_min: int, row_max: int, col_min: int, col_max: int, ccs: List[ConnectedComponentData]):
chain = Chain()
chain.row_min = row_min
chain.row_max = row_max
chain.col_min = col_min
chain.col_max = col_max
chain.chain = ccs
return chain
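# Merge chain c2 into c1: take the union of the bounding boxes and de-duplicate the combined component list.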
def merge_chains(c1: Chain, c2: Chain):
c1.row_min = min(c1.row_min, c2.row_min)
c1.row_max = max(c1.row_max, c2.row_max)
c1.col_min = min(c1.col_min, c2.col_min)
c1.col_max = max(c1.col_max, c2.col_max)
c1.chain = list(set().union(c1.chain, c2.chain))
return c1
def remove_if_heights_too_different(chains: List[Chain]):
filtered_chains = []
for chain in chains:
cc_0 = chain.chain[0]
cc_1 = chain.chain[1]
height_0 = cc_0.row_max - cc_0.row_min
height_1 = cc_1.row_max - cc_1.row_min
# heights are non-zero from the component filtering step
        if height_0 / height_1 <= __height_max_ratio and height_1 / height_0 <= __height_max_ratio:
filtered_chains.append(chain)
return filtered_chains
def remove_if_stroke_widths_too_different(chains: List[Chain]):
filtered_chains = []
for chain in chains:
sw_median_0 = chain.chain[0].get_median_stroke_width()
sw_median_1 = chain.chain[1].get_median_stroke_width()
# see paper for reason for this magic number
        if sw_median_0 / sw_median_1 <= __sw_median_max_ratio and sw_median_1 / sw_median_0 <= __sw_median_max_ratio:
filtered_chains.append(chain)
return filtered_chains
def filter_height_to_width_ratio(chains: List[Chain]):
filtered_chains = []
for chain in chains:
if chain.get_height()/chain.get_width() <= 0.66:
filtered_chains.append(chain)
return filtered_chains
def remove_if_pair_area_too_different(chains: List[Chain]):
filtered_chains = []
for chain in chains:
cc_1 = chain.chain[0]
cc_2 = chain.chain[1]
        if cc_1.area / cc_2.area <= 5 and cc_2.area / cc_1.area <= 5:
filtered_chains.append(chain)
return filtered_chains
def remove_if_grays_dissimilar(chains: List[Chain]):
filtered_chains = []
for chain in chains:
avg_gray_0 = chain.chain[0].get_mean_gray()
avg_gray_1 = chain.chain[1].get_mean_gray()
if abs(avg_gray_1 - avg_gray_0) < __max_average_gray_diff:
filtered_chains.append(chain)
return filtered_chains
# return true if they share a connected component, false otherwise
def contain_new_chain_link(chain_1: Chain, chain_2: Chain):
    # If their bounding boxes do not overlap they cannot contain the same element; that check could be added here if this is too slow.
    # For now, cycle through all cc elements to see whether the two chains share a cc.
if chain_1 is chain_2:
return False
for cc_1 in chain_1.chain:
for cc_2 in chain_2.chain:
if cc_1 is cc_2:
return True
return False
# TODO: build a doubly linked list implementation to reduce time complexity to n^2
def lengthen_chains(chains: List[Chain]):
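    # Repeatedly merge chains that share a connected component until no further merges occur (transitive closure of the pair relation).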
chains_copy = list.copy(chains)
lengthened_chains = []
i = 0
while i < len(chains_copy):
altered = False
lengthened_chain = chains_copy[i]
j = i + 1
while j < len(chains_copy):
if contain_new_chain_link(chains_copy[i], chains_copy[j]):
altered = True
                # Make a new larger chain that is the union of the 2
                lengthened_chain = merge_chains(lengthened_chain, chains_copy[j])
                # This ensures we will catch all relations of nearby components
chains_copy[j] = lengthened_chain
j += 1
if not altered:
i += 1
lengthened_chains.append(lengthened_chain)
else:
chains_copy[i] = lengthened_chain
# Return unique elements
return list(set(lengthened_chains))
# This is a function <NAME> thought would be good
def filter_by_chain_gray_variance(chains: List[Chain]):
filtered_chains = []
gray_variances = []
areas = []
for i in range(len(chains)):
chain = chains[i]
count = 0
average_gray = 0
area = 0
for cc in chain.chain:
count += len(cc.grays)
area += cc.area
for g in cc.grays:
average_gray += g
average_gray = average_gray/count
variance_gray = 0
for cc in chain.chain:
for g in cc.grays:
variance_gray += (g - average_gray)**2
variance_gray = variance_gray/count
gray_variances.append(variance_gray)
areas.append(area)
print((variance_gray, area))
max_gray_variance = np.average(gray_variances)*__gray_variance_coefficient
for i in range(len(chains)):
# if gray_variances[i] <= max_gray_variance:
if gray_variances[i]/areas[i] < 0.5: # This might be a better filter?
filtered_chains.append(chains[i])
return filtered_chains
def filter_chains_by_height(chains: List[Chain]):
filtered_chains = []
for chain in chains:
if chain.row_max - chain.row_min <= __max_chain_height:
filtered_chains.append(chain)
return filtered_chains
def remove_short_chains(chains: List[Chain]):
long_chains = []
for chain in chains:
if len(chain.chain) >= __min_chain_size:
long_chains.append(chain)
return long_chains
def filter_by_expected_width_given_height_and_num_components(chains: List[Chain]):
filtered_chains = []
for chain in chains:
num_cc = len(chain.chain)
expected_width_upperbound = num_cc * __max_char_width_to_heigh_ratio*chain.get_height()
if chain.get_width() <= expected_width_upperbound:
filtered_chains.append(chain)
return filtered_chains
# Default color is red
def make_image_with_bounding_boxes(img, chains: List[Chain], color=(0, 0, 255)):
img_drawn = copy.deepcopy(img)
for chain in chains:
# Bounding-box top-left clockwise
bb = chain.get_bounding_box()
top_left = (bb[0][1], bb[0][0])
bottom_right = (bb[2][1], bb[2][0])
cv2.rectangle(img_drawn, top_left, bottom_right, color, 2)
return img_drawn
|
[
"copy.deepcopy",
"numpy.average",
"math.sqrt",
"cv2.rectangle"
] |
[((2325, 2412), 'math.sqrt', 'math.sqrt', (['((cc_2.row_max - cc_1.row_max) ** 2 + (cc_2.col_min - cc_1.col_max) ** 2)'], {}), '((cc_2.row_max - cc_1.row_max) ** 2 + (cc_2.col_min - cc_1.col_max\n ) ** 2)\n', (2334, 2412), False, 'import math\n'), ((9658, 9676), 'copy.deepcopy', 'copy.deepcopy', (['img'], {}), '(img)\n', (9671, 9676), False, 'import copy\n'), ((8407, 8433), 'numpy.average', 'np.average', (['gray_variances'], {}), '(gray_variances)\n', (8417, 8433), True, 'import numpy as np\n'), ((9874, 9932), 'cv2.rectangle', 'cv2.rectangle', (['img_drawn', 'top_left', 'bottom_right', 'color', '(2)'], {}), '(img_drawn, top_left, bottom_right, color, 2)\n', (9887, 9932), False, 'import cv2\n')]
|
import collections
import numpy
__all__ = [
"Output",
]
Output = collections.namedtuple("Output", ["type", "format", "time", "labels", "data"])
def to_output(file_type, file_format, labels_order, headers, times, labels, variables):
"""Create an Output namedtuple."""
outputs = [
Output(
file_type,
file_format,
time,
numpy.array(label),
{k: v for k, v in zip(headers, numpy.transpose(variable))},
)
for time, label, variable in zip(times, labels, variables)
]
return (
[reorder_labels(out, labels_order) for out in outputs]
if labels_order is not None and file_type == "element"
else outputs
)
def reorder_labels(data, labels):
"""Reorder output or save cell data according to input labels."""
if len(data.labels) != len(labels):
raise ValueError()
mapper = {k: v for v, k in enumerate(data.labels)}
idx = [mapper[label] for label in labels]
data.labels[:] = data.labels[idx]
for k, v in data.data.items():
data.data[k] = v[idx]
return data
|
[
"numpy.transpose",
"numpy.array",
"collections.namedtuple"
] |
[((73, 151), 'collections.namedtuple', 'collections.namedtuple', (['"""Output"""', "['type', 'format', 'time', 'labels', 'data']"], {}), "('Output', ['type', 'format', 'time', 'labels', 'data'])\n", (95, 151), False, 'import collections\n'), ((391, 409), 'numpy.array', 'numpy.array', (['label'], {}), '(label)\n', (402, 409), False, 'import numpy\n'), ((454, 479), 'numpy.transpose', 'numpy.transpose', (['variable'], {}), '(variable)\n', (469, 479), False, 'import numpy\n')]
|
import numpy as np
x = 1.0
y = 2.0
#exponents and logarithms
print(np.exp(x)) #e^x
print(np.log(x)) #ln x
print(np.log10(x)) #log_10 x
print(np.log2(x)) #log_2 x
#min/max/misc
print(np.fabs(x)) #absolute value as a float
print(np.fmin(x,y)) #min of x and y
print(np.fmax(x,y)) #max of x and y
#populate arrays
n = 100
z = np.arange(n,dtype=float) #make an array
z *= 2.0*np.pi / float(n-1) #z=[0,2*pi]
sin_z = np.sin(z) #an array of sin(z)
#interpolation
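#np.interp linearly interpolates sin(z) at 0.75; compare with the exact np.sin(0.75) below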
print(np.interp(0.75,z,sin_z))
print(np.sin(0.75))
print(z)
print(sin_z)
|
[
"numpy.fmin",
"numpy.fmax",
"numpy.log",
"numpy.log2",
"numpy.sin",
"numpy.arange",
"numpy.exp",
"numpy.fabs",
"numpy.interp",
"numpy.log10"
] |
[((327, 352), 'numpy.arange', 'np.arange', (['n'], {'dtype': 'float'}), '(n, dtype=float)\n', (336, 352), True, 'import numpy as np\n'), ((416, 425), 'numpy.sin', 'np.sin', (['z'], {}), '(z)\n', (422, 425), True, 'import numpy as np\n'), ((69, 78), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (75, 78), True, 'import numpy as np\n'), ((91, 100), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (97, 100), True, 'import numpy as np\n'), ((114, 125), 'numpy.log10', 'np.log10', (['x'], {}), '(x)\n', (122, 125), True, 'import numpy as np\n'), ((143, 153), 'numpy.log2', 'np.log2', (['x'], {}), '(x)\n', (150, 153), True, 'import numpy as np\n'), ((185, 195), 'numpy.fabs', 'np.fabs', (['x'], {}), '(x)\n', (192, 195), True, 'import numpy as np\n'), ((229, 242), 'numpy.fmin', 'np.fmin', (['x', 'y'], {}), '(x, y)\n', (236, 242), True, 'import numpy as np\n'), ((266, 279), 'numpy.fmax', 'np.fmax', (['x', 'y'], {}), '(x, y)\n', (273, 279), True, 'import numpy as np\n'), ((471, 496), 'numpy.interp', 'np.interp', (['(0.75)', 'z', 'sin_z'], {}), '(0.75, z, sin_z)\n', (480, 496), True, 'import numpy as np\n'), ((502, 514), 'numpy.sin', 'np.sin', (['(0.75)'], {}), '(0.75)\n', (508, 514), True, 'import numpy as np\n')]
|
# congklak game environment
# version 1.0.0
import numpy as np
import random
class congklak_board:
def __init__(self):
# array to save the score, also function as the big holes' stone counter
self.score = np.full(shape=2, fill_value=0, dtype=np.int)
# array to save the state of the board (small holes' stone counter)
self.state = None
# board size
self.N = None
# starting hole
self.starting_hole = None
# turn player marker (0 or 1, the player who goes first is 0 and the other one is 1)
self.turn = None
# the state of the game (if the game is finished, done = True)
self.done = False
# the ruleset being used for the game
self.ruleset = None
# logging variables
self.turns_count = None
self.games_count = None
self.log = None
def setup(self, board_size, max_iter, rule='original'):
# set up the board state and the marker for turn player
self.ruleset = rule
self.N = board_size
self.state = np.full(shape=int(2*self.N), fill_value=self.N, dtype=np.int)
self.turn = 0
self.turns_count = 0
self.games_count = 0
self.log = None
self.max_iter = max_iter
def reset(self):
# reset the board
if self.N == None:
return print("Setup the board first.")
else:
self.state = np.full(shape=int(2*self.N), fill_value=self.N, dtype=np.int)
self.score = np.full(shape=2, fill_value=0, dtype=np.int)
self.done = False
self.turn = 0
self.turns_count = 0
def observation_space(self):
if self.N == None:
return print("Setup the board first.")
else:
return (2*self.N)**2
def action_space(self):
if self.N == None:
return print("Setup the board first.")
else:
return self.N
def step(self, action):
# update the state of the board and score
if self.N == None:
return print("Setup the board first.")
else:
# check if the action is legal
if self.is_legal(action):
# do the rotation
last_hole, scoring = self.rotation()
# if the last hole is the turn player's big hole, scoring = True --> wait for the new selection of the starting hole
# but if the last hole is not big hole, scoring = False --> then...
if scoring == False:
# if the last hole's stones = 1 --> 2 scenarios
if self.state[last_hole] == 1:
# if the last hole is the turn player's hole --> shooting --> end turn
if int(last_hole/self.N) == self.turn:
# the shooting function here is already modified.
# if the hole across of the last hole is NOT empty --> do the shooting
# else --> do nothing
self.shooting(last_hole)
self.end_turn()
# if the last hole is the opponent's hole --> end turn
else:
self.end_turn()
# if the last hole's stones > 1
else:
# if the ruleset being used is the original ruleset
if self.ruleset == 'original':
# action's range is [0,N-1], last hole's range is [0, 2N-1], self.state's range is [0, 2N-1].
# Action needs to be increased by self.turn*self.N to match the self.state's range.
# The last hole needs to be reduced by self.turn*self.N to match the action's range.
# repeat the rotation with starting_hole = last_hole
self.step(last_hole - self.turn*self.N)
# if the ruleset being used is the ruleset suggested by Kasim
elif self.ruleset == 'kasim2016':
# if the last hole is the turn player's hole --> same as in the original ruleset
# repeat the rotation with starting_hole = last_hole
if int(last_hole/self.N) == self.turn:
self.step(last_hole - self.turn*self.N)
# if the last hole is the opponent's hole --> end turn (rule revision by Kasim)
else:
self.end_turn()
else:
#return print("The move is illegal.")
pass
def rotation(self):
scoring = False
hole = int(self.starting_hole + self.turn*self.N)
stones = self.state[hole]
self.state[hole] = 0
while stones > 0:
hole += 1
if int(hole - self.turn*self.N) == self.N:
self.score[self.turn] += 1
scoring = True
stones -= 1
if stones > 0:
hole = hole%(2*self.N)
self.state[hole] += 1
scoring = False
stones -= 1
return hole, scoring
def shooting(self, last_hole):
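        # Capture rule ("shooting"): the last stone landed alone in one of the turn player's holes, so take it plus every stone in the hole directly across, provided that hole is not empty.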
d = abs(last_hole - ((self.N-1) + self.turn))
hole_across = (self.N-1) + int(not(self.turn)) - d*(2*self.turn - 1)
# if the hole across of the last hole is empty --> do nothing
if self.state[hole_across] == 0:
pass
# if the hole across of the last hole is NOT empty --> do the shooting
else:
self.score[self.turn] += self.state[last_hole] + self.state[hole_across]
self.state[last_hole] = 0
self.state[hole_across] = 0
#print(N-1, int(not(self.turn)), d*(2*self.turn - 1))
def end_turn(self):
# check for the winner, if there is a winner --> done = True (end the game)
if self.score[self.turn] > self.N**2:
self.done = True
self.games_count += 1
        # if both players' scores are equal to N**2 --> draw --> done = True (end the game)
elif self.score[self.turn] == self.score[int(not(self.turn))] and self.score[self.turn] == self.N**2:
self.done = True
self.games_count += 1
# else --> continue the game, pass the turn to the opponent
else:
self.turn = int(not(self.turn))
self.turns_count += 1
def is_legal(self, action):
# check whether the move is legal/valid or not
self.starting_hole = action
hole = int(self.starting_hole + self.turn*self.N)
if self.state[hole] == 0:
return False
else:
return True
def whose_turn(self):
# check whose turn now
if self.N == None:
return print("Setup the board first.")
else:
return self.turn
def possible_action(self):
        # return all possible/valid actions (non-empty holes) for the turn player, e.g. to sample a random move from
pa = np.array([], dtype=np.int)
for i in range (len(self.state[(0+self.turn*self.N):(self.N+self.turn*self.N)])):
if self.state[(0+self.turn*self.N):(self.N+self.turn*self.N)][i] != 0:
pa = np.append(pa, i)
return pa
def one_hot_state(self):
# convert board state to one-hot-encoded board state
input_units = np.zeros(((2*self.N)**2,))
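        # each hole owns a block of 2N units; counts above 2N-1 store the overflow amount in the block's last unit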
for i in range (len(self.state)):
if self.state[i] > (2*self.N-1):
input_units[i*2*self.N + 2*self.N-1] = self.state[i] - (2*self.N-1)
else:
input_units[i*2*self.N + self.state[i]] = 1
return input_units
def logging(self, score):
# log the score
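        # columns (assuming 'score' holds both players' scores): [score_p0, score_p1, running_avg_p0, running_avg_p1, turns]; the averages are filled over the last 20% of max_iter games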
new_log = np.concatenate((score, 0.0, 0.0, self.turns_count), axis=None).reshape(1,-1)
if self.log is None:
self.log = new_log
else:
self.log = np.append(self.log, new_log, axis=0)
if len(self.log) >= self.max_iter*0.2:
self.log[-1, 2] = np.average(self.log[int(len(self.log)-0.2*self.max_iter):len(self.log), 0])
self.log[-1, 3] = np.average(self.log[int(len(self.log)-0.2*self.max_iter):len(self.log), 1])
def log_to_txt(self, fdir):
# saving the log to txt file
np.savetxt(fdir + ".txt", self.log)
print("Log score to text file")
|
[
"numpy.full",
"numpy.savetxt",
"numpy.zeros",
"numpy.append",
"numpy.array",
"numpy.concatenate"
] |
[((227, 271), 'numpy.full', 'np.full', ([], {'shape': '(2)', 'fill_value': '(0)', 'dtype': 'np.int'}), '(shape=2, fill_value=0, dtype=np.int)\n', (234, 271), True, 'import numpy as np\n'), ((7282, 7308), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int'}), '([], dtype=np.int)\n', (7290, 7308), True, 'import numpy as np\n'), ((7651, 7681), 'numpy.zeros', 'np.zeros', (['((2 * self.N) ** 2,)'], {}), '(((2 * self.N) ** 2,))\n', (7659, 7681), True, 'import numpy as np\n'), ((8581, 8616), 'numpy.savetxt', 'np.savetxt', (["(fdir + '.txt')", 'self.log'], {}), "(fdir + '.txt', self.log)\n", (8591, 8616), True, 'import numpy as np\n'), ((1604, 1648), 'numpy.full', 'np.full', ([], {'shape': '(2)', 'fill_value': '(0)', 'dtype': 'np.int'}), '(shape=2, fill_value=0, dtype=np.int)\n', (1611, 1648), True, 'import numpy as np\n'), ((8203, 8239), 'numpy.append', 'np.append', (['self.log', 'new_log'], {'axis': '(0)'}), '(self.log, new_log, axis=0)\n', (8212, 8239), True, 'import numpy as np\n'), ((7503, 7519), 'numpy.append', 'np.append', (['pa', 'i'], {}), '(pa, i)\n', (7512, 7519), True, 'import numpy as np\n'), ((8029, 8091), 'numpy.concatenate', 'np.concatenate', (['(score, 0.0, 0.0, self.turns_count)'], {'axis': 'None'}), '((score, 0.0, 0.0, self.turns_count), axis=None)\n', (8043, 8091), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 12 11:56:46 2019
@author: <NAME>
Assignment 1:
- Convert a RAW rgb image to a YUV format then Reconstruct the RGB channels.
- Compute the PSNR between the source rgb image and the Reconstructed RGB using
4:4:4, 4:2:2 and 4:1:1 format.
- You may select to use the average or the median, you shall show the different resultant images.
- Remember to normalize the pixel values by 128.
- You may use any three of the RAW images provided under the class Moodle account.
- You can use any language, however, a low level language is recommended.
Report is due on October 18, 2019.
"""
# In[1]: Import Packages
## Importing OpenCV(cv2) module
import cv2
import math
import numpy as np
from scipy import misc
# In[2]:
def Read_Image(Image_Path):
## Read RGB image
img = cv2.imread(Image_Path)
return(img)
def Display_Image(Name,Image):
## Output img with window name as 'image'
cv2.imshow(Name, Image)
## Save Image
cv2.imwrite(Name+'.png',Image)
## Maintain output window utill, user presses a key
cv2.waitKey(2000)
## Destroying present windows on screen
cv2.destroyAllWindows()
return(1)
# In[2-1]: Set Height and Width
W = 512
H = 512
# In[2-2]: Read Image and Reshape
img = np.fromfile("Lena Gray Raw Image.txt", dtype='uint8', sep="")
img = np.reshape(img, (W, H))
'''Please remove # below to proceed with barbara_gray and comment at the previous two lines'''
#img = misc.imread('barbara_gray.bmp', flatten= 1)
Display_Image("Original Image", img)
Image = cv2.cvtColor(img,cv2.COLOR_GRAY2RGB) #Convert to 3D Image
# In[3]:
def Normalize(Image):
return (Image / 128)
def RGB_To_YUV(RGB_Image):
RGB_Image = Normalize(RGB_Image)
Coeff = np.array([[0.299, -0.14713, 0.615],
[0.587, -0.28886, -0.51499],
[0.114, 0.436, -0.10001]])
YUV_Image = RGB_Image.dot(Coeff)
YUV_Image = YUV_Image*128
YUV_Image = YUV_Image.astype(np.uint8)
YUV_Image[:,:,1] += 128 #b1
YUV_Image[:,:,2] += 128 #b2
return(YUV_Image)
# In[3-1]: 4:4:4 means no downsampling of the chroma channels.
yuv_444 = RGB_To_YUV(Image)
Display_Image("YUV Image 444", yuv_444)
# In[4]:
def YUV_To_RGB(YUV_Image):
Coeff = np.array([[1, 1, 1],
[0, -0.39465, 2.03211],
[1.13983, -0.58060, 0]])
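    # NOTE: the +128 offsets added to U and V in RGB_To_YUV are not removed before this matrix product; the subtraction below acts on channels of the RGB result instead, so the inverse is only approximate.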
RGB_Image = YUV_Image.dot(Coeff)
RGB_Image = RGB_Image.astype(np.uint8)
RGB_Image[:,:,0] -= 128 #b0
RGB_Image[:,:,1] -= 128 #b1
return(RGB_Image)
# In[4-1]:
Recover_rgb_444 = YUV_To_RGB(yuv_444)
Display_Image("Recover RGB Image 444", Recover_rgb_444)
# In[5]:
def Compute_MSE(OriginalImage,RecoveriedImage):
err = 0.0
w = np.shape(OriginalImage)[0] #Width
h = np.shape(OriginalImage)[1] #Hight
    err = np.sum((OriginalImage.astype(np.float64) - RecoveriedImage.astype(np.float64)) ** 2)  # cast to float to avoid uint8 wrap-around
err /= (w * h)
return(err)
def Compute_PSNR(Computed_MSE):
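    # PSNR = 20 * log10(MAX_PIXEL / sqrt(MSE)), with MAX_PIXEL = 255 for 8-bit images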
if (Computed_MSE == 0):
return 100
else:
PIXEL_MAX = 255
PSNR = 20 * math.log10(PIXEL_MAX / math.sqrt(Computed_MSE))
return(PSNR)
# In[5-1]:
mse_1 = Compute_MSE(Image,Recover_rgb_444)
psnr_1 = Compute_PSNR(mse_1)
# In[6]:
def Filter(Array2D,SamplingType):
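    # Walk the image in 2x2 blocks: '422' copies the left column into the right one, '411' replicates the top-left sample, 'Mean' replaces the block with its average.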
W = np.shape(Array2D)[0]
H = np.shape(Array2D)[1]
for Row in range(0,W,2):
for Col in range(0,H,2):
Temp = Array2D[Row:Row+2, Col:Col+2]
if(SamplingType == '422'):
Temp[:,1] = Temp[:,0]
elif(SamplingType == '411'):
Temp[:,:] = Temp[0,0]
elif(SamplingType == 'Mean'):
Temp = np.mean(Temp.ravel())
else:
return(0)
Array2D[Row:Row+2, Col:Col+2] = Temp
return(Array2D)
# In[7]:
## 4:2:2 means 2:1 horizontal downsampling, with no vertical downsampling.
## Every scan line contains four Y samples for every two U or V samples.
def Get_422_Partitioning(YUVImage):
Y = YUVImage[:,:,0]
U = Filter(YUVImage[:,:,1],'422')
V = Filter(YUVImage[:,:,2],'422')
New_Image = cv2.merge((Y,U,V))
return (New_Image)
# In[7-1]:
yuv_422 = Get_422_Partitioning(yuv_444)
rgb_422 = YUV_To_RGB(yuv_422)
Display_Image("Recover RGB Image 422", rgb_422)
mse_2 = Compute_MSE(Image,rgb_422)
psnr_2 = Compute_PSNR(mse_2)
# In[8]:
## 4:1:1 means 4:1 horizontal downsampling, with no vertical downsampling.
## Every scan line contains four Y samples for each U and V sample.
def Get_411_Partitioning(YUVImage):
Y = YUVImage[:,:,0]
U = Filter(YUVImage[:,:,1],'411')
V = Filter(YUVImage[:,:,2],'411')
New_Image = cv2.merge((Y,U,V))
return (New_Image)
# In[8-1]:
yuv_411 = Get_411_Partitioning(yuv_444)
rgb_411 = YUV_To_RGB(yuv_411)
Display_Image("Recover RGB Image 411", rgb_411)
mse_3 = Compute_MSE(Image,rgb_411)
psnr_3 = Compute_PSNR(mse_3)
# In[9]:
def Get_Mean_Partitioning(YUVImage):
Y = YUVImage[:,:,0]
U = Filter(YUVImage[:,:,1],'Mean')
V = Filter(YUVImage[:,:,2],'Mean')
New_Image = cv2.merge((Y,U,V))
return (New_Image)
# In[9-1]:
yuv_Mean = Get_Mean_Partitioning(yuv_444)
rgb_Mean = YUV_To_RGB(yuv_Mean)
Display_Image("Recover RGB Image Mean", rgb_Mean)
mse_4 = Compute_MSE(Image,rgb_Mean)
psnr_4 = Compute_PSNR(mse_4)
# In[10]:
print("\n")
print("At 444 Format,the MSE= ",mse_1,"and PSNR= ",psnr_1)
print("At 422 Format,the MSE= ",mse_2,"and PSNR= ",psnr_2)
print("At 411 Format,the MSE= ",mse_3,"and PSNR= ",psnr_3)
print("At Mean Format,the MSE= ",mse_4,"and PSNR= ",psnr_4)
# In[10]:
def CV2_Library(Image_Path):
img_yuv = cv2.cvtColor(Image_Path, cv2.COLOR_BGR2YUV)
Display_Image("BGR2YUV",img_yuv)
img_bgr =cv2.cvtColor(img_yuv,cv2.COLOR_YUV2BGR)
Display_Image("YUV2BGR",img_bgr)
return(0)
#CV2_Library(Image)
|
[
"numpy.sum",
"math.sqrt",
"numpy.fromfile",
"cv2.cvtColor",
"cv2.imwrite",
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.shape",
"cv2.imread",
"numpy.array",
"numpy.reshape",
"cv2.merge",
"cv2.imshow"
] |
[((1307, 1368), 'numpy.fromfile', 'np.fromfile', (['"""Lena Gray Raw Image.txt"""'], {'dtype': '"""uint8"""', 'sep': '""""""'}), "('Lena Gray Raw Image.txt', dtype='uint8', sep='')\n", (1318, 1368), True, 'import numpy as np\n'), ((1375, 1398), 'numpy.reshape', 'np.reshape', (['img', '(W, H)'], {}), '(img, (W, H))\n', (1385, 1398), True, 'import numpy as np\n'), ((1592, 1629), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_GRAY2RGB'], {}), '(img, cv2.COLOR_GRAY2RGB)\n', (1604, 1629), False, 'import cv2\n'), ((828, 850), 'cv2.imread', 'cv2.imread', (['Image_Path'], {}), '(Image_Path)\n', (838, 850), False, 'import cv2\n'), ((954, 977), 'cv2.imshow', 'cv2.imshow', (['Name', 'Image'], {}), '(Name, Image)\n', (964, 977), False, 'import cv2\n'), ((1004, 1037), 'cv2.imwrite', 'cv2.imwrite', (["(Name + '.png')", 'Image'], {}), "(Name + '.png', Image)\n", (1015, 1037), False, 'import cv2\n'), ((1097, 1114), 'cv2.waitKey', 'cv2.waitKey', (['(2000)'], {}), '(2000)\n', (1108, 1114), False, 'import cv2\n'), ((1175, 1198), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1196, 1198), False, 'import cv2\n'), ((1805, 1901), 'numpy.array', 'np.array', (['[[0.299, -0.14713, 0.615], [0.587, -0.28886, -0.51499], [0.114, 0.436, -\n 0.10001]]'], {}), '([[0.299, -0.14713, 0.615], [0.587, -0.28886, -0.51499], [0.114, \n 0.436, -0.10001]])\n', (1813, 1901), True, 'import numpy as np\n'), ((2357, 2425), 'numpy.array', 'np.array', (['[[1, 1, 1], [0, -0.39465, 2.03211], [1.13983, -0.5806, 0]]'], {}), '([[1, 1, 1], [0, -0.39465, 2.03211], [1.13983, -0.5806, 0]])\n', (2365, 2425), True, 'import numpy as np\n'), ((2943, 2989), 'numpy.sum', 'np.sum', (['((OriginalImage - RecoveriedImage) ** 2)'], {}), '((OriginalImage - RecoveriedImage) ** 2)\n', (2949, 2989), True, 'import numpy as np\n'), ((4282, 4302), 'cv2.merge', 'cv2.merge', (['(Y, U, V)'], {}), '((Y, U, V))\n', (4291, 4302), False, 'import cv2\n'), ((4838, 4858), 'cv2.merge', 'cv2.merge', (['(Y, U, V)'], {}), '((Y, U, V))\n', (4847, 4858), False, 'import cv2\n'), ((5255, 5275), 'cv2.merge', 'cv2.merge', (['(Y, U, V)'], {}), '((Y, U, V))\n', (5264, 5275), False, 'import cv2\n'), ((5825, 5868), 'cv2.cvtColor', 'cv2.cvtColor', (['Image_Path', 'cv2.COLOR_BGR2YUV'], {}), '(Image_Path, cv2.COLOR_BGR2YUV)\n', (5837, 5868), False, 'import cv2\n'), ((5921, 5961), 'cv2.cvtColor', 'cv2.cvtColor', (['img_yuv', 'cv2.COLOR_YUV2BGR'], {}), '(img_yuv, cv2.COLOR_YUV2BGR)\n', (5933, 5961), False, 'import cv2\n'), ((2850, 2873), 'numpy.shape', 'np.shape', (['OriginalImage'], {}), '(OriginalImage)\n', (2858, 2873), True, 'import numpy as np\n'), ((2893, 2916), 'numpy.shape', 'np.shape', (['OriginalImage'], {}), '(OriginalImage)\n', (2901, 2916), True, 'import numpy as np\n'), ((3391, 3408), 'numpy.shape', 'np.shape', (['Array2D'], {}), '(Array2D)\n', (3399, 3408), True, 'import numpy as np\n'), ((3420, 3437), 'numpy.shape', 'np.shape', (['Array2D'], {}), '(Array2D)\n', (3428, 3437), True, 'import numpy as np\n'), ((3192, 3215), 'math.sqrt', 'math.sqrt', (['Computed_MSE'], {}), '(Computed_MSE)\n', (3201, 3215), False, 'import math\n')]
|
import math
import numpy as np
import scipy as s
import scipy.integrate as q
import matplotlib.pyplot as plt
#Constants
H0 = 2.19507453e-18   # Hubble constant in s^-1 (H0 = 67.74 km/s/Mpc)
Wm = 0.279            # matter density parameter Omega_m
Wq = 1 - Wm           # dark-energy (quintessence) density parameter, flat universe
w = -1                # dark-energy equation of state
#Basic Functions
def H(a):
return H0*np.sqrt(Wm*a**(-3) + Wq*a**(-3*(1+w)))
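# dH/da, obtained by differentiating H(a)^2 = H0^2*(Wm*a**-3 + Wq*a**(-3*(1+w)))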
def dH(a):
return (H0**2/(2*H(a)))*(-3*Wm*a**(-4) - 3*(1 + w)*Wq*a**(-3*w -4))
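# Linear growth factor: D+(a) = 2.5*Wm*H0^2*H(a) * integral_0^a dx/(x*H(x))^3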
def Dplus(a):
def integrand(x):
return (x*H(x))**(-3)
[y,err] = q.quad(integrand,0,a)
return 2.5*H0**2*Wm*H(a)*y
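# Logarithmic growth rate of the growing mode, f+ = dlnD+/dlna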
def fplus(a):
h = H(a)
dh = dH(a)
d = Dplus(a)
return (a/d)*((dh*d/h) + 2.5*H0**2*Wm/(h**2*a**3))
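# Growth rate of the decaying mode (D- proportional to H), f- = dlnH/dlna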
def fminus(a):
return dH(a)*a/H(a)
#Primary Functions
#Second-order (n = 2) system: z2 returns dy/da for y = [nu2, mu2]; y2 integrates it from a0 to a
def z2(y,a):
fp = fplus(a)
fm = fminus(a)
return [fp/a*(2 + y[1] - 2*y[0]) , fp/a*((2/3) + (fm*fp**(-2))*(y[1] - y[0]) - y[1])]
def y2(x):
a = np.linspace(a0,x,N)
sol = q.odeint(z2,y20,a)
return [sol[-1,0], sol[-1,1]]
#Let y = [nu, mu]; z = dy/da
#This is the code for n = 3
def z3(y,a):
[nu2,mu2] = y2(a)
fp = fplus(a)
fm = fminus(a)
return [fp/a*(y[1] - 3*y[0] + 3*(nu2 + mu2)), fp/a*(-2*y[1] + (fm*fp**(-2))*(y[1] - y[0]) + 2*mu2)]
#Main Code
#Initial conditions - Using EdS values
y20 = [34/21,26/21]
y30 = [682/189, 142/63]
#Integration
a0 = 0.01
N = 10000
a = np.linspace(a0,1,N)
soln = q.odeint(z3,y30,a)
nuads = []
muads = []
for i in range(0,N):
nuads.append(y30[0])
muads.append(y30[1])
#Plotting
plt.figure(1)
plt.subplot(111)
plt.plot((1-a)/a,soln[:,0], 'k', label = r'$\nu_3$')
plt.plot((1-a)/a, nuads, '--r', label = r'$\nu_{EdS}$')
plt.title(r'$\nu_3$ as a function of redshift', fontsize=20)
plt.xlabel(r'$z$', fontsize=20)
plt.ylabel(r'$\nu_3$', fontsize=20)
plt.legend(fontsize=15)
plt.xlim([0,3])
plt.figure(2)
plt.subplot(111)
plt.plot((1-a)/a,soln[:,1], 'k', label=r'$\mu_3$')
plt.plot((1-a)/a, muads, '--r', label=r'$\mu_{EdS}$')
plt.title(r'$\mu_3$ as a function of redshift', fontsize=20)
plt.xlabel(r'$z$', fontsize=20)
plt.ylabel(r'$\mu_3$', fontsize=20)
plt.legend(fontsize=15)
plt.xlim([0,3])
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"scipy.integrate.quad",
"scipy.integrate.odeint",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.sqrt"
] |
[((1292, 1313), 'numpy.linspace', 'np.linspace', (['a0', '(1)', 'N'], {}), '(a0, 1, N)\n', (1303, 1313), True, 'import numpy as np\n'), ((1319, 1339), 'scipy.integrate.odeint', 'q.odeint', (['z3', 'y30', 'a'], {}), '(z3, y30, a)\n', (1327, 1339), True, 'import scipy.integrate as q\n'), ((1443, 1456), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1453, 1456), True, 'import matplotlib.pyplot as plt\n'), ((1457, 1473), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (1468, 1473), True, 'import matplotlib.pyplot as plt\n'), ((1474, 1530), 'matplotlib.pyplot.plot', 'plt.plot', (['((1 - a) / a)', 'soln[:, 0]', '"""k"""'], {'label': '"""$\\\\nu_3$"""'}), "((1 - a) / a, soln[:, 0], 'k', label='$\\\\nu_3$')\n", (1482, 1530), True, 'import matplotlib.pyplot as plt\n'), ((1527, 1584), 'matplotlib.pyplot.plot', 'plt.plot', (['((1 - a) / a)', 'nuads', '"""--r"""'], {'label': '"""$\\\\nu_{EdS}$"""'}), "((1 - a) / a, nuads, '--r', label='$\\\\nu_{EdS}$')\n", (1535, 1584), True, 'import matplotlib.pyplot as plt\n'), ((1583, 1643), 'matplotlib.pyplot.title', 'plt.title', (['"""$\\\\nu_3$ as a function of redshift"""'], {'fontsize': '(20)'}), "('$\\\\nu_3$ as a function of redshift', fontsize=20)\n", (1592, 1643), True, 'import matplotlib.pyplot as plt\n'), ((1644, 1674), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$z$"""'], {'fontsize': '(20)'}), "('$z$', fontsize=20)\n", (1654, 1674), True, 'import matplotlib.pyplot as plt\n'), ((1676, 1711), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\nu_3$"""'], {'fontsize': '(20)'}), "('$\\\\nu_3$', fontsize=20)\n", (1686, 1711), True, 'import matplotlib.pyplot as plt\n'), ((1712, 1735), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(15)'}), '(fontsize=15)\n', (1722, 1735), True, 'import matplotlib.pyplot as plt\n'), ((1736, 1752), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 3]'], {}), '([0, 3])\n', (1744, 1752), True, 'import matplotlib.pyplot as plt\n'), ((1753, 1766), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (1763, 1766), True, 'import matplotlib.pyplot as plt\n'), ((1767, 1783), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (1778, 1783), True, 'import matplotlib.pyplot as plt\n'), ((1784, 1840), 'matplotlib.pyplot.plot', 'plt.plot', (['((1 - a) / a)', 'soln[:, 1]', '"""k"""'], {'label': '"""$\\\\mu_3$"""'}), "((1 - a) / a, soln[:, 1], 'k', label='$\\\\mu_3$')\n", (1792, 1840), True, 'import matplotlib.pyplot as plt\n'), ((1835, 1892), 'matplotlib.pyplot.plot', 'plt.plot', (['((1 - a) / a)', 'muads', '"""--r"""'], {'label': '"""$\\\\mu_{EdS}$"""'}), "((1 - a) / a, muads, '--r', label='$\\\\mu_{EdS}$')\n", (1843, 1892), True, 'import matplotlib.pyplot as plt\n'), ((1889, 1949), 'matplotlib.pyplot.title', 'plt.title', (['"""$\\\\mu_3$ as a function of redshift"""'], {'fontsize': '(20)'}), "('$\\\\mu_3$ as a function of redshift', fontsize=20)\n", (1898, 1949), True, 'import matplotlib.pyplot as plt\n'), ((1950, 1980), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$z$"""'], {'fontsize': '(20)'}), "('$z$', fontsize=20)\n", (1960, 1980), True, 'import matplotlib.pyplot as plt\n'), ((1982, 2017), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\mu_3$"""'], {'fontsize': '(20)'}), "('$\\\\mu_3$', fontsize=20)\n", (1992, 2017), True, 'import matplotlib.pyplot as plt\n'), ((2018, 2041), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(15)'}), '(fontsize=15)\n', (2028, 2041), True, 'import matplotlib.pyplot as plt\n'), ((2042, 2058), 
'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 3]'], {}), '([0, 3])\n', (2050, 2058), True, 'import matplotlib.pyplot as plt\n'), ((2059, 2069), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2067, 2069), True, 'import matplotlib.pyplot as plt\n'), ((429, 452), 'scipy.integrate.quad', 'q.quad', (['integrand', '(0)', 'a'], {}), '(integrand, 0, a)\n', (435, 452), True, 'import scipy.integrate as q\n'), ((847, 868), 'numpy.linspace', 'np.linspace', (['a0', 'x', 'N'], {}), '(a0, x, N)\n', (858, 868), True, 'import numpy as np\n'), ((877, 897), 'scipy.integrate.odeint', 'q.odeint', (['z2', 'y20', 'a'], {}), '(z2, y20, a)\n', (885, 897), True, 'import scipy.integrate as q\n'), ((225, 273), 'numpy.sqrt', 'np.sqrt', (['(Wm * a ** -3 + Wq * a ** (-3 * (1 + w)))'], {}), '(Wm * a ** -3 + Wq * a ** (-3 * (1 + w)))\n', (232, 273), True, 'import numpy as np\n')]
|
import os
import numpy as np
import pandas as pd
from scipy.io import loadmat, savemat
import matplotlib.pyplot as plt
import seaborn as sns
def match_strings(strings, path, any_or_all='any'):
if any_or_all == 'any':
return any([string in path for string in strings])
elif any_or_all == 'all':
return all([string in path for string in strings])
def fix(datum, path):
    ## hopefully vestigial code, previously used in load_data to fix a massive diagonal EV matrix.
if datum['EV_vec'].shape[0] > 1:
print('fixing EV_vec')
print(datum['EV_vec'].shape)
print(path)
datum['EV_vec'] = np.diag(datum['EV_vec'])
savemat(path, datum, appendmat=False)
return datum
def load_data(mani_dir, exclude=[]):
# takes a directory and loads all the .mat files from batch_manifold_analysis.m
# return paths and data
paths = np.sort(np.array(os.listdir(mani_dir)))
if len(exclude)>0:
paths = [path for path in paths if not match_strings(exclude, path)]
data = []
for path in paths:
datum = loadmat(mani_dir+path)
if 'EV_vec' in datum.keys():
#datum = fix(datum, mani_dir+path)
pass
data.append(datum)
return paths, np.array(data)
#def load_data(mani_dir, exclude=[]):
# # takes a directory and loads all the .mat files from batch_manifold_analysis.m
# # return paths and data
# paths = np.sort(np.array(os.listdir(mani_dir)))
# if len(exclude)>0:
# paths = [path for path in paths if not match_strings(exclude, path)]
# data = np.array([loadmat(mani_dir+path) for path in paths])
# return paths, data
def get_layer_type(path, types):
for t in types:
if t in path:
return t
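# Outlier cleanup: values more than 2 standard deviations from their row mean are replaced by the row mean.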
def mi_outliers(data_vec):
for i in range(len(data_vec)):
row_mean = data_vec[i].mean()
row_std = data_vec[i].std()
for j in range(len(data_vec[i])):
if np.abs(data_vec[i][j] - row_mean) > row_std*2:
data_vec[i][j] = row_mean
return data_vec
def fill_input_features(df, input_features=3072):
df['featnum'] = df['featnum'].fillna(value=input_features)
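# Build a long-format DataFrame for one measure: metadata (epoch, layer, coding, seed, featnum, acc, arch, RP, imageset) is parsed from each result file name.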
def frame_constructor(paths, data, key, tag=None, mean=False, verbose=False, rm_outliers=True):
perm_seed = [catch(path, 'seed') for path in paths]
featnum = [catch(path, 'featnum') for path in paths]
acc = [catch(path, 'acc') for path in paths]
arch = [catch(path, 'arch') for path in paths]
RP = [catch(path, 'RP') for path in paths]
lnum = [path.split('-')[3].split('_')[1] for path in paths]
coding = [path.split('-')[3].split('_')[0] for path in paths]
epochs = np.array([int(path.split('-')[1].split('_')[1]) for path in paths])
image_set = np.array([path.split('-')[0] for path in paths])
data_vec = np.array([np.squeeze(datum[key]) for datum in data])
if mean:
if rm_outliers:
mi_outliers(data_vec)
data_vec = np.mean(data_vec,axis=1)
data_vec = np.atleast_2d(data_vec)
if verbose:
print('data_vec.shape: ', data_vec.shape)
if data_vec.shape[0]<data_vec.shape[1]:
data_vec = data_vec.T
df = pd.DataFrame(
columns=[
'path',
'imageset',
'epoch',
'layer number',
'coding',
'seed',
'featnum',
'acc',
'arch',
'RP',
'value',
'measure',
'tag'
],
data=np.array([
np.repeat([paths],data_vec.shape[-1],axis=0).T.reshape(-1),
np.repeat([image_set],data_vec.shape[-1],axis=0).T.reshape(-1),
np.repeat([epochs],data_vec.shape[-1],axis=0).T.reshape(-1),
np.repeat([lnum],data_vec.shape[-1],axis=0).T.reshape(-1),
np.repeat([coding],data_vec.shape[-1],axis=0).T.reshape(-1),
np.repeat([perm_seed],data_vec.shape[-1],axis=0).T.reshape(-1),
np.repeat([featnum],data_vec.shape[-1],axis=0).T.reshape(-1),
np.repeat([acc],data_vec.shape[-1],axis=0).T.reshape(-1),
np.repeat([arch],data_vec.shape[-1],axis=0).T.reshape(-1),
np.repeat([RP],data_vec.shape[-1],axis=0).T.reshape(-1),
data_vec.reshape(-1),
np.repeat(key,data_vec.size),
np.repeat(tag,data_vec.size)
]).T
)
types = ['input', 'AvgPool2d', 'MaxPool2d', 'Conv2d', 'ReLU', 'Sequential', 'Linear', 'BatchNorm2d', 'Softmax']
df['type'] = df.path.apply(lambda x: get_layer_type(x, types))
df['value'] = pd.to_numeric(df['value'], errors='coerce')
df['acc'] = pd.to_numeric(df['acc'], errors='coerce')
df['epoch'] = pd.to_numeric(df['epoch'], errors='coerce')
df['seed'] = pd.to_numeric(df['seed'], errors='coerce')
df['featnum'] = pd.to_numeric(df['featnum'], errors='coerce')
df['layer number'] = pd.to_numeric(df['layer number'], errors='coerce')
df['layer number og'] = pd.to_numeric(df['layer number'], errors='coerce')
df['layer name'] = df['coding'].values + '.'+ df['layer number og'].astype('str').values
df.loc[df['coding']=='features', 'layer number'] += 1
df.loc[df['coding']=='classifier', 'layer number'] = df.loc[
df['coding']=='classifier', 'layer number'] + df[
(df['coding']=='features') & (df['imageset']=='train') & (df['epoch']==epochs.max()) # this breaks if epoch 1 is not in the mix..
].shape[0] + 1
df.round(2)
return df
def compile_info(mani_dir, path):
mani_dir.replace('manifold_', '-')
info = path.replace('.h5', '')
info += '-seed_'+catch(mani_dir, 'seed', ind=1)
info += '-arch_'+catch(mani_dir, 'arch')
info += '-RP_'+catch(mani_dir, 'RP')
return info
def multi_frame_constructor(mani_dirs, tags, measures, exclude=[], verbose=False):
df = None
for i, mani_dir in enumerate(mani_dirs):
paths, data = load_data(mani_dir, exclude=exclude)
paths_info = [compile_info(mani_dir, path) for path in paths]
for measure in measures:
mean = True
single = ['CCcorr', 'pr', 'K0', 'S_vec', 'D_pr', 'D_expvar', 'D_feature_ds', 'asim0_g', 'asim0_m', 'Nc0_g', 'Nc0_m',]
if measure in single: mean = False
if type(df) == type(None):
df = frame_constructor(paths_info, data, measure, tag=tags[i], mean=mean, verbose=verbose)
else:
df = df.append(frame_constructor(paths_info, data, measure, tag=tags[i], mean=mean))
return df
def make_contiguous(a):
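    # Remap an arbitrary sequence of layer numbers onto 0..len(a)-1 so plotted layers have no gaps.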
return np.arange(len(a))
def display(df, x, y, measure, coding, title, opts={'sortby':[], 'hue':'tag', 'fix_legend':False, 'dims': (12,7)}):
unique_tags = np.unique(df.tag.values)
data = df[
(df['measure']==measure)
# &(df['coding']==coding)
].sort_values(by=['layer number']).copy()
for unique_tag in unique_tags:
contiguous_layer_num = make_contiguous(data[data['tag']==unique_tag]['layer number'].values)
data.loc[data['tag']==unique_tag, 'layer number'] = contiguous_layer_num
# re sort by layer number, as everything will be shifted if one set was not contiguous
data = data.sort_values(by=['layer number'])
layer_types = data['type'].values[::len(unique_tags)]
layer_features = data['featnum'].values[::len(unique_tags)]
xlabels = [layer_types[i]+' ({})'.format(layer_features[i]) for i in range(len(layer_types))]
if len(opts['sortby'])>0:
data = data.sort_values(by=opts['sortby'])
fig, ax = plt.subplots(figsize=opts['dims'])
p = sns.cubehelix_palette(len(unique_tags), start=1, rot=0, dark=.20, light=.80)
sns.set_palette('Reds')
with sns.color_palette("PuBuGn_d"):
ax = sns.scatterplot(x=x,
y=y,
# units="tag",
# size=opts['hue'],
sizes=(150,150),
ax=ax,
hue=opts['hue'],
palette=p,
legend='brief',
data=data)
if opts['fix_legend']:
handles, labels = ax.get_legend_handles_labels()
l = plt.legend(handles[0:1+len(unique_tags)], labels[0:1+len(unique_tags)], bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
else:
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
ax.set_title(title)
ax.set_ylabel(names[measure])
    ax.set_xticks(ticks=range(len(data.type)//len(unique_tags)))  # integer division so range() gets an int
ax.set_xticklabels(xlabels,rotation=90)
ax.set_xlabel('layer type')
return data, ax
names = {
'D_pr': 'Total Data PR',
'D_expvar': 'Total Data EV Dimension',
'CCcorr': 'Mean Manifold Center Correlation',
'D_S_vec': 'Mean Category EV Dimension',
'Dpr_S_vec': 'Mean Category PR',
'R_S_vec': 'Mean Category Radius',
'D_M_vec': 'Mean Dichotomy Dimension',
'R_M_vec': 'Mean Dichotomy Radius',
'a_Mfull_vec': 'Mean Dichotomy Capacity',
}
def get_losses(log, epochs=300, accuracy=False):
f = open(log)
val_loss = np.array([float(line.split(' ')[3]) for line in f if " * Prec@1" in line])
f = open(log)
train_loss = np.array([float(line.split(" ")[8]) for line in f if ("Prec@1" in line) & ("Epoch" in line)])
    step_num = len(train_loss)//epochs  # integer division: number of logged steps per epoch
train_loss = np.array([train_loss[i*step_num:i*step_num+step_num].mean() for i in range(epochs)])
if not accuracy:
val_loss = 100-val_loss
train_loss = 100-train_loss
return train_loss, val_loss
def add_loss(df, loss_df):
df['loss'] = df.apply(lambda x : loss_df[(loss_df['epoch']==x['epoch'])&
(loss_df['seed']==x['seed'])&
(loss_df['arch']==x['arch'])&
(loss_df['imageset']==x['imageset'])]['loss'].values[0], axis=1)
df['trainacc'] = df.apply(lambda x : 100-loss_df[(loss_df['epoch']==x['epoch'])&
(loss_df['seed']==x['seed'])&
(loss_df['arch']==x['arch'])&
(loss_df['imageset']=='train')]['loss'].values[0], axis=1)
df['valacc'] = df.apply(lambda x : 100-loss_df[(loss_df['epoch']==x['epoch'])&
(loss_df['seed']==x['seed'])&
(loss_df['arch']==x['arch'])&
(loss_df['imageset']=='val')]['loss'].values[0], axis=1)
return df
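# Per-layer differences: for each measure, value(layer n) - value(layer n-1); also returns the input-layer (layer 0) values.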
def delta_data(df):
layer_nums = np.unique(df['layer number'])
measure_id = df.loc[df['type']=='input', 'measure'].values
initial = np.zeros([2, measure_id.shape[0]]).astype('object')
initial[0,:] = measure_id
deltas = np.zeros([layer_nums.shape[0], measure_id.shape[0]]).astype('object')
deltas[0,:] = measure_id
for n in layer_nums:
if n == 0:
initial[1,:] = df.loc[df['layer number']==n, 'value'].values
else:
deltas[n,:] = df.loc[df['layer number']==n, 'value'].values - df.loc[df['layer number']==n-1, 'value'].values
return deltas.T, initial.T
def get_delta_frame(dir_template, ep, seeds=[0,10], expand_input_files=False, measures=[], skip=[], exclude=[], length=0, verbose=False):
success = []
for seed in range(*seeds):
if seed in skip:
pass
else:
mani_dirs = [dir_template.format(seed)]
tags = ["seed_{}".format(seed)]
if expand_input_files:
expand_input(mani_dirs)
df = multi_frame_constructor(mani_dirs, tags, measures, exclude=exclude)
if df.shape[0] < length:
pass
else:
if verbose:
print('try seed:', seed)
df = add_volume(df)
df = df[(df['imageset']=='train')&(df['epoch']==ep)]
data, initial = delta_data(df)
columns = (df.sort_values(by=['layer number'])['type']+'_'+df.sort_values(by=['layer number'])['layer number'].astype('str')).unique()
columns[0] = 'measure'
#print(columns)
#print(data)
if seed == seeds[0]:
delta_df = pd.DataFrame(columns=columns, data=data)
initial_df = pd.DataFrame(columns=['measure','initial'], data=initial)
else:
delta_df = delta_df.append(pd.DataFrame(columns=columns, data=data))
initial_df = initial_df.append(pd.DataFrame(columns=['measure','initial'], data=initial))
if verbose:
print('success')
success.append(seed)
print('success for seeds:', success)
return delta_df, initial_df
def plot_losses(log):
train_loss, val_loss = get_losses(log)
ax = pd.DataFrame(columns=['training error', 'validation error'], data=np.array([train_loss, val_loss]).T).plot()
ax.set_xlabel('Epoch number')
ax.set_ylabel('Error (%)')
ax.set_title('Training curves')
def delta_plot(delta_df, x, y, name, minmax=True, hline=[-0.5,0.5], vline=[-3,3]):
xy_df = delta_df[delta_df['measure']==x].melt('measure')
y_df = delta_df[delta_df['measure']==y].melt('measure')
xy_df['value2'] = y_df['value'].values
xy_df['type'] = xy_df['variable'].apply(lambda x : x.split('_')[0])
xy_df['depth'] = xy_df['variable'].apply(lambda x : float(x.split('_')[1])).astype('float')
sns.reset_defaults()
# unique_tags = xy_df['variable'].unique()
# p = sns.cubehelix_palette(len(unique_tags), light=.8, start=.5, rot=-.75)
# ax = sns.scatterplot(x='value', y='value2', hue='variable', style='type', palette=p, data=xy_df)
ax = sns.scatterplot(x='value', y='value2', hue='depth', style='type', legend='brief', data=xy_df)
ax.set_xlabel('delta '+x.replace('_vec',''))
ax.set_ylabel('delta '+y.replace('_vec',''))
if minmax:
ax.hlines(0,xy_df['value'].min()-.01,xy_df['value'].max()+.01)
ax.set_ylim(xy_df['value'].min()-.01,xy_df['value'].max()+.01)
ax.vlines(0,xy_df['value2'].min()-.01,xy_df['value2'].max()+.01)
ax.set_ylim(xy_df['value2'].min()-.01,xy_df['value2'].max()+.01)
else:
ax.hlines(0,*hline)
ax.set_xlim(*hline)
ax.vlines(0,*vline)
ax.set_ylim(*vline)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
ax.set_title('{}, d{} vs d{}'.format(name, x, y))
return xy_df, ax
#def delta_plot(delta_df, x, y, name, minmax=True, hline=[-0.5,0.5], vline=[-3,3]):
# xy_df = delta_df[delta_df['measure']==x].melt('measure')
# y_df = delta_df[delta_df['measure']==y].melt('measure')
# xy_df['value2'] = y_df['value'].values
#
# ax = sns.scatterplot(x='value', y='value2', hue='variable', data=xy_df)
# ax.set_xlabel('delta '+x.replace('_vec',''))
# ax.set_ylabel('delta '+y.replace('_vec',''))
# if minmax:
# ax.hlines(0,xy_df['value'].min()-.01,xy_df['value'].max()+.01)
# ax.set_ylim(xy_df['value'].min()-.01,xy_df['value'].max()+.01)
# ax.vlines(0,xy_df['value2'].min()-.01,xy_df['value2'].max()+.01)
# ax.set_ylim(xy_df['value2'].min()-.01,xy_df['value2'].max()+.01)
# else:
# ax.hlines(0,*hline)
# ax.set_xlim(*hline)
# ax.vlines(0,*vline)
# ax.set_ylim(*vline)
# plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# ax.set_title('{}, d{} vs d{}'.format(name, x, y))
# return ax
def catch(filepath, target, ind=1, verbose=False):
parts = filepath.split('-')
match = [part for part in parts if target in part]
if len(match) == 1:
return match[0].split('_')[ind]
else:
if verbose:
print('target {} not found in filepath {}'.format(target,filepath))
return None
def get_meta_dict(filepath,targets):
"""get_meta_dict(fs, ['seed', 'drop', 'imageset'])"""
meta_dict = {}
for target in targets:
meta_dict[target] = catch(filepath,target)
return meta_dict
def add_meta(df,targets):
for target in targets:
df[target] = df['log'].apply(lambda x : catch(x,target))
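# Derived measure: 'Rm*sqrt(Dm)' = mean dichotomy radius times sqrt(mean dichotomy dimension), appended as extra rows.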
def add_volume(df):
v_df = df.loc[df['measure']=='D_M_vec'].copy()
v_df['measure'] = 'Rm*sqrt(Dm)'
v_df['value'] = np.sqrt(df.loc[df['measure']=='D_M_vec', 'value'].values)*df.loc[df['measure']=='R_M_vec', 'value'].values
return df.append(v_df)
def expand_input(mani_dirs):
from shutil import copyfile
for md in mani_dirs:
files = os.listdir(md)
eps = np.unique([catch(p, 'ep_') for p in files if not match_strings(['input'], p)])
inputs = [p for p in files if match_strings(['-input'], p)]
template = files[0]
for og_file in inputs:
for ep in eps:
og_file_path = os.path.join(md, og_file)
dest = og_file.replace('-', '-ep_{}-'.format(ep)).replace('input', 'acc_0-layer_0-type_input-features_3072')
dest = os.path.join(md, dest)
copyfile(og_file_path, dest)
os.remove(og_file_path)
|
[
"os.remove",
"numpy.abs",
"scipy.io.loadmat",
"numpy.mean",
"numpy.diag",
"os.path.join",
"numpy.unique",
"numpy.atleast_2d",
"pandas.DataFrame",
"seaborn.reset_defaults",
"shutil.copyfile",
"matplotlib.pyplot.subplots",
"numpy.repeat",
"seaborn.scatterplot",
"matplotlib.pyplot.legend",
"numpy.squeeze",
"seaborn.set_palette",
"os.listdir",
"pandas.to_numeric",
"numpy.zeros",
"scipy.io.savemat",
"numpy.array",
"seaborn.color_palette",
"numpy.sqrt"
] |
[((3043, 3066), 'numpy.atleast_2d', 'np.atleast_2d', (['data_vec'], {}), '(data_vec)\n', (3056, 3066), True, 'import numpy as np\n'), ((4643, 4686), 'pandas.to_numeric', 'pd.to_numeric', (["df['value']"], {'errors': '"""coerce"""'}), "(df['value'], errors='coerce')\n", (4656, 4686), True, 'import pandas as pd\n'), ((4703, 4744), 'pandas.to_numeric', 'pd.to_numeric', (["df['acc']"], {'errors': '"""coerce"""'}), "(df['acc'], errors='coerce')\n", (4716, 4744), True, 'import pandas as pd\n'), ((4763, 4806), 'pandas.to_numeric', 'pd.to_numeric', (["df['epoch']"], {'errors': '"""coerce"""'}), "(df['epoch'], errors='coerce')\n", (4776, 4806), True, 'import pandas as pd\n'), ((4824, 4866), 'pandas.to_numeric', 'pd.to_numeric', (["df['seed']"], {'errors': '"""coerce"""'}), "(df['seed'], errors='coerce')\n", (4837, 4866), True, 'import pandas as pd\n'), ((4887, 4932), 'pandas.to_numeric', 'pd.to_numeric', (["df['featnum']"], {'errors': '"""coerce"""'}), "(df['featnum'], errors='coerce')\n", (4900, 4932), True, 'import pandas as pd\n'), ((4958, 5008), 'pandas.to_numeric', 'pd.to_numeric', (["df['layer number']"], {'errors': '"""coerce"""'}), "(df['layer number'], errors='coerce')\n", (4971, 5008), True, 'import pandas as pd\n'), ((5037, 5087), 'pandas.to_numeric', 'pd.to_numeric', (["df['layer number']"], {'errors': '"""coerce"""'}), "(df['layer number'], errors='coerce')\n", (5050, 5087), True, 'import pandas as pd\n'), ((6803, 6827), 'numpy.unique', 'np.unique', (['df.tag.values'], {}), '(df.tag.values)\n', (6812, 6827), True, 'import numpy as np\n'), ((7644, 7678), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': "opts['dims']"}), "(figsize=opts['dims'])\n", (7656, 7678), True, 'import matplotlib.pyplot as plt\n'), ((7768, 7791), 'seaborn.set_palette', 'sns.set_palette', (['"""Reds"""'], {}), "('Reds')\n", (7783, 7791), True, 'import seaborn as sns\n'), ((10623, 10652), 'numpy.unique', 'np.unique', (["df['layer number']"], {}), "(df['layer number'])\n", (10632, 10652), True, 'import numpy as np\n'), ((13738, 13758), 'seaborn.reset_defaults', 'sns.reset_defaults', ([], {}), '()\n', (13756, 13758), True, 'import seaborn as sns\n'), ((14000, 14098), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': '"""value"""', 'y': '"""value2"""', 'hue': '"""depth"""', 'style': '"""type"""', 'legend': '"""brief"""', 'data': 'xy_df'}), "(x='value', y='value2', hue='depth', style='type', legend=\n 'brief', data=xy_df)\n", (14015, 14098), True, 'import seaborn as sns\n'), ((14629, 14691), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.05, 1)', 'loc': '(2)', 'borderaxespad': '(0.0)'}), '(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)\n', (14639, 14691), True, 'import matplotlib.pyplot as plt\n'), ((648, 672), 'numpy.diag', 'np.diag', (["datum['EV_vec']"], {}), "(datum['EV_vec'])\n", (655, 672), True, 'import numpy as np\n'), ((681, 718), 'scipy.io.savemat', 'savemat', (['path', 'datum'], {'appendmat': '(False)'}), '(path, datum, appendmat=False)\n', (688, 718), False, 'from scipy.io import loadmat, savemat\n'), ((1092, 1116), 'scipy.io.loadmat', 'loadmat', (['(mani_dir + path)'], {}), '(mani_dir + path)\n', (1099, 1116), False, 'from scipy.io import loadmat, savemat\n'), ((1270, 1284), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1278, 1284), True, 'import numpy as np\n'), ((3002, 3027), 'numpy.mean', 'np.mean', (['data_vec'], {'axis': '(1)'}), '(data_vec, axis=1)\n', (3009, 3027), True, 'import numpy as np\n'), ((7801, 7830), 'seaborn.color_palette', 
'sns.color_palette', (['"""PuBuGn_d"""'], {}), "('PuBuGn_d')\n", (7818, 7830), True, 'import seaborn as sns\n'), ((7845, 7955), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': 'x', 'y': 'y', 'sizes': '(150, 150)', 'ax': 'ax', 'hue': "opts['hue']", 'palette': 'p', 'legend': '"""brief"""', 'data': 'data'}), "(x=x, y=y, sizes=(150, 150), ax=ax, hue=opts['hue'], palette\n =p, legend='brief', data=data)\n", (7860, 7955), True, 'import seaborn as sns\n'), ((8450, 8512), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.05, 1)', 'loc': '(2)', 'borderaxespad': '(0.0)'}), '(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)\n', (8460, 8512), True, 'import matplotlib.pyplot as plt\n'), ((16589, 16648), 'numpy.sqrt', 'np.sqrt', (["df.loc[df['measure'] == 'D_M_vec', 'value'].values"], {}), "(df.loc[df['measure'] == 'D_M_vec', 'value'].values)\n", (16596, 16648), True, 'import numpy as np\n'), ((16828, 16842), 'os.listdir', 'os.listdir', (['md'], {}), '(md)\n', (16838, 16842), False, 'import os\n'), ((915, 935), 'os.listdir', 'os.listdir', (['mani_dir'], {}), '(mani_dir)\n', (925, 935), False, 'import os\n'), ((2859, 2881), 'numpy.squeeze', 'np.squeeze', (['datum[key]'], {}), '(datum[key])\n', (2869, 2881), True, 'import numpy as np\n'), ((10730, 10764), 'numpy.zeros', 'np.zeros', (['[2, measure_id.shape[0]]'], {}), '([2, measure_id.shape[0]])\n', (10738, 10764), True, 'import numpy as np\n'), ((10825, 10877), 'numpy.zeros', 'np.zeros', (['[layer_nums.shape[0], measure_id.shape[0]]'], {}), '([layer_nums.shape[0], measure_id.shape[0]])\n', (10833, 10877), True, 'import numpy as np\n'), ((17376, 17399), 'os.remove', 'os.remove', (['og_file_path'], {}), '(og_file_path)\n', (17385, 17399), False, 'import os\n'), ((1978, 2011), 'numpy.abs', 'np.abs', (['(data_vec[i][j] - row_mean)'], {}), '(data_vec[i][j] - row_mean)\n', (1984, 2011), True, 'import numpy as np\n'), ((17122, 17147), 'os.path.join', 'os.path.join', (['md', 'og_file'], {}), '(md, og_file)\n', (17134, 17147), False, 'import os\n'), ((17296, 17318), 'os.path.join', 'os.path.join', (['md', 'dest'], {}), '(md, dest)\n', (17308, 17318), False, 'import os\n'), ((17335, 17363), 'shutil.copyfile', 'copyfile', (['og_file_path', 'dest'], {}), '(og_file_path, dest)\n', (17343, 17363), False, 'from shutil import copyfile\n'), ((12352, 12392), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'columns', 'data': 'data'}), '(columns=columns, data=data)\n', (12364, 12392), True, 'import pandas as pd\n'), ((12426, 12484), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['measure', 'initial']", 'data': 'initial'}), "(columns=['measure', 'initial'], data=initial)\n", (12438, 12484), True, 'import pandas as pd\n'), ((4352, 4381), 'numpy.repeat', 'np.repeat', (['key', 'data_vec.size'], {}), '(key, data_vec.size)\n', (4361, 4381), True, 'import numpy as np\n'), ((4394, 4423), 'numpy.repeat', 'np.repeat', (['tag', 'data_vec.size'], {}), '(tag, data_vec.size)\n', (4403, 4423), True, 'import numpy as np\n'), ((12553, 12593), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'columns', 'data': 'data'}), '(columns=columns, data=data)\n', (12565, 12593), True, 'import pandas as pd\n'), ((12646, 12704), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['measure', 'initial']", 'data': 'initial'}), "(columns=['measure', 'initial'], data=initial)\n", (12658, 12704), True, 'import pandas as pd\n'), ((13061, 13093), 'numpy.array', 'np.array', (['[train_loss, val_loss]'], {}), '([train_loss, val_loss])\n', (13069, 13093), True, 'import 
numpy as np\n'), ((3593, 3639), 'numpy.repeat', 'np.repeat', (['[paths]', 'data_vec.shape[-1]'], {'axis': '(0)'}), '([paths], data_vec.shape[-1], axis=0)\n', (3602, 3639), True, 'import numpy as np\n'), ((3665, 3715), 'numpy.repeat', 'np.repeat', (['[image_set]', 'data_vec.shape[-1]'], {'axis': '(0)'}), '([image_set], data_vec.shape[-1], axis=0)\n', (3674, 3715), True, 'import numpy as np\n'), ((3741, 3788), 'numpy.repeat', 'np.repeat', (['[epochs]', 'data_vec.shape[-1]'], {'axis': '(0)'}), '([epochs], data_vec.shape[-1], axis=0)\n', (3750, 3788), True, 'import numpy as np\n'), ((3814, 3859), 'numpy.repeat', 'np.repeat', (['[lnum]', 'data_vec.shape[-1]'], {'axis': '(0)'}), '([lnum], data_vec.shape[-1], axis=0)\n', (3823, 3859), True, 'import numpy as np\n'), ((3885, 3932), 'numpy.repeat', 'np.repeat', (['[coding]', 'data_vec.shape[-1]'], {'axis': '(0)'}), '([coding], data_vec.shape[-1], axis=0)\n', (3894, 3932), True, 'import numpy as np\n'), ((3958, 4008), 'numpy.repeat', 'np.repeat', (['[perm_seed]', 'data_vec.shape[-1]'], {'axis': '(0)'}), '([perm_seed], data_vec.shape[-1], axis=0)\n', (3967, 4008), True, 'import numpy as np\n'), ((4034, 4082), 'numpy.repeat', 'np.repeat', (['[featnum]', 'data_vec.shape[-1]'], {'axis': '(0)'}), '([featnum], data_vec.shape[-1], axis=0)\n', (4043, 4082), True, 'import numpy as np\n'), ((4108, 4152), 'numpy.repeat', 'np.repeat', (['[acc]', 'data_vec.shape[-1]'], {'axis': '(0)'}), '([acc], data_vec.shape[-1], axis=0)\n', (4117, 4152), True, 'import numpy as np\n'), ((4178, 4223), 'numpy.repeat', 'np.repeat', (['[arch]', 'data_vec.shape[-1]'], {'axis': '(0)'}), '([arch], data_vec.shape[-1], axis=0)\n', (4187, 4223), True, 'import numpy as np\n'), ((4249, 4292), 'numpy.repeat', 'np.repeat', (['[RP]', 'data_vec.shape[-1]'], {'axis': '(0)'}), '([RP], data_vec.shape[-1], axis=0)\n', (4258, 4292), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
#
# // SPDX-License-Identifier: BSD-3-CLAUSE
#
# (C) Copyright 2018, Xilinx, Inc.
#
import os
import json
import argparse
from collections import OrderedDict
import h5py
import ntpath
import cv2
import numpy as np
import matplotlib.pyplot as plt  # used by the optional 'plot' preprocessing command
from xfdnn.rt.xdnn_util import literal_eval
from ext.PyTurboJPEG import imread as _imread
class image_preprocessing(object):
def __init__(self, resize=[], crop=[], pxlscale=[], meansub=[], chtranspose=None, chswap=None,
plot=None):
self.resize = resize
self.crop = crop
self.pxlscale = pxlscale
self.meansub = meansub
self.chtranspose = chtranspose
self.chswap = chswap
def max_batch_size(x):
  maxb = 16
  x = int(x)  # always return an int, even when no clamping is needed
  if x > maxb:
    print ("Limiting batch size to %d" % maxb)
    x = maxb
  return x
def extant_file(x):
"""
'Type' for argparse - checks that file exists but does not open.
"""
if x == "-":
# skip file check and allow empty string
return ""
if not os.path.exists(x):
# Argparse uses the ArgumentTypeError to give a rejection message like:
# error: argument input: x does not exist
raise argparse.ArgumentTypeError("{0} does not exist".format(x))
return x
def default_parser_args():
parser = argparse.ArgumentParser(description='pyXDNN')
parser.add_argument('--xclbin', help='.xclbin file', required=True, type=extant_file, metavar="FILE")
parser.add_argument('--batch_sz', type=max_batch_size, default=-1, help='batch size')
parser.add_argument('--dsp', type=int, default=28, help="xclbin's DSP array width")
parser.add_argument('--netcfg', help='FPGA instructions generated by compiler for the network',
required=True, type=extant_file, metavar="FILE")
parser.add_argument('--quantizecfg', help="Network's quantization parameters file",
required=True, type=extant_file, metavar="FILE")
parser.add_argument('--net_def', help='prototxt file for caffe',
type=extant_file, metavar="FILE")
parser.add_argument('--net_weights', help="caffe model file",
type=extant_file, metavar="FILE")
parser.add_argument('--xlnxlib',
help='FPGA xfDNN lib .so (deprecated)', type=extant_file, metavar="FILE")
parser.add_argument('--outsz', type=int, default=1000,
help='size of last layer\'s output blob')
parser.add_argument('--weights',
help="Folder path to network parameters/weights",
required=True, type=extant_file, metavar="FILE")
parser.add_argument('--labels',
help='result -> labels translation file', type=extant_file, metavar="FILE")
parser.add_argument('--golden', help='file idx -> expected label file', type=extant_file, metavar="FILE")
parser.add_argument('--jsoncfg',
help='json file with nets, data and PEs to use',
type=extant_file, metavar="FILE")
parser.add_argument('--images', nargs='*',
help='directory or raw image files to use as input', required=True, type=extant_file, metavar="FILE")
parser.add_argument('--scaleA', type=int, default=10000,
help='weights scaling value')
parser.add_argument('--scaleB', type=int, default=30,
help='activation scaling value ')
parser.add_argument('--img_raw_scale', type=float, default=255.0,
help='image raw scale value ')
parser.add_argument('--img_mean', type=int, nargs=3, default=[104.007,116.669,122.679], # BGR for Caffe
help='image mean values ')
parser.add_argument('--img_input_scale', type=float, default=1.0,
help='image input scale value ')
parser.add_argument('--zmqpub', default=False, action='store_true',
help='publish predictions to zmq port 5555')
parser.add_argument('--perpetual', default=False, action='store_true',
help='loop over input images forever')
parser.add_argument('--PE', nargs='?', type=int, default=-1,
help='preferred PE to run the classification on. Default is auto-select')
parser.add_argument('--endLayerName', default="",
help='layer name till the network should be run, helpful for debugging')
parser.add_argument('--diffStartLayer', type=int, default=0,
help="if 1 then we can run from any given layer ignoring the X's of first layers")
parser.add_argument('--v2WeightsFormat', type=bool, default=False,
help="Weights File specified as KernSizex KernSizey instead of only KernSize, supporting rectangle kernels")
parser.add_argument('--layerName', default="",
help='layername until which pyfpga should run, if left default, would run the entire model')
parser.add_argument('--binaryFormatWeights', type=bool, default=False,
help="Binary Format Weights Files")
return parser
def default_xdnn_arg_parser_compiled(base='TF'):
parser = argparse.ArgumentParser(description='XDLF_compiled')
parser.add_argument("--base", type=str, default="TF")
parser.add_argument("--compilerjson", type=str, default=None)
parser.add_argument("--weights", type=str, default=None)
parser.add_argument("--data_format", type=str, default='NCHW')
parser.add_argument("--input_shape", type=str, default=None)
parser.add_argument("--labels", type=str, default=None)
parser.add_argument("--image_path", type=str, default=None)
parser.add_argument('--images', type=extant_file, metavar='FILE', nargs='*', help='directory or raw image files to use as input')
parser.add_argument("--image", type=str, default=None)
parser.add_argument('--batch_sz', type=max_batch_size, default=-1, help='batch size')
parser.add_argument("--image_transforms", nargs='+', type=str, help="""None if no
preprocessing is needed. <name> if using prespecified reprocessings; . list of
preprocesses.""")
parser.add_argument("--val", type=str, default=None)
parser.add_argument("--num_batches", type=int, default=-1)
parser.add_argument("--batch", type=int, default=4)
parser.add_argument("--xclbin", type=str, default='')
parser.add_argument('--netcfg', type=extant_file, metavar='FILE', help="""FPGA instructions
generated by compiler for the network""")
parser.add_argument('--jsoncfg', type=extant_file, metavar='FILE', help='json file with nets, data and PEs to use')
parser.add_argument('--quantizecfg', type=extant_file, metavar='FILE', help="""Network's
quantization parameters file""")
parser.add_argument('--outsz', type=int, default=1000, help='size of last layer\'s output blob')
parser.add_argument('--datadir', type=extant_file, metavar='FILE', help='Folder path to network parameters/weights')
parser.add_argument("--xdnnv3", action='store_true', default=False)
parser.add_argument("--usedeephi", action='store_true', default=False)
parser.add_argument("--device", type=str, default='CPU')
parser.add_argument("--quant_cfgfile", type=str, default=None)
parser.add_argument("--quant_recipe", type=str, default=None)
parser.add_argument("--fpga_recipe", type=str, default=None)
parser.add_argument("--save", type=str, default=None)
parser.add_argument("--verify_dir", type=str, default=None)
parser.add_argument('--save2modeldir', action='store_true', default=False, help="""store network
        partitions and compiler outputs at model directory (not at script's
directory.)""")
parser.add_argument('--scaleA', type=int, default=10000, help='weights scaling value')
parser.add_argument('--scaleB', type=int, default=30, help='activation scaling value ')
parser.add_argument('--img_raw_scale',type=float, default=255.0, help='image raw scale value ')
parser.add_argument('--img_mean', type=int, nargs=3, default=[104.007,116.669,122.679], # BGR for Caffe
help='image mean values ')
parser.add_argument('--img_input_scale', type=float, default=1.0, help='image input scale value ')
parser.add_argument('--zmqpub', action='store_true', default=False, help='publish predictions to zmq port 5555')
parser.add_argument('--perpetual', action='store_true', default=False, help='loop over input images forever')
parser.add_argument('--PE', type=int, nargs='?', default=-1, help='preferred PE to run the classification on. Default is auto-select')
parser.add_argument('--endLayerName', type=str, default='', help='layer name till the network should be run, helpful for debugging')
parser.add_argument('--diffStartLayer', type=int, default=0, help="if 1 then we can run from any given layer ignoring the X's of first layers")
parser.add_argument('--v2WeightsFormat', action='store_true', default=False, help="Weights File specified as KernSizex KernSizey instead of only KernSize, supporting rectangle kernels")
parser.add_argument('--layerName', type=str, default='', help='layername until which pyfpga should run, if left default, would run the entire model')
parser.add_argument('--binaryFormatWeights', action='store_true', default=False, help="Binary Format Weights Files")
return parser
def default_xdnn_arg_parser(base='TF'):
if base.lower() == 'tf':
## FIXME: Hack to by pass caffe and tensorflow co-existance issues
from xfdnn.tools.compile.bin.xfdnn_compiler_tensorflow import default_compiler_arg_parser as default_TF_compiler_arg_parser
parser = default_TF_compiler_arg_parser()
elif base.lower() == 'caffe':
## FIXME: Hack to by pass caffe and tensorflow co-existance issues
from xfdnn.tools.compile.bin.xfdnn_compiler_caffe import default_compiler_arg_parser as default_CAFFE_compiler_arg_parser
parser = default_CAFFE_compiler_arg_parser()
else:
    raise AttributeError('unsupported platform')
parser.add_argument("--base", type=str, default="TF")
parser.add_argument("--data_format", type=str, default='NCHW')
parser.add_argument("--input_shape", type=str, default=None)
parser.add_argument('--golden', type=extant_file, metavar='FILE', help='file idx -> expected label file')
parser.add_argument("--labels", type=str, default=None)
parser.add_argument("--image_path", type=str, default=None)
parser.add_argument('--images', type=extant_file, metavar='FILE', nargs='*', help='directory or raw image files to use as input')
parser.add_argument("--image", type=str, default=None)
parser.add_argument('--batch_sz', type=max_batch_size, default=-1, help='batch size')
parser.add_argument("--image_transforms", nargs='+', type=str, help="""None if no
preprocessing is needed. <name> if using prespecified reprocessings; . list of
preprocesses.""")
parser.add_argument("--val", type=str, default=None)
parser.add_argument("--num_batches", type=int, default=-1)
parser.add_argument("--batch", type=int, default=4)
parser.add_argument("--xclbin", type=str, default='')
parser.add_argument('--netcfg', type=extant_file, metavar='FILE', help="""FPGA instructions
generated by compiler for the network""")
parser.add_argument('--jsoncfg', type=extant_file, metavar='FILE', help='json file with nets, data and PEs to use')
parser.add_argument('--quantizecfg', type=extant_file, metavar='FILE', help="""Network's
quantization parameters file""")
parser.add_argument('--outsz', type=int, default=1000, help='size of last layer\'s output blob')
parser.add_argument('--datadir', type=extant_file, metavar='FILE', help='Folder path to network parameters/weights')
parser.add_argument("--xdnnv3", action='store_true', default=False)
parser.add_argument("--device", type=str, default='CPU')
parser.add_argument("--quant_recipe", type=str, default=None)
parser.add_argument("--fpga_recipe", type=str, default=None)
parser.add_argument("--save", type=str, default=None)
parser.add_argument("--verify_dir", type=str, default=None)
parser.add_argument('--save2modeldir', action='store_true', default=False, help="""store network
        partitions and compiler outputs at model directory (not at script's
directory.)""")
parser.add_argument('--scaleA', type=int, default=10000, help='weights scaling value')
parser.add_argument('--scaleB', type=int, default=30, help='activation scaling value ')
parser.add_argument('--img_raw_scale',type=float, default=255.0, help='image raw scale value ')
parser.add_argument('--img_mean', type=int, nargs=3, default=[104.007,116.669,122.679], # BGR for Caffe
help='image mean values ')
parser.add_argument('--img_input_scale', type=float, default=1.0, help='image input scale value ')
parser.add_argument('--zmqpub', action='store_true', default=False, help='publish predictions to zmq port 5555')
parser.add_argument('--perpetual', action='store_true', default=False, help='loop over input images forever')
parser.add_argument('--PE', type=int, nargs='?', default=-1, help='preferred PE to run the classification on. Default is auto-select')
parser.add_argument('--endLayerName', type=str, default='', help='layer name till the network should be run, helpful for debugging')
parser.add_argument('--diffStartLayer', type=int, default=0, help="if 1 then we can run from any given layer ignoring the X's of first layers")
parser.add_argument('--v2WeightsFormat', action='store_true', default=False, help="Weights File specified as KernSizex KernSizey instead of only KernSize, supporting rectangle kernels")
parser.add_argument('--layerName', type=str, default='', help='layername until which pyfpga should run, if left default, would run the entire model')
parser.add_argument('--binaryFormatWeights', action='store_true', default=False, help="Binary Format Weights Files")
return parser
def make_dict_args(args):
def find_all_images(input_dict):
if 'images' in input_dict and input_dict['images'] is not None:
inputFiles = []
for dir_or_image in literal_eval(str(input_dict['images'])):
if os.path.isdir(dir_or_image):
inputFiles += [os.path.join(dir_or_image, f) for f in os.listdir(dir_or_image) if os.path.isfile(os.path.join(dir_or_image, f))]
else:
inputFiles += [dir_or_image]
input_dict['images'] = inputFiles
def eval_string(input_dict):
for key, val in list(input_dict.items()):
try:
input_dict[key] = literal_eval(str(val))
except:
pass
#if val and str(val).isdigit():
# input_dict[key] = int(val)
def ingest_xclbin_json_config(input_dict):
fname = input_dict['xclbin'] + ".json"
with open(fname) as data:
xclbinJson = json.load(data)
input_dict['overlaycfg'] = xclbinJson
isV3 = False
if 'XDNN_VERSION_MAJOR' in xclbinJson \
and xclbinJson['XDNN_VERSION_MAJOR'] == "3":
isV3 = True
if isV3:
input_dict['xdnnv3'] = True
libPath = os.environ['LIBXDNN_PATH'] + ".v3"
if os.path.isfile(libPath):
os.environ['LIBXDNN_PATH'] = libPath
if 'XDNN_CSR_BASE' in xclbinJson and input_dict['batch_sz'] == -1:
csrAddrs = xclbinJson['XDNN_CSR_BASE'].split(",")
input_dict['batch_sz'] = len(csrAddrs)
if not isV3:
input_dict['batch_sz'] *= 2
try:
args_dict = vars(args)
except:
args_dict = args
find_all_images(args_dict)
eval_string(args_dict)
ingest_xclbin_json_config(args_dict)
jsoncfg_exists = args_dict.get('jsoncfg')
if jsoncfg_exists:
with open(args_dict['jsoncfg']) as jsoncfgFile:
jsoncfgs = json.load(jsoncfgFile)['confs']
for jsoncfg in jsoncfgs:
find_all_images(jsoncfg)
eval_string(jsoncfg)
# include all args not in args_dict['jsoncfg'] from original args_dict
for key, value in list(args_dict.items()):
if key not in jsoncfg:
jsoncfg[key] = value
args_dict['jsoncfg'] = jsoncfgs
return args_dict
def processCommandLine(argv=None, base='TF'):
"""
Invoke command line parser for command line deployment flows.
"""
#parser = default_xdnn_arg_parser(base=base)
parser = default_parser_args()
args = parser.parse_args(argv)
return make_dict_args(args)
# Generic list of image manipulation functions for simplifying preprocess code
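# Each step in cmdSeq is a (command, parameter) pair; supported commands: resize, resize2mindim,
# resize2maxdim, crop_letterbox, crop_center, plot, pxlscale, meansub, chtranspose, chswap.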
def loadImageBlobFromFileScriptBase(imgFile, cmdSeq):
if isinstance(imgFile, str):
img = _imread(imgFile)
else:
img = imgFile
orig_shape = img.shape
for (cmd,param) in cmdSeq:
#print "command:",cmd,"param:",param
#print "imshape:",img.shape
if cmd == 'resize':
img = cv2.resize(img, (param[0], param[1]))
elif cmd == 'resize2mindim':
height, width, __ = img.shape
newdim = min(height, width)
scalew = float(width) / newdim
scaleh = float(height) / newdim
mindim = min(param[0], param[1])
neww = int(mindim * scalew)
newh = int(mindim * scaleh)
img = cv2.resize(img, (neww, newh))
elif cmd == 'resize2maxdim':
# Currently doesn't work for rectangular output dimensions...
height, width, __ = img.shape
newdim = max(height, width)
scalew = float(width) / newdim
scaleh = float(height) / newdim
maxdim = max(param)
neww = int(maxdim * scalew)
newh = int(maxdim * scaleh)
img = cv2.resize(img, (neww, newh))
elif cmd == 'crop_letterbox':
height, width, channels = img.shape
newdim = max(height, width)
letter_image = np.zeros((newdim, newdim, channels))
letter_image[:, :, :] = param
if newdim == width:
        letter_image[(newdim-height)//2:((newdim-height)//2+height),0:width] = img  # integer division for valid slice indices
      else:
        letter_image[0:height,(newdim-width)//2:((newdim-width)//2+width)] = img
img = letter_image
elif cmd == 'crop_center':
size_x = img.shape[0]
size_y = img.shape[1]
ll_x = size_x//2 - param[0]//2
ll_y = size_y//2 - param[1]//2
img = img[ll_x:ll_x+param[0],ll_y:ll_y+param[1]]
elif cmd == 'plot':
toshow = img.astype(np.uint8)
if param is not None:
toshow = np.transpose(toshow, (param[0], param[1], param[2]))
plt.imshow(toshow, cmap = 'gray', interpolation = 'bicubic')
plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis
plt.show()
elif cmd == 'pxlscale':
if img.dtype != np.float32:
img = img.astype(np.float32, order='C')
if param != 1.0:
img = img * param
elif cmd == 'meansub':
if img.dtype != np.float32:
img = img.astype(np.float32, order='C')
if isinstance(param, np.ndarray):
img -= param
else:
img -= np.array(param, dtype = np.float32, order='C')
elif cmd == 'chtranspose':
# HWC->CWH = 2,0,1
# CWH->HWC = 1,2,0
img = np.transpose(img, (param[0], param[1], param[2]))
elif cmd == 'chswap':
# BGR->RGB = 2,1,0
# RGB->BGR = 2,1,0
ch = 3*[None]
if img.shape[0] == 3:
ch[0] = img[0,:,:]
ch[1] = img[1,:,:]
ch[2] = img[2,:,:]
img = np.stack((ch[param[0]],ch[param[1]],ch[param[2]]), axis=0)
else:
ch[0] = img[:,:,0]
ch[1] = img[:,:,1]
ch[2] = img[:,:,2]
img = np.stack((ch[param[0]],ch[param[1]],ch[param[2]]), axis=2)
else:
raise NotImplementedError(cmd)
# print "final imshape:",img.shape
return img, orig_shape
# This runs image manipulation script
def loadImageBlobFromFile(imgFile, raw_scale, mean, input_scale, img_h, img_w):
# Direct resize only
cmdseqResize = [
('resize',(img_w,img_h)),
('pxlscale',float(raw_scale)/255),
('meansub', mean),
('pxlscale', input_scale),
('chtranspose',(2,0,1))
]
img, orig_shape = loadImageBlobFromFileScriptBase(imgFile, cmdseqResize)
# Change initial resize to match network training (shown as {alpha x 256 or 256 x alpha}->224,224,
# alpha being at least 256 such that the original aspect ratio is maintained)
#cmdseqCenterCrop = [
# ('resize2mindim',(256,256)),
# ('crop_center',(img_h,img_w)),
# ('pxlscale',float(raw_scale)/255),
# ('meansub', mean),
# ('pxlscale', input_scale),
# ('chtranspose',(2,0,1))
# ]
#img, orig_shape = loadImageBlobFromFileScriptBase(imgFile, cmdseqCenterCrop)
img = img[ np.newaxis, ...]
  img = np.ascontiguousarray(img, dtype=np.float32)  # assign: ascontiguousarray returns a new array
return img, None
def loadYoloImageBlobFromFile(imgFile, img_h, img_w):
# This first loads the image
# letterboxes/resizes
# divides by 255 to create values from 0.0 to 1.0
# Letter boxing
# When given a rectangular image
# If the network expects a square input
# Reshape the image such that its longer dimension fits exactly in the square
# i.e.
# ----------
# |--------|
# | IMAGE |
# |--------|
# ----------
cmdseqYolov2 = [
('resize2maxdim',(img_w,img_h)),
('pxlscale',(1.0/255.0)),
('crop_letterbox',(0.5)),
('chtranspose',(2,0,1)),
('chswap',(2,1,0))
]
img, orig_shape = loadImageBlobFromFileScriptBase(imgFile, cmdseqYolov2)
img = img[ np.newaxis, ...]
  img = np.ascontiguousarray(img, dtype=np.float32)  # assign: ascontiguousarray returns a new array
return img, orig_shape
def getFilePaths(paths_list):
ext = (".jpg",".jpeg",".JPG",".JPEG")
img_paths = []
for p in paths_list:
if os.path.isfile(p) and p.endswith(ext):
img_paths.append( os.path.abspath(p) )
else:
for dirpath,_,filenames in os.walk(p):
for f in filenames:
if f.endswith(ext):
img_paths.append( os.path.abspath(os.path.join(dirpath, f)))
return img_paths
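# Return the top-K (score, label) pairs, sorted by descending score.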
def getTopK(output, labels, topK):
output = output.flatten()
topKIdx = np.argsort(output)[-topK:]
topKVals = [output[ti] for ti in topKIdx]
  topKList = list(zip( topKVals, topKIdx ))  # materialize: zip() is an iterator in Python 3
  topKList.reverse()
return [(topKList[j][0], labels[topKList[j][1]]) for j in range(topK)]
def getGoldenMap(goldenFile):
goldenMap = OrderedDict()
with open(goldenFile, 'r') as f:
for line in f:
fname = line[:line.rfind(' ')]
goldenIdx = int(line[line.rfind(' ')+1:])
goldenMap[fname] = goldenIdx
return goldenMap
def isTopK ( out, goldenMap, fileName, labels, topK = 5):
f = ntpath.basename(fileName)
topKs = getTopK(out, labels, topK)
for (_, label) in topKs:
if ( label == labels[goldenMap[f]]):
return True
return False
def get_labels (label_file):
labels = None
if (label_file):
with open(label_file, 'r') as f:
labels = [line.strip() for line in f]
return labels
def printClassification(output, img_paths, labels, topK = 5):
if labels is not None:
print ( getClassification ( output, img_paths, labels, topK))
def getClassification(output, img_paths, labels, topK = 5, zmqPub = False):
"""
Print the result of classification given class scores, and a synset labels file.
:param output: Class scores, typically the output of the softmax layer.
:type output: numpy.ndarray.
:param img_paths: list of path(s) to image(s)
:param label_file: path to label file
:type args: dict.
"""
ret = ""
if not isinstance(img_paths, list):
img_paths = [img_paths]
for i,p in enumerate(img_paths):
topXs = getTopK(output[i,...], labels, topK)
inputImage = "for {:s} ".format(p if isinstance(p, str) else 'raw_input')
if zmqPub :
ret += (img_paths[i] + '\n')
else :
ret += "---------- Prediction {:d}/{:d} {:s}----------\n".format(i+1, output.shape[0], inputImage)
for (prob, label) in topXs:
ret += ("{:.4f} \"{:s}\"\n".format(prob, label))
return ret
def getNearFileMatchWithPrefix(path, prefix, index = 0):
nearMatches = [f for f in os.listdir(path) if f.startswith(prefix)]
nearMatches.sort()
if len(nearMatches) > 0:
return "%s/%s" % (path, nearMatches[index])
return None
def loadFCWeightsBias(arg, index = 0):
data_dir = arg['weights']
if ".h5" in data_dir:
with h5py.File(data_dir,'r') as f:
#keys = f.keys()
#print (keys)
key = list(f.keys())[0]
weight = list(np.array(f.get(key)).flatten())
key = list(f.keys())[1]
bias = list(np.array(f.get(key)).flatten())
else:
fname = "%s/fc" % data_dir
if not os.path.exists(fname):
nearMatch = getNearFileMatchWithPrefix(data_dir, "fc", index)
if nearMatch:
fname = nearMatch
if os.path.exists(fname):
with open(fname, 'r') as f:
line = f.read()
vals = line.strip().split(' ')
weight = [float(v) for v in vals]
else:
print("No FC layers found in {:s}".format(data_dir))
return (None, None)
fname = "%s/fc_bias" % data_dir
if not os.path.exists(fname):
nearMatch = getNearFileMatchWithPrefix(data_dir, "fc_bias", index)
if nearMatch:
fname = nearMatch
with open(fname, 'r') as f:
line = f.read()
vals = line.strip().split(' ')
bias = [float(v) for v in vals]
return (np.asarray(weight, dtype=np.float32), np.asarray(bias, dtype=np.float32))
|
[
"argparse.ArgumentParser",
"xfdnn.tools.compile.bin.xfdnn_compiler_caffe.default_compiler_arg_parser",
"os.walk",
"numpy.argsort",
"os.path.isfile",
"os.path.join",
"os.path.abspath",
"os.path.exists",
"numpy.transpose",
"cv2.resize",
"numpy.stack",
"h5py.File",
"numpy.asarray",
"xfdnn.tools.compile.bin.xfdnn_compiler_tensorflow.default_compiler_arg_parser",
"os.listdir",
"json.load",
"ntpath.basename",
"os.path.isdir",
"ext.PyTurboJPEG.imread",
"numpy.zeros",
"numpy.array",
"collections.OrderedDict",
"numpy.ascontiguousarray"
] |
[((1306, 1351), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""pyXDNN"""'}), "(description='pyXDNN')\n", (1329, 1351), False, 'import argparse\n'), ((4937, 4989), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""XDLF_compiled"""'}), "(description='XDLF_compiled')\n", (4960, 4989), False, 'import argparse\n'), ((21562, 21605), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img'], {'dtype': 'np.float32'}), '(img, dtype=np.float32)\n', (21582, 21605), True, 'import numpy as np\n'), ((22407, 22450), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img'], {'dtype': 'np.float32'}), '(img, dtype=np.float32)\n', (22427, 22450), True, 'import numpy as np\n'), ((23284, 23297), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (23295, 23297), False, 'from collections import OrderedDict\n'), ((23585, 23610), 'ntpath.basename', 'ntpath.basename', (['fileName'], {}), '(fileName)\n', (23600, 23610), False, 'import ntpath\n'), ((1030, 1047), 'os.path.exists', 'os.path.exists', (['x'], {}), '(x)\n', (1044, 1047), False, 'import os\n'), ((9613, 9645), 'xfdnn.tools.compile.bin.xfdnn_compiler_tensorflow.default_compiler_arg_parser', 'default_TF_compiler_arg_parser', ([], {}), '()\n', (9643, 9645), True, 'from xfdnn.tools.compile.bin.xfdnn_compiler_tensorflow import default_compiler_arg_parser as default_TF_compiler_arg_parser\n'), ((17043, 17059), 'ext.PyTurboJPEG.imread', '_imread', (['imgFile'], {}), '(imgFile)\n', (17050, 17059), True, 'from ext.PyTurboJPEG import imread as _imread\n'), ((23026, 23044), 'numpy.argsort', 'np.argsort', (['output'], {}), '(output)\n', (23036, 23044), True, 'import numpy as np\n'), ((25760, 25781), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (25774, 25781), False, 'import os\n'), ((26349, 26385), 'numpy.asarray', 'np.asarray', (['weight'], {'dtype': 'np.float32'}), '(weight, dtype=np.float32)\n', (26359, 26385), True, 'import numpy as np\n'), ((26387, 26421), 'numpy.asarray', 'np.asarray', (['bias'], {'dtype': 'np.float32'}), '(bias, dtype=np.float32)\n', (26397, 26421), True, 'import numpy as np\n'), ((9888, 9923), 'xfdnn.tools.compile.bin.xfdnn_compiler_caffe.default_compiler_arg_parser', 'default_CAFFE_compiler_arg_parser', ([], {}), '()\n', (9921, 9923), True, 'from xfdnn.tools.compile.bin.xfdnn_compiler_caffe import default_compiler_arg_parser as default_CAFFE_compiler_arg_parser\n'), ((15155, 15170), 'json.load', 'json.load', (['data'], {}), '(data)\n', (15164, 15170), False, 'import json\n'), ((17279, 17316), 'cv2.resize', 'cv2.resize', (['img', '(param[0], param[1])'], {}), '(img, (param[0], param[1]))\n', (17289, 17316), False, 'import cv2\n'), ((22607, 22624), 'os.path.isfile', 'os.path.isfile', (['p'], {}), '(p)\n', (22621, 22624), False, 'import os\n'), ((22750, 22760), 'os.walk', 'os.walk', (['p'], {}), '(p)\n', (22757, 22760), False, 'import os\n'), ((25066, 25082), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (25076, 25082), False, 'import os\n'), ((25331, 25355), 'h5py.File', 'h5py.File', (['data_dir', '"""r"""'], {}), "(data_dir, 'r')\n", (25340, 25355), False, 'import h5py\n'), ((25616, 25637), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (25630, 25637), False, 'import os\n'), ((26066, 26087), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (26080, 26087), False, 'import os\n'), ((14427, 14454), 'os.path.isdir', 'os.path.isdir', (['dir_or_image'], {}), '(dir_or_image)\n', (14440, 14454), False, 'import 
os\n'), ((15488, 15511), 'os.path.isfile', 'os.path.isfile', (['libPath'], {}), '(libPath)\n', (15502, 15511), False, 'import os\n'), ((16133, 16155), 'json.load', 'json.load', (['jsoncfgFile'], {}), '(jsoncfgFile)\n', (16142, 16155), False, 'import json\n'), ((17674, 17703), 'cv2.resize', 'cv2.resize', (['img', '(neww, newh)'], {}), '(img, (neww, newh))\n', (17684, 17703), False, 'import cv2\n'), ((22676, 22694), 'os.path.abspath', 'os.path.abspath', (['p'], {}), '(p)\n', (22691, 22694), False, 'import os\n'), ((18122, 18151), 'cv2.resize', 'cv2.resize', (['img', '(neww, newh)'], {}), '(img, (neww, newh))\n', (18132, 18151), False, 'import cv2\n'), ((14491, 14520), 'os.path.join', 'os.path.join', (['dir_or_image', 'f'], {}), '(dir_or_image, f)\n', (14503, 14520), False, 'import os\n'), ((18305, 18341), 'numpy.zeros', 'np.zeros', (['(newdim, newdim, channels)'], {}), '((newdim, newdim, channels))\n', (18313, 18341), True, 'import numpy as np\n'), ((14530, 14554), 'os.listdir', 'os.listdir', (['dir_or_image'], {}), '(dir_or_image)\n', (14540, 14554), False, 'import os\n'), ((14573, 14602), 'os.path.join', 'os.path.join', (['dir_or_image', 'f'], {}), '(dir_or_image, f)\n', (14585, 14602), False, 'import os\n'), ((22896, 22920), 'os.path.join', 'os.path.join', (['dirpath', 'f'], {}), '(dirpath, f)\n', (22908, 22920), False, 'import os\n'), ((19027, 19079), 'numpy.transpose', 'np.transpose', (['toshow', '(param[0], param[1], param[2])'], {}), '(toshow, (param[0], param[1], param[2]))\n', (19039, 19079), True, 'import numpy as np\n'), ((19692, 19736), 'numpy.array', 'np.array', (['param'], {'dtype': 'np.float32', 'order': '"""C"""'}), "(param, dtype=np.float32, order='C')\n", (19700, 19736), True, 'import numpy as np\n'), ((19854, 19903), 'numpy.transpose', 'np.transpose', (['img', '(param[0], param[1], param[2])'], {}), '(img, (param[0], param[1], param[2]))\n', (19866, 19903), True, 'import numpy as np\n'), ((20185, 20245), 'numpy.stack', 'np.stack', (['(ch[param[0]], ch[param[1]], ch[param[2]])'], {'axis': '(0)'}), '((ch[param[0]], ch[param[1]], ch[param[2]]), axis=0)\n', (20193, 20245), True, 'import numpy as np\n'), ((20391, 20451), 'numpy.stack', 'np.stack', (['(ch[param[0]], ch[param[1]], ch[param[2]])'], {'axis': '(2)'}), '((ch[param[0]], ch[param[1]], ch[param[2]]), axis=2)\n', (20399, 20451), True, 'import numpy as np\n')]
|
from PIL import Image
from torchvision import transforms
import torch
from torch.utils import data
import torch.nn.functional as F
import numpy as np
import pandas as pd
import json
import copy
from sklearn import metrics
import sys, getopt
import os
import glob
import random
import collections
import time
from tqdm import tqdm
import torch.nn as nn
sys.path.append('../utils/')
from Models import StudentModel
from Data_Generator import Dataset_WSI
import argparse
print( torch.cuda.current_device())
print( torch.cuda.device_count())
parser = argparse.ArgumentParser(description='TMA patches extractor')
parser.add_argument('-v','--VARIANT', type=str, default='train', help='student training variant to use (I,II,III,baseline)')
parser.add_argument('-a','--APPROACH', type=str, default='ssl', help='teacher/student approach: ssl (semi-supervised), swsl (semi-weakly supervised)')
parser.add_argument('-s','--SUBSET', type=int, default='19', help='subset of pseudo-labels to use 19=1000, 0=20000 pseudo labels per class')
parser.add_argument('-n','--N_EXP', type=int, default=0, help='number experiment')
parser.add_argument('-b','--BATCH_SIZE', type=int, default=32, help='batch size')
parser.add_argument('-t','--THRESHOLD', type=int, default=500, help='patches to select')
args = parser.parse_args()
THRESHOLD = args.THRESHOLD
MEDICAL_SOURCE = 'TCGA'
#MEDICAL_SOURCE = 'panda'
def create_dir(directory):
if not os.path.isdir(directory):
try:
os.mkdir(directory)
except OSError:
print ("Creation of the directory %s failed" % directory)
else:
print ("Successfully created the directory %s " % directory)
if (args.APPROACH=='ssl'):
approach = 'Semi_Supervised'
elif (args.APPROACH=='swsl'):
approach = 'Semi_Weakly_Supervised'
models_folder = 'LOCAL/PATH/../Teacher_Student_models/models_weights/'
create_dir(models_folder)
models_path = models_folder+approach+'/'
create_dir(models_path)
models_path = models_path+'Student_model/'
create_dir(models_path)
if (args.VARIANT!='baseline'):
models_path = models_path+'student_variant_'+args.VARIANT+'/'
create_dir(models_path)
models_path = models_path+'perc_'+str(args.SUBSET)+'/'
create_dir(models_path)
else:
models_path = models_path+'fully_supervised/'
create_dir(models_path)
models_path = models_path+'N_EXP_'+str(args.N_EXP)+'/'
create_dir(models_path)
checkpoint_path = models_path+'checkpoints/'
create_dir(checkpoint_path)
model_weights = models_path+'student_model.pt'
model = torch.load(model_weights)
#load testing data
test_dir = 'LOCAL/PATH/../Teacher_Student_models/WSI_patches/test_densely/'
csv_test = test_dir+'csv_test_densely.csv'
data_test = pd.read_csv(csv_test,header=None).values
data_test_paths = data_test[:,0]
data_test_labels = data_test[:,1:]
data_test_labels = data_test_labels.astype('int64')
print(data_test.shape)
print(data_test_paths.shape)
print(data_test_labels.shape)
array_probs = []
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#device = torch.device('cpu')
#DATA NORMALIZATION
preprocess = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
#model.eval()
model.to(device)
def create_dir(directory):
if not os.path.isdir(directory):
try:
os.mkdir(directory)
except OSError:
print ("Creation of the directory %s failed" % directory)
batch_size = args.BATCH_SIZE
num_workers = 32
params_test = {'batch_size': batch_size,
#'shuffle': True,
#'sampler': ImbalancedDatasetSampler(test_dataset),
'num_workers': num_workers}
def find_first_two(array):
x = np.copy(array)
max_1 = np.argmax(x)
max_val1 = x[max_1]
x[max_1]=-1
max_2 = np.argmax(x)
max_val2 = x[max_2]
if (MEDICAL_SOURCE=='TCGA'):
if(max_1==0 or max_2==0):
max_1 = max(max_1,max_2)
max_2 = max(max_1,max_2)
if (max_val1>(2*max_val2)):
max_2 = max_1
return max_1,max_2
def majority_voting(array):
majority = [0,0,0,0]
for i in range(array.shape[0]):
#print(prob)
idx = np.argmax(array[i])
majority[idx] = majority[idx]+1
if (MEDICAL_SOURCE=='TCGA'):
majority[0]=0
pgp, sgp = find_first_two(majority)
return pgp, sgp, majority
def load_and_evaluate(list_f,elems,directory_histograms):
testing_set = Dataset_WSI(list_f)
testing_generator = data.DataLoader(testing_set, **params_test)
array_probs = []
local_filenames = []
with torch.no_grad():
j = 0
for inputs, filenames in testing_generator:
inputs = inputs.to(device)
# zero the parameter gradients
#optimizer.zero_grad()
# forward + backward + optimize
try:
outputs = model(inputs)
except:
outputs = model.module(inputs)
probs = F.softmax(outputs)
#print(probs)
#accumulate values
probs = probs.cpu().data.numpy()
array_probs = np.append(array_probs,probs)
local_filenames = np.append(local_filenames,filenames)
array_probs = np.reshape(array_probs,(elems,4))
#array_probs = np.squeeze(array_probs)
#majority voting
pgp,sgp, histogram = majority_voting(array_probs)
y_preds.append([pgp,sgp])
#add pgp,sgp to y_preds
def gleason_score(primary,secondary):
array = []
if (MEDICAL_SOURCE=='panda'):
for i in range(len(primary)):
gs = primary[i]+secondary[i]
if (gs==3 and primary[i]==2):
gs = 3
elif (gs==3 and primary[i]==1):
gs = 2
elif (gs==4):
gs = 4
elif (gs>4):
gs = 5
array.append(gs)
elif (MEDICAL_SOURCE=='TCGA'):
for i in range(len(primary)):
a = primary[i]
b = secondary[i]
gs = a+b-2
if (a==1 and b==2):
gs = 1
elif (a==2 and b==1):
gs = 2
elif (gs==2):
gs = 3
elif (gs>2):
gs = 4
array.append(gs)
return array
def predict_metrics(y_pred,y_true,metric):
if(metric=='primary'):
#primary gleason pattern
y_true = y_true[:,0]
y_pred = y_pred[:,0]
k_score_primary = metrics.cohen_kappa_score(y_true,y_pred, weights='quadratic')
print("k_score_primary " + str(k_score_primary))
return k_score_primary
elif(metric=='secondary'):
#secondary gleason pattern
y_true = y_true[:,1]
y_pred = y_pred[:,1]
k_score_secondary = metrics.cohen_kappa_score(y_true,y_pred, weights='quadratic')
print("k_score_secondary " + str(k_score_secondary))
return k_score_secondary
else:
#gleason score
#y_true = y_true[:,0]+y_true[:,1]
#y_pred = y_pred[:,0]+y_pred[:,1]
y_true = gleason_score(y_true[:,0],y_true[:,1])
y_pred = gleason_score(y_pred[:,0],y_pred[:,1])
#print(y_pred)
k_score_score = metrics.cohen_kappa_score(y_true,y_pred, weights='quadratic')
print("k_score_score " + str(k_score_score))
return k_score_score
y_preds = []
filenames_array = []
histo_preds = []
histo_true = []
for p in data_test_paths:
d = os.path.split(p)[1]
directory = test_dir+d
#csv_file = directory+'/'+d+'_densely.csv'
csv_file = directory+'/'+d+'_densely_sorted_br_patches.csv'
directory_histograms = directory+'/histograms/'
create_dir(directory_histograms)
local_csv = pd.read_csv(csv_file,header=None).values[:THRESHOLD,0]
load_and_evaluate(local_csv,len(local_csv),directory_histograms)
filenames_array.append(d)
#METRICS
y_preds = np.array(y_preds)
y_true = data_test_labels
y_preds = y_preds.astype('int64')
"""
for i in range(len(y_preds)):
print(y_preds[i],y_true[i])
"""
kappa_score_primary = predict_metrics(y_preds,y_true,'primary')
kappa_score_secondary = predict_metrics(y_preds,y_true,'secondary')
kappa_score_score = predict_metrics(y_preds,y_true,'score')
kappa_score_best_PGP_filename = checkpoint_path+'kappa_score_PGP.csv'
kappa_score_best_SGP_filename = checkpoint_path+'kappa_score_SGP.csv'
kappa_score_best_GS_filename = checkpoint_path+'kappa_score_GS.csv'
kappas = [kappa_score_primary]
File = {'val':kappas}
df = pd.DataFrame(File,columns=['val'])
np.savetxt(kappa_score_best_PGP_filename, df.values, fmt='%s',delimiter=',')
kappas = [kappa_score_secondary]
File = {'val':kappas}
df = pd.DataFrame(File,columns=['val'])
np.savetxt(kappa_score_best_SGP_filename, df.values, fmt='%s',delimiter=',')
kappas = [kappa_score_score]
File = {'val':kappas}
df = pd.DataFrame(File,columns=['val'])
np.savetxt(kappa_score_best_GS_filename, df.values, fmt='%s',delimiter=',')
|
[
"os.mkdir",
"argparse.ArgumentParser",
"numpy.argmax",
"pandas.read_csv",
"torch.cuda.device_count",
"torch.cuda.current_device",
"torchvision.transforms.Normalize",
"torch.no_grad",
"sys.path.append",
"pandas.DataFrame",
"numpy.copy",
"Data_Generator.Dataset_WSI",
"torch.utils.data.DataLoader",
"torch.load",
"numpy.savetxt",
"numpy.append",
"numpy.reshape",
"sklearn.metrics.cohen_kappa_score",
"torch.cuda.is_available",
"os.path.isdir",
"torch.nn.functional.softmax",
"numpy.array",
"os.path.split",
"torchvision.transforms.ToTensor"
] |
[((354, 382), 'sys.path.append', 'sys.path.append', (['"""../utils/"""'], {}), "('../utils/')\n", (369, 382), False, 'import sys, getopt\n'), ((568, 628), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""TMA patches extractor"""'}), "(description='TMA patches extractor')\n", (591, 628), False, 'import argparse\n'), ((2572, 2597), 'torch.load', 'torch.load', (['model_weights'], {}), '(model_weights)\n', (2582, 2597), False, 'import torch\n'), ((8116, 8133), 'numpy.array', 'np.array', (['y_preds'], {}), '(y_preds)\n', (8124, 8133), True, 'import numpy as np\n'), ((8732, 8767), 'pandas.DataFrame', 'pd.DataFrame', (['File'], {'columns': "['val']"}), "(File, columns=['val'])\n", (8744, 8767), True, 'import pandas as pd\n'), ((8767, 8844), 'numpy.savetxt', 'np.savetxt', (['kappa_score_best_PGP_filename', 'df.values'], {'fmt': '"""%s"""', 'delimiter': '""","""'}), "(kappa_score_best_PGP_filename, df.values, fmt='%s', delimiter=',')\n", (8777, 8844), True, 'import numpy as np\n'), ((8906, 8941), 'pandas.DataFrame', 'pd.DataFrame', (['File'], {'columns': "['val']"}), "(File, columns=['val'])\n", (8918, 8941), True, 'import pandas as pd\n'), ((8941, 9018), 'numpy.savetxt', 'np.savetxt', (['kappa_score_best_SGP_filename', 'df.values'], {'fmt': '"""%s"""', 'delimiter': '""","""'}), "(kappa_score_best_SGP_filename, df.values, fmt='%s', delimiter=',')\n", (8951, 9018), True, 'import numpy as np\n'), ((9076, 9111), 'pandas.DataFrame', 'pd.DataFrame', (['File'], {'columns': "['val']"}), "(File, columns=['val'])\n", (9088, 9111), True, 'import pandas as pd\n'), ((9111, 9187), 'numpy.savetxt', 'np.savetxt', (['kappa_score_best_GS_filename', 'df.values'], {'fmt': '"""%s"""', 'delimiter': '""","""'}), "(kappa_score_best_GS_filename, df.values, fmt='%s', delimiter=',')\n", (9121, 9187), True, 'import numpy as np\n'), ((495, 522), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (520, 522), False, 'import torch\n'), ((531, 556), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (554, 556), False, 'import torch\n'), ((2750, 2784), 'pandas.read_csv', 'pd.read_csv', (['csv_test'], {'header': 'None'}), '(csv_test, header=None)\n', (2761, 2784), True, 'import pandas as pd\n'), ((3769, 3783), 'numpy.copy', 'np.copy', (['array'], {}), '(array)\n', (3776, 3783), True, 'import numpy as np\n'), ((3796, 3808), 'numpy.argmax', 'np.argmax', (['x'], {}), '(x)\n', (3805, 3808), True, 'import numpy as np\n'), ((3861, 3873), 'numpy.argmax', 'np.argmax', (['x'], {}), '(x)\n', (3870, 3873), True, 'import numpy as np\n'), ((4519, 4538), 'Data_Generator.Dataset_WSI', 'Dataset_WSI', (['list_f'], {}), '(list_f)\n', (4530, 4538), False, 'from Data_Generator import Dataset_WSI\n'), ((4563, 4606), 'torch.utils.data.DataLoader', 'data.DataLoader', (['testing_set'], {}), '(testing_set, **params_test)\n', (4578, 4606), False, 'from torch.utils import data\n'), ((5316, 5351), 'numpy.reshape', 'np.reshape', (['array_probs', '(elems, 4)'], {}), '(array_probs, (elems, 4))\n', (5326, 5351), True, 'import numpy as np\n'), ((1447, 1471), 'os.path.isdir', 'os.path.isdir', (['directory'], {}), '(directory)\n', (1460, 1471), False, 'import os\n'), ((3045, 3070), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3068, 3070), False, 'import torch\n'), ((3173, 3194), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3192, 3194), False, 'from torchvision import transforms\n'), ((3200, 3275), 'torchvision.transforms.Normalize', 
'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (3220, 3275), False, 'from torchvision import transforms\n'), ((3351, 3375), 'os.path.isdir', 'os.path.isdir', (['directory'], {}), '(directory)\n', (3364, 3375), False, 'import os\n'), ((4256, 4275), 'numpy.argmax', 'np.argmax', (['array[i]'], {}), '(array[i])\n', (4265, 4275), True, 'import numpy as np\n'), ((4664, 4679), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4677, 4679), False, 'import torch\n'), ((6601, 6663), 'sklearn.metrics.cohen_kappa_score', 'metrics.cohen_kappa_score', (['y_true', 'y_pred'], {'weights': '"""quadratic"""'}), "(y_true, y_pred, weights='quadratic')\n", (6626, 6663), False, 'from sklearn import metrics\n'), ((7673, 7689), 'os.path.split', 'os.path.split', (['p'], {}), '(p)\n', (7686, 7689), False, 'import os\n'), ((1498, 1517), 'os.mkdir', 'os.mkdir', (['directory'], {}), '(directory)\n', (1506, 1517), False, 'import os\n'), ((3402, 3421), 'os.mkdir', 'os.mkdir', (['directory'], {}), '(directory)\n', (3410, 3421), False, 'import os\n'), ((5053, 5071), 'torch.nn.functional.softmax', 'F.softmax', (['outputs'], {}), '(outputs)\n', (5062, 5071), True, 'import torch.nn.functional as F\n'), ((5201, 5230), 'numpy.append', 'np.append', (['array_probs', 'probs'], {}), '(array_probs, probs)\n', (5210, 5230), True, 'import numpy as np\n'), ((5260, 5297), 'numpy.append', 'np.append', (['local_filenames', 'filenames'], {}), '(local_filenames, filenames)\n', (5269, 5297), True, 'import numpy as np\n'), ((6930, 6992), 'sklearn.metrics.cohen_kappa_score', 'metrics.cohen_kappa_score', (['y_true', 'y_pred'], {'weights': '"""quadratic"""'}), "(y_true, y_pred, weights='quadratic')\n", (6955, 6992), False, 'from sklearn import metrics\n'), ((7399, 7461), 'sklearn.metrics.cohen_kappa_score', 'metrics.cohen_kappa_score', (['y_true', 'y_pred'], {'weights': '"""quadratic"""'}), "(y_true, y_pred, weights='quadratic')\n", (7424, 7461), False, 'from sklearn import metrics\n'), ((7939, 7973), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {'header': 'None'}), '(csv_file, header=None)\n', (7950, 7973), True, 'import pandas as pd\n')]
|
from abc import ABC, abstractmethod
import numpy as np
class User(ABC):
"""
User in the typing environment. Can be simulated or a real user.
    :param input_dim: (Tuple(int)) The dimensionality of the inputs this user produces.
"""
def __init__(self, input_dim, n_samples, baseline_temp, boltzmann_exploration):
self.env = None
self.input_dim = input_dim
self.n_samples = n_samples
self.model = None
self.baseline_temp = baseline_temp
self.boltzmann_exploration = boltzmann_exploration
def setup(self, env):
"""
Sets up the environment with the user. Must be called before running the user.
"""
self.env = env
def setup_model(self, model):
"""
Sets up the predictive model. Should be called by the model during initialization.
"""
self.model = model
@abstractmethod
def run(self, total_timesteps, callback, tb_log_name, mode, disable_learning):
"""
Runs the user with the typing environment.
"""
pass
def predict(self, obs):
"""
Returns the predictive model's distribution over actions given inputs.
"""
with self.model.sess.as_default():
obs = np.array(obs)[None]
pred = self.model.act(obs, self.env.targets)
pred = np.squeeze(pred)
return pred
def reset(self):
pass
@abstractmethod
def get_input(self):
"""
Gets the input at the current timestep
"""
pass
@abstractmethod
def baseline(self, obs):
"""
Baseline predictive method
"""
pass
@abstractmethod
def get_next_action_index(self):
"""
Gets the next desired action index according to the user's goal.
"""
pass
@staticmethod
def softmax(x):
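        # Note (editor's comment, not in the original): np.exp can overflow for
        # large logits; the usual stabilization is to exponentiate x - np.max(x),
        # which leaves the resulting softmax distribution unchanged.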
unnormalized = np.exp(x)
return unnormalized / np.sum(unnormalized)
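# Hedged sketch (not part of the original file): a concrete user is expected to
# subclass User and fill in the abstract hooks, roughly along these lines
# (ScriptedUser is a made-up name):
#
#     class ScriptedUser(User):
#         def run(self, total_timesteps, callback, tb_log_name, mode, disable_learning):
#             ...  # step self.env for total_timesteps, optionally calling callback
#         def get_input(self):
#             ...  # produce an input with shape self.input_dim
#         def baseline(self, obs):
#             ...  # non-learned prediction over actions, for comparison
#         def get_next_action_index(self):
#             ...  # index of the action this (simulated) user wants next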
|
[
"numpy.squeeze",
"numpy.sum",
"numpy.exp",
"numpy.array"
] |
[((1370, 1386), 'numpy.squeeze', 'np.squeeze', (['pred'], {}), '(pred)\n', (1380, 1386), True, 'import numpy as np\n'), ((1924, 1933), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1930, 1933), True, 'import numpy as np\n'), ((1964, 1984), 'numpy.sum', 'np.sum', (['unnormalized'], {}), '(unnormalized)\n', (1970, 1984), True, 'import numpy as np\n'), ((1278, 1291), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (1286, 1291), True, 'import numpy as np\n')]
|
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import random
from keras.utils import np_utils
from keras.datasets import mnist
from keras.models import Model,Sequential
from keras.layers import Input, Flatten, Dense, Dropout, Lambda, concatenate, Add
from keras.optimizers import RMSprop
from keras import backend as K
import os
from keras.callbacks import TensorBoard
num_classes = 74
epochs = 10
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
def euclidean_distance(vects):
x, y = vects
return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
# return 1
def eucl_dist_output_shape(shapes):
shape1, shape2 = shapes
return (shape1[0], 1)
def contrastive_loss(y_true, y_pred):
'''Contrastive loss from Hadsell-et-al.'06
http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
'''
margin = 1
return K.mean(y_true * K.square(y_pred) +
(1 - y_true) * K.square(K.maximum(margin - y_pred, 0)))
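# Quick arithmetic check of the loss above (illustrative only, plain numbers
# rather than Keras tensors): with margin = 1 and a predicted distance
# y_pred = 0.2, a pair labelled y_true = 1 contributes 0.2**2 = 0.04, while a
# pair labelled y_true = 0 contributes max(1 - 0.2, 0)**2 = 0.64, so the loss
# pulls y_true = 1 pairs together and pushes y_true = 0 pairs at least
# `margin` apart.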
# def create_pairs(x, digit_indices):
'''Positive and negative pair creation.
Alternates between positive and negative pairs.
'''
def create_pairs(x, y):
print(np.array(x).shape)
pairs = []
labels = []
# nums = len(x)
nums = 100
for i in range(nums):
for j in range(i, nums):
# print (x)
# print (np.array(ran1))
z1, z2 = x[i], x[j]
# print (z1)
pairs +=[[z1, z2]]
if y[i] == y[j]:
labels += [0]
else:
labels += [1]
# ran1 = []
# ran2 = []
# for k in range(0,90):
# ran1.append(random.random()*random.random()*1000.0)
# ran2.append(random.random()*random.random()*400.0)
# z1, z2 = np.array(ran1), np.array(ran2)
# # print (z1)
# pairs +=[[z1, z2]]
# labels += [random.randint(0,1)]
# # n = min([len(digit_indices[d]) for d in range(num_classes)]) - 1
# for d in range(num_classes):
# print (len(digit_indices[d]))
# n = -1
# # print (n)
# for d in range(num_classes):
# for i in range(n):
# z1, z2 = digit_indices[d][i], digit_indices[d][i + 1]
# pairs += [[x[z1], x[z2]]]
# inc = random.randrange(1, num_classes)
# dn = (d + inc) % num_classes
# z1, z2 = digit_indices[d][i], digit_indices[dn][i]
# pairs += [[x[z1], x[z2]]]
# labels += [1, 0]
return np.array(pairs), np.array(labels)
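# Illustrative note on the pairing above (shapes only, values made up): nums is
# hard-coded to 100, so x must have at least 100 rows; with x of shape
# (100, 90) the returned pairs array has shape (5050, 2, 90) -- one entry per
# unordered pair (i, j) with i <= j -- and labels holds 0 for same-class pairs
# and 1 for different-class pairs.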
def create_pairs_2(x1, x2, y1, y2):
# print(np.array(xs).shape)
pairs = []
labels = []
for i in range(len(x2)):
for j in range(len(x1)):
# print (x)
# print (np.array(ran1))
z1, z2 = x2[i], x1[j]
# print (z1)
pairs +=[[z1, z2]]
if y2[i] == y1[j]:
labels += [0]
else:
labels += [1]
# ran1 = []
# ran2 = []
# for k in range(0,90):
# ran1.append(random.random()*random.random()*1000.0)
# ran2.append(random.random()*random.random()*400.0)
# z1, z2 = np.array(ran1), np.array(ran2)
# # print (z1)
# pairs +=[[z1, z2]]
# labels += [random.randint(0,1)]
# # n = min([len(digit_indices[d]) for d in range(num_classes)]) - 1
# for d in range(num_classes):
# print (len(digit_indices[d]))
# n = -1
# # print (n)
# for d in range(num_classes):
# for i in range(n):
# z1, z2 = digit_indices[d][i], digit_indices[d][i + 1]
# pairs += [[x[z1], x[z2]]]
# inc = random.randrange(1, num_classes)
# dn = (d + inc) % num_classes
# z1, z2 = digit_indices[d][i], digit_indices[dn][i]
# pairs += [[x[z1], x[z2]]]
# labels += [1, 0]
return np.array(pairs), np.array(labels)
def create_base_network(input_shape):
'''Base network to be shared (eq. to feature extraction).
# '''
# print (input_shape)
input = Input(shape=input_shape)
# x = Flatten()(input)
# print (input)
x = Dense(400, activation ='relu',name='l1')(input)
x = Dropout(0.2,name='d1')(x)
x = Dense(300, activation ='relu',name='l2')(x)
x = Dropout(0.2,name='d2')(x)
x = Dense(200, activation='relu', name='l3')(x)
x = Dropout(0.2,name='d3')(x)
x = Dense(200, activation='relu', name='l4')(x)
x = Dropout(0.2, name='d4')(x)
# model.add(Dense(num_classes, activation = 'softmax'))
# x = Dense(128, activation='relu')(x)
# x = Dropout(0.1)(x)
# x = Dense(128, activation='relu')(x)
# x = Dropout(0.1)(x)
# x = Dense(128, activation='relu')(x)
return Model(input, x)
def compute_accuracy(y_true, y_pred):
'''Compute classification accuracy with a fixed threshold on distances.
'''
pred = y_pred.ravel() < 0.5
# print(pred)
return np.mean(pred == y_true)
def accuracy(y_true, y_pred):
'''Compute classification accuracy with a fixed threshold on distances.
'''
return K.mean(K.equal(y_true, K.cast(y_pred < 0.5, y_true.dtype)))
# the data, split between train and test sets
# (x_train, y_train), (x_test, y_test) = mnist.load_data()
# x_train = x_train.astype('float32')
# x_test = x_test.astype('float32')
# x_train /= 255
# x_test /= 255
def get_name(filename):
name_list = os.listdir(filename)
return name_list
def readfile(filename):
with open(filename) as file:
data = file.read()
return data
def get_data(path, x, y, label):
name_list = get_name(path)
data1 = readfile(os.path.join(path, name_list[0]))
data1 = data1.split(",")
# print data
data1.pop()
ori = len(data1)
# print (ori)
for each in name_list:
if each == ".DS_Store":
continue
data = readfile(os.path.join(path, each))
data = data.split(",")
# print data
data.pop()
# if len(data) >= 33:
# data.pop(33)
l = len(data)
# print (data)
# print(np.array(data).shape)
for k in range(0, len(data)):
data[k] = float(data[k])
# trainx.append()
if l == ori:
# print (data)
x.append(data)
a = each.split("_")[1]
y.append([label[a]])
path_train = './trainingfeature'
path_test = './testingfeature'
x_test = []
y_test = []
x_train = []
y_train = []
label = dict()
name_list = get_name(path_train)
p = 0
for i in range(0,len(name_list)):
if name_list[i] == ".DS_Store":
continue
a = name_list[i].split("_")[1]
if a in label.keys():
continue
label[a] = p
p += 1
# print(len(label.keys()))
# print (p)
# print len(label)
get_data(path_train, x_train, y_train, label)
get_data(path_test, x_test, y_test, label)
# print (y_test)
# num_classes = 74
x_train = np.array(x_train)
x_test = np.array(x_test)
# y_train = np_utils.to_categorical(y_train,74)
# y_test = np_utils.to_categorical(y_test,74)
y_train = np.array(y_train)
y_test = np.array(y_test)
# print (y_test)
# print (y_train)
input_shape = x_train.shape[1:]
# input_shape = (1090,)
# create training+test positive and negative pairs
digit_indices = [np.where(y_train == i)[0] for i in range(num_classes)]
# print(digit_indices)
# tr_pairs, tr_y = create_pairs(x_train, digit_indices)
tr_pairs, tr_y = create_pairs(x_train, y_train)
print (tr_pairs.shape)
digit_indices = [np.where(y_test == i)[0] for i in range(num_classes)]
# te_pairs, te_y = create_pairs_2(x_test, x_train, y_test, y_train)
te_pairs, te_y = create_pairs(x_test, y_test)
print (te_pairs.shape)
te_y = np_utils.to_categorical(te_y,2)
tr_y = np_utils.to_categorical(tr_y,2)
# for i in range(len(te_))
# tr_y = np_utils.to_categorical(tr_y,2)
# te_y = np_utils.to_categorical(te_y,2)
# print (tr_pairs)
# print (te_y)
# network definition
base_network = create_base_network(input_shape)
base_network.load_weights("./weights/base_model.hdf5", by_name = True)
input_a = Input(shape=input_shape)
input_b = Input(shape=input_shape)
# because we re-use the same instance `base_network`,
# the weights of the network
# will be shared across the two branches
processed_a = base_network(input_a)
# processed_a.load_weights("./weights/base_model.hdf5", by_name = True)
processed_b = base_network(input_b)
# processed_b.load_weights("./weights/base_model.hdf5", by_name = True)
distance = Lambda(euclidean_distance,
output_shape=eucl_dist_output_shape)([processed_a, processed_b])
# print(distance)
# model = Model([input_a, input_b], distance)
# model = Model([input_a, input_b])
# model = Sequential()
# x = merge([processed_a, processed_b],mode='concat')
# x = Dense(3000, activation ='relu')(x)
# x = Dropout(0.2)(x)
# x = Dense(2000, activation='relu')(x)
# x = Dropout(0.2)(x)
# x = Dense(2000, activation='relu')(x)
# x = Dropout(0.2)(x)
# x = Dense(2, activation='softmax')(x)
# model = Model([input_a, input_b],x)
added = concatenate([processed_a, processed_b]) # equivalent to added = keras.layers.add([x1, x2])
print (processed_a.shape)
out = Dense(400,activation ='relu')(added)
out = Dropout(0.2)(out)
out = Dense(200,activation ='relu')(out)
out = Dropout(0.2)(out)
out = Dense(2,activation ='softmax')(out)
model = Model(inputs=[input_a, input_b], outputs=out)
model.summary()
# model.add(Add()([processed_a, processed_b]))
# model.add(400, activation ='relu')
# model.add(Dropout(0.2))
# model.add(200, activation ='relu')
# model.add(Dropout(0.2))
# model.add(Dense(2, activation='softmax'))
# train
rms = RMSprop()
tensorboard = TensorBoard(log_dir='log', histogram_freq=0,write_graph=True,write_images=True)
# model.compile(loss=contrastive_loss, optimizer=rms, metrics=[accuracy])
# model.fit([tr_pairs[:, 0], tr_pairs[:, 1]], tr_y,
# batch_size=100,
# validation_split=0.2,epochs=epochs,callbacks=[tensorboard])
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# Fit the model
model.fit([tr_pairs[:, 0], tr_pairs[:, 1]], tr_y,
validation_split=0.2, epochs=epochs, batch_size=32, verbose=2, callbacks=[tensorboard])
scores = model.evaluate([te_pairs[:, 0], te_pairs[:, 1]], te_y,verbose=0)
# print (scores)
# compute final accuracy on training and test sets
y_pred = model.predict([tr_pairs[:, 0], tr_pairs[:, 1]])
tr_acc = compute_accuracy(tr_y, y_pred)
# print (te_pairs[:,0])
# print(np.array([tr_pairs[:, 0], tr_pairs[:, 1]]).shape)
# print (x_train)
# print(len(x_train[1]))
# for i in range(len(x_test)):
# min_pro = 0.0
# index = 0
# for j in range(len(x_train)):
# # print(x_test)
# # print(x_test[i,:])
# # print()
# z1, z2 = x_test[i], x_train[j]
# # print(len(z1))
# # print((np.array([np.array([z1]), np.array([z2])]).shape))
# pre = model.predict([np.array([z1]), np.array([z2])])
# if pre[0][0] > min_pro:
# min_pro = pre[0][0]
# print (min_pro)
# print (pre)
y_pred = model.predict([te_pairs[:, 0], te_pairs[:, 1]])
# print (np.array(y_pred).shape)
print (model.predict([te_pairs[:, 0], te_pairs[:, 1]]))
te_acc = compute_accuracy(te_y, y_pred)
print('* Accuracy on training set: %0.2f%%' % (100 * tr_acc))
print('* Accuracy on test set: %0.2f%%' % (100 * te_acc))
|
[
"keras.layers.Dropout",
"keras.backend.epsilon",
"keras.models.Model",
"keras.backend.square",
"keras.optimizers.RMSprop",
"keras.utils.np_utils.to_categorical",
"keras.callbacks.TensorBoard",
"numpy.array",
"numpy.mean",
"keras.layers.Lambda",
"keras.layers.Dense",
"keras.layers.Input",
"numpy.where",
"keras.layers.concatenate",
"keras.backend.cast",
"os.path.join",
"os.listdir",
"keras.backend.maximum"
] |
[((7099, 7116), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (7107, 7116), True, 'import numpy as np\n'), ((7126, 7142), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (7134, 7142), True, 'import numpy as np\n'), ((7248, 7265), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (7256, 7265), True, 'import numpy as np\n'), ((7275, 7291), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (7283, 7291), True, 'import numpy as np\n'), ((7877, 7909), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['te_y', '(2)'], {}), '(te_y, 2)\n', (7900, 7909), False, 'from keras.utils import np_utils\n'), ((7916, 7948), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['tr_y', '(2)'], {}), '(tr_y, 2)\n', (7939, 7948), False, 'from keras.utils import np_utils\n'), ((8243, 8267), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (8248, 8267), False, 'from keras.layers import Input, Flatten, Dense, Dropout, Lambda, concatenate, Add\n'), ((8278, 8302), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (8283, 8302), False, 'from keras.layers import Input, Flatten, Dense, Dropout, Lambda, concatenate, Add\n'), ((9216, 9255), 'keras.layers.concatenate', 'concatenate', (['[processed_a, processed_b]'], {}), '([processed_a, processed_b])\n', (9227, 9255), False, 'from keras.layers import Input, Flatten, Dense, Dropout, Lambda, concatenate, Add\n'), ((9516, 9561), 'keras.models.Model', 'Model', ([], {'inputs': '[input_a, input_b]', 'outputs': 'out'}), '(inputs=[input_a, input_b], outputs=out)\n', (9521, 9561), False, 'from keras.models import Model, Sequential\n'), ((9810, 9819), 'keras.optimizers.RMSprop', 'RMSprop', ([], {}), '()\n', (9817, 9819), False, 'from keras.optimizers import RMSprop\n'), ((9834, 9920), 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': '"""log"""', 'histogram_freq': '(0)', 'write_graph': '(True)', 'write_images': '(True)'}), "(log_dir='log', histogram_freq=0, write_graph=True, write_images\n =True)\n", (9845, 9920), False, 'from keras.callbacks import TensorBoard\n'), ((4243, 4267), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (4248, 4267), False, 'from keras.layers import Input, Flatten, Dense, Dropout, Lambda, concatenate, Add\n'), ((4917, 4932), 'keras.models.Model', 'Model', (['input', 'x'], {}), '(input, x)\n', (4922, 4932), False, 'from keras.models import Model, Sequential\n'), ((5118, 5141), 'numpy.mean', 'np.mean', (['(pred == y_true)'], {}), '(pred == y_true)\n', (5125, 5141), True, 'import numpy as np\n'), ((5583, 5603), 'os.listdir', 'os.listdir', (['filename'], {}), '(filename)\n', (5593, 5603), False, 'import os\n'), ((8656, 8719), 'keras.layers.Lambda', 'Lambda', (['euclidean_distance'], {'output_shape': 'eucl_dist_output_shape'}), '(euclidean_distance, output_shape=eucl_dist_output_shape)\n', (8662, 8719), False, 'from keras.layers import Input, Flatten, Dense, Dropout, Lambda, concatenate, Add\n'), ((9340, 9369), 'keras.layers.Dense', 'Dense', (['(400)'], {'activation': '"""relu"""'}), "(400, activation='relu')\n", (9345, 9369), False, 'from keras.layers import Input, Flatten, Dense, Dropout, Lambda, concatenate, Add\n'), ((9383, 9395), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (9390, 9395), False, 'from keras.layers import Input, Flatten, Dense, Dropout, Lambda, concatenate, Add\n'), ((9407, 9436), 'keras.layers.Dense', 'Dense', (['(200)'], 
{'activation': '"""relu"""'}), "(200, activation='relu')\n", (9412, 9436), False, 'from keras.layers import Input, Flatten, Dense, Dropout, Lambda, concatenate, Add\n'), ((9448, 9460), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (9455, 9460), False, 'from keras.layers import Input, Flatten, Dense, Dropout, Lambda, concatenate, Add\n'), ((9472, 9502), 'keras.layers.Dense', 'Dense', (['(2)'], {'activation': '"""softmax"""'}), "(2, activation='softmax')\n", (9477, 9502), False, 'from keras.layers import Input, Flatten, Dense, Dropout, Lambda, concatenate, Add\n'), ((2608, 2623), 'numpy.array', 'np.array', (['pairs'], {}), '(pairs)\n', (2616, 2623), True, 'import numpy as np\n'), ((2625, 2641), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (2633, 2641), True, 'import numpy as np\n'), ((4059, 4074), 'numpy.array', 'np.array', (['pairs'], {}), '(pairs)\n', (4067, 4074), True, 'import numpy as np\n'), ((4076, 4092), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (4084, 4092), True, 'import numpy as np\n'), ((4323, 4363), 'keras.layers.Dense', 'Dense', (['(400)'], {'activation': '"""relu"""', 'name': '"""l1"""'}), "(400, activation='relu', name='l1')\n", (4328, 4363), False, 'from keras.layers import Input, Flatten, Dense, Dropout, Lambda, concatenate, Add\n'), ((4379, 4402), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {'name': '"""d1"""'}), "(0.2, name='d1')\n", (4386, 4402), False, 'from keras.layers import Input, Flatten, Dense, Dropout, Lambda, concatenate, Add\n'), ((4413, 4453), 'keras.layers.Dense', 'Dense', (['(300)'], {'activation': '"""relu"""', 'name': '"""l2"""'}), "(300, activation='relu', name='l2')\n", (4418, 4453), False, 'from keras.layers import Input, Flatten, Dense, Dropout, Lambda, concatenate, Add\n'), ((4465, 4488), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {'name': '"""d2"""'}), "(0.2, name='d2')\n", (4472, 4488), False, 'from keras.layers import Input, Flatten, Dense, Dropout, Lambda, concatenate, Add\n'), ((4499, 4539), 'keras.layers.Dense', 'Dense', (['(200)'], {'activation': '"""relu"""', 'name': '"""l3"""'}), "(200, activation='relu', name='l3')\n", (4504, 4539), False, 'from keras.layers import Input, Flatten, Dense, Dropout, Lambda, concatenate, Add\n'), ((4551, 4574), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {'name': '"""d3"""'}), "(0.2, name='d3')\n", (4558, 4574), False, 'from keras.layers import Input, Flatten, Dense, Dropout, Lambda, concatenate, Add\n'), ((4585, 4625), 'keras.layers.Dense', 'Dense', (['(200)'], {'activation': '"""relu"""', 'name': '"""l4"""'}), "(200, activation='relu', name='l4')\n", (4590, 4625), False, 'from keras.layers import Input, Flatten, Dense, Dropout, Lambda, concatenate, Add\n'), ((4637, 4660), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {'name': '"""d4"""'}), "(0.2, name='d4')\n", (4644, 4660), False, 'from keras.layers import Input, Flatten, Dense, Dropout, Lambda, concatenate, Add\n'), ((5812, 5844), 'os.path.join', 'os.path.join', (['path', 'name_list[0]'], {}), '(path, name_list[0])\n', (5824, 5844), False, 'import os\n'), ((7455, 7477), 'numpy.where', 'np.where', (['(y_train == i)'], {}), '(y_train == i)\n', (7463, 7477), True, 'import numpy as np\n'), ((7678, 7699), 'numpy.where', 'np.where', (['(y_test == i)'], {}), '(y_test == i)\n', (7686, 7699), True, 'import numpy as np\n'), ((614, 625), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (623, 625), True, 'from keras import backend as K\n'), ((1213, 1224), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', 
(1221, 1224), True, 'import numpy as np\n'), ((5292, 5326), 'keras.backend.cast', 'K.cast', (['(y_pred < 0.5)', 'y_true.dtype'], {}), '(y_pred < 0.5, y_true.dtype)\n', (5298, 5326), True, 'from keras import backend as K\n'), ((6051, 6075), 'os.path.join', 'os.path.join', (['path', 'each'], {}), '(path, each)\n', (6063, 6075), False, 'import os\n'), ((573, 588), 'keras.backend.square', 'K.square', (['(x - y)'], {}), '(x - y)\n', (581, 588), True, 'from keras import backend as K\n'), ((942, 958), 'keras.backend.square', 'K.square', (['y_pred'], {}), '(y_pred)\n', (950, 958), True, 'from keras import backend as K\n'), ((1003, 1032), 'keras.backend.maximum', 'K.maximum', (['(margin - y_pred)', '(0)'], {}), '(margin - y_pred, 0)\n', (1012, 1032), True, 'from keras import backend as K\n')]
|
import gym
import numpy as np
import pygame
from pygame.locals import *
import time
import sys
sys.path.append('../')
ENV_NAME = 'PlaygroundNavigationHuman-v1'
from src.playground_env.reward_function import sample_descriptions_from_state, get_reward_from_state
from src.playground_env.descriptions import generate_all_descriptions
from src.playground_env.env_params import get_env_params
"""
Playing script. Control the agent with the arrows, close the gripper with the space bar.
"""
env = gym.make(ENV_NAME, reward_screen=False, viz_data_collection=True)
pygame.init()
env_params = get_env_params()
train_descriptions, test_descriptions, extra_descriptions = generate_all_descriptions(env_params)
all_descriptions = train_descriptions + test_descriptions
# Select the goal to generate the scene.
goal_str = np.random.choice(all_descriptions)
env.reset()
env.unwrapped.reset_with_goal(goal_str)
while True:
# init_render
action = np.zeros([3])
for event in pygame.event.get():
if hasattr(event, 'key'):
# J1
if (event.key == K_DOWN):
action[1] = -1
elif event.key == K_UP:
action[1] = 1
# J2
elif (event.key == K_LEFT):
action[0] = -1
elif event.key == K_RIGHT:
action[0] = 1
# J3
elif event.key == K_SPACE:
action[2] = 1
elif event.key == K_q:
stop = True
if action.sum() != 0:
time.sleep(0.05)
break
out = env.step(action)
env.render()
# Sample descriptions of the current state
train_descr, test_descr, extra_descr = sample_descriptions_from_state(out[0], env.unwrapped.params)
descr = train_descr + test_descr
print(descr)
# assert that the reward function works, should give positive rewards for descriptions sampled, negative for others.
for d in descr:
assert get_reward_from_state(out[0], d, env_params)
for d in np.random.choice(list(set(all_descriptions) - set(descr)), size=20):
assert not get_reward_from_state(out[0], d, env_params)
|
[
"sys.path.append",
"src.playground_env.reward_function.get_reward_from_state",
"gym.make",
"src.playground_env.reward_function.sample_descriptions_from_state",
"pygame.event.get",
"numpy.zeros",
"pygame.init",
"src.playground_env.descriptions.generate_all_descriptions",
"time.sleep",
"numpy.random.choice",
"src.playground_env.env_params.get_env_params"
] |
[((96, 118), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (111, 118), False, 'import sys\n'), ((494, 559), 'gym.make', 'gym.make', (['ENV_NAME'], {'reward_screen': '(False)', 'viz_data_collection': '(True)'}), '(ENV_NAME, reward_screen=False, viz_data_collection=True)\n', (502, 559), False, 'import gym\n'), ((560, 573), 'pygame.init', 'pygame.init', ([], {}), '()\n', (571, 573), False, 'import pygame\n'), ((588, 604), 'src.playground_env.env_params.get_env_params', 'get_env_params', ([], {}), '()\n', (602, 604), False, 'from src.playground_env.env_params import get_env_params\n'), ((665, 702), 'src.playground_env.descriptions.generate_all_descriptions', 'generate_all_descriptions', (['env_params'], {}), '(env_params)\n', (690, 702), False, 'from src.playground_env.descriptions import generate_all_descriptions\n'), ((815, 849), 'numpy.random.choice', 'np.random.choice', (['all_descriptions'], {}), '(all_descriptions)\n', (831, 849), True, 'import numpy as np\n'), ((948, 961), 'numpy.zeros', 'np.zeros', (['[3]'], {}), '([3])\n', (956, 961), True, 'import numpy as np\n'), ((979, 997), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (995, 997), False, 'import pygame\n'), ((1717, 1777), 'src.playground_env.reward_function.sample_descriptions_from_state', 'sample_descriptions_from_state', (['out[0]', 'env.unwrapped.params'], {}), '(out[0], env.unwrapped.params)\n', (1747, 1777), False, 'from src.playground_env.reward_function import sample_descriptions_from_state, get_reward_from_state\n'), ((1989, 2033), 'src.playground_env.reward_function.get_reward_from_state', 'get_reward_from_state', (['out[0]', 'd', 'env_params'], {}), '(out[0], d, env_params)\n', (2010, 2033), False, 'from src.playground_env.reward_function import sample_descriptions_from_state, get_reward_from_state\n'), ((2135, 2179), 'src.playground_env.reward_function.get_reward_from_state', 'get_reward_from_state', (['out[0]', 'd', 'env_params'], {}), '(out[0], d, env_params)\n', (2156, 2179), False, 'from src.playground_env.reward_function import sample_descriptions_from_state, get_reward_from_state\n'), ((1542, 1558), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (1552, 1558), False, 'import time\n')]
|
# -- coding: utf-8 --
import numpy as np
from sklearn.model_selection import train_test_split
def load_data(file_path='/data/u.data'):
"""
    Load the MovieLens rating data.
    :param file_path: location of the ratings (u.data) file
    :return: array of (uid, mid, rating) triples, number of users, number of movies
"""
data = []
for line in open(file_path, 'r'):
arr = line.split()
uid = int(arr[0])
mid = int(arr[1])
rating = int(arr[2])
data.append([uid, mid, rating])
data = np.array(data)
n_users = np.max(data[:, 0]) + 1
n_movies = len(np.unique(data[:, 1])) + 1
return np.array(data), n_users, n_movies
def split_data(data, test_size=0.2):
"""
    Split the dataset into training and test sets.
    :param data: the original rating data
    :param test_size: fraction of the data held out for testing
    :return: train_data, test_data arrays
"""
    return train_test_split(data, test_size=test_size)
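# Minimal usage sketch (illustrative path; u.data is the MovieLens-100K ratings
# file with whitespace-separated uid, mid, rating, timestamp columns):
#
#     ratings, n_users, n_movies = load_data('/data/u.data')
#     train_data, test_data = split_data(ratings, test_size=0.2)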
|
[
"sklearn.model_selection.train_test_split",
"numpy.max",
"numpy.array",
"numpy.unique"
] |
[((467, 481), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (475, 481), True, 'import numpy as np\n'), ((795, 828), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'test_size'], {}), '(data, test_size)\n', (811, 828), False, 'from sklearn.model_selection import train_test_split\n'), ((496, 514), 'numpy.max', 'np.max', (['data[:, 0]'], {}), '(data[:, 0])\n', (502, 514), True, 'import numpy as np\n'), ((577, 591), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (585, 591), True, 'import numpy as np\n'), ((538, 559), 'numpy.unique', 'np.unique', (['data[:, 1]'], {}), '(data[:, 1])\n', (547, 559), True, 'import numpy as np\n')]
|
import numpy as np
def get_1d_gauss_kernel(sigma, extent=3):
"""Build a 1-dimensional Gaussian kernel.
Parameters
----------
sigma : int or float
The standard deviation of the Gaussian function.
extent : int, optional
How many times sigma to consider on each side of the mean. 3 x sigma
would cover >99% of the values in a Gaussian. This parameter defines the
length of the returned kernel, i.e. = (2 * extent) + 1.
Returns
-------
out : 1d array
The Gaussian kernel.
"""
n = np.ceil(sigma * extent)
kernel = np.exp(-(np.arange(-n, n + 1) ** 2) / (2 * (sigma ** 2)))
kernel = kernel / np.sum(kernel) # Normalize
return kernel
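# Quick check of the construction above (not part of the original module): for
# sigma = 2 and the default extent = 3, n = 6, so the kernel has
# 2 * 6 + 1 = 13 taps and sums to 1 after normalization, e.g.
#
#     k = get_1d_gauss_kernel(2.0)
#     assert len(k) == 13 and np.isclose(k.sum(), 1.0)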
def get_1d_LoG_kernel(sigma, extent=3, return_threshold_factor=False):
"""Build a 1-dimensional Laplacian of Gaussian (LoG) kernel.
Parameters
----------
sigma : int or float
The standard deviation of the Gaussian function.
extent : int, optional
How many times sigma to consider on each side of the mean. 3 x sigma
would cover >99% of the values in a Gaussian. This parameter defines the
length of the returned kernel, i.e. = (2 * extent) + 1.
return_threshold_factor: bool, optional
Whether or not to return the threshold factor.
Returns
-------
out : 1d array
The Laplacian of Gaussian kernel.
"""
kernel = get_1d_gauss_kernel(sigma, extent)
kernel_len = np.int32(len(kernel))
kernel_half_len = kernel_len // 2
# Compute the Laplacian of the above Gaussian.
# The first (sigma ** 2) below is the normalising factor to render the
# outputs scale-invariant. The rest is the 2nd derivative of the Gaussian.
kernel = (sigma ** 2) * \
(-1 / (sigma ** 4)) * kernel * \
((np.arange(-kernel_half_len, kernel_half_len + 1) ** 2) -
(sigma ** 2))
# Sum of the points within one-sigma of mean
threshold_factor = np.sum(kernel) - (
        2 * np.sum(kernel[0:np.ceil(2 * sigma).astype(int)]))
# Note: Doing this before removal of DC (below) because it undesirably
# lowers the threshold for larger sigma.
# Normalize, in order to set the convolution outputs to be closer to
# putative blobs' original SNRs.
kernel /= threshold_factor
# Remove DC
# kernel -= np.mean(kernel) # Not really necessary
if return_threshold_factor:
return kernel, threshold_factor
else:
return kernel
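# Hedged usage sketch: thanks to the sigma**2 factor above, responses from
# convolving a signal with LoG kernels of different widths stay comparable
# across scales; `signal` below is any 1d array (a placeholder, not defined in
# this module):
#
#     log_k = get_1d_LoG_kernel(3.0)
#     response = np.convolve(signal, log_k, mode='same')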
def first_true(bool_mask, invalid_val=-1):
"""Get the index of the first True value in the numpy 1D array 'bool_mask'.
Returns 'invalid_val' if a True value can not be found.
Based on - https://stackoverflow.com/a/47269413"""
if len(bool_mask) == 0:
return invalid_val
return np.where(bool_mask.any(), bool_mask.argmax(), invalid_val)
def last_true(bool_mask, invalid_val=-1):
"""Get the index of the last True value in the numpy 1D array 'bool_mask'.
Returns 'invalid_val' if a True value can not be found.
Based on - https://stackoverflow.com/a/47269413"""
if len(bool_mask) == 0:
return invalid_val
val = bool_mask.shape[0] - np.flip(bool_mask, axis=0).argmax() - 1
return np.where(bool_mask.any(), val, invalid_val)
|
[
"numpy.arange",
"numpy.sum",
"numpy.ceil",
"numpy.flip"
] |
[((561, 584), 'numpy.ceil', 'np.ceil', (['(sigma * extent)'], {}), '(sigma * extent)\n', (568, 584), True, 'import numpy as np\n'), ((678, 692), 'numpy.sum', 'np.sum', (['kernel'], {}), '(kernel)\n', (684, 692), True, 'import numpy as np\n'), ((1998, 2012), 'numpy.sum', 'np.sum', (['kernel'], {}), '(kernel)\n', (2004, 2012), True, 'import numpy as np\n'), ((1840, 1888), 'numpy.arange', 'np.arange', (['(-kernel_half_len)', '(kernel_half_len + 1)'], {}), '(-kernel_half_len, kernel_half_len + 1)\n', (1849, 1888), True, 'import numpy as np\n'), ((607, 627), 'numpy.arange', 'np.arange', (['(-n)', '(n + 1)'], {}), '(-n, n + 1)\n', (616, 627), True, 'import numpy as np\n'), ((3211, 3237), 'numpy.flip', 'np.flip', (['bool_mask'], {'axis': '(0)'}), '(bool_mask, axis=0)\n', (3218, 3237), True, 'import numpy as np\n'), ((2045, 2063), 'numpy.ceil', 'np.ceil', (['(2 * sigma)'], {}), '(2 * sigma)\n', (2052, 2063), True, 'import numpy as np\n')]
|
"""Plotting functions for visualizing distributions."""
from __future__ import division
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import warnings
try:
import statsmodels.nonparametric.api as smnp
_has_statsmodels = True
except ImportError:
_has_statsmodels = False
from .utils import set_hls_values, iqr, _kde_support
from .palettes import color_palette, blend_palette
from .axisgrid import JointGrid
def _freedman_diaconis_bins(a):
"""Calculate number of hist bins using Freedman-Diaconis rule."""
# From http://stats.stackexchange.com/questions/798/
a = np.asarray(a)
h = 2 * iqr(a) / (len(a) ** (1 / 3))
# fall back to 10 bins if iqr is 0
if h == 0:
return 10.
else:
return np.ceil((a.max() - a.min()) / h)
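# Worked example of the rule above (illustrative numbers): for 1000 samples
# spanning a range of 10 with an IQR of 2, the bin width is
# h = 2 * 2 / 1000 ** (1 / 3) = 0.4, giving ceil(10 / 0.4) = 25 bins.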
def distplot(a, bins=None, hist=True, kde=True, rug=False, fit=None,
hist_kws=None, kde_kws=None, rug_kws=None, fit_kws=None,
color=None, vertical=False, norm_hist=False, axlabel=None,
label=None, ax=None):
"""Flexibly plot a distribution of observations.
Parameters
----------
a : (squeezable to) 1d array
Observed data.
bins : argument for matplotlib hist(), or None, optional
Specification of hist bins, or None to use Freedman-Diaconis rule.
hist : bool, optional
Whether to plot a (normed) histogram.
kde : bool, optional
Whether to plot a gaussian kernel density estimate.
rug : bool, optional
Whether to draw a rugplot on the support axis.
fit : random variable object, optional
        An object with a `fit` method, returning a tuple that can be passed
        to a `pdf` method as positional arguments following a grid of values
        to evaluate the pdf on.
{hist, kde, rug, fit}_kws : dictionaries, optional
Keyword arguments for underlying plotting functions.
color : matplotlib color, optional
Color to plot everything but the fitted curve in.
vertical : bool, optional
        If True, observed values are on y-axis.
    norm_hist : bool, optional
If True, the histogram height shows a density rather than a count.
This is implied if a KDE or fitted density is plotted.
axlabel : string, False, or None, optional
Name for the support axis label. If None, will try to get it
        from a.name; if False, do not set a label.
label : string, optional
        Legend label for the relevant component of the plot
ax : matplotlib axis, optional
if provided, plot on this axis
Returns
-------
ax : matplotlib axis
"""
if ax is None:
ax = plt.gca()
# Intelligently label the support axis
label_ax = bool(axlabel)
if axlabel is None and hasattr(a, "name"):
axlabel = a.name
if axlabel is not None:
label_ax = True
# Make a a 1-d array
a = np.asarray(a).squeeze()
# Decide if the hist is normed
norm_hist = norm_hist or kde or (fit is not None)
# Handle dictionary defaults
if hist_kws is None:
hist_kws = dict()
if kde_kws is None:
kde_kws = dict()
if rug_kws is None:
rug_kws = dict()
if fit_kws is None:
fit_kws = dict()
# Get the color from the current color cycle
if color is None:
if vertical:
line, = ax.plot(0, a.mean())
else:
line, = ax.plot(a.mean(), 0)
color = line.get_color()
line.remove()
# Plug the label into the right kwarg dictionary
if label is not None:
if hist:
hist_kws["label"] = label
elif kde:
kde_kws["label"] = label
elif rug:
rug_kws["label"] = label
elif fit:
fit_kws["label"] = label
if hist:
if bins is None:
bins = _freedman_diaconis_bins(a)
hist_kws.setdefault("alpha", 0.4)
hist_kws.setdefault("normed", norm_hist)
orientation = "horizontal" if vertical else "vertical"
hist_color = hist_kws.pop("color", color)
ax.hist(a, bins, orientation=orientation,
color=hist_color, **hist_kws)
if hist_color != color:
hist_kws["color"] = hist_color
if kde:
kde_color = kde_kws.pop("color", color)
kdeplot(a, vertical=vertical, ax=ax, color=kde_color, **kde_kws)
if kde_color != color:
kde_kws["color"] = kde_color
if rug:
rug_color = rug_kws.pop("color", color)
axis = "y" if vertical else "x"
rugplot(a, axis=axis, ax=ax, color=rug_color, **rug_kws)
if rug_color != color:
rug_kws["color"] = rug_color
if fit is not None:
fit_color = fit_kws.pop("color", "#282828")
gridsize = fit_kws.pop("gridsize", 200)
cut = fit_kws.pop("cut", 3)
clip = fit_kws.pop("clip", (-np.inf, np.inf))
bw = stats.gaussian_kde(a).scotts_factor() * a.std(ddof=1)
x = _kde_support(a, bw, gridsize, cut, clip)
params = fit.fit(a)
pdf = lambda x: fit.pdf(x, *params)
y = pdf(x)
if vertical:
x, y = y, x
ax.plot(x, y, color=fit_color, **fit_kws)
if fit_color != "#282828":
fit_kws["color"] = fit_color
if label_ax:
if vertical:
ax.set_ylabel(axlabel)
else:
ax.set_xlabel(axlabel)
return ax
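# Hedged usage sketch (assumes the package is imported through seaborn's public
# API, with hypothetical data):
#
#     import numpy as np
#     import seaborn as sns
#     x = np.random.randn(100)
#     ax = sns.distplot(x, bins=20, rug=True, kde_kws={"shade": True})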
def _univariate_kdeplot(data, shade, vertical, kernel, bw, gridsize, cut,
clip, legend, ax, cumulative=False, **kwargs):
"""Plot a univariate kernel density estimate on one of the axes."""
# Sort out the clipping
if clip is None:
clip = (-np.inf, np.inf)
# Calculate the KDE
if _has_statsmodels:
# Prefer using statsmodels for kernel flexibility
x, y = _statsmodels_univariate_kde(data, kernel, bw,
gridsize, cut, clip,
cumulative=cumulative)
else:
# Fall back to scipy if missing statsmodels
if kernel != "gau":
kernel = "gau"
msg = "Kernel other than `gau` requires statsmodels."
warnings.warn(msg, UserWarning)
if cumulative:
raise ImportError("Cumulative distributions are currently"
"only implemented in statsmodels."
"Please install statsmodels.")
x, y = _scipy_univariate_kde(data, bw, gridsize, cut, clip)
# Make sure the density is nonnegative
y = np.amax(np.c_[np.zeros_like(y), y], axis=1)
# Flip the data if the plot should be on the y axis
if vertical:
x, y = y, x
# Check if a label was specified in the call
label = kwargs.pop("label", None)
# Otherwise check if the data object has a name
if label is None and hasattr(data, "name"):
label = data.name
# Decide if we're going to add a legend
legend = label is not None and legend
label = "_nolegend_" if label is None else label
# Use the active color cycle to find the plot color
line, = ax.plot(x, y, **kwargs)
color = line.get_color()
line.remove()
kwargs.pop("color", None)
# Draw the KDE plot and, optionally, shade
ax.plot(x, y, color=color, label=label, **kwargs)
alpha = kwargs.get("alpha", 0.25)
if shade:
if vertical:
ax.fill_betweenx(y, 1e-12, x, color=color, alpha=alpha)
else:
ax.fill_between(x, 1e-12, y, color=color, alpha=alpha)
# Draw the legend here
if legend:
ax.legend(loc="best")
return ax
def _statsmodels_univariate_kde(data, kernel, bw, gridsize, cut, clip,
cumulative=False):
"""Compute a univariate kernel density estimate using statsmodels."""
fft = kernel == "gau"
kde = smnp.KDEUnivariate(data)
kde.fit(kernel, bw, fft, gridsize=gridsize, cut=cut, clip=clip)
if cumulative:
grid, y = kde.support, kde.cdf
else:
grid, y = kde.support, kde.density
return grid, y
def _scipy_univariate_kde(data, bw, gridsize, cut, clip):
"""Compute a univariate kernel density estimate using scipy."""
try:
kde = stats.gaussian_kde(data, bw_method=bw)
except TypeError:
kde = stats.gaussian_kde(data)
if bw != "scott": # scipy default
msg = ("Ignoring bandwidth choice, "
"please upgrade scipy to use a different bandwidth.")
warnings.warn(msg, UserWarning)
if isinstance(bw, str):
bw = "scotts" if bw == "scott" else bw
bw = getattr(kde, "%s_factor" % bw)()
grid = _kde_support(data, bw, gridsize, cut, clip)
y = kde(grid)
return grid, y
def _bivariate_kdeplot(x, y, filled, kernel, bw, gridsize, cut, clip, axlabel,
ax, **kwargs):
"""Plot a joint KDE estimate as a bivariate contour plot."""
# Determine the clipping
if clip is None:
clip = [(-np.inf, np.inf), (-np.inf, np.inf)]
elif np.ndim(clip) == 1:
clip = [clip, clip]
# Calculate the KDE
if _has_statsmodels:
xx, yy, z = _statsmodels_bivariate_kde(x, y, bw, gridsize, cut, clip)
else:
xx, yy, z = _scipy_bivariate_kde(x, y, bw, gridsize, cut, clip)
# Plot the contours
n_levels = kwargs.pop("n_levels", 10)
cmap = kwargs.get("cmap", "BuGn" if filled else "BuGn_d")
if isinstance(cmap, str):
if cmap.endswith("_d"):
pal = ["#333333"]
pal.extend(color_palette(cmap.replace("_d", "_r"), 2))
cmap = blend_palette(pal, as_cmap=True)
kwargs["cmap"] = cmap
contour_func = ax.contourf if filled else ax.contour
contour_func(xx, yy, z, n_levels, **kwargs)
kwargs["n_levels"] = n_levels
# Label the axes
if hasattr(x, "name") and axlabel:
ax.set_xlabel(x.name)
if hasattr(y, "name") and axlabel:
ax.set_ylabel(y.name)
return ax
def _statsmodels_bivariate_kde(x, y, bw, gridsize, cut, clip):
"""Compute a bivariate kde using statsmodels."""
if isinstance(bw, str):
bw_func = getattr(smnp.bandwidths, "bw_" + bw)
x_bw = bw_func(x)
y_bw = bw_func(y)
bw = [x_bw, y_bw]
elif np.isscalar(bw):
bw = [bw, bw]
if isinstance(x, pd.Series):
x = x.values
if isinstance(y, pd.Series):
y = y.values
kde = smnp.KDEMultivariate([x, y], "cc", bw)
x_support = _kde_support(x, kde.bw[0], gridsize, cut, clip[0])
y_support = _kde_support(y, kde.bw[1], gridsize, cut, clip[1])
xx, yy = np.meshgrid(x_support, y_support)
z = kde.pdf([xx.ravel(), yy.ravel()]).reshape(xx.shape)
return xx, yy, z
def _scipy_bivariate_kde(x, y, bw, gridsize, cut, clip):
"""Compute a bivariate kde using scipy."""
data = np.c_[x, y]
kde = stats.gaussian_kde(data.T)
data_std = data.std(axis=0, ddof=1)
if isinstance(bw, str):
bw = "scotts" if bw == "scott" else bw
bw_x = getattr(kde, "%s_factor" % bw)() * data_std[0]
bw_y = getattr(kde, "%s_factor" % bw)() * data_std[1]
elif np.isscalar(bw):
bw_x, bw_y = bw, bw
else:
msg = ("Cannot specify a different bandwidth for each dimension "
"with the scipy backend. You should install statsmodels.")
raise ValueError(msg)
x_support = _kde_support(data[:, 0], bw_x, gridsize, cut, clip[0])
y_support = _kde_support(data[:, 1], bw_y, gridsize, cut, clip[1])
xx, yy = np.meshgrid(x_support, y_support)
z = kde([xx.ravel(), yy.ravel()]).reshape(xx.shape)
return xx, yy, z
def kdeplot(data, data2=None, shade=False, vertical=False, kernel="gau",
bw="scott", gridsize=100, cut=3, clip=None, legend=True, ax=None,
cumulative=False, **kwargs):
"""Fit and plot a univariate or bivarate kernel density estimate.
Parameters
----------
data : 1d or 2d array-like
Input data. If two-dimensional, assumed to be shaped (n_unit x n_var),
and a bivariate contour plot will be drawn.
    data2 : 1d array-like
        Second input data. If provided, `data` must be one-dimensional, and
a bivariate plot is produced.
shade : bool, optional
If true, shade in the area under the KDE curve (or draw with filled
contours when data is bivariate).
vertical : bool
If True, density is on x-axis.
kernel : {'gau' | 'cos' | 'biw' | 'epa' | 'tri' | 'triw' }, optional
Code for shape of kernel to fit with. Bivariate KDE can only use
gaussian kernel.
bw : {'scott' | 'silverman' | scalar | pair of scalars }, optional
Name of reference method to determine kernel size, scalar factor,
or scalar for each dimension of the bivariate plot.
gridsize : int, optional
Number of discrete points in the evaluation grid.
cut : scalar, optional
Draw the estimate to cut * bw from the extreme data points.
clip : pair of scalars, or pair of pair of scalars, optional
Lower and upper bounds for datapoints used to fit KDE. Can provide
a pair of (low, high) bounds for bivariate plots.
    legend : bool, optional
If True, add a legend or label the axes when possible.
ax : matplotlib axis, optional
Axis to plot on, otherwise uses current axis.
cumulative : bool
        If True, draw the cumulative distribution estimated by the KDE.
kwargs : other keyword arguments for plot()
Returns
-------
ax : matplotlib axis
Axis with plot.
"""
if ax is None:
ax = plt.gca()
data = data.astype(np.float64)
if data2 is not None:
data2 = data2.astype(np.float64)
bivariate = False
if isinstance(data, np.ndarray) and np.ndim(data) > 1:
bivariate = True
x, y = data.T
elif isinstance(data, pd.DataFrame) and np.ndim(data) > 1:
bivariate = True
x = data.iloc[:, 0].values
y = data.iloc[:, 1].values
elif data2 is not None:
bivariate = True
x = data
y = data2
if bivariate and cumulative:
raise TypeError("Cumulative distribution plots are not"
"supported for bivariate distributions.")
if bivariate:
ax = _bivariate_kdeplot(x, y, shade, kernel, bw, gridsize,
cut, clip, legend, ax, **kwargs)
else:
ax = _univariate_kdeplot(data, shade, vertical, kernel, bw,
gridsize, cut, clip, legend, ax,
cumulative=cumulative, **kwargs)
return ax
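# Illustrative usage sketch (hypothetical helper, never called): shows typical
# kdeplot calls, assuming numpy/matplotlib are available as np/plt as imported
# at the top of this module, and statsmodels for the cumulative estimate.
def _kdeplot_usage_example():
    x = np.random.randn(100)
    ax = kdeplot(x, shade=True)             # univariate, shaded density curve
    kdeplot(x, cumulative=True, ax=ax)      # cumulative estimate on the same axes
    y = np.random.randn(100)
    kdeplot(x, y, shade=True)               # bivariate contour plot
    plt.show()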
def rugplot(a, height=None, axis="x", ax=None, **kwargs):
"""Plot datapoints in an array as sticks on an axis.
Parameters
----------
a : vector
1D array of datapoints.
height : scalar, optional
Height of ticks, if None draw at 5% of axis range.
axis : {'x' | 'y'}, optional
Axis to draw rugplot on.
ax : matplotlib axis
Axis to draw plot into; otherwise grabs current axis.
kwargs : other keyword arguments for plt.plot()
Returns
-------
ax : matplotlib axis
Axis with rugplot.
"""
if ax is None:
ax = plt.gca()
a = np.asarray(a)
vertical = kwargs.pop("vertical", None)
if vertical is not None:
axis = "y" if vertical else "x"
other_axis = dict(x="y", y="x")[axis]
min, max = getattr(ax, "get_%slim" % other_axis)()
if height is None:
range = max - min
height = range * .05
if axis == "x":
ax.plot([a, a], [min, min + height], **kwargs)
else:
ax.plot([min, min + height], [a, a], **kwargs)
return ax
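# Illustrative usage sketch (hypothetical helper, never called): rugplot adds
# one tick per observation, here combined with a kdeplot density curve.
def _rugplot_usage_example():
    x = np.random.randn(50)
    ax = kdeplot(x, shade=True)
    rugplot(x, ax=ax)                         # ticks on the x axis, 5% of axis range high
    rugplot(x, height=0.02, axis="y", ax=ax)  # or on the y axis with a fixed height
    plt.show()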
def jointplot(x, y, data=None, kind="scatter", stat_func=stats.pearsonr,
color=None, size=6, ratio=5, space=.2,
dropna=True, xlim=None, ylim=None,
joint_kws=None, marginal_kws=None, annot_kws=None):
"""Draw a plot of two variables with bivariate and univariate graphs.
Parameters
----------
x, y : strings or vectors
Data or names of variables in `data`.
data : DataFrame, optional
DataFrame when `x` and `y` are variable names.
kind : { "scatter" | "reg" | "resid" | "kde" | "hex" }, optional
Kind of plot to draw.
stat_func : callable or None
Function used to calculate a statistic about the relationship and
annotate the plot. Should map `x` and `y` either to a single value
or to a (value, p) tuple. Set to ``None`` if you don't want to
annotate the plot.
color : matplotlib color, optional
Color used for the plot elements.
size : numeric, optional
Size of the figure (it will be square).
ratio : numeric, optional
Ratio of joint axes size to marginal axes height.
space : numeric, optional
Space between the joint and marginal axes
dropna : bool, optional
If True, remove observations that are missing from `x` and `y`.
{x, y}lim : two-tuples, optional
Axis limits to set before plotting.
{joint, marginal, annot}_kws : dicts
Additional keyword arguments for the plot components.
Returns
-------
grid : JointGrid
JointGrid object with the plot on it.
See Also
--------
JointGrid : The Grid class used for drawing this plot. Use it directly if
you need more flexibility.
"""
# Set up empty default kwarg dicts
if joint_kws is None:
joint_kws = {}
if marginal_kws is None:
marginal_kws = {}
if annot_kws is None:
annot_kws = {}
# Make a colormap based off the plot color
if color is None:
color = color_palette()[0]
color_rgb = mpl.colors.colorConverter.to_rgb(color)
colors = [set_hls_values(color_rgb, l=l) for l in np.linspace(1, 0, 12)]
cmap = blend_palette(colors, as_cmap=True)
# Initialize the JointGrid object
grid = JointGrid(x, y, data, dropna=dropna,
size=size, ratio=ratio, space=space,
xlim=xlim, ylim=ylim)
# Plot the data using the grid
if kind == "scatter":
joint_kws.setdefault("color", color)
grid.plot_joint(plt.scatter, **joint_kws)
marginal_kws.setdefault("kde", False)
marginal_kws.setdefault("color", color)
grid.plot_marginals(distplot, **marginal_kws)
elif kind.startswith("hex"):
x_bins = _freedman_diaconis_bins(grid.x)
y_bins = _freedman_diaconis_bins(grid.y)
gridsize = int(np.mean([x_bins, y_bins]))
joint_kws.setdefault("gridsize", gridsize)
joint_kws.setdefault("cmap", cmap)
grid.plot_joint(plt.hexbin, **joint_kws)
marginal_kws.setdefault("kde", False)
marginal_kws.setdefault("color", color)
grid.plot_marginals(distplot, **marginal_kws)
elif kind.startswith("kde"):
joint_kws.setdefault("shade", True)
joint_kws.setdefault("cmap", cmap)
grid.plot_joint(kdeplot, **joint_kws)
marginal_kws.setdefault("shade", True)
marginal_kws.setdefault("color", color)
grid.plot_marginals(kdeplot, **marginal_kws)
elif kind.startswith("reg"):
from .linearmodels import regplot
marginal_kws.setdefault("color", color)
grid.plot_marginals(distplot, **marginal_kws)
joint_kws.setdefault("color", color)
grid.plot_joint(regplot, **joint_kws)
elif kind.startswith("resid"):
from .linearmodels import residplot
joint_kws.setdefault("color", color)
grid.plot_joint(residplot, **joint_kws)
x, y = grid.ax_joint.collections[0].get_offsets().T
marginal_kws.setdefault("color", color)
marginal_kws.setdefault("kde", False)
distplot(x, ax=grid.ax_marg_x, **marginal_kws)
distplot(y, vertical=True, fit=stats.norm, ax=grid.ax_marg_y,
**marginal_kws)
stat_func = None
else:
msg = "kind must be either 'scatter', 'reg', 'resid', 'kde', or 'hex'"
raise ValueError(msg)
if stat_func is not None:
grid.annotate(stat_func, **annot_kws)
return grid
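# Illustrative usage sketch (hypothetical helper, never called): typical
# jointplot calls, assuming pandas is available as pd as used in this module.
def _jointplot_usage_example():
    df = pd.DataFrame({"x": np.random.randn(200),
                       "y": np.random.randn(200)})
    jointplot("x", "y", data=df)                      # scatter + marginal histograms
    jointplot("x", "y", data=df, kind="kde")          # bivariate KDE with KDE marginals
    jointplot("x", "y", data=df, kind="hex", stat_func=None)
    plt.show()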
|
[
"numpy.meshgrid",
"numpy.zeros_like",
"matplotlib.colors.colorConverter.to_rgb",
"numpy.isscalar",
"numpy.asarray",
"scipy.stats.gaussian_kde",
"numpy.ndim",
"statsmodels.nonparametric.api.KDEMultivariate",
"numpy.mean",
"numpy.linspace",
"matplotlib.pyplot.gca",
"warnings.warn",
"statsmodels.nonparametric.api.KDEUnivariate"
] |
[((662, 675), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (672, 675), True, 'import numpy as np\n'), ((7970, 7994), 'statsmodels.nonparametric.api.KDEUnivariate', 'smnp.KDEUnivariate', (['data'], {}), '(data)\n', (7988, 7994), True, 'import statsmodels.nonparametric.api as smnp\n'), ((10549, 10587), 'statsmodels.nonparametric.api.KDEMultivariate', 'smnp.KDEMultivariate', (['[x, y]', '"""cc"""', 'bw'], {}), "([x, y], 'cc', bw)\n", (10569, 10587), True, 'import statsmodels.nonparametric.api as smnp\n'), ((10735, 10768), 'numpy.meshgrid', 'np.meshgrid', (['x_support', 'y_support'], {}), '(x_support, y_support)\n', (10746, 10768), True, 'import numpy as np\n'), ((10989, 11015), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (['data.T'], {}), '(data.T)\n', (11007, 11015), False, 'from scipy import stats\n'), ((11652, 11685), 'numpy.meshgrid', 'np.meshgrid', (['x_support', 'y_support'], {}), '(x_support, y_support)\n', (11663, 11685), True, 'import numpy as np\n'), ((15407, 15420), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (15417, 15420), True, 'import numpy as np\n'), ((17924, 17963), 'matplotlib.colors.colorConverter.to_rgb', 'mpl.colors.colorConverter.to_rgb', (['color'], {}), '(color)\n', (17956, 17963), True, 'import matplotlib as mpl\n'), ((2715, 2724), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2722, 2724), True, 'import matplotlib.pyplot as plt\n'), ((8344, 8382), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (['data'], {'bw_method': 'bw'}), '(data, bw_method=bw)\n', (8362, 8382), False, 'from scipy import stats\n'), ((10390, 10405), 'numpy.isscalar', 'np.isscalar', (['bw'], {}), '(bw)\n', (10401, 10405), True, 'import numpy as np\n'), ((11264, 11279), 'numpy.isscalar', 'np.isscalar', (['bw'], {}), '(bw)\n', (11275, 11279), True, 'import numpy as np\n'), ((13755, 13764), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (13762, 13764), True, 'import matplotlib.pyplot as plt\n'), ((15389, 15398), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (15396, 15398), True, 'import matplotlib.pyplot as plt\n'), ((2964, 2977), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (2974, 2977), True, 'import numpy as np\n'), ((6289, 6320), 'warnings.warn', 'warnings.warn', (['msg', 'UserWarning'], {}), '(msg, UserWarning)\n', (6302, 6320), False, 'import warnings\n'), ((8419, 8443), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (['data'], {}), '(data)\n', (8437, 8443), False, 'from scipy import stats\n'), ((9164, 9177), 'numpy.ndim', 'np.ndim', (['clip'], {}), '(clip)\n', (9171, 9177), True, 'import numpy as np\n'), ((13931, 13944), 'numpy.ndim', 'np.ndim', (['data'], {}), '(data)\n', (13938, 13944), True, 'import numpy as np\n'), ((18018, 18039), 'numpy.linspace', 'np.linspace', (['(1)', '(0)', '(12)'], {}), '(1, 0, 12)\n', (18029, 18039), True, 'import numpy as np\n'), ((6675, 6691), 'numpy.zeros_like', 'np.zeros_like', (['y'], {}), '(y)\n', (6688, 6691), True, 'import numpy as np\n'), ((8621, 8652), 'warnings.warn', 'warnings.warn', (['msg', 'UserWarning'], {}), '(msg, UserWarning)\n', (8634, 8652), False, 'import warnings\n'), ((14041, 14054), 'numpy.ndim', 'np.ndim', (['data'], {}), '(data)\n', (14048, 14054), True, 'import numpy as np\n'), ((18739, 18764), 'numpy.mean', 'np.mean', (['[x_bins, y_bins]'], {}), '([x_bins, y_bins])\n', (18746, 18764), True, 'import numpy as np\n'), ((4986, 5007), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (['a'], {}), '(a)\n', (5004, 5007), False, 'from scipy import stats\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 29 22:49:48 2020
@author: adwait
"""
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QLabel,\
QComboBox,QLineEdit, QTextEdit, QCheckBox, QPushButton, QGroupBox
# from PyQt5.QtCore import Qt
from PyQt5.QtGui import QFont
import matplotlib
matplotlib.use('Qt5Agg')
# import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
##from sympy import *
import sympy as sp
import numpy as np
from scipy.optimize import curve_fit
from source.analysis.plot2widget import PlotWidget
import logging
class MathTextLabel(QWidget):
def __init__(self, mathText, parent=None, **kwargs):
super(QWidget, self).__init__(parent, **kwargs)
_l=QVBoxLayout(self)
_l.setContentsMargins(0,0,0,0)
_r,_g,_b,_a=self.palette().base().color().getRgbF()
self._figure=Figure(edgecolor=(_r,_g,_b), facecolor=(_r,_g,_b))
self._canvas=FigureCanvas(self._figure)
_l.addWidget(self._canvas)
self.drawFigure(mathText)
def drawFigure(self, mathText):
self._figure.clear()
_text=self._figure.suptitle(mathText,
x=0.0,
y=1.0,
horizontalalignment='left',
verticalalignment='top',
size=QFont().pointSize()*2
)
self._canvas.draw()
(_x0,_y0),(_x1,_y1)=_text.get_window_extent().get_points()
_w=_x1-_x0; _h=_y1-_y0
self._figure.set_size_inches(_w/80, _h/80)
self.setFixedSize(_w,_h)
# if __name__=='__main__':
# from sys import argv, exit
class FitDataWindow(QWidget):
def __init__(self, *args, **kwargs):
## super(QWidget, self).__init__(*args, **kwargs)
super().__init__()
self.setGeometry(100, 100, 1000, 500)
self.setWindowTitle("Data Fitting")
self.home()
def home(self):
# startFitLabel = QLabel("Start (%):")
# endFitLabel = QLabel("End (%):")
# self.fitStart = QDoubleSpinBox(self) #fitting range start
# self.fitStart.setValue(0)
# self.fitStart.setSingleStep(1)
# self.fitStart.setRange(0, 100)
# self.fitStop = QDoubleSpinBox(self) #fitting range end
# self.fitStop.setValue(100)
# self.fitStop.setSingleStep(1)
# self.fitStop.setRange(0, 100)
params_list = ["Index", "Time", "Vertical piezo", "Lateral piezo",
"Deformation", "Vertical force", "Lateral force"]
xPlotLabel = QLabel("X Axis:", self)
self.xPlot = QComboBox(self) #x param
self.xPlot.addItems(params_list)
self.xPlot.setCurrentIndex(0)
self.xPlot.currentIndexChanged.connect(self.plotSequence)
self.enableFitting = QCheckBox("Enable", self)
self.enableFitting.stateChanged.connect(lambda: self.fitData(True))
xFitLabel = QLabel("X Parameter:", self)
yFitLabel = QLabel("Y Parameter:", self)
self.xFit = QComboBox(self) #x param
self.xFit.addItems(params_list)
self.xFit.setCurrentIndex(4)
self.xFit.currentIndexChanged.connect(self.plotSequence)
self.yFit = QComboBox(self) #x param
self.yFit.addItems(params_list)
self.yFit.setCurrentIndex(5)
self.yFit.currentIndexChanged.connect(self.plotSequence)
# self.xDict = {'Vertical piezo':None,
# 'Lateral piezo':None,
# 'Deformation':None,
# 'Time':None,
# 'Index':None}
# self.yDict = {'Vertical force':None,
# 'Lateral force':None}
self.fileDataDict = {}
fitparamLabel = QLabel("Fit Parameters:", self)
self.fittingParams = QLineEdit(self)
self.fittingParams.setText('m,c')
self.fittingParams.textChanged.connect(self.updateTEX)
self.params_old = self.fittingParams.text().split(',')
guessValLabel = QLabel("Initial Guess:", self)
self.guessValues = QLineEdit(self)
self.guessValues.setText('0,0')
lowBoundLabel = QLabel("Lower Bouond:", self)
self.lowBound = QLineEdit(self)
self.lowBound.setText(',')
upBoundLabel = QLabel("Upper Bouond:", self)
self.upBound = QLineEdit(self)
self.upBound.setText(',')
constantsLabel = QLabel("Constants:", self)
self.constantParams = QLineEdit(self)
self.constantParams.setText('p=1,q=2,r=3')
self.constantParams.textChanged.connect(self.updateTEX)
self.constants_old = [_x.split('=')[0] for _x in self.constantParams.text().split(',')]
fitfunctionLabel = QLabel("Fitting Function:", self)
self.fittingFunctionType = QComboBox(self)
self.fittingFunctionType.addItem("Linear")
self.fittingFunctionType.addItem("Quadratic")
self.fittingFunctionType.addItem("Custom")
self.fittingFunctionType.currentIndexChanged.connect(self.updateFitFunction)
#standard functions: equation, params, guess, l_bound, u_bound
self.functionDict = {'Linear': ['m*x+c', 'm,c', '0,0', ',', ','],
'Quadratic': ['a*x**2+b*x+c', 'a,b,c', '0,0,0', ',,', ',,'],
'Custom': ['a*x', 'a', '0', '', '']}
self.fittingFunctionText = QTextEdit(self)
self.fittingFunctionText.setText('m*x+c')
self.fittingFunctionText.textChanged.connect(self.updateTEX)
self.generateFunction()
self.fittingFunctionTEX = MathTextLabel(self.mathText, self)
self.applyFitBtn = QPushButton("Fit!", self)
# self.applyFitBtn.clicked.connect(lambda: self.fitData(True))
self.fitResult = QTextEdit(self)
self.fitResult.setText("Result:\n")
self.fitResult.setReadOnly(True)
# self.fitResult.setStyleSheet("QLabel { font-weight: bold; font-size: 16px;} ")
self.plotInitialize()
plot = PlotWidget(self.fig,
cursor1_init = self.axes.get_xbound()[0],
cursor2_init = self.axes.get_xbound()[1])
self.plotWidget = plot.wid
# plotToolbar = NavigationToolbar(self.plotWidget, self)
# self.fitPosLabel = QLabel("Fit Position\n(x,y):", self) #fit eq. position
# self.fitPos = QLineEdit(self)
# self.fitPos.setText('0.5,0.5')
# self.showFitEq = QCheckBox('Show Slope', self) #display equation on plot
paramGroupBox = QGroupBox()
paramlayout=QGridLayout()
paramGroupBox.setLayout(paramlayout)
paramlayout.addWidget(xPlotLabel, 0, 0, 1, 1)
paramlayout.addWidget(self.xPlot, 0, 1, 1, 1)
paramlayout.addWidget(self.enableFitting, 0, 3, 1, 1)
paramlayout.addWidget(xFitLabel, 1, 0, 1, 1)
paramlayout.addWidget(self.xFit, 1, 1, 1, 1)
paramlayout.addWidget(yFitLabel, 1, 2, 1, 1)
paramlayout.addWidget(self.yFit, 1, 3, 1, 1)
paramlayout.addWidget(fitparamLabel, 2, 0, 1, 1)
paramlayout.addWidget(self.fittingParams, 2, 1, 1, 1)
paramlayout.addWidget(guessValLabel, 2, 2, 1, 1)
paramlayout.addWidget(self.guessValues, 2, 3, 1, 1)
paramlayout.addWidget(lowBoundLabel, 3, 0, 1, 1)
paramlayout.addWidget(self.lowBound, 3, 1, 1, 1)
paramlayout.addWidget(upBoundLabel, 3, 2, 1, 1)
paramlayout.addWidget(self.upBound, 3, 3, 1, 1)
paramlayout.addWidget(constantsLabel, 4, 0, 1, 1)
paramlayout.addWidget(self.constantParams, 4, 1, 1, 1)
paramlayout.addWidget(fitfunctionLabel, 4, 2, 1, 1)
paramlayout.addWidget(self.fittingFunctionType, 4, 3, 1, 1)
paramlayout.addWidget(self.fittingFunctionText, 5, 0, 1, 4)
paramlayout.addWidget(self.fittingFunctionTEX, 6, 0, 3, 4)
paramlayout.addWidget(self.fitResult, 9,0, 1, 4)
paramlayout.addWidget(self.applyFitBtn, 10, 1, 1, 2)
# layout.addWidget(plotToolbar, 0, 4, 1, 6)
# layout.addWidget(self.plotWidget, 1, 4, 9, 6)
# plotGroupBox = QGroupBox()
# plotlayout=QGridLayout()
# plotGroupBox.setLayout(plotlayout)
# plotlayout.addWidget(plot, 0, 0, 1, 1)
# plotlayout.addWidget(plotToolbar, 0, 0, 1, 1)
# plotlayout.addWidget(self.plotWidget, 1, 0, 1, 1)
layout=QGridLayout()
layout.addWidget(paramGroupBox, 0, 0, 1, 1)
layout.addWidget(plot, 0, 1, 1, 1)
self.setLayout(layout)
# self.show()
def updateFitFunction(self):
logging.debug('test0')
_key = self.fittingFunctionType.currentText()
self.fittingFunctionText.blockSignals(True)
self.fittingFunctionText.setText(self.functionDict[_key][0])
self.fittingFunctionText.blockSignals(False)
logging.debug('test')
self.fittingParams.blockSignals(True)
self.fittingParams.setText(self.functionDict[_key][1])
self.fittingParams.blockSignals(False)
logging.debug('test2')
self.guessValues.setText(self.functionDict[_key][2])
self.lowBound.setText(self.functionDict[_key][3])
self.upBound.setText(self.functionDict[_key][4])
self.updateTEX()
def updateTEX(self):
#delete old variables
for _x in self.params_old:
if _x != '':
exec('del ' + _x, globals())
for _x in self.constants_old:
if _x != '':
exec('del ' + _x, globals())
#update function
self.generateFunction()
self.params_old = self.fittingParams.text().split(',')
self.constants_old = [_x.split('=')[0] for _x in self.constantParams.text().split(',')]
#draw equation
if self.mathText != None:
self.fittingFunctionTEX.drawFigure(self.mathText)
#below optional, remove later: CHECK!
# self.plotRawData()
# self.plotWidget.cursor1.set_ydata(self.axes.get_ybound()) #CHECK
# self.plotWidget.cursor2.set_ydata(self.axes.get_ybound()) #CHECK
## self.plotWidget.add_cursors()
# self.fitData(False)
# self.updatePlot()
#create fitting function and TEX format for display
def generateFunction(self):
try:
math_functions = ['re','im','sign','Abs','arg','conjugate',
'polar_lift','periodic_argument',
'principal_branch','sin','cos','tan',
'cot','sec','csc','sinc','asin','acos',
'atan','acot','asec','acsc','atan2',
'sinh','cosh','tanh','coth','sech',
'csch','asinh','acosh','atanh','acoth',
'asech','acsch','ceiling','floor','frac',
'exp','LambertW','log','exp_polar','Min',
'Max','root','sqrt','cbrt','real_root','pi']
x_param = 'x'
y_param = 'y'
fit_params = self.fittingParams.text()
constants = self.constantParams.text()
# constant_vals = '1,2,3'
equation_fit = self.fittingFunctionText.toPlainText()
variables = x_param + ',' + fit_params
global var
var = sp.symbols(variables.replace(',',' '))
exec(variables + ' = var', globals())
for _x in constants.split(','):
exec(_x, globals())
## print(p+q)
for item in math_functions:
equation_fit = equation_fit.replace(item,'sp.'+item)
self.mathText = r'$' + y_param + '=' + sp.latex(eval(equation_fit),
ln_notation = True) + '$'
self.func = sp.lambdify(list(var),eval(equation_fit))
logging.debug(self.mathText)
except Exception as e:
logging.error(str(e))
self.mathText = None
def plotInitialize(self):
self.fig = Figure(figsize=(5, 4), dpi=100)
self.axes = self.fig.add_subplot(111)
self.ax_raw = None
# self.ax_norm = None
self.ax_constr = None
# #generate random data (for testing)
xdata = np.linspace(0, 4, 50)
self.fileDataDict[self.xFit.currentText()] = xdata
self.fileDataDict[self.xPlot.currentText()] = xdata
y = self.func(xdata, 2.5, 1.3)
np.random.seed(1729)
y_noise = 0.2 * np.random.normal(size=xdata.size)
self.fileDataDict[self.yFit.currentText()] = y + y_noise
self.plotRawData()
self.updatePlot()
self.fit_range = [None,None]
def plotRawData(self):
self.plotxdata= self.fileDataDict[self.xPlot.currentText()]
self.xdata = self.fileDataDict[self.xFit.currentText()]
self.ydata = self.fileDataDict[self.yFit.currentText()]
if self.ax_raw != None: #check
self.axes.lines.remove(self.ax_raw)
# if self.xdata != None and self.ydata != None:
self.ax_raw, = self.axes.plot(self.plotxdata, self.ydata, 'ro',
linewidth=1, markersize=1)
self.axes.relim()
self.axes.autoscale()
self.axes.set_xlabel(self.xPlot.currentText())
self.axes.set_ylabel(self.yFit.currentText())
# self.updatePlot()
# self.plotWidget.cursor1.set_xdata(self.axes.get_xbound()[0]) #CHECK
# self.plotWidget.cursor2.set_xdata(self.axes.get_xbound()[1]) #CHECK
def updatePlot(self):
self.axes.relim()
self.axes.autoscale()
self.fig.tight_layout()
self.fig.canvas.draw()
def plotSequence(self):
self.plotRawData()
self.update_cursor()
self.fitData(False)
# self.updatePlot()
def update_cursor(self):
if self.fit_range == [None,None]:
self.fit_range[:] = [0,len(self.plotxdata)-1]
if self.plotWidget.cursor1 != None:
# x = self.plotxdata.min()
x = self.plotxdata[self.fit_range[0]]
y = [self.ydata.min(), self.ydata.max()]
self.plotWidget.cursor1.set_xdata([x,x])
self.plotWidget.cursor1.set_ydata(y) #CHECK
if self.plotWidget.cursor2 != None:
# x = self.plotxdata.max()
x = self.plotxdata[self.fit_range[1]-1]
y = [self.ydata.min(), self.ydata.max()]
self.plotWidget.cursor2.set_xdata([x,x])
self.plotWidget.cursor2.set_ydata(y) #CHECK
# self.axes.relim()
# self.axes.autoscale()
self.updatePlot()
self.plotWidget.draw_idle()
# data fitting
def fitData(self, update_slice = True):
logging.debug("fit data")
if self.enableFitting.isChecked() == True:
self.generateFunction()
#draw equation
# if self.mathText != None:
# self.fittingFunctionTEX.drawFigure(self.mathText)
# self.updateTEX()
if update_slice == True:
xlim1 = min([self.plotWidget.cursor1.get_xdata()[0],
self.plotWidget.cursor2.get_xdata()[0]])
xlim2 = max([self.plotWidget.cursor1.get_xdata()[0],
self.plotWidget.cursor2.get_xdata()[0]])
self.fit_range[:] = [np.searchsorted(self.plotxdata, [xlim1])[0],
np.searchsorted(self.plotxdata, [xlim2])[0]+1]
logging.debug("inside")
fit_slice = slice(*self.fit_range)
logging.debug('%s', fit_slice)
guess_vals = self.guessValues.text()
l_bounds = self.lowBound.text()
u_bounds = self.upBound.text()
l_bounds_val = [float(_x) if _x != '' else -np.inf \
for _x in l_bounds.split(',')] \
if l_bounds != '' else -np.inf
u_bounds_val = [float(_x) if _x != '' else np.inf \
for _x in u_bounds.split(',')] \
if u_bounds != '' else np.inf
labeltext = self.fittingParams.text().replace(',', '=%5.3f, ') + \
'=%5.3f'
# if self.ax_norm != None:
# self.axes.lines.remove(self.ax_norm)
if self.ax_constr != None:
self.axes.lines.remove(self.ax_constr)
try:
logging.debug("test")
#normal fit
# popt, pcov = curve_fit(self.func, self.xdata[fit_slice],
# self.ydata[fit_slice],
# [float(x) for x in guess_vals.split(',')])
# print("normal", popt)
# self.ax_norm, = self.axes.plot(self.xdata[fit_slice],
# self.func(self.xdata[fit_slice], *popt), 'b-',
# label= labeltext % tuple(popt))
#contrained fit
popt, pcov = curve_fit(self.func, self.xdata[fit_slice],
self.ydata[fit_slice],
[float(_x) for _x in guess_vals.split(',')],
bounds=(l_bounds_val,u_bounds_val))
logging.debug('%s, %s', "constrained", popt)
self.fit_ydata = self.func(self.xdata[fit_slice], *popt)
fit_label = labeltext % tuple(popt)
self.ax_constr, = self.axes.plot(self.plotxdata[fit_slice],
self.fit_ydata, 'g--',
label= fit_label)
error_label = 'Std. Dev. Error:\n' + labeltext % tuple(np.sqrt(np.diag(pcov)))
self.fitResult.setText('Fit values:\n' + fit_label + '\n' + error_label)
self.fitParams = dict(zip(self.fittingParams.text().split(','),
popt))
logging.debug('%s', self.fitParams)
except Exception as e: #on fitting failure
logging.error(str(e))
self.fitResult.setText(str(e))
# self.ax_norm = None
self.ax_constr = None
# self.plotWidget.cursor1.set_ydata(self.axes.get_ybound()) #CHECK
# self.plotWidget.cursor2.set_ydata(self.axes.get_ybound()) #CHECK
self.axes.legend()
# self.axes.text(1, 1, "Test", picker=5)
## self.fig.canvas.draw()
else:
if self.ax_constr != None:
self.axes.get_legend().remove()
self.axes.lines.remove(self.ax_constr)
self.ax_constr = None
self.fitParams = {}
self.fitResult.setText('Fit values:\n')
self.updatePlot()
## plt.show()
# _a=QApplication(argv)
# _w=FitDataWindow()
# # _w.show()
# _w.raise_()
# _a.exec_()
# QApplication.exit()
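# Minimal non-GUI sketch of the fitting flow used by FitDataWindow: build a
# callable from a symbolic expression with sympy.lambdify, then fit it with
# scipy.optimize.curve_fit. The helper name and the synthetic noisy data are
# illustrative assumptions only.
def _demo_symbolic_fit():
    x_sym, m_sym, c_sym = sp.symbols("x m c")
    model = sp.lambdify((x_sym, m_sym, c_sym), m_sym * x_sym + c_sym)
    xdata = np.linspace(0, 4, 50)
    ydata = model(xdata, 2.5, 1.3) + 0.2 * np.random.normal(size=xdata.size)
    popt, pcov = curve_fit(model, xdata, ydata, p0=[0.0, 0.0],
                           bounds=(-np.inf, np.inf))
    return dict(zip(["m", "c"], popt)), np.sqrt(np.diag(pcov))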
|
[
"numpy.random.seed",
"PyQt5.QtWidgets.QGridLayout",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtWidgets.QVBoxLayout",
"numpy.random.normal",
"numpy.diag",
"PyQt5.QtWidgets.QLabel",
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg",
"PyQt5.QtWidgets.QCheckBox",
"matplotlib.figure.Figure",
"numpy.linspace",
"PyQt5.QtWidgets.QComboBox",
"PyQt5.QtWidgets.QTextEdit",
"PyQt5.QtWidgets.QGroupBox",
"matplotlib.use",
"logging.debug",
"PyQt5.QtWidgets.QLineEdit",
"numpy.searchsorted",
"PyQt5.QtGui.QFont"
] |
[((318, 342), 'matplotlib.use', 'matplotlib.use', (['"""Qt5Agg"""'], {}), "('Qt5Agg')\n", (332, 342), False, 'import matplotlib\n'), ((919, 936), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', (['self'], {}), '(self)\n', (930, 936), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QLabel, QComboBox, QLineEdit, QTextEdit, QCheckBox, QPushButton, QGroupBox\n'), ((1064, 1118), 'matplotlib.figure.Figure', 'Figure', ([], {'edgecolor': '(_r, _g, _b)', 'facecolor': '(_r, _g, _b)'}), '(edgecolor=(_r, _g, _b), facecolor=(_r, _g, _b))\n', (1070, 1118), False, 'from matplotlib.figure import Figure\n'), ((1137, 1163), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg', 'FigureCanvas', (['self._figure'], {}), '(self._figure)\n', (1149, 1163), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n'), ((2935, 2958), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""X Axis:"""', 'self'], {}), "('X Axis:', self)\n", (2941, 2958), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QLabel, QComboBox, QLineEdit, QTextEdit, QCheckBox, QPushButton, QGroupBox\n'), ((2981, 2996), 'PyQt5.QtWidgets.QComboBox', 'QComboBox', (['self'], {}), '(self)\n', (2990, 2996), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QLabel, QComboBox, QLineEdit, QTextEdit, QCheckBox, QPushButton, QGroupBox\n'), ((3194, 3219), 'PyQt5.QtWidgets.QCheckBox', 'QCheckBox', (['"""Enable"""', 'self'], {}), "('Enable', self)\n", (3203, 3219), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QLabel, QComboBox, QLineEdit, QTextEdit, QCheckBox, QPushButton, QGroupBox\n'), ((3328, 3356), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""X Parameter:"""', 'self'], {}), "('X Parameter:', self)\n", (3334, 3356), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QLabel, QComboBox, QLineEdit, QTextEdit, QCheckBox, QPushButton, QGroupBox\n'), ((3378, 3406), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Y Parameter:"""', 'self'], {}), "('Y Parameter:', self)\n", (3384, 3406), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QLabel, QComboBox, QLineEdit, QTextEdit, QCheckBox, QPushButton, QGroupBox\n'), ((3430, 3445), 'PyQt5.QtWidgets.QComboBox', 'QComboBox', (['self'], {}), '(self)\n', (3439, 3445), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QLabel, QComboBox, QLineEdit, QTextEdit, QCheckBox, QPushButton, QGroupBox\n'), ((3623, 3638), 'PyQt5.QtWidgets.QComboBox', 'QComboBox', (['self'], {}), '(self)\n', (3632, 3638), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QLabel, QComboBox, QLineEdit, QTextEdit, QCheckBox, QPushButton, QGroupBox\n'), ((4161, 4192), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Fit Parameters:"""', 'self'], {}), "('Fit Parameters:', self)\n", (4167, 4192), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QLabel, QComboBox, QLineEdit, QTextEdit, QCheckBox, QPushButton, QGroupBox\n'), ((4223, 4238), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', (['self'], {}), '(self)\n', (4232, 4238), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QLabel, QComboBox, QLineEdit, QTextEdit, QCheckBox, QPushButton, QGroupBox\n'), ((4445, 4475), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Initial Guess:"""', 'self'], {}), "('Initial Guess:', self)\n", (4451, 4475), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QLabel, QComboBox, QLineEdit, QTextEdit, QCheckBox, QPushButton, QGroupBox\n'), ((4504, 
4519), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', (['self'], {}), '(self)\n', (4513, 4519), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QLabel, QComboBox, QLineEdit, QTextEdit, QCheckBox, QPushButton, QGroupBox\n'), ((4596, 4625), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Lower Bouond:"""', 'self'], {}), "('Lower Bouond:', self)\n", (4602, 4625), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QLabel, QComboBox, QLineEdit, QTextEdit, QCheckBox, QPushButton, QGroupBox\n'), ((4651, 4666), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', (['self'], {}), '(self)\n', (4660, 4666), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QLabel, QComboBox, QLineEdit, QTextEdit, QCheckBox, QPushButton, QGroupBox\n'), ((4737, 4766), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Upper Bouond:"""', 'self'], {}), "('Upper Bouond:', self)\n", (4743, 4766), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QLabel, QComboBox, QLineEdit, QTextEdit, QCheckBox, QPushButton, QGroupBox\n'), ((4791, 4806), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', (['self'], {}), '(self)\n', (4800, 4806), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QLabel, QComboBox, QLineEdit, QTextEdit, QCheckBox, QPushButton, QGroupBox\n'), ((4878, 4904), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Constants:"""', 'self'], {}), "('Constants:', self)\n", (4884, 4904), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QLabel, QComboBox, QLineEdit, QTextEdit, QCheckBox, QPushButton, QGroupBox\n'), ((4936, 4951), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', (['self'], {}), '(self)\n', (4945, 4951), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QLabel, QComboBox, QLineEdit, QTextEdit, QCheckBox, QPushButton, QGroupBox\n'), ((5204, 5237), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Fitting Function:"""', 'self'], {}), "('Fitting Function:', self)\n", (5210, 5237), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QLabel, QComboBox, QLineEdit, QTextEdit, QCheckBox, QPushButton, QGroupBox\n'), ((5274, 5289), 'PyQt5.QtWidgets.QComboBox', 'QComboBox', (['self'], {}), '(self)\n', (5283, 5289), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QLabel, QComboBox, QLineEdit, QTextEdit, QCheckBox, QPushButton, QGroupBox\n'), ((5888, 5903), 'PyQt5.QtWidgets.QTextEdit', 'QTextEdit', (['self'], {}), '(self)\n', (5897, 5903), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QLabel, QComboBox, QLineEdit, QTextEdit, QCheckBox, QPushButton, QGroupBox\n'), ((6186, 6211), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Fit!"""', 'self'], {}), "('Fit!', self)\n", (6197, 6211), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QLabel, QComboBox, QLineEdit, QTextEdit, QCheckBox, QPushButton, QGroupBox\n'), ((6312, 6327), 'PyQt5.QtWidgets.QTextEdit', 'QTextEdit', (['self'], {}), '(self)\n', (6321, 6327), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QLabel, QComboBox, QLineEdit, QTextEdit, QCheckBox, QPushButton, QGroupBox\n'), ((7124, 7135), 'PyQt5.QtWidgets.QGroupBox', 'QGroupBox', ([], {}), '()\n', (7133, 7135), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QLabel, QComboBox, QLineEdit, QTextEdit, QCheckBox, QPushButton, QGroupBox\n'), ((7157, 7170), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', ([], {}), '()\n', (7168, 7170), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, 
QGridLayout, QLabel, QComboBox, QLineEdit, QTextEdit, QCheckBox, QPushButton, QGroupBox\n'), ((9030, 9043), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', ([], {}), '()\n', (9041, 9043), False, 'from PyQt5.QtWidgets import QWidget, QVBoxLayout, QGridLayout, QLabel, QComboBox, QLineEdit, QTextEdit, QCheckBox, QPushButton, QGroupBox\n'), ((9271, 9293), 'logging.debug', 'logging.debug', (['"""test0"""'], {}), "('test0')\n", (9284, 9293), False, 'import logging\n'), ((9535, 9556), 'logging.debug', 'logging.debug', (['"""test"""'], {}), "('test')\n", (9548, 9556), False, 'import logging\n'), ((9725, 9747), 'logging.debug', 'logging.debug', (['"""test2"""'], {}), "('test2')\n", (9738, 9747), False, 'import logging\n'), ((12922, 12953), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(5, 4)', 'dpi': '(100)'}), '(figsize=(5, 4), dpi=100)\n', (12928, 12953), False, 'from matplotlib.figure import Figure\n'), ((13157, 13178), 'numpy.linspace', 'np.linspace', (['(0)', '(4)', '(50)'], {}), '(0, 4, 50)\n', (13168, 13178), True, 'import numpy as np\n'), ((13349, 13369), 'numpy.random.seed', 'np.random.seed', (['(1729)'], {}), '(1729)\n', (13363, 13369), True, 'import numpy as np\n'), ((15798, 15823), 'logging.debug', 'logging.debug', (['"""fit data"""'], {}), "('fit data')\n", (15811, 15823), False, 'import logging\n'), ((12737, 12765), 'logging.debug', 'logging.debug', (['self.mathText'], {}), '(self.mathText)\n', (12750, 12765), False, 'import logging\n'), ((13395, 13428), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'xdata.size'}), '(size=xdata.size)\n', (13411, 13428), True, 'import numpy as np\n'), ((16708, 16738), 'logging.debug', 'logging.debug', (['"""%s"""', 'fit_slice'], {}), "('%s', fit_slice)\n", (16721, 16738), False, 'import logging\n'), ((16609, 16632), 'logging.debug', 'logging.debug', (['"""inside"""'], {}), "('inside')\n", (16622, 16632), False, 'import logging\n'), ((17683, 17704), 'logging.debug', 'logging.debug', (['"""test"""'], {}), "('test')\n", (17696, 17704), False, 'import logging\n'), ((18633, 18677), 'logging.debug', 'logging.debug', (['"""%s, %s"""', '"""constrained"""', 'popt'], {}), "('%s, %s', 'constrained', popt)\n", (18646, 18677), False, 'import logging\n'), ((19375, 19410), 'logging.debug', 'logging.debug', (['"""%s"""', 'self.fitParams'], {}), "('%s', self.fitParams)\n", (19388, 19410), False, 'import logging\n'), ((16465, 16505), 'numpy.searchsorted', 'np.searchsorted', (['self.plotxdata', '[xlim1]'], {}), '(self.plotxdata, [xlim1])\n', (16480, 16505), True, 'import numpy as np\n'), ((1616, 1623), 'PyQt5.QtGui.QFont', 'QFont', ([], {}), '()\n', (1621, 1623), False, 'from PyQt5.QtGui import QFont\n'), ((16545, 16585), 'numpy.searchsorted', 'np.searchsorted', (['self.plotxdata', '[xlim2]'], {}), '(self.plotxdata, [xlim2])\n', (16560, 16585), True, 'import numpy as np\n'), ((19121, 19134), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (19128, 19134), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
def estimate_pi(n, plot=False):
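    """Estimate pi by Monte Carlo sampling: draw ``n`` uniform points in the
    unit square; the fraction whose distance from the origin is <= 1
    approximates the quarter-circle area pi/4, so multiplying that fraction by
    4 gives an estimate of pi. If ``plot`` is True, also draw the samples and
    the quarter-circle boundary."""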
pts = np.random.random((n, 2))
norm = np.linalg.norm(pts, axis=-1)
frac_in_circle = np.average(norm <= 1)
pi_est = frac_in_circle * 4
if plot:
plt.plot(pts[:, 0], pts[:, 1], ',')
x = np.linspace(0, 1, 100)
y = np.sqrt(1-x**2)
plt.plot(x, y, 'g-')
plt.fill_between(x, 0, y, facecolor='g', alpha=0.2)
plt.title("Monte Carlo Estimate of Pi with {} points: {}".format(n, pi_est))
plt.axis('equal')
plt.tight_layout()
plt.show()
return pi_est
if __name__ == "__main__":
n = int(input("Input a number of points to sample: "))
estimate_pi(n, plot=True)
|
[
"numpy.average",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axis",
"numpy.random.random",
"numpy.linalg.norm",
"numpy.linspace",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.tight_layout",
"numpy.sqrt"
] |
[((98, 122), 'numpy.random.random', 'np.random.random', (['(n, 2)'], {}), '((n, 2))\n', (114, 122), True, 'import numpy as np\n'), ((137, 165), 'numpy.linalg.norm', 'np.linalg.norm', (['pts'], {'axis': '(-1)'}), '(pts, axis=-1)\n', (151, 165), True, 'import numpy as np\n'), ((188, 209), 'numpy.average', 'np.average', (['(norm <= 1)'], {}), '(norm <= 1)\n', (198, 209), True, 'import numpy as np\n'), ((272, 307), 'matplotlib.pyplot.plot', 'plt.plot', (['pts[:, 0]', 'pts[:, 1]', '""","""'], {}), "(pts[:, 0], pts[:, 1], ',')\n", (280, 307), True, 'import matplotlib.pyplot as plt\n'), ((331, 353), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (342, 353), True, 'import numpy as np\n'), ((367, 386), 'numpy.sqrt', 'np.sqrt', (['(1 - x ** 2)'], {}), '(1 - x ** 2)\n', (374, 386), True, 'import numpy as np\n'), ((392, 412), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""g-"""'], {}), "(x, y, 'g-')\n", (400, 412), True, 'import matplotlib.pyplot as plt\n'), ((422, 473), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', '(0)', 'y'], {'facecolor': '"""g"""', 'alpha': '(0.2)'}), "(x, 0, y, facecolor='g', alpha=0.2)\n", (438, 473), True, 'import matplotlib.pyplot as plt\n'), ((579, 596), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (587, 596), True, 'import matplotlib.pyplot as plt\n'), ((606, 624), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (622, 624), True, 'import matplotlib.pyplot as plt\n'), ((634, 644), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (642, 644), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy
from sklearn.feature_extraction import DictVectorizer
from sklearn.pipeline import Pipeline
from newsgac.nlp_tools.transformers import ExtractSentimentFeatures
def test_sentiment_features():
text = 'Dit is een willekeurige tekst waar wat sentiment features uitgehaald worden. Dit is de tweede zin.'
pipeline = Pipeline([
('ExtractSentimentFeatures', ExtractSentimentFeatures()),
('DictVectorizer', DictVectorizer()),
])
expected_features = numpy.array(['polarity', 'subjectivity'])
result = pipeline.fit_transform([text])
if not result.shape == (1, 2): raise AssertionError()
if not (pipeline.steps[1][1].get_feature_names() == expected_features).all(): raise AssertionError()
|
[
"newsgac.nlp_tools.transformers.ExtractSentimentFeatures",
"numpy.array",
"sklearn.feature_extraction.DictVectorizer"
] |
[((490, 531), 'numpy.array', 'numpy.array', (["['polarity', 'subjectivity']"], {}), "(['polarity', 'subjectivity'])\n", (501, 531), False, 'import numpy\n'), ((383, 409), 'newsgac.nlp_tools.transformers.ExtractSentimentFeatures', 'ExtractSentimentFeatures', ([], {}), '()\n', (407, 409), False, 'from newsgac.nlp_tools.transformers import ExtractSentimentFeatures\n'), ((439, 455), 'sklearn.feature_extraction.DictVectorizer', 'DictVectorizer', ([], {}), '()\n', (453, 455), False, 'from sklearn.feature_extraction import DictVectorizer\n')]
|
from . import unittest, numpy
from shapely.geometry import LineString, MultiLineString, asMultiLineString
from shapely.geometry.base import dump_coords
class MultiLineStringTestCase(unittest.TestCase):
    def test_multilinestring(self):
# From coordinate tuples
geom = MultiLineString((((1.0, 2.0), (3.0, 4.0)),))
self.assertIsInstance(geom, MultiLineString)
self.assertEqual(len(geom.geoms), 1)
self.assertEqual(dump_coords(geom), [[(1.0, 2.0), (3.0, 4.0)]])
# From lines
a = LineString(((1.0, 2.0), (3.0, 4.0)))
ml = MultiLineString([a])
self.assertEqual(len(ml.geoms), 1)
self.assertEqual(dump_coords(ml), [[(1.0, 2.0), (3.0, 4.0)]])
# From another multi-line
ml2 = MultiLineString(ml)
self.assertEqual(len(ml2.geoms), 1)
self.assertEqual(dump_coords(ml2), [[(1.0, 2.0), (3.0, 4.0)]])
# Sub-geometry Access
geom = MultiLineString([(((0.0, 0.0), (1.0, 2.0)))])
self.assertIsInstance(geom[0], LineString)
self.assertEqual(dump_coords(geom[0]), [(0.0, 0.0), (1.0, 2.0)])
with self.assertRaises(IndexError): # index out of range
geom.geoms[1]
# Geo interface
self.assertEqual(geom.__geo_interface__,
{'type': 'MultiLineString',
'coordinates': (((0.0, 0.0), (1.0, 2.0)),)})
@unittest.skipIf(not numpy, 'Numpy required')
def test_numpy(self):
from numpy import array
from numpy.testing import assert_array_equal
# Construct from a numpy array
geom = MultiLineString([array(((0.0, 0.0), (1.0, 2.0)))])
self.assertIsInstance(geom, MultiLineString)
self.assertEqual(len(geom.geoms), 1)
self.assertEqual(dump_coords(geom), [[(0.0, 0.0), (1.0, 2.0)]])
# Adapt a sequence of Numpy arrays to a multilinestring
a = [array(((1.0, 2.0), (3.0, 4.0)))]
geoma = asMultiLineString(a)
assert_array_equal(geoma.context, [array([[1., 2.], [3., 4.]])])
self.assertEqual(dump_coords(geoma), [[(1.0, 2.0), (3.0, 4.0)]])
# TODO: is there an inverse?
def test_suite():
return unittest.TestLoader().loadTestsFromTestCase(MultiLineStringTestCase)
|
[
"shapely.geometry.MultiLineString",
"shapely.geometry.asMultiLineString",
"shapely.geometry.base.dump_coords",
"shapely.geometry.LineString",
"numpy.array"
] |
[((285, 329), 'shapely.geometry.MultiLineString', 'MultiLineString', (['(((1.0, 2.0), (3.0, 4.0)),)'], {}), '((((1.0, 2.0), (3.0, 4.0)),))\n', (300, 329), False, 'from shapely.geometry import LineString, MultiLineString, asMultiLineString\n'), ((534, 570), 'shapely.geometry.LineString', 'LineString', (['((1.0, 2.0), (3.0, 4.0))'], {}), '(((1.0, 2.0), (3.0, 4.0)))\n', (544, 570), False, 'from shapely.geometry import LineString, MultiLineString, asMultiLineString\n'), ((584, 604), 'shapely.geometry.MultiLineString', 'MultiLineString', (['[a]'], {}), '([a])\n', (599, 604), False, 'from shapely.geometry import LineString, MultiLineString, asMultiLineString\n'), ((767, 786), 'shapely.geometry.MultiLineString', 'MultiLineString', (['ml'], {}), '(ml)\n', (782, 786), False, 'from shapely.geometry import LineString, MultiLineString, asMultiLineString\n'), ((948, 991), 'shapely.geometry.MultiLineString', 'MultiLineString', (['[((0.0, 0.0), (1.0, 2.0))]'], {}), '([((0.0, 0.0), (1.0, 2.0))])\n', (963, 991), False, 'from shapely.geometry import LineString, MultiLineString, asMultiLineString\n'), ((1974, 1994), 'shapely.geometry.asMultiLineString', 'asMultiLineString', (['a'], {}), '(a)\n', (1991, 1994), False, 'from shapely.geometry import LineString, MultiLineString, asMultiLineString\n'), ((453, 470), 'shapely.geometry.base.dump_coords', 'dump_coords', (['geom'], {}), '(geom)\n', (464, 470), False, 'from shapely.geometry.base import dump_coords\n'), ((673, 688), 'shapely.geometry.base.dump_coords', 'dump_coords', (['ml'], {}), '(ml)\n', (684, 688), False, 'from shapely.geometry.base import dump_coords\n'), ((856, 872), 'shapely.geometry.base.dump_coords', 'dump_coords', (['ml2'], {}), '(ml2)\n', (867, 872), False, 'from shapely.geometry.base import dump_coords\n'), ((1070, 1090), 'shapely.geometry.base.dump_coords', 'dump_coords', (['geom[0]'], {}), '(geom[0])\n', (1081, 1090), False, 'from shapely.geometry.base import dump_coords\n'), ((1800, 1817), 'shapely.geometry.base.dump_coords', 'dump_coords', (['geom'], {}), '(geom)\n', (1811, 1817), False, 'from shapely.geometry.base import dump_coords\n'), ((1925, 1956), 'numpy.array', 'array', (['((1.0, 2.0), (3.0, 4.0))'], {}), '(((1.0, 2.0), (3.0, 4.0)))\n', (1930, 1956), False, 'from numpy import array\n'), ((2093, 2111), 'shapely.geometry.base.dump_coords', 'dump_coords', (['geoma'], {}), '(geoma)\n', (2104, 2111), False, 'from shapely.geometry.base import dump_coords\n'), ((1643, 1674), 'numpy.array', 'array', (['((0.0, 0.0), (1.0, 2.0))'], {}), '(((0.0, 0.0), (1.0, 2.0)))\n', (1648, 1674), False, 'from numpy import array\n'), ((2038, 2069), 'numpy.array', 'array', (['[[1.0, 2.0], [3.0, 4.0]]'], {}), '([[1.0, 2.0], [3.0, 4.0]])\n', (2043, 2069), False, 'from numpy import array\n')]
|
import sys
import os
import numpy as np
from typing import Union
from PIL import Image, ImageDraw, ImageFont
from weblogo import colorscheme
from weblogo.color import Color
from weblogo.seq import protein_alphabet
try:
import bokeh as bk
from bokeh.plotting import figure, show
from bokeh.core.properties import value
# for visualization in jupyter notebook
from bokeh.io import output_notebook
output_notebook()
except ImportError:
print(
"Warning: cannot import Bokeh, so the interactive alignment viewer is disabled.",
file=sys.stderr,
)
from .alphabets import gap_letter
# revised from chemistry_extended: removed neutral and moved N and Q to polar
aa_chemistry_simple = colorscheme.ColorScheme(
[
colorscheme.SymbolColor("GSTYCNQ", "green", "polar"),
colorscheme.SymbolColor("KRH", "blue", "basic"),
colorscheme.SymbolColor("DE", "red", "acidic"),
colorscheme.SymbolColor("PAWFLIMV", "black", "hydrophobic"),
colorscheme.SymbolColor("X", "gray", "unknown"),
],
alphabet=protein_alphabet,
)
# from makelogo
aa_chemistry_extended = colorscheme.ColorScheme(
[
colorscheme.SymbolColor("GSTYC", "green", "polar"),
colorscheme.SymbolColor("NQ", "purple", "neutral"),
colorscheme.SymbolColor("KRH", "blue", "basic"),
colorscheme.SymbolColor("DE", "red", "acidic"),
colorscheme.SymbolColor("PAWFLIMV", "black", "hydrophobic"),
colorscheme.SymbolColor("X", "gray", "unknown"),
],
alphabet=protein_alphabet,
)
aa_dssp_color = colorscheme.ColorScheme(
[
colorscheme.SymbolColor("EB", "red", "strand"),
colorscheme.SymbolColor("HGI", "blue", "helix"),
colorscheme.SymbolColor("TSC-", "light gray", "coil"),
],
alphabet=protein_alphabet,
)
nt_simple = colorscheme.ColorScheme(
[
colorscheme.SymbolColor("G", "orange"),
colorscheme.SymbolColor("TU", "red"),
colorscheme.SymbolColor("C", "blue"),
colorscheme.SymbolColor("A", "green"),
],
)
def convert_weblogo_color(color: Color, color_format: str) -> Union[tuple, str]:
"""Convert weblogo Color to Bokeh color object
Note: Weblogo colors are RGB but fractional [0, 1],
whereas Bokeh and draw_alignment are [0, 255]
:sa: https://github.com/WebLogo/weblogo/blob/master/weblogo/color.py
:param color: A weblogo Color object.
:param color_format: Either "rgb" or "hex".
:returns: Either an RGB tuple (for "rgb") or hexadecimal string (for "hex").
"""
assert color_format in ["rgb", "hex"]
rgb_tuple = int(255 * color.red), int(255 * color.green), int(255 * color.blue)
hex_str = f"#{rgb_tuple[0]:02X}{rgb_tuple[1]:02X}{rgb_tuple[2]:02X}"
if color_format == "rgb":
return rgb_tuple
else:
return hex_str
def convert_colorscheme_to_color_map(color_scheme: colorscheme.ColorScheme, color_format: str) -> dict:
"""Convert weblogo ColorScheme into bokeh color map
:param color_scheme: a weblogo ColorScheme object
:param color_format: 'hex' or 'rgb' for hex string or RGB tuple, respectively
:returns: a dict of bokeh colors indexed by letter
"""
# return a Bokeh color object or simple RGB tuple (for draw_alignment)
# convert SymbolColor to bokeh color object
color_dict = dict()
for rule in color_scheme.rules:
color_dict[rule.symbols] = convert_weblogo_color(rule.color, color_format)
# default for spaces (white)
color_dict["-*"] = convert_weblogo_color(Color.from_string("white"), color_format)
# expand letter strings so that dict maps to single letters
expanded_color_dict = dict()
for letters, color in color_dict.items():
expanded_color_dict.update(dict((l, color) for l in letters))
return expanded_color_dict
def apply_matching_colorscheme(letter, ref_letter, color_format: str):
"""Apply a match/mismatch color scheme to sequence letters
:param letter: letter from target sequence
:param ref_letter: letter from reference sequence (for match/mismatch)
:param color_format: 'hex' or 'rgb' for hex string or RGB tuple, respectively
:returns: an RGB hex string (for Bokeh) or simple RGB tuple (for vizqes)
"""
# gap match
if letter == gap_letter and letter == ref_letter:
return convert_weblogo_color(Color.from_string("lightblue"), color_format)
# gap
elif letter == gap_letter:
return convert_weblogo_color(Color.from_string("white"), color_format)
# match
elif letter == ref_letter:
return convert_weblogo_color(Color.from_string("limegreen"), color_format)
# mismatch
else:
return convert_weblogo_color(Color.from_string("darkred"), color_format)
def find_font(size, fontpath=None):
"""Find and scale font based on fontpath.
Helper function for draw_alignment.
:param size: desired font size
:param fontpath: optional search path for font file (.ttf)
:returns: PIL.ImageFont object
:sa: vizqes (https://pypi.python.org/pypi/vizqes)
"""
if fontpath:
font_searchpath = fontpath
else:
font_searchpath = os.path.join(os.path.dirname(__file__), "FreeMono.ttf")
try:
font = ImageFont.truetype(font_searchpath, size=size)
sys.stdout.write("Found font in {}\n".format(str(font_searchpath)))
except IOError as e:
sys.stderr.write(str(e))
sys.stderr.write("could not find font in {}\nUsing default\n".format(str(font_searchpath)))
font = ImageFont.load_default()
return font
def draw_alignment(
aligned,
colorscheme=aa_chemistry_simple,
boxwidth=2,
boxheight=12,
label_width=100,
show_ids=False,
show_names=False,
show_descriptions=False,
show_grouping=False,
):
"""Generate a colored figure from an alignment
:param aligned: MultipleSeqAlignment object
:param colorscheme: a weblogo ColorScheme object
:param boxwidth: column width of alignment
:param boxheight: row height of alignment
:param label_width: maximum length of row label; if None, extend to maximum label length
:param show_ids: if True, show SeqRecord ID for each row
:param show_names: if True, show SeqRecord name for each row
:param show_descriptions: if True, show SeqRecord description for each row
:param show_grouping: if True, highlight changes from reference in red against green background,
instead of using the residue colorscheme
:returns: PIL Image object
:note: based on vizqespkg.vizqes_main.draw
:sa: vizqespkg.vizqes_main.draw
:sa: http://www.bioinformatics.nl/~berndb/aacolour.html
"""
if show_names or show_ids or show_descriptions:
font = find_font(boxheight)
offset = -1
if show_names:
offset += font.getsize(max([m.name[None:label_width] for m in aligned], key=len))[0] + 1
if show_ids:
offset += font.getsize(max([m.id[None:label_width] for m in aligned], key=len))[0] + 1
if show_descriptions:
offset += font.getsize(max([m.description[None:label_width] for m in aligned], key=len))[0] + 1
else:
font, offset = None, 0
height = len(aligned) * boxheight
width = aligned.get_alignment_length() * boxwidth + offset
img = Image.new("RGB", (width, height), "white")
draw = ImageDraw.Draw(img)
yd = None
color_dict = convert_colorscheme_to_color_map(colorscheme, color_format="rgb")
refseq = aligned[0].seq
for y, member in enumerate(aligned):
y *= boxheight
for x, xs in enumerate(member.seq):
if show_grouping:
color = apply_matching_colorscheme(xs, refseq[x], color_format="rgb")
else:
color = color_dict[xs]
x *= boxwidth
for i in range(0, boxwidth):
xd = x + i + offset
for j in range(0, boxheight):
yd = y + j
draw.point((xd, yd), fill=color)
if show_names or show_ids or show_descriptions:
text = ""
if show_names:
text += member.name[None:label_width] + " "
if show_ids:
text += member.id[None:label_width] + " "
if show_descriptions:
text += member.description[None:label_width] + " "
# clip last ' ' from text
draw.text((0, yd - boxheight), text[:-1], font=font, fill=(0, 0, 0))
return img
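# Illustrative usage sketch (hypothetical helper, never called): assumes
# Biopython is installed; the sequences and output file name are arbitrary.
def _draw_alignment_example():
    from Bio.Align import MultipleSeqAlignment
    from Bio.Seq import Seq
    from Bio.SeqRecord import SeqRecord
    aligned = MultipleSeqAlignment([
        SeqRecord(Seq("MKT-AILV"), id="ref", name="ref", description="reference"),
        SeqRecord(Seq("MKTGAILV"), id="s1", name="s1", description="variant"),
    ])
    img = draw_alignment(aligned, colorscheme=aa_chemistry_simple,
                         boxwidth=4, boxheight=16, show_ids=True)
    img.save("alignment.png")
    return img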
def view_alignment(
aligned,
fontsize="9pt",
show_N=100,
colorscheme=aa_chemistry_simple,
boxwidth=9,
boxheight=15,
label_width=None,
show_descriptions=False,
show_grouping=False,
):
"""Bokeh sequence alignment view for protein and nucleic acid sequences
:sa: https://dmnfarrell.github.io/bioinformatics/bokeh-sequence-aligner
:param aligned: MultipleSeqAlignment object
:param fontsize: font size for text labels
:param show_N: size of sequence window (in number of sequence letters)
:param colorscheme: a weblogo ColorScheme object
:param boxwidth: column width of alignment
:param boxheight: row height of alignment
:param label_width: maximum length of row label; if None, extend to maximum label length
:param show_descriptions: if True, show SeqRecord description for each row
:param show_grouping: if True, highlight changes from reference in red against green background,
instead of using the residue colorscheme
:returns: A Bokeh plot of the Multiple Sequence Alignment.
"""
def get_colors(seqs, color_scheme):
"""make colors for letters in sequence
:param seqs: A string sequence.
:param color_scheme: A string.
:returns: a sequence of colors for each letter in seqs.
"""
# get colors
color_dict = convert_colorscheme_to_color_map(color_scheme, color_format="hex")
# assign colors to sequences
text = [i for s in list(seqs) for i in s]
return [color_dict[a] for a in text]
def get_colors_for_matching(seqs):
"""match/mismatch color scheme for show_grouping
:param seqs: Sequences for which colors need to be matched.
:returns: a list of colors (strings)
"""
refseq = seqs[0]
colors = list()
for seq in list(seqs):
for xs, ref_s in zip(seq, refseq):
colors.append(apply_matching_colorscheme(xs, ref_s, color_format="hex"))
return colors
# make sequence and id lists from the aligned object
seqs = [rec.seq for rec in (aligned)]
if show_descriptions:
labels = [f"{row} - {rec.description} ({rec.id})" for (row, rec) in enumerate(aligned)]
else:
labels = [f"{row} - {rec.id}" for (row, rec) in enumerate(aligned)]
if label_width:
labels = [label[:label_width] for label in labels]
else:
label_width = max(len(label) for label in labels)
text = [i for s in list(seqs) for i in s]
if show_grouping:
colors = get_colors_for_matching(seqs)
else:
colors = get_colors(seqs, colorscheme)
N = len(seqs[0])
S = len(seqs)
x = np.arange(1, N + 1)
# need to reverse y so that sequences are plotted top-to-bottom
y = np.arange(S - 1, -1, -1)
# creates a 2D grid of coords from the 1D arrays
xx, yy = np.meshgrid(x, y)
# flattens the arrays
gx = xx.ravel()
gy = yy.flatten()
# use recty for rect coords with an offset
recty = gy + 0.5
# now we can create the ColumnDataSource with all the arrays
source = bk.models.ColumnDataSource(dict(x=gx, y=gy, recty=recty, text=text, colors=colors))
plot_height = len(seqs) * boxheight + 50
x_range = bk.models.Range1d(0, N + 1, bounds="auto")
viewlen = min(show_N, N)
# view_range is for the close up view
view_range = (0, viewlen)
tools = "xpan,xwheel_zoom,reset,save"
# plot_width combines length of text labels and number of letters in sequence view window
# note: this part requires additional tuning; 5 pixel average width of y-axis labels is a guess
plot_width = int(5 * label_width) + boxwidth * viewlen + 40
# entire sequence view (no text, with zoom)
p = figure(
title=None,
plot_width=plot_width,
plot_height=50,
x_range=x_range,
y_range=(0, S),
tools=tools,
min_border=0,
toolbar_location="below",
)
rects = bk.models.glyphs.Rect(
x="x",
y="recty",
width=1,
height=1,
fill_color="colors",
line_color=None,
fill_alpha=0.6,
)
p.add_glyph(source, rects)
p.yaxis.visible = False
p.grid.visible = False
# sequence text view with ability to scroll along x axis
p1 = figure(
title=None,
plot_width=plot_width,
plot_height=plot_height,
x_range=view_range,
y_range=labels[::-1],
tools="xpan,reset,save",
min_border=0,
toolbar_location="below",
) # , lod_factor=1)
glyph = bk.models.glyphs.Text(
x="x",
y="y",
text="text",
text_align="center",
text_color="black",
text_font=value("monospace"),
text_font_size=fontsize,
)
rects = bk.models.glyphs.Rect(
x="x",
y="recty",
width=1,
height=1,
fill_color="colors",
line_color=None,
fill_alpha=0.4,
)
p1.add_glyph(source, glyph)
p1.add_glyph(source, rects)
p1.grid.visible = False
p1.xaxis.major_label_text_font_style = "bold"
p1.yaxis.minor_tick_line_width = 0
p1.yaxis.major_tick_line_width = 0
p = bk.layouts.gridplot([[p], [p1]], toolbar_location="below")
show(p)
return p
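# Illustrative usage sketch (hypothetical helper, never called): renders an
# interactive Bokeh view of an existing MultipleSeqAlignment, e.g. inside a
# Jupyter notebook where output_notebook() was called on import above.
def _view_alignment_example(aligned):
    return view_alignment(aligned, colorscheme=aa_chemistry_simple,
                          show_N=60, boxwidth=9, boxheight=15,
                          show_descriptions=True)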
|
[
"PIL.Image.new",
"bokeh.io.output_notebook",
"numpy.meshgrid",
"bokeh.plotting.figure",
"PIL.ImageFont.load_default",
"weblogo.color.Color.from_string",
"os.path.dirname",
"bokeh.models.Range1d",
"weblogo.colorscheme.SymbolColor",
"PIL.ImageFont.truetype",
"numpy.arange",
"bokeh.plotting.show",
"bokeh.models.glyphs.Rect",
"bokeh.core.properties.value",
"PIL.ImageDraw.Draw",
"bokeh.layouts.gridplot"
] |
[((423, 440), 'bokeh.io.output_notebook', 'output_notebook', ([], {}), '()\n', (438, 440), False, 'from bokeh.io import output_notebook\n'), ((7357, 7399), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(width, height)', '"""white"""'], {}), "('RGB', (width, height), 'white')\n", (7366, 7399), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((7411, 7430), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (7425, 7430), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((11262, 11281), 'numpy.arange', 'np.arange', (['(1)', '(N + 1)'], {}), '(1, N + 1)\n', (11271, 11281), True, 'import numpy as np\n'), ((11358, 11382), 'numpy.arange', 'np.arange', (['(S - 1)', '(-1)', '(-1)'], {}), '(S - 1, -1, -1)\n', (11367, 11382), True, 'import numpy as np\n'), ((11449, 11466), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (11460, 11466), True, 'import numpy as np\n'), ((11824, 11866), 'bokeh.models.Range1d', 'bk.models.Range1d', (['(0)', '(N + 1)'], {'bounds': '"""auto"""'}), "(0, N + 1, bounds='auto')\n", (11841, 11866), True, 'import bokeh as bk\n'), ((12326, 12473), 'bokeh.plotting.figure', 'figure', ([], {'title': 'None', 'plot_width': 'plot_width', 'plot_height': '(50)', 'x_range': 'x_range', 'y_range': '(0, S)', 'tools': 'tools', 'min_border': '(0)', 'toolbar_location': '"""below"""'}), "(title=None, plot_width=plot_width, plot_height=50, x_range=x_range,\n y_range=(0, S), tools=tools, min_border=0, toolbar_location='below')\n", (12332, 12473), False, 'from bokeh.plotting import figure, show\n'), ((12553, 12670), 'bokeh.models.glyphs.Rect', 'bk.models.glyphs.Rect', ([], {'x': '"""x"""', 'y': '"""recty"""', 'width': '(1)', 'height': '(1)', 'fill_color': '"""colors"""', 'line_color': 'None', 'fill_alpha': '(0.6)'}), "(x='x', y='recty', width=1, height=1, fill_color=\n 'colors', line_color=None, fill_alpha=0.6)\n", (12574, 12670), True, 'import bokeh as bk\n'), ((12886, 13068), 'bokeh.plotting.figure', 'figure', ([], {'title': 'None', 'plot_width': 'plot_width', 'plot_height': 'plot_height', 'x_range': 'view_range', 'y_range': 'labels[::-1]', 'tools': '"""xpan,reset,save"""', 'min_border': '(0)', 'toolbar_location': '"""below"""'}), "(title=None, plot_width=plot_width, plot_height=plot_height, x_range=\n view_range, y_range=labels[::-1], tools='xpan,reset,save', min_border=0,\n toolbar_location='below')\n", (12892, 13068), False, 'from bokeh.plotting import figure, show\n'), ((13382, 13499), 'bokeh.models.glyphs.Rect', 'bk.models.glyphs.Rect', ([], {'x': '"""x"""', 'y': '"""recty"""', 'width': '(1)', 'height': '(1)', 'fill_color': '"""colors"""', 'line_color': 'None', 'fill_alpha': '(0.4)'}), "(x='x', y='recty', width=1, height=1, fill_color=\n 'colors', line_color=None, fill_alpha=0.4)\n", (13403, 13499), True, 'import bokeh as bk\n'), ((13788, 13846), 'bokeh.layouts.gridplot', 'bk.layouts.gridplot', (['[[p], [p1]]'], {'toolbar_location': '"""below"""'}), "([[p], [p1]], toolbar_location='below')\n", (13807, 13846), True, 'import bokeh as bk\n'), ((13851, 13858), 'bokeh.plotting.show', 'show', (['p'], {}), '(p)\n', (13855, 13858), False, 'from bokeh.plotting import figure, show\n'), ((769, 821), 'weblogo.colorscheme.SymbolColor', 'colorscheme.SymbolColor', (['"""GSTYCNQ"""', '"""green"""', '"""polar"""'], {}), "('GSTYCNQ', 'green', 'polar')\n", (792, 821), False, 'from weblogo import colorscheme\n'), ((831, 878), 'weblogo.colorscheme.SymbolColor', 'colorscheme.SymbolColor', (['"""KRH"""', '"""blue"""', '"""basic"""'], {}), "('KRH', 'blue', 'basic')\n", 
(854, 878), False, 'from weblogo import colorscheme\n'), ((888, 934), 'weblogo.colorscheme.SymbolColor', 'colorscheme.SymbolColor', (['"""DE"""', '"""red"""', '"""acidic"""'], {}), "('DE', 'red', 'acidic')\n", (911, 934), False, 'from weblogo import colorscheme\n'), ((944, 1003), 'weblogo.colorscheme.SymbolColor', 'colorscheme.SymbolColor', (['"""PAWFLIMV"""', '"""black"""', '"""hydrophobic"""'], {}), "('PAWFLIMV', 'black', 'hydrophobic')\n", (967, 1003), False, 'from weblogo import colorscheme\n'), ((1013, 1060), 'weblogo.colorscheme.SymbolColor', 'colorscheme.SymbolColor', (['"""X"""', '"""gray"""', '"""unknown"""'], {}), "('X', 'gray', 'unknown')\n", (1036, 1060), False, 'from weblogo import colorscheme\n'), ((1182, 1232), 'weblogo.colorscheme.SymbolColor', 'colorscheme.SymbolColor', (['"""GSTYC"""', '"""green"""', '"""polar"""'], {}), "('GSTYC', 'green', 'polar')\n", (1205, 1232), False, 'from weblogo import colorscheme\n'), ((1242, 1292), 'weblogo.colorscheme.SymbolColor', 'colorscheme.SymbolColor', (['"""NQ"""', '"""purple"""', '"""neutral"""'], {}), "('NQ', 'purple', 'neutral')\n", (1265, 1292), False, 'from weblogo import colorscheme\n'), ((1302, 1349), 'weblogo.colorscheme.SymbolColor', 'colorscheme.SymbolColor', (['"""KRH"""', '"""blue"""', '"""basic"""'], {}), "('KRH', 'blue', 'basic')\n", (1325, 1349), False, 'from weblogo import colorscheme\n'), ((1359, 1405), 'weblogo.colorscheme.SymbolColor', 'colorscheme.SymbolColor', (['"""DE"""', '"""red"""', '"""acidic"""'], {}), "('DE', 'red', 'acidic')\n", (1382, 1405), False, 'from weblogo import colorscheme\n'), ((1415, 1474), 'weblogo.colorscheme.SymbolColor', 'colorscheme.SymbolColor', (['"""PAWFLIMV"""', '"""black"""', '"""hydrophobic"""'], {}), "('PAWFLIMV', 'black', 'hydrophobic')\n", (1438, 1474), False, 'from weblogo import colorscheme\n'), ((1484, 1531), 'weblogo.colorscheme.SymbolColor', 'colorscheme.SymbolColor', (['"""X"""', '"""gray"""', '"""unknown"""'], {}), "('X', 'gray', 'unknown')\n", (1507, 1531), False, 'from weblogo import colorscheme\n'), ((1629, 1675), 'weblogo.colorscheme.SymbolColor', 'colorscheme.SymbolColor', (['"""EB"""', '"""red"""', '"""strand"""'], {}), "('EB', 'red', 'strand')\n", (1652, 1675), False, 'from weblogo import colorscheme\n'), ((1685, 1732), 'weblogo.colorscheme.SymbolColor', 'colorscheme.SymbolColor', (['"""HGI"""', '"""blue"""', '"""helix"""'], {}), "('HGI', 'blue', 'helix')\n", (1708, 1732), False, 'from weblogo import colorscheme\n'), ((1742, 1795), 'weblogo.colorscheme.SymbolColor', 'colorscheme.SymbolColor', (['"""TSC-"""', '"""light gray"""', '"""coil"""'], {}), "('TSC-', 'light gray', 'coil')\n", (1765, 1795), False, 'from weblogo import colorscheme\n'), ((1889, 1927), 'weblogo.colorscheme.SymbolColor', 'colorscheme.SymbolColor', (['"""G"""', '"""orange"""'], {}), "('G', 'orange')\n", (1912, 1927), False, 'from weblogo import colorscheme\n'), ((1937, 1973), 'weblogo.colorscheme.SymbolColor', 'colorscheme.SymbolColor', (['"""TU"""', '"""red"""'], {}), "('TU', 'red')\n", (1960, 1973), False, 'from weblogo import colorscheme\n'), ((1983, 2019), 'weblogo.colorscheme.SymbolColor', 'colorscheme.SymbolColor', (['"""C"""', '"""blue"""'], {}), "('C', 'blue')\n", (2006, 2019), False, 'from weblogo import colorscheme\n'), ((2029, 2066), 'weblogo.colorscheme.SymbolColor', 'colorscheme.SymbolColor', (['"""A"""', '"""green"""'], {}), "('A', 'green')\n", (2052, 2066), False, 'from weblogo import colorscheme\n'), ((3567, 3593), 'weblogo.color.Color.from_string', 'Color.from_string', 
(['"""white"""'], {}), "('white')\n", (3584, 3593), False, 'from weblogo.color import Color\n'), ((5273, 5319), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['font_searchpath'], {'size': 'size'}), '(font_searchpath, size=size)\n', (5291, 5319), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((4385, 4415), 'weblogo.color.Color.from_string', 'Color.from_string', (['"""lightblue"""'], {}), "('lightblue')\n", (4402, 4415), False, 'from weblogo.color import Color\n'), ((5206, 5231), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (5221, 5231), False, 'import os\n'), ((5569, 5593), 'PIL.ImageFont.load_default', 'ImageFont.load_default', ([], {}), '()\n', (5591, 5593), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((13311, 13329), 'bokeh.core.properties.value', 'value', (['"""monospace"""'], {}), "('monospace')\n", (13316, 13329), False, 'from bokeh.core.properties import value\n'), ((4509, 4535), 'weblogo.color.Color.from_string', 'Color.from_string', (['"""white"""'], {}), "('white')\n", (4526, 4535), False, 'from weblogo.color import Color\n'), ((4631, 4661), 'weblogo.color.Color.from_string', 'Color.from_string', (['"""limegreen"""'], {}), "('limegreen')\n", (4648, 4661), False, 'from weblogo.color import Color\n'), ((4739, 4767), 'weblogo.color.Color.from_string', 'Color.from_string', (['"""darkred"""'], {}), "('darkred')\n", (4756, 4767), False, 'from weblogo.color import Color\n')]
|
from math import floor, ceil
import numpy as np
import matplotlib.pyplot as plt
import datetime
import folium
import random
import seaborn as sns
import pandas as pd
import plotly.express as px
import geopandas as gpd
# import movingpandas as mpd
# from statistics import mean
from shapely.geometry import Polygon, MultiPoint
import json
from branca.colormap import linear
# from copy import copy
from sklearn.cluster import DBSCAN
from geopy.distance import great_circle
class Visualiser():
def __init__(self):
print("Initializing visualisation class") # do we need anything?
def st_cube_simple(self, points):
""" To plot a space-time cube of one trajectory. Checks for the start time
and calculates seconds passed from it for every next point
Keyword Arguments:
points {dataframe} -- A Pandas dataframe of a trajectory
Returns:
No Return
"""
def seconds_from_start(x, start):
date_time_obj = datetime.datetime.strptime(x, '%Y-%m-%dT%H:%M:%S')
seconds = (date_time_obj-start).total_seconds()
return int(seconds)
points['lat'] = points['geometry'].apply(lambda coord: coord.y)
points['lng'] = points['geometry'].apply(lambda coord: coord.x)
start_time = datetime.datetime.strptime(
points.time.iloc[0], '%Y-%m-%dT%H:%M:%S')
points['time_seconds'] = np.vectorize(seconds_from_start)(
np.array(points.time.values.tolist()), start_time)
# plot the space-time cube
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot(points['lng'], points['lat'], points['time_seconds'])
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')
ax.set_zlabel('Seconds since start')
fig.canvas.set_window_title('Space-Time Cube')
plt.show()
def plot_full_correlation(self, points_df):
""" To plot a correlation matrix for all columns that contain word
'.value' in their name
Keyword Arguments:
points_df {dataframe} -- A Pandas dataframe of a trajectory
Returns:
No Return
"""
value_names = [s for s in points_df.columns if
'.value' in s]
value_columns = [np.array(
points_df[column].values.tolist()) for column
in value_names]
values_transposed = np.transpose(value_columns)
values_df = pd.DataFrame(values_transposed)
values_df.columns = value_names
f, ax = plt.subplots(figsize=(10, 8))
corr = values_df.corr()
sns.heatmap(corr, mask=np.zeros_like(corr, dtype=bool),
cmap=sns.diverging_palette(220, 10, as_cmap=True),
square=True, ax=ax)
def plot_pair_correlation(self, points_df, column_1, column_2,
sort_by='id', regression=False):
""" To plot a pairwise relationship in a dataset.
Special case for the Acceleration values to see difference
(if any) between accelerating and braking.
Keyword Arguments:
points_df {dataframe} -- A Pandas dataframe of a trajectory
column_1, column_2 {string} -- names of 2 columns to analyse
sort_by {string} -- 'id' or 'temperature'
regression {boolean} -- defines which kind of plot to plot
Returns:
No Return
"""
if (sort_by == 'temperature'):
bins = [-10, 0, 5, 10, 20, 30, 40]
copied = points_df.copy()
copied['Intake Temperature.value'] = \
copied['Intake Temperature.value'].astype(int)
copied['binned_temp'] = pd.cut(copied['Intake Temperature.value'],
bins)
if (column_2 == "Acceleration.value" or
column_1 == "Acceleration.value"):
df1 = copied[copied["Acceleration.value"] > 0]
df2 = copied[copied["Acceleration.value"] < 0]
if (regression):
sns.lmplot(x=column_1, y=column_2, hue='binned_temp',
data=df1, palette="viridis")
sns.lmplot(x=column_1, y=column_2, hue='binned_temp',
data=df2, palette="viridis")
else:
sns.pairplot(df1, vars=[column_1, column_2],
hue="binned_temp")
sns.pairplot(df2, vars=[column_1, column_2],
hue="binned_temp")
else:
if (regression):
sns.lmplot(x=column_1, y=column_2, hue='binned_temp',
data=copied)
else:
sns.pairplot(copied, vars=[column_1, column_2],
hue="binned_temp")
else:
if (column_2 == "Acceleration.value" or
column_1 == "Acceleration.value"):
df1 = points_df[points_df["Acceleration.value"] > 0]
df2 = points_df[points_df["Acceleration.value"] < 0]
if (regression):
sns.lmplot(x=column_1, y=column_2, hue='track.id',
data=df1, palette="viridis")
sns.lmplot(x=column_1, y=column_2, hue='track.id',
data=df2, palette="viridis")
else:
sns.pairplot(df1, vars=[column_1, column_2],
hue="track.id")
sns.pairplot(df2, vars=[column_1, column_2],
hue="track.id")
else:
if (regression):
sns.lmplot(x=column_1, y=column_2, hue='track.id',
data=points_df, palette="viridis")
else:
sns.pairplot(points_df, vars=[column_1, column_2],
hue="track.id")
def plot_distribution(self, points, column):
fig, (ax1, ax2, ax3) = plt.subplots(
1, 3, figsize=(15, 5), gridspec_kw={'width_ratios': [5, 5, 5]})
sns.boxplot(x=points[column], ax=ax1)
ax1.set_title('Boxplot')
sns.kdeplot(points[column], shade=True, color="r", ax=ax2)
ax2.set_title('Gaussian kernel density estimate')
sns.distplot(points[column], kde=False, ax=ax3)
ax3.set_title('Histogram')
fig.tight_layout()
plt.show()
def create_map(self, trajectories):
""" To create a Folium Map object (in case its not already available)
Keyword Arguments:
trajectories {mpd trajectory collection} -- A Moving Pandas
Trajectory Collection
Returns:
map {folium map} -- Newly created map object
"""
map_zoom_point = []
map_zoom_point.append(trajectories[0].df['geometry'][0].y)
map_zoom_point.append(trajectories[0].df['geometry'][0].x)
map = folium.Map(location=[map_zoom_point[0], map_zoom_point[1]],
zoom_start=12, tiles='cartodbpositron')
return map
def plot_flows(self, flows, flow_map):
""" To plot provided aggregated flows over the provided map
Keyword Arguments:
flows {mpd aggregated flows} -- A Moving Pandas aggregation
function output
flow_map {folium map} -- Map over which trajectories are to be
plotted
Returns:
No Return
"""
index = 0
# to extract coordinates from "FLOWS"
for row in range(0, len(flows)):
my_polyline = []
mylng = flows.loc[index, 'geometry'].coords[0][0]
mylat = flows.loc[index, 'geometry'].coords[0][1]
my_polyline.append([mylat, mylng])
mylng = flows.loc[index, 'geometry'].coords[1][0]
mylat = flows.loc[index, 'geometry'].coords[1][1]
my_polyline.append([mylat, mylng])
# to plot the point coordinates over the map as a polyline whose
# thickness reflects the flow weight
myweight = int(flows.loc[index, 'weight'])
my_line = folium.PolyLine(locations=my_polyline,
weight=round(myweight / 2))
# halve the weight so very large values stay readable
flow_map.add_child(my_line)
index += 1
def plot_point_values(self, points, value):
""" To show points on a map
Keyword Arguments:
points {GeoDataFrame} -- points input
value {string} -- column value to use for colouring
Returns:
No Return
"""
points['lat'] = points['geometry'].apply(lambda coord: coord.y)
points['lng'] = points['geometry'].apply(lambda coord: coord.x)
# Visualizing points by the desired value
fig = px.scatter_mapbox(points, lat="lat", lon="lng", color=value,
title=value + " visualisation", zoom=8)
fig.update_layout(mapbox_style="open-street-map",
margin={"r": 5, "t": 50, "l": 10, "b": 5})
fig.show()
def plot_region(self, region, region_map, region_color, label):
""" To plot provided regions over the provided map
Keyword Arguments:
region {shapely Polygon} -- A shapely based Polygon
region_map {folium map} -- Map over which trajectories are to be
plotted
region_color {string} -- Name of the Color in String
label {String} -- Label for popup
Returns:
No Return
"""
region_coords = []
# to extract coordinates from the provided region
index = 0
for value in range(0, len(region.exterior.coords)):
temp = []
temp.append(region.exterior.coords[index][1])
temp.append(region.exterior.coords[index][0])
region_coords.append(temp)
index += 1
# to plot point's coordinates over the map as polygon
region_plot = folium.Polygon(locations=region_coords,
color=region_color, popup=label)
region_map.add_child(region_plot)
def plot_weeks_trajectory(self, weekwise_trajectory_collection,
trajectory_map, marker_radius):
""" To iterate over list with weekwise trajectory collection and plot
each over provided folium map object
Keyword Arguments:
weekwise_trajectory_collection {list of mpd trajectory collection}
-- a list of 7 collections, one per day of the week
trajectory_map {folium map} -- Map over which trajectories are to
be plotted
marker_radius {integer} -- Radius of each point marker (circle)
Returns:
No Return
"""
# Dictionary to assign color based on a week day
colors = {0: "crimson", 1: "blue", 2: "purple", 3: "yellow",
4: "orange", 5: "black", 6: "green"}
day = 0
for traj_day in weekwise_trajectory_collection:
track_id = -1 # to store track id of each track for Pop Up
trajectory_points = [] # to store coordinate points for each track
traj_row = 0
# if the trajectory collection has at least a single trajectory
if(len(traj_day.trajectories) > 0):
for traj in traj_day.trajectories:
point_row = 0
track_id = traj.df['track.id'][0]
for point in range(len(traj_day.trajectories[
traj_row].df)):
temp = []
temp.append(traj.df['geometry'][point_row].y)
temp.append(traj.df['geometry'][point_row].x)
trajectory_points.append(temp)
point_row += 1
traj_row += 1
# Plot each day's point coordinates in a single colour,
# with the track id as popup
for row in trajectory_points:
folium.Circle(radius=marker_radius, location=row,
color=colors[day], popup=track_id).add_to(
trajectory_map)
day += 1
def get_trajectories_coords(self, trajectories):
""" To iterate over trajectory collection and return individual track points
Keyword Arguments:
trajectories {mpd trajectory collection} -- A Moving Pandas
Trajectory Collection
Returns:
trajectory_list -- A list of two elements at each index,
track_id & array of associated point's coordinates
"""
trajectory_list = []
for traj in trajectories:
track_points = []
# Extracting Point's coordinate for each trajectory
for i in range(len(traj.df)):
temp = []
temp.append(traj.df['geometry'][i].y)
temp.append(traj.df['geometry'][i].x)
track_points.append(temp)
# Extracting Track_Id for each trajectory
track_id = []
track_id.append(traj.df['track.id'][0])
# Creating a list with [id,coordinates] for each individual
# trajectory
traj_temp = [track_id, track_points]
trajectory_list.append(traj_temp)
return trajectory_list
def plot_trajectories(self, trajectory_collection, trajectory_map,
marker_radius):
""" To iterate over trajectory collection and plot each over
provided folium map object
Keyword Arguments:
trajectory_collection {mpd trajectory collection}
-- A Moving Pandas Trajectory Collection
trajectory_map {folium map} -- Map over which trajectories are
to be plotted
marker_radius {integer} -- Radius of each point marker (circle)
Returns:
No Return
"""
# Function to get random hexcode to assign unique color to each
# trajectory
def get_hexcode_color():
random_number = random.randint(0, 16777215)
hex_number = str(hex(random_number))
hex_number = '#' + hex_number[2:]
return hex_number
# Call to function to iterate over trajectory collection
# and return individual track points
traj_list = self.get_trajectories_coords(trajectory_collection)
traj_index = 0
for traj in traj_list:
# Extracting Track_Id and Point's coordinate for each trajectory
track_id = traj[0]
track_points = traj[1]
# Call to function to random color for this trajectory
track_color = get_hexcode_color()
# Plotting points of each trajectory with a single color
point_index = 0
for row in track_points:
# Pop-Up will contain Track Id
folium.Circle(radius=marker_radius, location=row,
color=track_color, popup=track_id).add_to(
trajectory_map)
point_index += 1
traj_index += 1
##################################
# RELATED TO WEEK WISE BAR GRAPH #
def extract_daywise_lengths(self, weekly_trajectories):
""" To iterate over list with weekwise trajectory collection and
extract point's coordinates for day wise trajectories
Keyword Arguments:
weekly_trajectories {list of mpd trajectory collection}
-- a list of 7 collections, one per day of the week
Returns:
day_length {list} -- list with total length for each day
"""
days = {0: "Monday", 1: "Tuesday", 2: "Wednesday", 3: "Thursday",
4: "Friday", 5: "Saturday", 6: "Sunday"}
day_length = [] # to store total length for each day at each index
day = 0
for traj_day in range(len(weekly_trajectories)):
temp = []
# if the trajectory collection has at least a single trajectory
if(len(weekly_trajectories[day].trajectories) > 0):
traj_row = 0
length_sum = 0 # to store total sum of track length for each
# day's collection
for traj in range(len(weekly_trajectories[day].trajectories)):
length_sum += round(weekly_trajectories[day].trajectories[
traj_row].df['track.length'][0], 2)
traj_row += 1
temp.append(days[day]) # storing weekday name like Monday,
# Tuesday etc at first index of list
temp.append(length_sum) # storing associated total length
# at second index of list
day_length.append(temp)
else:
temp.append(days[day])
temp.append(0)
day_length.append(temp)
day += 1
return day_length
def extract_barplot_info(self, day_length):
""" To extract information for matplotlib plot
Keyword Arguments:
day_length {list} -- list with total length for each day
Returns:
day, height, highest, highest_index, average {strings/integers}
-- attributes required for plots
"""
day = []
height = []
highest = 0
highest_index = -1
total = 0
index = 0
for row in day_length:
day.append(row[0][:3]) # extracting name of day of the week
# in form of Mon, Tue etc.
track_length = round(row[1], 2) # extracting total length
# associated with each day rounded to 2 decimals
height.append(track_length)
# extracting the highest value out of 'total lengths' from all
# weekdays
if(track_length > highest):
highest = track_length
highest_index = index
total += track_length
index += 1
average_value = total/7 # extracting average value out of
# 'total lengths' from all weekdays
average = []
for row in day:
average.append(average_value) # a list of same value at each
# index, just to plot a horizontal line in plot
return day, height, highest, highest_index, average
def plot_daywise_track(self, week_trajectories):
""" To plot bar graphy of week day vs total length of that day
(all tracks combined)
Keyword Arguments:
weekly_trajectories {list of mpd trajectory collection}
-- 7 indices respective of each day of the week
Returns:
No Return
"""
# Call to function to extract daywise lengths
daywise_length = self.extract_daywise_lengths(week_trajectories)
# Call to function to extract attributes for plot
day, height, highest, highest_index, average = \
self.extract_barplot_info(daywise_length)
bar_plot = plt.bar(day, height, color=(0.1, 0.1, 0.1, 0.1),
edgecolor='blue')
bar_plot[highest_index].set_edgecolor('r')
plt.ylabel('Total Distance Travelled (Km)')
axes2 = plt.twinx()
axes2.set_ylim(0, highest+1)
axes2.plot(day, average, color='b', label='Average Distance')
plt.suptitle('Which day has a different movement pattern than others?')
plt.legend()
plt.show()
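
# --- Hedged usage sketch (not from the original repository) ---
# Exercises Visualiser.plot_distribution with a small synthetic DataFrame; the
# _demo_* name and the 'Speed.value' column are assumptions for illustration,
# and the plot relies on the (older) seaborn API already used by the class.
def _demo_visualiser_distribution():
    rng = np.random.default_rng(0)
    demo_points = pd.DataFrame({"Speed.value": rng.gamma(2.0, 10.0, 500)})
    Visualiser().plot_distribution(demo_points, "Speed.value")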
def aggregateByGrid(df, field, summary, gridSize):
"""
Aggregates the specified field with the chosen summary type and a
user-defined grid size, and returns the aggregated grids with the summary.
Parameters
----------
df : geopandas dataframe
field : string
field to be summarized.
summary : string
type of summary to be computed, e.g. min, max, sum, median
gridSize : float
the size of grid on same unit as geodataframe coordinates.
Returns
-------
geodataframe
Aggregated grids with summary on it
"""
def round_down(num, divisor):
return floor(num / divisor) * divisor
def round_up(num, divisor):
return ceil(num / divisor) * divisor
# Get crs from data
sourceCRS = df.crs
targetCRS = {"init": "EPSG:3857"}
# Reproject to Mercator
df = df.to_crs(targetCRS)
# Get bounds
xmin, ymin, xmax, ymax = df.total_bounds
print(xmin, ymin, xmax, ymax)
height, width = gridSize, gridSize
top, left = round_up(ymax, height), round_down(xmin, width)
bottom, right = round_down(ymin, height), round_up(xmax, width)
rows = int((top - bottom) / height)+1
cols = int((right - left) / width)+1
XleftOrigin = left
XrightOrigin = left + width
YtopOrigin = top
YbottomOrigin = top - height
polygons = []
for i in range(cols):
Ytop = YtopOrigin
Ybottom = YbottomOrigin
for j in range(rows):
polygons.append(Polygon([(XleftOrigin, Ytop),
(XrightOrigin, Ytop),
(XrightOrigin, Ybottom),
(XleftOrigin, Ybottom)]))
Ytop = Ytop - height
Ybottom = Ybottom - height
XleftOrigin = XleftOrigin + width
XrightOrigin = XrightOrigin + width
grid = gpd.GeoDataFrame({'geometry': polygons})
grid.crs = df.crs
# Assign gridid
numGrid = len(grid)
grid['gridId'] = list(range(numGrid))
# Identify gridId for each point
points_identified = gpd.sjoin(df, grid, op='within')
# group points by gridid and calculate mean Easting,
# store it as dataframe
# delete if field already exists
if field in grid.columns:
del grid[field]
grouped = points_identified.groupby('gridId')[field].agg(summary)
grouped_df = pd.DataFrame(grouped)
new_grid = grid.join(grouped_df, on='gridId').fillna(0)
grid = new_grid.to_crs(sourceCRS)
summarized_field = summary+"_"+field
final_grid = grid.rename(columns={field: summarized_field})
final_grid = final_grid[final_grid[summarized_field] > 0].sort_values(
by=summarized_field, ascending=False)
final_grid[summarized_field] = round(final_grid[summarized_field], 1)
final_grid['x_centroid'], final_grid['y_centroid'] = \
final_grid.geometry.centroid.x, final_grid.geometry.centroid.y
return final_grid
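
# --- Hedged usage sketch (not from the original repository) ---
# Builds a tiny WGS84 point GeoDataFrame with a made-up 'value' column and
# aggregates it onto a 500 m grid; every name here is an assumption, it assumes
# aggregateByGrid is available at module level as defined above, and the
# sjoin(op='within') call inside it may need a geopandas version that still
# accepts the 'op' keyword.
def _demo_aggregate_by_grid():
    from shapely.geometry import Point
    rng = np.random.default_rng(0)
    lons = 7.60 + 0.02 * rng.random(200)
    lats = 51.95 + 0.02 * rng.random(200)
    points = gpd.GeoDataFrame(
        {"value": rng.random(200)},
        geometry=[Point(xy) for xy in zip(lons, lats)],
        crs="EPSG:4326",
    )
    return aggregateByGrid(points, "value", "mean", gridSize=500)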
def plotAggregate(grid, field):
"""
Plots the aggregated data on grid. Please call aggregateByGrid
function before this step.
Parameters
----------
grid :polygon geodataframe
The grid geodataframe with grid and aggregated data in a column.
Grid should have a grid id or equivalent unique ids
field : string
Fieldname with aggregated data
Returns
-------
m : folium map object
Folium map with openstreetmap as base.
"""
# Prepare for grid plotting using folium
grid.columns = [cols.replace('.', '_') for cols in grid.columns]
field = field.replace('.', '_')
# Convert grid id to string
grid['gridId'] = grid['gridId'].astype(str)
# Convert data to geojson and csv
atts = pd.DataFrame(grid)
grid.to_file("grids.geojson", driver='GeoJSON')
atts.to_csv("attributes.csv", index=False)
# load spatial and non-spatial data
data_geojson_source = "grids.geojson"
# data_geojson=gpd.read_file(data_geojson_source)
data_geojson = json.load(open(data_geojson_source))
# Get coordinates for map centre
lat = grid.geometry.centroid.y.mean()
lon = grid.geometry.centroid.x.mean()
# Initialize a new folium map object
m = folium.Map(location=[lat, lon], zoom_start=10,
tiles='OpenStreetMap')
# Configure geojson layer
folium.GeoJson(data_geojson,
lambda feature: {'lineOpacity': 0.4,
'color': 'black',
'fillColor': None,
'weight': 0.5,
'fillOpacity': 0}).add_to(m)
# add attribute data
attribute_pd = pd.read_csv("attributes.csv")
attribute = pd.DataFrame(attribute_pd)
# Convert gridId to string to ensure it matches with gridId
attribute['gridId'] = attribute['gridId'].astype(str)
# construct color map
minvalue = attribute[field].min()
maxvalue = attribute[field].max()
colormap_rn = linear.YlOrRd_09.scale(minvalue, maxvalue)
# Create Dictionary for colormap
population_dict_rn = attribute.set_index('gridId')[field]
# create map
folium.GeoJson(
data_geojson,
name='Choropleth map',
style_function=lambda feature: {
'lineOpacity': 0,
'color': 'green',
'fillColor': colormap_rn(
population_dict_rn[feature['properties']['gridId']]),
'weight': 0,
'fillOpacity': 0.6
},
highlight_function=lambda feature: {'weight': 3, 'color': 'black',
'fillOpacity': 1},
tooltip=folium.features.GeoJsonTooltip(fields=[field],
aliases=[field])
).add_to(m)
# format legend
field = field.replace("_", " ")
# add a legend
colormap_rn.caption = '{value} per grid'.format(value=field)
colormap_rn.add_to(m)
# add a layer control
folium.LayerControl().add_to(m)
return m
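
# --- Hedged usage sketch (not from the original repository) ---
# Chains the two helpers above; `points` is expected to be a point GeoDataFrame
# with a numeric 'value' column and a CRS set (e.g. constructed as in
# _demo_aggregate_by_grid). Note that plotAggregate writes grids.geojson and
# attributes.csv into the working directory; the output file name below is an
# assumption.
def _demo_plot_aggregate(points):
    grid = aggregateByGrid(points, "value", "mean", gridSize=500)
    m = plotAggregate(grid, "mean_value")
    m.save("aggregate_demo.html")
    return m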
def spatioTemporalAggregation(df, field, summary, gridSize):
"""
Aggregates the given field on hour and weekday basis.
Prepares data for mosaic plot
Parameters
----------
df : geopandas dataframe
field : string
field to be summarized.
summary : string
type of summary to be computed, e.g. min, max, sum, median
gridSize : float
the size of grid on same unit as geodataframe coordinates.
Returns
-------
geodataframes: one for the larger grid and one for the subgrids
(the latter for visualization purposes only)
Aggregated grids with summary on it
"""
def round_down(num, divisor):
return floor(num / divisor) * divisor
def round_up(num, divisor):
return ceil(num / divisor) * divisor
# Get crs from data
sourceCRS = df.crs
targetCRS = {'init': "epsg:3857"}
# Reproject to Mercator
df = df.to_crs(targetCRS)
# Get bounds
xmin, ymin, xmax, ymax = df.total_bounds
height, width = gridSize, gridSize
top, left = round_up(ymax, height), round_down(xmin, width)
bottom, right = round_down(ymin, height), round_up(xmax, width)
rows = int((top - bottom) / height)+1
cols = int((right - left) / width)+1
XleftOrigin = left
XrightOrigin = left + width
YtopOrigin = top
YbottomOrigin = top - height
polygons = []
for i in range(cols):
Ytop = YtopOrigin
Ybottom = YbottomOrigin
for j in range(rows):
polygons.append(Polygon(
[(XleftOrigin, Ytop), (XrightOrigin, Ytop),
(XrightOrigin, Ybottom), (XleftOrigin, Ybottom)]))
Ytop = Ytop - height
Ybottom = Ybottom - height
XleftOrigin = XleftOrigin + width
XrightOrigin = XrightOrigin + width
grid = gpd.GeoDataFrame({'geometry': polygons})
grid.crs = (targetCRS)
# Assign gridid
numGrid = len(grid)
grid['gridId'] = list(range(numGrid))
# Identify gridId for each point
df['hour'] = df['time'].apply(
lambda x: datetime.datetime.strptime(
x, '%Y-%m-%dT%H:%M:%S')).dt.hour
df['weekday'] = df['time'].apply(
lambda x: datetime.datetime.strptime(
x, '%Y-%m-%dT%H:%M:%S')).dt.dayofweek
points_identified = gpd.sjoin(df, grid, op='within')
# group points by gridid and calculate mean Easting,
# store it as dataframe
# delete if field already exists
if field in grid.columns:
del grid[field]
# Aggregate by weekday, hour and grid
grouped = points_identified.groupby(
['gridId', 'weekday', 'hour']).agg({field: [summary]})
grouped = grouped.reset_index()
grouped.columns = grouped.columns.map("_".join)
modified_fieldname = field+"_"+summary
# Create Subgrids
subgrid, mainGrid, rowNum, columnNum, value = [], [], [], [], []
unikGrid = grouped['gridId_'].unique()
for currentGrid in unikGrid:
dataframe = grid[grid['gridId'] == currentGrid]
xmin, ymin, xmax, ymax = dataframe.total_bounds
xminn, xmaxx, yminn, ymaxx = xmin + \
(xmax-xmin)*0.05, xmax-(xmax-xmin)*0.05, ymin + \
(ymax-ymin)*0.05, ymax-(ymax-ymin)*0.05
rowOffset = (ymaxx-yminn)/24.0
colOffset = (xmaxx - xminn)/7.0
for i in range(7):
for j in range(24):
topy, bottomy, leftx, rightx = ymaxx-j*rowOffset, ymaxx - \
(j+1)*rowOffset, xminn+i * \
colOffset, xminn+(i+1)*colOffset
subgrid.append(
Polygon([(leftx, topy), (rightx, topy),
(rightx, bottomy), (leftx, bottomy)]))
mainGrid.append(currentGrid)
rowNum.append(j)
columnNum.append(i)
if len(grouped[(grouped['gridId_'] == currentGrid)
& (grouped['weekday_'] == i)
& (grouped['hour_'] == j)]) != 0:
this_value = grouped[
(grouped['gridId_'] == currentGrid)
& (grouped['weekday_'] == i)
& (grouped['hour_'] == j)].iloc[0][
modified_fieldname]
value.append(this_value)
else:
value.append(np.nan)
subgrid_gpd = gpd.GeoDataFrame({'geometry': subgrid})
subgrid_gpd.crs = targetCRS
# Reproject to Mercator
subgrid_gpd = subgrid_gpd.to_crs(sourceCRS)
subgrid_gpd['gridId'] = mainGrid
subgrid_gpd['Weekday'] = columnNum
subgrid_gpd['hour'] = rowNum
subgrid_gpd['gridId'] = subgrid_gpd.apply(lambda x: str(
x['gridId'])+"_"+str(x['Weekday'])+"_"+str(x['hour']), axis=1)
subgrid_gpd[modified_fieldname] = value
subgrid_gpd = subgrid_gpd.dropna()
grid = grid.to_crs(sourceCRS)
grid = grid[grid['gridId'].isin(unikGrid)]
return grid, subgrid_gpd
# final_subgrid=subgrid_gpd[subgrid_gpd['value'].notnull()]
# return final_subgrid
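
# --- Hedged usage sketch (not from the original repository) ---
# Builds a small synthetic data set with ISO-formatted time stamps and runs the
# weekday/hour aggregation above; all names are assumptions, and the
# sjoin(op=...) call inside spatioTemporalAggregation may require an older
# geopandas that still accepts the 'op' keyword.
def _demo_spatio_temporal_aggregation():
    from shapely.geometry import Point
    rng = np.random.default_rng(1)
    n = 300
    times = pd.date_range("2021-06-01", periods=n,
                          freq="H").strftime("%Y-%m-%dT%H:%M:%S")
    points = gpd.GeoDataFrame(
        {"time": list(times), "speed": rng.random(n) * 60.0},
        geometry=[Point(7.60 + 0.01 * rng.random(), 51.95 + 0.01 * rng.random())
                  for _ in range(n)],
        crs="EPSG:4326",
    )
    return spatioTemporalAggregation(points, "speed", "mean", gridSize=2000)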
def MosaicPlot(mainGrid, grid, field):
"""
Performs spatio temporal aggregation of data on weekday and hour,
and prepares mosaicplot.
Parameters
----------
mainGrid :polygon geodataframe
The grid geodataframe with grid and aggregated data in a column.
Grid should have a grid id or equivalent unique ids
grid: Small subgrids, prepared for visualization purpose
only represents an hour of a weekday
field : string
Fieldname with aggregated data
Returns
-------
m : folium map object
Folium map with openstreetmap as base.
"""
# Prepare for grid plotting using folium
grid.columns = [cols.replace('.', '_') for cols in grid.columns]
field = field.replace('.', '_')
# Convert grid id to string
grid['gridId'] = grid['gridId'].astype(str)
# Convert maingrid,subgrid to geojson and csv
mainGrid.to_file("mainGrids.geojson", driver='GeoJSON')
atts = pd.DataFrame(grid)
grid.to_file("grids.geojson", driver='GeoJSON')
atts.to_csv("attributes.csv", index=False)
# load spatial and non-spatial data
data_geojson_source = "grids.geojson"
# data_geojson=gpd.read_file(data_geojson_source)
data_geojson = json.load(open(data_geojson_source))
# load spatial and non-spatial data
grid_geojson_source = "mainGrids.geojson"
mainGrid_geojson = json.load(open(grid_geojson_source))
# Get coordinates for map centre
lat = grid.geometry.centroid.y.mean()
lon = grid.geometry.centroid.x.mean()
# Initialize a new folium map object
m = folium.Map(location=[lat, lon],
zoom_start=10, tiles='Stamen Toner')
# Configure geojson layer
# style = {'fillColor': '#f5f5f5', 'lineColor': '#ffffbf'}
# polygon = folium.GeoJson(gjson, style_function = \
# lambda x: style).add_to(m)
# def style_function():
# return {'fillColor': '#00FFFFFF', 'lineColor': '#00FFFFFF'}
# folium.GeoJson(data_geojson).add_to(m)
folium.GeoJson(mainGrid_geojson,
lambda feature: {'lineOpacity': 0.4,
'color': '#00ddbb',
'fillColor': None,
'weight': 2,
'fillOpacity': 0}).add_to(m)
# add attribute data
attribute_pd = pd.read_csv("attributes.csv")
attribute = pd.DataFrame(attribute_pd)
# Convert gridId to string to ensure it matches with gridId
attribute['gridId'] = attribute['gridId'].astype(str)
# construct color map
minvalue = attribute[field].min()
maxvalue = attribute[field].max()
colormap_rn = linear.YlOrRd_09.scale(minvalue, maxvalue)
# Create Dictionary for colormap
population_dict_rn = attribute.set_index('gridId')[field]
# create map
folium.GeoJson(
data_geojson,
name='Choropleth map',
style_function=lambda feature: {
'lineOpacity': 0,
'color': 'green',
'fillColor': colormap_rn(population_dict_rn[
feature['properties']['gridId']]),
'weight': 0,
'fillOpacity': 0.9
},
highlight_function=lambda feature: {
'weight': 3, 'color': 'black', 'fillOpacity': 1},
tooltip=folium.features.GeoJsonTooltip(fields=['Weekday', 'hour',
field])).add_to(m)
# format legend
field = field.replace("_", " ")
# add a legend
colormap_rn.caption = '{value} per grid by weekday and hour'.format(
value=field)
colormap_rn.add_to(m)
# add a layer control
folium.LayerControl().add_to(m)
return m
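
# --- Hedged usage sketch (not from the original repository) ---
# Feeds the grids produced by the demo helper defined earlier in this file into
# MosaicPlot; the 'speed_mean' field name follows from that demo, and the output
# file name is an assumption.
def _demo_mosaic_plot():
    main_grid, sub_grid = _demo_spatio_temporal_aggregation()
    m = MosaicPlot(main_grid, sub_grid, "speed_mean")
    m.save("mosaic_demo.html")
    return m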
# Aggregate data by weekday and hour
def aggregateHourly(df, field, summary):
"""
Aggregates the whole data by weekday and hour as preparation step for
mosaic plot
Parameters
----------
df : GeoDataFrame
The dataset of points to be summarized
field : STRING
The field in input dataframe to be summarized
summary : String
The type of aggregation to be used, e.g. mean, median
Returns
-------
dayhourAggregate : dataframe
Aggregated Data by weekday and time
"""
# extract date and time from timestamp
df['hour'] = df['time'].apply(
lambda x: datetime.datetime.strptime(
x, '%Y-%m-%dT%H:%M:%S')).dt.hour
df['weekday'] = df['time'].apply(
lambda x: datetime.datetime.strptime(
x, '%Y-%m-%dT%H:%M:%S')).dt.dayofweek
# Aggregate by weekday and hour
dayhourAggregate = df.groupby(
['weekday', 'hour']).agg({field: [summary]})
dayhourAggregate = dayhourAggregate.reset_index()
dayhourAggregate.columns = dayhourAggregate.columns.map("_".join)
return dayhourAggregate
def OriginAndDestination(df):
"""
Return origin and destination dataframes for the tracks, keyed by
their track id.
Parameters
----------
df : GeoDataFrame
Point data containing 'track.id' and 'time' columns.
Returns
-------
origin : GeoDataFrame
The earliest point of every track.
destination : GeoDataFrame
The latest point of every track.
"""
track_list = list(df['track.id'].unique())
origin, destination = gpd.GeoDataFrame(), gpd.GeoDataFrame()
for track in track_list:
selected_tracks = df[df['track.id'] == track]
current_origin = selected_tracks[selected_tracks['time']
== selected_tracks['time'].min()]
current_destination = selected_tracks[selected_tracks['time']
== selected_tracks[
'time'].max()]
origin = pd.concat([origin, current_origin])
destination = pd.concat([destination, current_destination])
return origin, destination
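
# --- Hedged usage sketch (not from the original repository) ---
# `points` is expected to be a GeoDataFrame of track points with 'track.id' and
# 'time' columns (an assumption); the helper then reports one origin and one
# destination row per track.
def _demo_origin_destination(points):
    origin, destination = OriginAndDestination(points)
    print(len(origin), "origins and", len(destination), "destinations found")
    return origin, destination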
def getClusters(positions, distanceKM, min_samples=5):
"""
Cluster the given points with the DBSCAN algorithm and return the
resulting cluster centres
Parameters
----------
positions : Geodataframe object
Geodataframe with positions to be clustered
distanceKM : Float
Epsilon parameter of the DBSCAN algorithm in km, i.e. the maximum
distance used for clustering the points
min_samples : Integer, optional
Minimum number of points required to form a cluster.
If set to 1, each point forms its own cluster.
The default is 5.
Returns
-------
Dataframe
The dataframe with cluster centres co-ordinates and no. of points
on the cluster.
"""
def get_centermost_point(cluster):
centroid = (MultiPoint(cluster).centroid.x,
MultiPoint(cluster).centroid.y)
centermost_point = min(
cluster, key=lambda point: great_circle(point, centroid).m)
return tuple(centermost_point)
df = positions.to_crs({'init': 'epsg:4326'})
lon = df.geometry.x
lat = df.geometry.y
origin_pt = pd.DataFrame()
# Populate lat lon to dataframe
origin_pt['lat'] = lat
origin_pt['lon'] = lon
# add index to data
coords = origin_pt.to_numpy()
origin_pt.index = [i for i in range(len(lat))]
#
# Convert Data to projected and perform clustering
kms_per_radian = 6371.0088
epsilon = distanceKM / kms_per_radian
db = DBSCAN(eps=epsilon, min_samples=min_samples,
algorithm='ball_tree', metric='haversine').fit(
np.radians(coords))
cluster_labels = db.labels_
validClusters = []
for cluster in cluster_labels:
if cluster != -1:
validClusters.append(cluster)
num_clusters = len(set(validClusters))
clusters = pd.Series([coords[cluster_labels == n]
for n in range(num_clusters)])
# Assigining clusterId to each point
origin_pt['clusterId'] = cluster_labels
# Identify cluster Centres
centermost_points = clusters.map(get_centermost_point)
# Create Geodataframe with attributes for cluster centroids
clusterId = [i for i in range(len(centermost_points))]
centroidLat = [centermost_points[i][0]
for i in range(len(centermost_points))]
centroidLon = [centermost_points[i][1]
for i in range(len(centermost_points))]
clusterSize = [len(origin_pt[origin_pt['clusterId'] == i])
for i in range(len(centermost_points))]
# Create dataframe for cluster centers
clusterCentres_df = pd.DataFrame(
{'clusterId': clusterId, 'clusterLat': centroidLat,
'clusterLon': centroidLon, 'clusterSize': clusterSize})
clusterCentres = gpd.GeoDataFrame(clusterCentres_df,
geometry=gpd.points_from_xy(
clusterCentres_df.clusterLon,
clusterCentres_df.clusterLat))
return clusterCentres
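
# --- Hedged usage sketch (not from the original repository) ---
# Clusters synthetic WGS84 points scattered around two centres with a 500 m
# epsilon; all names and coordinates are assumptions for illustration only.
def _demo_get_clusters():
    from shapely.geometry import Point
    rng = np.random.default_rng(2)
    centres = [(7.60, 51.95), (7.65, 51.97)]
    pts = [Point(cx + 0.001 * rng.standard_normal(),
                 cy + 0.001 * rng.standard_normal())
           for cx, cy in centres for _ in range(100)]
    positions = gpd.GeoDataFrame(geometry=pts, crs="EPSG:4326")
    return getClusters(positions, distanceKM=0.5, min_samples=5)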
def showClusters(clusterCentres, track):
"""
Shows the cluster of the datasets along with original tracks
Parameters
----------
clusterCentres : Geodataframe
The geodataframe object with details of clusterCenters.
Obtained from the getClusters function
track : Geodataframe
The points geodataframe to be shown on the map along with clusters.
For visualization only
Returns
-------
m : folium map-type object
The map with source data and clusters overlaid
"""
# Make an empty map
lat = clusterCentres.geometry.y.mean()
lon = clusterCentres.geometry.x.mean()
clusterList = list(clusterCentres['clusterSize'])
m = folium.Map(location=[lat, lon],
tiles="openstreetmap", zoom_start=12)
# add points from track
for i in range(0, len(track)):
lat = track.iloc[i].geometry.y
lon = track.iloc[i].geometry.x
folium.Circle(
location=[lat, lon],
radius=0.05,
color='black',
weight=2,
fill=True, opacity=0.5,
fill_color='black',
).add_to(m)
# add marker one by one on the map
for i in range(0, len(clusterCentres)):
folium.Circle(
location=[clusterCentres.iloc[i]['clusterLat'],
clusterCentres.iloc[i]['clusterLon']],
popup=clusterList[i],
radius=clusterList[i]*10,
color='red',
weight=2,
fill=True,
fill_color='red'
).add_to(m)
return m
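
# --- Hedged usage sketch (not from the original repository) ---
# Clusters a single synthetic track and renders it with showClusters; the
# coordinates, names and output file name are assumptions for illustration.
def _demo_show_clusters():
    from shapely.geometry import Point
    rng = np.random.default_rng(3)
    track = gpd.GeoDataFrame(
        geometry=[Point(7.60 + 0.001 * rng.standard_normal(),
                        51.95 + 0.001 * rng.standard_normal())
                  for _ in range(150)],
        crs="EPSG:4326",
    )
    cluster_centres = getClusters(track, distanceKM=0.5, min_samples=5)
    m = showClusters(cluster_centres, track)
    m.save("clusters_demo.html")
    return m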
|
[
"seaborn.kdeplot",
"matplotlib.pyplot.suptitle",
"plotly.express.scatter_mapbox",
"matplotlib.pyplot.bar",
"geopandas.sjoin",
"pandas.read_csv",
"geopy.distance.great_circle",
"matplotlib.pyplot.figure",
"folium.Map",
"seaborn.pairplot",
"folium.Polygon",
"branca.colormap.linear.YlOrRd_09.scale",
"sklearn.cluster.DBSCAN",
"pandas.DataFrame",
"numpy.zeros_like",
"shapely.geometry.MultiPoint",
"random.randint",
"shapely.geometry.Polygon",
"numpy.transpose",
"geopandas.GeoDataFrame",
"folium.GeoJson",
"matplotlib.pyplot.subplots",
"numpy.radians",
"matplotlib.pyplot.show",
"numpy.vectorize",
"matplotlib.pyplot.twinx",
"math.ceil",
"matplotlib.pyplot.legend",
"pandas.cut",
"datetime.datetime.strptime",
"seaborn.boxplot",
"matplotlib.pyplot.ylabel",
"seaborn.lmplot",
"folium.features.GeoJsonTooltip",
"math.floor",
"folium.Circle",
"geopandas.points_from_xy",
"seaborn.distplot",
"folium.LayerControl",
"seaborn.diverging_palette"
] |
[((1315, 1383), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['points.time.iloc[0]', '"""%Y-%m-%dT%H:%M:%S"""'], {}), "(points.time.iloc[0], '%Y-%m-%dT%H:%M:%S')\n", (1341, 1383), False, 'import datetime\n'), ((1578, 1590), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1588, 1590), True, 'import matplotlib.pyplot as plt\n'), ((1876, 1886), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1884, 1886), True, 'import matplotlib.pyplot as plt\n'), ((2442, 2469), 'numpy.transpose', 'np.transpose', (['value_columns'], {}), '(value_columns)\n', (2454, 2469), True, 'import numpy as np\n'), ((2491, 2522), 'pandas.DataFrame', 'pd.DataFrame', (['values_transposed'], {}), '(values_transposed)\n', (2503, 2522), True, 'import pandas as pd\n'), ((2580, 2609), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (2592, 2609), True, 'import matplotlib.pyplot as plt\n'), ((6165, 6241), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(15, 5)', 'gridspec_kw': "{'width_ratios': [5, 5, 5]}"}), "(1, 3, figsize=(15, 5), gridspec_kw={'width_ratios': [5, 5, 5]})\n", (6177, 6241), True, 'import matplotlib.pyplot as plt\n'), ((6264, 6301), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': 'points[column]', 'ax': 'ax1'}), '(x=points[column], ax=ax1)\n', (6275, 6301), True, 'import seaborn as sns\n'), ((6343, 6401), 'seaborn.kdeplot', 'sns.kdeplot', (['points[column]'], {'shade': '(True)', 'color': '"""r"""', 'ax': 'ax2'}), "(points[column], shade=True, color='r', ax=ax2)\n", (6354, 6401), True, 'import seaborn as sns\n'), ((6468, 6515), 'seaborn.distplot', 'sns.distplot', (['points[column]'], {'kde': '(False)', 'ax': 'ax3'}), '(points[column], kde=False, ax=ax3)\n', (6480, 6515), True, 'import seaborn as sns\n'), ((6587, 6597), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6595, 6597), True, 'import matplotlib.pyplot as plt\n'), ((7114, 7217), 'folium.Map', 'folium.Map', ([], {'location': '[map_zoom_point[0], map_zoom_point[1]]', 'zoom_start': '(12)', 'tiles': '"""cartodbpositron"""'}), "(location=[map_zoom_point[0], map_zoom_point[1]], zoom_start=12,\n tiles='cartodbpositron')\n", (7124, 7217), False, 'import folium\n'), ((9016, 9120), 'plotly.express.scatter_mapbox', 'px.scatter_mapbox', (['points'], {'lat': '"""lat"""', 'lon': '"""lng"""', 'color': 'value', 'title': "(value + ' visualisation')", 'zoom': '(8)'}), "(points, lat='lat', lon='lng', color=value, title=value +\n ' visualisation', zoom=8)\n", (9033, 9120), True, 'import plotly.express as px\n'), ((10223, 10295), 'folium.Polygon', 'folium.Polygon', ([], {'locations': 'region_coords', 'color': 'region_color', 'popup': 'label'}), '(locations=region_coords, color=region_color, popup=label)\n', (10237, 10295), False, 'import folium\n'), ((19481, 19547), 'matplotlib.pyplot.bar', 'plt.bar', (['day', 'height'], {'color': '(0.1, 0.1, 0.1, 0.1)', 'edgecolor': '"""blue"""'}), "(day, height, color=(0.1, 0.1, 0.1, 0.1), edgecolor='blue')\n", (19488, 19547), True, 'import matplotlib.pyplot as plt\n'), ((19634, 19677), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Total Distance Travelled (Km)"""'], {}), "('Total Distance Travelled (Km)')\n", (19644, 19677), True, 'import matplotlib.pyplot as plt\n'), ((19695, 19706), 'matplotlib.pyplot.twinx', 'plt.twinx', ([], {}), '()\n', (19704, 19706), True, 'import matplotlib.pyplot as plt\n'), ((19823, 19894), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Which day has a different movement pattern than 
others?"""'], {}), "('Which day has a different movement pattern than others?')\n", (19835, 19894), True, 'import matplotlib.pyplot as plt\n'), ((19903, 19915), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (19913, 19915), True, 'import matplotlib.pyplot as plt\n'), ((19924, 19934), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19932, 19934), True, 'import matplotlib.pyplot as plt\n'), ((22030, 22070), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (["{'geometry': polygons}"], {}), "({'geometry': polygons})\n", (22046, 22070), True, 'import geopandas as gpd\n'), ((22266, 22298), 'geopandas.sjoin', 'gpd.sjoin', (['df', 'grid'], {'op': '"""within"""'}), "(df, grid, op='within')\n", (22275, 22298), True, 'import geopandas as gpd\n'), ((22591, 22612), 'pandas.DataFrame', 'pd.DataFrame', (['grouped'], {}), '(grouped)\n', (22603, 22612), True, 'import pandas as pd\n'), ((24076, 24094), 'pandas.DataFrame', 'pd.DataFrame', (['grid'], {}), '(grid)\n', (24088, 24094), True, 'import pandas as pd\n'), ((24601, 24670), 'folium.Map', 'folium.Map', ([], {'location': '[lat, lon]', 'zoom_start': '(10)', 'tiles': '"""OpenStreetMap"""'}), "(location=[lat, lon], zoom_start=10, tiles='OpenStreetMap')\n", (24611, 24670), False, 'import folium\n'), ((25120, 25149), 'pandas.read_csv', 'pd.read_csv', (['"""attributes.csv"""'], {}), "('attributes.csv')\n", (25131, 25149), True, 'import pandas as pd\n'), ((25170, 25196), 'pandas.DataFrame', 'pd.DataFrame', (['attribute_pd'], {}), '(attribute_pd)\n', (25182, 25196), True, 'import pandas as pd\n'), ((25464, 25506), 'branca.colormap.linear.YlOrRd_09.scale', 'linear.YlOrRd_09.scale', (['minvalue', 'maxvalue'], {}), '(minvalue, maxvalue)\n', (25486, 25506), False, 'from branca.colormap import linear\n'), ((28647, 28687), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (["{'geometry': polygons}"], {}), "({'geometry': polygons})\n", (28663, 28687), True, 'import geopandas as gpd\n'), ((29173, 29205), 'geopandas.sjoin', 'gpd.sjoin', (['df', 'grid'], {'op': '"""within"""'}), "(df, grid, op='within')\n", (29182, 29205), True, 'import geopandas as gpd\n'), ((31436, 31475), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (["{'geometry': subgrid}"], {}), "({'geometry': subgrid})\n", (31452, 31475), True, 'import geopandas as gpd\n'), ((33240, 33258), 'pandas.DataFrame', 'pd.DataFrame', (['grid'], {}), '(grid)\n', (33252, 33258), True, 'import pandas as pd\n'), ((33924, 33992), 'folium.Map', 'folium.Map', ([], {'location': '[lat, lon]', 'zoom_start': '(10)', 'tiles': '"""Stamen Toner"""'}), "(location=[lat, lon], zoom_start=10, tiles='Stamen Toner')\n", (33934, 33992), False, 'import folium\n'), ((34762, 34791), 'pandas.read_csv', 'pd.read_csv', (['"""attributes.csv"""'], {}), "('attributes.csv')\n", (34773, 34791), True, 'import pandas as pd\n'), ((34812, 34838), 'pandas.DataFrame', 'pd.DataFrame', (['attribute_pd'], {}), '(attribute_pd)\n', (34824, 34838), True, 'import pandas as pd\n'), ((35106, 35148), 'branca.colormap.linear.YlOrRd_09.scale', 'linear.YlOrRd_09.scale', (['minvalue', 'maxvalue'], {}), '(minvalue, maxvalue)\n', (35128, 35148), False, 'from branca.colormap import linear\n'), ((39898, 39912), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (39910, 39912), True, 'import pandas as pd\n'), ((41548, 41672), 'pandas.DataFrame', 'pd.DataFrame', (["{'clusterId': clusterId, 'clusterLat': centroidLat, 'clusterLon':\n centroidLon, 'clusterSize': clusterSize}"], {}), "({'clusterId': clusterId, 'clusterLat': centroidLat,\n 'clusterLon': centroidLon, 
'clusterSize': clusterSize})\n", (41560, 41672), True, 'import pandas as pd\n'), ((42812, 42881), 'folium.Map', 'folium.Map', ([], {'location': '[lat, lon]', 'tiles': '"""openstreetmap"""', 'zoom_start': '(12)'}), "(location=[lat, lon], tiles='openstreetmap', zoom_start=12)\n", (42822, 42881), False, 'import folium\n'), ((1006, 1056), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['x', '"""%Y-%m-%dT%H:%M:%S"""'], {}), "(x, '%Y-%m-%dT%H:%M:%S')\n", (1032, 1056), False, 'import datetime\n'), ((1431, 1463), 'numpy.vectorize', 'np.vectorize', (['seconds_from_start'], {}), '(seconds_from_start)\n', (1443, 1463), True, 'import numpy as np\n'), ((3751, 3799), 'pandas.cut', 'pd.cut', (["copied['Intake Temperature.value']", 'bins'], {}), "(copied['Intake Temperature.value'], bins)\n", (3757, 3799), True, 'import pandas as pd\n'), ((14462, 14489), 'random.randint', 'random.randint', (['(0)', '(16777215)'], {}), '(0, 16777215)\n', (14476, 14489), False, 'import random\n'), ((37953, 37971), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', ([], {}), '()\n', (37969, 37971), True, 'import geopandas as gpd\n'), ((37973, 37991), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', ([], {}), '()\n', (37989, 37991), True, 'import geopandas as gpd\n'), ((40436, 40454), 'numpy.radians', 'np.radians', (['coords'], {}), '(coords)\n', (40446, 40454), True, 'import numpy as np\n'), ((2673, 2707), 'numpy.zeros_like', 'np.zeros_like', (['corr'], {'dtype': 'np.bool'}), '(corr, dtype=np.bool)\n', (2686, 2707), True, 'import numpy as np\n'), ((2734, 2778), 'seaborn.diverging_palette', 'sns.diverging_palette', (['(220)', '(10)'], {'as_cmap': '(True)'}), '(220, 10, as_cmap=True)\n', (2755, 2778), True, 'import seaborn as sns\n'), ((20639, 20659), 'math.floor', 'floor', (['(num / divisor)'], {}), '(num / divisor)\n', (20644, 20659), False, 'from math import floor, ceil\n'), ((20726, 20745), 'math.ceil', 'ceil', (['(num / divisor)'], {}), '(num / divisor)\n', (20730, 20745), False, 'from math import floor, ceil\n'), ((24737, 24877), 'folium.GeoJson', 'folium.GeoJson', (['data_geojson', "(lambda feature: {'lineOpacity': 0.4, 'color': 'black', 'fillColor': None,\n 'weight': 0.5, 'fillOpacity': 0})"], {}), "(data_geojson, lambda feature: {'lineOpacity': 0.4, 'color':\n 'black', 'fillColor': None, 'weight': 0.5, 'fillOpacity': 0})\n", (24751, 24877), False, 'import folium\n'), ((26557, 26578), 'folium.LayerControl', 'folium.LayerControl', ([], {}), '()\n', (26576, 26578), False, 'import folium\n'), ((27373, 27393), 'math.floor', 'floor', (['(num / divisor)'], {}), '(num / divisor)\n', (27378, 27393), False, 'from math import floor, ceil\n'), ((27460, 27479), 'math.ceil', 'ceil', (['(num / divisor)'], {}), '(num / divisor)\n', (27464, 27479), False, 'from math import floor, ceil\n'), ((34375, 34519), 'folium.GeoJson', 'folium.GeoJson', (['mainGrid_geojson', "(lambda feature: {'lineOpacity': 0.4, 'color': '#00ddbb', 'fillColor': None,\n 'weight': 2, 'fillOpacity': 0})"], {}), "(mainGrid_geojson, lambda feature: {'lineOpacity': 0.4,\n 'color': '#00ddbb', 'fillColor': None, 'weight': 2, 'fillOpacity': 0})\n", (34389, 34519), False, 'import folium\n'), ((36202, 36223), 'folium.LayerControl', 'folium.LayerControl', ([], {}), '()\n', (36221, 36223), False, 'import folium\n'), ((40299, 40391), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {'eps': 'epsilon', 'min_samples': 'min_samples', 'algorithm': '"""ball_tree"""', 'metric': '"""haversine"""'}), "(eps=epsilon, min_samples=min_samples, algorithm='ball_tree', metric=\n 
'haversine')\n", (40305, 40391), False, 'from sklearn.cluster import DBSCAN\n'), ((41807, 41885), 'geopandas.points_from_xy', 'gpd.points_from_xy', (['clusterCentres_df.clusterLon', 'clusterCentres_df.clusterLat'], {}), '(clusterCentres_df.clusterLon, clusterCentres_df.clusterLat)\n', (41825, 41885), True, 'import geopandas as gpd\n'), ((4131, 4218), 'seaborn.lmplot', 'sns.lmplot', ([], {'x': 'column_1', 'y': 'column_2', 'hue': '"""binned_temp"""', 'data': 'df1', 'palette': '"""viridis"""'}), "(x=column_1, y=column_2, hue='binned_temp', data=df1, palette=\n 'viridis')\n", (4141, 4218), True, 'import seaborn as sns\n'), ((4265, 4352), 'seaborn.lmplot', 'sns.lmplot', ([], {'x': 'column_1', 'y': 'column_2', 'hue': '"""binned_temp"""', 'data': 'df2', 'palette': '"""viridis"""'}), "(x=column_1, y=column_2, hue='binned_temp', data=df2, palette=\n 'viridis')\n", (4275, 4352), True, 'import seaborn as sns\n'), ((4421, 4484), 'seaborn.pairplot', 'sns.pairplot', (['df1'], {'vars': '[column_1, column_2]', 'hue': '"""binned_temp"""'}), "(df1, vars=[column_1, column_2], hue='binned_temp')\n", (4433, 4484), True, 'import seaborn as sns\n'), ((4538, 4601), 'seaborn.pairplot', 'sns.pairplot', (['df2'], {'vars': '[column_1, column_2]', 'hue': '"""binned_temp"""'}), "(df2, vars=[column_1, column_2], hue='binned_temp')\n", (4550, 4601), True, 'import seaborn as sns\n'), ((4707, 4773), 'seaborn.lmplot', 'sns.lmplot', ([], {'x': 'column_1', 'y': 'column_2', 'hue': '"""binned_temp"""', 'data': 'copied'}), "(x=column_1, y=column_2, hue='binned_temp', data=copied)\n", (4717, 4773), True, 'import seaborn as sns\n'), ((4847, 4913), 'seaborn.pairplot', 'sns.pairplot', (['copied'], {'vars': '[column_1, column_2]', 'hue': '"""binned_temp"""'}), "(copied, vars=[column_1, column_2], hue='binned_temp')\n", (4859, 4913), True, 'import seaborn as sns\n'), ((5261, 5340), 'seaborn.lmplot', 'sns.lmplot', ([], {'x': 'column_1', 'y': 'column_2', 'hue': '"""track.id"""', 'data': 'df1', 'palette': '"""viridis"""'}), "(x=column_1, y=column_2, hue='track.id', data=df1, palette='viridis')\n", (5271, 5340), True, 'import seaborn as sns\n'), ((5392, 5471), 'seaborn.lmplot', 'sns.lmplot', ([], {'x': 'column_1', 'y': 'column_2', 'hue': '"""track.id"""', 'data': 'df2', 'palette': '"""viridis"""'}), "(x=column_1, y=column_2, hue='track.id', data=df2, palette='viridis')\n", (5402, 5471), True, 'import seaborn as sns\n'), ((5545, 5605), 'seaborn.pairplot', 'sns.pairplot', (['df1'], {'vars': '[column_1, column_2]', 'hue': '"""track.id"""'}), "(df1, vars=[column_1, column_2], hue='track.id')\n", (5557, 5605), True, 'import seaborn as sns\n'), ((5659, 5719), 'seaborn.pairplot', 'sns.pairplot', (['df2'], {'vars': '[column_1, column_2]', 'hue': '"""track.id"""'}), "(df2, vars=[column_1, column_2], hue='track.id')\n", (5671, 5719), True, 'import seaborn as sns\n'), ((5825, 5915), 'seaborn.lmplot', 'sns.lmplot', ([], {'x': 'column_1', 'y': 'column_2', 'hue': '"""track.id"""', 'data': 'points_df', 'palette': '"""viridis"""'}), "(x=column_1, y=column_2, hue='track.id', data=points_df, palette=\n 'viridis')\n", (5835, 5915), True, 'import seaborn as sns\n'), ((5984, 6050), 'seaborn.pairplot', 'sns.pairplot', (['points_df'], {'vars': '[column_1, column_2]', 'hue': '"""track.id"""'}), "(points_df, vars=[column_1, column_2], hue='track.id')\n", (5996, 6050), True, 'import seaborn as sns\n'), ((21614, 21719), 'shapely.geometry.Polygon', 'Polygon', (['[(XleftOrigin, Ytop), (XrightOrigin, Ytop), (XrightOrigin, Ybottom), (\n XleftOrigin, Ybottom)]'], {}), 
'([(XleftOrigin, Ytop), (XrightOrigin, Ytop), (XrightOrigin, Ybottom),\n (XleftOrigin, Ybottom)])\n', (21621, 21719), False, 'from shapely.geometry import Polygon, MultiPoint\n'), ((28312, 28417), 'shapely.geometry.Polygon', 'Polygon', (['[(XleftOrigin, Ytop), (XrightOrigin, Ytop), (XrightOrigin, Ybottom), (\n XleftOrigin, Ybottom)]'], {}), '([(XleftOrigin, Ytop), (XrightOrigin, Ytop), (XrightOrigin, Ybottom),\n (XleftOrigin, Ybottom)])\n', (28319, 28417), False, 'from shapely.geometry import Polygon, MultiPoint\n'), ((43075, 43195), 'folium.Circle', 'folium.Circle', ([], {'location': '[lat, lon]', 'radius': '(0.05)', 'color': '"""black"""', 'weight': '(2)', 'fill': '(True)', 'opacity': '(0.5)', 'fill_color': '"""black"""'}), "(location=[lat, lon], radius=0.05, color='black', weight=2,\n fill=True, opacity=0.5, fill_color='black')\n", (43088, 43195), False, 'import folium\n'), ((43421, 43632), 'folium.Circle', 'folium.Circle', ([], {'location': "[clusterCentres.iloc[i]['clusterLat'], clusterCentres.iloc[i]['clusterLon']]", 'popup': 'clusterList[i]', 'radius': '(clusterList[i] * 10)', 'color': '"""red"""', 'weight': '(2)', 'fill': '(True)', 'fill_color': '"""red"""'}), "(location=[clusterCentres.iloc[i]['clusterLat'],\n clusterCentres.iloc[i]['clusterLon']], popup=clusterList[i], radius=\n clusterList[i] * 10, color='red', weight=2, fill=True, fill_color='red')\n", (43434, 43632), False, 'import folium\n'), ((15308, 15397), 'folium.Circle', 'folium.Circle', ([], {'radius': 'marker_radius', 'location': 'row', 'color': 'track_color', 'popup': 'track_id'}), '(radius=marker_radius, location=row, color=track_color, popup=\n track_id)\n', (15321, 15397), False, 'import folium\n'), ((26196, 26259), 'folium.features.GeoJsonTooltip', 'folium.features.GeoJsonTooltip', ([], {'fields': '[field]', 'aliases': '[field]'}), '(fields=[field], aliases=[field])\n', (26226, 26259), False, 'import folium\n'), ((28922, 28972), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['x', '"""%Y-%m-%dT%H:%M:%S"""'], {}), "(x, '%Y-%m-%dT%H:%M:%S')\n", (28948, 28972), False, 'import datetime\n'), ((29063, 29113), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['x', '"""%Y-%m-%dT%H:%M:%S"""'], {}), "(x, '%Y-%m-%dT%H:%M:%S')\n", (29089, 29113), False, 'import datetime\n'), ((30585, 30662), 'shapely.geometry.Polygon', 'Polygon', (['[(leftx, topy), (rightx, topy), (rightx, bottomy), (leftx, bottomy)]'], {}), '([(leftx, topy), (rightx, topy), (rightx, bottomy), (leftx, bottomy)])\n', (30592, 30662), False, 'from shapely.geometry import Polygon, MultiPoint\n'), ((35807, 35872), 'folium.features.GeoJsonTooltip', 'folium.features.GeoJsonTooltip', ([], {'fields': "['Weekday', 'hour', field]"}), "(fields=['Weekday', 'hour', field])\n", (35837, 35872), False, 'import folium\n'), ((36982, 37032), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['x', '"""%Y-%m-%dT%H:%M:%S"""'], {}), "(x, '%Y-%m-%dT%H:%M:%S')\n", (37008, 37032), False, 'import datetime\n'), ((37123, 37173), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['x', '"""%Y-%m-%dT%H:%M:%S"""'], {}), "(x, '%Y-%m-%dT%H:%M:%S')\n", (37149, 37173), False, 'import datetime\n'), ((39526, 39545), 'shapely.geometry.MultiPoint', 'MultiPoint', (['cluster'], {}), '(cluster)\n', (39536, 39545), False, 'from shapely.geometry import Polygon, MultiPoint\n'), ((39582, 39601), 'shapely.geometry.MultiPoint', 'MultiPoint', (['cluster'], {}), '(cluster)\n', (39592, 39601), False, 'from shapely.geometry import Polygon, MultiPoint\n'), ((12328, 
12417), 'folium.Circle', 'folium.Circle', ([], {'radius': 'marker_radius', 'location': 'row', 'color': 'colors[day]', 'popup': 'track_id'}), '(radius=marker_radius, location=row, color=colors[day], popup=\n track_id)\n', (12341, 12417), False, 'import folium\n'), ((39693, 39722), 'geopy.distance.great_circle', 'great_circle', (['point', 'centroid'], {}), '(point, centroid)\n', (39705, 39722), False, 'from geopy.distance import great_circle\n')]
|
import numpy as np
from deepobs.pytorch.runners import StandardRunner
from deepobs.tuner import GridSearch
from torch.optim import SGD
from probprec import Preconditioner
from sorunner import SORunner
optimizer_class = Preconditioner
hyperparams = {"lr": {"type": float},
"est_rank": {"type": int}}
# The discrete values to construct a grid for.
grid = {'lr': np.logspace(-5, 2, 10),
'est_rank': [2,3]}
# Make sure the 'ressources' argument matches the grid size; for grid search this is just a sanity check.
tuner = GridSearch(optimizer_class, hyperparams, grid,
runner=SORunner, ressources=20)
# Tune (i.e. evaluate every grid point) and rerun the best setting with 10 different seeds.
# tuner.tune('quadratic_deep', rerun_best_setting=True, num_epochs=2, output_dir='./grid_search')
# Optionally, generate commands for a parallelized execution
tuner.generate_commands_script('mnist_vae', run_script='/home/bald/pre_fmnist/runscript.py',
output_dir='./grid_search', generation_dir='./grid_search_commands')
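# A minimal sanity-check sketch, reusing the `grid` defined above; the name
# `n_grid_points` is introduced here purely for illustration. It mirrors the
# comment that `ressources` should equal the grid size (10 lr values x 2 ranks).
n_grid_points = len(grid['lr']) * len(grid['est_rank'])
assert n_grid_points == 20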
|
[
"numpy.logspace",
"deepobs.tuner.GridSearch"
] |
[((542, 620), 'deepobs.tuner.GridSearch', 'GridSearch', (['optimizer_class', 'hyperparams', 'grid'], {'runner': 'SORunner', 'ressources': '(20)'}), '(optimizer_class, hyperparams, grid, runner=SORunner, ressources=20)\n', (552, 620), False, 'from deepobs.tuner import GridSearch\n'), ((374, 396), 'numpy.logspace', 'np.logspace', (['(-5)', '(2)', '(10)'], {}), '(-5, 2, 10)\n', (385, 396), True, 'import numpy as np\n')]
|
import logging
import typhon
import netCDF4
import numpy as np
from scipy.interpolate import interp1d
from copy import copy
from konrad import constants
from konrad import utils
from konrad.component import Component
__all__ = [
'Atmosphere',
]
logger = logging.getLogger(__name__)
class Atmosphere(Component):
"""Atmosphere component.
Attributes:
atmosphere_variables (list[str]): Atmospheric variables defined by the
``Atmosphere`` component.
pmin (float): Minimum pressure used as threshold between upper and
lower atmosphere [Pa]. Methods like ``get_cold_point_index`` or
``get_triple_point_index`` are looking for levels with higher
pressure (closer to the surface) only.
"""
atmosphere_variables = [
'T',
'H2O',
'N2O',
'O3',
'O2',
'CO2',
'CO',
'CH4',
'CFC11',
'CFC12',
'CFC22',
'CCl4',
]
pmin = 10e2
def __init__(self, phlev):
"""Initialise atmosphere component.
Parameters:
phlev (``np.ndarray``): Atmospheric pressure at half-levels
(surface to top) [Pa].
"""
super().__init__()
plev = utils.plev_from_phlev(phlev)
self.coords = {
'time': np.array([]), # time dimension
'plev': plev, # pressure at full-levels
'phlev': phlev, # pressure at half-levels
}
for varname in self.atmosphere_variables:
self.create_variable(varname, np.zeros_like(plev))
# TODO: Combine with ``tracegases_rcemip``?
self.create_variable(
name='T',
data=utils.standard_atmosphere(plev, coordinates='pressure'),
)
self.update_height()
self.tracegases_rcemip()
@classmethod
def from_atm_fields_compact(cls, atm_fields_compact):
"""Convert an ARTS atm_fields_compact [0] into an atmosphere.
[0] http://arts.mi.uni-hamburg.de/docserver-trunk/variables/atm_fields_compact
Parameters:
atm_fields_compact (typhon.arts.types.GriddedField4):
Compact set of atmospheric fields.
"""
def _extract_profile(atmfield, species):
try:
arts_key = constants.variable_description[species]['arts_name']
except KeyError:
                logger.warning(f'No variable description for "{species}".')
else:
return atmfield.get(arts_key, keep_dims=False)
datadict = {var: _extract_profile(atm_fields_compact, var)
for var in cls.atmosphere_variables}
datadict['plev'] = atm_fields_compact.grids[1]
return cls.from_dict(datadict)
@classmethod
def from_xml(cls, xmlfile):
"""Read atmosphere from XML file containing an ARTS atm_fields_compact.
Parameters:
xmlfile (str): Path to XML file.
"""
# Read the content of given XML file.
griddedfield = typhon.arts.xml.load(xmlfile)
# Check if the XML file contains an atm_fields_compact (GriddedField4).
arts_type = typhon.arts.utils.get_arts_typename(griddedfield)
if arts_type != 'GriddedField4':
raise TypeError(
'XML file contains "{}". Expected "GriddedField4".'.format(
arts_type)
)
        return cls.from_atm_fields_compact(griddedfield)
@classmethod
def from_dict(cls, dictionary):
"""Create an atmosphere model from dictionary values.
Parameters:
dictionary (dict): Dictionary containing ndarrays.
"""
# TODO: Currently working for good-natured dictionaries.
# Consider a more flexible user interface.
# Create a Dataset with time and pressure dimension.
d = cls(phlev=dictionary['phlev'])
for var in cls.atmosphere_variables:
val = dictionary.get(var)
if val is not None:
# Prevent variables, that are not stored in the netCDF file,
# to be overwritten with ``None``.
d.create_variable(var, val)
# Calculate the geopotential height.
d.update_height()
return d
@classmethod
def from_netcdf(cls, ncfile, timestep=-1):
"""Create an atmosphere model from a netCDF file.
Parameters:
ncfile (str): Path to netCDF file.
timestep (int): Timestep to read (default is last timestep).
"""
def _return_profile(ds, var, ts):
return (ds[var][ts, :] if 'time' in ds[var].dimensions
else ds[var][:])
with netCDF4.Dataset(ncfile) as root:
if 'atmosphere' in root.groups:
dataset = root['atmosphere']
else:
dataset = root
datadict = {var: np.array(_return_profile(dataset, var, timestep))
for var in cls.atmosphere_variables
if var in dataset.variables
}
datadict['phlev'] = np.array(root['phlev'][:])
return cls.from_dict(datadict)
def to_atm_fields_compact(self):
"""Convert an atmosphere into an ARTS atm_fields_compact."""
# Store all atmosphere variables including geopotential height.
variables = self.atmosphere_variables + ['z']
# Get ARTS variable name from variable description.
species = [constants.variable_description[var].get('arts_name')
for var in variables]
# Create a GriddedField4.
atmfield = typhon.arts.types.GriddedField4()
# Set grids and their names.
atmfield.gridnames = ['Species', 'Pressure', 'Longitude', 'Latitude']
atmfield.grids = [
species, self['plev'], np.array([]), np.array([])
]
# The profiles have to be passed in "stacked" form, as an ndarray of
# dimensions [species, pressure, lat, lon].
atmfield.data = np.vstack(
[self[var].reshape(1, self['plev'].size, 1, 1)
for var in variables]
)
atmfield.dataname = 'Data'
# Perform a consistency check of the passed grids and data tensor.
atmfield.check_dimension()
return atmfield
def hash_attributes(self):
"""Create hash based on some basic characteristics"""
return hash((
self['plev'].min(), # Pressure at top of the atmosphere
self['plev'].max(), # Surface pressure
self['plev'].size, # Number of pressure layers
np.round(self['CO2'][0] / 1e-6), # CO2 ppmv
np.round(self['T'][-1, 0], 3), # Surface temperature
))
def refine_plev(self, phlev, **kwargs):
"""Refine the pressure grid of an atmosphere object.
Note:
This method returns a **new** object,
the original object is maintained!
Parameters:
phlev (ndarray): New half-level-pressure grid [Pa].
**kwargs: Additional keyword arguments are collected
and passed to :func:`scipy.interpolate.interp1d`
Returns:
Atmosphere: A **new** atmosphere object.
"""
# Initialize an empty directory to fill it with interpolated data.
# The dictionary is later used to create a new object using the
# Atmosphere.from_dict() classmethod. This allows to circumvent the
# fixed dimension size in xarray.DataArrays.
datadict = dict()
# Store new pressure grid.
datadict['phlev'] = phlev
plev = utils.plev_from_phlev(phlev)
# Loop over all atmospheric variables...
for variable in self.atmosphere_variables:
# and create an interpolation function using the original data.
f = interp1d(self['plev'], self[variable],
axis=-1, fill_value='extrapolate', **kwargs)
# Store the interpolated new data in the data directory.
datadict[variable] = f(plev).ravel()
# Create a new atmosphere object from the filled data directory.
# This method also calculates the new phlev coordinates.
new_atmosphere = type(self).from_dict(datadict)
# Keep attributes of original atmosphere object.
# This is **extremely** important because references to e.g. the
# convection scheme or the humidity handling are stored as attributes!
new_atmosphere.attrs.update({**self.attrs})
# Calculate the geopotential height.
new_atmosphere.update_height()
return new_atmosphere
def copy(self):
"""Create a copy of the atmosphere.
Returns:
konrad.atmosphere: copy of the atmosphere
"""
datadict = dict()
datadict['phlev'] = copy(self['phlev']) # Copy pressure grid.
# Create copies (and not references) of all atmospheric variables.
for variable in self.atmosphere_variables:
datadict[variable] = copy(self[variable]).ravel()
# Create a new atmosphere object from the filled data directory.
new_atmosphere = type(self).from_dict(datadict)
return new_atmosphere
def calculate_height(self):
"""Calculate the geopotential height."""
g = constants.earth_standard_gravity
plev = self['plev'] # Air pressure at full-levels.
phlev = self['phlev'] # Air pressure at half-levels.
T = self['T'] # Air temperature at full-levels.
rho = typhon.physics.density(plev, T)
dp = np.hstack((np.array([plev[0] - phlev[0]]), np.diff(plev)))
# Use the hydrostatic equation to calculate geopotential height from
# given pressure, density and gravity.
return np.cumsum(-dp / (rho * g))
def update_height(self):
"""Update the value for height."""
z = self.calculate_height()
# If height is already in Dataset, update its values.
if 'z' in self.data_vars:
self.set('z', z)
# Otherwise create the DataArray.
else:
self.create_variable('z', z)
def get_cold_point_index(self):
"""Return the model level index at the cold point.
Returns:
int: Model level index at the cold point.
"""
plev = self['plev'][:]
T = self['T'][-1, :]
return np.argmin(T[plev > self.pmin])
def get_cold_point_plev(self):
"""Return the cold point pressure.
Returns:
float: Pressure at the cold point [Pa].
"""
return self['plev'][self.get_cold_point_index()]
def get_triple_point_index(self):
"""Return the model level index at the triple point.
The triple point is taken at the temperature closest to 0 C.
Returns:
int: Model level index at the triple point.
"""
plev = self['plev']
T = self['T'][0, :]
return np.argmin(np.abs(T[np.where(plev > self.pmin)] - 273.15))
def get_triple_point_plev(self):
"""
Return the pressure at the triple point.
The triple point is taken at the temperature closest to 0 C.
Returns:
float: Pressure at the triple point [Pa].
"""
return self['plev'][self.get_triple_point_index()]
def get_lapse_rates(self):
"""Calculate the temperature lapse rate at each level."""
return np.gradient(self['T'][0, :], self['z'][0, :])
def get_potential_temperature(self, p0=1000e2):
r"""Calculate the potential temperature.
.. math::
\theta = T \cdot \left(\frac{p_0}{P}\right)^\frac{2}{7}
Parameters:
p0 (float): Pressure at reference level [Pa].
Returns:
ndarray: Potential temperature [K].
"""
# Get view on temperature and pressure arrays.
T = self['T'][0, :]
p = self['plev']
# Calculate the potential temperature.
return T * (p0 / p) ** (2 / 7)
def get_static_stability(self):
r"""Calculate the static stability.
.. math::
\sigma = - \frac{T}{\Theta} \frac{\partial\Theta}{\partial p}
Returns:
ndarray: Static stability [K/Pa].
"""
# Get view on temperature and pressure arrays.
t = self['T'][0, :]
p = self['plev']
# Calculate potential temperature and its vertical derivative.
theta = self.get_potential_temperature()
dtheta = np.gradient(theta, p)
return -(t / theta) * dtheta
def get_diabatic_subsidence(self, radiative_cooling):
"""Calculate the diabatic subsidence.
Parameters:
radiative_cooling (ndarray): Radiative cooling rates.
Positive values for heating, negative values for cooling!
Returns:
ndarray: Diabatic subsidence [Pa/day].
"""
sigma = self.get_static_stability()
return -radiative_cooling / sigma
def get_subsidence_convergence_max_index(self, radiative_cooling):
"""Return index of maximum subsidence convergence.
Parameters:
radiative_cooling (ndarray): Radiative cooling rates.
Positive values for heating, negative values for cooling!
Returns:
int: Level index of maximum subsidence divergence.
"""
plev = self['plev']
omega = self.get_diabatic_subsidence(radiative_cooling)
domega = np.gradient(omega, plev)
max_index = np.argmax(domega[plev > self.pmin])
self.create_variable('diabatic_convergence_max_index', [max_index])
return max_index
def get_subsidence_convergence_max_plev(self, radiative_cooling):
"""Return pressure of maximum subsidence convergence.
Parameters:
radiative_cooling (ndarray): Radiative cooling rates.
Positive values for heating, negative values for cooling!
Returns:
float: Pressure of maximum subsidence divergence [Pa].
"""
max_idx = self.get_subsidence_convergence_max_index(radiative_cooling)
max_plev = self['plev'][max_idx]
self.create_variable('diabatic_convergence_max_plev', [max_plev])
return max_plev
def get_heat_capacity(self):
r"""Calculate specific heat capacity at constant pressure of moist air
.. math::
c_p = X \cdot (c_{p,v} - c_{p,d}) + c_{p,d}
Returns:
ndarray: Heat capacity [J/K/kg].
"""
cpd = constants.isobaric_mass_heat_capacity_dry_air
cpv = constants.isobaric_mass_heat_capacity_water_vapor
x = self['H2O'][-1]
return x * (cpv - cpd) + cpd
def tracegases_rcemip(self):
"""Set trace gas concentrations according to the RCE-MIP configuration.
        The volume mixing ratios follow the values for the
        RCE-MIP (Wing et al. 2017) and are constant throughout the atmosphere.
"""
self.update_height()
concentrations = {
'H2O': utils.humidity_profile_rcemip(self.get('z')),
'CO2': 348e-6,
'CH4': 1650e-9,
'N2O': 306e-9,
'CO': 0,
'O3': utils.ozone_profile_rcemip(self.get('plev')),
'CFC11': 0,
'CFC12': 0,
'CFC22': 0,
'CCl4': 0,
}
for gas, vmr in concentrations.items():
self.set(gas, vmr)
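# A minimal usage sketch, assuming konrad and its dependencies are installed;
# the half-level pressure grid below is an arbitrary illustrative choice
# (``np`` is already imported at the top of this module).
if __name__ == '__main__':
    phlev = np.linspace(1000e2, 10e2, 128)   # surface-to-top half-level pressures [Pa]
    atmosphere = Atmosphere(phlev)        # standard atmosphere plus RCE-MIP trace gases
    print(atmosphere.get_cold_point_plev())
    refined = atmosphere.refine_plev(np.linspace(1000e2, 10e2, 256))  # interpolate to a finer grid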
|
[
"numpy.argmax",
"typhon.arts.types.GriddedField4",
"numpy.argmin",
"logging.getLogger",
"scipy.interpolate.interp1d",
"numpy.round",
"netCDF4.Dataset",
"numpy.zeros_like",
"typhon.arts.utils.get_arts_typename",
"numpy.cumsum",
"konrad.utils.standard_atmosphere",
"typhon.arts.xml.load",
"konrad.utils.plev_from_phlev",
"typhon.physics.density",
"copy.copy",
"numpy.diff",
"numpy.array",
"numpy.where",
"numpy.gradient"
] |
[((262, 289), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (279, 289), False, 'import logging\n'), ((1261, 1289), 'konrad.utils.plev_from_phlev', 'utils.plev_from_phlev', (['phlev'], {}), '(phlev)\n', (1282, 1289), False, 'from konrad import utils\n'), ((3065, 3094), 'typhon.arts.xml.load', 'typhon.arts.xml.load', (['xmlfile'], {}), '(xmlfile)\n', (3085, 3094), False, 'import typhon\n'), ((3196, 3245), 'typhon.arts.utils.get_arts_typename', 'typhon.arts.utils.get_arts_typename', (['griddedfield'], {}), '(griddedfield)\n', (3231, 3245), False, 'import typhon\n'), ((5703, 5736), 'typhon.arts.types.GriddedField4', 'typhon.arts.types.GriddedField4', ([], {}), '()\n', (5734, 5736), False, 'import typhon\n'), ((7734, 7762), 'konrad.utils.plev_from_phlev', 'utils.plev_from_phlev', (['phlev'], {}), '(phlev)\n', (7755, 7762), False, 'from konrad import utils\n'), ((8960, 8979), 'copy.copy', 'copy', (["self['phlev']"], {}), "(self['phlev'])\n", (8964, 8979), False, 'from copy import copy\n'), ((9675, 9706), 'typhon.physics.density', 'typhon.physics.density', (['plev', 'T'], {}), '(plev, T)\n', (9697, 9706), False, 'import typhon\n'), ((9919, 9945), 'numpy.cumsum', 'np.cumsum', (['(-dp / (rho * g))'], {}), '(-dp / (rho * g))\n', (9928, 9945), True, 'import numpy as np\n'), ((10533, 10563), 'numpy.argmin', 'np.argmin', (['T[plev > self.pmin]'], {}), '(T[plev > self.pmin])\n', (10542, 10563), True, 'import numpy as np\n'), ((11593, 11638), 'numpy.gradient', 'np.gradient', (["self['T'][0, :]", "self['z'][0, :]"], {}), "(self['T'][0, :], self['z'][0, :])\n", (11604, 11638), True, 'import numpy as np\n'), ((12682, 12703), 'numpy.gradient', 'np.gradient', (['theta', 'p'], {}), '(theta, p)\n', (12693, 12703), True, 'import numpy as np\n'), ((13674, 13698), 'numpy.gradient', 'np.gradient', (['omega', 'plev'], {}), '(omega, plev)\n', (13685, 13698), True, 'import numpy as np\n'), ((13720, 13755), 'numpy.argmax', 'np.argmax', (['domega[plev > self.pmin]'], {}), '(domega[plev > self.pmin])\n', (13729, 13755), True, 'import numpy as np\n'), ((1335, 1347), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1343, 1347), True, 'import numpy as np\n'), ((4754, 4777), 'netCDF4.Dataset', 'netCDF4.Dataset', (['ncfile'], {}), '(ncfile)\n', (4769, 4777), False, 'import netCDF4\n'), ((5175, 5201), 'numpy.array', 'np.array', (["root['phlev'][:]"], {}), "(root['phlev'][:])\n", (5183, 5201), True, 'import numpy as np\n'), ((5915, 5927), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5923, 5927), True, 'import numpy as np\n'), ((5929, 5941), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5937, 5941), True, 'import numpy as np\n'), ((7956, 8043), 'scipy.interpolate.interp1d', 'interp1d', (["self['plev']", 'self[variable]'], {'axis': '(-1)', 'fill_value': '"""extrapolate"""'}), "(self['plev'], self[variable], axis=-1, fill_value='extrapolate',\n **kwargs)\n", (7964, 8043), False, 'from scipy.interpolate import interp1d\n'), ((1578, 1597), 'numpy.zeros_like', 'np.zeros_like', (['plev'], {}), '(plev)\n', (1591, 1597), True, 'import numpy as np\n'), ((1721, 1776), 'konrad.utils.standard_atmosphere', 'utils.standard_atmosphere', (['plev'], {'coordinates': '"""pressure"""'}), "(plev, coordinates='pressure')\n", (1746, 1776), False, 'from konrad import utils\n'), ((6701, 6733), 'numpy.round', 'np.round', (["(self['CO2'][0] / 1e-06)"], {}), "(self['CO2'][0] / 1e-06)\n", (6709, 6733), True, 'import numpy as np\n'), ((6758, 6787), 'numpy.round', 'np.round', (["self['T'][-1, 0]", '(3)'], {}), 
"(self['T'][-1, 0], 3)\n", (6766, 6787), True, 'import numpy as np\n'), ((9731, 9761), 'numpy.array', 'np.array', (['[plev[0] - phlev[0]]'], {}), '([plev[0] - phlev[0]])\n', (9739, 9761), True, 'import numpy as np\n'), ((9763, 9776), 'numpy.diff', 'np.diff', (['plev'], {}), '(plev)\n', (9770, 9776), True, 'import numpy as np\n'), ((9163, 9183), 'copy.copy', 'copy', (['self[variable]'], {}), '(self[variable])\n', (9167, 9183), False, 'from copy import copy\n'), ((11129, 11155), 'numpy.where', 'np.where', (['(plev > self.pmin)'], {}), '(plev > self.pmin)\n', (11137, 11155), True, 'import numpy as np\n')]
|
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright Holders: <NAME>, <NAME>, <NAME>
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
from itertools import izip
import numpy as np
from pymor.core.interfaces import ImmutableInterface
from pymor.la.basic import induced_norm
from pymor.la.numpyvectorarray import NumpyVectorArray
from pymor.operators.constructions import LincombOperator
from pymor.reductors.basic import reduce_generic_rb
from pymor.operators.numpy import NumpyMatrixOperator
def reduce_stationary_affine_linear(discretization, RB, error_product=None, coercivity_estimator=None,
disable_caching=True, extends=None):
"""Reductor for linear |StationaryDiscretizations| whose with affinely decomposed operator and rhs.
This reductor uses :meth:`~pymor.reductors.basic.reduce_generic_rb` for the actual
RB-projection. The only addition is an error estimator. The estimator evaluates the
norm of the residual with respect to a given inner product.
Parameters
----------
discretization
The |Discretization| which is to be reduced.
RB
|VectorArray| containing the reduced basis on which to project.
error_product
Scalar product given as an |Operator| used to calculate Riesz
representative of the residual. If `None`, the Euclidean product is used.
coercivity_estimator
`None` or a |Parameterfunctional| returning a lower bound for the coercivity
constant of the given problem.
disable_caching
If `True`, caching of solutions is disabled for the reduced |Discretization|.
extends
Set by :meth:`~pymor.algorithms.greedy.greedy` to the result of the
last reduction in case the basis extension was `hierarchic`. Used to prevent
re-computation of Riesz representatives already obtained from previous
reductions.
Returns
-------
rd
The reduced |Discretization|.
rc
The reconstructor providing a `reconstruct(U)` method which reconstructs
high-dimensional solutions from solutions `U` of the reduced |Discretization|.
reduction_data
Additional data produced by the reduction process. In this case the computed
Riesz representatives. (Compare the `extends` parameter.)
"""
# assert isinstance(discretization, StationaryDiscretization)
assert discretization.linear
assert isinstance(discretization.operator, LincombOperator)
assert all(not op.parametric for op in discretization.operator.operators)
if discretization.rhs.parametric:
assert isinstance(discretization.rhs, LincombOperator)
assert all(not op.parametric for op in discretization.rhs.operators)
assert extends is None or len(extends) == 3
d = discretization
rd, rc, data = reduce_generic_rb(d, RB, disable_caching=disable_caching, extends=extends)
if extends:
old_data = extends[2]
old_RB_size = len(extends[1].RB)
else:
old_RB_size = 0
# compute data for estimator
space = d.operator.source
# compute the Riesz representative of (U, .)_L2 with respect to error_product
def riesz_representative(U):
if error_product is None:
return U.copy()
else:
return error_product.apply_inverse(U)
def append_vector(U, R, RR):
RR.append(riesz_representative(U), remove_from_other=True)
R.append(U, remove_from_other=True)
# compute all components of the residual
if RB is None:
RB = discretization.solution_space.empty()
if extends:
R_R, RR_R = old_data['R_R'], old_data['RR_R']
elif not d.rhs.parametric:
R_R = space.empty(reserve=1)
RR_R = space.empty(reserve=1)
append_vector(d.rhs.as_vector(), R_R, RR_R)
else:
R_R = space.empty(reserve=len(d.rhs.operators))
RR_R = space.empty(reserve=len(d.rhs.operators))
for op in d.rhs.operators:
append_vector(op.as_vector(), R_R, RR_R)
if len(RB) == 0:
R_Os = [space.empty()]
RR_Os = [space.empty()]
elif not d.operator.parametric:
R_Os = [space.empty(reserve=len(RB))]
RR_Os = [space.empty(reserve=len(RB))]
for i in xrange(len(RB)):
append_vector(-d.operator.apply(RB, ind=i), R_Os[0], RR_Os[0])
else:
R_Os = [space.empty(reserve=len(RB)) for _ in xrange(len(d.operator.operators))]
RR_Os = [space.empty(reserve=len(RB)) for _ in xrange(len(d.operator.operators))]
if old_RB_size > 0:
for op, R_O, RR_O, old_R_O, old_RR_O in izip(d.operator.operators, R_Os, RR_Os,
old_data['R_Os'], old_data['RR_Os']):
R_O.append(old_R_O)
RR_O.append(old_RR_O)
for op, R_O, RR_O in izip(d.operator.operators, R_Os, RR_Os):
for i in xrange(old_RB_size, len(RB)):
append_vector(-op.apply(RB, [i]), R_O, RR_O)
# compute Gram matrix of the residuals
R_RR = RR_R.dot(R_R, pairwise=False)
R_RO = np.hstack([RR_R.dot(R_O, pairwise=False) for R_O in R_Os])
R_OO = np.vstack([np.hstack([RR_O.dot(R_O, pairwise=False) for R_O in R_Os]) for RR_O in RR_Os])
estimator_matrix = np.empty((len(R_RR) + len(R_OO),) * 2)
estimator_matrix[:len(R_RR), :len(R_RR)] = R_RR
estimator_matrix[len(R_RR):, len(R_RR):] = R_OO
estimator_matrix[:len(R_RR), len(R_RR):] = R_RO
estimator_matrix[len(R_RR):, :len(R_RR)] = R_RO.T
estimator_matrix = NumpyMatrixOperator(estimator_matrix)
estimator = StationaryAffineLinearReducedEstimator(estimator_matrix, coercivity_estimator)
rd = rd.with_(estimator=estimator)
data.update(R_R=R_R, RR_R=RR_R, R_Os=R_Os, RR_Os=RR_Os)
return rd, rc, data
class StationaryAffineLinearReducedEstimator(ImmutableInterface):
"""Instatiated by :meth:`reduce_stationary_affine_linear`.
Not to be used directly.
"""
def __init__(self, estimator_matrix, coercivity_estimator):
self.estimator_matrix = estimator_matrix
self.coercivity_estimator = coercivity_estimator
self.norm = induced_norm(estimator_matrix)
def estimate(self, U, mu, discretization):
d = discretization
if len(U) > 1:
raise NotImplementedError
if not d.rhs.parametric:
CR = np.ones(1)
else:
CR = d.rhs.evaluate_coefficients(mu)
if not d.operator.parametric:
CO = np.ones(1)
else:
CO = d.operator.evaluate_coefficients(mu)
C = np.hstack((CR, np.dot(CO[..., np.newaxis], U.data).ravel()))
est = self.norm(NumpyVectorArray(C))
if self.coercivity_estimator:
est /= self.coercivity_estimator(mu)
return est
def restricted_to_subbasis(self, dim, discretization):
d = discretization
cr = 1 if not d.rhs.parametric else len(d.rhs.operators)
co = 1 if not d.operator.parametric else len(d.operator.operators)
old_dim = d.operator.source.dim
indices = np.concatenate((np.arange(cr),
((np.arange(co)*old_dim)[..., np.newaxis] + np.arange(dim)).ravel() + cr))
matrix = self.estimator_matrix._matrix[indices, :][:, indices]
return StationaryAffineLinearReducedEstimator(NumpyMatrixOperator(matrix), self.coercivity_estimator)
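# A small, self-contained illustration (plain NumPy, made-up block sizes) of how
# the residual Gram blocks R_RR, R_RO and R_OO computed above are assembled into
# the estimator matrix; the arrays below are stand-ins, not pyMOR objects.
def _estimator_matrix_layout_demo():
    R_RR = np.eye(2)           # rhs-rhs block (e.g. 2 affine rhs components)
    R_OO = np.eye(6)           # operator-operator block (e.g. 3 components x 2 basis vectors)
    R_RO = np.zeros((2, 6))    # rhs-operator cross block
    estimator_matrix = np.empty((R_RR.shape[0] + R_OO.shape[0],) * 2)
    estimator_matrix[:2, :2] = R_RR
    estimator_matrix[2:, 2:] = R_OO
    estimator_matrix[:2, 2:] = R_RO
    estimator_matrix[2:, :2] = R_RO.T
    return estimator_matrix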
|
[
"pymor.operators.numpy.NumpyMatrixOperator",
"pymor.la.basic.induced_norm",
"pymor.la.numpyvectorarray.NumpyVectorArray",
"numpy.ones",
"pymor.reductors.basic.reduce_generic_rb",
"numpy.arange",
"itertools.izip",
"numpy.dot"
] |
[((2949, 3023), 'pymor.reductors.basic.reduce_generic_rb', 'reduce_generic_rb', (['d', 'RB'], {'disable_caching': 'disable_caching', 'extends': 'extends'}), '(d, RB, disable_caching=disable_caching, extends=extends)\n', (2966, 3023), False, 'from pymor.reductors.basic import reduce_generic_rb\n'), ((5688, 5725), 'pymor.operators.numpy.NumpyMatrixOperator', 'NumpyMatrixOperator', (['estimator_matrix'], {}), '(estimator_matrix)\n', (5707, 5725), False, 'from pymor.operators.numpy import NumpyMatrixOperator\n'), ((6306, 6336), 'pymor.la.basic.induced_norm', 'induced_norm', (['estimator_matrix'], {}), '(estimator_matrix)\n', (6318, 6336), False, 'from pymor.la.basic import induced_norm\n'), ((4982, 5021), 'itertools.izip', 'izip', (['d.operator.operators', 'R_Os', 'RR_Os'], {}), '(d.operator.operators, R_Os, RR_Os)\n', (4986, 5021), False, 'from itertools import izip\n'), ((6523, 6533), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (6530, 6533), True, 'import numpy as np\n'), ((6653, 6663), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (6660, 6663), True, 'import numpy as np\n'), ((6831, 6850), 'pymor.la.numpyvectorarray.NumpyVectorArray', 'NumpyVectorArray', (['C'], {}), '(C)\n', (6847, 6850), False, 'from pymor.la.numpyvectorarray import NumpyVectorArray\n'), ((7510, 7537), 'pymor.operators.numpy.NumpyMatrixOperator', 'NumpyMatrixOperator', (['matrix'], {}), '(matrix)\n', (7529, 7537), False, 'from pymor.operators.numpy import NumpyMatrixOperator\n'), ((4744, 4820), 'itertools.izip', 'izip', (['d.operator.operators', 'R_Os', 'RR_Os', "old_data['R_Os']", "old_data['RR_Os']"], {}), "(d.operator.operators, R_Os, RR_Os, old_data['R_Os'], old_data['RR_Os'])\n", (4748, 4820), False, 'from itertools import izip\n'), ((7261, 7274), 'numpy.arange', 'np.arange', (['cr'], {}), '(cr)\n', (7270, 7274), True, 'import numpy as np\n'), ((6760, 6795), 'numpy.dot', 'np.dot', (['CO[..., np.newaxis]', 'U.data'], {}), '(CO[..., np.newaxis], U.data)\n', (6766, 6795), True, 'import numpy as np\n'), ((7353, 7367), 'numpy.arange', 'np.arange', (['dim'], {}), '(dim)\n', (7362, 7367), True, 'import numpy as np\n'), ((7311, 7324), 'numpy.arange', 'np.arange', (['co'], {}), '(co)\n', (7320, 7324), True, 'import numpy as np\n')]
|
import os
import json
import shutil
import cv2
import sys
import math
# import tensorflow as tf
import numpy as np
# import align.detect_face
# import facenet
import requests
import tempfile
import _pickle as pickle
import urllib.request as request
from collections import namedtuple
from google_images_download import google_images_download
DATA_DIR = '/app/data'
TMP_DOWNLOAD_DIR = '/tmp/google_img_download'
if not os.path.exists(TMP_DOWNLOAD_DIR):
os.makedirs(TMP_DOWNLOAD_DIR)
IMG_CACHE_DIR = '/app/data/google_images'
if not os.path.exists(IMG_CACHE_DIR):
os.makedirs(IMG_CACHE_DIR)
BoundingBox = namedtuple('BoundingBox', ['x1', 'x2', 'y1', 'y2'])
KB = 1024
def file_size(filename):
st = os.stat(filename)
return st.st_size
def fetch_images(name, outdir=IMG_CACHE_DIR, n=25, query_extras=None,
force=False):
"""Fetch images from Google"""
out_subdir = os.path.join(outdir, name)
if os.path.exists(out_subdir):
if not force:
print('Using cached', out_subdir)
return out_subdir
else:
shutil.rmtree(out_subdir)
query = '"%s"' % name
if query_extras:
query += ' %s' % query_extras
response = google_images_download.googleimagesdownload()
response.download({
'keywords': query, 'limit': n,
'output_directory': TMP_DOWNLOAD_DIR, 'format': 'jpg', 'size': 'medium'
})
tmp_dir = os.path.join(TMP_DOWNLOAD_DIR, query)
for ent in os.listdir(tmp_dir):
img_path = os.path.join(tmp_dir, ent)
im = cv2.imread(img_path)
if im is None:
os.remove(img_path)
shutil.move(tmp_dir, out_subdir)
return out_subdir
MODEL_SERVER_PORT = 9999
MODEL_SERVER_URL = 'http://localhost:{}/'.format(MODEL_SERVER_PORT)
def detect_faces_in_images(dir_path):
faces = requests.get(MODEL_SERVER_URL + 'face-detect', params={'path': dir_path}).json()
result = {}
for img_path, bboxes in faces.items():
result[img_path] = []
for bbox in bboxes:
x1 = int(bbox['x1'])
x2 = int(bbox['x2'])
y1 = int(bbox['y1'])
y2 = int(bbox['y2'])
img = cv2.imread(img_path)
cropped_image = img[y1:y2, x1:x2, :]
if cropped_image.size > 0:
result[img_path].append(cropped_image)
return result
def detect_faces(input_path):
faces = requests.get(MODEL_SERVER_URL + 'face-detect',
params={'path': input_path}).json()
result = []
for img_path, bboxes in faces.items():
height, width, _ = cv2.imread(img_path).shape
for bbox in bboxes:
x1 = bbox['x1'] / width
x2 = bbox['x2'] / width
y1 = bbox['y1'] / height
y2 = bbox['y2'] / height
if y2 > y1 and x2 > x1:
result.append((
img_path,
BoundingBox(x1=x1, x2=x2, y1=y1, y2=y2)
))
return result
def embed_images(images):
embs = []
for img in images:
emb = requests.post(
MODEL_SERVER_URL + 'face-embed',
params={'height': img.shape[0], 'width': img.shape[1]},
data=img.tostring()
).json()
embs.append(np.array(emb))
if len(embs) == 0:
raise RuntimeError('No embeddings were created')
return embs
def embed_directory(dir_path, one_face_per_img=True):
"""Compute a mean embedding of all of the images in the directory"""
faces = requests.get(MODEL_SERVER_URL + 'face-detect', params={'path': dir_path}).json()
embeddings = []
for img_path, bboxes in faces.items():
if one_face_per_img and len(bboxes) > 1:
continue
for bbox in bboxes:
emb = requests.get(MODEL_SERVER_URL + 'face-embed', params={'path': img_path, **bbox}).json()
embeddings.append(np.array(emb))
return np.mean(embeddings, axis=0)
def name_to_embedding(name, n=25, cache=True, one_face_per_img=True):
"""Go directly from a name to face embedding"""
if cache:
google_imgs_dir = fetch_images(name, outdir=IMG_CACHE_DIR, n=n,
query_extras='', force=False)
return embed_directory(google_imgs_dir, one_face_per_img)
else:
tmp_dir = tempfile.mkdtemp('img_download')
try:
google_imgs_dir = fetch_images(name, outdir=tmp_dir, n=n,
query_extras='', force=True)
return embed_directory(google_imgs_dir, one_face_per_img)
finally:
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
def urls_to_embedding(urls):
"""Fetch images at the urls and embed faces"""
tmp_dir = tempfile.mkdtemp('img_download')
try:
for i, url in enumerate(urls):
img_path = os.path.join(tmp_dir, '{}.jpg'.format(i))
request.urlretrieve(url, img_path)
return embed_directory(tmp_dir)
finally:
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
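# Hypothetical usage sketch: 'Person A' and 'Person B' are placeholder names,
# and the snippet assumes the face model server queried above is running on
# localhost:9999. It compares two identities via cosine similarity of their
# mean face embeddings.
if __name__ == '__main__':
    emb_a = name_to_embedding('Person A', n=10, cache=False)
    emb_b = name_to_embedding('Person B', n=10, cache=False)
    cos_sim = np.dot(emb_a, emb_b) / (np.linalg.norm(emb_a) * np.linalg.norm(emb_b))
    print('cosine similarity:', cos_sim)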
|
[
"os.remove",
"os.makedirs",
"os.stat",
"os.path.exists",
"google_images_download.google_images_download.googleimagesdownload",
"cv2.imread",
"numpy.mean",
"tempfile.mkdtemp",
"collections.namedtuple",
"shutil.move",
"requests.get",
"numpy.array",
"shutil.rmtree",
"urllib.request.urlretrieve",
"os.path.join",
"os.listdir"
] |
[((616, 667), 'collections.namedtuple', 'namedtuple', (['"""BoundingBox"""', "['x1', 'x2', 'y1', 'y2']"], {}), "('BoundingBox', ['x1', 'x2', 'y1', 'y2'])\n", (626, 667), False, 'from collections import namedtuple\n'), ((420, 452), 'os.path.exists', 'os.path.exists', (['TMP_DOWNLOAD_DIR'], {}), '(TMP_DOWNLOAD_DIR)\n', (434, 452), False, 'import os\n'), ((458, 487), 'os.makedirs', 'os.makedirs', (['TMP_DOWNLOAD_DIR'], {}), '(TMP_DOWNLOAD_DIR)\n', (469, 487), False, 'import os\n'), ((538, 567), 'os.path.exists', 'os.path.exists', (['IMG_CACHE_DIR'], {}), '(IMG_CACHE_DIR)\n', (552, 567), False, 'import os\n'), ((573, 599), 'os.makedirs', 'os.makedirs', (['IMG_CACHE_DIR'], {}), '(IMG_CACHE_DIR)\n', (584, 599), False, 'import os\n'), ((715, 732), 'os.stat', 'os.stat', (['filename'], {}), '(filename)\n', (722, 732), False, 'import os\n'), ((911, 937), 'os.path.join', 'os.path.join', (['outdir', 'name'], {}), '(outdir, name)\n', (923, 937), False, 'import os\n'), ((945, 971), 'os.path.exists', 'os.path.exists', (['out_subdir'], {}), '(out_subdir)\n', (959, 971), False, 'import os\n'), ((1225, 1270), 'google_images_download.google_images_download.googleimagesdownload', 'google_images_download.googleimagesdownload', ([], {}), '()\n', (1268, 1270), False, 'from google_images_download import google_images_download\n'), ((1436, 1473), 'os.path.join', 'os.path.join', (['TMP_DOWNLOAD_DIR', 'query'], {}), '(TMP_DOWNLOAD_DIR, query)\n', (1448, 1473), False, 'import os\n'), ((1489, 1508), 'os.listdir', 'os.listdir', (['tmp_dir'], {}), '(tmp_dir)\n', (1499, 1508), False, 'import os\n'), ((1650, 1682), 'shutil.move', 'shutil.move', (['tmp_dir', 'out_subdir'], {}), '(tmp_dir, out_subdir)\n', (1661, 1682), False, 'import shutil\n'), ((3957, 3984), 'numpy.mean', 'np.mean', (['embeddings'], {'axis': '(0)'}), '(embeddings, axis=0)\n', (3964, 3984), True, 'import numpy as np\n'), ((4808, 4840), 'tempfile.mkdtemp', 'tempfile.mkdtemp', (['"""img_download"""'], {}), "('img_download')\n", (4824, 4840), False, 'import tempfile\n'), ((1529, 1555), 'os.path.join', 'os.path.join', (['tmp_dir', 'ent'], {}), '(tmp_dir, ent)\n', (1541, 1555), False, 'import os\n'), ((1569, 1589), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (1579, 1589), False, 'import cv2\n'), ((4358, 4390), 'tempfile.mkdtemp', 'tempfile.mkdtemp', (['"""img_download"""'], {}), "('img_download')\n", (4374, 4390), False, 'import tempfile\n'), ((5065, 5088), 'os.path.exists', 'os.path.exists', (['tmp_dir'], {}), '(tmp_dir)\n', (5079, 5088), False, 'import os\n'), ((1097, 1122), 'shutil.rmtree', 'shutil.rmtree', (['out_subdir'], {}), '(out_subdir)\n', (1110, 1122), False, 'import shutil\n'), ((1625, 1644), 'os.remove', 'os.remove', (['img_path'], {}), '(img_path)\n', (1634, 1644), False, 'import os\n'), ((1852, 1925), 'requests.get', 'requests.get', (["(MODEL_SERVER_URL + 'face-detect')"], {'params': "{'path': dir_path}"}), "(MODEL_SERVER_URL + 'face-detect', params={'path': dir_path})\n", (1864, 1925), False, 'import requests\n'), ((2200, 2220), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (2210, 2220), False, 'import cv2\n'), ((2428, 2503), 'requests.get', 'requests.get', (["(MODEL_SERVER_URL + 'face-detect')"], {'params': "{'path': input_path}"}), "(MODEL_SERVER_URL + 'face-detect', params={'path': input_path})\n", (2440, 2503), False, 'import requests\n'), ((2623, 2643), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (2633, 2643), False, 'import cv2\n'), ((3301, 3314), 'numpy.array', 'np.array', 
(['emb'], {}), '(emb)\n', (3309, 3314), True, 'import numpy as np\n'), ((3553, 3626), 'requests.get', 'requests.get', (["(MODEL_SERVER_URL + 'face-detect')"], {'params': "{'path': dir_path}"}), "(MODEL_SERVER_URL + 'face-detect', params={'path': dir_path})\n", (3565, 3626), False, 'import requests\n'), ((4648, 4671), 'os.path.exists', 'os.path.exists', (['tmp_dir'], {}), '(tmp_dir)\n', (4662, 4671), False, 'import os\n'), ((4966, 5000), 'urllib.request.urlretrieve', 'request.urlretrieve', (['url', 'img_path'], {}), '(url, img_path)\n', (4985, 5000), True, 'import urllib.request as request\n'), ((5102, 5124), 'shutil.rmtree', 'shutil.rmtree', (['tmp_dir'], {}), '(tmp_dir)\n', (5115, 5124), False, 'import shutil\n'), ((3931, 3944), 'numpy.array', 'np.array', (['emb'], {}), '(emb)\n', (3939, 3944), True, 'import numpy as np\n'), ((4689, 4711), 'shutil.rmtree', 'shutil.rmtree', (['tmp_dir'], {}), '(tmp_dir)\n', (4702, 4711), False, 'import shutil\n'), ((3813, 3898), 'requests.get', 'requests.get', (["(MODEL_SERVER_URL + 'face-embed')"], {'params': "{'path': img_path, **bbox}"}), "(MODEL_SERVER_URL + 'face-embed', params={'path': img_path, **bbox}\n )\n", (3825, 3898), False, 'import requests\n')]
|
import os
import re
import numpy as np
import pandas as pd
import ujson as json
import json as js
import sys
import argparse
class UCIDataset:
def __init__(self, window, source_dataset, output_json, imputing_columns):
self.read_dataset(source_dataset)
self.window = window
self.set_ids()
self.imputing_columns = imputing_columns
self.data_frame = pd.get_dummies(self.data_frame)
self.columns = self.data_frame.shape[1]
self.mean = np.asarray(list(self.data_frame.mean(axis=0)))
self.std = np.asarray(list(self.data_frame.std(axis=0, skipna=True)))
### modify the std to 1
self.std[self.std == 0.] = 1
self.fs = open(output_json, 'w')
def read_dataset(self, source_dataset):
raw_df = pd.read_csv(source_dataset)
        ### todo(aoqian): separate time and main body
self.timestamp = raw_df[['timestamp']]
self.data_frame = raw_df.drop('timestamp', axis=1)
def set_ids(self):
"""
the id of sub sequences
:return:
"""
        self.ids = range(0, self.data_frame.shape[0] // self.window)  # integer number of windows
### todo(aoqian): might be removed then, since no need to get the label
def get_label(self, id_):
return 0
def update_evals(self, evals, id_):
return evals
def read_data(self, id_):
frame = self.data_frame.loc[id_ * self.window: (id_+1) * self.window - 1, :]
evals = []
for i in range(self.window):
evals.append(list(frame.iloc[i, :]))
evals = (np.array(evals) - self.mean) / self.std
return evals
def __del__(self):
print("destroy UCI dataset")
if not isinstance(self.mean, list):
self.mean = self.mean.tolist()
if not isinstance(self.std, list):
self.std = self.std.tolist()
data = {
"SEQ_LEN": self.window,
"COLUMNS": self.columns,
"JsonFile": self.fs.name,
"mean": self.mean,
"std": self.std,
"imputing_columns": self.imputing_columns
}
with open('../models/settings.txt', 'w') as outfile:
js.dump(data, outfile)
self.fs.close()
def parse_delta(masks, window, columns, dir_):
"""
todo(aoqian): 1. do not normalize time column; 2. compute the correct delta matrix, may follow GURI-GAN
    compute the delta matrix: the time gap since the last observed value for each column
:param masks:
:param window:
:param columns:
:param dir_:
:return:
"""
if dir_ == 'backward':
masks = masks[::-1]
deltas = []
for h in range(window):
if h == 0:
deltas.append(np.ones(columns))
else:
deltas.append(np.ones(columns) + (1 - masks[h]) * deltas[-1])
return np.array(deltas)
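# A tiny worked example of parse_delta (the function above); the helper name
# and the toy mask below are illustrative only.
def _parse_delta_demo():
    """With a 3-step window, 2 columns and a missing value at step 1 of
    column 0, the delta for that column grows to 2 while the mask is 0 and
    resets to 1 once the value is observed again.
    Expected result: [[1., 1.], [2., 1.], [1., 1.]]
    """
    masks = np.array([[1, 1], [0, 1], [1, 1]])
    return parse_delta(masks, window=3, columns=2, dir_='forward')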
def parse_rec(values, masks, evals, eval_masks, window, columns, dir_):
deltas = parse_delta(masks, window, columns, dir_)
# only used in GRU-D
forwards = pd.DataFrame(values).fillna(method='ffill').fillna(0.0).as_matrix()
rec = {}
rec['values'] = np.nan_to_num(values).tolist()
rec['masks'] = masks.astype('int32').tolist()
# imputation ground-truth
rec['evals'] = np.nan_to_num(evals).tolist()
rec['eval_masks'] = eval_masks.astype('int32').tolist()
rec['forwards'] = forwards.tolist()
rec['deltas'] = deltas.tolist()
return rec
def parse_id(id_, ds, index):
evals = ds.read_data(id_)
shp = evals.shape
evals = evals.reshape(-1) # 5 flattens the data in a 1d list
    # eliminate selected values (here a single index per run) as the imputation ground-truth
indices = np.where(~np.isnan(evals))[0].tolist() # 6 getting indices of the flat evals list that are not nan
# find all indices on the imputing columns
indices = list(filter(lambda x: (x % ds.columns in ds.imputing_columns), indices))
indices = [indices[index]]
values = evals.copy()
    values[indices] = np.nan  # 8 mask the selected indices with NaN in values (a copy of evals)
masks = ~np.isnan(values) # 9 bool matrix which is true for not nan indices of values
# for the indices that we randomly selected, eval_masks is true in those indices, others are all false
eval_masks = (~np.isnan(values)) ^ (~np.isnan(evals))
evals = ds.update_evals(evals, id_)
evals = evals.reshape(shp)
values = values.reshape(shp)
masks = masks.reshape(shp)
eval_masks = eval_masks.reshape(shp) # 10 reshaping everything to its original shape
label = ds.get_label(id_)
rec = {'label': label}
# prepare the model for both directions
rec['forward'] = parse_rec(values, masks, evals, eval_masks, ds.window, ds.columns, dir_='forward')
rec['backward'] = parse_rec(values[::-1], masks[::-1], evals[::-1], eval_masks[::-1], ds.window, ds.columns, dir_='backward')
rec = json.dumps(rec)
ds.fs.write(rec + '\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--window', type=int, default=50)
parser.add_argument('--percent', type=int, default=10)
parser.add_argument('--imputing', type=int, default=1)
parser.add_argument('--index', type=int, default=0)
args = parser.parse_args()
window = args.window
imputing = args.imputing
percent = args.percent
# index = args.index
raw_fpath = '../raw/air_100.csv'
df = pd.read_csv(raw_fpath)
df_dummy = pd.get_dummies(df.drop('timestamp', axis=1))
# make it integer
nrow = window
# target_cols = [0, 1, 2, 3]
### debug,
### in theroy, only one will be detected and imputed
target_cols = [0]
nrow = 1
for col in target_cols:
for index in range(nrow):
dataset = UCIDataset(window, raw_fpath, '../json/jsonAir100_process', [col])
for id_ in dataset.ids:
print('Processing sub series {}'.format(id_))
try:
parse_id(id_, dataset, index)
except Exception as e:
print(e)
continue
|
[
"pandas.DataFrame",
"json.dump",
"argparse.ArgumentParser",
"numpy.nan_to_num",
"pandas.read_csv",
"pandas.get_dummies",
"numpy.ones",
"numpy.isnan",
"numpy.array",
"ujson.dumps"
] |
[((2823, 2839), 'numpy.array', 'np.array', (['deltas'], {}), '(deltas)\n', (2831, 2839), True, 'import numpy as np\n'), ((4918, 4933), 'ujson.dumps', 'json.dumps', (['rec'], {}), '(rec)\n', (4928, 4933), True, 'import ujson as json\n'), ((5005, 5030), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5028, 5030), False, 'import argparse\n'), ((5448, 5470), 'pandas.read_csv', 'pd.read_csv', (['raw_fpath'], {}), '(raw_fpath)\n', (5459, 5470), True, 'import pandas as pd\n'), ((393, 424), 'pandas.get_dummies', 'pd.get_dummies', (['self.data_frame'], {}), '(self.data_frame)\n', (407, 424), True, 'import pandas as pd\n'), ((793, 820), 'pandas.read_csv', 'pd.read_csv', (['source_dataset'], {}), '(source_dataset)\n', (804, 820), True, 'import pandas as pd\n'), ((4096, 4112), 'numpy.isnan', 'np.isnan', (['values'], {}), '(values)\n', (4104, 4112), True, 'import numpy as np\n'), ((2190, 2212), 'json.dump', 'js.dump', (['data', 'outfile'], {}), '(data, outfile)\n', (2197, 2212), True, 'import json as js\n'), ((3113, 3134), 'numpy.nan_to_num', 'np.nan_to_num', (['values'], {}), '(values)\n', (3126, 3134), True, 'import numpy as np\n'), ((3243, 3263), 'numpy.nan_to_num', 'np.nan_to_num', (['evals'], {}), '(evals)\n', (3256, 3263), True, 'import numpy as np\n'), ((4301, 4317), 'numpy.isnan', 'np.isnan', (['values'], {}), '(values)\n', (4309, 4317), True, 'import numpy as np\n'), ((4323, 4338), 'numpy.isnan', 'np.isnan', (['evals'], {}), '(evals)\n', (4331, 4338), True, 'import numpy as np\n'), ((1571, 1586), 'numpy.array', 'np.array', (['evals'], {}), '(evals)\n', (1579, 1586), True, 'import numpy as np\n'), ((2705, 2721), 'numpy.ones', 'np.ones', (['columns'], {}), '(columns)\n', (2712, 2721), True, 'import numpy as np\n'), ((2763, 2779), 'numpy.ones', 'np.ones', (['columns'], {}), '(columns)\n', (2770, 2779), True, 'import numpy as np\n'), ((3672, 3687), 'numpy.isnan', 'np.isnan', (['evals'], {}), '(evals)\n', (3680, 3687), True, 'import numpy as np\n'), ((3010, 3030), 'pandas.DataFrame', 'pd.DataFrame', (['values'], {}), '(values)\n', (3022, 3030), True, 'import pandas as pd\n')]
|