prompt (large_string, lengths 72–9.34k) | completion (large_string, lengths 0–7.61k)
---|---|
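Each row below pairs a FIM-style prompt (sentinels such as `<|file_name|>`, `<|fim▁begin|>`, `<|fim_middle|>` or `<|fim▁hole|>`, and `<|fim▁end|>`, copied verbatim from the rows) with the masked span as the completion. The following is a minimal, illustrative sketch of how such a row could be split back into prefix, suffix, and target; the helper function and its name are assumptions for illustration, not part of the dataset itself.

```python
import re

# Sentinels exactly as they appear in the rows below (note the mixed use of
# "fim▁hole" and "fim_middle" across rows, reproduced here as an assumption).
FIM_BEGIN = "<|fim▁begin|>"
FIM_HOLE = "<|fim▁hole|>"
FIM_MIDDLE = "<|fim_middle|>"
FIM_END = "<|fim▁end|>"
FILE_NAME = re.compile(r"<\|file_name\|>(.*?)<\|end_file_name\|>")


def split_fim_row(prompt: str, completion: str) -> dict:
    """Split one prompt/completion row into file name, prefix, suffix, and target."""
    file_match = FILE_NAME.search(prompt)
    # Keep only the text between the begin and end sentinels.
    body = prompt.split(FIM_BEGIN, 1)[1]
    body = body.rsplit(FIM_END, 1)[0]
    # Rows mark the masked span with either sentinel; pick whichever is present.
    gap = FIM_HOLE if FIM_HOLE in body else FIM_MIDDLE
    prefix, suffix = body.split(gap, 1)
    return {
        "file_name": file_match.group(1) if file_match else None,
        "prefix": prefix,
        "suffix": suffix,
        "target": completion,  # e.g. "fit" in the first row below
    }
```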
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def <|fim_middle|>(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | fit |
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def <|fim_middle|>(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | predict_intensity |
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def <|fim_middle|>(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | predict |
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def <|fim_middle|>(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | predict_big_n |
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def <|fim_middle|>(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | coef_ |
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def <|fim_middle|>(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | eta_ |
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def <|fim_middle|>(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | intercept_ |
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def <|fim_middle|>(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | alpha_ |
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def <|fim_middle|>(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | n_iter_ |
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def <|fim_middle|>(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | __init__ |
<|file_name|>fit.py<|end_file_name|><|fim▁begin|>"""
Poisson time series penalised likelihood regression
via the Berman Turner device
"""
from . import weighted_linear_model
from . import design_nonlattice as design
from math import ceil
import numpy as np
from importlib import reload
design = reload(design)
class NonLatticeOneShot:
"""
the simplest device.
Uses a stepwise-constant quadrature rule and non-adaptive
smoothing.
"""
def __init__(
self,
positive=True,
normalize=False,
wlm=None,
wlm_factory='WeightedLassoLarsCV',
cum_interp='linear',
smoothing=1.0, # only for spline smoother
step_size=0.25, # only for dirac interpolant
strategy='random', # only for dirac interpolant
*args, **kwargs):
if wlm is None:
# Allow reference by class for easy serialization
if isinstance(wlm_factory, str):
wlm_factory = getattr(weighted_linear_model, wlm_factory)
self.wlm = wlm_factory(
positive=positive,
normalize=normalize,
*args, **kwargs
)
else:
self.wlm = wlm
self.big_n_hat_ = None
self.cum_interp = cum_interp
self.smoothing = smoothing
self.strategy = strategy
self.step_size = step_size
def fit(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
*args, **kwargs):
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
(
self.inc_predictors_,
self.inc_response_,
self.inc_sample_weight_
) = (
design.design_stepwise(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
basis_lag=self.basis_lag_,
big_n_hat=self.big_n_hat_,
sample_weight=sample_weight
)
)
self.wlm.fit(
X=self.inc_predictors_,
y=self.inc_response_,
sample_weight=self.inc_sample_weight_,
penalty_weight=penalty_weight,
*args, **kwargs
)
def predict_intensity(self, obs_t=None):
"""
This should return forward-predicted intensity
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict(self, obs_t=None):
"""
This should return predicted increments
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.predict_increment(
big_n=self.big_n_hat_,
obs_t=obs_t if obs_t is not None else self.obs_t_,
mu=self.intercept_,
basis_lag=self.basis_lag_,
coef=self.coef_)
def predict_big_n(self, obs_t=None):
"""
This should return predicted increment interpolant
based on the fitted histogram, up to the last observations
before the given times.
"""
return design.interpolate(
obs_t=self.obs_t_,
cum_obs=self.cum_obs_,
cum_interp=self.cum_interp,
smoothing=self.smoothing,
step_size=self.step_size,
strategy=self.strategy,
)
@property
def coef_(self):
return self.wlm.coef_
@property
def eta_(self):
return np.sum(self.coef_)
@property
def intercept_(self):
return self.wlm.intercept_
@property
def alpha_(self):
return self.wlm.alpha_
@property
def n_iter_(self):
return self.wlm.n_iter_
class NonLatticeIterative(NonLatticeOneShot):
"""
repeatedly forward-smooth to find optimal interpolant.
TODO: This doesn't do backwards losses
"""
def __init__(
self,
*args, **kwargs):
super().__init__(
cum_interp='dirac',
strategy='random',
*args, **kwargs)
def <|fim_middle|>(
self,
obs_t,
cum_obs,
basis_lag=1.0,
penalty_weight='adaptive',
sample_weight='bermanturner',
max_basis_span=float('inf'),
big_n_hat=None,
max_iter=3,
*args, **kwargs):
inner_model = NonLatticeOneShot(
wlm=self.wlm,
cum_interp='linear',
)
self.inner_model = inner_model
self.obs_t_ = obs_t
self.cum_obs_ = cum_obs
if np.isscalar(basis_lag):
# scalars are a bin width
basis_span = min(
(np.amax(obs_t) - np.amin(obs_t))/2.0,
max_basis_span
)
n_bins = ceil(basis_span/basis_lag)
self.basis_lag_ = np.arange(n_bins+1) * basis_lag
else:
self.basis_lag_ = basis_lag
if big_n_hat is None:
self.big_n_hat_ = self.predict_big_n()
for i in range(max_iter):
print('i', i, max_iter)
inner_model.fit(
obs_t=self.big_n_hat_.spike_lattice,
cum_obs=self.big_n_hat_.spike_cum_weight,
*args,
**kwargs)
n_hat_arr = inner_model.predict(
obs_t=self.big_n_hat_.spike_lattice,
)
self.big_n_hat_ = design.reweight_dirac_interpolant(
self.big_n_hat_,
n_hat_arr
)
<|fim▁end|> | fit |
<|file_name|>sign.py<|end_file_name|><|fim▁begin|>import chainer
from chainer import backend
from chainer import utils<|fim▁hole|>
def sign(x):
"""Elementwise sign function.
For a given input :math:`x`, this function returns :math:`sgn(x)`
defined as
.. math::
sgn(x) = \\left \\{ \\begin{array}{cc}
-1 & {\\rm if~x < 0} \\\\
0 & {\\rm if~x = 0} \\\\
1 & {\\rm if~x > 0} \\\\
\\end{array} \\right.
.. note::
The gradient of this function is ``None`` everywhere and therefore
unchains the computational graph.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable for which the sign is computed.
Returns:
~chainer.Variable: Output variable.
"""
if isinstance(x, chainer.variable.Variable):
x = x.array
xp = backend.get_array_module(x)
return chainer.as_variable(utils.force_array(xp.sign(x)))<|fim▁end|> | |
<|file_name|>sign.py<|end_file_name|><|fim▁begin|>import chainer
from chainer import backend
from chainer import utils
def sign(x):
<|fim_middle|>
<|fim▁end|> | """Elementwise sign function.
For a given input :math:`x`, this function returns :math:`sgn(x)`
defined as
.. math::
sgn(x) = \\left \\{ \\begin{array}{cc}
-1 & {\\rm if~x < 0} \\\\
0 & {\\rm if~x = 0} \\\\
1 & {\\rm if~x > 0} \\\\
\\end{array} \\right.
.. note::
The gradient of this function is ``None`` everywhere and therefore
unchains the computational graph.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable for which the sign is computed.
Returns:
~chainer.Variable: Output variable.
"""
if isinstance(x, chainer.variable.Variable):
x = x.array
xp = backend.get_array_module(x)
return chainer.as_variable(utils.force_array(xp.sign(x))) |
<|file_name|>sign.py<|end_file_name|><|fim▁begin|>import chainer
from chainer import backend
from chainer import utils
def sign(x):
"""Elementwise sign function.
For a given input :math:`x`, this function returns :math:`sgn(x)`
defined as
.. math::
sgn(x) = \\left \\{ \\begin{array}{cc}
-1 & {\\rm if~x < 0} \\\\
0 & {\\rm if~x = 0} \\\\
1 & {\\rm if~x > 0} \\\\
\\end{array} \\right.
.. note::
The gradient of this function is ``None`` everywhere and therefore
unchains the computational graph.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable for which the sign is computed.
Returns:
~chainer.Variable: Output variable.
"""
if isinstance(x, chainer.variable.Variable):
<|fim_middle|>
xp = backend.get_array_module(x)
return chainer.as_variable(utils.force_array(xp.sign(x)))
<|fim▁end|> | x = x.array |
<|file_name|>sign.py<|end_file_name|><|fim▁begin|>import chainer
from chainer import backend
from chainer import utils
def <|fim_middle|>(x):
"""Elementwise sign function.
For a given input :math:`x`, this function returns :math:`sgn(x)`
defined as
.. math::
sgn(x) = \\left \\{ \\begin{array}{cc}
-1 & {\\rm if~x < 0} \\\\
0 & {\\rm if~x = 0} \\\\
1 & {\\rm if~x > 0} \\\\
\\end{array} \\right.
.. note::
The gradient of this function is ``None`` everywhere and therefore
unchains the computational graph.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable for which the sign is computed.
Returns:
~chainer.Variable: Output variable.
"""
if isinstance(x, chainer.variable.Variable):
x = x.array
xp = backend.get_array_module(x)
return chainer.as_variable(utils.force_array(xp.sign(x)))
<|fim▁end|> | sign |
<|file_name|>maps_http_geocode_place_id.py<|end_file_name|><|fim▁begin|># [START maps_http_geocode_place_id]
import requests
url = "https://maps.googleapis.com/maps/api/geocode/json?place_id=ChIJd8BlQ2BZwokRAFUEcm_qrcA&key=YOUR_API_KEY"
payload={}
headers = {}
response = requests.request("GET", url, headers=headers, data=payload)
print(response.text)
<|fim▁hole|># [END maps_http_geocode_place_id]<|fim▁end|> | |
<|file_name|>waypointMaps.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
"""
Copyright 2012 Paul Willworth <[email protected]>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys<|fim▁hole|>import dbSession
import dbShared
import MySQLdb
import ghShared
import ghLists
from jinja2 import Environment, FileSystemLoader
# Get current url
try:
url = os.environ['SCRIPT_NAME']
except KeyError:
url = ''
uiTheme = ''
form = cgi.FieldStorage()
# Get Cookies
useCookies = 1
cookies = Cookie.SimpleCookie()
try:
cookies.load(os.environ['HTTP_COOKIE'])
except KeyError:
useCookies = 0
if useCookies:
try:
currentUser = cookies['userID'].value
except KeyError:
currentUser = ''
try:
loginResult = cookies['loginAttempt'].value
except KeyError:
loginResult = 'success'
try:
sid = cookies['gh_sid'].value
except KeyError:
sid = form.getfirst('gh_sid', '')
try:
uiTheme = cookies['uiTheme'].value
except KeyError:
uiTheme = ''
else:
currentUser = ''
loginResult = form.getfirst('loginAttempt', '')
sid = form.getfirst('gh_sid', '')
# Get a session
logged_state = 0
linkappend = ''
disableStr = ''
# escape input to prevent sql injection
sid = dbShared.dbInsertSafe(sid)
if loginResult == None:
loginResult = 'success'
sess = dbSession.getSession(sid, 2592000)
if (sess != ''):
logged_state = 1
currentUser = sess
if (uiTheme == ''):
uiTheme = dbShared.getUserAttr(currentUser, 'themeName')
if (useCookies == 0):
linkappend = 'gh_sid=' + sid
else:
disableStr = ' disabled="disabled"'
if (uiTheme == ''):
uiTheme = 'crafter'
pictureName = dbShared.getUserAttr(currentUser, 'pictureName')
print 'Content-type: text/html\n'
env = Environment(loader=FileSystemLoader('templates'))
env.globals['BASE_SCRIPT_URL'] = ghShared.BASE_SCRIPT_URL
template = env.get_template('waypointmaps.html')
print template.render(uiTheme=uiTheme, loggedin=logged_state, currentUser=currentUser, loginResult=loginResult, linkappend=linkappend, url=url, pictureName=pictureName, imgNum=ghShared.imgNum, galaxyList=ghLists.getGalaxyList(), planetList=ghLists.getPlanetList())<|fim▁end|> | import cgi
import Cookie |
# File: test_InT01.py
""" Integration test: permit call
"""
import os
import sys
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + '/../../')
import logging
import nose
from nose.tools import *
import inte_testutils
from telewall.core.model import TelephoneNumber
from telewall.core.util import sleep_until
logging.basicConfig(filename='/tmp/telewall-inte.log', level=logging.DEBUG)
logging.getLogger('telewall').setLevel(logging.DEBUG)
LOG = logging.getLogger(__name__)
def test_Anruf_erlauben():
    u = inte_testutils.TestUtil()
    u.unblock_callerid(TelephoneNumber('0790000001'))
call = u.make_call_to_incoming(callerid='0790000001')
LOG.info('call: %s', call)
sleep_until(lambda: 'Ringing' in call.get_call_states() or 'Up' in call.get_call_states(), 5)
call.hangup()
states = call.get_call_states()
LOG.info('states: %s', states)
assert_true('Ringing' in states,
'Das analoge Telefon sollte angerufen worden sein, aber es gab keinen "Ringing" Status.')
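    # German assertion message: "The analog phone should have been called, but there was no 'Ringing' state."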
call.stop()
if __name__ == '__main__':
    nose.runmodule()
# File: cornetto-add-counts.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
add word counts to Cornetto lexical units database file
The word count file should have three columns, delimited by white space,
containing (1) the count, (2) the lemma, (3) the main POS tag.
The tagset is assumed to be the Spoken Dutch Corpus tagset,
and the character encoding must be ISO-8859-1.
The counts appear as the value of the feature "count" on <form> elements.
The updated lexical units xml database is written to standard output.
Since we have only the lemma and the POS, and no word sense, the frequency
information is added to each matching lexical unit regardless of its sense
(i.e. the value of the "c_seq_nr" attribute).
"""
# TODO:
# - deal with multiword counts
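# Example word count input lines (hypothetical values): <count> <lemma> <CGN main POS tag>
#   1280 fiets N
#   312 fietsen WW
#   57 mooi ADJ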
__author__ = 'Erwin Marsi <[email protected]>'
__version__ = '0.6'
from sys import stderr, stdout
from xml.etree.cElementTree import iterparse, SubElement, tostring, ElementTree
from cornetto.argparse import ArgumentParser, RawDescriptionHelpFormatter
def read_counts(file):
if not hasattr(file, "read"):
file = open(file)
counts = {}
totals = dict(noun=0, verb=0, adj=0, other=0)
for l in file:
try:
count, form, tag = l.strip().split()
except ValueError:
stderr.write("Warning; ill-formed line: %s\n" % repr(l))
continue
# translate CGN tagset to word category
if tag in ("N", "VNW", "TW", "SPEC"):
cat = "noun"
elif tag in ("WW"):
cat = "verb"
elif tag in ("ADJ", "BW"):
cat = "adj"
else:
# LET LID TSW VG VZ
cat = "other"
# Cornetto word forms are stored in unicode
form = form.decode("iso-8859-1")
count = int(count)
if form not in counts:
counts[form] = dict(noun=0, verb=0, adj=0, other=0)
counts[form][cat] += count
totals[cat] += count
return counts, totals
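# read_counts() therefore returns, e.g. (illustrative values):
#   counts[u'fiets'] == {'noun': 1280, 'verb': 0, 'adj': 0, 'other': 0}
# together with per-category totals over the whole count file.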
def add_count_attrib(counts, totals, cdb_lu_file):
parser = iterparse(cdb_lu_file)
for event, elem in parser:
if elem.tag == "form":
# following the ElementTree conventions,
# word form will be ascii or unicode
form = elem.get("form-spelling")
            # lower case because Cornetto is not consistent
cat = elem.get("form-cat").lower()
# fix category flaws in current release of Cornetto
if cat == "adjective":
cat = "adj"
elif cat == "adverb":
cat = "other"
try:
count = counts[form][cat]
except KeyError:
# form not found
count = 0
            elem.set("count", str(count))
    # Finally, add totals, per category and overall, to the doc root
    # Note that all words _not_ in Cornetto are not included in these totals
totals["all"] = sum(totals.values())
for cat, count in totals.items():
parser.root.set("count-total-%s" % cat, str(count))
return ElementTree(parser.root)
parser = ArgumentParser(description=__doc__,
version="%(prog)s version " + __version__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("cdb_lu", type=file,
help="xml file containing the lexical units")
parser.add_argument("word_counts", type=file,
help="tabular file containing the word counts")
args = parser.parse_args()
counts, totals = read_counts(args.word_counts)
etree = add_count_attrib(counts, totals, args.cdb_lu)
etree.write(stdout, encoding="utf-8")
#def add_statistics_elem(counts, cdb_lu_file):
#"""
#adds a separate <statistics> element,
#which accommodates for other counts for other sources
#"""
#parser = iterparse(cdb_lu_file)
#for event, elem in parser:
#if elem.tag == "cdb_lu":
#try:
#count = counts[form][cat]
#except KeyError:
#count = 0
#freq_el = SubElement(elem, "statistics")
#SubElement(freq_el, "count", scr="uvt").text = str(count)
#elif elem.tag == "form":
## following the ElementTree conventions,
## word form will be ascii or unicode
#form = elem.get("form-spelling")
#cat = elem.get("form-cat")
#return ElementTree(parser.root)
<|file_name|>cornetto-add-counts.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
add word counts to Cornetto lexical units database file
The word count file should have three columns, delimited by white space,
containing (1) the count, (2) the lemma, (3) the main POS tag.
The tagset is assumed to be the Spoken Dutch Corpus tagset,
and the character encoding must be ISO-8859-1.
The counts appear as the value of the feature "count" on <form> elements.
The updated lexical units xml database is written to standard output.
Since we have only the lemma and the POS, and no word sense, the frequency
information is added to each matching lexical unit regardless of its sense
(i.e. the value of the "c_seq_nr" attribute).
"""
# TODO:
# - deal with multiword counts
__author__ = 'Erwin Marsi <[email protected]>'
__version__ = '0.6'
from sys import stderr, stdout
from xml.etree.cElementTree import iterparse, SubElement, tostring, ElementTree
from cornetto.argparse import ArgumentParser, RawDescriptionHelpFormatter
def read_counts(file):
<|fim_middle|>
def add_count_attrib(counts, totals, cdb_lu_file):
parser = iterparse(cdb_lu_file)
for event, elem in parser:
if elem.tag == "form":
# following the ElementTree conventions,
# word form will be ascii or unicode
form = elem.get("form-spelling")
# lower case because Cornette is not consistent
cat = elem.get("form-cat").lower()
# fix category flaws in current release of Cornetto
if cat == "adjective":
cat = "adj"
elif cat == "adverb":
cat = "other"
try:
count = counts[form][cat]
except KeyError:
# form not found
count = 0
elem.set("count", str(count))
# Finally, add totals, per category and overall, to the doc root
# Note that all words _not_ in Cornetto are not included in these totals
totals["all"] = sum(totals.values())
for cat, count in totals.items():
parser.root.set("count-total-%s" % cat, str(count))
return ElementTree(parser.root)
parser = ArgumentParser(description=__doc__,
version="%(prog)s version " + __version__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("cdb_lu", type=file,
help="xml file containing the lexical units")
parser.add_argument("word_counts", type=file,
help="tabular file containing the word counts")
args = parser.parse_args()
counts, totals = read_counts(args.word_counts)
etree = add_count_attrib(counts, totals, args.cdb_lu)
etree.write(stdout, encoding="utf-8")
#def add_statistics_elem(counts, cdb_lu_file):
#"""
#adds a separate <statistics> element,
#which accomodates for other counts for other sources
#"""
#parser = iterparse(cdb_lu_file)
#for event, elem in parser:
#if elem.tag == "cdb_lu":
#try:
#count = counts[form][cat]
#except KeyError:
#count = 0
#freq_el = SubElement(elem, "statistics")
#SubElement(freq_el, "count", scr="uvt").text = str(count)
#elif elem.tag == "form":
## following the ElementTree conventions,
## word form will be ascii or unicode
#form = elem.get("form-spelling")
#cat = elem.get("form-cat")
#return ElementTree(parser.root)
<|fim▁end|> | if not hasattr(file, "read"):
file = open(file)
counts = {}
totals = dict(noun=0, verb=0, adj=0, other=0)
for l in file:
try:
count, form, tag = l.strip().split()
except ValueError:
stderr.write("Warning; ill-formed line: %s\n" % repr(l))
continue
# translate CGN tagset to word category
if tag in ("N", "VNW", "TW", "SPEC"):
cat = "noun"
elif tag in ("WW"):
cat = "verb"
elif tag in ("ADJ", "BW"):
cat = "adj"
else:
# LET LID TSW VG VZ
cat = "other"
# Cornetto word forms are stored in unicode
form = form.decode("iso-8859-1")
count = int(count)
if form not in counts:
counts[form] = dict(noun=0, verb=0, adj=0, other=0)
counts[form][cat] += count
totals[cat] += count
return counts, totals |
<|file_name|>cornetto-add-counts.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
add word counts to Cornetto lexical units database file
The word count file should have three columns, delimited by white space,
containing (1) the count, (2) the lemma, (3) the main POS tag.
The tagset is assumed to be the Spoken Dutch Corpus tagset,
and the character encoding must be ISO-8859-1.
The counts appear as the value of the feature "count" on <form> elements.
The updated lexical units xml database is written to standard output.
Since we have only the lemma and the POS, and no word sense, the frequency
information is added to each matching lexical unit regardless of its sense
(i.e. the value of the "c_seq_nr" attribute).
"""
# TODO:
# - deal with multiword counts
__author__ = 'Erwin Marsi <[email protected]>'
__version__ = '0.6'
from sys import stderr, stdout
from xml.etree.cElementTree import iterparse, SubElement, tostring, ElementTree
from cornetto.argparse import ArgumentParser, RawDescriptionHelpFormatter
def read_counts(file):
if not hasattr(file, "read"):
file = open(file)
counts = {}
totals = dict(noun=0, verb=0, adj=0, other=0)
for l in file:
try:
count, form, tag = l.strip().split()
except ValueError:
stderr.write("Warning; ill-formed line: %s\n" % repr(l))
continue
# translate CGN tagset to word category
if tag in ("N", "VNW", "TW", "SPEC"):
cat = "noun"
elif tag in ("WW"):
cat = "verb"
elif tag in ("ADJ", "BW"):
cat = "adj"
else:
# LET LID TSW VG VZ
cat = "other"
# Cornetto word forms are stored in unicode
form = form.decode("iso-8859-1")
count = int(count)
if form not in counts:
counts[form] = dict(noun=0, verb=0, adj=0, other=0)
counts[form][cat] += count
totals[cat] += count
return counts, totals
def add_count_attrib(counts, totals, cdb_lu_file):
<|fim_middle|>
parser = ArgumentParser(description=__doc__,
version="%(prog)s version " + __version__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("cdb_lu", type=file,
help="xml file containing the lexical units")
parser.add_argument("word_counts", type=file,
help="tabular file containing the word counts")
args = parser.parse_args()
counts, totals = read_counts(args.word_counts)
etree = add_count_attrib(counts, totals, args.cdb_lu)
etree.write(stdout, encoding="utf-8")
#def add_statistics_elem(counts, cdb_lu_file):
#"""
#adds a separate <statistics> element,
#which accomodates for other counts for other sources
#"""
#parser = iterparse(cdb_lu_file)
#for event, elem in parser:
#if elem.tag == "cdb_lu":
#try:
#count = counts[form][cat]
#except KeyError:
#count = 0
#freq_el = SubElement(elem, "statistics")
#SubElement(freq_el, "count", scr="uvt").text = str(count)
#elif elem.tag == "form":
## following the ElementTree conventions,
## word form will be ascii or unicode
#form = elem.get("form-spelling")
#cat = elem.get("form-cat")
#return ElementTree(parser.root)
<|fim▁end|> | parser = iterparse(cdb_lu_file)
for event, elem in parser:
if elem.tag == "form":
# following the ElementTree conventions,
# word form will be ascii or unicode
form = elem.get("form-spelling")
# lower case because Cornette is not consistent
cat = elem.get("form-cat").lower()
# fix category flaws in current release of Cornetto
if cat == "adjective":
cat = "adj"
elif cat == "adverb":
cat = "other"
try:
count = counts[form][cat]
except KeyError:
# form not found
count = 0
elem.set("count", str(count))
# Finally, add totals, per category and overall, to the doc root
# Note that all words _not_ in Cornetto are not included in these totals
totals["all"] = sum(totals.values())
for cat, count in totals.items():
parser.root.set("count-total-%s" % cat, str(count))
return ElementTree(parser.root) |
<|file_name|>cornetto-add-counts.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
add word counts to Cornetto lexical units database file
The word count file should have three columns, delimited by white space,
containing (1) the count, (2) the lemma, (3) the main POS tag.
The tagset is assumed to be the Spoken Dutch Corpus tagset,
and the character encoding must be ISO-8859-1.
The counts appear as the value of the feature "count" on <form> elements.
The updated lexical units xml database is written to standard output.
Since we have only the lemma and the POS, and no word sense, the frequency
information is added to each matching lexical unit regardless of its sense
(i.e. the value of the "c_seq_nr" attribute).
"""
# TODO:
# - deal with multiword counts
__author__ = 'Erwin Marsi <[email protected]>'
__version__ = '0.6'
from sys import stderr, stdout
from xml.etree.cElementTree import iterparse, SubElement, tostring, ElementTree
from cornetto.argparse import ArgumentParser, RawDescriptionHelpFormatter
def read_counts(file):
if not hasattr(file, "read"):
<|fim_middle|>
counts = {}
totals = dict(noun=0, verb=0, adj=0, other=0)
for l in file:
try:
count, form, tag = l.strip().split()
except ValueError:
stderr.write("Warning; ill-formed line: %s\n" % repr(l))
continue
# translate CGN tagset to word category
if tag in ("N", "VNW", "TW", "SPEC"):
cat = "noun"
elif tag in ("WW"):
cat = "verb"
elif tag in ("ADJ", "BW"):
cat = "adj"
else:
# LET LID TSW VG VZ
cat = "other"
# Cornetto word forms are stored in unicode
form = form.decode("iso-8859-1")
count = int(count)
if form not in counts:
counts[form] = dict(noun=0, verb=0, adj=0, other=0)
counts[form][cat] += count
totals[cat] += count
return counts, totals
def add_count_attrib(counts, totals, cdb_lu_file):
parser = iterparse(cdb_lu_file)
for event, elem in parser:
if elem.tag == "form":
# following the ElementTree conventions,
# word form will be ascii or unicode
form = elem.get("form-spelling")
# lower case because Cornette is not consistent
cat = elem.get("form-cat").lower()
# fix category flaws in current release of Cornetto
if cat == "adjective":
cat = "adj"
elif cat == "adverb":
cat = "other"
try:
count = counts[form][cat]
except KeyError:
# form not found
count = 0
elem.set("count", str(count))
# Finally, add totals, per category and overall, to the doc root
# Note that all words _not_ in Cornetto are not included in these totals
totals["all"] = sum(totals.values())
for cat, count in totals.items():
parser.root.set("count-total-%s" % cat, str(count))
return ElementTree(parser.root)
parser = ArgumentParser(description=__doc__,
version="%(prog)s version " + __version__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("cdb_lu", type=file,
help="xml file containing the lexical units")
parser.add_argument("word_counts", type=file,
help="tabular file containing the word counts")
args = parser.parse_args()
counts, totals = read_counts(args.word_counts)
etree = add_count_attrib(counts, totals, args.cdb_lu)
etree.write(stdout, encoding="utf-8")
#def add_statistics_elem(counts, cdb_lu_file):
#"""
#adds a separate <statistics> element,
#which accomodates for other counts for other sources
#"""
#parser = iterparse(cdb_lu_file)
#for event, elem in parser:
#if elem.tag == "cdb_lu":
#try:
#count = counts[form][cat]
#except KeyError:
#count = 0
#freq_el = SubElement(elem, "statistics")
#SubElement(freq_el, "count", scr="uvt").text = str(count)
#elif elem.tag == "form":
## following the ElementTree conventions,
## word form will be ascii or unicode
#form = elem.get("form-spelling")
#cat = elem.get("form-cat")
#return ElementTree(parser.root)
<|fim▁end|> | file = open(file) |
<|file_name|>cornetto-add-counts.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
add word counts to Cornetto lexical units database file
The word count file should have three columns, delimited by white space,
containing (1) the count, (2) the lemma, (3) the main POS tag.
The tagset is assumed to be the Spoken Dutch Corpus tagset,
and the character encoding must be ISO-8859-1.
The counts appear as the value of the feature "count" on <form> elements.
The updated lexical units xml database is written to standard output.
Since we have only the lemma and the POS, and no word sense, the frequency
information is added to each matching lexical unit regardless of its sense
(i.e. the value of the "c_seq_nr" attribute).
"""
# TODO:
# - deal with multiword counts
__author__ = 'Erwin Marsi <[email protected]>'
__version__ = '0.6'
from sys import stderr, stdout
from xml.etree.cElementTree import iterparse, SubElement, tostring, ElementTree
from cornetto.argparse import ArgumentParser, RawDescriptionHelpFormatter
def read_counts(file):
if not hasattr(file, "read"):
file = open(file)
counts = {}
totals = dict(noun=0, verb=0, adj=0, other=0)
for l in file:
try:
count, form, tag = l.strip().split()
except ValueError:
stderr.write("Warning; ill-formed line: %s\n" % repr(l))
continue
# translate CGN tagset to word category
if tag in ("N", "VNW", "TW", "SPEC"):
<|fim_middle|>
elif tag in ("WW"):
cat = "verb"
elif tag in ("ADJ", "BW"):
cat = "adj"
else:
# LET LID TSW VG VZ
cat = "other"
# Cornetto word forms are stored in unicode
form = form.decode("iso-8859-1")
count = int(count)
if form not in counts:
counts[form] = dict(noun=0, verb=0, adj=0, other=0)
counts[form][cat] += count
totals[cat] += count
return counts, totals
def add_count_attrib(counts, totals, cdb_lu_file):
parser = iterparse(cdb_lu_file)
for event, elem in parser:
if elem.tag == "form":
# following the ElementTree conventions,
# word form will be ascii or unicode
form = elem.get("form-spelling")
# lower case because Cornette is not consistent
cat = elem.get("form-cat").lower()
# fix category flaws in current release of Cornetto
if cat == "adjective":
cat = "adj"
elif cat == "adverb":
cat = "other"
try:
count = counts[form][cat]
except KeyError:
# form not found
count = 0
elem.set("count", str(count))
# Finally, add totals, per category and overall, to the doc root
# Note that all words _not_ in Cornetto are not included in these totals
totals["all"] = sum(totals.values())
for cat, count in totals.items():
parser.root.set("count-total-%s" % cat, str(count))
return ElementTree(parser.root)
parser = ArgumentParser(description=__doc__,
version="%(prog)s version " + __version__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("cdb_lu", type=file,
help="xml file containing the lexical units")
parser.add_argument("word_counts", type=file,
help="tabular file containing the word counts")
args = parser.parse_args()
counts, totals = read_counts(args.word_counts)
etree = add_count_attrib(counts, totals, args.cdb_lu)
etree.write(stdout, encoding="utf-8")
#def add_statistics_elem(counts, cdb_lu_file):
#"""
#adds a separate <statistics> element,
#which accomodates for other counts for other sources
#"""
#parser = iterparse(cdb_lu_file)
#for event, elem in parser:
#if elem.tag == "cdb_lu":
#try:
#count = counts[form][cat]
#except KeyError:
#count = 0
#freq_el = SubElement(elem, "statistics")
#SubElement(freq_el, "count", scr="uvt").text = str(count)
#elif elem.tag == "form":
## following the ElementTree conventions,
## word form will be ascii or unicode
#form = elem.get("form-spelling")
#cat = elem.get("form-cat")
#return ElementTree(parser.root)
<|fim▁end|> | cat = "noun" |
<|file_name|>cornetto-add-counts.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
add word counts to Cornetto lexical units database file
The word count file should have three columns, delimited by white space,
containing (1) the count, (2) the lemma, (3) the main POS tag.
The tagset is assumed to be the Spoken Dutch Corpus tagset,
and the character encoding must be ISO-8859-1.
The counts appear as the value of the feature "count" on <form> elements.
The updated lexical units xml database is written to standard output.
Since we have only the lemma and the POS, and no word sense, the frequency
information is added to each matching lexical unit regardless of its sense
(i.e. the value of the "c_seq_nr" attribute).
"""
# TODO:
# - deal with multiword counts
__author__ = 'Erwin Marsi <[email protected]>'
__version__ = '0.6'
from sys import stderr, stdout
from xml.etree.cElementTree import iterparse, SubElement, tostring, ElementTree
from cornetto.argparse import ArgumentParser, RawDescriptionHelpFormatter
def read_counts(file):
if not hasattr(file, "read"):
file = open(file)
counts = {}
totals = dict(noun=0, verb=0, adj=0, other=0)
for l in file:
try:
count, form, tag = l.strip().split()
except ValueError:
stderr.write("Warning; ill-formed line: %s\n" % repr(l))
continue
# translate CGN tagset to word category
if tag in ("N", "VNW", "TW", "SPEC"):
cat = "noun"
elif tag in ("WW"):
<|fim_middle|>
elif tag in ("ADJ", "BW"):
cat = "adj"
else:
# LET LID TSW VG VZ
cat = "other"
# Cornetto word forms are stored in unicode
form = form.decode("iso-8859-1")
count = int(count)
if form not in counts:
counts[form] = dict(noun=0, verb=0, adj=0, other=0)
counts[form][cat] += count
totals[cat] += count
return counts, totals
def add_count_attrib(counts, totals, cdb_lu_file):
parser = iterparse(cdb_lu_file)
for event, elem in parser:
if elem.tag == "form":
# following the ElementTree conventions,
# word form will be ascii or unicode
form = elem.get("form-spelling")
# lower case because Cornette is not consistent
cat = elem.get("form-cat").lower()
# fix category flaws in current release of Cornetto
if cat == "adjective":
cat = "adj"
elif cat == "adverb":
cat = "other"
try:
count = counts[form][cat]
except KeyError:
# form not found
count = 0
elem.set("count", str(count))
# Finally, add totals, per category and overall, to the doc root
# Note that all words _not_ in Cornetto are not included in these totals
totals["all"] = sum(totals.values())
for cat, count in totals.items():
parser.root.set("count-total-%s" % cat, str(count))
return ElementTree(parser.root)
parser = ArgumentParser(description=__doc__,
version="%(prog)s version " + __version__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("cdb_lu", type=file,
help="xml file containing the lexical units")
parser.add_argument("word_counts", type=file,
help="tabular file containing the word counts")
args = parser.parse_args()
counts, totals = read_counts(args.word_counts)
etree = add_count_attrib(counts, totals, args.cdb_lu)
etree.write(stdout, encoding="utf-8")
#def add_statistics_elem(counts, cdb_lu_file):
#"""
#adds a separate <statistics> element,
#which accomodates for other counts for other sources
#"""
#parser = iterparse(cdb_lu_file)
#for event, elem in parser:
#if elem.tag == "cdb_lu":
#try:
#count = counts[form][cat]
#except KeyError:
#count = 0
#freq_el = SubElement(elem, "statistics")
#SubElement(freq_el, "count", scr="uvt").text = str(count)
#elif elem.tag == "form":
## following the ElementTree conventions,
## word form will be ascii or unicode
#form = elem.get("form-spelling")
#cat = elem.get("form-cat")
#return ElementTree(parser.root)
<|fim▁end|> | cat = "verb" |
<|file_name|>cornetto-add-counts.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
add word counts to Cornetto lexical units database file
The word count file should have three columns, delimited by white space,
containing (1) the count, (2) the lemma, (3) the main POS tag.
The tagset is assumed to be the Spoken Dutch Corpus tagset,
and the character encoding must be ISO-8859-1.
The counts appear as the value of the feature "count" on <form> elements.
The updated lexical units xml database is written to standard output.
Since we have only the lemma and the POS, and no word sense, the frequency
information is added to each matching lexical unit regardless of its sense
(i.e. the value of the "c_seq_nr" attribute).
"""
# TODO:
# - deal with multiword counts
__author__ = 'Erwin Marsi <[email protected]>'
__version__ = '0.6'
from sys import stderr, stdout
from xml.etree.cElementTree import iterparse, SubElement, tostring, ElementTree
from cornetto.argparse import ArgumentParser, RawDescriptionHelpFormatter
def read_counts(file):
if not hasattr(file, "read"):
file = open(file)
counts = {}
totals = dict(noun=0, verb=0, adj=0, other=0)
for l in file:
try:
count, form, tag = l.strip().split()
except ValueError:
stderr.write("Warning; ill-formed line: %s\n" % repr(l))
continue
# translate CGN tagset to word category
if tag in ("N", "VNW", "TW", "SPEC"):
cat = "noun"
elif tag in ("WW"):
cat = "verb"
elif tag in ("ADJ", "BW"):
<|fim_middle|>
else:
# LET LID TSW VG VZ
cat = "other"
# Cornetto word forms are stored in unicode
form = form.decode("iso-8859-1")
count = int(count)
if form not in counts:
counts[form] = dict(noun=0, verb=0, adj=0, other=0)
counts[form][cat] += count
totals[cat] += count
return counts, totals
def add_count_attrib(counts, totals, cdb_lu_file):
parser = iterparse(cdb_lu_file)
for event, elem in parser:
if elem.tag == "form":
# following the ElementTree conventions,
# word form will be ascii or unicode
form = elem.get("form-spelling")
# lower case because Cornette is not consistent
cat = elem.get("form-cat").lower()
# fix category flaws in current release of Cornetto
if cat == "adjective":
cat = "adj"
elif cat == "adverb":
cat = "other"
try:
count = counts[form][cat]
except KeyError:
# form not found
count = 0
elem.set("count", str(count))
# Finally, add totals, per category and overall, to the doc root
# Note that all words _not_ in Cornetto are not included in these totals
totals["all"] = sum(totals.values())
for cat, count in totals.items():
parser.root.set("count-total-%s" % cat, str(count))
return ElementTree(parser.root)
parser = ArgumentParser(description=__doc__,
version="%(prog)s version " + __version__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("cdb_lu", type=file,
help="xml file containing the lexical units")
parser.add_argument("word_counts", type=file,
help="tabular file containing the word counts")
args = parser.parse_args()
counts, totals = read_counts(args.word_counts)
etree = add_count_attrib(counts, totals, args.cdb_lu)
etree.write(stdout, encoding="utf-8")
#def add_statistics_elem(counts, cdb_lu_file):
#"""
#adds a separate <statistics> element,
#which accomodates for other counts for other sources
#"""
#parser = iterparse(cdb_lu_file)
#for event, elem in parser:
#if elem.tag == "cdb_lu":
#try:
#count = counts[form][cat]
#except KeyError:
#count = 0
#freq_el = SubElement(elem, "statistics")
#SubElement(freq_el, "count", scr="uvt").text = str(count)
#elif elem.tag == "form":
## following the ElementTree conventions,
## word form will be ascii or unicode
#form = elem.get("form-spelling")
#cat = elem.get("form-cat")
#return ElementTree(parser.root)
<|fim▁end|> | cat = "adj" |
<|file_name|>cornetto-add-counts.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
add word counts to Cornetto lexical units database file
The word count file should have three columns, delimited by white space,
containing (1) the count, (2) the lemma, (3) the main POS tag.
The tagset is assumed to be the Spoken Dutch Corpus tagset,
and the character encoding must be ISO-8859-1.
The counts appear as the value of the feature "count" on <form> elements.
The updated lexical units xml database is written to standard output.
Since we have only the lemma and the POS, and no word sense, the frequency
information is added to each matching lexical unit regardless of its sense
(i.e. the value of the "c_seq_nr" attribute).
"""
# TODO:
# - deal with multiword counts
__author__ = 'Erwin Marsi <[email protected]>'
__version__ = '0.6'
from sys import stderr, stdout
from xml.etree.cElementTree import iterparse, SubElement, tostring, ElementTree
from cornetto.argparse import ArgumentParser, RawDescriptionHelpFormatter
def read_counts(file):
if not hasattr(file, "read"):
file = open(file)
counts = {}
totals = dict(noun=0, verb=0, adj=0, other=0)
for l in file:
try:
count, form, tag = l.strip().split()
except ValueError:
stderr.write("Warning; ill-formed line: %s\n" % repr(l))
continue
# translate CGN tagset to word category
if tag in ("N", "VNW", "TW", "SPEC"):
cat = "noun"
elif tag in ("WW"):
cat = "verb"
elif tag in ("ADJ", "BW"):
cat = "adj"
else:
# LET LID TSW VG VZ
<|fim_middle|>
# Cornetto word forms are stored in unicode
form = form.decode("iso-8859-1")
count = int(count)
if form not in counts:
counts[form] = dict(noun=0, verb=0, adj=0, other=0)
counts[form][cat] += count
totals[cat] += count
return counts, totals
def add_count_attrib(counts, totals, cdb_lu_file):
parser = iterparse(cdb_lu_file)
for event, elem in parser:
if elem.tag == "form":
# following the ElementTree conventions,
# word form will be ascii or unicode
form = elem.get("form-spelling")
# lower case because Cornette is not consistent
cat = elem.get("form-cat").lower()
# fix category flaws in current release of Cornetto
if cat == "adjective":
cat = "adj"
elif cat == "adverb":
cat = "other"
try:
count = counts[form][cat]
except KeyError:
# form not found
count = 0
elem.set("count", str(count))
# Finally, add totals, per category and overall, to the doc root
# Note that all words _not_ in Cornetto are not included in these totals
totals["all"] = sum(totals.values())
for cat, count in totals.items():
parser.root.set("count-total-%s" % cat, str(count))
return ElementTree(parser.root)
parser = ArgumentParser(description=__doc__,
version="%(prog)s version " + __version__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("cdb_lu", type=file,
help="xml file containing the lexical units")
parser.add_argument("word_counts", type=file,
help="tabular file containing the word counts")
args = parser.parse_args()
counts, totals = read_counts(args.word_counts)
etree = add_count_attrib(counts, totals, args.cdb_lu)
etree.write(stdout, encoding="utf-8")
#def add_statistics_elem(counts, cdb_lu_file):
#"""
#adds a separate <statistics> element,
#which accomodates for other counts for other sources
#"""
#parser = iterparse(cdb_lu_file)
#for event, elem in parser:
#if elem.tag == "cdb_lu":
#try:
#count = counts[form][cat]
#except KeyError:
#count = 0
#freq_el = SubElement(elem, "statistics")
#SubElement(freq_el, "count", scr="uvt").text = str(count)
#elif elem.tag == "form":
## following the ElementTree conventions,
## word form will be ascii or unicode
#form = elem.get("form-spelling")
#cat = elem.get("form-cat")
#return ElementTree(parser.root)
<|fim▁end|> | cat = "other" |
<|file_name|>cornetto-add-counts.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
add word counts to Cornetto lexical units database file
The word count file should have three columns, delimited by white space,
containing (1) the count, (2) the lemma, (3) the main POS tag.
The tagset is assumed to be the Spoken Dutch Corpus tagset,
and the character encoding must be ISO-8859-1.
The counts appear as the value of the feature "count" on <form> elements.
The updated lexical units xml database is written to standard output.
Since we have only the lemma and the POS, and no word sense, the frequency
information is added to each matching lexical unit regardless of its sense
(i.e. the value of the "c_seq_nr" attribute).
"""
# TODO:
# - deal with multiword counts
__author__ = 'Erwin Marsi <[email protected]>'
__version__ = '0.6'
from sys import stderr, stdout
from xml.etree.cElementTree import iterparse, SubElement, tostring, ElementTree
from cornetto.argparse import ArgumentParser, RawDescriptionHelpFormatter
def read_counts(file):
if not hasattr(file, "read"):
file = open(file)
counts = {}
totals = dict(noun=0, verb=0, adj=0, other=0)
for l in file:
try:
count, form, tag = l.strip().split()
except ValueError:
stderr.write("Warning; ill-formed line: %s\n" % repr(l))
continue
# translate CGN tagset to word category
if tag in ("N", "VNW", "TW", "SPEC"):
cat = "noun"
elif tag in ("WW"):
cat = "verb"
elif tag in ("ADJ", "BW"):
cat = "adj"
else:
# LET LID TSW VG VZ
cat = "other"
# Cornetto word forms are stored in unicode
form = form.decode("iso-8859-1")
count = int(count)
if form not in counts:
<|fim_middle|>
counts[form][cat] += count
totals[cat] += count
return counts, totals
def add_count_attrib(counts, totals, cdb_lu_file):
parser = iterparse(cdb_lu_file)
for event, elem in parser:
if elem.tag == "form":
# following the ElementTree conventions,
# word form will be ascii or unicode
form = elem.get("form-spelling")
# lower case because Cornetto is not consistent
cat = elem.get("form-cat").lower()
# fix category flaws in current release of Cornetto
if cat == "adjective":
cat = "adj"
elif cat == "adverb":
cat = "other"
try:
count = counts[form][cat]
except KeyError:
# form not found
count = 0
elem.set("count", str(count))
# Finally, add totals, per category and overall, to the doc root
# Note that all words _not_ in Cornetto are not included in these totals
totals["all"] = sum(totals.values())
for cat, count in totals.items():
parser.root.set("count-total-%s" % cat, str(count))
return ElementTree(parser.root)
parser = ArgumentParser(description=__doc__,
version="%(prog)s version " + __version__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("cdb_lu", type=file,
help="xml file containing the lexical units")
parser.add_argument("word_counts", type=file,
help="tabular file containing the word counts")
args = parser.parse_args()
counts, totals = read_counts(args.word_counts)
etree = add_count_attrib(counts, totals, args.cdb_lu)
etree.write(stdout, encoding="utf-8")
#def add_statistics_elem(counts, cdb_lu_file):
#"""
#adds a separate <statistics> element,
#which accommodates counts from other sources
#"""
#parser = iterparse(cdb_lu_file)
#for event, elem in parser:
#if elem.tag == "cdb_lu":
#try:
#count = counts[form][cat]
#except KeyError:
#count = 0
#freq_el = SubElement(elem, "statistics")
#SubElement(freq_el, "count", scr="uvt").text = str(count)
#elif elem.tag == "form":
## following the ElementTree conventions,
## word form will be ascii or unicode
#form = elem.get("form-spelling")
#cat = elem.get("form-cat")
#return ElementTree(parser.root)
<|fim▁end|> | counts[form] = dict(noun=0, verb=0, adj=0, other=0) |
<|file_name|>cornetto-add-counts.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
add word counts to Cornetto lexical units database file
The word count file should have three columns, delimited by white space,
containing (1) the count, (2) the lemma, (3) the main POS tag.
The tagset is assumed to be the Spoken Dutch Corpus tagset,
and the character encoding must be ISO-8859-1.
The counts appear as the value of the feature "count" on <form> elements.
The updated lexical units xml database is written to standard output.
Since we have only the lemma and the POS, and no word sense, the frequency
information is added to each matching lexical unit regardless of its sense
(i.e. the value of the "c_seq_nr" attribute).
"""
# TODO:
# - deal with multiword counts
__author__ = 'Erwin Marsi <[email protected]>'
__version__ = '0.6'
from sys import stderr, stdout
from xml.etree.cElementTree import iterparse, SubElement, tostring, ElementTree
from cornetto.argparse import ArgumentParser, RawDescriptionHelpFormatter
def read_counts(file):
if not hasattr(file, "read"):
file = open(file)
counts = {}
totals = dict(noun=0, verb=0, adj=0, other=0)
for l in file:
try:
count, form, tag = l.strip().split()
except ValueError:
stderr.write("Warning; ill-formed line: %s\n" % repr(l))
continue
# translate CGN tagset to word category
if tag in ("N", "VNW", "TW", "SPEC"):
cat = "noun"
elif tag in ("WW"):
cat = "verb"
elif tag in ("ADJ", "BW"):
cat = "adj"
else:
# LET LID TSW VG VZ
cat = "other"
# Cornetto word forms are stored in unicode
form = form.decode("iso-8859-1")
count = int(count)
if form not in counts:
counts[form] = dict(noun=0, verb=0, adj=0, other=0)
counts[form][cat] += count
totals[cat] += count
return counts, totals
def add_count_attrib(counts, totals, cdb_lu_file):
parser = iterparse(cdb_lu_file)
for event, elem in parser:
if elem.tag == "form":
# following the ElementTree conventions,
# word form will be ascii or unicode
<|fim_middle|>
# Finally, add totals, per category and overall, to the doc root
# Note that all words _not_ in Cornetto are not included in these totals
totals["all"] = sum(totals.values())
for cat, count in totals.items():
parser.root.set("count-total-%s" % cat, str(count))
return ElementTree(parser.root)
parser = ArgumentParser(description=__doc__,
version="%(prog)s version " + __version__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("cdb_lu", type=file,
help="xml file containing the lexical units")
parser.add_argument("word_counts", type=file,
help="tabular file containing the word counts")
args = parser.parse_args()
counts, totals = read_counts(args.word_counts)
etree = add_count_attrib(counts, totals, args.cdb_lu)
etree.write(stdout, encoding="utf-8")
#def add_statistics_elem(counts, cdb_lu_file):
#"""
#adds a separate <statistics> element,
#which accommodates counts from other sources
#"""
#parser = iterparse(cdb_lu_file)
#for event, elem in parser:
#if elem.tag == "cdb_lu":
#try:
#count = counts[form][cat]
#except KeyError:
#count = 0
#freq_el = SubElement(elem, "statistics")
#SubElement(freq_el, "count", scr="uvt").text = str(count)
#elif elem.tag == "form":
## following the ElementTree conventions,
## word form will be ascii or unicode
#form = elem.get("form-spelling")
#cat = elem.get("form-cat")
#return ElementTree(parser.root)
<|fim▁end|> | form = elem.get("form-spelling")
# lower case because Cornetto is not consistent
cat = elem.get("form-cat").lower()
# fix category flaws in current release of Cornetto
if cat == "adjective":
cat = "adj"
elif cat == "adverb":
cat = "other"
try:
count = counts[form][cat]
except KeyError:
# form not found
count = 0
elem.set("count", str(count)) |
<|file_name|>cornetto-add-counts.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
add word counts to Cornetto lexical units database file
The word count file should have three columns, delimited by white space,
containing (1) the count, (2) the lemma, (3) the main POS tag.
The tagset is assumed to be the Spoken Dutch Corpus tagset,
and the character encoding must be ISO-8859-1.
The counts appear as the value of the feature "count" on <form> elements.
The updated lexical units xml database is written to standard output.
Since we have only the lemma and the POS, and no word sense, the frequency
information is added to each matching lexical unit regardless of its sense
(i.e. the value of the "c_seq_nr" attribute).
"""
# TODO:
# - deal with multiword counts
__author__ = 'Erwin Marsi <[email protected]>'
__version__ = '0.6'
from sys import stderr, stdout
from xml.etree.cElementTree import iterparse, SubElement, tostring, ElementTree
from cornetto.argparse import ArgumentParser, RawDescriptionHelpFormatter
def read_counts(file):
if not hasattr(file, "read"):
file = open(file)
counts = {}
totals = dict(noun=0, verb=0, adj=0, other=0)
for l in file:
try:
count, form, tag = l.strip().split()
except ValueError:
stderr.write("Warning; ill-formed line: %s\n" % repr(l))
continue
# translate CGN tagset to word category
if tag in ("N", "VNW", "TW", "SPEC"):
cat = "noun"
elif tag in ("WW"):
cat = "verb"
elif tag in ("ADJ", "BW"):
cat = "adj"
else:
# LET LID TSW VG VZ
cat = "other"
# Cornetto word forms are stored in unicode
form = form.decode("iso-8859-1")
count = int(count)
if form not in counts:
counts[form] = dict(noun=0, verb=0, adj=0, other=0)
counts[form][cat] += count
totals[cat] += count
return counts, totals
def add_count_attrib(counts, totals, cdb_lu_file):
parser = iterparse(cdb_lu_file)
for event, elem in parser:
if elem.tag == "form":
# following the ElementTree conventions,
# word form will be ascii or unicode
form = elem.get("form-spelling")
# lower case because Cornetto is not consistent
cat = elem.get("form-cat").lower()
# fix category flaws in current release of Cornetto
if cat == "adjective":
<|fim_middle|>
elif cat == "adverb":
cat = "other"
try:
count = counts[form][cat]
except KeyError:
# form not found
count = 0
elem.set("count", str(count))
# Finally, add totals, per category and overall, to the doc root
# Note that all words _not_ in Cornetto are not included in these totals
totals["all"] = sum(totals.values())
for cat, count in totals.items():
parser.root.set("count-total-%s" % cat, str(count))
return ElementTree(parser.root)
parser = ArgumentParser(description=__doc__,
version="%(prog)s version " + __version__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("cdb_lu", type=file,
help="xml file containing the lexical units")
parser.add_argument("word_counts", type=file,
help="tabular file containing the word counts")
args = parser.parse_args()
counts, totals = read_counts(args.word_counts)
etree = add_count_attrib(counts, totals, args.cdb_lu)
etree.write(stdout, encoding="utf-8")
#def add_statistics_elem(counts, cdb_lu_file):
#"""
#adds a separate <statistics> element,
#which accommodates counts from other sources
#"""
#parser = iterparse(cdb_lu_file)
#for event, elem in parser:
#if elem.tag == "cdb_lu":
#try:
#count = counts[form][cat]
#except KeyError:
#count = 0
#freq_el = SubElement(elem, "statistics")
#SubElement(freq_el, "count", scr="uvt").text = str(count)
#elif elem.tag == "form":
## following the ElementTree conventions,
## word form will be ascii or unicode
#form = elem.get("form-spelling")
#cat = elem.get("form-cat")
#return ElementTree(parser.root)
<|fim▁end|> | cat = "adj" |
<|file_name|>cornetto-add-counts.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
add word counts to Cornetto lexical units database file
The word count file should have three columns, delimited by white space,
containing (1) the count, (2) the lemma, (3) the main POS tag.
The tagset is assumed to be the Spoken Dutch Corpus tagset,
and the character encoding must be ISO-8859-1.
The counts appear as the value of the feature "count" on <form> elements.
The updated lexical units xml database is written to standard output.
Since we have only the lemma and the POS, and no word sense, the frequency
information is added to each matching lexical unit regardless of its sense
(i.e. the value of the "c_seq_nr" attribute).
"""
# TODO:
# - deal with multiword counts
__author__ = 'Erwin Marsi <[email protected]>'
__version__ = '0.6'
from sys import stderr, stdout
from xml.etree.cElementTree import iterparse, SubElement, tostring, ElementTree
from cornetto.argparse import ArgumentParser, RawDescriptionHelpFormatter
def read_counts(file):
if not hasattr(file, "read"):
file = open(file)
counts = {}
totals = dict(noun=0, verb=0, adj=0, other=0)
for l in file:
try:
count, form, tag = l.strip().split()
except ValueError:
stderr.write("Warning; ill-formed line: %s\n" % repr(l))
continue
# translate CGN tagset to word category
if tag in ("N", "VNW", "TW", "SPEC"):
cat = "noun"
elif tag in ("WW"):
cat = "verb"
elif tag in ("ADJ", "BW"):
cat = "adj"
else:
# LET LID TSW VG VZ
cat = "other"
# Cornetto word forms are stored in unicode
form = form.decode("iso-8859-1")
count = int(count)
if form not in counts:
counts[form] = dict(noun=0, verb=0, adj=0, other=0)
counts[form][cat] += count
totals[cat] += count
return counts, totals
def add_count_attrib(counts, totals, cdb_lu_file):
parser = iterparse(cdb_lu_file)
for event, elem in parser:
if elem.tag == "form":
# following the ElementTree conventions,
# word form will be ascii or unicode
form = elem.get("form-spelling")
# lower case because Cornetto is not consistent
cat = elem.get("form-cat").lower()
# fix category flaws in current release of Cornetto
if cat == "adjective":
cat = "adj"
elif cat == "adverb":
<|fim_middle|>
try:
count = counts[form][cat]
except KeyError:
# form not found
count = 0
elem.set("count", str(count))
# Finally, add totals, per category and overall, to the doc root
# Note that all words _not_ in Cornetto are not included in these totals
totals["all"] = sum(totals.values())
for cat, count in totals.items():
parser.root.set("count-total-%s" % cat, str(count))
return ElementTree(parser.root)
parser = ArgumentParser(description=__doc__,
version="%(prog)s version " + __version__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("cdb_lu", type=file,
help="xml file containing the lexical units")
parser.add_argument("word_counts", type=file,
help="tabular file containing the word counts")
args = parser.parse_args()
counts, totals = read_counts(args.word_counts)
etree = add_count_attrib(counts, totals, args.cdb_lu)
etree.write(stdout, encoding="utf-8")
#def add_statistics_elem(counts, cdb_lu_file):
#"""
#adds a separate <statistics> element,
#which accommodates counts from other sources
#"""
#parser = iterparse(cdb_lu_file)
#for event, elem in parser:
#if elem.tag == "cdb_lu":
#try:
#count = counts[form][cat]
#except KeyError:
#count = 0
#freq_el = SubElement(elem, "statistics")
#SubElement(freq_el, "count", scr="uvt").text = str(count)
#elif elem.tag == "form":
## following the ElementTree conventions,
## word form will be ascii or unicode
#form = elem.get("form-spelling")
#cat = elem.get("form-cat")
#return ElementTree(parser.root)
<|fim▁end|> | cat = "other" |
<|file_name|>cornetto-add-counts.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
add word counts to Cornetto lexical units database file
The word count file should have three columns, delimited by white space,
containing (1) the count, (2) the lemma, (3) the main POS tag.
The tagset is assumed to be the Spoken Dutch Corpus tagset,
and the character encoding must be ISO-8859-1.
The counts appear as the value of the feature "count" on <form> elements.
The updated lexical units xml database is written to standard output.
Since we have only the lemma and the POS, and no word sense, the frequency
information is added to each matching lexical unit regardless of its sense
(i.e. the value of the "c_seq_nr" attribute).
"""
# TODO:
# - deal with multiword counts
__author__ = 'Erwin Marsi <[email protected]>'
__version__ = '0.6'
from sys import stderr, stdout
from xml.etree.cElementTree import iterparse, SubElement, tostring, ElementTree
from cornetto.argparse import ArgumentParser, RawDescriptionHelpFormatter
def <|fim_middle|>(file):
if not hasattr(file, "read"):
file = open(file)
counts = {}
totals = dict(noun=0, verb=0, adj=0, other=0)
for l in file:
try:
count, form, tag = l.strip().split()
except ValueError:
stderr.write("Warning; ill-formed line: %s\n" % repr(l))
continue
# translate CGN tagset to word category
if tag in ("N", "VNW", "TW", "SPEC"):
cat = "noun"
elif tag in ("WW"):
cat = "verb"
elif tag in ("ADJ", "BW"):
cat = "adj"
else:
# LET LID TSW VG VZ
cat = "other"
# Cornetto word forms are stored in unicode
form = form.decode("iso-8859-1")
count = int(count)
if form not in counts:
counts[form] = dict(noun=0, verb=0, adj=0, other=0)
counts[form][cat] += count
totals[cat] += count
return counts, totals
def add_count_attrib(counts, totals, cdb_lu_file):
parser = iterparse(cdb_lu_file)
for event, elem in parser:
if elem.tag == "form":
# following the ElementTree conventions,
# word form will be ascii or unicode
form = elem.get("form-spelling")
# lower case because Cornetto is not consistent
cat = elem.get("form-cat").lower()
# fix category flaws in current release of Cornetto
if cat == "adjective":
cat = "adj"
elif cat == "adverb":
cat = "other"
try:
count = counts[form][cat]
except KeyError:
# form not found
count = 0
elem.set("count", str(count))
# Finally, add totals, per category and overall, to the doc root
# Note that all words _not_ in Cornetto are not included in these totals
totals["all"] = sum(totals.values())
for cat, count in totals.items():
parser.root.set("count-total-%s" % cat, str(count))
return ElementTree(parser.root)
parser = ArgumentParser(description=__doc__,
version="%(prog)s version " + __version__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("cdb_lu", type=file,
help="xml file containing the lexical units")
parser.add_argument("word_counts", type=file,
help="tabular file containing the word counts")
args = parser.parse_args()
counts, totals = read_counts(args.word_counts)
etree = add_count_attrib(counts, totals, args.cdb_lu)
etree.write(stdout, encoding="utf-8")
#def add_statistics_elem(counts, cdb_lu_file):
#"""
#adds a separate <statistics> element,
#which accommodates counts from other sources
#"""
#parser = iterparse(cdb_lu_file)
#for event, elem in parser:
#if elem.tag == "cdb_lu":
#try:
#count = counts[form][cat]
#except KeyError:
#count = 0
#freq_el = SubElement(elem, "statistics")
#SubElement(freq_el, "count", scr="uvt").text = str(count)
#elif elem.tag == "form":
## following the ElementTree conventions,
## word form will be ascii or unicode
#form = elem.get("form-spelling")
#cat = elem.get("form-cat")
#return ElementTree(parser.root)
<|fim▁end|> | read_counts |
<|file_name|>cornetto-add-counts.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
add word counts to Cornetto lexical units database file
The word count file should have three columns, delimited by white space,
containing (1) the count, (2) the lemma, (3) the main POS tag.
The tagset is assumed to be the Spoken Dutch Corpus tagset,
and the character encoding must be ISO-8859-1.
The counts appear as the value of the feature "count" on <form> elements.
The updated lexical units xml database is written to standard output.
Since we have only the lemma and the POS, and no word sense, the frequency
information is added to each matching lexical unit regardless of its sense
(i.e. the value of the "c_seq_nr" attribute).
"""
# TODO:
# - deal with multiword counts
__author__ = 'Erwin Marsi <[email protected]>'
__version__ = '0.6'
from sys import stderr, stdout
from xml.etree.cElementTree import iterparse, SubElement, tostring, ElementTree
from cornetto.argparse import ArgumentParser, RawDescriptionHelpFormatter
def read_counts(file):
if not hasattr(file, "read"):
file = open(file)
counts = {}
totals = dict(noun=0, verb=0, adj=0, other=0)
for l in file:
try:
count, form, tag = l.strip().split()
except ValueError:
stderr.write("Warning; ill-formed line: %s\n" % repr(l))
continue
# translate CGN tagset to word category
if tag in ("N", "VNW", "TW", "SPEC"):
cat = "noun"
elif tag in ("WW"):
cat = "verb"
elif tag in ("ADJ", "BW"):
cat = "adj"
else:
# LET LID TSW VG VZ
cat = "other"
# Cornetto word forms are stored in unicode
form = form.decode("iso-8859-1")
count = int(count)
if form not in counts:
counts[form] = dict(noun=0, verb=0, adj=0, other=0)
counts[form][cat] += count
totals[cat] += count
return counts, totals
def <|fim_middle|>(counts, totals, cdb_lu_file):
parser = iterparse(cdb_lu_file)
for event, elem in parser:
if elem.tag == "form":
# following the ElementTree conventions,
# word form will be ascii or unicode
form = elem.get("form-spelling")
# lower case because Cornetto is not consistent
cat = elem.get("form-cat").lower()
# fix category flaws in current release of Cornetto
if cat == "adjective":
cat = "adj"
elif cat == "adverb":
cat = "other"
try:
count = counts[form][cat]
except KeyError:
# form not found
count = 0
elem.set("count", str(count))
# Finally, add totals, per category and overall, to the doc root
# Note that all words _not_ in Cornetto are not included in these totals
totals["all"] = sum(totals.values())
for cat, count in totals.items():
parser.root.set("count-total-%s" % cat, str(count))
return ElementTree(parser.root)
parser = ArgumentParser(description=__doc__,
version="%(prog)s version " + __version__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("cdb_lu", type=file,
help="xml file containing the lexical units")
parser.add_argument("word_counts", type=file,
help="tabular file containing the word counts")
args = parser.parse_args()
counts, totals = read_counts(args.word_counts)
etree = add_count_attrib(counts, totals, args.cdb_lu)
etree.write(stdout, encoding="utf-8")
#def add_statistics_elem(counts, cdb_lu_file):
#"""
#adds a separate <statistics> element,
#which accommodates counts from other sources
#"""
#parser = iterparse(cdb_lu_file)
#for event, elem in parser:
#if elem.tag == "cdb_lu":
#try:
#count = counts[form][cat]
#except KeyError:
#count = 0
#freq_el = SubElement(elem, "statistics")
#SubElement(freq_el, "count", scr="uvt").text = str(count)
#elif elem.tag == "form":
## following the ElementTree conventions,
## word form will be ascii or unicode
#form = elem.get("form-spelling")
#cat = elem.get("form-cat")
#return ElementTree(parser.root)
<|fim▁end|> | add_count_attrib |
<|file_name|>work_title_v30_rc2.py<|end_file_name|><|fim▁begin|># coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.subtitle_v30_rc2 import SubtitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.translated_title_v30_rc2 import TranslatedTitleV30Rc2 # noqa: F401,E501
class WorkTitleV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'TitleV30Rc2',
'subtitle': 'SubtitleV30Rc2',
'translated_title': 'TranslatedTitleV30Rc2'
}
attribute_map = {
'title': 'title',
'subtitle': 'subtitle',
'translated_title': 'translated-title'
}
def __init__(self, title=None, subtitle=None, translated_title=None): # noqa: E501
"""WorkTitleV30Rc2 - a model defined in Swagger""" # noqa: E501
self._title = None
self._subtitle = None
self._translated_title = None
self.discriminator = None
if title is not None:
self.title = title
if subtitle is not None:
self.subtitle = subtitle
if translated_title is not None:
self.translated_title = translated_title
@property
def title(self):
"""Gets the title of this WorkTitleV30Rc2. # noqa: E501
:return: The title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TitleV30Rc2
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this WorkTitleV30Rc2.
:param title: The title of this WorkTitleV30Rc2. # noqa: E501
:type: TitleV30Rc2
"""
self._title = title
@property
def subtitle(self):
"""Gets the subtitle of this WorkTitleV30Rc2. # noqa: E501
:return: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:rtype: SubtitleV30Rc2
"""
return self._subtitle
@subtitle.setter
def subtitle(self, subtitle):
"""Sets the subtitle of this WorkTitleV30Rc2.
:param subtitle: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:type: SubtitleV30Rc2
"""
self._subtitle = subtitle
@property
def translated_title(self):
"""Gets the translated_title of this WorkTitleV30Rc2. # noqa: E501
:return: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TranslatedTitleV30Rc2
"""
return self._translated_title
@translated_title.setter
def translated_title(self, translated_title):
"""Sets the translated_title of this WorkTitleV30Rc2.
:param translated_title: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:type: TranslatedTitleV30Rc2
"""
self._translated_title = translated_title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkTitleV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""<|fim▁hole|> return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other<|fim▁end|> | if not isinstance(other, WorkTitleV30Rc2): |
<|file_name|>work_title_v30_rc2.py<|end_file_name|><|fim▁begin|># coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.subtitle_v30_rc2 import SubtitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.translated_title_v30_rc2 import TranslatedTitleV30Rc2 # noqa: F401,E501
class WorkTitleV30Rc2(object):
<|fim_middle|>
<|fim▁end|> | """NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'TitleV30Rc2',
'subtitle': 'SubtitleV30Rc2',
'translated_title': 'TranslatedTitleV30Rc2'
}
attribute_map = {
'title': 'title',
'subtitle': 'subtitle',
'translated_title': 'translated-title'
}
def __init__(self, title=None, subtitle=None, translated_title=None): # noqa: E501
"""WorkTitleV30Rc2 - a model defined in Swagger""" # noqa: E501
self._title = None
self._subtitle = None
self._translated_title = None
self.discriminator = None
if title is not None:
self.title = title
if subtitle is not None:
self.subtitle = subtitle
if translated_title is not None:
self.translated_title = translated_title
@property
def title(self):
"""Gets the title of this WorkTitleV30Rc2. # noqa: E501
:return: The title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TitleV30Rc2
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this WorkTitleV30Rc2.
:param title: The title of this WorkTitleV30Rc2. # noqa: E501
:type: TitleV30Rc2
"""
self._title = title
@property
def subtitle(self):
"""Gets the subtitle of this WorkTitleV30Rc2. # noqa: E501
:return: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:rtype: SubtitleV30Rc2
"""
return self._subtitle
@subtitle.setter
def subtitle(self, subtitle):
"""Sets the subtitle of this WorkTitleV30Rc2.
:param subtitle: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:type: SubtitleV30Rc2
"""
self._subtitle = subtitle
@property
def translated_title(self):
"""Gets the translated_title of this WorkTitleV30Rc2. # noqa: E501
:return: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TranslatedTitleV30Rc2
"""
return self._translated_title
@translated_title.setter
def translated_title(self, translated_title):
"""Sets the translated_title of this WorkTitleV30Rc2.
:param translated_title: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:type: TranslatedTitleV30Rc2
"""
self._translated_title = translated_title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkTitleV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkTitleV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other |
<|file_name|>work_title_v30_rc2.py<|end_file_name|><|fim▁begin|># coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.subtitle_v30_rc2 import SubtitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.translated_title_v30_rc2 import TranslatedTitleV30Rc2 # noqa: F401,E501
class WorkTitleV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'TitleV30Rc2',
'subtitle': 'SubtitleV30Rc2',
'translated_title': 'TranslatedTitleV30Rc2'
}
attribute_map = {
'title': 'title',
'subtitle': 'subtitle',
'translated_title': 'translated-title'
}
def __init__(self, title=None, subtitle=None, translated_title=None): # noqa: E501
<|fim_middle|>
@property
def title(self):
"""Gets the title of this WorkTitleV30Rc2. # noqa: E501
:return: The title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TitleV30Rc2
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this WorkTitleV30Rc2.
:param title: The title of this WorkTitleV30Rc2. # noqa: E501
:type: TitleV30Rc2
"""
self._title = title
@property
def subtitle(self):
"""Gets the subtitle of this WorkTitleV30Rc2. # noqa: E501
:return: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:rtype: SubtitleV30Rc2
"""
return self._subtitle
@subtitle.setter
def subtitle(self, subtitle):
"""Sets the subtitle of this WorkTitleV30Rc2.
:param subtitle: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:type: SubtitleV30Rc2
"""
self._subtitle = subtitle
@property
def translated_title(self):
"""Gets the translated_title of this WorkTitleV30Rc2. # noqa: E501
:return: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TranslatedTitleV30Rc2
"""
return self._translated_title
@translated_title.setter
def translated_title(self, translated_title):
"""Sets the translated_title of this WorkTitleV30Rc2.
:param translated_title: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:type: TranslatedTitleV30Rc2
"""
self._translated_title = translated_title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkTitleV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkTitleV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
<|fim▁end|> | """WorkTitleV30Rc2 - a model defined in Swagger""" # noqa: E501
self._title = None
self._subtitle = None
self._translated_title = None
self.discriminator = None
if title is not None:
self.title = title
if subtitle is not None:
self.subtitle = subtitle
if translated_title is not None:
self.translated_title = translated_title |
<|file_name|>work_title_v30_rc2.py<|end_file_name|><|fim▁begin|># coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.subtitle_v30_rc2 import SubtitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.translated_title_v30_rc2 import TranslatedTitleV30Rc2 # noqa: F401,E501
class WorkTitleV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'TitleV30Rc2',
'subtitle': 'SubtitleV30Rc2',
'translated_title': 'TranslatedTitleV30Rc2'
}
attribute_map = {
'title': 'title',
'subtitle': 'subtitle',
'translated_title': 'translated-title'
}
def __init__(self, title=None, subtitle=None, translated_title=None): # noqa: E501
"""WorkTitleV30Rc2 - a model defined in Swagger""" # noqa: E501
self._title = None
self._subtitle = None
self._translated_title = None
self.discriminator = None
if title is not None:
self.title = title
if subtitle is not None:
self.subtitle = subtitle
if translated_title is not None:
self.translated_title = translated_title
@property
def title(self):
<|fim_middle|>
@title.setter
def title(self, title):
"""Sets the title of this WorkTitleV30Rc2.
:param title: The title of this WorkTitleV30Rc2. # noqa: E501
:type: TitleV30Rc2
"""
self._title = title
@property
def subtitle(self):
"""Gets the subtitle of this WorkTitleV30Rc2. # noqa: E501
:return: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:rtype: SubtitleV30Rc2
"""
return self._subtitle
@subtitle.setter
def subtitle(self, subtitle):
"""Sets the subtitle of this WorkTitleV30Rc2.
:param subtitle: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:type: SubtitleV30Rc2
"""
self._subtitle = subtitle
@property
def translated_title(self):
"""Gets the translated_title of this WorkTitleV30Rc2. # noqa: E501
:return: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TranslatedTitleV30Rc2
"""
return self._translated_title
@translated_title.setter
def translated_title(self, translated_title):
"""Sets the translated_title of this WorkTitleV30Rc2.
:param translated_title: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:type: TranslatedTitleV30Rc2
"""
self._translated_title = translated_title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkTitleV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkTitleV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
<|fim▁end|> | """Gets the title of this WorkTitleV30Rc2. # noqa: E501
:return: The title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TitleV30Rc2
"""
return self._title |
<|file_name|>work_title_v30_rc2.py<|end_file_name|><|fim▁begin|># coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.subtitle_v30_rc2 import SubtitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.translated_title_v30_rc2 import TranslatedTitleV30Rc2 # noqa: F401,E501
class WorkTitleV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'TitleV30Rc2',
'subtitle': 'SubtitleV30Rc2',
'translated_title': 'TranslatedTitleV30Rc2'
}
attribute_map = {
'title': 'title',
'subtitle': 'subtitle',
'translated_title': 'translated-title'
}
def __init__(self, title=None, subtitle=None, translated_title=None): # noqa: E501
"""WorkTitleV30Rc2 - a model defined in Swagger""" # noqa: E501
self._title = None
self._subtitle = None
self._translated_title = None
self.discriminator = None
if title is not None:
self.title = title
if subtitle is not None:
self.subtitle = subtitle
if translated_title is not None:
self.translated_title = translated_title
@property
def title(self):
"""Gets the title of this WorkTitleV30Rc2. # noqa: E501
:return: The title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TitleV30Rc2
"""
return self._title
@title.setter
def title(self, title):
<|fim_middle|>
@property
def subtitle(self):
"""Gets the subtitle of this WorkTitleV30Rc2. # noqa: E501
:return: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:rtype: SubtitleV30Rc2
"""
return self._subtitle
@subtitle.setter
def subtitle(self, subtitle):
"""Sets the subtitle of this WorkTitleV30Rc2.
:param subtitle: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:type: SubtitleV30Rc2
"""
self._subtitle = subtitle
@property
def translated_title(self):
"""Gets the translated_title of this WorkTitleV30Rc2. # noqa: E501
:return: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TranslatedTitleV30Rc2
"""
return self._translated_title
@translated_title.setter
def translated_title(self, translated_title):
"""Sets the translated_title of this WorkTitleV30Rc2.
:param translated_title: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:type: TranslatedTitleV30Rc2
"""
self._translated_title = translated_title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkTitleV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkTitleV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
<|fim▁end|> | """Sets the title of this WorkTitleV30Rc2.
:param title: The title of this WorkTitleV30Rc2. # noqa: E501
:type: TitleV30Rc2
"""
self._title = title |
<|file_name|>work_title_v30_rc2.py<|end_file_name|><|fim▁begin|># coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.subtitle_v30_rc2 import SubtitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.translated_title_v30_rc2 import TranslatedTitleV30Rc2 # noqa: F401,E501
class WorkTitleV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'TitleV30Rc2',
'subtitle': 'SubtitleV30Rc2',
'translated_title': 'TranslatedTitleV30Rc2'
}
attribute_map = {
'title': 'title',
'subtitle': 'subtitle',
'translated_title': 'translated-title'
}
def __init__(self, title=None, subtitle=None, translated_title=None): # noqa: E501
"""WorkTitleV30Rc2 - a model defined in Swagger""" # noqa: E501
self._title = None
self._subtitle = None
self._translated_title = None
self.discriminator = None
if title is not None:
self.title = title
if subtitle is not None:
self.subtitle = subtitle
if translated_title is not None:
self.translated_title = translated_title
@property
def title(self):
"""Gets the title of this WorkTitleV30Rc2. # noqa: E501
:return: The title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TitleV30Rc2
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this WorkTitleV30Rc2.
:param title: The title of this WorkTitleV30Rc2. # noqa: E501
:type: TitleV30Rc2
"""
self._title = title
@property
def subtitle(self):
<|fim_middle|>
@subtitle.setter
def subtitle(self, subtitle):
"""Sets the subtitle of this WorkTitleV30Rc2.
:param subtitle: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:type: SubtitleV30Rc2
"""
self._subtitle = subtitle
@property
def translated_title(self):
"""Gets the translated_title of this WorkTitleV30Rc2. # noqa: E501
:return: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TranslatedTitleV30Rc2
"""
return self._translated_title
@translated_title.setter
def translated_title(self, translated_title):
"""Sets the translated_title of this WorkTitleV30Rc2.
:param translated_title: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:type: TranslatedTitleV30Rc2
"""
self._translated_title = translated_title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkTitleV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkTitleV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
<|fim▁end|> | """Gets the subtitle of this WorkTitleV30Rc2. # noqa: E501
:return: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:rtype: SubtitleV30Rc2
"""
return self._subtitle |
<|file_name|>work_title_v30_rc2.py<|end_file_name|><|fim▁begin|># coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.subtitle_v30_rc2 import SubtitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.translated_title_v30_rc2 import TranslatedTitleV30Rc2 # noqa: F401,E501
class WorkTitleV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'TitleV30Rc2',
'subtitle': 'SubtitleV30Rc2',
'translated_title': 'TranslatedTitleV30Rc2'
}
attribute_map = {
'title': 'title',
'subtitle': 'subtitle',
'translated_title': 'translated-title'
}
def __init__(self, title=None, subtitle=None, translated_title=None): # noqa: E501
"""WorkTitleV30Rc2 - a model defined in Swagger""" # noqa: E501
self._title = None
self._subtitle = None
self._translated_title = None
self.discriminator = None
if title is not None:
self.title = title
if subtitle is not None:
self.subtitle = subtitle
if translated_title is not None:
self.translated_title = translated_title
@property
def title(self):
"""Gets the title of this WorkTitleV30Rc2. # noqa: E501
:return: The title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TitleV30Rc2
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this WorkTitleV30Rc2.
:param title: The title of this WorkTitleV30Rc2. # noqa: E501
:type: TitleV30Rc2
"""
self._title = title
@property
def subtitle(self):
"""Gets the subtitle of this WorkTitleV30Rc2. # noqa: E501
:return: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:rtype: SubtitleV30Rc2
"""
return self._subtitle
@subtitle.setter
def subtitle(self, subtitle):
<|fim_middle|>
@property
def translated_title(self):
"""Gets the translated_title of this WorkTitleV30Rc2. # noqa: E501
:return: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TranslatedTitleV30Rc2
"""
return self._translated_title
@translated_title.setter
def translated_title(self, translated_title):
"""Sets the translated_title of this WorkTitleV30Rc2.
:param translated_title: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:type: TranslatedTitleV30Rc2
"""
self._translated_title = translated_title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkTitleV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkTitleV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
<|fim▁end|> | """Sets the subtitle of this WorkTitleV30Rc2.
:param subtitle: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:type: SubtitleV30Rc2
"""
self._subtitle = subtitle |
<|file_name|>work_title_v30_rc2.py<|end_file_name|><|fim▁begin|># coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.subtitle_v30_rc2 import SubtitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.translated_title_v30_rc2 import TranslatedTitleV30Rc2 # noqa: F401,E501
class WorkTitleV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'TitleV30Rc2',
'subtitle': 'SubtitleV30Rc2',
'translated_title': 'TranslatedTitleV30Rc2'
}
attribute_map = {
'title': 'title',
'subtitle': 'subtitle',
'translated_title': 'translated-title'
}
def __init__(self, title=None, subtitle=None, translated_title=None): # noqa: E501
"""WorkTitleV30Rc2 - a model defined in Swagger""" # noqa: E501
self._title = None
self._subtitle = None
self._translated_title = None
self.discriminator = None
if title is not None:
self.title = title
if subtitle is not None:
self.subtitle = subtitle
if translated_title is not None:
self.translated_title = translated_title
@property
def title(self):
"""Gets the title of this WorkTitleV30Rc2. # noqa: E501
:return: The title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TitleV30Rc2
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this WorkTitleV30Rc2.
:param title: The title of this WorkTitleV30Rc2. # noqa: E501
:type: TitleV30Rc2
"""
self._title = title
@property
def subtitle(self):
"""Gets the subtitle of this WorkTitleV30Rc2. # noqa: E501
:return: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:rtype: SubtitleV30Rc2
"""
return self._subtitle
@subtitle.setter
def subtitle(self, subtitle):
"""Sets the subtitle of this WorkTitleV30Rc2.
:param subtitle: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:type: SubtitleV30Rc2
"""
self._subtitle = subtitle
@property
def translated_title(self):
<|fim_middle|>
@translated_title.setter
def translated_title(self, translated_title):
"""Sets the translated_title of this WorkTitleV30Rc2.
:param translated_title: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:type: TranslatedTitleV30Rc2
"""
self._translated_title = translated_title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkTitleV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkTitleV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
<|fim▁end|> | """Gets the translated_title of this WorkTitleV30Rc2. # noqa: E501
:return: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TranslatedTitleV30Rc2
"""
return self._translated_title |
<|file_name|>work_title_v30_rc2.py<|end_file_name|><|fim▁begin|># coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.subtitle_v30_rc2 import SubtitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.translated_title_v30_rc2 import TranslatedTitleV30Rc2 # noqa: F401,E501
class WorkTitleV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'TitleV30Rc2',
'subtitle': 'SubtitleV30Rc2',
'translated_title': 'TranslatedTitleV30Rc2'
}
attribute_map = {
'title': 'title',
'subtitle': 'subtitle',
'translated_title': 'translated-title'
}
def __init__(self, title=None, subtitle=None, translated_title=None): # noqa: E501
"""WorkTitleV30Rc2 - a model defined in Swagger""" # noqa: E501
self._title = None
self._subtitle = None
self._translated_title = None
self.discriminator = None
if title is not None:
self.title = title
if subtitle is not None:
self.subtitle = subtitle
if translated_title is not None:
self.translated_title = translated_title
@property
def title(self):
"""Gets the title of this WorkTitleV30Rc2. # noqa: E501
:return: The title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TitleV30Rc2
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this WorkTitleV30Rc2.
:param title: The title of this WorkTitleV30Rc2. # noqa: E501
:type: TitleV30Rc2
"""
self._title = title
@property
def subtitle(self):
"""Gets the subtitle of this WorkTitleV30Rc2. # noqa: E501
:return: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:rtype: SubtitleV30Rc2
"""
return self._subtitle
@subtitle.setter
def subtitle(self, subtitle):
"""Sets the subtitle of this WorkTitleV30Rc2.
:param subtitle: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:type: SubtitleV30Rc2
"""
self._subtitle = subtitle
@property
def translated_title(self):
"""Gets the translated_title of this WorkTitleV30Rc2. # noqa: E501
:return: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TranslatedTitleV30Rc2
"""
return self._translated_title
@translated_title.setter
def translated_title(self, translated_title):
<|fim_middle|>
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkTitleV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkTitleV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
<|fim▁end|> | """Sets the translated_title of this WorkTitleV30Rc2.
:param translated_title: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:type: TranslatedTitleV30Rc2
"""
self._translated_title = translated_title |
<|file_name|>work_title_v30_rc2.py<|end_file_name|><|fim▁begin|># coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.subtitle_v30_rc2 import SubtitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.translated_title_v30_rc2 import TranslatedTitleV30Rc2 # noqa: F401,E501
class WorkTitleV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'TitleV30Rc2',
'subtitle': 'SubtitleV30Rc2',
'translated_title': 'TranslatedTitleV30Rc2'
}
attribute_map = {
'title': 'title',
'subtitle': 'subtitle',
'translated_title': 'translated-title'
}
def __init__(self, title=None, subtitle=None, translated_title=None): # noqa: E501
"""WorkTitleV30Rc2 - a model defined in Swagger""" # noqa: E501
self._title = None
self._subtitle = None
self._translated_title = None
self.discriminator = None
if title is not None:
self.title = title
if subtitle is not None:
self.subtitle = subtitle
if translated_title is not None:
self.translated_title = translated_title
@property
def title(self):
"""Gets the title of this WorkTitleV30Rc2. # noqa: E501
:return: The title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TitleV30Rc2
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this WorkTitleV30Rc2.
:param title: The title of this WorkTitleV30Rc2. # noqa: E501
:type: TitleV30Rc2
"""
self._title = title
@property
def subtitle(self):
"""Gets the subtitle of this WorkTitleV30Rc2. # noqa: E501
:return: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:rtype: SubtitleV30Rc2
"""
return self._subtitle
@subtitle.setter
def subtitle(self, subtitle):
"""Sets the subtitle of this WorkTitleV30Rc2.
:param subtitle: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:type: SubtitleV30Rc2
"""
self._subtitle = subtitle
@property
def translated_title(self):
"""Gets the translated_title of this WorkTitleV30Rc2. # noqa: E501
:return: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TranslatedTitleV30Rc2
"""
return self._translated_title
@translated_title.setter
def translated_title(self, translated_title):
"""Sets the translated_title of this WorkTitleV30Rc2.
:param translated_title: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:type: TranslatedTitleV30Rc2
"""
self._translated_title = translated_title
def to_dict(self):
<|fim_middle|>
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkTitleV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
<|fim▁end|> | """Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkTitleV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result |
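Since the same generated model recurs throughout these rows, a brief usage sketch may help readers skimming it. Only `WorkTitleV30Rc2` and the import paths visible above are taken from the dump; the assumption that `TitleV30Rc2` accepts a `value` keyword is not shown here and may not match the real generated class.

```python
# Usage sketch under the assumptions stated in the lead-in.
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2            # import path shown in the rows above
from orcid_api_v3.models.work_title_v30_rc2 import WorkTitleV30Rc2   # module name inferred from the file name

work_title = WorkTitleV30Rc2(title=TitleV30Rc2(value="An example work"))  # `value` kwarg is an assumption
print(work_title.to_dict())              # nested models are serialised via their own to_dict()
print(work_title == WorkTitleV30Rc2())   # False: __eq__ compares the instances' __dict__
```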
<|file_name|>work_title_v30_rc2.py<|end_file_name|><|fim▁begin|># coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.subtitle_v30_rc2 import SubtitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.translated_title_v30_rc2 import TranslatedTitleV30Rc2 # noqa: F401,E501
class WorkTitleV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'TitleV30Rc2',
'subtitle': 'SubtitleV30Rc2',
'translated_title': 'TranslatedTitleV30Rc2'
}
attribute_map = {
'title': 'title',
'subtitle': 'subtitle',
'translated_title': 'translated-title'
}
def __init__(self, title=None, subtitle=None, translated_title=None): # noqa: E501
"""WorkTitleV30Rc2 - a model defined in Swagger""" # noqa: E501
self._title = None
self._subtitle = None
self._translated_title = None
self.discriminator = None
if title is not None:
self.title = title
if subtitle is not None:
self.subtitle = subtitle
if translated_title is not None:
self.translated_title = translated_title
@property
def title(self):
"""Gets the title of this WorkTitleV30Rc2. # noqa: E501
:return: The title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TitleV30Rc2
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this WorkTitleV30Rc2.
:param title: The title of this WorkTitleV30Rc2. # noqa: E501
:type: TitleV30Rc2
"""
self._title = title
@property
def subtitle(self):
"""Gets the subtitle of this WorkTitleV30Rc2. # noqa: E501
:return: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:rtype: SubtitleV30Rc2
"""
return self._subtitle
@subtitle.setter
def subtitle(self, subtitle):
"""Sets the subtitle of this WorkTitleV30Rc2.
:param subtitle: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:type: SubtitleV30Rc2
"""
self._subtitle = subtitle
@property
def translated_title(self):
"""Gets the translated_title of this WorkTitleV30Rc2. # noqa: E501
:return: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TranslatedTitleV30Rc2
"""
return self._translated_title
@translated_title.setter
def translated_title(self, translated_title):
"""Sets the translated_title of this WorkTitleV30Rc2.
:param translated_title: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:type: TranslatedTitleV30Rc2
"""
self._translated_title = translated_title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkTitleV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
<|fim_middle|>
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkTitleV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
<|fim▁end|> | """Returns the string representation of the model"""
return pprint.pformat(self.to_dict()) |
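One detail worth noting in the generated class: `to_dict()` keys its output by Python attribute names (`translated_title`), while `attribute_map` records the JSON wire names (`translated-title`). The sketch below remaps the serialised dict onto the JSON keys using only what the class itself defines; the import path is inferred from the file name in the prompt header.

```python
# Remap to_dict() output onto the JSON field names declared in attribute_map.
from orcid_api_v3.models.work_title_v30_rc2 import WorkTitleV30Rc2  # path inferred, see lead-in

def to_json_dict(model):
    """Return a generated model's dict keyed by its JSON field names."""
    return {model.attribute_map[attr]: value for attr, value in model.to_dict().items()}

# Prints {'title': None, 'subtitle': None, 'translated-title': None} for a default instance.
print(to_json_dict(WorkTitleV30Rc2()))
```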
<|file_name|>work_title_v30_rc2.py<|end_file_name|><|fim▁begin|># coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.subtitle_v30_rc2 import SubtitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.translated_title_v30_rc2 import TranslatedTitleV30Rc2 # noqa: F401,E501
class WorkTitleV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'TitleV30Rc2',
'subtitle': 'SubtitleV30Rc2',
'translated_title': 'TranslatedTitleV30Rc2'
}
attribute_map = {
'title': 'title',
'subtitle': 'subtitle',
'translated_title': 'translated-title'
}
def __init__(self, title=None, subtitle=None, translated_title=None): # noqa: E501
"""WorkTitleV30Rc2 - a model defined in Swagger""" # noqa: E501
self._title = None
self._subtitle = None
self._translated_title = None
self.discriminator = None
if title is not None:
self.title = title
if subtitle is not None:
self.subtitle = subtitle
if translated_title is not None:
self.translated_title = translated_title
@property
def title(self):
"""Gets the title of this WorkTitleV30Rc2. # noqa: E501
:return: The title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TitleV30Rc2
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this WorkTitleV30Rc2.
:param title: The title of this WorkTitleV30Rc2. # noqa: E501
:type: TitleV30Rc2
"""
self._title = title
@property
def subtitle(self):
"""Gets the subtitle of this WorkTitleV30Rc2. # noqa: E501
:return: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:rtype: SubtitleV30Rc2
"""
return self._subtitle
@subtitle.setter
def subtitle(self, subtitle):
"""Sets the subtitle of this WorkTitleV30Rc2.
:param subtitle: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:type: SubtitleV30Rc2
"""
self._subtitle = subtitle
@property
def translated_title(self):
"""Gets the translated_title of this WorkTitleV30Rc2. # noqa: E501
:return: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TranslatedTitleV30Rc2
"""
return self._translated_title
@translated_title.setter
def translated_title(self, translated_title):
"""Sets the translated_title of this WorkTitleV30Rc2.
:param translated_title: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:type: TranslatedTitleV30Rc2
"""
self._translated_title = translated_title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkTitleV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
<|fim_middle|>
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkTitleV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
<|fim▁end|> | """For `print` and `pprint`"""
return self.to_str() |
<|file_name|>work_title_v30_rc2.py<|end_file_name|><|fim▁begin|># coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.subtitle_v30_rc2 import SubtitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.translated_title_v30_rc2 import TranslatedTitleV30Rc2 # noqa: F401,E501
class WorkTitleV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'TitleV30Rc2',
'subtitle': 'SubtitleV30Rc2',
'translated_title': 'TranslatedTitleV30Rc2'
}
attribute_map = {
'title': 'title',
'subtitle': 'subtitle',
'translated_title': 'translated-title'
}
def __init__(self, title=None, subtitle=None, translated_title=None): # noqa: E501
"""WorkTitleV30Rc2 - a model defined in Swagger""" # noqa: E501
self._title = None
self._subtitle = None
self._translated_title = None
self.discriminator = None
if title is not None:
self.title = title
if subtitle is not None:
self.subtitle = subtitle
if translated_title is not None:
self.translated_title = translated_title
@property
def title(self):
"""Gets the title of this WorkTitleV30Rc2. # noqa: E501
:return: The title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TitleV30Rc2
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this WorkTitleV30Rc2.
:param title: The title of this WorkTitleV30Rc2. # noqa: E501
:type: TitleV30Rc2
"""
self._title = title
@property
def subtitle(self):
"""Gets the subtitle of this WorkTitleV30Rc2. # noqa: E501
:return: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:rtype: SubtitleV30Rc2
"""
return self._subtitle
@subtitle.setter
def subtitle(self, subtitle):
"""Sets the subtitle of this WorkTitleV30Rc2.
:param subtitle: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:type: SubtitleV30Rc2
"""
self._subtitle = subtitle
@property
def translated_title(self):
"""Gets the translated_title of this WorkTitleV30Rc2. # noqa: E501
:return: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TranslatedTitleV30Rc2
"""
return self._translated_title
@translated_title.setter
def translated_title(self, translated_title):
"""Sets the translated_title of this WorkTitleV30Rc2.
:param translated_title: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:type: TranslatedTitleV30Rc2
"""
self._translated_title = translated_title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkTitleV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
<|fim_middle|>
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
<|fim▁end|> | """Returns true if both objects are equal"""
if not isinstance(other, WorkTitleV30Rc2):
return False
return self.__dict__ == other.__dict__ |
<|file_name|>work_title_v30_rc2.py<|end_file_name|><|fim▁begin|># coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.subtitle_v30_rc2 import SubtitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.translated_title_v30_rc2 import TranslatedTitleV30Rc2 # noqa: F401,E501
class WorkTitleV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'TitleV30Rc2',
'subtitle': 'SubtitleV30Rc2',
'translated_title': 'TranslatedTitleV30Rc2'
}
attribute_map = {
'title': 'title',
'subtitle': 'subtitle',
'translated_title': 'translated-title'
}
def __init__(self, title=None, subtitle=None, translated_title=None): # noqa: E501
"""WorkTitleV30Rc2 - a model defined in Swagger""" # noqa: E501
self._title = None
self._subtitle = None
self._translated_title = None
self.discriminator = None
if title is not None:
self.title = title
if subtitle is not None:
self.subtitle = subtitle
if translated_title is not None:
self.translated_title = translated_title
@property
def title(self):
"""Gets the title of this WorkTitleV30Rc2. # noqa: E501
:return: The title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TitleV30Rc2
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this WorkTitleV30Rc2.
:param title: The title of this WorkTitleV30Rc2. # noqa: E501
:type: TitleV30Rc2
"""
self._title = title
@property
def subtitle(self):
"""Gets the subtitle of this WorkTitleV30Rc2. # noqa: E501
:return: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:rtype: SubtitleV30Rc2
"""
return self._subtitle
@subtitle.setter
def subtitle(self, subtitle):
"""Sets the subtitle of this WorkTitleV30Rc2.
:param subtitle: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:type: SubtitleV30Rc2
"""
self._subtitle = subtitle
@property
def translated_title(self):
"""Gets the translated_title of this WorkTitleV30Rc2. # noqa: E501
:return: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TranslatedTitleV30Rc2
"""
return self._translated_title
@translated_title.setter
def translated_title(self, translated_title):
"""Sets the translated_title of this WorkTitleV30Rc2.
:param translated_title: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:type: TranslatedTitleV30Rc2
"""
self._translated_title = translated_title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkTitleV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkTitleV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
<|fim_middle|>
<|fim▁end|> | """Returns true if both objects are not equal"""
return not self == other |
<|file_name|>work_title_v30_rc2.py<|end_file_name|><|fim▁begin|># coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.subtitle_v30_rc2 import SubtitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.translated_title_v30_rc2 import TranslatedTitleV30Rc2 # noqa: F401,E501
class WorkTitleV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'TitleV30Rc2',
'subtitle': 'SubtitleV30Rc2',
'translated_title': 'TranslatedTitleV30Rc2'
}
attribute_map = {
'title': 'title',
'subtitle': 'subtitle',
'translated_title': 'translated-title'
}
def __init__(self, title=None, subtitle=None, translated_title=None): # noqa: E501
"""WorkTitleV30Rc2 - a model defined in Swagger""" # noqa: E501
self._title = None
self._subtitle = None
self._translated_title = None
self.discriminator = None
if title is not None:
<|fim_middle|>
if subtitle is not None:
self.subtitle = subtitle
if translated_title is not None:
self.translated_title = translated_title
@property
def title(self):
"""Gets the title of this WorkTitleV30Rc2. # noqa: E501
:return: The title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TitleV30Rc2
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this WorkTitleV30Rc2.
:param title: The title of this WorkTitleV30Rc2. # noqa: E501
:type: TitleV30Rc2
"""
self._title = title
@property
def subtitle(self):
"""Gets the subtitle of this WorkTitleV30Rc2. # noqa: E501
:return: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:rtype: SubtitleV30Rc2
"""
return self._subtitle
@subtitle.setter
def subtitle(self, subtitle):
"""Sets the subtitle of this WorkTitleV30Rc2.
:param subtitle: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:type: SubtitleV30Rc2
"""
self._subtitle = subtitle
@property
def translated_title(self):
"""Gets the translated_title of this WorkTitleV30Rc2. # noqa: E501
:return: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TranslatedTitleV30Rc2
"""
return self._translated_title
@translated_title.setter
def translated_title(self, translated_title):
"""Sets the translated_title of this WorkTitleV30Rc2.
:param translated_title: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:type: TranslatedTitleV30Rc2
"""
self._translated_title = translated_title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkTitleV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkTitleV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
<|fim▁end|> | self.title = title |
<|file_name|>work_title_v30_rc2.py<|end_file_name|><|fim▁begin|># coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.subtitle_v30_rc2 import SubtitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.translated_title_v30_rc2 import TranslatedTitleV30Rc2 # noqa: F401,E501
class WorkTitleV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'TitleV30Rc2',
'subtitle': 'SubtitleV30Rc2',
'translated_title': 'TranslatedTitleV30Rc2'
}
attribute_map = {
'title': 'title',
'subtitle': 'subtitle',
'translated_title': 'translated-title'
}
def __init__(self, title=None, subtitle=None, translated_title=None): # noqa: E501
"""WorkTitleV30Rc2 - a model defined in Swagger""" # noqa: E501
self._title = None
self._subtitle = None
self._translated_title = None
self.discriminator = None
if title is not None:
self.title = title
if subtitle is not None:
<|fim_middle|>
if translated_title is not None:
self.translated_title = translated_title
@property
def title(self):
"""Gets the title of this WorkTitleV30Rc2. # noqa: E501
:return: The title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TitleV30Rc2
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this WorkTitleV30Rc2.
:param title: The title of this WorkTitleV30Rc2. # noqa: E501
:type: TitleV30Rc2
"""
self._title = title
@property
def subtitle(self):
"""Gets the subtitle of this WorkTitleV30Rc2. # noqa: E501
:return: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:rtype: SubtitleV30Rc2
"""
return self._subtitle
@subtitle.setter
def subtitle(self, subtitle):
"""Sets the subtitle of this WorkTitleV30Rc2.
:param subtitle: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:type: SubtitleV30Rc2
"""
self._subtitle = subtitle
@property
def translated_title(self):
"""Gets the translated_title of this WorkTitleV30Rc2. # noqa: E501
:return: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TranslatedTitleV30Rc2
"""
return self._translated_title
@translated_title.setter
def translated_title(self, translated_title):
"""Sets the translated_title of this WorkTitleV30Rc2.
:param translated_title: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:type: TranslatedTitleV30Rc2
"""
self._translated_title = translated_title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkTitleV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkTitleV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
<|fim▁end|> | self.subtitle = subtitle |
<|file_name|>work_title_v30_rc2.py<|end_file_name|><|fim▁begin|># coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.subtitle_v30_rc2 import SubtitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.translated_title_v30_rc2 import TranslatedTitleV30Rc2 # noqa: F401,E501
class WorkTitleV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'TitleV30Rc2',
'subtitle': 'SubtitleV30Rc2',
'translated_title': 'TranslatedTitleV30Rc2'
}
attribute_map = {
'title': 'title',
'subtitle': 'subtitle',
'translated_title': 'translated-title'
}
def __init__(self, title=None, subtitle=None, translated_title=None): # noqa: E501
"""WorkTitleV30Rc2 - a model defined in Swagger""" # noqa: E501
self._title = None
self._subtitle = None
self._translated_title = None
self.discriminator = None
if title is not None:
self.title = title
if subtitle is not None:
self.subtitle = subtitle
if translated_title is not None:
<|fim_middle|>
@property
def title(self):
"""Gets the title of this WorkTitleV30Rc2. # noqa: E501
:return: The title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TitleV30Rc2
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this WorkTitleV30Rc2.
:param title: The title of this WorkTitleV30Rc2. # noqa: E501
:type: TitleV30Rc2
"""
self._title = title
@property
def subtitle(self):
"""Gets the subtitle of this WorkTitleV30Rc2. # noqa: E501
:return: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:rtype: SubtitleV30Rc2
"""
return self._subtitle
@subtitle.setter
def subtitle(self, subtitle):
"""Sets the subtitle of this WorkTitleV30Rc2.
:param subtitle: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:type: SubtitleV30Rc2
"""
self._subtitle = subtitle
@property
def translated_title(self):
"""Gets the translated_title of this WorkTitleV30Rc2. # noqa: E501
:return: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TranslatedTitleV30Rc2
"""
return self._translated_title
@translated_title.setter
def translated_title(self, translated_title):
"""Sets the translated_title of this WorkTitleV30Rc2.
:param translated_title: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:type: TranslatedTitleV30Rc2
"""
self._translated_title = translated_title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkTitleV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkTitleV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
<|fim▁end|> | self.translated_title = translated_title |
<|file_name|>work_title_v30_rc2.py<|end_file_name|><|fim▁begin|># coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.subtitle_v30_rc2 import SubtitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.translated_title_v30_rc2 import TranslatedTitleV30Rc2 # noqa: F401,E501
class WorkTitleV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'TitleV30Rc2',
'subtitle': 'SubtitleV30Rc2',
'translated_title': 'TranslatedTitleV30Rc2'
}
attribute_map = {
'title': 'title',
'subtitle': 'subtitle',
'translated_title': 'translated-title'
}
def __init__(self, title=None, subtitle=None, translated_title=None): # noqa: E501
"""WorkTitleV30Rc2 - a model defined in Swagger""" # noqa: E501
self._title = None
self._subtitle = None
self._translated_title = None
self.discriminator = None
if title is not None:
self.title = title
if subtitle is not None:
self.subtitle = subtitle
if translated_title is not None:
self.translated_title = translated_title
@property
def title(self):
"""Gets the title of this WorkTitleV30Rc2. # noqa: E501
:return: The title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TitleV30Rc2
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this WorkTitleV30Rc2.
:param title: The title of this WorkTitleV30Rc2. # noqa: E501
:type: TitleV30Rc2
"""
self._title = title
@property
def subtitle(self):
"""Gets the subtitle of this WorkTitleV30Rc2. # noqa: E501
:return: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:rtype: SubtitleV30Rc2
"""
return self._subtitle
@subtitle.setter
def subtitle(self, subtitle):
"""Sets the subtitle of this WorkTitleV30Rc2.
:param subtitle: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:type: SubtitleV30Rc2
"""
self._subtitle = subtitle
@property
def translated_title(self):
"""Gets the translated_title of this WorkTitleV30Rc2. # noqa: E501
:return: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TranslatedTitleV30Rc2
"""
return self._translated_title
@translated_title.setter
def translated_title(self, translated_title):
"""Sets the translated_title of this WorkTitleV30Rc2.
:param translated_title: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:type: TranslatedTitleV30Rc2
"""
self._translated_title = translated_title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
<|fim_middle|>
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkTitleV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkTitleV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
<|fim▁end|> | result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
)) |
<|file_name|>work_title_v30_rc2.py<|end_file_name|><|fim▁begin|># coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.subtitle_v30_rc2 import SubtitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.translated_title_v30_rc2 import TranslatedTitleV30Rc2 # noqa: F401,E501
class WorkTitleV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'TitleV30Rc2',
'subtitle': 'SubtitleV30Rc2',
'translated_title': 'TranslatedTitleV30Rc2'
}
attribute_map = {
'title': 'title',
'subtitle': 'subtitle',
'translated_title': 'translated-title'
}
def __init__(self, title=None, subtitle=None, translated_title=None): # noqa: E501
"""WorkTitleV30Rc2 - a model defined in Swagger""" # noqa: E501
self._title = None
self._subtitle = None
self._translated_title = None
self.discriminator = None
if title is not None:
self.title = title
if subtitle is not None:
self.subtitle = subtitle
if translated_title is not None:
self.translated_title = translated_title
@property
def title(self):
"""Gets the title of this WorkTitleV30Rc2. # noqa: E501
:return: The title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TitleV30Rc2
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this WorkTitleV30Rc2.
:param title: The title of this WorkTitleV30Rc2. # noqa: E501
:type: TitleV30Rc2
"""
self._title = title
@property
def subtitle(self):
"""Gets the subtitle of this WorkTitleV30Rc2. # noqa: E501
:return: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:rtype: SubtitleV30Rc2
"""
return self._subtitle
@subtitle.setter
def subtitle(self, subtitle):
"""Sets the subtitle of this WorkTitleV30Rc2.
:param subtitle: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:type: SubtitleV30Rc2
"""
self._subtitle = subtitle
@property
def translated_title(self):
"""Gets the translated_title of this WorkTitleV30Rc2. # noqa: E501
:return: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TranslatedTitleV30Rc2
"""
return self._translated_title
@translated_title.setter
def translated_title(self, translated_title):
"""Sets the translated_title of this WorkTitleV30Rc2.
:param translated_title: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:type: TranslatedTitleV30Rc2
"""
self._translated_title = translated_title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
<|fim_middle|>
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkTitleV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkTitleV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
<|fim▁end|> | result[attr] = value.to_dict() |
<|file_name|>work_title_v30_rc2.py<|end_file_name|><|fim▁begin|># coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.subtitle_v30_rc2 import SubtitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.translated_title_v30_rc2 import TranslatedTitleV30Rc2 # noqa: F401,E501
class WorkTitleV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'TitleV30Rc2',
'subtitle': 'SubtitleV30Rc2',
'translated_title': 'TranslatedTitleV30Rc2'
}
attribute_map = {
'title': 'title',
'subtitle': 'subtitle',
'translated_title': 'translated-title'
}
def __init__(self, title=None, subtitle=None, translated_title=None): # noqa: E501
"""WorkTitleV30Rc2 - a model defined in Swagger""" # noqa: E501
self._title = None
self._subtitle = None
self._translated_title = None
self.discriminator = None
if title is not None:
self.title = title
if subtitle is not None:
self.subtitle = subtitle
if translated_title is not None:
self.translated_title = translated_title
@property
def title(self):
"""Gets the title of this WorkTitleV30Rc2. # noqa: E501
:return: The title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TitleV30Rc2
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this WorkTitleV30Rc2.
:param title: The title of this WorkTitleV30Rc2. # noqa: E501
:type: TitleV30Rc2
"""
self._title = title
@property
def subtitle(self):
"""Gets the subtitle of this WorkTitleV30Rc2. # noqa: E501
:return: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:rtype: SubtitleV30Rc2
"""
return self._subtitle
@subtitle.setter
def subtitle(self, subtitle):
"""Sets the subtitle of this WorkTitleV30Rc2.
:param subtitle: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:type: SubtitleV30Rc2
"""
self._subtitle = subtitle
@property
def translated_title(self):
"""Gets the translated_title of this WorkTitleV30Rc2. # noqa: E501
:return: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TranslatedTitleV30Rc2
"""
return self._translated_title
@translated_title.setter
def translated_title(self, translated_title):
"""Sets the translated_title of this WorkTitleV30Rc2.
:param translated_title: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:type: TranslatedTitleV30Rc2
"""
self._translated_title = translated_title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
<|fim_middle|>
else:
result[attr] = value
if issubclass(WorkTitleV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkTitleV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
<|fim▁end|> | result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
)) |
<|file_name|>work_title_v30_rc2.py<|end_file_name|><|fim▁begin|># coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.subtitle_v30_rc2 import SubtitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.translated_title_v30_rc2 import TranslatedTitleV30Rc2 # noqa: F401,E501
class WorkTitleV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'TitleV30Rc2',
'subtitle': 'SubtitleV30Rc2',
'translated_title': 'TranslatedTitleV30Rc2'
}
attribute_map = {
'title': 'title',
'subtitle': 'subtitle',
'translated_title': 'translated-title'
}
def __init__(self, title=None, subtitle=None, translated_title=None): # noqa: E501
"""WorkTitleV30Rc2 - a model defined in Swagger""" # noqa: E501
self._title = None
self._subtitle = None
self._translated_title = None
self.discriminator = None
if title is not None:
self.title = title
if subtitle is not None:
self.subtitle = subtitle
if translated_title is not None:
self.translated_title = translated_title
@property
def title(self):
"""Gets the title of this WorkTitleV30Rc2. # noqa: E501
:return: The title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TitleV30Rc2
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this WorkTitleV30Rc2.
:param title: The title of this WorkTitleV30Rc2. # noqa: E501
:type: TitleV30Rc2
"""
self._title = title
@property
def subtitle(self):
"""Gets the subtitle of this WorkTitleV30Rc2. # noqa: E501
:return: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:rtype: SubtitleV30Rc2
"""
return self._subtitle
@subtitle.setter
def subtitle(self, subtitle):
"""Sets the subtitle of this WorkTitleV30Rc2.
:param subtitle: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:type: SubtitleV30Rc2
"""
self._subtitle = subtitle
@property
def translated_title(self):
"""Gets the translated_title of this WorkTitleV30Rc2. # noqa: E501
:return: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TranslatedTitleV30Rc2
"""
return self._translated_title
@translated_title.setter
def translated_title(self, translated_title):
"""Sets the translated_title of this WorkTitleV30Rc2.
:param translated_title: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:type: TranslatedTitleV30Rc2
"""
self._translated_title = translated_title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
<|fim_middle|>
if issubclass(WorkTitleV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkTitleV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
<|fim▁end|> | result[attr] = value |
<|file_name|>work_title_v30_rc2.py<|end_file_name|><|fim▁begin|># coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.subtitle_v30_rc2 import SubtitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.translated_title_v30_rc2 import TranslatedTitleV30Rc2 # noqa: F401,E501
class WorkTitleV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'TitleV30Rc2',
'subtitle': 'SubtitleV30Rc2',
'translated_title': 'TranslatedTitleV30Rc2'
}
attribute_map = {
'title': 'title',
'subtitle': 'subtitle',
'translated_title': 'translated-title'
}
def __init__(self, title=None, subtitle=None, translated_title=None): # noqa: E501
"""WorkTitleV30Rc2 - a model defined in Swagger""" # noqa: E501
self._title = None
self._subtitle = None
self._translated_title = None
self.discriminator = None
if title is not None:
self.title = title
if subtitle is not None:
self.subtitle = subtitle
if translated_title is not None:
self.translated_title = translated_title
@property
def title(self):
"""Gets the title of this WorkTitleV30Rc2. # noqa: E501
:return: The title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TitleV30Rc2
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this WorkTitleV30Rc2.
:param title: The title of this WorkTitleV30Rc2. # noqa: E501
:type: TitleV30Rc2
"""
self._title = title
@property
def subtitle(self):
"""Gets the subtitle of this WorkTitleV30Rc2. # noqa: E501
:return: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:rtype: SubtitleV30Rc2
"""
return self._subtitle
@subtitle.setter
def subtitle(self, subtitle):
"""Sets the subtitle of this WorkTitleV30Rc2.
:param subtitle: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:type: SubtitleV30Rc2
"""
self._subtitle = subtitle
@property
def translated_title(self):
"""Gets the translated_title of this WorkTitleV30Rc2. # noqa: E501
:return: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TranslatedTitleV30Rc2
"""
return self._translated_title
@translated_title.setter
def translated_title(self, translated_title):
"""Sets the translated_title of this WorkTitleV30Rc2.
:param translated_title: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:type: TranslatedTitleV30Rc2
"""
self._translated_title = translated_title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkTitleV30Rc2, dict):
<|fim_middle|>
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkTitleV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
<|fim▁end|> | for key, value in self.items():
result[key] = value |
<|file_name|>work_title_v30_rc2.py<|end_file_name|><|fim▁begin|># coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.subtitle_v30_rc2 import SubtitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.translated_title_v30_rc2 import TranslatedTitleV30Rc2 # noqa: F401,E501
class WorkTitleV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'TitleV30Rc2',
'subtitle': 'SubtitleV30Rc2',
'translated_title': 'TranslatedTitleV30Rc2'
}
attribute_map = {
'title': 'title',
'subtitle': 'subtitle',
'translated_title': 'translated-title'
}
def __init__(self, title=None, subtitle=None, translated_title=None): # noqa: E501
"""WorkTitleV30Rc2 - a model defined in Swagger""" # noqa: E501
self._title = None
self._subtitle = None
self._translated_title = None
self.discriminator = None
if title is not None:
self.title = title
if subtitle is not None:
self.subtitle = subtitle
if translated_title is not None:
self.translated_title = translated_title
@property
def title(self):
"""Gets the title of this WorkTitleV30Rc2. # noqa: E501
:return: The title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TitleV30Rc2
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this WorkTitleV30Rc2.
:param title: The title of this WorkTitleV30Rc2. # noqa: E501
:type: TitleV30Rc2
"""
self._title = title
@property
def subtitle(self):
"""Gets the subtitle of this WorkTitleV30Rc2. # noqa: E501
:return: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:rtype: SubtitleV30Rc2
"""
return self._subtitle
@subtitle.setter
def subtitle(self, subtitle):
"""Sets the subtitle of this WorkTitleV30Rc2.
:param subtitle: The subtitle of this WorkTitleV30Rc2. # noqa: E501
:type: SubtitleV30Rc2
"""
self._subtitle = subtitle
@property
def translated_title(self):
"""Gets the translated_title of this WorkTitleV30Rc2. # noqa: E501
:return: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:rtype: TranslatedTitleV30Rc2
"""
return self._translated_title
@translated_title.setter
def translated_title(self, translated_title):
"""Sets the translated_title of this WorkTitleV30Rc2.
:param translated_title: The translated_title of this WorkTitleV30Rc2. # noqa: E501
:type: TranslatedTitleV30Rc2
"""
self._translated_title = translated_title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkTitleV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkTitleV30Rc2):
<|fim_middle|>
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
<|fim▁end|> | return False |
# work_title_v30_rc2.py
# coding: utf-8

"""
    ORCID Member

    No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)  # noqa: E501

    OpenAPI spec version: Latest
    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

import pprint
import re  # noqa: F401

import six

from orcid_api_v3.models.subtitle_v30_rc2 import SubtitleV30Rc2  # noqa: F401,E501
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2  # noqa: F401,E501
from orcid_api_v3.models.translated_title_v30_rc2 import TranslatedTitleV30Rc2  # noqa: F401,E501


class WorkTitleV30Rc2(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'title': 'TitleV30Rc2',
        'subtitle': 'SubtitleV30Rc2',
        'translated_title': 'TranslatedTitleV30Rc2'
    }

    attribute_map = {
        'title': 'title',
        'subtitle': 'subtitle',
        'translated_title': 'translated-title'
    }

    def __init__(self, title=None, subtitle=None, translated_title=None):  # noqa: E501
        """WorkTitleV30Rc2 - a model defined in Swagger"""  # noqa: E501
        self._title = None
        self._subtitle = None
        self._translated_title = None
        self.discriminator = None
        if title is not None:
            self.title = title
        if subtitle is not None:
            self.subtitle = subtitle
        if translated_title is not None:
            self.translated_title = translated_title

    @property
    def title(self):
        """Gets the title of this WorkTitleV30Rc2.  # noqa: E501

        :return: The title of this WorkTitleV30Rc2.  # noqa: E501
        :rtype: TitleV30Rc2
        """
        return self._title

    @title.setter
    def title(self, title):
        """Sets the title of this WorkTitleV30Rc2.

        :param title: The title of this WorkTitleV30Rc2.  # noqa: E501
        :type: TitleV30Rc2
        """
        self._title = title

    @property
    def subtitle(self):
        """Gets the subtitle of this WorkTitleV30Rc2.  # noqa: E501

        :return: The subtitle of this WorkTitleV30Rc2.  # noqa: E501
        :rtype: SubtitleV30Rc2
        """
        return self._subtitle

    @subtitle.setter
    def subtitle(self, subtitle):
        """Sets the subtitle of this WorkTitleV30Rc2.

        :param subtitle: The subtitle of this WorkTitleV30Rc2.  # noqa: E501
        :type: SubtitleV30Rc2
        """
        self._subtitle = subtitle

    @property
    def translated_title(self):
        """Gets the translated_title of this WorkTitleV30Rc2.  # noqa: E501

        :return: The translated_title of this WorkTitleV30Rc2.  # noqa: E501
        :rtype: TranslatedTitleV30Rc2
        """
        return self._translated_title

    @translated_title.setter
    def translated_title(self, translated_title):
        """Sets the translated_title of this WorkTitleV30Rc2.

        :param translated_title: The translated_title of this WorkTitleV30Rc2.  # noqa: E501
        :type: TranslatedTitleV30Rc2
        """
        self._translated_title = translated_title

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(WorkTitleV30Rc2, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, WorkTitleV30Rc2):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
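A short usage sketch, not part of the generated module, may make the model's behaviour concrete. It assumes the companion TitleV30Rc2 model accepts a `value` keyword argument and uses the same `__dict__`-based equality; neither assumption is shown in this file.

# Illustrative sketch only: construct the model, serialize it, and compare instances.
# TitleV30Rc2(value=...) is an assumed constructor signature for the companion model.
from orcid_api_v3.models.title_v30_rc2 import TitleV30Rc2
from orcid_api_v3.models.work_title_v30_rc2 import WorkTitleV30Rc2

work_title = WorkTitleV30Rc2(title=TitleV30Rc2(value='An example work'))
print(work_title.to_dict())  # nested models are converted recursively by to_dict()

same = WorkTitleV30Rc2(title=TitleV30Rc2(value='An example work'))
print(work_title == same)    # True, provided TitleV30Rc2 also compares via __dict__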
# AssociationRule.py
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 6 22:58:00 2016

@author: Diogo
"""

# -*- coding: utf-8 -*-
"""
Created on Sun Jun 26 19:08:00 2016

@author: Diogo
"""


def ImportGames():
    """Read the cleansed user/game file and return the list of distinct games
    plus a per-user dict marking the games each user owns."""
    games = list()
    user_games = dict()
    with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesCleansed.txt', 'r', encoding='utf-8') as lines:
        next(lines)  # Skipping headers
        for ln in lines:
            user, board_game, board_type, list_type, score10 = ln.split('##')
            if board_game not in games:
                # Note: the second replace() swaps a single space for a single space,
                # which is a no-op as written; it was presumably meant to collapse
                # double spaces.
                games.append(board_game.replace('\t', ' ').replace(' ', ' '))
            if user not in user_games:
                user_games[user] = dict()
            if board_game not in user_games[user].keys():
                user_games[user][board_game] = 1
    return (games, user_games)


games, user_games = ImportGames()


def BuildMatrix(games, user_games):
    """Write a tab-separated binary user x game matrix (1 = user owns the game).

    The output file is opened in append mode, so repeated runs keep adding rows
    to UserGamesMatrix.tab rather than overwriting it.
    """
    with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesMatrix.tab', 'a', encoding='utf-8') as lines:
        lines.write('user\t' + '\t'.join(games) + '\n')
    for user in user_games:
        user_line = list()
        for game in games:
            if game in user_games[user].keys():
                user_line.append('1')
            else:
                user_line.append('0')
        with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesMatrix.tab', 'a', encoding='utf-8') as lines:
            lines.write(user + '\t' + '\t'.join(user_line) + '\n')


BuildMatrix(games, user_games)
<|file_name|>AssociationRule.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Created on Wed Jul 6 22:58:00 2016
@author: Diogo
"""
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 26 19:08:00 2016
@author: Diogo
"""
def ImportGames():
<|fim_middle|>
games, user_games = ImportGames()
def BuildMatrix(games, user_games):
with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesMatrix.tab', 'a', encoding = 'utf-8') as lines:
lines.write('user\t' + '\t'.join(games) + '\n')
for user in user_games:
user_line = list()
for game in games:
if game in user_games[user].keys():
user_line.append('1')
else:
user_line.append('0')
with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesMatrix.tab', 'a', encoding = 'utf-8') as lines:
lines.write(user + '\t' + '\t'.join(user_line) + '\n')
BuildMatrix(games, user_games)<|fim▁end|> | games = list()
user_games = dict()
with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesCleansed.txt', 'r', encoding = 'utf-8') as lines:
next(lines) # Skiping headers
for ln in lines:
user, board_game, board_type, list_type, score10 = ln.split('##')
if board_game not in games:
games.append(board_game.replace('\t',' ').replace(' ', ' '))
if user not in user_games:
user_games[user] = dict()
if board_game not in user_games[user].keys():
user_games[user][board_game] = 1
return (games, user_games) |
<|file_name|>AssociationRule.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Created on Wed Jul 6 22:58:00 2016
@author: Diogo
"""
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 26 19:08:00 2016
@author: Diogo
"""
def ImportGames():
games = list()
user_games = dict()
with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesCleansed.txt', 'r', encoding = 'utf-8') as lines:
next(lines) # Skiping headers
for ln in lines:
user, board_game, board_type, list_type, score10 = ln.split('##')
if board_game not in games:
games.append(board_game.replace('\t',' ').replace(' ', ' '))
if user not in user_games:
user_games[user] = dict()
if board_game not in user_games[user].keys():
user_games[user][board_game] = 1
return (games, user_games)
games, user_games = ImportGames()
def BuildMatrix(games, user_games):
<|fim_middle|>
BuildMatrix(games, user_games)<|fim▁end|> | with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesMatrix.tab', 'a', encoding = 'utf-8') as lines:
lines.write('user\t' + '\t'.join(games) + '\n')
for user in user_games:
user_line = list()
for game in games:
if game in user_games[user].keys():
user_line.append('1')
else:
user_line.append('0')
with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesMatrix.tab', 'a', encoding = 'utf-8') as lines:
lines.write(user + '\t' + '\t'.join(user_line) + '\n') |
<|file_name|>AssociationRule.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Created on Wed Jul 6 22:58:00 2016
@author: Diogo
"""
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 26 19:08:00 2016
@author: Diogo
"""
def ImportGames():
games = list()
user_games = dict()
with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesCleansed.txt', 'r', encoding = 'utf-8') as lines:
next(lines) # Skiping headers
for ln in lines:
user, board_game, board_type, list_type, score10 = ln.split('##')
if board_game not in games:
<|fim_middle|>
if user not in user_games:
user_games[user] = dict()
if board_game not in user_games[user].keys():
user_games[user][board_game] = 1
return (games, user_games)
games, user_games = ImportGames()
def BuildMatrix(games, user_games):
with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesMatrix.tab', 'a', encoding = 'utf-8') as lines:
lines.write('user\t' + '\t'.join(games) + '\n')
for user in user_games:
user_line = list()
for game in games:
if game in user_games[user].keys():
user_line.append('1')
else:
user_line.append('0')
with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesMatrix.tab', 'a', encoding = 'utf-8') as lines:
lines.write(user + '\t' + '\t'.join(user_line) + '\n')
BuildMatrix(games, user_games)<|fim▁end|> | games.append(board_game.replace('\t',' ').replace(' ', ' ')) |
<|file_name|>AssociationRule.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Created on Wed Jul 6 22:58:00 2016
@author: Diogo
"""
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 26 19:08:00 2016
@author: Diogo
"""
def ImportGames():
games = list()
user_games = dict()
with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesCleansed.txt', 'r', encoding = 'utf-8') as lines:
next(lines) # Skiping headers
for ln in lines:
user, board_game, board_type, list_type, score10 = ln.split('##')
if board_game not in games:
games.append(board_game.replace('\t',' ').replace(' ', ' '))
if user not in user_games:
<|fim_middle|>
if board_game not in user_games[user].keys():
user_games[user][board_game] = 1
return (games, user_games)
games, user_games = ImportGames()
def BuildMatrix(games, user_games):
with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesMatrix.tab', 'a', encoding = 'utf-8') as lines:
lines.write('user\t' + '\t'.join(games) + '\n')
for user in user_games:
user_line = list()
for game in games:
if game in user_games[user].keys():
user_line.append('1')
else:
user_line.append('0')
with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesMatrix.tab', 'a', encoding = 'utf-8') as lines:
lines.write(user + '\t' + '\t'.join(user_line) + '\n')
BuildMatrix(games, user_games)<|fim▁end|> | user_games[user] = dict() |
<|file_name|>AssociationRule.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Created on Wed Jul 6 22:58:00 2016
@author: Diogo
"""
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 26 19:08:00 2016
@author: Diogo
"""
def ImportGames():
games = list()
user_games = dict()
with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesCleansed.txt', 'r', encoding = 'utf-8') as lines:
next(lines) # Skiping headers
for ln in lines:
user, board_game, board_type, list_type, score10 = ln.split('##')
if board_game not in games:
games.append(board_game.replace('\t',' ').replace(' ', ' '))
if user not in user_games:
user_games[user] = dict()
if board_game not in user_games[user].keys():
<|fim_middle|>
return (games, user_games)
games, user_games = ImportGames()
def BuildMatrix(games, user_games):
with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesMatrix.tab', 'a', encoding = 'utf-8') as lines:
lines.write('user\t' + '\t'.join(games) + '\n')
for user in user_games:
user_line = list()
for game in games:
if game in user_games[user].keys():
user_line.append('1')
else:
user_line.append('0')
with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesMatrix.tab', 'a', encoding = 'utf-8') as lines:
lines.write(user + '\t' + '\t'.join(user_line) + '\n')
BuildMatrix(games, user_games)<|fim▁end|> | user_games[user][board_game] = 1 |
<|file_name|>AssociationRule.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Created on Wed Jul 6 22:58:00 2016
@author: Diogo
"""
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 26 19:08:00 2016
@author: Diogo
"""
def ImportGames():
games = list()
user_games = dict()
with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesCleansed.txt', 'r', encoding = 'utf-8') as lines:
next(lines) # Skiping headers
for ln in lines:
user, board_game, board_type, list_type, score10 = ln.split('##')
if board_game not in games:
games.append(board_game.replace('\t',' ').replace(' ', ' '))
if user not in user_games:
user_games[user] = dict()
if board_game not in user_games[user].keys():
user_games[user][board_game] = 1
return (games, user_games)
games, user_games = ImportGames()
def BuildMatrix(games, user_games):
with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesMatrix.tab', 'a', encoding = 'utf-8') as lines:
lines.write('user\t' + '\t'.join(games) + '\n')
for user in user_games:
user_line = list()
for game in games:
if game in user_games[user].keys():
<|fim_middle|>
else:
user_line.append('0')
with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesMatrix.tab', 'a', encoding = 'utf-8') as lines:
lines.write(user + '\t' + '\t'.join(user_line) + '\n')
BuildMatrix(games, user_games)<|fim▁end|> | user_line.append('1') |
<|file_name|>AssociationRule.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Created on Wed Jul 6 22:58:00 2016
@author: Diogo
"""
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 26 19:08:00 2016
@author: Diogo
"""
def ImportGames():
games = list()
user_games = dict()
with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesCleansed.txt', 'r', encoding = 'utf-8') as lines:
next(lines) # Skiping headers
for ln in lines:
user, board_game, board_type, list_type, score10 = ln.split('##')
if board_game not in games:
games.append(board_game.replace('\t',' ').replace(' ', ' '))
if user not in user_games:
user_games[user] = dict()
if board_game not in user_games[user].keys():
user_games[user][board_game] = 1
return (games, user_games)
games, user_games = ImportGames()
def BuildMatrix(games, user_games):
with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesMatrix.tab', 'a', encoding = 'utf-8') as lines:
lines.write('user\t' + '\t'.join(games) + '\n')
for user in user_games:
user_line = list()
for game in games:
if game in user_games[user].keys():
user_line.append('1')
else:
<|fim_middle|>
with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesMatrix.tab', 'a', encoding = 'utf-8') as lines:
lines.write(user + '\t' + '\t'.join(user_line) + '\n')
BuildMatrix(games, user_games)<|fim▁end|> | user_line.append('0') |
<|file_name|>AssociationRule.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Created on Wed Jul 6 22:58:00 2016
@author: Diogo
"""
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 26 19:08:00 2016
@author: Diogo
"""
def <|fim_middle|>():
games = list()
user_games = dict()
with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesCleansed.txt', 'r', encoding = 'utf-8') as lines:
        next(lines) # Skipping headers
for ln in lines:
user, board_game, board_type, list_type, score10 = ln.split('##')
if board_game not in games:
games.append(board_game.replace('\t',' ').replace(' ', ' '))
if user not in user_games:
user_games[user] = dict()
if board_game not in user_games[user].keys():
user_games[user][board_game] = 1
return (games, user_games)
games, user_games = ImportGames()
def BuildMatrix(games, user_games):
with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesMatrix.tab', 'a', encoding = 'utf-8') as lines:
lines.write('user\t' + '\t'.join(games) + '\n')
for user in user_games:
user_line = list()
for game in games:
if game in user_games[user].keys():
user_line.append('1')
else:
user_line.append('0')
with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesMatrix.tab', 'a', encoding = 'utf-8') as lines:
lines.write(user + '\t' + '\t'.join(user_line) + '\n')
BuildMatrix(games, user_games)<|fim▁end|> | ImportGames |
<|file_name|>AssociationRule.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Created on Wed Jul 6 22:58:00 2016
@author: Diogo
"""
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 26 19:08:00 2016
@author: Diogo
"""
def ImportGames():
games = list()
user_games = dict()
with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesCleansed.txt', 'r', encoding = 'utf-8') as lines:
        next(lines) # Skipping headers
for ln in lines:
user, board_game, board_type, list_type, score10 = ln.split('##')
if board_game not in games:
games.append(board_game.replace('\t',' ').replace(' ', ' '))
if user not in user_games:
user_games[user] = dict()
if board_game not in user_games[user].keys():
user_games[user][board_game] = 1
return (games, user_games)
games, user_games = ImportGames()
def <|fim_middle|>(games, user_games):
with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesMatrix.tab', 'a', encoding = 'utf-8') as lines:
lines.write('user\t' + '\t'.join(games) + '\n')
for user in user_games:
user_line = list()
for game in games:
if game in user_games[user].keys():
user_line.append('1')
else:
user_line.append('0')
with open('C:\\Users\\Diogo\\Documents\\Monografia FIA\\UserGamesMatrix.tab', 'a', encoding = 'utf-8') as lines:
lines.write(user + '\t' + '\t'.join(user_line) + '\n')
BuildMatrix(games, user_games)<|fim▁end|> | BuildMatrix |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>"""Python Library Boilerplate contains all the boilerplate you need to create a Python package."""
__author__ = 'Michael Joseph'
__email__ = '[email protected]'
__url__ = 'https://github.com/michaeljoseph/sealeyes'
__version__ = '0.0.1'<|fim▁hole|>
def sealeyes():
return 'Hello World!'<|fim▁end|> | |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>"""Python Library Boilerplate contains all the boilerplate you need to create a Python package."""
__author__ = 'Michael Joseph'
__email__ = '[email protected]'
__url__ = 'https://github.com/michaeljoseph/sealeyes'
__version__ = '0.0.1'
def sealeyes():
<|fim_middle|>
<|fim▁end|> | return 'Hello World!' |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>"""Python Library Boilerplate contains all the boilerplate you need to create a Python package."""
__author__ = 'Michael Joseph'
__email__ = '[email protected]'
__url__ = 'https://github.com/michaeljoseph/sealeyes'
__version__ = '0.0.1'
def <|fim_middle|>():
return 'Hello World!'
<|fim▁end|> | sealeyes |
<|file_name|>modules.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Created on Tue Mar 14 02:17:11 2017
@author: guida
"""
import json
<|fim▁hole|>def get_url(url):
response = requests.get(url)
content = response.content.decode("utf8")
return content
#Json parser
def get_json_from_url(url):
content = get_url(url)
js = json.loads(content)
return js<|fim▁end|> | import requests
|
<|file_name|>modules.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Created on Tue Mar 14 02:17:11 2017
@author: guida
"""
import json
import requests
def get_url(url):
<|fim_middle|>
#Json parser
def get_json_from_url(url):
content = get_url(url)
js = json.loads(content)
return js<|fim▁end|> | response = requests.get(url)
content = response.content.decode("utf8")
return content |
<|file_name|>modules.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Created on Tue Mar 14 02:17:11 2017
@author: guida
"""
import json
import requests
def get_url(url):
response = requests.get(url)
content = response.content.decode("utf8")
return content
#Json parser
def get_json_from_url(url):
<|fim_middle|>
<|fim▁end|> | content = get_url(url)
js = json.loads(content)
return js |
<|file_name|>modules.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Created on Tue Mar 14 02:17:11 2017
@author: guida
"""
import json
import requests
def <|fim_middle|>(url):
response = requests.get(url)
content = response.content.decode("utf8")
return content
#Json parser
def get_json_from_url(url):
content = get_url(url)
js = json.loads(content)
return js<|fim▁end|> | get_url |
<|file_name|>modules.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Created on Tue Mar 14 02:17:11 2017
@author: guida
"""
import json
import requests
def get_url(url):
response = requests.get(url)
content = response.content.decode("utf8")
return content
#Json parser
def <|fim_middle|>(url):
content = get_url(url)
js = json.loads(content)
return js<|fim▁end|> | get_json_from_url |
<|file_name|>test_different_outputs.py<|end_file_name|><|fim▁begin|>import unittest
from polycircles import polycircles
from nose.tools import assert_equal, assert_almost_equal
class TestDifferentOutputs(unittest.TestCase):
"""Tests the various output methods: KML style, WKT, lat-lon and lon-lat."""
<|fim▁hole|> def setUp(self):
self.latitude = 32.074322
self.longitude = 34.792081
self.radius_meters = 100
self.number_of_vertices = 36
self.polycircle = \
polycircles.Polycircle(latitude=self.latitude,
longitude=self.longitude,
radius=self.radius_meters,
number_of_vertices=self.number_of_vertices)
def test_lat_lon_output(self):
"""Asserts that the vertices in the lat-lon output are in the
right order (lat before long)."""
for vertex in self.polycircle.to_lat_lon():
assert_almost_equal(vertex[0], self.latitude, places=2)
assert_almost_equal(vertex[1], self.longitude, places=2)
def test_lon_lat_output(self):
"""Asserts that the vertices in the lat-lon output are in the
right order (lat before long)."""
for vertex in self.polycircle.to_lon_lat():
assert_almost_equal(vertex[0], self.longitude, places=2)
assert_almost_equal(vertex[1], self.latitude, places=2)
def test_vertices_equals_lat_lon(self):
"""Asserts that the "vertices" property is identical to the return
value of to_lat_lon()."""
assert_equal(self.polycircle.vertices, self.polycircle.to_lat_lon())
def test_kml_equals_lon_lat(self):
"""Asserts that the return value of to_kml() property is identical to
the return value of to_lon_lat()."""
assert_equal(self.polycircle.to_kml(), self.polycircle.to_lon_lat())
if __name__ == '__main__':
unittest.main()<|fim▁end|> | |
<|file_name|>test_different_outputs.py<|end_file_name|><|fim▁begin|>import unittest
from polycircles import polycircles
from nose.tools import assert_equal, assert_almost_equal
class TestDifferentOutputs(unittest.TestCase):
<|fim_middle|>
if __name__ == '__main__':
unittest.main()<|fim▁end|> | """Tests the various output methods: KML style, WKT, lat-lon and lon-lat."""
def setUp(self):
self.latitude = 32.074322
self.longitude = 34.792081
self.radius_meters = 100
self.number_of_vertices = 36
self.polycircle = \
polycircles.Polycircle(latitude=self.latitude,
longitude=self.longitude,
radius=self.radius_meters,
number_of_vertices=self.number_of_vertices)
def test_lat_lon_output(self):
"""Asserts that the vertices in the lat-lon output are in the
right order (lat before long)."""
for vertex in self.polycircle.to_lat_lon():
assert_almost_equal(vertex[0], self.latitude, places=2)
assert_almost_equal(vertex[1], self.longitude, places=2)
def test_lon_lat_output(self):
"""Asserts that the vertices in the lat-lon output are in the
right order (lat before long)."""
for vertex in self.polycircle.to_lon_lat():
assert_almost_equal(vertex[0], self.longitude, places=2)
assert_almost_equal(vertex[1], self.latitude, places=2)
def test_vertices_equals_lat_lon(self):
"""Asserts that the "vertices" property is identical to the return
value of to_lat_lon()."""
assert_equal(self.polycircle.vertices, self.polycircle.to_lat_lon())
def test_kml_equals_lon_lat(self):
"""Asserts that the return value of to_kml() property is identical to
the return value of to_lon_lat()."""
assert_equal(self.polycircle.to_kml(), self.polycircle.to_lon_lat()) |
<|file_name|>test_different_outputs.py<|end_file_name|><|fim▁begin|>import unittest
from polycircles import polycircles
from nose.tools import assert_equal, assert_almost_equal
class TestDifferentOutputs(unittest.TestCase):
"""Tests the various output methods: KML style, WKT, lat-lon and lon-lat."""
def setUp(self):
<|fim_middle|>
def test_lat_lon_output(self):
"""Asserts that the vertices in the lat-lon output are in the
right order (lat before long)."""
for vertex in self.polycircle.to_lat_lon():
assert_almost_equal(vertex[0], self.latitude, places=2)
assert_almost_equal(vertex[1], self.longitude, places=2)
def test_lon_lat_output(self):
"""Asserts that the vertices in the lat-lon output are in the
right order (lat before long)."""
for vertex in self.polycircle.to_lon_lat():
assert_almost_equal(vertex[0], self.longitude, places=2)
assert_almost_equal(vertex[1], self.latitude, places=2)
def test_vertices_equals_lat_lon(self):
"""Asserts that the "vertices" property is identical to the return
value of to_lat_lon()."""
assert_equal(self.polycircle.vertices, self.polycircle.to_lat_lon())
def test_kml_equals_lon_lat(self):
"""Asserts that the return value of to_kml() property is identical to
the return value of to_lon_lat()."""
assert_equal(self.polycircle.to_kml(), self.polycircle.to_lon_lat())
if __name__ == '__main__':
unittest.main()<|fim▁end|> | self.latitude = 32.074322
self.longitude = 34.792081
self.radius_meters = 100
self.number_of_vertices = 36
self.polycircle = \
polycircles.Polycircle(latitude=self.latitude,
longitude=self.longitude,
radius=self.radius_meters,
number_of_vertices=self.number_of_vertices) |
<|file_name|>test_different_outputs.py<|end_file_name|><|fim▁begin|>import unittest
from polycircles import polycircles
from nose.tools import assert_equal, assert_almost_equal
class TestDifferentOutputs(unittest.TestCase):
"""Tests the various output methods: KML style, WKT, lat-lon and lon-lat."""
def setUp(self):
self.latitude = 32.074322
self.longitude = 34.792081
self.radius_meters = 100
self.number_of_vertices = 36
self.polycircle = \
polycircles.Polycircle(latitude=self.latitude,
longitude=self.longitude,
radius=self.radius_meters,
number_of_vertices=self.number_of_vertices)
def test_lat_lon_output(self):
<|fim_middle|>
def test_lon_lat_output(self):
"""Asserts that the vertices in the lat-lon output are in the
right order (lat before long)."""
for vertex in self.polycircle.to_lon_lat():
assert_almost_equal(vertex[0], self.longitude, places=2)
assert_almost_equal(vertex[1], self.latitude, places=2)
def test_vertices_equals_lat_lon(self):
"""Asserts that the "vertices" property is identical to the return
value of to_lat_lon()."""
assert_equal(self.polycircle.vertices, self.polycircle.to_lat_lon())
def test_kml_equals_lon_lat(self):
"""Asserts that the return value of to_kml() property is identical to
the return value of to_lon_lat()."""
assert_equal(self.polycircle.to_kml(), self.polycircle.to_lon_lat())
if __name__ == '__main__':
unittest.main()<|fim▁end|> | """Asserts that the vertices in the lat-lon output are in the
right order (lat before long)."""
for vertex in self.polycircle.to_lat_lon():
assert_almost_equal(vertex[0], self.latitude, places=2)
assert_almost_equal(vertex[1], self.longitude, places=2) |