max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---|
binarycheck.py | pnordin/trimeol | 0 | 10900 | """Module to help guess whether a file is binary or text.
Requirements:
Python 2.7+
Recommended:
Python 3
"""
def is_binary_file(fname):
"""Attempt to guess if 'fname' is a binary file heuristically.
This algorithm has many flaws. Use with caution.
It assumes that if a part of the file has NUL bytes
or has more control characters than text characters,
it is a binary file.
Additionally, an ASCII compatible character set is assumed.
Returns True if 'fname' appears to be a binary file.
"""
with open(fname, 'rb') as fh:
chunk = fh.read(1024)
if not chunk: # Empty file
return False
if b'\x00' in chunk: # Has NUL bytes
return True
ncontrol = control_char_count(chunk)
ntext = len(chunk) - ncontrol
return ncontrol > ntext
def is_control_char(c):
"""Return True if 'c' is a control character.
c is considered a control character if
it is outside of the extended ASCII set or
has a code below 32 with some exclusions.
An ASCII compatible character set is assumed.
"""
charcode = 0
# The following assignment
# should make this module compatible with
# at least Python 2.7 (tested on 2.7.9).
try:
charcode = ord(c)
except TypeError:
charcode = c
excludes = ("\t", "\r", "\n")
if charcode in [ord(char) for char in excludes]:
return False
return (charcode < 32 or
charcode > 255)
def control_char_count(data):
"""Return the count of control characters in 'data'."""
n = 0
for c in data:
if is_control_char(c):
n += 1
return n
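# Usage sketch (hypothetical paths, for illustration only):
# is_binary_file('/bin/ls')     # likely True: compiled binaries contain NUL bytes
# is_binary_file('notes.txt')   # likely False: mostly printable text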
| 3.984375 | 4 |
spiketoolkit/validation/quality_metric_classes/snr.py | seankmartin/spiketoolkit | 0 | 10901 | import numpy as np
import spikemetrics.metrics as metrics
from .utils.thresholdcurator import ThresholdCurator
from .quality_metric import QualityMetric
import spiketoolkit as st
from spikemetrics.utils import Epoch, printProgressBar
from collections import OrderedDict
from .parameter_dictionaries import get_recording_gui_params, get_feature_gui_params
def make_curator_gui_params(params):
keys = list(params.keys())
types = [type(params[key]) for key in keys]
values = [params[key] for key in keys]
gui_params = [{'name': keys[0], 'type': str(types[0].__name__), 'value': values[0], 'default': values[0], 'title': "Mode to compute noise SNR ('mad' | 'std' - default 'mad')"},
{'name': keys[1], 'type': str(types[1].__name__), 'value': values[1], 'default': values[1], 'title': "Number of seconds to compute noise level from (default 10.0)"},
{'name': keys[2], 'type': str(types[2].__name__), 'value': values[2], 'default': values[2], 'title': "Maximum number of spikes to compute templates from (default 1000)"},
{'name': keys[3], 'type': str(types[3].__name__), 'value': values[3], 'default': values[3], 'title': "Use 'mean' or 'median' to compute templates"},
{'name': keys[4], 'type': str(types[4].__name__), 'value': values[4], 'default': values[4], 'title': "If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or both ('both' - default)"},
{'name': keys[5], 'type': 'int', 'value': values[5], 'default': values[5], 'title': "Random seed for reproducibility"},
{'name': keys[6], 'type': str(types[6].__name__), 'value': values[6], 'default': values[6], 'title': "If True, will be verbose in metric computation."},]
curator_gui_params = [{'name': 'threshold', 'type': 'float', 'title': "The threshold for the given metric."},
{'name': 'threshold_sign', 'type': 'str',
'title': "If 'less', will threshold any metric less than the given threshold. "
"If 'less_or_equal', will threshold any metric less than or equal to the given threshold. "
"If 'greater', will threshold any metric greater than the given threshold. "
"If 'greater_or_equal', will threshold any metric greater than or equal to the given threshold."}]
gui_params = curator_gui_params + gui_params + get_recording_gui_params() + get_feature_gui_params()
return gui_params
class SNR(QualityMetric):
installed = True # check at class level if installed or not
installation_mesg = "" # err
params = OrderedDict([('snr_mode',"mad"), ('snr_noise_duration',10.0), ('max_spikes_per_unit_for_snr',1000),
('template_mode', "median"), ('max_channel_peak', "both"), ('seed',None), ('verbose',False)])
curator_name = "ThresholdSNR"
curator_gui_params = make_curator_gui_params(params)
def __init__(self, metric_data):
QualityMetric.__init__(self, metric_data, metric_name="snr")
if not metric_data.has_recording():
raise ValueError("MetricData object must have a recording")
def compute_metric(self, snr_mode, snr_noise_duration, max_spikes_per_unit_for_snr,
template_mode, max_channel_peak, save_features_props,
recompute_info, seed, save_as_property):
snrs_epochs = []
for epoch in self._metric_data._epochs:
epoch_recording = self._metric_data._recording.get_epoch(epoch[0])
epoch_sorting = self._metric_data._sorting.get_epoch(epoch[0])
channel_noise_levels = _compute_channel_noise_levels(
recording=epoch_recording,
mode=snr_mode,
noise_duration=snr_noise_duration,
seed=seed,
)
templates = st.postprocessing.get_unit_templates(
epoch_recording,
epoch_sorting,
unit_ids=self._metric_data._unit_ids,
max_spikes_per_unit=max_spikes_per_unit_for_snr,
mode=template_mode,
save_wf_as_features=save_features_props,
recompute_waveforms=recompute_info,
save_as_property=save_features_props,
seed=seed,
)
max_channels = st.postprocessing.get_unit_max_channels(
epoch_recording,
epoch_sorting,
unit_ids=self._metric_data._unit_ids,
max_spikes_per_unit=max_spikes_per_unit_for_snr,
peak=max_channel_peak,
recompute_templates=recompute_info,
save_as_property=save_features_props,
mode=template_mode,
seed=seed,
)
snr_list = []
for i, unit_id in enumerate(self._metric_data._unit_ids):
if self._metric_data.verbose:
printProgressBar(i + 1, len(self._metric_data._unit_ids))
max_channel_idx = epoch_recording.get_channel_ids().index(
max_channels[i]
)
snr = _compute_template_SNR(
templates[i], channel_noise_levels, max_channel_idx
)
snr_list.append(snr)
snrs = np.asarray(snr_list)
snrs_epochs.append(snrs)
if save_as_property:
self.save_as_property(self._metric_data._sorting, snrs_epochs, self._metric_name)
return snrs_epochs
def threshold_metric(self, threshold, threshold_sign, snr_mode, snr_noise_duration, max_spikes_per_unit_for_snr,
template_mode, max_channel_peak, save_features_props, recompute_info,
seed, save_as_property):
snrs_epochs = self.compute_metric(snr_mode, snr_noise_duration, max_spikes_per_unit_for_snr,
template_mode, max_channel_peak, save_features_props,
recompute_info, seed, save_as_property)[0]
threshold_curator = ThresholdCurator(
sorting=self._metric_data._sorting, metrics_epoch=snrs_epochs
)
threshold_curator.threshold_sorting(
threshold=threshold, threshold_sign=threshold_sign
)
return threshold_curator
def _compute_template_SNR(template, channel_noise_levels, max_channel_idx):
"""
Computes SNR on the channel with largest amplitude
Parameters
----------
template: np.array
Template (n_elec, n_timepoints)
channel_noise_levels: list
Noise levels for the different channels
max_channel_idx: int
        Index of channel with largest template
Returns
-------
snr: float
Signal-to-noise ratio for the template
"""
snr = (
np.max(np.abs(template[max_channel_idx]))
/ channel_noise_levels[max_channel_idx]
)
return snr
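# Worked sketch (hypothetical numbers): if the template's peak absolute amplitude
# on the max channel is 80 and the noise level estimated for that channel is 8,
# the SNR returned above is 80 / 8 = 10.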
def _compute_channel_noise_levels(recording, mode, noise_duration, seed):
"""
Computes noise level channel-wise
Parameters
----------
recording: RecordingExtractor
        The recording extractor object
mode: str
        'std' or 'mad' (default 'mad')
noise_duration: float
Number of seconds to compute SNR from
Returns
-------
    noise_levels: list
Noise levels for each channel
"""
M = recording.get_num_channels()
n_frames = int(noise_duration * recording.get_sampling_frequency())
if n_frames >= recording.get_num_frames():
start_frame = 0
end_frame = recording.get_num_frames()
else:
start_frame = np.random.RandomState(seed=seed).randint(
0, recording.get_num_frames() - n_frames
)
end_frame = start_frame + n_frames
X = recording.get_traces(start_frame=start_frame, end_frame=end_frame)
noise_levels = []
for ch in range(M):
if mode == "std":
noise_level = np.std(X[ch, :])
elif mode == "mad":
noise_level = np.median(np.abs(X[ch, :]) / 0.6745)
else:
raise Exception("'mode' can be 'std' or 'mad'")
noise_levels.append(noise_level)
return noise_levels | 2.4375 | 2 |
parser/fase2/team16/main.py | webdev188/tytus | 0 | 10902 | <reponame>webdev188/tytus
# This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
import Gramatica as g
import interprete as Inter
import ts as TS
import jsonMode as JSON_INGE
import jsonMode as json
import Instruccion as INST
import Interfaz.Interfaz as Gui
import os
import glob
from os import path
from os import remove
def print_hi(name):
# Use a breakpoint in the code line below to debug your script.
print(f'Hi, {name}') # Press Ctrl+F8 to toggle the breakpoint.
if __name__ == '__main__':
Gui.principal
cadena= "goto"
# for n in cadena:
# in
print("ELIMINANDO...")
files = glob.glob('data/json/*')
for ele in files:
os.remove(ele)
| 2.515625 | 3 |
stsynphot/tests/test_parser.py | tddesjardins/stsynphot_refactor | 5 | 10903 | <filename>stsynphot/tests/test_parser.py<gh_stars>1-10
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test spparser.py module, which uses spark.py.
.. note::
Only testing to see if the parser makes the right kind of
objects. Quality of the data is tested in other modules.
"""
# STDLIB
import os
# THIRD-PARTY
import pytest
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.exceptions import AstropyUserWarning
from numpy.testing import assert_allclose
# SYNPHOT
from synphot import exceptions as synexceptions
from synphot import units
from synphot.models import (BlackBodyNorm1D, Box1D, ConstFlux1D, Empirical1D,
GaussianFlux1D, PowerLawFlux1D)
from synphot.reddening import ExtinctionCurve
from synphot.spectrum import SourceSpectrum, SpectralElement
# LOCAL
from .. import catalog, exceptions, observationmode, spectrum, spparser
from ..config import conf
from ..stio import resolve_filename
def _single_functioncall(sp, ans_cls, ans_model, ans_name, ans_z=0):
assert isinstance(sp, ans_cls)
# Do not check composite model
if ans_model is not None:
assert isinstance(sp.model, ans_model)
if ans_name:
assert sp.meta['expr'] == ans_name
if ans_z is not None:
assert_allclose(sp.z, ans_z)
def _compare_spectra(sp1, sp2):
"""Test that two spectra are basically equivalent."""
if sp1.waveset is None:
assert sp2.waveset is None
w = [100, 5000, 11000] * u.AA
else:
w = sp1.waveset
assert_quantity_allclose(w, sp2.waveset)
assert_quantity_allclose(sp1(w), sp2(w))
assert_quantity_allclose(sp1.integrate(wavelengths=w),
sp2.integrate(wavelengths=w))
assert type(sp1.model.__class__) == type(sp2.model.__class__) # noqa
if hasattr(sp1, 'z'):
assert sp1.z == sp2.z
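# Pattern used throughout the tests below (descriptive note): each test parses a
# syntax string with spparser.parse_spec, checks the resulting object's class,
# model and name via _single_functioncall, then builds the equivalent spectrum
# directly with synphot/stsynphot and confirms the two agree via _compare_spectra.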
def test_unit_1_flam():
sp1 = spparser.parse_spec('unit(1, flam)')
_single_functioncall(sp1, SourceSpectrum, ConstFlux1D, 'unit(1.0,flam)')
sp2 = SourceSpectrum(ConstFlux1D, amplitude=1 * units.FLAM)
_compare_spectra(sp1, sp2)
def test_bb_5000():
sp1 = spparser.parse_spec('bb(5000)')
_single_functioncall(sp1, SourceSpectrum, BlackBodyNorm1D, 'bb(5000.0)')
sp2 = SourceSpectrum(BlackBodyNorm1D, temperature=5000 * u.K)
_compare_spectra(sp1, sp2)
def test_powerlaw_5000_1_flam():
sp1 = spparser.parse_spec('pl(5000, 1, flam)')
_single_functioncall(
sp1, SourceSpectrum, PowerLawFlux1D, 'pl(5000.0,1.0,flam)')
sp2 = SourceSpectrum(PowerLawFlux1D, amplitude=1 * units.FLAM,
x_0=5000 * u.AA, alpha=-1)
_compare_spectra(sp1, sp2)
def test_box_5000_1():
sp1 = spparser.parse_spec('box(5000, 1)')
_single_functioncall(sp1, SpectralElement, Box1D, 'box(5000.0,1.0)',
ans_z=None)
sp2 = SpectralElement(Box1D, amplitude=1, x_0=5000 * u.AA, width=1 * u.AA)
_compare_spectra(sp1, sp2)
def test_em_5000_25_1_flam():
sp1 = spparser.parse_spec('em(5000, 25, 1, flam)')
_single_functioncall(
sp1, SourceSpectrum, GaussianFlux1D, 'em(5000, 25, 1, FLAM)')
f = 1 * (units.FLAM * u.AA) # Integrated flux
sp2 = SourceSpectrum(
GaussianFlux1D, mean=5000 * u.AA, fwhm=25 * u.AA, total_flux=f)
_compare_spectra(sp1, sp2)
def test_rn_bb_box_abmag():
sp1 = spparser.parse_spec('rn(bb(5000), box(5000, 10), 17, abmag)')
_single_functioncall(sp1, SourceSpectrum, None,
'rn(bb(5000.0),box(5000.0,10.0),17.0,abmag)')
bb = SourceSpectrum(BlackBodyNorm1D, temperature=5000 * u.K)
box = SpectralElement(Box1D, amplitude=1, x_0=5000 * u.AA, width=10 * u.AA)
sp2 = bb.normalize(17 * u.ABmag, band=box)
_compare_spectra(sp1, sp2)
def test_z_null():
"""ETC junk spectrum results in flat spectrum with no redshift."""
sp1 = spparser.parse_spec('z(null, 0.1)')
_single_functioncall(sp1, SourceSpectrum, ConstFlux1D, 'z(null,0.1)')
sp2 = SourceSpectrum(ConstFlux1D, amplitude=1 * units.PHOTLAM)
_compare_spectra(sp1, sp2)
def test_z_em():
sp1 = spparser.parse_spec('z(em(5000, 25, 1, flam), 0.1)')
_single_functioncall(
sp1, SourceSpectrum, None, 'z(em(5000, 25, 1, FLAM),0.1)', ans_z=0.1)
f = 1 * (units.FLAM * u.AA) # Integrated flux
sp2 = SourceSpectrum(
GaussianFlux1D, mean=5000 * u.AA, fwhm=25 * u.AA, total_flux=f)
sp2.z = 0.1
_compare_spectra(sp1, sp2)
@pytest.mark.remote_data
def test_remote_spec_vegafile():
sp1 = spparser.parse_spec('spec(crcalspec$alpha_lyr_stis_007.fits)')
_single_functioncall(sp1, SourceSpectrum, Empirical1D,
'spec(crcalspec$alpha_lyr_stis_007.fits)')
sp2 = SourceSpectrum.from_file(resolve_filename(
os.environ['PYSYN_CDBS'], 'calspec', 'alpha_lyr_stis_007.fits'))
_compare_spectra(sp1, sp2)
@pytest.mark.remote_data
def test_remote_band_v():
sp1 = spparser.parse_spec('band(v)')
_single_functioncall(
sp1, spectrum.ObservationSpectralElement, Empirical1D, 'band(v)',
ans_z=None)
sp2 = SpectralElement.from_filter('johnson_v')
_compare_spectra(sp1, sp2)
@pytest.mark.remote_data
def test_remote_icat_k93():
sp1 = spparser.parse_spec('icat(k93models, 5000, 0.5, 0)')
_single_functioncall(sp1, SourceSpectrum, Empirical1D,
'k93models(T_eff=5000,metallicity=0.5,log_g=0)')
sp2 = catalog.grid_to_spec('k93models', 5000, 0.5, 0)
_compare_spectra(sp1, sp2)
@pytest.mark.remote_data
def test_remote_ebmvx_mwavg():
sp1 = spparser.parse_spec('ebmvx(0.3, mwavg)')
_single_functioncall(
sp1, ExtinctionCurve, Empirical1D, 'ebmvx(0.3,mwavg)', ans_z=None)
sp2 = spectrum.ebmvx('mwavg', 0.3)
_compare_spectra(sp1, sp2)
@pytest.mark.remote_data
def test_remote_rn_calspec_box():
sp1 = spparser.parse_spec(
'rn(crcalspec$gd71_mod_005.fits, box(5000, 10), 17, vegamag)')
_single_functioncall(
sp1, SourceSpectrum, None,
'rn(crcalspec$gd71_mod_005.fits,box(5000.0,10.0),17.0,vegamag)')
gd71 = SourceSpectrum.from_file(resolve_filename(
os.environ['PYSYN_CDBS'], 'calspec', 'gd71_mod_005.fits'))
box = SpectralElement(Box1D, amplitude=1, x_0=5000 * u.AA, width=10 * u.AA)
sp2 = gd71.normalize(17 * units.VEGAMAG, band=box, vegaspec=spectrum.Vega)
_compare_spectra(sp1, sp2)
@pytest.mark.remote_data
def test_remote_rn_icat_k93():
sp1 = spparser.parse_spec(
'rn(icat(k93models, 5000, 0.5, 0), '
'cracscomp$acs_f814w_hrc_006_syn.fits, 17, obmag)')
_single_functioncall(
sp1, SourceSpectrum, None,
'rn(k93models(T_eff=5000,metallicity=0.5,log_g=0),'
'cracscomp$acs_f814w_hrc_006_syn.fits,17.0,obmag)')
k93 = catalog.grid_to_spec('k93models', 5000, 0.5, 0)
bp = SpectralElement.from_file(resolve_filename(
os.environ['PYSYN_CDBS'], 'comp', 'acs', 'acs_f814w_hrc_006_syn.fits'))
sp2 = k93.normalize(17 * units.OBMAG, band=bp, area=conf.area)
_compare_spectra(sp1, sp2)
@pytest.mark.remote_data
def test_remote_rn_powerlaw():
sp1 = spparser.parse_spec('rn(pl(5000, 1, flam), band(v), 1, photlam)')
_single_functioncall(sp1, SourceSpectrum, None,
'rn(pl(5000.0,1.0,flam),band(v),1.0,photlam)')
pl = SourceSpectrum(PowerLawFlux1D, amplitude=1 * units.FLAM,
x_0=5000 * u.AA, alpha=-1)
bp = SpectralElement.from_filter('johnson_v')
sp2 = pl.normalize(1 * units.PHOTLAM, band=bp)
_compare_spectra(sp1, sp2)
@pytest.mark.remote_data
def test_remote_rn_unit_1_flam():
sp1 = spparser.parse_spec(
'rn(unit(1,flam), band(acs, wfc1, fr388n#3881.0), 10, abmag)')
_single_functioncall(
sp1, SourceSpectrum, None,
'rn(unit(1.0,flam),band(acs,wfc1,fr388n#3881.0),10.0,abmag)')
constsp = SourceSpectrum(ConstFlux1D, amplitude=1 * units.FLAM)
bp = spectrum.band('acs, wfc1, fr388n#3881.0')
sp2 = constsp.normalize(10 * u.ABmag, band=bp)
_compare_spectra(sp1, sp2)
@pytest.mark.remote_data
def test_remote_rn_calspec_u():
sp1 = spparser.parse_spec(
'rn(crcalspec$bd_75d325_stis_002.fits, band(u), 9.5, vegamag) * '
'band(fos, blue, 4.3, g160l)')
# NOTE: No expr for this combo.
_single_functioncall(sp1, SourceSpectrum, None, '')
bd75 = SourceSpectrum.from_file(resolve_filename(
os.environ['PYSYN_CDBS'], 'calspec', 'bd_75d325_stis_002.fits'))
bp_u = SpectralElement.from_filter('johnson_u')
bd75_norm = bd75.normalize(
9.5 * units.VEGAMAG, band=bp_u, vegaspec=spectrum.Vega)
bp_fos = spectrum.band('fos, blue, 4.3, g160l')
sp2 = bd75_norm * bp_fos
_compare_spectra(sp1, sp2)
@pytest.mark.remote_data
def test_remote_z_vega():
sp1 = spparser.parse_spec('z(crcalspec$alpha_lyr_stis_007.fits, 0.1)')
_single_functioncall(sp1, SourceSpectrum, None,
'z(crcalspec$alpha_lyr_stis_007.fits,0.1)', ans_z=0.1)
sp2 = SourceSpectrum.from_file(resolve_filename(
os.environ['PYSYN_CDBS'], 'calspec', 'alpha_lyr_stis_007.fits'))
sp2.z = 0.1
_compare_spectra(sp1, sp2)
@pytest.mark.remote_data
class TestRenormPartialOverlap:
"""Test handling of ``rn(...)`` syntax for partial overlap."""
def setup_class(self):
self.fname = resolve_filename(
conf.rootdir, 'etc', 'source', 'qso_fos_001.dat')
def test_partial(self):
"""Warning only."""
input_str = f'rn({self.fname}, band(johnson, u), 15, abmag)'
with pytest.warns(AstropyUserWarning,
match=r'Spectrum is not defined everywhere'):
sp = spparser.parse_spec(input_str)
assert isinstance(sp, SourceSpectrum)
assert 'force_renorm' in sp.warnings
name = sp.meta['expr']
assert (name.startswith('rn(') and
name.endswith('qso_fos_001.dat,band(johnson,u),15.0,abmag)'))
def test_disjoint(self):
"""Raise error."""
input_str = f'rn({self.fname}, band(johnson, v), 15, abmag)'
with pytest.raises(synexceptions.DisjointError):
spparser.parse_spec(input_str)
@pytest.mark.remote_data
class TestEnvVar:
"""Test syntax using PYSYN_CDBS environment variable."""
def setup_class(self):
self.old_path = os.environ.get('PYSYN_CDBS')
if self.old_path is None:
os.environ['PYSYN_CDBS'] = conf.rootdir
def test_double_slash(self):
sp = spparser.parse_spec(
'spec($PYSYN_CDBS//calspec/gd71_mod_005.fits)')
assert isinstance(sp, SourceSpectrum)
assert isinstance(sp.model, Empirical1D)
def teardown_class(self):
if self.old_path is None:
del os.environ['PYSYN_CDBS']
@pytest.mark.parametrize(
'input_str',
['foo(1)',
'unit(1, nm)',
'unit(1, vegamag)',
'pl(5000, 1, nm)',
'pl(5000, 1, vegamag)',
'em(5000, 25, 1, nm)',
'rn(bb(5000), foo(v), 17, obmag)',
'rn(unit(1, flam), band(stis, ccd, g430m, c4451, 52X0.2), 10, abmag)',
'rn(unit(1, flam), band(stis, ccd, mirror, 50CCD), 10, abmag)',
'ebmvx(0.3, foo)'])
def test_parser_exception(input_str):
"""Test syntax that raises ParserError."""
with pytest.raises(exceptions.ParserError):
spparser.parse_spec(input_str)
class TestTokens:
"""Test underlying parser engine."""
def setup_class(self):
self.scanner = spparser.Scanner()
@pytest.mark.parametrize(
('token_type', 'token_str'),
[('FLOAT', '.1'),
('FLOAT', '1.1'),
('FLOAT', '1.'),
('FLOAT', '1'),
('FLOAT', '.1e+1'),
('FLOAT', '1.1e+1'),
('FLOAT', '1.e+1'),
('FLOAT', '1e+1'),
('FLOAT', '.1e-1'),
('FLOAT', '1.1e-1'),
('FLOAT', '1.e-1'),
('FLOAT', '1e-1'),
('FLOAT', '.1e1'),
('FLOAT', '1.1e1'),
('FLOAT', '1.e1'),
('FLOAT', '1e1'),
('IDENTIFIER', '/'),
('IDENTIFIER', 'xyzzy'),
('IDENTIFIER', 'xy20zzy'),
('IDENTIFIER', 'xyzzy20'),
('IDENTIFIER', '/a/b/c'),
('IDENTIFIER', 'foo$bar'),
('IDENTIFIER', 'a/b'),
('IDENTIFIER', '/a/b/c/foo.fits'),
('IDENTIFIER', 'C:/a/b/c/foo.fits')])
def test_single_token_1(self, token_type, token_str):
t = self.scanner.tokenize(token_str)
assert (t[0].type, t[0].attr) == (token_type, token_str)
@pytest.mark.parametrize(
('token_str', 'ans'),
[('(', ('LPAREN', None)),
(')', ('RPAREN', None)),
(',', (',', None)),
('+', ('+', None)),
('*', ('*', None)),
('@foolist', ('FILELIST', 'foolist'))])
def test_single_token_2(self, token_str, ans):
t = self.scanner.tokenize(token_str)
assert (t[0].type, t[0].attr) == ans
@pytest.mark.parametrize(
('input_str', 'ans'),
[('50CCD',
[('FLOAT', '50'),
('IDENTIFIER', 'CCD')]),
('500X0.2',
[('FLOAT', '500'),
('IDENTIFIER', 'X0.2')]),
('spec($PYSYN_CDBS//calspec/gd71_mod_005.fits)',
[('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', '$PYSYN_CDBS//calspec/gd71_mod_005.fits'),
('RPAREN', None)]),
('spec(earthshine.fits) * 0.5 + '
'rn(spec(Zodi.fits), band(johnson, v), 22.7, vegamag) + '
'(spec(el1215a.fits) + spec(el1302a.fits) + spec(el1356a.fits) + '
'spec(el2471a.fits)) * 0.5',
[('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'earthshine.fits'),
('RPAREN', None),
('*', None),
('FLOAT', '0.5'),
('+', None),
('IDENTIFIER', 'rn'),
('LPAREN', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'Zodi.fits'),
('RPAREN', None),
(',', None),
('IDENTIFIER', 'band'),
('LPAREN', None),
('IDENTIFIER', 'johnson'),
(',', None),
('IDENTIFIER', 'v'),
('RPAREN', None),
(',', None),
('FLOAT', '22.7'),
(',', None),
('IDENTIFIER', 'vegamag'),
('RPAREN', None),
('+', None),
('LPAREN', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el1215a.fits'),
('RPAREN', None),
('+', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el1302a.fits'),
('RPAREN', None),
('+', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el1356a.fits'),
('RPAREN', None),
('+', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el2471a.fits'),
('RPAREN', None),
('RPAREN', None),
('*', None),
('FLOAT', '0.5')]),
('spec(earthshine.fits) * 0.5 + '
'rn(spec(Zodi.fits), band(johnson, v), 22.7, vegamag) + '
'(spec(el1215a.fits) * 0.1 + spec(el1302a.fits) * 0.066666667 + '
'spec(el1356a.fits) * 0.0060 + spec(el2471a.fits) * 0.0050)',
[('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'earthshine.fits'),
('RPAREN', None),
('*', None),
('FLOAT', '0.5'),
('+', None),
('IDENTIFIER', 'rn'),
('LPAREN', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'Zodi.fits'),
('RPAREN', None),
(',', None),
('IDENTIFIER', 'band'),
('LPAREN', None),
('IDENTIFIER', 'johnson'),
(',', None),
('IDENTIFIER', 'v'),
('RPAREN', None),
(',', None),
('FLOAT', '22.7'),
(',', None),
('IDENTIFIER', 'vegamag'),
('RPAREN', None),
('+', None),
('LPAREN', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el1215a.fits'),
('RPAREN', None),
('*', None),
('FLOAT', '0.1'),
('+', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el1302a.fits'),
('RPAREN', None),
('*', None),
('FLOAT', '0.066666667'),
('+', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el1356a.fits'),
('RPAREN', None),
('*', None),
('FLOAT', '0.0060'),
('+', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el2471a.fits'),
('RPAREN', None),
('*', None),
('FLOAT', '0.0050'),
('RPAREN', None)]),
('spec(earthshine.fits) * 0.5 + '
'rn(spec(Zodi.fits), band(johnson, v), 22.7, vegamag) + '
'(spec(el1215a.fits) + spec(el1302a.fits) + spec(el1356a.fits) + '
'spec(el2471a.fits))',
[('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'earthshine.fits'),
('RPAREN', None),
('*', None),
('FLOAT', '0.5'),
('+', None),
('IDENTIFIER', 'rn'),
('LPAREN', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'Zodi.fits'),
('RPAREN', None),
(',', None),
('IDENTIFIER', 'band'),
('LPAREN', None),
('IDENTIFIER', 'johnson'),
(',', None),
('IDENTIFIER', 'v'),
('RPAREN', None),
(',', None),
('FLOAT', '22.7'),
(',', None),
('IDENTIFIER', 'vegamag'),
('RPAREN', None),
('+', None),
('LPAREN', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el1215a.fits'),
('RPAREN', None),
('+', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el1302a.fits'),
('RPAREN', None),
('+', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el1356a.fits'),
('RPAREN', None),
('+', None),
('IDENTIFIER', 'spec'),
('LPAREN', None),
('IDENTIFIER', 'el2471a.fits'),
('RPAREN', None),
('RPAREN', None)])])
def test_composite_token(self, input_str, ans):
t = self.scanner.tokenize(input_str)
for expect, actual in zip(ans, t):
assert (actual.type, actual.attr) == expect
def teardown_module():
"""Clear all cache."""
catalog.reset_cache()
observationmode.reset_cache()
spectrum.reset_cache()
| 2.234375 | 2 |
rsbroker/urls.py | land-pack/RsBroker | 0 | 10904 | <gh_stars>0
from __future__ import absolute_import
import os
from tornado.web import StaticFileHandler
from rsbroker.views import websocket
from rsbroker.views.error import NotFoundErrorHandler
settings = dict(
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static")
)
handlers = [
# Http api
# Events WebSocket API
(r"/api/ws", websocket.BrokerServerHandler),
# Static
(r"/static/(.*)", StaticFileHandler),
# Error
(r".*", NotFoundErrorHandler)
]
| 1.835938 | 2 |
tests/pheweb/load/command_flags_test.py | stellakeppo/pheweb | 4 | 10905 | <reponame>stellakeppo/pheweb<filename>tests/pheweb/load/command_flags_test.py<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Unit testing for command flags.
This tests the various command flags
and there helper methods.
"""
import argparse
import typing
import uuid
import pytest
from pheweb.load.command_flags import (
FLAG_CHROMOSOME,
add_chromosome_flag,
OUTPUT_COLUMN_CHROMOSOME,
FLAG_POSITION,
add_position_flag,
FLAG_REFERENCE,
add_reference_flag,
FLAG_ALTERNATIVE,
add_alternate_flag,
OUTPUT_COLUMN_REFERENCE,
OUTPUT_COLUMN_ALTERNATIVE,
FLAG_P_VALUE,
add_p_value_flag,
OUTPUT_COLUMN_P_VALUE,
FLAG_M_LOG_P_VALUE,
add_m_log_p_value_flag,
OUTPUT_COLUMN_M_LOG_P_VALUE,
add_beta_value_flag,
FLAG_BETA,
OUTPUT_COLUMN_BETA,
FLAG_SE_BETA,
add_se_beta_value_flag,
OUTPUT_COLUMN_SE_BETA,
OUTPUT_COLUMN_POSITION,
add_in_file_value_flag,
DEFAULT_IN_FILE,
add_out_file_value_flag,
DEFAULT_OUT_FILE,
add_rename_value_flag,
DEFAULT_RENAME,
add_exclude_value_flag,
FLAG_EXCLUDE,
FLAG_RENAME,
DEFAULT_EXCLUDE,
parse_exclude_args,
parse_rename_args,
)
def test_exclude_args() -> None:
"""
Test exclude args.
@return: None
"""
assert parse_exclude_args("") == set()
assert parse_exclude_args("a") == {"a"}
assert parse_exclude_args("a,b") == {"a", "b"}
assert parse_exclude_args("a,b,c") == {"a", "b", "c"}
def test_rename_args() -> None:
"""
Test rename args.
@return: None
"""
assert not parse_rename_args("")
assert parse_rename_args("a:b") == {"a": "b"}
assert parse_rename_args("a:b,c:d") == {"a": "b", "c": "d"}
with pytest.raises(ValueError):
assert parse_rename_args("a")
def parse_harness(
cli_argv: typing.List[str],
parse_method: typing.Callable[[argparse.ArgumentParser], None],
):
"""
Parse harness.
Calls the argument parser with the parse method.
Then calls the argument parse with the cli argv.
@param cli_argv: arguments to pass to parser
@param parse_method: parse set up method
@return: result of the parse
"""
parser = argparse.ArgumentParser(description=f"test : {parse_method}")
parse_method(parser)
return parser.parse_args(cli_argv)
def test_add_chromosome() -> None:
"""
Test arguments for chromosome column.
@return: None
"""
chromosome = str(uuid.uuid4())
arguments = parse_harness([FLAG_CHROMOSOME, chromosome], add_chromosome_flag)
assert arguments.chromosome == chromosome
assert parse_harness([], add_chromosome_flag).chromosome is OUTPUT_COLUMN_CHROMOSOME
def test_add_position():
"""
Test arguments for position column.
@return: None
"""
position = str(uuid.uuid4())
arguments = parse_harness([FLAG_POSITION, position], add_position_flag)
assert arguments.position == position
assert parse_harness([], add_position_flag).position is OUTPUT_COLUMN_POSITION
def test_add_ref() -> None:
"""
Test arguments for alternative column.
@return: None
"""
reference = str(uuid.uuid4())
arguments = parse_harness([FLAG_REFERENCE, reference], add_reference_flag)
assert arguments.reference == reference
assert parse_harness([], add_reference_flag).reference is OUTPUT_COLUMN_REFERENCE
def test_add_alt() -> None:
"""
Test arguments for alternative column.
@return: None
"""
alternative = str(uuid.uuid4())
arguments = parse_harness([FLAG_ALTERNATIVE, alternative], add_alternate_flag)
assert arguments.alternative == alternative
assert (
parse_harness([], add_alternate_flag).alternative is OUTPUT_COLUMN_ALTERNATIVE
)
def test_add_p_value() -> None:
"""
Test arguments for p-value column.
@return: None
"""
p_value = str(uuid.uuid4())
arguments = parse_harness([FLAG_P_VALUE, p_value], add_p_value_flag)
assert arguments.p_value == p_value
assert parse_harness([], add_p_value_flag).p_value == OUTPUT_COLUMN_P_VALUE
def test_add_m_log_p_value() -> None:
"""
Test arguments for m log p value column.
@return: None
"""
m_log_p_value = str(uuid.uuid4())
arguments = parse_harness(
[FLAG_M_LOG_P_VALUE, m_log_p_value], add_m_log_p_value_flag
)
assert arguments.m_log_p_value == m_log_p_value
arguments = parse_harness([], add_m_log_p_value_flag)
assert arguments.m_log_p_value == OUTPUT_COLUMN_M_LOG_P_VALUE
def test_add_beta() -> None:
"""
Test arguments for beta column.
@return: None
"""
beta = str(uuid.uuid4())
arguments = parse_harness([FLAG_BETA, beta], add_beta_value_flag)
assert arguments.beta == beta
assert parse_harness([], add_beta_value_flag).beta == OUTPUT_COLUMN_BETA
def test_add_se_beta() -> None:
"""
Test arguments for beta column.
@return: None
"""
se_beta = str(uuid.uuid4())
arguments = parse_harness([FLAG_SE_BETA, se_beta], add_se_beta_value_flag)
assert arguments.se_beta == se_beta
assert parse_harness([], add_se_beta_value_flag).se_beta == OUTPUT_COLUMN_SE_BETA
def test_add_exclude() -> None:
"""
Test argument for columns to exclude.
@return: None
"""
exclude = str(uuid.uuid4())
arguments = parse_harness([FLAG_EXCLUDE, exclude], add_exclude_value_flag)
assert arguments.exclude == exclude
assert parse_harness([], add_exclude_value_flag).exclude == DEFAULT_EXCLUDE
def test_add_rename() -> None:
"""
Test arguments for rename.
@return: None
"""
new_name = str(uuid.uuid4())
old_name = str(uuid.uuid4())
rename = f"{old_name}:{new_name}"
arguments = parse_harness([FLAG_RENAME, rename], add_rename_value_flag)
assert arguments.rename == rename
assert parse_harness([], add_rename_value_flag).rename == DEFAULT_RENAME
def test_parse_out_file() -> None:
"""
Test arguments for out file.
@return: None
"""
out_file = str(uuid.uuid4())
arguments = parse_harness(["--out-file", out_file], add_out_file_value_flag)
assert arguments.out_file == out_file
assert parse_harness([], add_out_file_value_flag).out_file == DEFAULT_OUT_FILE
def test_add_in_file() -> None:
"""
Test arguments for input file.
@return: None
"""
in_file = str(uuid.uuid4())
assert parse_harness([in_file], add_in_file_value_flag).in_file == in_file
assert parse_harness([], add_in_file_value_flag).in_file == DEFAULT_IN_FILE
| 2.53125 | 3 |
sandbox/error-correct-pass2.py | sadeepdarshana/khmer | 558 | 10906 | #! /usr/bin/env python
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) 2011-2015, Michigan State University.
# Copyright (C) 2015, The Regents of the University of California.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Michigan State University nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Contact: <EMAIL>
"""
Error correct reads based on a counting hash from a diginorm step.
Output sequences will be put in inputfile.corr.
% python scripts/error-correct-pass2 <counting.ct> <data1> [ <data2> <...> ]
Use '-h' for parameter help.
"""
import sys
import os
import screed
import khmer
from khmer import Countgraph
from khmer import khmer_args
from khmer.khmer_args import FileType as khFileType
DEFAULT_CUTOFF = 2
def output_single(read, new_sequence):
name = read.name
sequence = new_sequence
quality = None
if hasattr(read, 'quality'):
quality = read.quality[:len(sequence)]
sequence = sequence[:len(quality)] # sequence is _lengthened_
if quality:
assert len(sequence) == len(quality), (sequence, quality)
return "@%s\n%s\n+\n%s\n" % (name, sequence, quality)
else:
return ">%s\n%s\n" % (name, sequence)
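# Descriptive note: output_single above returns a four-line FASTQ record when the
# read carries quality values (trimming sequence and quality to matching lengths)
# and a two-line FASTA record otherwise.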
def main():
parser = khmer_args.build_counting_args(
"Correct reads against an already-computed table",
citations=['counting', 'SeqAn'])
parser.add_argument("--trusted-cov", dest="trusted_cov", type=int,
default=DEFAULT_CUTOFF)
parser.add_argument("--theta", dest="bits_theta", type=float, default=1.0)
parser.add_argument('-o', '--output', dest='output_file',
help="output file for histogram; defaults to "
"<first filename>.corr in cwd.",
type=khFileType('w'), default=None)
parser.add_argument('counts_table')
parser.add_argument('readfile')
args = parser.parse_args()
print('loading counts')
ht = Countgraph.load(args.counts_table)
aligner = khmer.ReadAligner(ht,
args.trusted_cov,
args.bits_theta)
print("trusted:", args.trusted_cov)
corrfp = args.output_file
if not corrfp:
outfile = os.path.basename(args.readfile) + '.corr'
corrfp = open(outfile, 'w')
n_corrected = 0
for n, read in enumerate(screed.open(args.readfile)):
if n % 10000 == 0:
print('...', n, n_corrected, file=sys.stderr)
seq = read.sequence.replace('N', 'A')
# build the alignment...
score, graph_alignment, read_alignment, truncated = \
aligner.align(seq)
if not truncated:
graph_seq = graph_alignment.replace("-", "")
if graph_seq != seq:
n_corrected += 1
seq = graph_seq
corrfp.write(output_single(read, seq))
if __name__ == '__main__':
main()
| 1.664063 | 2 |
metadata-ingestion/tests/integration/azure_ad/test_azure_ad.py | zhoxie-cisco/datahub | 1 | 10907 | <reponame>zhoxie-cisco/datahub
import json
import pathlib
from unittest.mock import patch
from freezegun import freeze_time
from datahub.ingestion.run.pipeline import Pipeline
from datahub.ingestion.source.identity.azure_ad import AzureADConfig
from tests.test_helpers import mce_helpers
FROZEN_TIME = "2021-08-24 09:00:00"
def test_azure_ad_config():
config = AzureADConfig.parse_obj(
dict(
client_id="00000000-0000-0000-0000-000000000000",
tenant_id="00000000-0000-0000-0000-000000000000",
client_secret="<KEY>",
redirect="https://login.microsoftonline.com/common/oauth2/nativeclient",
authority="https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000",
token_url="https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000/oauth2/token",
graph_url="https://graph.microsoft.com/v1.0",
ingest_users=True,
ingest_groups=True,
ingest_group_membership=True,
)
)
# Sanity on required configurations
assert config.client_id == "00000000-0000-0000-0000-000000000000"
assert config.tenant_id == "00000000-0000-0000-0000-000000000000"
assert config.client_secret == "<KEY>"
assert (
config.redirect
== "https://login.microsoftonline.com/common/oauth2/nativeclient"
)
assert (
config.authority
== "https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000"
)
assert (
config.token_url
== "https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000/oauth2/token"
)
assert config.graph_url == "https://graph.microsoft.com/v1.0"
# assert on defaults
assert config.ingest_users
assert config.ingest_groups
assert config.ingest_group_membership
@freeze_time(FROZEN_TIME)
def test_azure_ad_source_default_configs(pytestconfig, tmp_path):
test_resources_dir: pathlib.Path = (
pytestconfig.rootpath / "tests/integration/azure_ad"
)
with patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource.get_token"
) as mock_token, patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource._get_azure_ad_users"
) as mock_users, patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource._get_azure_ad_groups"
) as mock_groups, patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource._get_azure_ad_group_users"
) as mock_group_users:
mocked_functions(
test_resources_dir, mock_token, mock_users, mock_groups, mock_group_users
)
# Run an azure usage ingestion run.
pipeline = Pipeline.create(
{
"run_id": "test-azure-ad",
"source": {
"type": "azure-ad",
"config": {
"client_id": "00000000-0000-0000-0000-000000000000",
"tenant_id": "00000000-0000-0000-0000-000000000000",
"client_secret": "client_secret",
"redirect": "https://login.microsoftonline.com/common/oauth2/nativeclient",
"authority": "https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000",
"token_url": "https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000/oauth2/token",
"graph_url": "https://graph.microsoft.com/v1.0",
"ingest_group_membership": True,
"ingest_groups": True,
"ingest_users": True,
},
},
"sink": {
"type": "file",
"config": {
"filename": f"{tmp_path}/azure_ad_mces_default_config.json",
},
},
}
)
pipeline.run()
pipeline.raise_from_status()
mce_helpers.check_golden_file(
pytestconfig,
output_path=tmp_path / "azure_ad_mces_default_config.json",
golden_path=test_resources_dir / "azure_ad_mces_golden_default_config.json",
)
@freeze_time(FROZEN_TIME)
def test_azure_source_ingestion_disabled(pytestconfig, tmp_path):
test_resources_dir: pathlib.Path = (
pytestconfig.rootpath / "tests/integration/azure_ad"
)
with patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource.get_token"
) as mock_token, patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource._get_azure_ad_users"
) as mock_users, patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource._get_azure_ad_groups"
) as mock_groups, patch(
"datahub.ingestion.source.identity.azure_ad.AzureADSource._get_azure_ad_group_users"
) as mock_group_users:
mocked_functions(
test_resources_dir, mock_token, mock_users, mock_groups, mock_group_users
)
# Run an Azure usage ingestion run.
pipeline = Pipeline.create(
{
"run_id": "test-azure-ad",
"source": {
"type": "azure-ad",
"config": {
"client_id": "00000000-0000-0000-0000-000000000000",
"tenant_id": "00000000-0000-0000-0000-000000000000",
"client_secret": "client_secret",
"redirect": "https://login.microsoftonline.com/common/oauth2/nativeclient",
"authority": "https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000",
"token_url": "https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000/oauth2/token",
"graph_url": "https://graph.microsoft.com/v1.0",
"ingest_group_membership": "False",
"ingest_groups": "False",
"ingest_users": "False",
},
},
"sink": {
"type": "file",
"config": {
"filename": f"{tmp_path}/azure_ad_mces_ingestion_disabled.json",
},
},
}
)
pipeline.run()
pipeline.raise_from_status()
mce_helpers.check_golden_file(
pytestconfig,
output_path=tmp_path / "azure_ad_mces_ingestion_disabled.json",
golden_path=test_resources_dir / "azure_ad_mces_golden_ingestion_disabled.json",
)
def load_test_resources(test_resources_dir):
azure_ad_users_json_file = test_resources_dir / "azure_ad_users.json"
azure_ad_groups_json_file = test_resources_dir / "azure_ad_groups.json"
with azure_ad_users_json_file.open() as azure_ad_users_json:
reference_users = json.loads(azure_ad_users_json.read())
with azure_ad_groups_json_file.open() as azure_ad_groups_json:
reference_groups = json.loads(azure_ad_groups_json.read())
return reference_users, reference_groups
def mocked_functions(
test_resources_dir, mock_token, mock_users, mock_groups, mock_groups_users
):
# mock token response
mock_token.return_value = "xxxxxxxx"
# mock users and groups response
users, groups = load_test_resources(test_resources_dir)
mock_users.return_value = iter(list([users]))
mock_groups.return_value = iter(list([groups]))
# For simplicity, each user is placed in ALL groups.
# Create a separate response mock for each group in our sample data.
r = []
for _ in groups:
r.append(users)
mock_groups_users.return_value = iter(r)
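# Note: the patches above replace AzureADSource's network-facing helpers, so the
# ingestion pipeline runs entirely from the local JSON fixtures loaded by
# load_test_resources; each test then diffs its output against a checked-in
# golden file via mce_helpers.check_golden_file.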
| 2.046875 | 2 |
edit/core/optimizer/__init__.py | tpoisonooo/basicVSR_mge | 28 | 10908 | <reponame>tpoisonooo/basicVSR_mge
from .builder import build_optimizers, MGE_OPTIMIZERS, build_gradmanagers
from .default_constructor import DefaultOptimizerConstructor
| 0.972656 | 1 |
hackerrank/medium/Climbing_the_Leaderboard.py | HoussemBousmaha/Competitive-Programming | 6 | 10909 | <reponame>HoussemBousmaha/Competitive-Programming
def climbingLeaderboard(ranked, player):
ranked = sorted(list(set(ranked)), reverse=True)
ranks = []
# print(ranked)
for i in range(len(player)):
bi = 0
bs = len(ranked) - 1
index = 0
while (bi <= bs):
mid = (bi+bs) // 2
if (ranked[mid] > player[i]):
index = mid
bi = mid + 1
else:
bs = mid - 1
if (ranked[index] > player[i]):
index += 1
index += 1
ranks.append(index)
return ranks
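# Usage sketch with a sample case:
# climbingLeaderboard([100, 100, 50, 40, 40, 20, 10], [5, 25, 50, 120])
# returns [6, 4, 2, 1], i.e. the player's dense rank after each game.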
| 3.8125 | 4 |
src/python/config/parser/test_parsing.py | ncsa/NCSA-Genomics_MGC_GenomeGPS_CromwelWDL | 0 | 10910 | <gh_stars>0
#!/usr/bin/env python3
import unittest
from config.parser.parsing import Parser
class TestParser(unittest.TestCase):
# Create an instance of the Parser class
parser_inst = Parser(job_id="NA")
# Turn the project logger off during UnitTesting, so the end user is not confused by error messages
# (Some tests are designed to fail, so they will log "ERROR" messages that are expected)
parser_inst.project_logger.logger.disabled = True
def test_remove_comments(self):
# Should remove comment lines
input_lines = ["# Comment line", " # Whitespace with comment", 'Key="Value"']
filtered_lines = Parser.remove_comments(input_lines)
self.assertEqual(filtered_lines, ['Key="Value"'])
def test_clean_input_file(self):
# Should remove blank and comment lines
input_lines = ["", "", "# Comment line", 'Key="Value"']
filtered_lines = Parser.clean_input_file(input_lines)
self.assertEqual(filtered_lines, ['Key="Value"'])
def test_create_key_value_pairs(self):
# Note: the second test case purposefully has an '=' in the value (the parser only assumes the key has no '=')
input_lines = ['Key1="Value1"', 'Key2="Value=2"']
expected_output = [('Key1', '"Value1"'), ('Key2', '"Value=2"')]
self.assertEqual(expected_output,
self.parser_inst.create_key_value_pairs(input_lines, "test_create_key_value_pairs")
)
def test_validate_key_value_pairs_pass(self):
'''
This test has no assert. The method being tested returns nothing, but throws errors if anything fails
This test should pass if the validate function can be called without throwing an error
'''
valid_tuple = [("keyA", '"valueA"')]
self.parser_inst.validate_key_value_pairs(valid_tuple, file_path="dummy_file_path")
def test_validate_key_value_pairs_fail_empty_value(self):
no_value_tuple = [("keyA", "")]
with self.assertRaises(SystemExit):
self.parser_inst.validate_key_value_pairs(no_value_tuple, file_path="dummy_file_path")
def test_validate_key_value_pairs_pass_empty_optional_key(self):
# InputRead2 is a key that is allowed to be empty (see src/config/util/special_keys.py)
nullable_key_empty_value = [("DebugMode", "")]
self.parser_inst.validate_key_value_pairs(nullable_key_empty_value, file_path="dummy_file_path")
def test_validate_key_value_pairs_fail_empty_non_optional_key(self):
# InputRead1 is a key that is not allowed to be empty (it must have a value)
key_empty_value = [("InputRead1", "")]
with self.assertRaises(SystemExit):
self.parser_inst.validate_key_value_pairs(key_empty_value, file_path="dummy_file_path")
def test_validate_key_value_pairs_fail_no_quotes(self):
no_value_tuple = [("keyA", 'Value without quotes')]
with self.assertRaises(SystemExit):
self.parser_inst.validate_key_value_pairs(no_value_tuple, file_path="dummy_file_path")
def test_validate_key_value_pairs_fail_special_characters(self):
no_value_tuple = [("keyA", '!@#$%&&^%(*&^%s')]
with self.assertRaises(SystemExit):
self.parser_inst.validate_key_value_pairs(no_value_tuple, file_path="dummy_file_path")
def test_validate_key_value_pairs_fail_duplicate_keys(self):
no_value_tuple = [("duplicateKey", 'valueA'), ("duplicateKey", "valueB")]
with self.assertRaises(SystemExit):
self.parser_inst.validate_key_value_pairs(no_value_tuple, file_path="dummy_file_path")
def test_insert_values_into_dict(self):
original_dict = {'major.minor.A': "init_A_value",
'major.minor.B': "init_B_value",
'major.minor.C': "init_C_value"
}
key_value_tuples = [('A', '"final_A_value"'), ("B", '"final_B_value"')]
substituted_dict = self.parser_inst.insert_values_into_dict(original_dict,
key_value_tuples,
"test_insert_values_into_dict"
)
# The final dictionary should have new values for A and B, which C's value unchanged
expected_dict = {'major.minor.A': "final_A_value",
'major.minor.B': "final_B_value",
'major.minor.C': "init_C_value"
}
self.assertEqual(expected_dict, substituted_dict)
def test_combine_input_read_arrays_paired_end_both(self):
key_value_tuples = [("PairedEnd", '"true"'),
("NormalInputRead1", '"readL1.fq,readL2.fq,readL3.fq"'),
("NormalInputRead2", '"readR1.fq,readR2.fq,readR3.fq"')
]
expected_paired_end_value = [["readL1.fq", "readR1.fq"], ["readL2.fq", "readR2.fq"], ["readL3.fq", "readR3.fq"]]
actual_paired_end_value = self.parser_inst.combine_input_read_arrays(key_value_tuples,
"NormalInputRead1",
"NormalInputRead2"
)
self.assertEqual(expected_paired_end_value, actual_paired_end_value)
def test_combine_input_read_arrays_paired_end_one(self):
key_value_tuples = [("PairedEnd", '"true"'),
("NormalInputRead1", '"readL1.fq,readL2.fq,readL3.fq"'),
("NormalInputRead2", '""')
]
with self.assertRaises(SystemExit):
# Should fail, as paired end is true but only one read set is provided
self.parser_inst.combine_input_read_arrays(key_value_tuples, "NormalInputRead1", "NormalInputRead2")
def test_combine_input_read_arrays_paired_end_unequal_lists(self):
key_value_tuples = [("PairedEnd", '"true"'),
("NormalInputRead1", '"readL1.fq,readL2.fq,readL3.fq"'),
("NormalInputRead2", '"readR1.fq"')
]
with self.assertRaises(SystemExit):
# Should fail, as paired end is true but only one read set is provided
self.parser_inst.combine_input_read_arrays(key_value_tuples, "NormalInputRead1", "NormalInputRead2")
def test_combine_input_read_arrays_single_end_both(self):
key_value_tuples = [("PairedEnd", '"false"'),
("NormalInputRead1", '"readL1.fq,readL2.fq,readL3.fq"'),
("NormalInputRead2", '"readR1.fq,readR2.fq,readR3.fq"')
]
expected_paired_end_value = [["readL1.fq"], ["readL2.fq"], ["readL3.fq"],
["readR1.fq"], ["readR2.fq"], ["readR3.fq"]]
actual_paired_end_value = self.parser_inst.combine_input_read_arrays(key_value_tuples,
"NormalInputRead1",
"NormalInputRead2"
)
self.assertEqual(expected_paired_end_value, actual_paired_end_value)
def test_combine_input_read_arrays_single_end_one(self):
key_value_tuples = [("PairedEnd", '"false"'),
("NormalInputRead1", '"readL1.fq,readL2.fq,readL3.fq"'),
("NormalInputRead2", '""')
]
expected_paired_end_value = [["readL1.fq"], ["readL2.fq"], ["readL3.fq"]]
actual_paired_end_value = self.parser_inst.combine_input_read_arrays(key_value_tuples,
"NormalInputRead1",
"NormalInputRead2"
)
self.assertEqual(expected_paired_end_value, actual_paired_end_value)
| 3.359375 | 3 |
scripts/aggregate_membership.py | LibrariesHacked/wuthering-hacks | 5 | 10911 | ## Requires Python v3 and pandas (pip install pandas)
## This script takes the newcastle membership csv and attempts
## to reduce the file size as much as possible through aggregation and lookups
## Two lookup files to provide library names and dates are also created.
import csv
import os
import re
from datetime import datetime
import pandas
MEMBERDATA = '..\\data\\dashboard_newcastle_members.csv'
def read_member_data():
member_data_frame = pandas.DataFrame(
pandas.read_csv(open(os.path.join(os.path.dirname(__file__), MEMBERDATA), 'r')), index=None)
return member_data_frame
def run():
members = read_member_data()
postcodes = members['Postcode'].unique()
libraries = members['Library Registered At'].unique()
dates_added = members['Date Added'].unique()
times_added = members['Date Added'].unique()
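    # Possible next step (sketch only; the file and column names below are
    # assumptions, since the original script stops after collecting the unique
    # values above):
    # pandas.DataFrame({'Library': libraries}).to_csv('libraries_lookup.csv', index=False)
    # pandas.DataFrame({'Date': dates_added}).to_csv('dates_lookup.csv', index=False)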
run() | 2.921875 | 3 |
mdl/contracts/contract.py | fafhrd91/mdl | 3 | 10912 | <reponame>fafhrd91/mdl
"""Interface contract object"""
from __future__ import absolute_import
import six
import sys
import logging
from contracts.interface import ContractException, ContractNotRespected
from .extension import ID
from ..declarations import implementer
from ..verify import verifyObject
from ..interface import InterfaceClass
__all__ = (
'InterfaceContract', 'MethodContract',
'AttributeContract', 'ContractNotRespected')
class InterfaceContract(object):
def __init__(self, iface, contracts, adapter=None):
self.iface = iface
self.elements = {}
self.adapter = adapter
for elem in contracts:
self.elements[elem.name] = elem
self._cls = construct_class(iface, self.elements)
def verify(self, ob):
"""Raise exception if ob does not implement interface"""
verifyObject(self.iface, ob)
def bind(self, ob, verify=True, logger=None):
if verify:
self.verify(ob)
if logger is None:
logger = logging
return self._cls(ob, logger)
def bind_adapter(self, factory, logger=None):
if logger is None:
logger = logging
if self.adapter is not None:
return BoundAdapterContract(factory, self.adapter, logger)
return factory
class AdapterContract(object):
def __init__(self, iface, args, exceptions):
self.name = iface.__name__
self.iface = iface
self.args = args
self.exceptions = exceptions
def _check_args_contract(self, adapter, ob, args, kwargs):
bound = self.getcallargs(*args, **kwargs)
for arg, contract in self.args_contract.items():
context = {'self': ob}
try:
contract._check_contract(context, bound[arg], silent=True)
except ContractNotRespected as e:
msg = 'Breach for argument %r to %s:%s(...)\n' % (
arg, self.iface.__name__, self.name)
e.error = msg + e.error
raise e
def __call__(self, factory, logger, *args, **kwargs):
# self._check_args_contract(ob, args, kwargs)
try:
result = factory(*args, **kwargs)
except:
exc_type, exc_value, exc_tb = sys.exc_info()
# check exception contract
context = {'factory': factory}
for contract in self.exceptions:
try:
contract._check_contract(context, exc_value, silent=True)
except ContractNotRespected:
continue
else:
break
else:
# log un-defined exception
logger.error(
'Un-defined exception received from %s.%s(...)' % (
self.iface.__name__, self.name),
exc_info=(exc_type, exc_value, exc_tb))
six.reraise(exc_type, exc_value, exc_tb)
if not self.iface.providedBy(result):
raise ContractException(
'interface %s is not provided by adapted object %s' % (
self.name, result))
return result
class BoundAdapterContract(object):
def __init__(self, factory, contract, logger):
self.factory = factory
self.contract = contract
self.logger = logger
def __call__(self, *args, **kwargs):
return self.contract(self.factory, self.logger, *args, **kwargs)
class AttributeContract(object):
def __init__(self, iface, attr, contract):
self.name = attr.__name__
self.iface = iface
self.attr = attr
self.contract = contract
def check_value(self, ob, value):
context = {'self': ob}
try:
self.contract._check_contract(context, value, silent=True)
except ContractNotRespected as e:
msg = 'Breach for attribute value of %s.%s\n' % (
self.iface.__name__, self.name)
e.error = msg + e.error
raise e
type_ob = context.get(ID)
if (type_ob is not None and
not isinstance(value, BoundInterfaceContract) and
isinstance(type_ob, InterfaceClass)):
return type_ob.contract(value)
return value
class MethodContract(object):
def __init__(self, iface, method,
args_contract, result_contract, exceptions):
self.name = method.__name__
self.iface = iface
self.method = method
self.args_contract = args_contract
self.result_contract = result_contract
self.exceptions = exceptions
def _check_args_contract(self, ob, args, kwargs):
bound = self.getcallargs(*args, **kwargs)
for arg, contract in self.args_contract.items():
context = {'self': ob}
try:
contract._check_contract(context, bound[arg], silent=True)
except ContractNotRespected as e:
msg = 'Breach for argument %r to %s:%s(...)\n' % (
arg, self.iface.__name__, self.name)
e.error = msg + e.error
raise e
def _check_result_contract(self, ob, result):
context = {'self': ob}
try:
self.result_contract._check_contract(context, result, silent=False)
except ContractNotRespected as e:
msg = 'Breach for return value of %s.%s(...)\n' % (
self.iface.__name__, self.name)
e.error = msg + e.error
raise e
type_ob = context.get(ID)
if (type_ob is not None and
not isinstance(result, BoundInterfaceContract) and
isinstance(type_ob, InterfaceClass)):
return type_ob.contract(result)
return result
def __call__(self, ob, logger, *args, **kwargs):
self._check_args_contract(ob, args, kwargs)
try:
result = getattr(ob, self.name)(*args, **kwargs)
except:
exc_type, exc_value, exc_tb = sys.exc_info()
# check exception contract
context = {'self': ob}
for contract in self.exceptions:
try:
contract._check_contract(context, exc_value, silent=True)
except ContractNotRespected:
continue
else:
break
else:
# log un-defined exception
logger.exception(
'Un-defined exception received from %s.%s(...)' % (
self.iface.__name__, self.name),
exc_info=(exc_type, exc_value, exc_tb))
six.reraise(exc_type, exc_value, exc_tb)
if self.result_contract is not None:
result = self._check_result_contract(ob, result)
return result
def getcallargs(self, *positional, **named):
"""Get the mapping of arguments to values."""
arg2value = {}
args = self.method.positional
num_pos = len(positional)
num_total = num_pos + len(named)
num_args = len(args)
for arg, value in zip(args, positional):
arg2value[arg] = value
defaults = self.method.optional
if 0 < num_args < num_pos:
raise TypeError('%s() takes %s %d %s (%d given)' % (
self.name, 'at most' if defaults else 'exactly', num_args,
'arguments' if num_args > 1 else 'argument', num_total))
elif num_args == 0 and num_total:
raise TypeError(
'%s() takes no arguments (%d given)' % (self.name, num_total))
for arg in args:
if isinstance(arg, str) and arg in named:
if arg in arg2value:
raise TypeError(
"%s() got multiple values for keyword "
"argument '%s'" % (self.name, arg))
else:
arg2value[arg] = named.pop(arg)
if defaults: # fill in any missing values with the defaults
for arg, value in defaults.items():
if arg not in arg2value:
arg2value[arg] = value
if named:
unexpected = next(iter(named))
raise TypeError(
"%s() got an unexpected keyword argument '%s'" %
(self.name, unexpected))
unassigned = num_args - len([arg for arg in args if arg in arg2value])
if unassigned:
num_required = num_args - len(defaults)
raise TypeError('%s() takes %s %d %s (%d given)' % (
self.name, 'at least' if defaults else 'exactly', num_required,
'arguments' if num_required > 1 else 'argument', num_total))
return arg2value
class AttributeDescriptor(object):
""" The AttributeDescriptor serves as a wrapper
for interface's attributes """
def __init__(self, attr):
self.attr = attr
self.name = attr.name
def __get__(self, instance, cls):
ob = instance.__context__
value = getattr(ob, self.name)
return self.attr.check_value(ob, value)
def __set__(self, instance, value):
ob = instance.__context__
self.attr.check_value(ob, value)
# extract original object
if isinstance(value, BoundInterfaceContract):
value = value.__context__
setattr(ob, self.name, value)
class BoundInterfaceContract(object):
def __init__(self, context, logger):
self.__context__ = context
self.__logger__ = logger
def __setattr__(self, name, value):
if name in self.__slots__:
super(BoundInterfaceContract, self).__setattr__(name, value)
else:
raise AttributeError(name)
def method_wrapper(element):
def func(self, *args, **kwargs):
return element(self.__context__, self.__logger__, *args, **kwargs)
return func
def construct_class(iface, elements):
attrs = {'__module__': iface.__module__}
slots = {'__context__', '__logger__'}
for name, element in elements.items():
slots.add(name)
if isinstance(element, AttributeContract):
attrs[name] = AttributeDescriptor(element)
else:
attrs[name] = method_wrapper(element)
name = '%sBoundContract' % iface.__name__
cls = type(name, (BoundInterfaceContract,), attrs)
cls.__slots__ = tuple(slots)
return implementer(iface)(cls)
| 2.46875 | 2 |
hourglass/train.py | ziqi123/AutoParking | 0 | 10913 | <gh_stars>0
import numpy as np
import torch
import torchvision.transforms as transforms
from dataloader.dataloader_hourglass import heatmap_Dataloader
import os
from network import KFSGNet
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyper-parameters
num_epochs = 200
learning_rate = 0.001
transform = transforms.Compose([
transforms.ToTensor()])
params = dict()
params['data_normalize_factor'] = 256
params['dataset_dir'] = "./"
params['rgb2gray'] = False
params['dataset'] = "heatmap_dataset_all"
params['train_batch_sz'] = 16
params['val_batch_sz'] = 1
params['sigma'] = 3
dataloaders, dataset_sizes = heatmap_Dataloader(params)
train_loader = dataloaders['train']
test_loader = dataloaders['val']
# Define your model
model = KFSGNet()
# model.load_state_dict(torch.load(
# '/media/home_bak/ziqi/park/hourglass/10heatmap5.ckpt'))
# move model to the right device
model.to(device)
model.train()
# Loss and optimizer
loss_fn = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Multi-step learning rate decay:
# different epoch intervals use different update frequencies - in some
# intervals the learning rate is updated, in others it is left unchanged.
# The milestones parameter lists the epochs at which the learning rate is
# updated: within [0, 200] the learning rate is not changed, while at the
# right edge of [200, 300], [300, 320] ... [340, 400] it is updated once.
# The gamma parameter is the factor the learning rate is multiplied by at
# each milestone.
# torch.optim.lr_scheduler.MultiStepLR(optimizer,
# milestones=[30, 60, 80, 100, 120, 140], gamma=0.5)
print(optimizer.state_dict()['param_groups'][0]['lr'])
# For updating learning rate
# Train the model
total_step = len(train_loader)
curr_lr = learning_rate
print("start")
def calculate_mask(heatmaps_targets):
    """
    :param heatmaps_targets: Variable (N,C,H,W) target heatmaps
    :return: mask Variable (N,C,H,W) and the indices [N_idx, C_idx] of non-empty maps
    """
N, C, _, _ = heatmaps_targets.size()
N_idx = []
C_idx = []
for n in range(N):
for c in range(C):
max_v = heatmaps_targets[n, c, :, :].max().data
if max_v != 0.0:
N_idx.append(n)
C_idx.append(c)
mask = torch.zeros(heatmaps_targets.size())
mask[N_idx, C_idx, :, :] = 1.
mask = mask.float().cuda()
return mask, [N_idx, C_idx]
# def MSE(y_pred, gt):
# loss = 0
# loss += 0.5 * np.sum((y_pred - gt)**2)
# vec_gt = [[0]*3] * 5
# for w in range(4):
# vec_gt[w] = np.array([gt[w][0],
# gt[w][1]])
# vector_gt = vec_gt[1]-vec_gt[0]
# vec_pred = [[0]*3] * 5
# for v in range(4):
# vec_pred[w] = np.array([y_pred[w][0],
# y_pred[w][1]])
# vector_pred = vec_pred[1]-vec_pred[0]
# loss += (vector_gt[0]*vector_pred[1]-vector_pred[0]*vector_gt[1])**0.5
for epoch in range(num_epochs):
tmp = 0
for i, (data, gt, mask, item, imgPath, heatmaps_targets) in enumerate(train_loader):
# print(i)
data = data.to(device)
gt = gt.to(device)
mask = mask.to(device)
gt = gt.view(-1, 8)
heatmaps_targets = heatmaps_targets.to(device)
mask, indices_valid = calculate_mask(heatmaps_targets)
# print(heatmaps_targets.shape)
# Forward pass
outputs = model(data)
outputs = outputs * mask
heatmaps_targets = heatmaps_targets * mask
# print(outputs.shape)
loss = loss_fn(outputs, heatmaps_targets)
tmp += loss.item()
# exit()
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if i % 10 == 0:
print("Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}, average_loss: {:.4f}, learning_rate: {}".format(
epoch + 1, num_epochs, i + 1, total_step, loss.item(), tmp / (i+1), optimizer.state_dict()['param_groups'][0]['lr']))
if (epoch + 1) % 10 == 0:
torch.save(model.state_dict(), '{}heatmap4.ckpt'.format(epoch + 1))
# card2 heatmap 26688
# card0 heatmap2 29009
| 2.328125 | 2 |
wpa-psk/wpa-psk.py | ranisalt/rsaur | 0 | 10914 | #!/usr/bin/env python3
import sys
from argparse import ArgumentParser
from getpass import getpass
from hashlib import pbkdf2_hmac
from signal import signal, SIGINT
def die(*_, **__):
sys.exit()
signal = signal(SIGINT, die)
iwd = """[Security]
PreSharedKey={psk}"""
supplicant = """network={{
ssid={ssid}
#psk={passphrase}
psk={psk}
}}"""
parser = ArgumentParser(
description="%(prog)s pre-computes PSK entries for network configuration blocks of wpa_supplicant or iwd config. An ASCII passphrase and SSID are used to generate a 256-bit PSK."
)
parser.add_argument("ssid", help="The SSID whose passphrase should be derived.")
parser.add_argument(
"passphrase",
help="The passphrase to use. If not included on the command line, passphrase will be read from standard input.",
nargs="?",
)
parser.add_argument(
"--iwd",
"-i",
dest="template",
action="store_const",
const=iwd,
default=supplicant,
help="Generate for iwd (default: generate for wpa_supplicant).",
)
args = parser.parse_args()
if not args.passphrase:
print("# reading passphrase from stdin", file=sys.stderr)
args.passphrase = getpass(prompt="")
if not 8 <= len(args.passphrase) <= 63:
print("Passphrase must be 8..63 characters", file=sys.stderr)
sys.exit(1)
passphrase = args.passphrase.encode()
if any(b < 32 or b == 127 for b in passphrase):
print("Invalid passphrase character", file=sys.stderr)
sys.exit(1)
ssid = args.ssid.encode()
psk = pbkdf2_hmac("sha1", passphrase, ssid, iterations=4096, dklen=32)
print(args.template.format(ssid=args.ssid, passphrase=args.passphrase, psk=psk.hex()))
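# Example invocations (the SSID and passphrase below are assumed values, not
# taken from this script); they follow the argparse definition above:
#
#   ./wpa-psk.py HomeWiFi "correct horse battery"   # wpa_supplicant block
#   ./wpa-psk.py --iwd HomeWiFi                     # iwd block, passphrase read from stdin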
| 3.046875 | 3 |
cms/management/commands/subcommands/copy_lang.py | mightyiam/django-cms | 2 | 10915 | <filename>cms/management/commands/subcommands/copy_lang.py
# -*- coding: utf-8 -*-
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from cms.api import copy_plugins_to_language
from cms.models import Title, Page
from cms.utils.i18n import get_language_list
class CopyLangCommand(BaseCommand):
args = '<language_from language_to>'
help = u'duplicate the cms content from one lang to another (to boot a new lang) using draft pages'
def handle(self, *args, **kwargs):
verbose = 'verbose' in args
only_empty = 'force-copy' not in args
site = [arg.split("=")[1] for arg in args if arg.startswith("site")]
if site:
site = site.pop()
else:
site = settings.SITE_ID
#test both langs
try:
assert len(args) >= 2
from_lang = args[0]
to_lang = args[1]
assert from_lang != to_lang
except AssertionError:
raise CommandError("Error: bad arguments -- Usage: manage.py cms copy-lang <lang_from> <lang_to>")
try:
assert from_lang in get_language_list(site)
assert to_lang in get_language_list(site)
except AssertionError:
raise CommandError("Both languages have to be present in settings.LANGUAGES and settings.CMS_LANGUAGES")
for page in Page.objects.on_site(site).drafts():
# copy title
if from_lang in page.get_languages():
try:
title = page.get_title_obj(to_lang, fallback=False)
except Title.DoesNotExist:
title = page.get_title_obj(from_lang)
if verbose:
self.stdout.write('copying title %s from language %s\n' % (title.title, from_lang))
title.id = None
title.language = to_lang
title.save()
# copy plugins using API
if verbose:
self.stdout.write('copying plugins for %s from %s\n' % (page.get_page_title(from_lang), from_lang))
copy_plugins_to_language(page, from_lang, to_lang, only_empty)
else:
if verbose:
self.stdout.write('Skipping page %s, language %s not defined\n' % (page, from_lang))
self.stdout.write(u"all done")
| 2.21875 | 2 |
easyml/mainsite/migrations/0015_auto_20181014_1837.py | evancasey1/EasyML | 0 | 10916 | <filename>easyml/mainsite/migrations/0015_auto_20181014_1837.py
# Generated by Django 2.1.2 on 2018-10-14 18:37
from django.db import migrations
import picklefield.fields
class Migration(migrations.Migration):
dependencies = [
('mainsite', '0014_mlmodel_type_num'),
]
operations = [
migrations.AlterField(
model_name='mlmodel',
name='data',
field=picklefield.fields.PickledObjectField(editable=False),
),
]
| 1.632813 | 2 |
adsrefpipe/refparsers/handler.py | golnazads/ADSReferencePipeline | 0 | 10917 | <filename>adsrefpipe/refparsers/handler.py
# -*- coding: utf-8 -*-
from adsrefpipe.refparsers.CrossRefXML import CrossReftoREFs
from adsrefpipe.refparsers.ElsevierXML import ELSEVIERtoREFs
from adsrefpipe.refparsers.JATSxml import JATStoREFs
from adsrefpipe.refparsers.IOPxml import IOPtoREFs
from adsrefpipe.refparsers.SpringerXML import SPRINGERtoREFs
from adsrefpipe.refparsers.APSxml import APStoREFs
from adsrefpipe.refparsers.NatureXML import NATUREtoREFs
from adsrefpipe.refparsers.AIPxml import AIPtoREFs
from adsrefpipe.refparsers.WileyXML import WILEYtoREFs
from adsrefpipe.refparsers.NLM3xml import NLMtoREFs
from adsrefpipe.refparsers.AGUxml import AGUtoREFs
from adsrefpipe.refparsers.arXivTXT import ARXIVtoREFs
def verify(parser_name):
"""
:param parser_name: parser name from db
:return:
"""
# based on parser name return the parser class, if it is an xml
if parser_name == 'CrossRef':
return CrossReftoREFs
if parser_name == 'ELSEVIER':
return ELSEVIERtoREFs
if parser_name == 'JATS':
return JATStoREFs
if parser_name == 'IOP':
return IOPtoREFs
if parser_name == 'SPRINGER':
return SPRINGERtoREFs
if parser_name == 'APS':
return APStoREFs
if parser_name == 'NATURE':
return NATUREtoREFs
if parser_name == 'AIP':
return AIPtoREFs
if parser_name == 'WILEY':
return WILEYtoREFs
if parser_name == 'NLM':
return NLMtoREFs
if parser_name == 'AGU':
return AGUtoREFs
if parser_name == 'arXiv':
return ARXIVtoREFs
return None
| 2.109375 | 2 |
src/onegov/translator_directory/layout.py | politbuero-kampagnen/onegov-cloud | 0 | 10918 | from cached_property import cached_property
from purl import URL
from onegov.translator_directory import _
from onegov.core.elements import Block, Link, LinkGroup, Confirm, Intercooler
from onegov.core.utils import linkify
from onegov.org.layout import DefaultLayout as BaseLayout
from onegov.translator_directory.collections.documents import \
TranslatorDocumentCollection
from onegov.translator_directory.collections.language import LanguageCollection
from onegov.translator_directory.collections.translator import \
TranslatorCollection
from onegov.translator_directory.constants import member_can_see, \
editor_can_see, GENDERS, ADMISSIONS, PROFESSIONAL_GUILDS, \
INTERPRETING_TYPES
class DefaultLayout(BaseLayout):
@staticmethod
def linkify(text):
return linkify(text)
@staticmethod
def format_languages(languages):
return ', '.join(sorted((lang.name for lang in languages or [])))
def format_gender(self, gender):
return self.request.translate(GENDERS[gender])
@staticmethod
def format_drive_distance(number):
if not number:
return ''
return f'{number} km'
def format_boolean(self, val):
assert isinstance(val, bool)
return self.request.translate((_('Yes') if val else _('No')))
def format_admission(self, val):
return self.request.translate(ADMISSIONS[val])
def show(self, attribute_name):
"""Some attributes on the translator are hidden for less privileged
users"""
if self.request.is_member:
return attribute_name in member_can_see
if self.request.is_editor:
return attribute_name in editor_can_see
return True
def color_class(self, count):
""" Depending how rare a language is offered by translators,
apply a color code using the returned css class
"""
if count <= 5:
return 'text-orange'
def format_prof_guild(self, key):
return self.request.translate(PROFESSIONAL_GUILDS[key])
def format_interpreting_type(self, key):
return self.request.translate(INTERPRETING_TYPES[key])
class TranslatorLayout(DefaultLayout):
@cached_property
def file_collection(self):
return TranslatorDocumentCollection(
self.request.session,
translator_id=self.model.id,
category=None
)
@cached_property
def editbar_links(self):
if self.request.is_admin:
return [
LinkGroup(
title=_('Add'),
links=(
Link(
text=_("Add translator"),
url=self.request.class_link(
TranslatorCollection, name='new'
),
attrs={'class': 'new-person'}
),
)
),
Link(
text=_("Edit"),
url=self.request.link(
self.model, name='edit'
),
attrs={'class': 'edit-link'}
),
Link(
_('Delete'),
self.csrf_protected_url(
self.request.link(self.model)
),
attrs={'class': 'delete-link'},
traits=(
Confirm(
_("Do you really want to delete "
"this translator?"),
_("This cannot be undone."),
_("Delete translator"),
_("Cancel")
),
Intercooler(
request_method='DELETE',
redirect_after=self.request.class_link(
TranslatorCollection
)
)
)
),
Link(
_('Voucher template'),
self.request.link(self.request.app.org, name='voucher'),
attrs={'class': 'create-excel'}
),
Link(
_('Documents'),
self.request.link(self.file_collection),
attrs={'class': 'documents'}
),
]
elif self.request.is_editor:
return [
Link(
text=_("Edit"),
url=self.request.link(
self.model, name='edit-restricted'
),
attrs={'class': 'edit-link'}
),
Link(
_('Voucher template'),
self.request.link(self.request.app.org, name='voucher'),
attrs={'class': 'create-excel'}
),
]
elif self.request.is_member:
return [
Link(
_('Voucher template'),
self.request.link(self.request.app.org, name='voucher'),
attrs={'class': 'create-excel'}
)
]
@cached_property
def breadcrumbs(self):
links = super().breadcrumbs + [
Link(
text=_('Translators'),
url=self.request.class_link(TranslatorCollection)
),
Link(text=self.model.title)
]
return links
class EditTranslatorLayout(TranslatorLayout):
@cached_property
def title(self):
return _('Edit translator')
@cached_property
def breadcrumbs(self):
links = super().breadcrumbs
links.append(Link(_('Edit')))
return links
class TranslatorCollectionLayout(DefaultLayout):
@cached_property
def title(self):
return _('Search for translators')
@cached_property
def breadcrumbs(self):
return super().breadcrumbs + [
Link(
text=_('Translators'),
url=self.request.class_link(TranslatorCollection)
)
]
@cached_property
def editbar_links(self):
if self.request.is_admin:
return [
LinkGroup(
_('Add'),
links=(
Link(
text=_("Add translator"),
url=self.request.class_link(
TranslatorCollection, name='new'
),
attrs={'class': 'new-person'}
),
Link(
text=_("Add language"),
url=self.request.class_link(
LanguageCollection, name='new'
),
attrs={'class': 'new-language'}
)
)
),
Link(
_('Export Excel'),
url=self.request.class_link(
TranslatorCollection, name='export'
),
attrs={'class': 'export-link'}
),
Link(
_('Voucher template'),
self.request.link(self.request.app.org, name='voucher'),
attrs={'class': 'create-excel'}
)
]
elif self.request.is_editor or self.request.is_member:
return [
Link(
_('Voucher template'),
self.request.link(self.request.app.org, name='voucher'),
attrs={'class': 'create-excel'}
)
]
class AddTranslatorLayout(TranslatorCollectionLayout):
@cached_property
def title(self):
return _('Add translator')
@cached_property
def breadcrumbs(self):
links = super().breadcrumbs
links.append(Link(_('Add')))
return links
@property
def editbar_links(self):
return []
class TranslatorDocumentsLayout(DefaultLayout):
@cached_property
def breadcrumbs(self):
return super().breadcrumbs + [
Link(
text=_('Translators'),
url=self.request.class_link(TranslatorCollection)
),
Link(
text=self.model.translator.title,
url=self.request.link(self.model.translator)
),
Link(text=_('Documents'))
]
@cached_property
def upload_url(self):
url = URL(self.request.link(self.model, name='upload'))
url = url.query_param('category', self.model.category)
return self.csrf_protected_url(url.as_string())
def link_for(self, category):
return self.request.class_link(
self.model.__class__,
{'translator_id': self.model.translator_id, 'category': category}
)
class LanguageCollectionLayout(DefaultLayout):
@property
def breadcrumbs(self):
links = super().breadcrumbs
links.append(Link(_('Languages')))
return links
@property
def editbar_links(self):
return [LinkGroup(
_('Add'),
links=(
Link(
text=_("Add language"),
url=self.request.class_link(
LanguageCollection, name='new'
),
attrs={'class': 'new-language'}
),
)
)] if self.request.is_admin else []
class LanguageLayout(DefaultLayout):
@property
def breadcrumbs(self):
links = super().breadcrumbs
links.append(
Link(_('Languages'),
url=self.request.class_link(LanguageCollection))
)
return links
class EditLanguageLayout(LanguageLayout):
@property
def breadcrumbs(self):
links = super().breadcrumbs
links.append(Link(self.model.name))
links.append(Link(_('Edit')))
return links
@cached_property
def editbar_links(self):
if self.request.is_admin:
if not self.model.deletable:
return [
Link(
_('Delete'),
self.csrf_protected_url(
self.request.link(self.model)
),
attrs={'class': 'delete-link'},
traits=(
Block(
_("This language is used and can't be "
"deleted."),
no=_("Cancel")
),
)
),
]
return [
Link(
_('Delete'),
self.csrf_protected_url(
self.request.link(self.model)
),
attrs={'class': 'delete-link'},
traits=(
Confirm(
_("Do you really want to delete "
"this language?"),
_("This cannot be undone."),
_("Delete language"),
_("Cancel")
),
Intercooler(
request_method='DELETE',
redirect_after=self.request.class_link(
TranslatorCollection
)
)
)
),
]
return []
class AddLanguageLayout(LanguageLayout):
@property
def breadcrumbs(self):
links = super().breadcrumbs
links.append(Link(_('Add')))
return links
@property
def editbar_links(self):
return []
| 2.1875 | 2 |
src/dsanalizer/informations.py | perqu/Dataset-Analizer | 0 | 10919 | import pandas as pd
import numpy as np
import io
def info(df):
print("------------DIMENSIONS------------")
print("Rows:", df.shape[0])
print("Columns:", df.shape[1])
print("--------------DTYPES--------------")
columns = df.columns.tolist()
integers = df.select_dtypes("integer").columns.tolist()
floats = df.select_dtypes("float").columns.tolist()
bools = df.select_dtypes("bool").columns.tolist()
objects = df.select_dtypes("object").columns.tolist()
dataType = []
for el in columns:
if el in integers:
dataType.append('int')
if el in floats:
dataType.append('float')
if el in bools:
dataType.append('bool')
if el in objects:
dataType.append('object')
d = {'Column' : columns, 'Type': dataType}
print(pd.DataFrame(d))
print("----------MISSING VALUES----------")
print("Is any value missing? ", np.where(df.isnull().values.any() == False, "No", "Yes"), "\n")
buf = io.StringIO()
df.info(buf=buf)
info = buf.getvalue().split('\n')[-2].split(":")[1].strip()
print("----------MEMORY USAGE------------ \n", info) | 3.3125 | 3 |
src/packagedcode/cargo.py | Siddhant-K-code/scancode-toolkit | 1,511 | 10920 | <reponame>Siddhant-K-code/scancode-toolkit<filename>src/packagedcode/cargo.py
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import logging
import re
import attr
from packageurl import PackageURL
import toml
from commoncode import filetype
from commoncode import fileutils
from packagedcode import models
"""
Handle Rust cargo crates
"""
TRACE = False
logger = logging.getLogger(__name__)
if TRACE:
import sys
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
@attr.s()
class RustCargoCrate(models.Package):
default_type = 'cargo'
default_primary_language = 'Rust'
default_web_baseurl = 'https://crates.io'
default_download_baseurl = 'https://crates.io/api/v1'
default_api_baseurl = 'https://crates.io/api/v1'
@classmethod
def get_package_root(cls, manifest_resource, codebase):
return manifest_resource.parent(codebase)
def repository_homepage_url(self, baseurl=default_web_baseurl):
if self.name:
return '{}/crates/{}'.format(baseurl, self.name)
def repository_download_url(self, baseurl=default_download_baseurl):
if self.name and self.version:
return '{}/crates/{}/{}/download'.format(baseurl, self.name, self.version)
def api_data_url(self, baseurl=default_api_baseurl):
if self.name:
return '{}/crates/{}'.format(baseurl, self.name)
@attr.s()
class CargoToml(RustCargoCrate, models.PackageManifest):
file_patterns = ('Cargo.toml',)
extensions = ('.toml',)
@classmethod
def is_manifest(cls, location):
"""
Return True if the file at ``location`` is likely a manifest of this type.
"""
return filetype.is_file(location) and fileutils.file_name(location).lower() == 'cargo.toml'
@classmethod
def recognize(cls, location):
"""
Yield one or more Package manifest objects given a file ``location`` pointing to a
package archive, manifest or similar.
"""
package_data = toml.load(location, _dict=dict)
core_package_data = package_data.get('package', {})
name = core_package_data.get('name')
version = core_package_data.get('version')
description = core_package_data.get('description')
if description:
description = description.strip()
authors = core_package_data.get('authors')
parties = list(party_mapper(authors, party_role='author'))
declared_license = core_package_data.get('license')
package = cls(
name=name,
version=version,
description=description,
parties=parties,
declared_license=declared_license
)
yield package
@attr.s()
class CargoLock(RustCargoCrate, models.PackageManifest):
file_patterns = ('Cargo.lock',)
extensions = ('.lock',)
@classmethod
def is_manifest(cls, location):
"""
Return True if the file at ``location`` is likely a manifest of this type.
"""
return (filetype.is_file(location)
and fileutils.file_name(location).lower() == 'cargo.lock')
@classmethod
def recognize(cls, location):
"""
Yield one or more Package manifest objects given a file ``location`` pointing to a
package archive, manifest or similar.
"""
package_data = toml.load(location, _dict=dict)
package_dependencies = []
core_package_data = package_data.get('package', [])
for dep in core_package_data:
package_dependencies.append(
models.DependentPackage(
purl=PackageURL(
type='crates',
name=dep.get('name'),
version=dep.get('version')
).to_string(),
requirement=dep.get('version'),
scope='dependency',
is_runtime=True,
is_optional=False,
is_resolved=True,
)
)
yield cls(dependencies=package_dependencies)
def party_mapper(party, party_role):
"""
Yields a Party object with party of `party_role`.
https://doc.rust-lang.org/cargo/reference/manifest.html#the-authors-field-optional
"""
for person in party:
name, email = parse_person(person)
yield models.Party(
type=models.party_person,
name=name,
role=party_role,
email=email)
def parse_person(person):
"""
https://doc.rust-lang.org/cargo/reference/manifest.html#the-authors-field-optional
A "person" is an object with an optional "name" or "email" field.
A person can be in the form:
"author": "<NAME> <<EMAIL>>"
For example:
>>> p = parse_person('<NAME> <<EMAIL>>')
>>> assert p == ('<NAME>', '<EMAIL>')
>>> p = parse_person('<NAME>')
>>> assert p == ('<NAME>', None)
>>> p = parse_person('<<EMAIL>>')
>>> assert p == (None, '<EMAIL>')
"""
parsed = person_parser(person)
if not parsed:
name = None
parsed = person_parser_no_name(person)
else:
name = parsed.group('name')
email = parsed.group('email')
if name:
name = name.strip()
if email:
email = email.strip('<> ')
return name, email
person_parser = re.compile(
r'^(?P<name>[^\(<]+)'
r'\s?'
r'(?P<email><([^>]+)>)?'
).match
person_parser_no_name = re.compile(
r'(?P<email><([^>]+)>)?'
).match
| 1.859375 | 2 |
tests/testing/units.py | mandaltj/gem5_chips | 135 | 10921 | <reponame>mandaltj/gem5_chips<filename>tests/testing/units.py
#!/usr/bin/env python2.7
#
# Copyright (c) 2016 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: <NAME>
from abc import ABCMeta, abstractmethod
from datetime import datetime
import difflib
import functools
import os
import re
import subprocess
import sys
import traceback
from results import UnitResult
from helpers import *
_test_base = os.path.join(os.path.dirname(__file__), "..")
class TestUnit(object):
"""Base class for all test units.
A test unit is a part of a larger test case. Test cases usually
contain two types of units, run units (run gem5) and verify units
(diff output files). All unit implementations inherit from this
class.
A unit implementation overrides the _run() method. The test runner
calls the run() method, which wraps _run() to protect against
exceptions.
"""
__metaclass__ = ABCMeta
def __init__(self, name, ref_dir, test_dir, skip=False):
self.name = name
self.ref_dir = ref_dir
self.test_dir = test_dir
self.force_skip = skip
self.start_time = None
self.stop_time = None
def result(self, state, **kwargs):
if self.start_time is not None and "runtime" not in kwargs:
self.stop_time = datetime.utcnow()
delta = self.stop_time - self.start_time
kwargs["runtime"] = delta.total_seconds()
return UnitResult(self.name, state, **kwargs)
def ok(self, **kwargs):
return self.result(UnitResult.STATE_OK, **kwargs)
def skip(self, **kwargs):
return self.result(UnitResult.STATE_SKIPPED, **kwargs)
def error(self, message, **kwargs):
return self.result(UnitResult.STATE_ERROR, message=message, **kwargs)
def failure(self, message, **kwargs):
return self.result(UnitResult.STATE_FAILURE, message=message, **kwargs)
def ref_file(self, fname):
return os.path.join(self.ref_dir, fname)
def out_file(self, fname):
return os.path.join(self.test_dir, fname)
def _read_output(self, fname, default=""):
try:
with open(self.out_file(fname), "r") as f:
return f.read()
except IOError:
return default
def run(self):
self.start_time = datetime.utcnow()
try:
if self.force_skip:
return self.skip()
else:
return self._run()
except:
return self.error("Python exception:\n%s" % traceback.format_exc())
@abstractmethod
def _run(self):
pass
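# Hypothetical example (not part of gem5): a minimal concrete unit implementing
# _run(), shown only to illustrate the subclassing pattern described in the
# TestUnit docstring above.
class FileExistsUnit(TestUnit):
    """Check that a given output file was produced by the test."""

    def __init__(self, fname, **kwargs):
        super(FileExistsUnit, self).__init__("exists[%s]" % fname, **kwargs)
        self.fname = fname

    def _run(self):
        # out_file(), ok() and failure() are provided by TestUnit
        if os.path.exists(self.out_file(self.fname)):
            return self.ok()
        return self.failure("%s was not produced" % self.fname)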
class RunGem5(TestUnit):
"""Test unit representing a gem5 run.
Possible failure modes:
- gem5 failed to run -> STATE_ERROR
- timeout -> STATE_ERROR
- non-zero exit code -> STATE_ERROR
Possible non-failure results:
- exit code == 0 -> STATE_OK
- exit code == 2 -> STATE_SKIPPED
"""
def __init__(self, gem5, gem5_args, timeout=0, **kwargs):
super(RunGem5, self).__init__("gem5", **kwargs)
self.gem5 = gem5
self.args = gem5_args
self.timeout = timeout
def _run(self):
gem5_cmd = [
self.gem5,
"-d", self.test_dir,
"--stats-file", "text://stats.txt?desc=False",
"-re",
] + self.args
try:
with ProcessHelper(gem5_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as p:
status, gem5_stdout, gem5_stderr = p.call(timeout=self.timeout)
except CallTimeoutException as te:
return self.error("Timeout", stdout=te.stdout, stderr=te.stderr)
except OSError as ose:
return self.error("Failed to launch gem5: %s" % ose)
stderr = "\n".join([
"*** gem5 stderr ***",
gem5_stderr,
"",
"*** m5out/simerr ***",
self._read_output("simerr"),
])
stdout = "\n".join([
"*** gem5 stdout ***",
gem5_stdout,
"",
"*** m5out/simout ***",
self._read_output("simout"),
])
# Signal
if status < 0:
return self.error("gem5 terminated by signal %i" % (-status, ),
stdout=stdout, stderr=stderr)
elif status == 2:
return self.skip(stdout=stdout, stderr=stderr)
elif status > 0:
return self.error("gem5 exited with non-zero status: %i" % status,
stdout=stdout, stderr=stderr)
else:
return self.ok(stdout=stdout, stderr=stderr)
class DiffOutFile(TestUnit):
"""Test unit comparing and output file and a reference file."""
# regular expressions of lines to ignore when diffing outputs
diff_ignore_regexes = {
"simout" : [
re.compile('^Redirecting (stdout|stderr) to'),
re.compile('^gem5 compiled '),
re.compile('^gem5 started '),
re.compile('^gem5 executing on '),
re.compile('^command line:'),
re.compile("^Couldn't import dot_parser,"),
re.compile("^info: kernel located at:"),
re.compile("^Couldn't unlink "),
re.compile("^Using GPU kernel code file\(s\) "),
],
"simerr" : [
#re.compile('^Simulation complete at'),
],
"config.ini" : [
re.compile("^(executable|readfile|kernel|image_file)="),
re.compile("^(cwd|input|codefile)="),
],
"config.json" : [
re.compile(r'''^\s*"(executable|readfile|kernel|image_file)":'''),
re.compile(r'''^\s*"(cwd|input|codefile)":'''),
],
}
def __init__(self, fname, **kwargs):
super(DiffOutFile, self).__init__("diff[%s]" % fname,
**kwargs)
self.fname = fname
self.line_filters = DiffOutFile.diff_ignore_regexes.get(fname, tuple())
def _filter_file(self, fname):
def match_line(l):
for r in self.line_filters:
if r.match(l):
return True
return False
with open(fname, "r") as f:
for l in f:
if not match_line(l):
yield l
def _run(self):
fname = self.fname
ref = self.ref_file(fname)
out = self.out_file(fname)
if not os.path.exists(ref):
return self.error("%s doesn't exist in reference directory" \
% fname)
if not os.path.exists(out):
return self.error("%s doesn't exist in output directory" % fname)
diff = difflib.unified_diff(
tuple(self._filter_file(ref)),
tuple(self._filter_file(out)),
fromfile="ref/%s" % fname, tofile="out/%s" % fname)
diff = list(diff)
if diff:
return self.error("ref/%s and out/%s differ" % (fname, fname),
stderr="".join(diff))
else:
return self.ok(stdout="-- ref/%s and out/%s are identical --" \
% (fname, fname))
class DiffStatFile(TestUnit):
"""Test unit comparing two gem5 stat files."""
def __init__(self, **kwargs):
super(DiffStatFile, self).__init__("stat_diff", **kwargs)
self.stat_diff = os.path.join(_test_base, "diff-out")
def _run(self):
STATUS_OK = 0
STATUS_NEW_STATS = 1
STATUS_FAILED = 2
stats = "stats.txt"
cmd = [
self.stat_diff,
self.ref_file(stats), self.out_file(stats),
]
with ProcessHelper(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as p:
status, stdout, stderr = p.call()
if status in (STATUS_OK, STATUS_NEW_STATS):
return self.ok(stdout=stdout, stderr=stderr)
elif status == STATUS_FAILED:
return self.failure("Statistics mismatch",
stdout=stdout, stderr=stderr)
else:
return self.error("diff-out returned an error: %i" % status,
stdout=stdout, stderr=stderr)
| 1.234375 | 1 |
mythic-docker/app/routes/routes.py | rmusser01/Mythic | 934 | 10922 | <reponame>rmusser01/Mythic<gh_stars>100-1000
from app import (
mythic,
links,
nginx_port,
listen_port,
mythic_admin_password,
mythic_admin_user,
default_operation_name,
mythic_db
)
import app
import asyncpg
import redis
from peewee_async import Manager
from sanic.response import json
from sanic import response
from sanic.exceptions import (
NotFound,
Unauthorized,
MethodNotSupported,
SanicException,
RequestTimeout,
)
import sys
from jinja2 import Environment, PackageLoader
from app.database_models.model import (
Operator,
Operation,
OperatorOperation,
ATTACK,
Artifact,
)
import datetime
import app.crypto as crypto
from sanic_jwt import BaseEndpoint, utils, exceptions
from sanic_jwt.decorators import scoped, inject_user
import ujson as js
from ipaddress import ip_address
from app.routes.authentication import invalidate_refresh_token
import app.database_models.model as db_model
from sanic.log import logger
from uuid import uuid4
import asyncio
env = Environment(loader=PackageLoader("app", "templates"), autoescape=True)
async def respect_pivot(my_links, request):
# given the links dictionary, update the server_ip and server_port to match what was received
# this will allow people using pivots (127.0.0.1:8888) to still access things going through to IP:other_port
updated_links = my_links
host_field = request.host.split(":")
if len(host_field) == 1:
server_ip = host_field[0]
if 'x-forwarded-port' in request.headers:
server_port = request.headers["x-forwarded-port"]
else:
if request.scheme == "https":
server_port = nginx_port
else:
server_port = listen_port
else:
server_ip = host_field[0]
server_port = host_field[1]
updated_links["server_ip"] = server_ip
updated_links["server_port"] = server_port
updated_links["login"] = "/login"
return updated_links
async def getSchemes(request):
if 'x-forwarded-proto' in request.headers:
if request.headers['x-forwarded-proto'] == "http":
return {"http": "http", "ws": "ws"}
else:
return {"http": "https", "ws": "wss"}
if request.scheme == "http":
return {"http": "http", "ws": "ws"}
else:
return {"http": "https", "ws": "wss"}
@mythic.route("/")
@inject_user()
@scoped("auth:user")
async def index(request, user):
template = env.get_template("main_page.html")
content = template.render(
name=user["username"],
links=await respect_pivot(links, request),
current_operation=user["current_operation"],
config=user["ui_config"],
view_utc_time=user["view_utc_time"],
** await getSchemes(request)
)
return response.html(content)
class Login(BaseEndpoint):
async def get(self, request):
error = ""
template = env.get_template("login.html")
content = template.render(
links=await respect_pivot(links, request),
error=error,
config={},
view_utc_time=False,
** await getSchemes(request)
)
return response.html(content)
async def post(self, request):
form = request.form
error = ""
username = None
ip = request.headers["x-real-ip"] if "x-real-ip" in request.headers else request.ip
from app.api.operation_api import send_all_operations_message
try:
username = form["username"][0] if 'username' in form and len(form['username']) > 0 else ""
password = form["password"][0] if 'password' in form and len(form['password']) > 0 else ""
user = await app.db_objects.get(db_model.operator_query, username=username)
if user.id == 1 and user.failed_login_count > 10 and (user.last_failed_login_timestamp
> datetime.datetime.utcnow() + datetime.timedelta(seconds=-60)):
# throttle their attempts to log in to 1 min between checks
error = "Too many failed login attempts, try again later"
user.failed_login_count += 1
user.last_failed_login_timestamp = datetime.datetime.utcnow()
await app.db_objects.update(user)
await send_all_operations_message(message=f"Throttling login attempts for {user.username} due to too many failed login attempts\nLast connection from {ip}",
level="warning", source="throttled_login_" + user.username)
elif not user.active:
error = "Account is not active, cannot log in"
await send_all_operations_message(message=f"Deactivated account {user.username} trying to log in from {ip}",
level="warning", source="deactivated_login_" + user.username)
elif await user.check_password(password):
try:
# update the last login time to be now
user.last_login = datetime.datetime.utcnow()
user.failed_login_count = 0
await app.db_objects.update(user)
if user.current_operation is not None:
# update that operations' event log that the user just signed in
await app.db_objects.create(
db_model.OperationEventLog,
operator=None,
operation=user.current_operation,
message="{} signed in from {}".format(user.username, ip),
)
(
access_token,
output,
) = await self.responses.get_access_token_output(
request,
{"user_id": user.id, "auth": "cookie"},
self.config,
self.instance,
)
refresh_token = (
await self.instance.auth.generate_refresh_token(
request, {"user_id": user.id, "auth": "cookie"}
)
)
output.update(
{self.config.refresh_token_name(): refresh_token}
)
template = env.get_template("login.html")
content = template.render(
links=await respect_pivot(links, request),
error=error,
access_token=access_token,
** await getSchemes(request),
refresh_token=refresh_token,
config={},
view_utc_time=False,
)
resp = response.html(content)
# resp = response.redirect("/")
resp.cookies[
self.config.cookie_access_token_name()
] = access_token
resp.cookies[self.config.cookie_access_token_name()][
"httponly"
] = True
resp.cookies[self.config.cookie_access_token_name()][
"samesite"
] = "strict"
resp.cookies[
self.config.cookie_refresh_token_name()
] = refresh_token
resp.cookies[self.config.cookie_refresh_token_name()][
"httponly"
] = True
resp.cookies[self.config.cookie_refresh_token_name()][
"samesite"
] = "strict"
return resp
except Exception as e:
print(str(sys.exc_info()[-1].tb_lineno) + " " + str(e))
logger.error("post login error:" + str(e))
else:
# user exists, but password is wrong
error = "Username or password invalid"
user.failed_login_count += 1
if user.failed_login_count >= 10 and user.active:
user.last_failed_login_timestamp = datetime.datetime.utcnow()
if user.id != 1:
user.active = False
await send_all_operations_message(message=f"Deactivating account {user.username} due to too many failed logins.\nLast connection from {ip}",
level="warning")
await app.db_objects.update(user)
except Exception as e:
if username is not None:
logger.warning("login error: " + str(e))
error = "Username or password invalid"
await send_all_operations_message(message=f"Attempt to login with unknown user: {username}, from {ip}",
level="warning", source="unknown_login" + ip)
template = env.get_template("login.html")
content = template.render(
links=await respect_pivot(links, request),
error=error,
config={},
view_utc_time=False,
** await getSchemes(request)
)
return response.html(content)
class UIRefresh(BaseEndpoint):
async def get(self, request, *args, **kwargs):
# go here if we're in the browser and our JWT expires so we can update it and continue on
payload = self.instance.auth.extract_payload(request, verify=True)
try:
user = await utils.call(
self.instance.auth.retrieve_user, request, payload=payload
)
except exceptions.MeEndpointNotSetup:
raise exceptions.RefreshTokenNotImplemented
user_id = await self.instance.auth._get_user_id(user)
refresh_token = await utils.call(
self.instance.auth.retrieve_refresh_token,
request=request,
user_id=user_id,
)
if isinstance(refresh_token, bytes):
refresh_token = refresh_token.decode("utf-8")
token = await self.instance.auth.retrieve_refresh_token_from_request(request)
if refresh_token != token:
raise exceptions.AuthenticationFailed()
access_token, output = await self.responses.get_access_token_output(
request, user, self.config, self.instance
)
redirect_to = (
request.headers["referer"] if "referer" in request.headers else "/"
)
resp = response.redirect(redirect_to)
resp.cookies[self.config.cookie_access_token_name()] = access_token
resp.cookies[self.config.cookie_access_token_name()]["httponly"] = True
return resp
@mythic.route("/settings", methods=["GET"])
@inject_user()
@scoped("auth:user")
async def settings(request, user):
template = env.get_template("settings.html")
try:
content = template.render(
links=await respect_pivot(links, request),
name=user["username"],
** await getSchemes(request),
config=user["ui_config"],
view_utc_time=user["view_utc_time"],
)
return response.html(content)
except Exception as e:
logger.error(str(e))
return json({"status": "error", "error": "Failed to find operator"})
@mythic.route("/logout")
@inject_user()
@scoped("auth:user")
async def logout(request, user):
resp = response.redirect("/login")
del resp.cookies["access_token"]
del resp.cookies["refresh_token"]
operator = await app.db_objects.get(db_model.operator_query, id=user["id"])
if operator.current_operation is not None:
await app.db_objects.create(
db_model.OperationEventLog,
operator=None,
operation=operator.current_operation,
message="{} signed out".format(operator.username),
)
# now actually invalidate tokens
await invalidate_refresh_token(user["id"])
return resp
@mythic.exception(asyncio.CancelledError)
async def handle_cancellation(request, exception):
logger.info(
"Request {} was cancelled".format(str(request))
)
return json({"status": "error", "error": "Request was cancelled"}, status=500)
@mythic.exception(NotFound)
async def handler_404(request, exception):
return json({"status": "error", "error": "Not Found"}, status=404)
@mythic.exception(MethodNotSupported)
async def handler_405(request, exception):
return json({"status": "error", "error": "Session Expired, refresh"}, status=405)
@mythic.exception(RequestTimeout)
def request_timeout(request, exception):
return json({"status": "error", "error": "request timeout"})
@mythic.exception(exceptions.AuthenticationFailed)
async def handler_auth_failed(request, exception):
if "/new" in request.path or "webhook" in request.path or "/auth" in request.path or "/refresh" in request.path:
return json({"status": "error", "error": "Authentication failed", "message": "access-denied", "code": "access-denied"}, status=401)
else:
return response.redirect("/login")
@mythic.exception(Unauthorized)
async def handler_auth_failed(request, exception):
if "/new" in request.path or "webhook" in request.path or "/auth" in request.path or "/refresh" in request.path:
return json({"status": "error", "error": "Authentication failed", "message": "Unauthorized", "code": "forbidden"}, status=403)
else:
return response.redirect("/login")
@mythic.exception(SanicException)
def catch_all(request, exception):
logger.exception(
"Caught random exception within Mythic: {}, {}".format(exception, str(request))
)
return json({"status": "error", "error": "Mythic encountered an error"}, status=500)
@mythic.middleware("request")
async def check_ips(request):
if (
request.path in ["/login", "/auth", "/"]
or "/payloads/download/" in request.path
):
ip = ip_address(request.headers["x-real-ip"] if "x-real-ip" in request.headers else request.ip)
for block in mythic.config["ALLOWED_IPS"]:
if ip in block:
return
return json({"error": "Not Found"}, status=404)
@mythic.middleware("response")
async def add_cors(request, response):
response.headers["Access-Control-Allow-Headers"] = "authorization,content-type"
@mythic.listener("before_server_start")
async def setup_initial_info(sanic, loop):
logger.info("setup_initial_info")
app.db_objects = Manager(mythic_db, loop=loop)
await mythic_db.connect_async(loop=loop)
app.db_objects.database.allow_sync = True # logging.WARNING
await initial_setup()
asyncio.create_task(app.api.rabbitmq_api.start_listening())
async def initial_setup():
# create mythic_admin
import multiprocessing
try:
max_worker_connection = int(200 / (multiprocessing.cpu_count() + 1))
app.websocket_pool = await asyncpg.create_pool(mythic.config["DB_POOL_ASYNCPG_CONNECT_STRING"],
max_size=max_worker_connection)
# redis automatically creates a pool behind the scenes
app.redis_pool = redis.Redis(host=app.redis_host, port=app.redis_port, db=3)
# clear the database on start
keys = app.redis_pool.keys("*")
for k in keys:
app.redis_pool.delete(k)
operators = await app.db_objects.count(Operator.select())
if operators > 0:
logger.info("Users already exist, aborting initial install")
return
salt = str(uuid4())
password = await crypto.hash_SHA512(salt + <PASSWORD>)
try:
admin, created = await app.db_objects.get_or_create(
Operator, username=mythic_admin_user, password=password, admin=True, active=True, salt=salt
)
except Exception as e:
print(e)
return
logger.info("Created Admin")
# create default operation
operation, created = await app.db_objects.get_or_create(
Operation,
name=default_operation_name,
admin=admin,
complete=False,
)
logger.info("Created Operation")
await app.db_objects.get_or_create(
OperatorOperation, operator=admin, operation=operation
)
admin.current_operation = operation
await app.db_objects.update(admin)
logger.info("Registered Admin with the default operation")
logger.info("Started parsing ATT&CK data...")
file = open("./app/default_files/other_info/attack.json", "r")
attack = js.load(file) # this is a lot of data and might take a hot second to load
for obj in attack["techniques"]:
await app.db_objects.create(ATTACK, **obj)
file.close()
logger.info("Created all ATT&CK entries")
file = open("./app/default_files/other_info/artifacts.json", "r")
artifacts_file = js.load(file)
for artifact in artifacts_file["artifacts"]:
await app.db_objects.get_or_create(
Artifact, name=artifact["name"], description=artifact["description"]
)
file.close()
logger.info("Created all base artifacts")
logger.info("Successfully finished initial setup")
except Exception as e:
from app.api.operation_api import send_all_operations_message
asyncio.create_task(
send_all_operations_message(
message=f"Worker failed to initialize:\n {str(e)}",
level="warning"))
# /static serves out static images and files
mythic.static("/static", "./app/static", name="shared_files")
mythic.static("/favicon.ico", "./app/static/favicon.ico", name="favicon")
mythic.static("/strict_time.png", "./app/static/strict_time.png", name="strict_time")
mythic.static(
"/grouped_output.png", "./app/static/grouped_output.png", name="grouped_output"
)
mythic.static(
"/no_cmd_output.png", "./app/static/no_cmd_output.png", name="no_cmd_output"
)
mythic.static("/add_comment.png", "./app/static/add_comment.png", name="add_comment")
# add links to the routes in this file at the bottom
links["index"] = mythic.url_for("index")
links["login"] = links["WEB_BASE"] + "/login"
links["logout"] = mythic.url_for("logout")
links["settings"] = mythic.url_for("settings")
| 2.234375 | 2 |
sdl2/blendmode.py | namelivia/py-sdl2 | 222 | 10923 | <gh_stars>100-1000
from ctypes import c_int
from .dll import _bind
__all__ = [
# Enums
"SDL_BlendMode",
"SDL_BLENDMODE_NONE", "SDL_BLENDMODE_BLEND", "SDL_BLENDMODE_ADD",
"SDL_BLENDMODE_MOD", "SDL_BLENDMODE_MUL", "SDL_BLENDMODE_INVALID",
"SDL_BlendOperation",
"SDL_BLENDOPERATION_ADD", "SDL_BLENDOPERATION_SUBTRACT",
"SDL_BLENDOPERATION_REV_SUBTRACT", "SDL_BLENDOPERATION_MINIMUM",
"SDL_BLENDOPERATION_MAXIMUM",
"SDL_BlendFactor",
"SDL_BLENDFACTOR_ZERO", "SDL_BLENDFACTOR_ONE",
"SDL_BLENDFACTOR_SRC_COLOR", "SDL_BLENDFACTOR_ONE_MINUS_SRC_COLOR",
"SDL_BLENDFACTOR_SRC_ALPHA", "SDL_BLENDFACTOR_ONE_MINUS_SRC_ALPHA",
"SDL_BLENDFACTOR_DST_COLOR", "SDL_BLENDFACTOR_ONE_MINUS_DST_COLOR",
"SDL_BLENDFACTOR_DST_ALPHA", "SDL_BLENDFACTOR_ONE_MINUS_DST_ALPHA",
# Functions
"SDL_ComposeCustomBlendMode"
]
SDL_BlendMode = c_int
SDL_BLENDMODE_NONE = 0x00000000
SDL_BLENDMODE_BLEND = 0x00000001
SDL_BLENDMODE_ADD = 0x00000002
SDL_BLENDMODE_MOD = 0x00000004
SDL_BLENDMODE_MUL = 0x00000008
SDL_BLENDMODE_INVALID = 0x7FFFFFFF
SDL_BlendOperation = c_int
SDL_BLENDOPERATION_ADD = 0x1
SDL_BLENDOPERATION_SUBTRACT = 0x2
SDL_BLENDOPERATION_REV_SUBTRACT = 0x3
SDL_BLENDOPERATION_MINIMUM = 0x4
SDL_BLENDOPERATION_MAXIMUM = 0x5
SDL_BlendFactor = c_int
SDL_BLENDFACTOR_ZERO = 0x1
SDL_BLENDFACTOR_ONE = 0x2
SDL_BLENDFACTOR_SRC_COLOR = 0x3
SDL_BLENDFACTOR_ONE_MINUS_SRC_COLOR = 0x4
SDL_BLENDFACTOR_SRC_ALPHA = 0x5
SDL_BLENDFACTOR_ONE_MINUS_SRC_ALPHA = 0x6
SDL_BLENDFACTOR_DST_COLOR = 0x7
SDL_BLENDFACTOR_ONE_MINUS_DST_COLOR = 0x8
SDL_BLENDFACTOR_DST_ALPHA = 0x9
SDL_BLENDFACTOR_ONE_MINUS_DST_ALPHA = 0xA
SDL_ComposeCustomBlendMode = _bind("SDL_ComposeCustomBlendMode", [SDL_BlendFactor, SDL_BlendFactor, SDL_BlendOperation, SDL_BlendFactor, SDL_BlendFactor, SDL_BlendOperation], SDL_BlendMode, added='2.0.6')
| 1.453125 | 1 |
python_code/cutils/viz/__init__.py | IBM/oct-glaucoma-vf-estimate | 0 | 10924 | from .vizutils import viz_overlaymask, display_side2side, display_side2sidev1, stack_patches, figure2image, get_heatmap, visualize_probmaps
from .vizutils import get_heatmap_multiple, figure2image_save | 1.039063 | 1 |
lib/site_config.py | bruceravel/xraylarch | 0 | 10925 | <filename>lib/site_config.py
#!/usr/bin/env python
"""
site configuration for larch:
init_files: list of larch files run (in order) on startup
module_path: list of directories to search for larch code
history_file:
"""
from __future__ import print_function
import sys
import os
from os.path import exists, abspath, join
from .utils import get_homedir, nativepath
from .version import __version__ as larch_version
def pjoin(*args):
return nativepath(join(*args))
##
# set system-wide and local larch folders
# larchdir = sys.exec_prefix + 'share' + 'larch'
# usr_larchdir = get_homedir() + '.larch' (#unix)
# = get_homedir() + 'larch' (#win)
##
larchdir = pjoin(sys.exec_prefix, 'share', 'larch')
home_dir = get_homedir()
usr_larchdir = pjoin(home_dir, '.larch')
if os.name == 'nt':
usr_larchdir = pjoin(home_dir, 'larch')
if 'LARCHDIR' in os.environ:
usr_larchdir = nativepath(os.environ['LARCHDIR'])
##
## names (and loading order) for core plugin modules
core_plugins = ('std', 'math', 'io', 'wx', 'xray', 'xrf', 'xafs')
# frozen executables, as from cx_freeze, will have
# these paths to be altered...
if hasattr(sys, 'frozen'):
if os.name == 'nt':
try:
tdir, exe = os.path.split(sys.executable)
toplevel, bindir = os.path.split(tdir)
larchdir = os.path.abspath(toplevel)
except:
pass
elif sys.platform.lower().startswith('darwin'):
tdir, exe = os.path.split(sys.executable)
toplevel, bindir = os.path.split(tdir)
larchdir = pjoin(toplevel, 'Resources', 'larch')
modules_path = []
plugins_path = []
_path = [usr_larchdir, larchdir]
if 'LARCHPATH' in os.environ:
_path.extend([nativepath(s) for s in os.environ['LARCHPATH'].split(':')])
for pth in _path:
mdir = pjoin(pth, 'modules')
if exists(mdir) and mdir not in modules_path:
modules_path.append(mdir)
pdir = pjoin(pth, 'plugins')
if exists(pdir) and pdir not in plugins_path:
plugins_path.append(pdir)
# initialization larch files to be run on startup
init_files = [pjoin(usr_larchdir, 'init.lar')]
if 'LARCHSTARTUP' in os.environ:
startup = os.environ['LARCHSTARTUP']
if exists(startup):
init_files = [nativepath(startup)]
# history file:
history_file = pjoin(usr_larchdir, 'history.lar')
def make_user_larchdirs():
"""create user's larch directories"""
files = {'init.lar': 'put custom startup larch commands:',
'history.lar': 'history of larch commands:',
'history_larchgui.lar': 'history of larch_gui commands:',
}
subdirs = {'matplotlib': 'matplotlib may put files here',
'dlls': 'put dlls here',
'modules': 'put custom larch or python modules here',
'plugins': 'put custom larch plugins here'}
def make_dir(dname):
if not exists(dname):
try:
os.mkdir(dname)
except (OSError, TypeError):
print(sys.exc_info()[1])
def write_file(fname, text):
if not exists(fname):
try:
f = open(fname, 'w')
f.write('# %s\n' % text)
f.close()
except:
print(sys.exc_info()[1])
make_dir(usr_larchdir)
for fname, text in files.items():
write_file(pjoin(usr_larchdir, fname), text)
for sdir, text in subdirs.items():
sdir = pjoin(usr_larchdir, sdir)
make_dir(sdir)
write_file(pjoin(sdir, 'README'), text)
def show_site_config():
print( """=== Larch Configuration
larch version: %s
sys executable: %s
sys is frozen: %s
system larch dir: %s
users larch dir: %s
users history_file: %s
users startup files: %s
modules search path: %s
plugins search path: %s
========================
""" % (larch_version, sys.executable,
repr(getattr(sys, 'frozen', False)),
larchdir, usr_larchdir,
history_file, init_files,
modules_path, plugins_path))
def system_settings():
"""set system-specific Environmental Variables, and make sure
that the user larchdirs exist.
This is run by the interpreter on startup."""
# ubuntu / unity hack
if sys.platform.lower().startswith('linux'):
if 'ubuntu' in os.uname()[3].lower():
os.environ['UBUNTU_MENUPROXY'] = '0'
make_user_larchdirs()
if __name__ == '__main__':
show_site_config()
| 2.21875 | 2 |
gpath/path_similarity.py | insilichem/gpathfinder | 5 | 10926 | <reponame>insilichem/gpathfinder<filename>gpath/path_similarity.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##############
# GPathFinder: Identification of ligand pathways by a multi-objective
# genetic algorithm
#
# https://github.com/insilichem/gpathfinder
#
# Copyright 2019 <NAME>, <NAME>,
# <NAME>, <NAME>,
# <NAME> and <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############
"""
This module contains the similarity functions that are used to discard
individuals that are not different enough.
This criterion of similarity will be applied in the case of two
``pathways`` individuals with the same score. Then, if they are similar
enough according to this module, one of them will be discarded.
"""
from __future__ import print_function, division
import logging
import numpy as np
logger = logging.getLogger(__name__)
def pathways_rmsd(ind1, ind2, subject, threshold, *args, **kwargs):
"""
Calculates the RMSD between the positions of the ``pathways`` genes
belonging two the two individuals object of study. If the squared
RMSD is less or equal than the squared threshold, we consider that
the two pathways are identical and one of them will be discarded.
Parameters
----------
ind1 : gpath.base.Individual
ind2 : gpath.base.Individual
subject: str
Name of Gpath ``pathway`` gene instance to measure.
threshold : float
Maximum RMSD value in Angstroms to consider two individuals as
similar.
If ``rmsd > threshold``, they are considered different.
Returns
-------
bool
True if ``rmsd`` is within threshold, False otherwise.
It will always return False if number of points of the pathway
is not equal in the two Individuals.
"""
coords1 = np.array([elem[:] for elem in \
ind1.genes[subject].allele['positions']])
coords2 = np.array([elem[:] for elem in \
ind2.genes[subject].allele['positions']])
if coords1.shape[0] != coords2.shape[0]:
return False
rmsd_squared = _rmsd_squared(coords1, coords2)
if rmsd_squared > threshold*threshold:
return False
return True
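# Illustrative sketch (coordinates and threshold are assumed, not from GPath):
# the squared-RMSD comparison used above, written out for two 3-point pathways.
#
#   a = np.array([[0., 0., 0.], [1., 0., 0.], [2., 0., 0.]])
#   b = np.array([[0.1, 0., 0.], [1., 0.1, 0.], [2., 0., 0.1]])
#   diff = a - b
#   rmsd_squared = (diff * diff).sum() / a.shape[0]   # 0.01
#   rmsd_squared <= 0.5 ** 2                          # True -> treated as identical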
def _rmsd_squared(coords1, coords2):
diff = coords1 - coords2
return (diff * diff).sum() / coords1.shape[0] | 1.851563 | 2 |
Schedule/groupagenda/urls.py | f0rdream/party-time | 0 | 10927 | from django.conf.urls import url, include
from .views import (GroupListAPIView,
GroupCreateAPIView,
AgendaListAPIView,
AgendaDetailAPIView,
AgendaCreateAPIView,
AgendaPostAPIView,
agenda_create,
AgendaRefreshAPIView,
NumberInGroupAPIView,
GroupProfileDetailAPIView,
GroupProfileUpdateAPIView,
number_in_group)
urlpatterns = [
url(r'^group/$', GroupListAPIView.as_view(), name="group_list"),
url(r'^group/create/$', GroupCreateAPIView.as_view(), name="group_create"),
url(r'agenda-list/$', AgendaListAPIView.as_view(), name="agenda_list"),
url(r'^(?P<group_id>\d+)/(?P<pk>\d+)/detail/$', AgendaDetailAPIView.as_view(), name='agenda_detail'),
# url(r'^create/$', AgendaCreateAPIView.as_view(), name='agenda_create'),
url(r'^(?P<group_id>\d+)/post2/$', AgendaPostAPIView.as_view(), name='agenda_create2'), # recommended api
url(r'^(?P<group_id>\d+)/post/$', agenda_create, name='agenda_create'),
url(r'^(?P<group_id>\d+)/(?P<pk>\d+)/refresh/$', AgendaRefreshAPIView.as_view(), name='agenda_refresh'),
url(r'^(?P<id>\d+)/number/$', NumberInGroupAPIView.as_view(), name="number"),
url(r'^(?P<group_id>\d+)/(?P<date>\d{4}-\d{2}-\d{2})/number/$', number_in_group, name="number2"),
url(r'^(?P<group_id>\d+)/group-profile/$', GroupProfileDetailAPIView.as_view(), name="group_profile"),
url(r'^(?P<group_id>\d+)/group-profile/update/$', GroupProfileUpdateAPIView.as_view(), name="group_profile_update"),
]
| 2 | 2 |
examples/admin.py | kimbackdoo/Web-Cralwer | 0 | 10928 | from django.contrib import admin
# Register your models here.
# import Shop from models
from .models import Shop
from .models import Parsed_data
from .models import Img_data
from .models import Other
admin.site.register(Shop)
admin.site.register(Parsed_data)
admin.site.register(Img_data)
admin.site.register(Other)
| 1.445313 | 1 |
main_test_dad.py | AdamLohSg/GTA | 8 | 10929 | <reponame>AdamLohSg/GTA<gh_stars>1-10
import torch
from models.gta import GraphTemporalEmbedding
if __name__ == '__main__':
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
x = torch.randn(32, 96, 122)
model = GraphTemporalEmbedding(122, 96, 3)
y = model(x)
print(y.size())
# model = AdaGraphSage(num_nodes=10, seq_len=96, label_len=48, out_len=24)
# model = model.double().to(device)
# x = torch.randn(32, 96, 10, requires_grad=True).double().to(device)
# y = torch.randn(32, 48, 10, requires_grad=True).double().to(device)
# # print(out.size())
# out = model(x, y, None, None)
# print(out.size()) | 2.359375 | 2 |
daiquiri_client/auth.py | aipescience/django-daiquiri-client | 0 | 10930 | class Auth():
def __init__(self, client):
self.client = client
def get_profiles(self):
return self.client.get('/auth/api/profiles/', {'page_size': 10000})['results']
def get_groups(self):
return self.client.get('/auth/api/groups/')
def get_group_map(self):
return {group['id']: group['name'] for group in self.get_groups()}
def activate_profile(self, pk):
return self.client.put('/auth/api/profiles/%d/activate/' % pk, {})
def update_profile_attributes(self, pk, attributes):
return self.client.patch('/auth/api/profiles/%d/' % pk, {'attributes': attributes})
| 2.59375 | 3 |
CSS/spiraleFile.py | NsiLycee/premiere | 0 | 10931 | '''
Author : <NAME>
Purpose : draw a geometric figure with the Turtle library.
The project uses a queue ("file") object to iterate the computation of each new point.
The coordinates of the points of a polygon are placed in a queue.
The algorithm computes the coordinates of a new point so as to draw a line that starts
from the first point of the queue and passes through the second, prolonging the
segment by a given fraction of the distance between the two points. The second point
is replaced by the new one. At the next iteration, the segment starts from the new
point and passes through the next point in the queue, which is in turn replaced by
the new point, and so on.
'''
import turtle
board = turtle.Turtle()
listePoints = [(0, 0), (10, 0), (5, 75**.5)]  # equilateral triangle of side 10; apex height = sqrt(75)
print(listePoints)
for x, y in listePoints :
board.goto(x, y)
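
# Added illustration: a hedged sketch of the queue-based spiral described in the
# header docstring. The parameter names (fraction, steps) and the exact queue
# update are my own reading of that description, not code from the original file.
from collections import deque

def spirale(points, fraction=0.1, steps=60):
    points_queue = deque(points)          # the "file" (FIFO queue) of points
    for _ in range(steps):
        x1, y1 = points_queue[0]          # first point of the queue
        x2, y2 = points_queue[1]          # second point of the queue
        # prolong the segment beyond the second point by a fraction of its length
        x3 = x2 + fraction * (x2 - x1)
        y3 = y2 + fraction * (y2 - y1)
        board.penup()
        board.goto(x1, y1)
        board.pendown()
        board.goto(x3, y3)                # line from the first point through the second, extended
        points_queue.popleft()            # the oldest point leaves the queue
        points_queue.append((x3, y3))     # the new point joins at the back
# spirale(listePoints)                    # uncomment to draw the spiral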
turtle.done() | 3.84375 | 4 |
network/modules/spconv_unet.py | alexisgroshenry/NPM3D_DSNet | 0 | 10932 | # -*- coding:utf-8 -*-
# author: Xinge
# @file: spconv_unet.py
# @time: 2020/06/22 15:01
import time
import numpy as np
import spconv
import torch
import torch.nn.functional as F
from torch import nn
def conv3x3(in_planes, out_planes, stride=1, indice_key=None):
return spconv.SubMConv3d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False, indice_key=indice_key)
def conv1x3(in_planes, out_planes, stride=1, indice_key=None):
return spconv.SubMConv3d(in_planes, out_planes, kernel_size=(1, 3, 3), stride=stride,
padding=(0, 1, 1), bias=False, indice_key=indice_key)
def conv1x1x3(in_planes, out_planes, stride=1, indice_key=None):
return spconv.SubMConv3d(in_planes, out_planes, kernel_size=(1, 1, 3), stride=stride,
padding=(0, 0, 1), bias=False, indice_key=indice_key)
def conv1x3x1(in_planes, out_planes, stride=1, indice_key=None):
return spconv.SubMConv3d(in_planes, out_planes, kernel_size=(1, 3, 1), stride=stride,
padding=(0, 1, 0), bias=False, indice_key=indice_key)
def conv3x1x1(in_planes, out_planes, stride=1, indice_key=None):
return spconv.SubMConv3d(in_planes, out_planes, kernel_size=(3, 1, 1), stride=stride,
padding=(1, 0, 0), bias=False, indice_key=indice_key)
def conv3x1(in_planes, out_planes, stride=1, indice_key=None):
return spconv.SubMConv3d(in_planes, out_planes, kernel_size=(3, 1, 3), stride=stride,
padding=(1, 0, 1), bias=False, indice_key=indice_key)
def conv1x1(in_planes, out_planes, stride=1, indice_key=None):
return spconv.SubMConv3d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=1, bias=False, indice_key=indice_key)
class ResContextBlock(nn.Module):
def __init__(self, in_filters, out_filters, kernel_size=(3, 3, 3), stride=1, indice_key=None):
super(ResContextBlock, self).__init__()
self.conv1 = conv1x3(in_filters, out_filters, indice_key=indice_key+"bef")
self.bn0 = nn.BatchNorm1d(out_filters)
self.act1 = nn.LeakyReLU()
self.conv1_2 = conv3x1(out_filters, out_filters, indice_key=indice_key+"bef")
self.bn0_2 = nn.BatchNorm1d(out_filters)
self.act1_2 = nn.LeakyReLU()
self.conv2 = conv3x1(in_filters, out_filters, indice_key=indice_key+"bef")
self.act2 = nn.LeakyReLU()
self.bn1 = nn.BatchNorm1d(out_filters)
self.conv3 = conv1x3(out_filters, out_filters, indice_key=indice_key+"bef")
self.act3 = nn.LeakyReLU()
self.bn2 = nn.BatchNorm1d(out_filters)
def forward(self, x):
shortcut = self.conv1(x)
shortcut.features = self.act1(shortcut.features)
shortcut.features = self.bn0(shortcut.features)
shortcut = self.conv1_2(shortcut)
shortcut.features = self.act1_2(shortcut.features)
shortcut.features = self.bn0_2(shortcut.features)
resA = self.conv2(x)
resA.features = self.act2(resA.features)
resA.features = self.bn1(resA.features)
resA = self.conv3(resA)
resA.features = self.act3(resA.features)
resA.features = self.bn2(resA.features)
resA.features = resA.features + shortcut.features
return resA
class ResBlock(nn.Module):
def __init__(self, in_filters, out_filters, dropout_rate, kernel_size=(3, 3, 3), stride=1,
pooling=True, drop_out=True, height_pooling=False, indice_key=None):
super(ResBlock, self).__init__()
self.pooling = pooling
self.drop_out = drop_out
self.conv1 = conv3x1(in_filters, out_filters, indice_key=indice_key+"bef")
self.act1 = nn.LeakyReLU()
self.bn0 = nn.BatchNorm1d(out_filters)
self.conv1_2 = conv1x3(out_filters, out_filters, indice_key=indice_key+"bef")
self.act1_2 = nn.LeakyReLU()
self.bn0_2 = nn.BatchNorm1d(out_filters)
self.conv2 = conv1x3(in_filters, out_filters, indice_key=indice_key+"bef")
self.act2 = nn.LeakyReLU()
self.bn1 = nn.BatchNorm1d(out_filters)
self.conv3 = conv3x1(out_filters, out_filters, indice_key=indice_key+"bef")
self.act3 = nn.LeakyReLU()
self.bn2 = nn.BatchNorm1d(out_filters)
# self.conv4 = conv3x3(out_filters, out_filters, indice_key=indice_key+"bef")
# self.act4 = nn.LeakyReLU()
# self.bn4 = nn.BatchNorm1d(out_filters)
if pooling:
# self.dropout = nn.Dropout3d(p=dropout_rate)
if height_pooling:
# self.pool = spconv.SparseMaxPool3d(kernel_size=2, stride=2)
self.pool = spconv.SparseConv3d(out_filters, out_filters, kernel_size=3, stride=2,
padding=1, indice_key=indice_key, bias=False)
else:
# self.pool = spconv.SparseMaxPool3d(kernel_size=(2,2,1), stride=(2, 2, 1))
self.pool = spconv.SparseConv3d(out_filters, out_filters, kernel_size=3, stride=(2,2,1),
padding=1, indice_key=indice_key, bias=False)
# else:
# self.dropout = nn.Dropout3d(p=dropout_rate)
def forward(self, x):
shortcut = self.conv1(x)
shortcut.features = self.act1(shortcut.features)
shortcut.features = self.bn0(shortcut.features)
shortcut = self.conv1_2(shortcut)
shortcut.features = self.act1_2(shortcut.features)
shortcut.features = self.bn0_2(shortcut.features)
resA = self.conv2(x)
resA.features = self.act2(resA.features)
resA.features = self.bn1(resA.features)
resA = self.conv3(resA)
resA.features = self.act3(resA.features)
resA.features = self.bn2(resA.features)
resA.features = resA.features + shortcut.features
# resA = self.conv4(resA)
# resA.features = self.act4(resA.features)
# resA.features = self.bn4(resA.features)
if self.pooling:
# if self.drop_out:
# resB = self.dropout(resA.features)
# else:
# resB = resA
resB = self.pool(resA)
return resB, resA
else:
# if self.drop_out:
# resB = self.dropout(resA)
# else:
# resB = resA
return resA
class UpBlock(nn.Module):
def __init__(self, in_filters, out_filters, kernel_size=(3, 3, 3), indice_key=None, up_key=None):
super(UpBlock, self).__init__()
# self.drop_out = drop_out
#self.trans = nn.ConvTranspose2d(in_filters, out_filters, kernel_size, stride=(2, 2), padding=1)
self.trans_dilao = conv3x3(in_filters, out_filters, indice_key=indice_key+"new_up")
self.trans_act = nn.LeakyReLU()
self.trans_bn = nn.BatchNorm1d(out_filters)
# self.dropout1 = nn.Dropout3d(p=dropout_rate)
# self.dropout2 = nn.Dropout3d(p=dropout_rate)
self.conv1 = conv1x3(out_filters, out_filters, indice_key=indice_key)
self.act1 = nn.LeakyReLU()
self.bn1 = nn.BatchNorm1d(out_filters)
self.conv2 = conv3x1(out_filters, out_filters, indice_key=indice_key)
self.act2 = nn.LeakyReLU()
self.bn2 = nn.BatchNorm1d(out_filters)
self.conv3 = conv3x3(out_filters, out_filters, indice_key=indice_key)
self.act3 = nn.LeakyReLU()
self.bn3 = nn.BatchNorm1d(out_filters)
# self.dropout3 = nn.Dropout3d(p=dropout_rate)
self.up_subm = spconv.SparseInverseConv3d(out_filters, out_filters, kernel_size=3, indice_key=up_key, bias=False)
def forward(self, x, skip):
upA = self.trans_dilao(x)
#if upA.shape != skip.shape:
# upA = F.pad(upA, (0, 1, 0, 1), mode='replicate')
upA.features = self.trans_act(upA.features)
upA.features = self.trans_bn(upA.features)
## upsample
upA = self.up_subm(upA)
# upA = F.interpolate(upA, size=skip.size()[2:], mode='trilinear', align_corners=True)
# if self.drop_out:
# upA = self.dropout1(upA)
upA.features = upA.features + skip.features
# if self.drop_out:
# upB = self.dropout2(upB)
upE = self.conv1(upA)
upE.features = self.act1(upE.features)
upE.features = self.bn1(upE.features)
upE = self.conv2(upE)
upE.features = self.act2(upE.features)
upE.features = self.bn2(upE.features)
upE = self.conv3(upE)
upE.features = self.act3(upE.features)
upE.features = self.bn3(upE.features)
# if self.drop_out:
# upE = self.dropout3(upE)
return upE
class ReconBlock(nn.Module):
def __init__(self, in_filters, out_filters, kernel_size=(3, 3, 3), stride=1, indice_key=None):
super(ReconBlock, self).__init__()
self.conv1 = conv3x1x1(in_filters, out_filters, indice_key=indice_key+"bef")
self.bn0 = nn.BatchNorm1d(out_filters)
self.act1 = nn.Sigmoid()
self.conv1_2 = conv1x3x1(in_filters, out_filters, indice_key=indice_key+"bef")
self.bn0_2 = nn.BatchNorm1d(out_filters)
self.act1_2 = nn.Sigmoid()
self.conv1_3 = conv1x1x3(in_filters, out_filters, indice_key=indice_key+"bef")
self.bn0_3 = nn.BatchNorm1d(out_filters)
self.act1_3 = nn.Sigmoid()
# self.conv2 = conv3x1(in_filters, out_filters, indice_key=indice_key+"bef")
# self.act2 = nn.LeakyReLU()
# self.bn1 = nn.BatchNorm1d(out_filters)
#
# self.conv3 = conv1x3(out_filters, out_filters, indice_key=indice_key+"bef")
# self.act3 = nn.LeakyReLU()
# self.bn2 = nn.BatchNorm1d(out_filters)
def forward(self, x):
shortcut = self.conv1(x)
shortcut.features = self.bn0(shortcut.features)
shortcut.features = self.act1(shortcut.features)
shortcut2 = self.conv1_2(x)
shortcut2.features = self.bn0_2(shortcut2.features)
shortcut2.features = self.act1_2(shortcut2.features)
shortcut3 = self.conv1_3(x)
shortcut3.features = self.bn0_3(shortcut3.features)
shortcut3.features = self.act1_3(shortcut3.features)
# resA = self.conv2(x)
# resA.features = self.act2(resA.features)
# resA.features = self.bn1(resA.features)
#
# resA = self.conv3(resA)
# resA.features = self.act3(resA.features)
# resA.features = self.bn2(resA.features)
shortcut.features = shortcut.features + shortcut2.features + shortcut3.features
shortcut.features = shortcut.features * x.features
return shortcut
class Spconv_salsaNet_res_cfg(nn.Module):
def __init__(self, cfg):
super(Spconv_salsaNet_res_cfg, self).__init__()
output_shape = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE
if 'FEATURE_COMPRESSION' in cfg.MODEL.MODEL_FN:
num_input_features = cfg.MODEL.MODEL_FN.FEATURE_COMPRESSION
else:
num_input_features = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE[2]
nclasses = cfg.DATA_CONFIG.NCLASS
n_height = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE[2]
init_size = cfg.MODEL.BACKBONE.INIT_SIZE
self.nclasses = nclasses
self.nheight = n_height
self.strict = False
sparse_shape = np.array(output_shape)
# sparse_shape[0] = 11
self.sparse_shape = sparse_shape
self.downCntx = ResContextBlock(num_input_features, init_size, indice_key="pre")
# self.resBlock1 = ResBlock(init_size, init_size, 0.2, pooling=True, height_pooling=True, indice_key="down1")
self.resBlock2 = ResBlock(init_size, 2 * init_size, 0.2, height_pooling=True, indice_key="down2")
self.resBlock3 = ResBlock(2 * init_size, 4 * init_size, 0.2, height_pooling=True, indice_key="down3")
self.resBlock4 = ResBlock(4 * init_size, 8 * init_size, 0.2, pooling=True, height_pooling=False, indice_key="down4")
self.resBlock5 = ResBlock(8 * init_size, 16 * init_size, 0.2, pooling=True, height_pooling=False, indice_key="down5")
# self.resBlock6 = ResBlock(16 * init_size, 16 * init_size, 0.2, pooling=False, height_pooling=False, indice_key="down6")
# self.ReconNet = ReconBlock(16 * init_size, 16 * init_size, indice_key="recon")
self.upBlock0 = UpBlock(16 * init_size, 16 * init_size, indice_key="up0", up_key="down5")
self.upBlock1 = UpBlock(16 * init_size, 8 * init_size, indice_key="up1", up_key="down4")
self.upBlock2 = UpBlock(8 * init_size, 4 * init_size, indice_key="up2", up_key="down3")
self.upBlock3 = UpBlock(4 * init_size, 2 * init_size, indice_key="up3", up_key="down2")
# self.upBlock4 = UpBlock(4 * init_size, 2 * init_size, indice_key="up4", up_key="down2")
# self.upBlock5 = UpBlock(2 * init_size, init_size, indice_key="up5", up_key="down1")
self.ReconNet = ReconBlock(2*init_size, 2*init_size, indice_key="recon")
def forward(self, voxel_features, coors, batch_size):
# x = x.contiguous()
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
ret = self.downCntx(ret)
# down0c, down0b = self.resBlock1(ret)
down1c, down1b = self.resBlock2(ret)
down2c, down2b = self.resBlock3(down1c)
down3c, down3b = self.resBlock4(down2c)
down4c, down4b = self.resBlock5(down3c)
# down5b = self.resBlock6(down4c)
# down6b = self.ReconNet(down5b)
up4e = self.upBlock0(down4c, down4b)
up3e = self.upBlock1(up4e, down3b)
up2e = self.upBlock2(up3e, down2b)
up1e = self.upBlock3(up2e, down1b)
up0e = self.ReconNet(up1e)
up0e.features = torch.cat((up0e.features, up1e.features), 1) # size 4 * init_size --> OK with the size of the semantic and instance heads
return up0e, up0e
class Spconv_sem_logits_head_cfg(nn.Module):
def __init__(self, cfg):
super(Spconv_sem_logits_head_cfg, self).__init__()
output_shape = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE
if 'FEATURE_COMPRESSION' in cfg.MODEL.MODEL_FN:
num_input_features = cfg.MODEL.MODEL_FN.FEATURE_COMPRESSION
else:
num_input_features = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE[2]
nclasses = cfg.DATA_CONFIG.NCLASS
n_height = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE[2]
init_size = cfg.MODEL.BACKBONE.INIT_SIZE
self.logits = spconv.SubMConv3d(4 * init_size, nclasses, indice_key="logit", kernel_size=3, stride=1, padding=1, bias=True)
def forward(self, fea):
logits = self.logits(fea)
return logits.dense()
class Spconv_ins_offset_concatxyz_threelayers_head_cfg(nn.Module):
def __init__(self, cfg):
super(Spconv_ins_offset_concatxyz_threelayers_head_cfg, self).__init__()
init_size = cfg.MODEL.BACKBONE.INIT_SIZE
self.pt_fea_dim = 4 * init_size
self.embedding_dim = cfg.MODEL.INS_HEAD.EMBEDDING_CHANNEL
self.conv1 = conv3x3(self.pt_fea_dim, self.pt_fea_dim, indice_key='offset_head_conv1')
self.bn1 = nn.BatchNorm1d(self.pt_fea_dim)
self.act1 = nn.LeakyReLU()
self.conv2 = conv3x3(self.pt_fea_dim, 2 * init_size, indice_key='offset_head_conv2')
self.bn2 = nn.BatchNorm1d(2 * init_size)
self.act2 = nn.LeakyReLU()
self.conv3 = conv3x3(2 * init_size, init_size, indice_key='offset_head_conv3')
self.bn3 = nn.BatchNorm1d(init_size)
self.act3 = nn.LeakyReLU()
self.offset = nn.Sequential(
nn.Linear(init_size+3, init_size, bias=True),
nn.BatchNorm1d(init_size),
nn.ReLU()
)
self.offset_linear = nn.Linear(init_size, self.embedding_dim, bias=True)
def forward(self, fea, batch):
fea = self.conv1(fea)
fea.features = self.act1(self.bn1(fea.features))
fea = self.conv2(fea)
fea.features = self.act2(self.bn2(fea.features))
fea = self.conv3(fea)
fea.features = self.act3(self.bn3(fea.features))
grid_ind = batch['grid']
xyz = batch['pt_cart_xyz']
fea = fea.dense()
fea = fea.permute(0, 2, 3, 4, 1)
pt_ins_fea_list = []
for batch_i, grid_ind_i in enumerate(grid_ind):
pt_ins_fea_list.append(fea[batch_i, grid_ind[batch_i][:,0], grid_ind[batch_i][:,1], grid_ind[batch_i][:,2]])
pt_pred_offsets_list = []
for batch_i, pt_ins_fea in enumerate(pt_ins_fea_list):
pt_pred_offsets_list.append(self.offset_linear(self.offset(torch.cat([pt_ins_fea,torch.from_numpy(xyz[batch_i]).cuda()],dim=1))))
return pt_pred_offsets_list, pt_ins_fea_list
class Spconv_alsaNet_res(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
nclasses = 20, n_height = 32, strict=False, init_size=16):
super(Spconv_alsaNet_res, self).__init__()
self.nclasses = nclasses
self.nheight = n_height
self.strict = False
sparse_shape = np.array(output_shape)
# sparse_shape[0] = 11
print(sparse_shape)
self.sparse_shape = sparse_shape
self.downCntx = ResContextBlock(num_input_features, init_size, indice_key="pre")
# self.resBlock1 = ResBlock(init_size, init_size, 0.2, pooling=True, height_pooling=True, indice_key="down1")
self.resBlock2 = ResBlock(init_size, 2 * init_size, 0.2, height_pooling=True, indice_key="down2")
self.resBlock3 = ResBlock(2 * init_size, 4 * init_size, 0.2, height_pooling=True, indice_key="down3")
self.resBlock4 = ResBlock(4 * init_size, 8 * init_size, 0.2, pooling=True, height_pooling=False, indice_key="down4")
self.resBlock5 = ResBlock(8 * init_size, 16 * init_size, 0.2, pooling=True, height_pooling=False, indice_key="down5")
# self.resBlock6 = ResBlock(16 * init_size, 16 * init_size, 0.2, pooling=False, height_pooling=False, indice_key="down6")
# self.ReconNet = ReconBlock(16 * init_size, 16 * init_size, indice_key="recon")
self.upBlock0 = UpBlock(16 * init_size, 16 * init_size, indice_key="up0", up_key="down5")
self.upBlock1 = UpBlock(16 * init_size, 8 * init_size, indice_key="up1", up_key="down4")
self.upBlock2 = UpBlock(8 * init_size, 4 * init_size, indice_key="up2", up_key="down3")
self.upBlock3 = UpBlock(4 * init_size, 2 * init_size, indice_key="up3", up_key="down2")
# self.upBlock4 = UpBlock(4 * init_size, 2 * init_size, indice_key="up4", up_key="down2")
# self.upBlock5 = UpBlock(2 * init_size, init_size, indice_key="up5", up_key="down1")
self.ReconNet = ReconBlock(2*init_size, 2*init_size, indice_key="recon")
self.logits = spconv.SubMConv3d(4 * init_size, nclasses, indice_key="logit", kernel_size=3, stride=1, padding=1, bias=True)
def forward(self, voxel_features, coors, batch_size):
# x = x.contiguous()
coors = coors.int()
import pdb
pdb.set_trace()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
ret = self.downCntx(ret)
# down0c, down0b = self.resBlock1(ret)
down1c, down1b = self.resBlock2(ret)
down2c, down2b = self.resBlock3(down1c)
down3c, down3b = self.resBlock4(down2c)
down4c, down4b = self.resBlock5(down3c)
# down5b = self.resBlock6(down4c)
# down6b = self.ReconNet(down5b)
up4e = self.upBlock0(down4c, down4b)
up3e = self.upBlock1(up4e, down3b)
up2e = self.upBlock2(up3e, down2b)
up1e = self.upBlock3(up2e, down1b)
up0e = self.ReconNet(up1e)
up0e.features = torch.cat((up0e.features, up1e.features), 1)
# up2e = self.upBlock3(up3e, down2b)
# up1e = self.upBlock4(up2e, down1b)
# up0e = self.upBlock5(up1e, down0b)
# up0e_gap = nn.AdaptiveAvgPool3d((1))(up0e)
# up0e_gap = F.interpolate(up0e_gap, size=(up0e.size()[2:]), mode='trilinear', align_corners=True)
# up0e = torch.cat((up0e, up0e_gap), dim=1)
logits = self.logits(up0e)
y = logits.dense()
# y = logits.permute(0, 1, 3, 4, 2)
return y
| 2.0625 | 2 |
scivision_test_plugin/__init__.py | acocac/scivision-test-plugin | 0 | 10933 | <reponame>acocac/scivision-test-plugin
from .model import DummyModel, ImageNetModel
| 0.957031 | 1 |
prml/dimreduction/bayesian_pca.py | andresmasegosa/PRML-CoreSets | 0 | 10934 | import numpy as np
from prml.dimreduction.pca import PCA
class BayesianPCA(PCA):
def fit(self, X, iter_max=100, initial="random"):
"""
empirical bayes estimation of pca parameters
Parameters
----------
X : (sample_size, n_features) ndarray
input data
iter_max : int
maximum number of em steps
Returns
-------
mean : (n_features,) ndarray
            sample mean of the input data
W : (n_features, n_components) ndarray
projection matrix
var : float
variance of observation noise
"""
initial_list = ["random", "eigen"]
self.mean = np.mean(X, axis=0)
self.I = np.eye(self.n_components)
if initial not in initial_list:
print("availabel initializations are {}".format(initial_list))
if initial == "random":
self.W = np.eye(np.size(X, 1), self.n_components)
#self.W = np.random.randn(np.size(X, 1), self.n_components)
self.var = 1.
elif initial == "eigen":
self.eigen(X)
self.alpha = len(self.mean) / np.sum(self.W ** 2, axis=0).clip(min=1e-10)
for i in range(iter_max):
W = np.copy(self.W)
stats = self._expectation(X - self.mean)
self._maximization(X - self.mean, *stats)
#self.alpha = len(self.mean) / np.sum(self.W ** 2, axis=0).clip(min=1e-10)
#if np.allclose(W, self.W):
# break
self.n_iter = i + 1
self.C = self.W @ self.W.T + self.var * np.eye(np.size(X, 1))
self.Cinv = np.linalg.inv(self.C)
def _maximization(self, X, Ez, Ezz):
self.W = X.T @ Ez @ np.linalg.inv(np.sum(Ezz, axis=0) + self.var * np.diag(self.alpha))
self.var = np.mean(
np.mean(X ** 2, axis=-1)
- 2 * np.mean(Ez @ self.W.T * X, axis=-1)
+ np.trace((Ezz @ self.W.T @ self.W).T) / len(self.mean))
def maximize(self, D, Ez, Ezz):
self.W = D.T.dot(Ez).dot(np.linalg.inv(np.sum(Ezz, axis=0) + self.var * np.diag(self.alpha)))
self.var = np.mean(
np.mean(D ** 2, axis=-1)
- 2 * np.mean(Ez.dot(self.W.T) * D, axis=-1)
+ np.trace(Ezz.dot(self.W.T).dot(self.W).T) / self.ndim)
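
# --- Added illustration (hedged usage sketch, not part of the original module).
# --- The constructor signature BayesianPCA(n_components) is assumed from the
# --- parent PCA class; the data below are synthetic.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X = rng.randn(200, 2) @ rng.randn(2, 5) + 0.1 * rng.randn(200, 5)
    bpca = BayesianPCA(4)            # 4 components, of which ~2 should matter
    bpca.fit(X, iter_max=50, initial="random")
    # alpha grows for directions the data do not support (automatic relevance)
    print(bpca.W.shape, bpca.var, bpca.alpha)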
| 2.984375 | 3 |
mne/time_frequency/psd.py | jnvandermeer/mne-python | 0 | 10935 | <filename>mne/time_frequency/psd.py
# Authors : <NAME>, <EMAIL> (2011)
# <NAME> <<EMAIL>>
# License : BSD 3-clause
import numpy as np
from ..parallel import parallel_func
from ..io.pick import _pick_data_channels
from ..utils import logger, verbose, _time_mask
from ..fixes import get_spectrogram
from .multitaper import psd_array_multitaper
def _psd_func(epoch, noverlap, n_per_seg, nfft, fs, freq_mask, func):
"""Aux function."""
return func(epoch, fs=fs, nperseg=n_per_seg, noverlap=noverlap,
nfft=nfft, window='hamming')[2][..., freq_mask, :]
def _check_nfft(n, n_fft, n_per_seg, n_overlap):
"""Ensure n_fft, n_per_seg and n_overlap make sense."""
if n_per_seg is None and n_fft > n:
raise ValueError(('If n_per_seg is None n_fft is not allowed to be > '
'n_times. If you want zero-padding, you have to set '
'n_per_seg to relevant length. Got n_fft of %d while'
' signal length is %d.') % (n_fft, n))
n_per_seg = n_fft if n_per_seg is None or n_per_seg > n_fft else n_per_seg
n_per_seg = n if n_per_seg > n else n_per_seg
if n_overlap >= n_per_seg:
raise ValueError(('n_overlap cannot be greater than n_per_seg (or '
'n_fft). Got n_overlap of %d while n_per_seg is '
'%d.') % (n_overlap, n_per_seg))
return n_fft, n_per_seg, n_overlap
def _check_psd_data(inst, tmin, tmax, picks, proj, reject_by_annotation=False):
"""Check PSD data / pull arrays from inst."""
from ..io.base import BaseRaw
from ..epochs import BaseEpochs
from ..evoked import Evoked
if not isinstance(inst, (BaseEpochs, BaseRaw, Evoked)):
        raise ValueError('epochs must be an instance of Epochs, Raw, or '
                         'Evoked. Got type {0}'.format(type(inst)))
time_mask = _time_mask(inst.times, tmin, tmax, sfreq=inst.info['sfreq'])
if picks is None:
picks = _pick_data_channels(inst.info, with_ref_meg=False)
if proj:
# Copy first so it's not modified
inst = inst.copy().apply_proj()
sfreq = inst.info['sfreq']
if isinstance(inst, BaseRaw):
start, stop = np.where(time_mask)[0][[0, -1]]
rba = 'NaN' if reject_by_annotation else None
data = inst.get_data(picks, start, stop + 1, reject_by_annotation=rba)
elif isinstance(inst, BaseEpochs):
data = inst.get_data()[:, picks][:, :, time_mask]
else: # Evoked
data = inst.data[picks][:, time_mask]
return data, sfreq
@verbose
def psd_array_welch(x, sfreq, fmin=0, fmax=np.inf, n_fft=256, n_overlap=0,
n_per_seg=None, n_jobs=1, verbose=None):
"""Compute power spectral density (PSD) using Welch's method.
Parameters
----------
x : array, shape=(..., n_times)
The data to compute PSD from.
sfreq : float
The sampling frequency.
fmin : float
The lower frequency of interest.
fmax : float
The upper frequency of interest.
n_fft : int
The length of FFT used, must be ``>= n_per_seg`` (default: 256).
The segments will be zero-padded if ``n_fft > n_per_seg``.
n_overlap : int
The number of points of overlap between segments. Will be adjusted
to be <= n_per_seg. The default value is 0.
n_per_seg : int | None
Length of each Welch segment (windowed with a Hamming window). Defaults
to None, which sets n_per_seg equal to n_fft.
n_jobs : int
Number of CPUs to use in the computation.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
    psds : ndarray, shape (..., n_freqs)
The power spectral densities. All dimensions up to the last will
be the same as input.
freqs : ndarray, shape (n_freqs,)
The frequencies.
Notes
-----
.. versionadded:: 0.14.0
"""
spectrogram = get_spectrogram()
dshape = x.shape[:-1]
n_times = x.shape[-1]
x = x.reshape(-1, n_times)
# Prep the PSD
n_fft, n_per_seg, n_overlap = _check_nfft(n_times, n_fft, n_per_seg,
n_overlap)
win_size = n_fft / float(sfreq)
logger.info("Effective window size : %0.3f (s)" % win_size)
freqs = np.arange(n_fft // 2 + 1, dtype=float) * (sfreq / n_fft)
freq_mask = (freqs >= fmin) & (freqs <= fmax)
freqs = freqs[freq_mask]
# Parallelize across first N-1 dimensions
parallel, my_psd_func, n_jobs = parallel_func(_psd_func, n_jobs=n_jobs)
x_splits = np.array_split(x, n_jobs)
f_spectrogram = parallel(my_psd_func(d, noverlap=n_overlap, nfft=n_fft,
fs=sfreq, freq_mask=freq_mask,
func=spectrogram, n_per_seg=n_per_seg)
for d in x_splits)
# Combining, reducing windows and reshaping to original data shape
psds = np.concatenate([np.nanmean(f_s, axis=-1)
for f_s in f_spectrogram], axis=0)
psds.shape = dshape + (-1,)
return psds, freqs
@verbose
def psd_welch(inst, fmin=0, fmax=np.inf, tmin=None, tmax=None, n_fft=256,
n_overlap=0, n_per_seg=None, picks=None, proj=False, n_jobs=1,
reject_by_annotation=True, verbose=None):
"""Compute the power spectral density (PSD) using Welch's method.
Calculates periodograms for a sliding window over the time dimension, then
averages them together for each channel/epoch.
Parameters
----------
inst : instance of Epochs or Raw or Evoked
The data for PSD calculation
fmin : float
Min frequency of interest
fmax : float
Max frequency of interest
tmin : float | None
Min time of interest
tmax : float | None
Max time of interest
n_fft : int
The length of FFT used, must be ``>= n_per_seg`` (default: 256).
The segments will be zero-padded if ``n_fft > n_per_seg``.
If n_per_seg is None, n_fft must be >= number of time points
in the data.
n_overlap : int
The number of points of overlap between segments. Will be adjusted
to be <= n_per_seg. The default value is 0.
n_per_seg : int | None
Length of each Welch segment (windowed with a Hamming window). Defaults
to None, which sets n_per_seg equal to n_fft.
picks : array-like of int | None
The selection of channels to include in the computation.
If None, take all channels.
proj : bool
Apply SSP projection vectors. If inst is ndarray this is not used.
n_jobs : int
Number of CPUs to use in the computation.
reject_by_annotation : bool
Whether to omit bad segments from the data while computing the
PSD. If True, annotated segments with a description that starts
with 'bad' are omitted. Has no effect if ``inst`` is an Epochs or
Evoked object. Defaults to True.
.. versionadded:: 0.15.0
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
psds : ndarray, shape (..., n_freqs)
The power spectral densities. If input is of type Raw,
then psds will be shape (n_channels, n_freqs), if input is type Epochs
then psds will be shape (n_epochs, n_channels, n_freqs).
freqs : ndarray, shape (n_freqs,)
The frequencies.
See Also
--------
mne.io.Raw.plot_psd
mne.Epochs.plot_psd
psd_multitaper
psd_array_welch
Notes
-----
.. versionadded:: 0.12.0
"""
# Prep data
data, sfreq = _check_psd_data(inst, tmin, tmax, picks, proj,
reject_by_annotation=reject_by_annotation)
return psd_array_welch(data, sfreq, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_overlap=n_overlap, n_per_seg=n_per_seg,
n_jobs=n_jobs, verbose=verbose)
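
# Added illustration (hedged sketch, not part of the original MNE source): typical
# use of psd_welch on a Raw recording; the file name below is a placeholder.
#
#     import mne
#     raw = mne.io.read_raw_fif('sample_raw.fif', preload=True)
#     psds, freqs = psd_welch(raw, fmin=1., fmax=40., n_fft=2048)
#     # psds has shape (n_channels, n_freqs); freqs spans ~1-40 Hz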
@verbose
def psd_multitaper(inst, fmin=0, fmax=np.inf, tmin=None, tmax=None,
bandwidth=None, adaptive=False, low_bias=True,
normalization='length', picks=None, proj=False,
n_jobs=1, verbose=None):
"""Compute the power spectral density (PSD) using multitapers.
Calculates spectral density for orthogonal tapers, then averages them
together for each channel/epoch. See [1] for a description of the tapers
and [2] for the general method.
Parameters
----------
inst : instance of Epochs or Raw or Evoked
The data for PSD calculation.
fmin : float
Min frequency of interest
fmax : float
Max frequency of interest
tmin : float | None
Min time of interest
tmax : float | None
Max time of interest
bandwidth : float
The bandwidth of the multi taper windowing function in Hz. The default
value is a window half-bandwidth of 4.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90% spectral concentration within
bandwidth.
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
picks : array-like of int | None
The selection of channels to include in the computation.
If None, take all channels.
proj : bool
Apply SSP projection vectors. If inst is ndarray this is not used.
n_jobs : int
Number of CPUs to use in the computation.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
psds : ndarray, shape (..., n_freqs)
The power spectral densities. If input is of type Raw,
then psds will be shape (n_channels, n_freqs), if input is type Epochs
then psds will be shape (n_epochs, n_channels, n_freqs).
freqs : ndarray, shape (n_freqs,)
The frequencies.
References
----------
.. [1] <NAME>. "Prolate spheroidal wave functions, Fourier analysis,
and uncertainty V: The discrete case." Bell System Technical
Journal, vol. 57, 1978.
.. [2] <NAME>. and <NAME>. "Spectral Analysis for Physical
Applications: Multitaper and Conventional Univariate Techniques."
Cambridge University Press, 1993.
See Also
--------
mne.io.Raw.plot_psd
mne.Epochs.plot_psd
psd_array_multitaper
psd_welch
csd_multitaper
Notes
-----
.. versionadded:: 0.12.0
"""
# Prep data
data, sfreq = _check_psd_data(inst, tmin, tmax, picks, proj)
return psd_array_multitaper(data, sfreq, fmin=fmin, fmax=fmax,
bandwidth=bandwidth, adaptive=adaptive,
low_bias=low_bias, normalization=normalization,
n_jobs=n_jobs, verbose=verbose)
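
# --- Added illustration (hedged, not part of the original MNE source): a quick
# --- synthetic self-check of psd_array_welch. A 10 Hz sine sampled at 200 Hz
# --- should concentrate its power near 10 Hz; the parameter values are arbitrary.
if __name__ == '__main__':
    sfreq = 200.
    t = np.arange(0, 10, 1. / sfreq)
    data = np.sin(2 * np.pi * 10 * t)[np.newaxis, :]   # shape (1, n_times)
    psds, freqs = psd_array_welch(data, sfreq, fmin=1., fmax=40., n_fft=512)
    print('peak frequency: %0.1f Hz' % freqs[psds[0].argmax()])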
| 2.34375 | 2 |
culturebank/models.py | Anaphory/culturebank | 0 | 10936 | <reponame>Anaphory/culturebank
from zope.interface import implementer
from sqlalchemy import (
Column,
String,
Integer,
Float,
ForeignKey,
CheckConstraint,
)
from sqlalchemy.orm import relationship, backref
from clld import interfaces
from clld.db.meta import Base, CustomModelMixin
from clld.db.versioned import Versioned
from clld.db.models.common import (
Contribution, Parameter, IdNameDescriptionMixin, Language
)
from clld_glottologfamily_plugin.models import HasFamilyMixin, Family
from .interfaces import IDependency, ITransition, IStability, IDeepFamily, ISupport, IHasSupport
@implementer(interfaces.ILanguage)
class CulturebankLanguage(CustomModelMixin, Language, HasFamilyMixin):
pk = Column(Integer, ForeignKey('language.pk'), primary_key=True)
@implementer(interfaces.IParameter)
class Feature(CustomModelMixin, Parameter, Versioned):
"""Parameters in CultureBank are called features. They are always related to one Designer.
"""
pk = Column(Integer, ForeignKey('parameter.pk'), primary_key=True)
doc = Column(String)
patron = Column(String)
newdoc = Column(String)
vdoc = Column(String)
std_comments = Column(String)
name_french = Column(String)
clarification = Column(String)
alternative_id = Column(String)
representation = Column(Integer)
designer = Column(String)
abbreviation = Column(String)
sortkey_str = Column(String)
sortkey_int = Column(Integer)
jl_relevant_unit = Column(String)
jl_function = Column(String)
jl_formal_means = Column(String)
legacy_status = Column(String)
culturebank_status = Column(String)
wip_comments = Column(String)
nts_culturebank = Column(String)
hard_to_deny = Column(String)
prone_misunderstanding = Column(String)
requires_extensive_data = Column(String)
last_edited = Column(String)
other_survey = Column(String)
@implementer(IStability)
class Stability(Base, CustomModelMixin):
pk = Column(Integer, primary_key=True)
id = Column(String)
parsimony_stability_value = Column(Float)
parsimony_retentions = Column(Float)
parsimony_transitions = Column(Float)
feature_pk = Column(Integer, ForeignKey('feature.pk'))
feature = relationship(Feature, lazy='joined', foreign_keys = feature_pk, backref = "stability")
@implementer(IDependency)
class Dependency(Base, CustomModelMixin):
pk = Column(Integer, primary_key=True)
id = Column(String)
feature1_pk = Column(Integer, ForeignKey('feature.pk'))
feature1 = relationship(Feature, lazy='joined', foreign_keys = feature1_pk)
feature2_pk = Column(Integer, ForeignKey('feature.pk'))
feature2 = relationship(Feature, lazy='joined', foreign_keys = feature2_pk)
strength = Column(Float)
representation = Column(Integer)
combinatory_status = Column(String)
@implementer(ITransition)
class Transition(Base, CustomModelMixin):
pk = Column(Integer, primary_key=True)
id = Column(String)
fromnode = Column(String)
fromvalue = Column(String)
tonode = Column(String)
tovalue = Column(String)
stability_pk = Column(Integer, ForeignKey('stability.pk'))
stability = relationship(Stability, lazy='joined', foreign_keys = stability_pk)
family_pk = Column(Integer, ForeignKey('family.pk'))
family = relationship(Family, backref='transitions')
retention_innovation = Column(String)
@implementer(interfaces.IContribution)
class CulturebankContribution(CustomModelMixin, Contribution):
pk = Column(Integer, ForeignKey('contribution.pk'), primary_key=True)
desc = Column(String)
@implementer(IDeepFamily)
class DeepFamily(Base, CustomModelMixin):
pk = Column(Integer, primary_key=True)
id = Column(String)
family1_pk = Column(Integer, ForeignKey('family.pk'))
family1 = relationship(Family, lazy='joined', foreign_keys = family1_pk)
family1_longitude = Column(
Float(),
CheckConstraint('-180 <= family1_longitude and family1_longitude <= 180 '),
doc='geographical longitude in WGS84')
family1_latitude = Column(
Float(),
CheckConstraint('-90 <= family1_latitude and family1_latitude <= 90'),
doc='geographical latitude in WGS84')
family2_pk = Column(Integer, ForeignKey('family.pk'))
family2 = relationship(Family, lazy='joined', foreign_keys = family2_pk)
family2_longitude = Column(
Float(),
CheckConstraint('-180 <= family2_longitude and family2_longitude <= 180 '),
doc='geographical longitude in WGS84')
family2_latitude = Column(
Float(),
CheckConstraint('-90 <= family2_latitude and family2_latitude <= 90'),
doc='geographical latitude in WGS84')
support_value = Column(Float)
significance = Column(Float)
geographic_plausibility = Column(Float)
@implementer(ISupport)
class Support(Base, CustomModelMixin):
pk = Column(Integer, primary_key=True)
id = Column(String)
value1 = Column(String)
value2 = Column(String)
historical_score = Column(Float)
independent_score = Column(Float)
support_score = Column(Float)
feature_pk = Column(Integer, ForeignKey('feature.pk'))
feature = relationship(Feature, lazy='joined', foreign_keys = feature_pk)
@implementer(IHasSupport)
class HasSupport(Base, CustomModelMixin):
id = Column(String)
deepfamily_pk = Column(Integer, ForeignKey('deepfamily.pk'), primary_key=True)
deepfamily = relationship(DeepFamily, lazy='joined', foreign_keys = deepfamily_pk)
support_pk = Column(Integer, ForeignKey('support.pk'), primary_key=True)
support = relationship(Support, lazy='joined', foreign_keys = support_pk)
| 2.15625 | 2 |
src/models/layers/feature.py | icycookies/dd_benchmark | 2 | 10937 | <filename>src/models/layers/feature.py
import torch
import torch.nn as nn
class MLP(nn.Module):
def __init__(self, num_features, hidden_sizes, dropout):
super().__init__()
self.layers = nn.ModuleList(
[nn.Linear(num_features, hidden_sizes[0])] +
[nn.Linear(hidden_sizes[i], hidden_sizes[i + 1]) for i in range(len(hidden_sizes) - 1)]
)
self.activation = nn.ReLU()
self.dropout = nn.Dropout(p=dropout)
def forward(self, x):
for layer in self.layers:
x = layer(x)
x = self.activation(x)
x = self.dropout(x)
return x | 3.34375 | 3 |
src/kanone/adapter/tx.py | doncatnip/kanone | 5 | 10938 | """ Twisted adapter for Kanone """
from twisted.python.failure import Failure
from twisted.internet import defer
from ..lib import Invalid
from ..util import varargs2kwargs
import logging, sys
log = logging.getLogger( __name__ )
# hacky and redundant, but it'll do for now ..
# TODO: move to proper twisted specific classes under .tx.*
# and get rid of the monkey
_python3 = sys.version_info[0]>=3
def monkeyPatch():
"""
Patches Kanone so that any validation returns a Deferred, thus
one can write asynchronous validators using Twisted's non-blocking API.
Schema and ForEach fields are validated concurrently.
"""
if getattr( monkeyPatch,'_isMonkeyPatched',False):
return
from ..lib import Context, PASS, MISSING
from ..validator.core import Tag, Compose, Tmp, Item, Not, And, Or, Call, If
from ..validator.check import Match
from ..validator.schema import Schema, ForEach, Field
from ..validator.web import MXLookup
@defer.inlineCallbacks
def context_validate( self ):
if self.isValidated:
if self.__error__ is not MISSING:
raise self.__error__
defer.returnValue( self.__result__ )
self.isValidating = True
if self.parent is not None:
if not self.parent.isValidated and not self.parent.isValidating:
yield defer.maybeDeferred\
( self.parent.validate
)
if not self.validator:
raise AttributeError("No validator set for context '%s'" % self.path )
result = defer.maybeDeferred\
( self.validator.validate
, self
, self.__value__
)
result.addErrback( context_gotError, self )
result = yield result
self.isValidated = True
self.isValidating = False
if self.__error__ is not MISSING:
raise self.__error__
else:
        if result is not PASS:
            self.__result__ = result
        else:
            self.__result__ = self.__value__
defer.returnValue( result )
def context_gotError( error, self ):
e = error.value
if not isinstance( e, Invalid ):
self.__error__ = error
return
self.__error__ = e
e.context = self
message = e.validator.__messages__[e.key]
if message is not None:
extra = e.data['extra']
value = e.value
data = e.data
data['message'] = message
if hasattr(e,'realkey'):
data['key'] = e.realkey
extra['value.type'] = getattr(value, '__class__', None) is not None \
and getattr(value.__class__,'__name__', False) or 'unknown'
if isinstance(value,str) or not _python3 and isinstance(value,basestring):
extra['value'] = value
else:
extra['value'] = str(value)
cache = getattr( self, 'cache', None)
if cache is not None:
extra.update( cache )
self['error'] = self.__error__.data
self.root.errorlist.append( self.__error__.context.path )
def tag_gotResult( result, d, validator, tagName ):
if isinstance( result, Failure ):
if not isinstance(result.value, Invalid):
d.errback( result )
return
e = result.value
if e.validator is validator or getattr(e,'composer',None) is validator:
e.tagName = tagName
d.errback( e )
else:
d.callback( result )
def tag_validate( self, context, value ):
validator = context.root.taggedValidators.get(self.tagID, None)
if validator is None:
validator = self.enabled and self.validator
if not validator:
return value
d = defer.Deferred()
result = defer.maybeDeferred\
( validator.validate
, context
, value
)
result.addBoth( tag_gotResult, d, validator, self.tagName )
return d
def compose_gotResult( result, d, context, tmpTags, composer ):
context.root.taggedValidators = tmpTags
if isinstance( result, Failure ):
if not isinstance( result.value, Invalid ):
d.errback( result )
return
e = result.value
if hasattr(e,'tagName'):
e.realkey = "%s_%s" % (e.tagName, getattr(e,'realkey',e.key))
e.composer = composer
del e.tagName
d.errback( e )
else:
d.callback( result )
def compose_validate( self, context, value ):
tmpTags = context.root.taggedValidators
context.root.taggedValidators = self.currentTaggedValidators
d = defer.Deferred()
result = defer.maybeDeferred\
( self.validator.validate
, context
, value
)
result.addBoth( compose_gotResult, d, context, tmpTags, self )
return d
def tmp_gotReslt( result, d, raiseError, value ):
if isinstance( result, Failure ):
if not isinstance(result.value, Invalid):
d.errback( result )
return
if raiseError:
d.errback( result.value )
return
d.callback( value )
def tmp_validate( self, context, value ):
d = defer.Deferred()
result = defer.maybeDeferred\
( self.validator.validate
, context
, value
)
result.addBoth( tmp_gotReslt, d, self.raiseError, value )
return d
def item_gotResult( result, d, value, key, alter ):
if isinstance( result, Failure ):
if not isinstance(result.value, Invalid):
d.errback( result )
return
d.errback( result.value )
else:
if alter:
value[key] = result
d.callback( value )
def item_validate( self, context, value ):
try:
val = value[ self.key ]
except TypeError:
raise Invalid( value, self, 'type' )
except (KeyError, IndexError):
raise Invalid( value, self, 'notFound', key=self.key )
else:
if self.validator is not None:
d = defer.Deferred()
result = defer.maybeDeferred\
( self.validator.validate
, context
, val
)
result.addBoth( item_gotResult, d , value, self.key, self.alter )
return d
else:
return val
def not_gotResult( result, d, value, validator ):
if isinstance( result, Failure ):
if not isinstance( result.value, Invalid ):
d.errback( result )
return
d.callback( value )
else:
d.errback( Invalid( value, validator ) )
def not_validate(self, context, value ):
d = defer.Deferred()
result = defer.maybeDeferred\
( self.validator.validate
, context
, value
)
result.addBoth( not_gotResult, d, value, self )
return d
def and_doTryNext( result, validators, context, value, d ):
if isinstance( result, Failure ):
if not isinstance(result.value, Invalid):
d.errback( result )
else:
e = result.value
d.errback( e )
else:
if validators:
and_tryNext( validators, context, result, d )
else:
d.callback( result )
def and_tryNext( validators, context, value, d ):
result = defer.maybeDeferred\
( validators.pop(0).validate
, context
, value
)
result.addBoth( and_doTryNext, validators, context, value, d )
def and_validate( self, context, value ):
d = defer.Deferred()
and_tryNext( list( self.validators ), context, value, d )
return d
def or_doTryNext( result, validators, context, value, d ):
if isinstance( result, Failure ):
err = result
if not isinstance(err.value, Invalid):
d.errback( err )
return
e = err.value
if not validators:
d.errback( e )
else:
or_tryNext( validators, context, value, d )
else:
d.callback( result )
def or_tryNext( validators, context, value, d ):
result = defer.maybeDeferred\
( validators.pop(0).validate
, context
, value
)
result.addBoth( or_doTryNext, validators, context, value, d )
def or_validate( self, context, value ):
d = defer.Deferred()
or_tryNext( list(self.validators), context, value, d )
return d
@defer.inlineCallbacks
def call_validate( self, context, value ):
try:
result = yield defer.maybeDeferred\
( self.__func__
, context
, value
)
except Failure as e:
if not isinstance(e.value, Invalid):
raise
e = e.value
e.validator = self
raise e
else:
defer.returnValue( result )
def match_gotResult( result, self, value, d ):
if isinstance( result, Failure ):
if not isinstance(result.value, Invalid):
raise
d.errback( Invalid( value, self, matchType=self.type, criterion=result.value ) )
else:
val = value
if self.ignoreCase:
result = str(result).lower()
val = str(value).lower()
if val != result:
d.errback( Invalid( value, self, matchType=self.type, criterion=result ) )
else:
d.callback( value )
def match_on_value(self, context, value ):
if self.type is Match.REGEX:
if not self.criterion.match(value):
raise Invalid( value, self, matchType=self.type, criterion=self.criterion.pattern)
return value
elif self.type is Match.VALIDATOR:
compare = defer.maybeDeferred\
( self.criterion.validate
, context
, value
)
d = defer.Deferred()
compare.addBoth( match_gotResult, self, value, d )
return d
else:
compare = self.criterion
val = value
if self.ignoreCase:
compare = str(compare).lower()
val = str(value).lower()
if val != compare:
raise Invalid( value, self, matchType=self.type, criterion=compare )
return value
def if_gotResult( result, d, context, value ):
if isinstance( result, Failure ):
if not isinstance(result.value, Invalid):
d.errback( result )
else:
d.errback( result.value )
else:
d.callback( result )
def if_gotResultExpression( result, validator, d, context, value ):
if isinstance( result, Failure ):
if not isinstance( result.value, Invalid):
raise
value = defer.maybeDeferred\
( validator._else.validate, context, value
)
else:
value = defer.maybeDeferred\
( validator._then.validate, context, result
)
value.addBoth( if_gotResult, d, context, value )
def if_validate( self, context, value ):
d = defer.Deferred()
result = defer.maybeDeferred( self.criterion.validate, context, value )
result.addBoth( if_gotResultExpression, self, d, context, value )
return d
def schema_gotResult( result, resultset, key, isList, returnList ):
if returnList:
resultset.append( result )
else:
resultset[ key ] = result
return result
def schema_gotError( error, errorset, key ):
if isinstance( error, Failure ):
if not isinstance(error.value, Invalid):
raise error
error = error.value
errorset.append( error )
def schema__on_value_done( waste, d, schema, value, result, errors ):
if not errors:
d.callback( result )
else:
d.errback( errors.pop(0) )
def schema__createContextChildren_on_value_done( waste, d, schema, value, result, errors ):
if not errors:
d.callback( result )
else:
d.errback( Invalid( value, schema ) )
def schema__on_value( self, context, value ):
isList = isinstance(value, list) or isinstance(value,tuple) or isinstance(value,set)
if not isList and not isinstance( value, dict ):
raise Invalid( value, self, 'type')
extraFields = None
if not self.allowExtraFields:
if isList:
extraFields = max( len(value), len(self.index) )
else:
extraFields = list(value.keys())
if self.returnList:
result = []
else:
result = {}
numValues = len(value)
jobs = []
errorset = []
for pos in range(len(self.index)):
key = self.index[pos]
if isList:
if numValues>pos:
val = value[ pos ]
if not self.allowExtraFields:
extraFields-=1
else:
val = MISSING
else:
val = value.get( key, MISSING)
if not self.allowExtraFields and val is not MISSING:
try: extraFields.remove(key)
except: pass
job = defer.maybeDeferred\
( self.validators[ key ].validate
, context
, val
)
jobs.append\
( job.addCallback( schema_gotResult, result, key, isList, self.returnList )\
.addErrback( schema_gotError, errorset, key )
)
if extraFields:
raise Invalid( value, self, 'extraFields',extraFields=extraFields)
d = defer.Deferred()
jobs =defer.DeferredList( jobs )
jobs.addCallback\
( schema__on_value_done
, d
, self
, value
, result
, errorset
)
return d
def schema__createContextChildren_on_value( self, context, value ):
isList = isinstance(value, list) or isinstance(value,tuple) or isinstance(value,set)
if not isList and not isinstance( value, dict ):
raise Invalid( value, self, 'type')
extraFields = None
if not self.allowExtraFields:
if isList:
extraFields = max( len(value), len(self.index) )
else:
extraFields = list(value.keys())
errors = []
if self.returnList:
result = []
else:
result = {}
len_value = len(value)
len_index = len(self.index)
# populate
for pos in range(len_index):
key = self.index[pos]
childContext = context( key )
try:
childContext.validator = self.validators[ key ]
except KeyError:
raise SyntaxError("No validator set for %s" % childContext.path)
if isList:
if len_value<=pos:
childContext.__value__ = MISSING
else:
childContext.__value__ = value[ pos ]
else:
childContext.__value__ = value.get( key, MISSING )
if not self.allowExtraFields:
if isList:
extraFields-=1
else:
try: extraFields.remove(key)
except: pass
if extraFields:
raise Invalid( value, self, 'extraFields',extraFields=extraFields)
context.setIndexFunc( lambda index: self.index[index] )
jobs = []
# validate
for key in self.index:
jobs.append\
( context( key ).result\
.addCallback( schema_gotResult, result, key, isList, self.returnList )\
.addErrback( schema_gotError, errors, key )
)
d = defer.Deferred()
jobs = defer.DeferredList( jobs )
jobs.addCallback\
( schema__createContextChildren_on_value_done
, d
, self
, value
, result
, errors
)
return d
def forEach__on_value( self, context, value ):
if self.returnList:
result = []
else:
result = {}
isList = isinstance( value, list) or isinstance(value, tuple) or isinstance(value, set)
errorset = []
jobs = []
if isList or self.numericKeys:
for pos in range( len( value ) ):
if not isList:
val = value.get(str(pos),MISSING)
if val is MISSING:
raise Invalid( value, self, 'numericKeys', keys=list(value.keys()) )
else:
val = value[pos]
key = str(pos)
jobs.append\
( defer.maybeDeferred\
( self.validator.validate
, context, val
).addCallback\
( schema_gotResult
, result
, key
, isList
, self.returnList
)\
.addErrback\
( schema_gotError
, errorset
, key
)
)
else:
for (key, val) in value.items():
jobs.append\
( defer.maybeDeferred\
( self.validator.validate
, context, val
).addCallback\
( schema_gotResult
, result
, key
, isList
, self.returnList
)\
.addErrback\
( schema_gotError
, errorset
, key
)
)
d = defer.Deferred()
jobs = defer.DeferredList( jobs )
jobs.addCallback\
( schema__on_value_done
, d
, self
, value
, result
, errorset
)
return d
def forEach__createContextChildren_on_value( self, context, value ):
isList = isinstance( value, list) or isinstance(value, tuple) or isinstance(value, set)
if not isList:
if not isinstance(value, dict ):
raise Invalid( value, self,'type' )
if self.returnList:
result = []
else:
result = {}
errors = []
# populate
children = []
if isList or self.numericKeys:
context.setIndexFunc( lambda index: str(index) )
for pos in range( len( value ) ):
if not isList:
val = value.get(str(pos),MISSING)
if value.get(str(pos),MISSING) is MISSING:
context.setIndexFunc( None )
raise Invalid( value, self, 'numericKeys',keys=list(value.keys()))
else:
val = value[ pos ]
contextChild = context( str( pos ) )
contextChild.validator = self.validator
contextChild.__value__ = val
children.append( contextChild )
else:
context.setIndexFunc( None )
if self.returnList:
raise Invalid( value, self, 'listType' )
for (key,val) in value.items():
contextChild = context( key )
contextChild.validator = self.validator
contextChild.__value__ = val
children.append( contextChild )
jobs = []
#validate
for childContext in children:
jobs.append\
( childContext.validate()\
.addCallback\
( schema_gotResult
, result
, childContext.key
, isList
, self.returnList
)\
.addErrback\
( schema_gotError
, errors
, childContext.key
)
)
d = defer.Deferred()
jobs = defer.DeferredList( jobs )
jobs.addCallback\
( schema__createContextChildren_on_value_done
, d
, self
, value
, result
, errors
)
return d
@defer.inlineCallbacks
def field_validate(self, context, value):
fieldcontext = self.getField( context, self.path )
if not self.useResult:
result = fieldcontext.value
else:
try:
result = yield fieldcontext.result
except Invalid:
result = PASS
if self.validator is not None:
if result is not PASS:
result = yield defer.maybeDeferred\
( self.validator.validate
, fieldcontext, result
)
if self.writeToContext:
fieldcontext.__result__ = result
if self.copy:
if result is PASS:
defer.returnValue( value )
defer.returnValue( result )
defer.returnValue( value )
from twisted.names import client
from twisted.names.dns import Record_MX
from twisted.names.error import DNSNameError
from twisted.internet.defer import TimeoutError
def mxLookup_gotResult(result, d, value, validator, context ):
if isinstance( result, Failure ):
if isinstance(result.value, TimeoutError):
d.errback( Invalid( value, validator ) )
elif not isinstance(result.value, DNSNameError):
d.errback( result )
else:
d.errback( Invalid( value, validator ) )
return
(answers, auth, add) = result
if not len(answers):
d.errback( Invalid( value, validator ) )
else:
for record in answers:
if isinstance(record.payload,Record_MX):
d.callback( value )
return
d.errback( Invalid( value, validator ) )
mxLookup_resolver = client.Resolver('/etc/resolv.conf')
def mxLookup_on_value( self, context, value ):
d = defer.Deferred()
mxLookup_resolver.lookupMailExchange( value, [2,4,6,8,10] )\
.addBoth( mxLookup_gotResult, d, value, self, context )
return d
Context.validate = context_validate
Tag.validate = tag_validate
    Compose.validate = compose_validate
Tmp.validate = tmp_validate
Item.validate = item_validate
Not.validate = not_validate
And.validate = and_validate
Or.validate = or_validate
Call.validate = call_validate
Match.on_value = match_on_value
If.validate = if_validate
Schema._on_value = schema__on_value
Schema._createContextChildren_on_value = schema__createContextChildren_on_value
ForEach._on_value = forEach__on_value
ForEach._createContextChildren_on_value = forEach__createContextChildren_on_value
Field.validate = field_validate
MXLookup.on_value = mxLookup_on_value
monkeyPatch._isMonkeyPatched = True
from ..util import getArgSpec, getParameterNames
def validateDecorator_gotValidationResult\
( result
, d
, origArgs
, origKwargs
, method
, varargs
, keywords
, shifted
, onInvalid
):
if isinstance( result, Failure ):
if not isinstance(result.value, Invalid):
d.errback( result )
elif onInvalid is not None:
try:
result = onInvalid( result.value )
except Exception as e:
d.errback( e )
else:
d.callback( result )
else:
d.errback( result )
else:
origKwargs.update( result )
resultArgs = origKwargs.pop( varargs, origArgs )
resultArgs = [ origKwargs.pop(key) for key in shifted ] + resultArgs
if keywords is not False:
origKwargs.update( origKwargs.pop( keywords ) )
defer.maybeDeferred( method, *resultArgs, **origKwargs )\
.chainDeferred( d )
def validateDecorator( validator, method, include, exclude, onInvalid, inlineCallbacks ):
if include and exclude:
raise SyntaxError("'include' and 'exclude' cannot be used at the same time")
spec = getArgSpec( method )
hasVarargs = spec.varargs is not None
varargs = spec.varargs or '*varargs'
keywords = spec.keywords or False
methodParameterNames = getParameterNames( method, skipSelf=False )
skip = ()
if exclude:
skip = exclude
if include:
skip = set(methodParameterNames) - set(include)
varargs = varargs
hasVarargs = spec.varargs not in skip and hasVarargs
keywords = keywords not in skip and keywords
if inlineCallbacks:
method = defer.inlineCallbacks( method )
def __wrap( *fargs, **fkwargs):
d = defer.Deferred()
(fargs, fkwargs, shifted ) = varargs2kwargs( method, fargs, fkwargs, skipSelf=False )
origKwargs = dict(fkwargs)
if keywords is not False:
restKwargs = dict(\
( key, fkwargs.pop(key))\
for key in list(fkwargs.keys()) if key not in methodParameterNames
)
fkwargs[ keywords ] = restKwargs
if fargs or hasVarargs:
fkwargs[ varargs ] = list(fargs)
result = validator.context\
( dict( ( key, fkwargs[ key] ) for key in fkwargs if key not in skip )
).result
result.addBoth( validateDecorator_gotValidationResult, d, fargs, origKwargs, method, varargs, keywords, shifted, onInvalid )
return d
return __wrap
def validate( validator, include=None, exclude=None, onInvalid=None, inlineCallbacks=False ):
def __createDecorator( method ):
return validateDecorator( validator, method, include, exclude, onInvalid, inlineCallbacks)
return __createDecorator
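
# Added illustration (hedged sketch; `userSchema` is a placeholder, not an API
# defined in this file). After monkeyPatch() every context validates to a
# Deferred, and the `validate` decorator above can guard Twisted callbacks:
#
#     monkeyPatch()
#
#     @validate(userSchema)                    # any Kanone validator
#     def handle_signup(name, age):
#         ...                                  # only runs with validated kwargs
#
#     d = handle_signup(name='Bob', age='42')  # returns a Deferred
#     d.addErrback(lambda failure: failure.trap(Invalid))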
| 2.046875 | 2 |
sandbox/graph-size.py | maarten1983/khmer | 1 | 10939 | <filename>sandbox/graph-size.py
#! /usr/bin/env python2
#
# This file is part of khmer, http://github.com/ged-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2013. It is licensed under
# the three-clause BSD license; see doc/LICENSE.txt.
# Contact: <EMAIL>
#
import khmer
import sys
import screed
import os.path
from khmer.thread_utils import ThreadedSequenceProcessor, verbose_fasta_iter
K = 32
HASHTABLE_SIZE = int(4e9)
THRESHOLD = 500
N_HT = 4
WORKER_THREADS = 5
###
GROUPSIZE = 100
###
def main():
infile = sys.argv[1]
outfile = os.path.basename(infile) + '.graphsize'
if len(sys.argv) == 3:
outfile = sys.argv[2]
print 'input file to graphsize filter: %s' % infile
print 'filtering to output:', outfile
print '-- settings:'
print 'K', K
print 'HASHTABLE SIZE %g' % HASHTABLE_SIZE
print 'N HASHTABLES %d' % N_HT
print 'THRESHOLD', THRESHOLD
print 'N THREADS', WORKER_THREADS
print '--'
print 'creating ht'
ht = khmer.new_hashbits(K, HASHTABLE_SIZE, N_HT)
print 'eating fa', infile
total_reads, n_consumed = ht.consume_fasta(infile)
outfp = open(outfile, 'w')
###
def process_fn(record, ht=ht):
kmer = record['sequence'][:K]
size = ht.calc_connected_graph_size(kmer, THRESHOLD)
if size >= THRESHOLD:
return record['name'], record['sequence']
return None, None
tsp = ThreadedSequenceProcessor(process_fn, WORKER_THREADS, GROUPSIZE)
###
tsp.start(verbose_fasta_iter(infile), outfp)
if __name__ == '__main__':
main()
| 2.453125 | 2 |
ferry/crawler/fetch_demand.py | coursetable/ferry | 4 | 10940 | """
Fetches demand statistics.
Modified from <NAME>
Original article:
https://yaledailynews.com/blog/2020/01/10/yales-most-popular-courses/
Github:
https://github.com/iamdanzhao/yale-popular-classes
README:
https://github.com/iamdanzhao/yale-popular-classes/blob/master/data-guide/course_data_guide.md
"""
import argparse
from multiprocessing import Pool
from typing import List, Tuple
import ujson
from ferry import config
from ferry.crawler.common_args import add_seasons_args, parse_seasons_arg
from ferry.includes.demand_processing import fetch_season_subject_demand, get_dates
from ferry.includes.tqdm import tqdm
def handle_season_subject_demand(demand_args: Tuple[str, str, List[str], List[str]]):
"""
Handler for fetching subject codes to be passed into Pool()
"""
demand_season, demand_subject_code, demand_subject_codes, demand_dates = demand_args
courses = fetch_season_subject_demand(
demand_season, demand_subject_code, demand_subject_codes, demand_dates
)
return courses
if __name__ == "__main__":
class FetchDemandError(Exception):
"""
Error object for demand fetching exceptions.
"""
# pylint: disable=unnecessary-pass
pass
# Set season
# Pass using command line arguments
# Examples: 202001 = 2020 Spring, 201903 = 2019 Fall
# If no season is provided, the program will scrape all available seasons
parser = argparse.ArgumentParser(description="Import demand stats")
add_seasons_args(parser)
args = parser.parse_args()
# list of seasons previously from fetch_seasons.py
with open(f"{config.DATA_DIR}/demand_seasons.json", "r") as f:
all_viable_seasons = ujson.load(f)
seasons = parse_seasons_arg(args.seasons, all_viable_seasons)
print("Retrieving subjects list... ", end="")
with open(f"{config.DATA_DIR}/demand_subjects.json", "r") as f:
subjects = ujson.load(f)
subject_codes = sorted(list(subjects.keys()))
print("ok")
# set up parallel processing pool
with Pool(processes=64) as pool:
for season in seasons:
print(f"Retrieving demand by subject for season {season}")
dates = get_dates(season)
pool_args = [
(season, subject_code, subject_codes, dates)
for subject_code in subject_codes
]
season_courses = []
# use imap_unordered to report to tqdm
with tqdm(total=len(pool_args), desc="Subjects retrieved") as pbar:
for i, result in enumerate(
pool.imap_unordered(handle_season_subject_demand, pool_args)
):
pbar.update()
season_courses.append(result)
# flatten season courses
season_courses = [x for y in season_courses for x in y]
# sort courses by title (for consistency with ferry-data)
season_courses = sorted(season_courses, key=lambda x: x["title"])
with open(f"{config.DATA_DIR}/demand_stats/{season}_demand.json", "w") as f:
ujson.dump(season_courses, f, indent=4)
| 3.1875 | 3 |
migrate_db.py | qxf2/interview-scheduler | 2 | 10941 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from qxf2_scheduler import models
from qxf2_scheduler import db
from qxf2_scheduler.__init__ import app
from flask_script import Manager
from flask_migrate import Migrate,MigrateCommand
migrate=Migrate(app, db,render_as_batch=True)
manager=Manager(app)
manager.add_command('db',MigrateCommand)
if __name__ == "__main__":
manager.run() | 1.882813 | 2 |
main.py | tarunsinghal92/indeedscrapperlatest | 15 | 10942 | # import packages
import requests
import pandas as pd
import time
from functions import *
# limit per city
max_results_per_city = 100
# list of cities
city_set = ['New+York','Toronto','Las+Vegas']
# job roles
job_set = ['business+analyst','data+scientist']
# file num
file = 1
# from where to skip
SKIPPER = 0
# loop on all cities
for city in city_set:
# for each job role
for job_qry in job_set:
# count
cnt = 0
startTime = time.time()
# skipper
if(file > SKIPPER):
# dataframe
df = pd.DataFrame(columns = ['unique_id', 'city', 'job_qry','job_title', 'company_name', 'location', 'summary', 'salary', 'link', 'date', 'full_text'])
# for results
for start in range(0, max_results_per_city, 10):
# get dom
page = requests.get('http://www.indeed.com/jobs?q=' + job_qry +'&l=' + str(city) + '&start=' + str(start))
#ensuring at least 1 second between page grabs
time.sleep(1)
#fetch data
soup = get_soup(page.text)
divs = soup.find_all(name="div", attrs={"class":"row"})
# if results exist
if(len(divs) == 0):
break
# for all jobs on a page
for div in divs:
#specifying row num for index of job posting in dataframe
num = (len(df) + 1)
cnt = cnt + 1
#job data after parsing
job_post = []
#append unique id
job_post.append(div['id'])
#append city name
job_post.append(city)
#append job qry
job_post.append(job_qry)
#grabbing job title
job_post.append(extract_job_title(div))
#grabbing company
job_post.append(extract_company(div))
#grabbing location name
job_post.append(extract_location(div))
#grabbing summary text
job_post.append(extract_summary(div))
#grabbing salary
job_post.append(extract_salary(div))
#grabbing link
link = extract_link(div)
job_post.append(link)
#grabbing date
job_post.append(extract_date(div))
#grabbing full_text
job_post.append(extract_fulltext(link))
#appending list of job post info to dataframe at index num
df.loc[num] = job_post
#debug add
write_logs(('Completed =>') + '\t' + city + '\t' + job_qry + '\t' + str(cnt) + '\t' + str(start) + '\t' + str(time.time() - startTime) + '\t' + ('file_' + str(file)))
#saving df as a local csv file
df.to_csv('jobs_' + str(file) + '.csv', encoding='utf-8')
else:
#debug add
write_logs(('Skipped =>') + '\t' + city + '\t' + job_qry + '\t' + str(-1) + '\t' + str(-1) + '\t' + str(time.time() - startTime) + '\t' + ('file_' + str(file)))
# increment file
file = file + 1
| 2.984375 | 3 |
L0_serial.py | RL-WWW/ISST | 5 | 10943 | <reponame>RL-WWW/ISST<gh_stars>1-10
# Import Libraries
import numpy as np
import cv2
import argparse
import time
# Import User Libraries
import L0_helpers
# Image File Path
image_r = "images/flowers.jpg"
image_w = "out_serial.png"
# L0 minimization parameters
kappa = 2.0
_lambda = 2e-2
# Verbose output
verbose = False
def L0_smooth(input_path, output_path, kappa=2.0, _lambda=2e-2, verbose=False):
# Set parameters
image_r = input_path
image_w = output_path
# Read image I
image = cv2.imread(image_r)
# Timers
step_1 = 0.0
step_2 = 0.0
step_2_fft = 0.0
# Start time
start_time = time.time()
# Validate image format
N, M, D = np.int32(image.shape)
assert D == 3, "Error: input must be 3-channel RGB image"
print("Processing %d x %d RGB image" % (M, N))
# Initialize S as I
S = np.float32(image) / 256
# Compute image OTF
size_2D = [N, M]
fx = np.int32([[1, -1]])
fy = np.int32([[1], [-1]])
otfFx = L0_helpers.psf2otf(fx, size_2D)
otfFy = L0_helpers.psf2otf(fy, size_2D)
# Compute F(I)
FI = np.complex64(np.zeros((N, M, D)))
FI[:, :, 0] = np.fft.fft2(S[:, :, 0])
FI[:, :, 1] = np.fft.fft2(S[:, :, 1])
FI[:, :, 2] = np.fft.fft2(S[:, :, 2])
# Compute MTF
MTF = np.power(np.abs(otfFx), 2) + np.power(np.abs(otfFy), 2)
MTF = np.tile(MTF[:, :, np.newaxis], (1, 1, D))
# Initialize buffers
h = np.float32(np.zeros((N, M, D)))
v = np.float32(np.zeros((N, M, D)))
dxhp = np.float32(np.zeros((N, M, D)))
dyvp = np.float32(np.zeros((N, M, D)))
FS = np.complex64(np.zeros((N, M, D)))
# Iteration settings
    beta_max = 1e5
beta = 2 * _lambda
iteration = 0
# Done initializing
init_time = time.time()
# Iterate until desired convergence in similarity
while beta < beta_max:
if verbose:
print("ITERATION %i" % iteration)
### Step 1: estimate (h, v) subproblem
# subproblem 1 start time
s_time = time.time()
# compute dxSp
h[:, 0:M - 1, :] = np.diff(S, 1, 1)
h[:, M - 1:M, :] = S[:, 0:1, :] - S[:, M - 1:M, :]
# compute dySp
v[0:N - 1, :, :] = np.diff(S, 1, 0)
v[N - 1:N, :, :] = S[0:1, :, :] - S[N - 1:N, :, :]
# compute minimum energy E = dxSp^2 + dySp^2 <= _lambda/beta
t = np.sum(np.power(h, 2) + np.power(v, 2), axis=2) < _lambda / beta
t = np.tile(t[:, :, np.newaxis], (1, 1, 3))
# compute piecewise solution for hp, vp
h[t] = 0
v[t] = 0
# subproblem 1 end time
e_time = time.time()
step_1 = step_1 + e_time - s_time
if verbose:
print("-subproblem 1: estimate (h,v)")
print("--time: %f (s)" % (e_time - s_time))
### Step 2: estimate S subproblem
# subproblem 2 start time
s_time = time.time()
# compute dxhp + dyvp
dxhp[:, 0:1, :] = h[:, M - 1:M, :] - h[:, 0:1, :]
dxhp[:, 1:M, :] = -(np.diff(h, 1, 1))
dyvp[0:1, :, :] = v[N - 1:N, :, :] - v[0:1, :, :]
dyvp[1:N, :, :] = -(np.diff(v, 1, 0))
normin = dxhp + dyvp
fft_s = time.time()
FS[:, :, 0] = np.fft.fft2(normin[:, :, 0])
FS[:, :, 1] = np.fft.fft2(normin[:, :, 1])
FS[:, :, 2] = np.fft.fft2(normin[:, :, 2])
fft_e = time.time()
step_2_fft += fft_e - fft_s
# solve for S + 1 in Fourier domain
denorm = 1 + beta * MTF
FS[:, :, :] = (FI + beta * FS) / denorm
# inverse FFT to compute S + 1
fft_s = time.time()
S[:, :, 0] = np.float32((np.fft.ifft2(FS[:, :, 0])).real)
S[:, :, 1] = np.float32((np.fft.ifft2(FS[:, :, 1])).real)
S[:, :, 2] = np.float32((np.fft.ifft2(FS[:, :, 2])).real)
fft_e = time.time()
step_2_fft += fft_e - fft_s
# subproblem 2 end time
e_time = time.time()
step_2 = step_2 + e_time - s_time
if verbose:
print("-subproblem 2: estimate S + 1")
print("--time: %f (s)" % (e_time - s_time))
print("")
# update beta for next iteration
beta *= kappa
iteration += 1
# Rescale image
S = S * 256
# Total end time
final_time = time.time()
print("Total Time: %f (s)" % (final_time - start_time))
print("Setup: %f (s)" % (init_time - start_time))
print("Step 1: %f (s)" % (step_1))
print("Step 2: %f (s)" % (step_2))
print("Step 2 (FFT): %f (s)" % (step_2_fft))
print("Iterations: %d" % (iteration))
cv2.imwrite(image_w, S)
if __name__ == '__main__':
# Parse arguments
parser = argparse.ArgumentParser(
description="Serial implementation of image smoothing via L0 gradient minimization")
parser.add_argument('image_r', help="input image file")
parser.add_argument('image_w', help="output image file")
parser.add_argument('-k', type=float, default=2.0,
metavar='kappa', help='updating weight (default 2.0)')
parser.add_argument('-l', type=float, default=2e-2,
metavar='lambda', help='smoothing weight (default 2e-2)')
parser.add_argument('-v', '--verbose', action='store_true',
help='enable verbose logging for each iteration')
args = parser.parse_args()
L0_smooth(args.image_r, args.image_w, args.k, args.l, args.verbose)
| 2.5 | 2 |
data_processing/signal_downsampling.py | HassanHayat08/Interpretable-CNN-for-Big-Five-Personality-Traits-using-Audio-Data | 9 | 10944 | <filename>data_processing/signal_downsampling.py
### Interpretable cnn for big five personality traits using audio data ###
### This script downsamples 41000 kz signal into 4000 kz signal ###
from __future__ import absolute_import, division, print_function
import pathlib
import random
import csv
import numpy as np
from scipy.io import wavfile
import tensorflow as tf
import itertools
from scipy import stats
### functions for mapping ###
def normalize_with_moments(data, axes=[0], epsilon=1e-8):
mean, variance = tf.nn.moments(data, axes=axes)
data_normed = (data - mean) / tf.sqrt(variance + epsilon) # epsilon to avoid dividing by zero
return data_normed
def get_wav(path, label):
wav_file = tf.read_file(path)
data = tf.contrib.ffmpeg.decode_audio(tf.read_file(path), file_format="wav",samples_per_second=4000, channel_count=1)
data = tf.cast(data,tf.complex64)
data = tf.fft(data,name='FFT')
return normalize_with_moments(data), label
### down sample the data ###
data = []
labels = []
folder_path = '/...path/to/wav/data/folder/'
folder_path = pathlib.Path(folder_path)
files_path = list(folder_path.glob('*.wav'))
files_path = [str(path) for path in files_path]
no_of_samples = len(files_path)
### load data labels ###
with open('/...path/to/.csv/labels/file', 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in spamreader:
data.append(row)
for i in range(len(files_path)):
file_1 = files_path[i]
file_1 = file_1.split("/")[5]
file_name_1 = file_1[:-4]
new_filename_1 = file_name_1 + '.mp4'
label_1 = []
label_2 = []
matching = [s for s in data if new_filename_1 in s]
label_1= np.delete(matching,[0],axis=1)
label_2 = label_1[0,:]
label_2 = [float(i) for i in label_2]
labels.append(label_2)
### dataset pipeline ###
ds = tf.data.Dataset.from_tensor_slices((files_path, labels))
data_ds = ds.map(get_wav)
ds = data_ds.shuffle(buffer_size=no_of_samples)
ds = ds.repeat()
ds = ds.batch(1)
### prefetch the data batches in the background ###
ds = ds.prefetch(buffer_size=1)
iterator = ds.make_one_shot_iterator()
next_ele = iterator.get_next()
features_4k = []
labels_4k = []
with tf.Session() as sess:
for _ in range(len(files_path)):
t_features, t_labels = sess.run(next_ele)
features_4k.append(t_features)
labels_4k.append(t_labels)
np.save('.../save/path/',features_4k)
np.save('.../save/path/',labels_4k)
print('Completed')
| 2.75 | 3 |
ai_flow/model_center/entity/_model_repo_object.py | flink-extended/ai-flow | 79 | 10945 | #
# Copyright 2022 The AI Flow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import pprint
from abc import abstractmethod
class _ModelRepoObject(object):
def __iter__(self):
# Iterate through list of properties and yield as key -> value
for prop in self._properties():
yield prop, self.__getattribute__(prop)
@classmethod
def _get_properties_helper(cls):
return sorted([p for p in cls.__dict__ if isinstance(getattr(cls, p), property)])
@classmethod
def _properties(cls):
return cls._get_properties_helper()
@classmethod
@abstractmethod
def from_proto(cls, proto):
pass
def __repr__(self):
return to_string(self)
def to_string(obj):
return _ModelRepoObjectPrinter().to_string(obj)
def get_classname(obj):
return type(obj).__name__
class _ModelRepoObjectPrinter(object):
def __init__(self):
super(_ModelRepoObjectPrinter, self).__init__()
self.printer = pprint.PrettyPrinter()
def to_string(self, obj):
if isinstance(obj, _ModelRepoObject):
return "<%s: %s>" % (get_classname(obj), self._entity_to_string(obj))
return self.printer.pformat(obj)
def _entity_to_string(self, entity):
return ", ".join(["%s=%s" % (key, self.to_string(value)) for key, value in entity])
| 2.015625 | 2 |
model_search/search/common_test.py | LinqCod/model_search | 0 | 10946 | <filename>model_search/search/common_test.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for model_search.search.common."""
from absl.testing import parameterized
from model_search.search import common
import tensorflow.compat.v2 as tf
class CommonTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.named_parameters(
{
"testcase_name": "no_completed_trials",
"num_completed_trials": 0,
"expected": 1,
}, {
"testcase_name": "some_completed_trials",
"num_completed_trials": 11,
"expected": 3,
}, {
"testcase_name": "custom_depth_thresholds",
"num_completed_trials": 2,
"expected": 2,
"depth_thresholds": [0, 1, 10, 20],
}, {
"testcase_name": "maximum_respected",
"num_completed_trials": 1000,
"expected": 5,
})
def test_get_allowed_depth(self,
num_completed_trials,
expected,
depth_thresholds=None):
actual = common.get_allowed_depth(
num_completed_trials, depth_thresholds, max_depth=5)
self.assertEqual(expected, actual)
def test_get_random_architecture(self):
architecture = common.get_random_architecture(["a", "b", "c"], 3)
self.assertLen(architecture, 3)
self.assertAllInSet(architecture, ["a", "b", "c"])
def test_get_random_block(self):
block = common.get_random_block(["a", "b", "c"])
self.assertIn(block, ["a", "b", "c"])
if __name__ == "__main__":
tf.enable_v2_behavior()
tf.test.main()
| 2.109375 | 2 |
model-builder/skrutil/deprecate_util.py | DaYeSquad/worktilerwdemo | 5 | 10947 | <reponame>DaYeSquad/worktilerwdemo
import warnings
def deprecated(func):
"""This is a decorator which can be used to mark functions
    as deprecated. It will result in a warning being emitted
when the function is used."""
def newFunc(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning)
warnings.warn("Call to deprecated function %s." % func.__name__,
category=DeprecationWarning)
return func(*args, **kwargs)
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__dict__.update(func.__dict__)
return newFunc
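
# Minimal usage sketch for the decorator above (the function name and return value
# are illustrative assumptions, not part of this module):
#
#   @deprecated
#   def old_api():
#       return 42
#
#   old_api()  # emits "Call to deprecated function old_api." as a DeprecationWarning, then returns 42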
| 2.90625 | 3 |
models.py | sheldonjinqi/CIS680_BicycleGAN | 0 | 10948 | from torchvision.models import resnet18
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
import torch
import pdb
##############################
# Encoder
##############################
class Encoder(nn.Module):
def __init__(self, latent_dim):
super(Encoder, self).__init__()
""" The encoder used in both cVAE-GAN and cLR-GAN, which encode image B or B_hat to latent vector
This encoder uses resnet-18 to extract features, and further encode them into a distribution
similar to VAE encoder.
Note: You may either add "reparametrization trick" and "KL divergence" or in the train.py file
Args in constructor:
latent_dim: latent dimension for z
Args in forward function:
img: image input (from domain B)
Returns:
mu: mean of the latent code
logvar: sigma of the latent code
"""
# Extracts features at the last fully-connected
resnet18_model = resnet18(pretrained=True)
self.feature_extractor = nn.Sequential(*list(resnet18_model.children())[:-3])
self.pooling = nn.AvgPool2d(kernel_size=8, stride=8, padding=0)
# Output is mu and log(var) for reparameterization trick used in VAEs
self.fc_mu = nn.Linear(256, latent_dim)
self.fc_logvar = nn.Linear(256, latent_dim)
def forward(self, img):
out = self.feature_extractor(img)
out = self.pooling(out)
out = out.view(out.size(0), -1)
mu = self.fc_mu(out)
logvar = self.fc_logvar(out)
return mu, logvar
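
# Hedged sketch of the reparameterization step mentioned in the Encoder docstring.
# This helper is an illustrative assumption about how (mu, logvar) would typically be
# turned into z; the original leaves that choice to the model or to train.py.
def reparameterize(mu, logvar):
    # z = mu + sigma * eps with eps ~ N(0, I); differentiable w.r.t. mu and logvar
    std = torch.exp(0.5 * logvar)
    eps = torch.randn_like(std)
    return mu + eps * std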
##############################
# Generator
##############################
class Generator(nn.Module):
""" The generator used in both cVAE-GAN and cLR-GAN, which transform A to B
Args in constructor:
latent_dim: latent dimension for z
image_shape: (channel, h, w), you may need this to specify the output dimension (optional)
Args in forward function:
x: image input (from domain A)
z: latent vector (encoded B)
Returns:
fake_B: generated image in domain B
"""
def __init__(self, latent_dim, img_shape):
super(Generator, self).__init__()
channels, self.h, self.w = img_shape
# (TODO: add layers...)
def forward(self, x, z):
# (TODO: add layers...)
return
##############################
# Discriminator
##############################
class Discriminator(nn.Module):
def __init__(self, in_channels=3):
super(Discriminator, self).__init__()
""" The discriminator used in both cVAE-GAN and cLR-GAN
Args in constructor:
in_channels: number of channel in image (default: 3 for RGB)
Args in forward function:
x: image input (real_B, fake_B)
Returns:
discriminator output: could be a single value or a matrix depending on the type of GAN
"""
def forward(self, x):
return
| 2.71875 | 3 |
oem_storage_file/main.py | OpenEntityMap/oem-storage-file | 0 | 10949 | <filename>oem_storage_file/main.py<gh_stars>0
from oem_framework.models.core import ModelRegistry
from oem_framework.plugin import Plugin
from oem_framework.storage import ProviderStorage
from oem_storage_file.core.base import BaseFileStorage
from oem_storage_file.database import DatabaseFileStorage
import appdirs
import os
class ProviderFileStorage(ProviderStorage, BaseFileStorage, Plugin):
__key__ = 'file'
def __init__(self, path=None):
super(ProviderFileStorage, self).__init__()
self.path = path
if self.path is None:
self.path = self._create_dir()
@classmethod
def open(cls, client, path=None):
storage = cls(path)
storage.initialize(client)
return storage
#
# Provider methods
#
def create(self, source, target):
package_path = self.package_path(source, target)
# Ensure cache directory exists
if not os.path.exists(package_path):
os.makedirs(package_path)
return True
def open_database(self, source, target, path=None):
return ModelRegistry['Database'].load(
DatabaseFileStorage.open(self, source, target, path),
source, target
)
#
# Index methods
#
def has_index(self, source, target):
return os.path.exists(os.path.join(
self._collection_path(source, target),
'index.%s' % self.main.format.__extension__
))
def update_index(self, source, target, response):
# Build collection path
collection_path = self._collection_path(source, target)
# Ensure directory exists
if not os.path.exists(collection_path):
os.makedirs(collection_path)
# Write index to file
path = os.path.join(collection_path, 'index.%s' % self.main.format.__extension__)
with open(path, 'w') as fp:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
fp.write(chunk)
return True
#
# Item methods
#
def has_item(self, source, target, key, metadata=None):
return os.path.exists(os.path.join(
self._collection_path(source, target), 'items',
'%s.%s' % (key, self.main.format.__extension__)
))
def update_item(self, source, target, key, response, metadata):
# Build collection path
items_path = os.path.join(self._collection_path(source, target), 'items')
# Ensure directory exists
if not os.path.exists(items_path):
os.makedirs(items_path)
# Write index to file
path = os.path.join(items_path, '%s.%s' % (key, self.main.format.__extension__))
with open(path, 'w') as fp:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
fp.write(chunk)
return True
#
# Private methods
#
def _collection_path(self, source, target):
return os.path.join(self.database_path(source, target), source)
@staticmethod
def _create_dir():
# Build cache path
path = os.path.join(
appdirs.user_data_dir('OpenEntityMap', appauthor=False),
'databases',
'file'
)
# Ensure cache directory exists
if not os.path.exists(path):
os.makedirs(path)
return path
| 2.203125 | 2 |
scripts/slave/recipe_modules/v8/gclient_config.py | bopopescu/chromium-build | 0 | 10950 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import DEPS
CONFIG_CTX = DEPS['gclient'].CONFIG_CTX
ChromiumGitURL = DEPS['gclient'].config.ChromiumGitURL
@CONFIG_CTX()
def v8(c):
soln = c.solutions.add()
soln.name = 'v8'
soln.url = ChromiumGitURL(c, 'v8', 'v8')
c.got_revision_reverse_mapping['got_revision'] = 'v8'
# Needed to get the testers to properly sync the right revision.
# TODO(infra): Upload full buildspecs for every build to isolate and then use
# them instead of this gclient garbage.
c.parent_got_revision_mapping['parent_got_revision'] = 'got_revision'
p = c.patch_projects
p['icu'] = ('v8/third_party/icu', 'HEAD')
@CONFIG_CTX(includes=['v8'])
def dynamorio(c):
soln = c.solutions.add()
soln.name = 'dynamorio'
soln.url = ChromiumGitURL(c, 'external', 'dynamorio')
@CONFIG_CTX(includes=['v8'])
def llvm_compiler_rt(c):
c.solutions[0].custom_deps['v8/third_party/llvm/projects/compiler-rt'] = (
ChromiumGitURL(c, 'external', 'llvm.org', 'compiler-rt'))
@CONFIG_CTX()
def node_js(c):
soln = c.solutions.add()
soln.name = 'node.js'
soln.url = ChromiumGitURL(c, 'external', 'github.com', 'v8', 'node')
soln.revision = 'vee-eight-lkgr:HEAD'
c.got_revision_reverse_mapping['got_node_js_revision'] = soln.name
@CONFIG_CTX(includes=['v8'])
def v8_valgrind(c):
c.solutions[0].custom_deps['v8/third_party/valgrind'] = (
ChromiumGitURL(c, 'chromium', 'deps', 'valgrind', 'binaries'))
| 1.8125 | 2 |
parser/fase2/team28/models/Other/funcion.py | jossiebk/tytus | 0 | 10951 | from models.instructions.shared import Instruction
from models.Other.ambito import Ambito
from controllers.three_address_code import ThreeAddressCode
from controllers.procedures import Procedures
from models.instructions.Expression.expression import DATA_TYPE, PrimitiveData
class Parametro(Instruction):
def __init__(self, id, data_type, line, column):
self.id = id
self.data_type = data_type
self.line = line
self.column = column
self._tac = ''
def compile(self):
pass
def process(self, environment):
pass
def __repr__(self):
return str(vars(self))
class Funcion(Instruction):
def __init__(self, id, params, body, val_return, isNew, isCall, line, column):
self.id = id
self.params = params
self.body = body
self.val_return = val_return
self.isNew = isNew
self.isCall = isCall
self.environment = None
self.line = line
self.column = column
def __repr__(self):
return str(vars(self))
def process(self, environment):
pass
def compile(self, environment):
params = len(self.params)
temporal = None
if self.isNew:
self.environment = environment # TODO verificar
if Procedures().saveProcedure(self.id, self, self.line, self.column):
var_array = self.print(environment)
temporal = self.setVariables(var_array, environment)
else:
var_array = Procedures().getProcedure(self.id, params, self.line, self.column)
if var_array:
temporal = self.setVariables(var_array, environment)
fun = ThreeAddressCode().searchFunction(self.id)
if fun:
temporal = self.setVariables(fun['variables'], environment)
return temporal
#temp = ThreeAddressCode().newTemp()
def print(self, environment):
if ThreeAddressCode().searchFunction(self.id):
return None
ThreeAddressCode().newFunction(self.id)
newAmbito = Ambito(environment)
pos = 0
var_array = []
for var in self.params:
pos = ThreeAddressCode().stackCounter
var_array.append(newAmbito.addVar(var.id, var.data_type, None,
pos, var.line, var.column))
ThreeAddressCode().incStackCounter()
pos = ThreeAddressCode().stackCounter
        # Generate the exit label for the function
lbl_exit = ThreeAddressCode().newLabel()
newAmbito.lbl_return = lbl_exit
        # Add the function body
self.body.compile(newAmbito)
        # Add the exit label
ThreeAddressCode().addCode(f"label .{lbl_exit}")
        # Prints the first declared variable, NOT a parameter
# ThreeAddressCode().addCode(f"print(Stack[{pos}])")
ThreeAddressCode().createFunction(self.id, self.params, var_array)
return var_array
def setVariables(self, var_array, environment):
if self.isCall:
value = 0
for index, var in enumerate(var_array):
value = self.params[index].compile(environment)
if isinstance(value, PrimitiveData):
if value.data_type == DATA_TYPE.STRING:
value.value = f"\'{value.value}\'"
ThreeAddressCode().addCode(f"Stack[{var.position}] = {value.value}")
temp = ThreeAddressCode().newTemp()
            # Call the function
ThreeAddressCode().addCode(f"{self.id}()")
            # Get the function's return value
ThreeAddressCode().addCode("#Obteniendo valor de retorno--------")
ThreeAddressCode().addCode(f"{temp} = Stack[P]")
return temp
return None
class DropFuncion(Instruction):
def __init__(self, id, params, line, column):
self.id = id
self.params = params
self.line = line
self.column = column
class ProcedimientoAlmacenado(Instruction):
def __init__(self, id, params, body, isNew, isCall, line, column):
self.id = id
self.params = params
self.body = body
self.isNew = isNew
self.isCall = isCall
self.environment = None
self.line = line
self.column = column
def __repr__(self):
return str(vars(self))
def process(self, environment):
pass
def compile(self, environment):
params = len(self.params)
if self.isNew:
self.environment = environment # TODO verificar
if Procedures().saveProcedure(self.id, self, self.line, self.column):
var_array = self.print(environment)
self.setVariables(var_array, environment)
else:
var_array = Procedures().getProcedure(self.id, params, self.line, self.column)
if var_array:
self.setVariables(var_array, environment)
fun = ThreeAddressCode().searchFunction(self.id)
if fun:
self.setVariables(fun['variables'], environment)
#temp = ThreeAddressCode().newTemp()
def print(self, environment):
if ThreeAddressCode().searchFunction(self.id):
return None
ThreeAddressCode().newFunction(self.id)
newAmbito = Ambito(environment)
pos = 0
var_array = []
for var in self.params:
pos = ThreeAddressCode().stackCounter
var_array.append(newAmbito.addVar(var.id, var.data_type, None,
pos, var.line, var.column))
ThreeAddressCode().incStackCounter()
pos = ThreeAddressCode().stackCounter
        # Generate the exit label for the function
lbl_exit = ThreeAddressCode().newLabel()
newAmbito.lbl_return = lbl_exit
        # Add the function body
self.body.compile(newAmbito)
        # Add the exit label
ThreeAddressCode().addCode(f"label .{lbl_exit}")
        # Prints the first declared variable, NOT a parameter
ThreeAddressCode().addCode(f"print(Stack[{pos}])")
ThreeAddressCode().createFunction(self.id, self.params, var_array)
return var_array
def setVariables(self, var_array, environment):
if self.isCall:
value = 0
for index, var in enumerate(var_array):
value = self.params[index].compile(environment)
if isinstance(value, PrimitiveData):
if value.data_type == DATA_TYPE.STRING:
value.value = f"\'{value.value}\'"
ThreeAddressCode().addCode(f"Stack[{var.position}] = {value.value}")
            # Call the function
ThreeAddressCode().addCode(f"{self.id}()")
            # A stored procedure does NOT return anything | 2.53125 | 3 |
podcast/download.py | jessstringham/podcasts | 1 | 10952 | import typing
import urllib.error
import urllib.request
from podcast.files import download_location
from podcast.info import build_info_content
from podcast.info import InfoContent
from podcast.models import Channel
from podcast.models import get_podcast_audio_link
from podcast.models import NewStatus
from podcast.models import Podcast
from podcast.models import Radio
from podcast.models import RadioDirectory
def _download_from_url(url: str, location: str) -> bool:
try:
urllib.request.urlretrieve(url, location)
return True
except (IOError, urllib.error.ContentTooShortError):
# If a connection can't be made, IOError is raised
# If the download gets interrupted (ContentTooShortError), we
# should try again later
# TODO: can we tell if it was a bad filename (and should stop
# requesting it), or internet connectivity (and should tell
# us), or just a fluke (and should retry)?
return False
def download_podcast(
directory: RadioDirectory,
channel: Channel,
podcast: Podcast) -> Podcast:
location = download_location(directory, channel, podcast)
url = get_podcast_audio_link(podcast)
# TODO: This takes some time, especially when there are a lot to
# download. I could have this spawn threads, or add priorities,
# and so on. For now, since it runs every few hours, and is more
# of a push than a pull situation for the user, I'm leaving it
# simple
success = _download_from_url(url, location)
if success:
return podcast._replace(status=NewStatus())
else:
return podcast
def download_channel(directory: RadioDirectory, channel: Channel) -> Channel:
updated_podcasts = []
for known_podcast in channel.known_podcasts:
if type(known_podcast.status).__name__ == 'RequestedStatus':
known_podcast = download_podcast(directory, channel, known_podcast)
updated_podcasts.append(known_podcast)
return channel._replace(known_podcasts=updated_podcasts)
def download_radio(radio: Radio) -> typing.Tuple[Radio, InfoContent]:
downloaded_channels = [
download_channel(radio.directory, channel)
for channel in radio.channels
]
radio = radio._replace(channels=downloaded_channels)
info_content = build_info_content()
return (radio, info_content)
| 2.515625 | 3 |
tests/model/test_ocrd_page.py | j23d/core | 0 | 10953 | <gh_stars>0
from tests.base import TestCase, main, assets
from ocrd_models.ocrd_page import (
AlternativeImageType,
PcGtsType,
PageType,
TextRegionType,
TextLineType,
WordType,
GlyphType,
parseString,
parse,
to_xml
)
simple_page = """\
<PcGts xmlns="http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15 http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15/pagecontent.xsd">
<Metadata>
<Creator>OCR-D</Creator>
<Created>2016-09-20T11:09:27.041+02:00</Created>
<LastChange>2018-04-25T17:44:49.605+01:00</LastChange>
</Metadata>
<Page
imageFilename="https://github.com/OCR-D/assets/raw/master/data/kant_aufklaerung_1784/data/OCR-D-IMG/INPUT_0017.tif"
imageWidth="1457"
imageHeight="2083"
type="content">
<TextRegion type="heading" id="r_1_1" custom="readingOrder {index:0;} structure {type:heading;}">
<Coords points="113,365 919,365 919,439 113,439"/>
<TextLine id="tl_1" primaryLanguage="German" custom="readingOrder {index:0;} textStyle {offset:0; length:26;fontFamily:Arial; fontSize:17.0; bold:true;}">
<Coords points="114,366 918,366 918,438 114,438"/>
<Baseline points="114,429 918,429"/>
<Word id="w_w1aab1b1b2b1b1ab1" language="German" custom="readingOrder {index:0;} textStyle {offset:0; length:11;fontFamily:Arial; fontSize:17.0; bold:true;}">
<Coords points="114,368 442,368 442,437 114,437"/>
<TextEquiv conf="0.987654321">
                        <Unicode>Berliniſche</Unicode>
</TextEquiv>
</Word>
</TextLine>
</TextRegion>
</Page>
</PcGts>
"""
# pylint: disable=protected-access
class TestOcrdPage(TestCase):
def setUp(self):
with open(assets.path_to('glyph-consistency/data/OCR-D-GT-PAGE/FAULTY_GLYPHS.xml'), 'rb') as f:
self.xml_as_str = f.read()
self.pcgts = parseString(self.xml_as_str, silence=True)
def test_to_xml(self):
# with open('/tmp/test.xml', 'w') as f:
# f.write(to_xml(self.pcgts))
self.assertIn(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://schema.primaresearch.org/PAGE/gts/pagecontent/2019-07-15 http://schema.primaresearch.org/PAGE/gts/pagecontent/2019-07-15/pagecontent.xsd"', to_xml(self.pcgts)[:1000])
self.assertIn('</TextRegion', to_xml(self.pcgts))
def test_issue_269(self):
"""
@conf is parsed as str but should be float
https://github.com/OCR-D/core/issues/269
"""
# GIGO
self.pcgts.get_Page().get_TextRegion()[0].get_TextEquiv()[0].set_conf(1.0)
self.assertEqual(type(self.pcgts.get_Page().get_TextRegion()[0].get_TextEquiv()[0].get_conf()), float)
self.pcgts.get_Page().get_TextRegion()[0].get_TextEquiv()[0].set_conf('1.0')
self.assertEqual(type(self.pcgts.get_Page().get_TextRegion()[0].get_TextEquiv()[0].get_conf()), str)
# test with parseString that @conf in TextEquiv won't throw an error
parseString(simple_page, silence=True)
# self.assertTrue(True)
def test_pcGtsId(self):
self.assertEqual(self.pcgts.pcGtsId, 'glyph-test')
def test_delete_region(self):
pcgts = parseString(simple_page, silence=True)
self.assertEqual(len(pcgts.get_Page().get_TextRegion()), 1)
del pcgts.get_Page().get_TextRegion()[0]
self.assertEqual(len(pcgts.get_Page().get_TextRegion()), 0)
def test_imageFileName(self):
# print(self.pcgts.export(sys.stdout, 0))
self.assertEqual(self.pcgts.get_Page().imageFilename, '00000259.sw.tif')
self.pcgts.get_Page().imageFilename = 'foo'
self.assertEqual(self.pcgts.get_Page().imageFilename, 'foo')
def test_alternativeImage(self):
pcgts = PcGtsType(pcGtsId="foo")
self.assertEqual(pcgts.pcGtsId, 'foo')
# Page/AlternativeImage
page = PageType()
pcgts.set_Page(page)
page.add_AlternativeImage(AlternativeImageType())
# TextRegion/AlternativeImage
region = TextRegionType()
page.add_TextRegion(region)
region.add_AlternativeImage(AlternativeImageType())
# TextLine/AlternativeImage
line = TextLineType()
region.add_TextLine(line)
line.add_AlternativeImage(AlternativeImageType())
# Word/AlternativeImage
word = WordType()
line.add_Word(word)
word.add_AlternativeImage(AlternativeImageType())
# Glyph/AlternativeImage
glyph = GlyphType()
word.add_Glyph(glyph)
glyph.add_AlternativeImage(AlternativeImageType())
def test_simpletypes(self):
pcgts = parseString(simple_page, silence=True)
self.assertTrue(isinstance(pcgts.get_Page().imageWidth, int))
el = pcgts.get_Page().get_TextRegion()[0].get_TextLine()[0].get_Word()[0].get_TextEquiv()[0]
self.assertTrue(isinstance(el.conf, float))
# XXX no validation on setting attributes :-(
# c.f. https://www.davekuhlman.org/generateDS.html#simpletype
# el.set_conf('2.0987')
# self.assertTrue(isinstance(el.conf, float))
with self.assertRaisesRegex(TypeError, ''):
el.set_conf('I AM NOT A FLOAT DEAL WITH IT')
parseString(to_xml(pcgts).encode('utf8'))
if __name__ == '__main__':
main()
| 2.125 | 2 |
athena/athena/algorithms/NetworkAnalysis/Components.py | aculich/openmappr | 19 | 10954 | <gh_stars>10-100
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 13 15:35:50 2014
@author: rich
"""
import networkx as nx
# assign component IDs to graph components, id=0 is giant component
def componentIDs(network):
# networkx algo only works on undirected network
if isinstance(network, nx.DiGraph):
network = nx.Graph(network)
cIDs = {}
components = sorted(nx.connected_components(network), key = len, reverse=True)
# assign ids to node properties
for i in range(len(components)):
component = components[i]
cIDs.update(dict(zip(component, len(component)*[i])))
return cIDs
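
# Hedged usage sketch (the example graph is illustrative, not part of the original module):
#
#   g = nx.karate_club_graph()
#   cids = componentIDs(g)  # dict {node: component_id}, where id 0 is the giant component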
| 2.484375 | 2 |
tools/netconf.py | jpfluger/radiucal | 5 | 10955 | <filename>tools/netconf.py
#!/usr/bin/python
"""composes the config from user definitions."""
import argparse
import os
import users
import users.__config__
import importlib
import csv
# file indicators
IND_DELIM = "_"
USER_INDICATOR = "user" + IND_DELIM
VLAN_INDICATOR = "vlan" + IND_DELIM
AUTH_PHASE_ONE = "PEAP"
AUTH_PHASE_TWO = "MSCHAPV2"
class ConfigMeta(object):
"""configuration meta information."""
def __init__(self):
"""init the instance."""
self.passwords = []
self.macs = []
self.vlans = []
self.all_vlans = []
self.user_name = []
self.vlan_users = []
self.vlan_initiate = []
self.extras = []
def password(self, password):
"""password group validation(s)."""
if password in self.passwords:
print("password duplicated")
exit(-1)
self.passwords.append(password)
def extra(self, macs):
"""Limited macs."""
for mac in macs:
if mac in self.extras:
print("mac already known as extra: " + mac)
exit(-1)
self.extras.append(mac)
def user_macs(self, macs):
"""user+mac combos."""
self.macs = self.macs + macs
self.macs = list(set(self.macs))
def verify(self):
"""verify meta data."""
for mac in self.macs:
if mac in self.extras:
print("mac is flagged extra: " + mac)
exit(-1)
for mac in self.extras:
if mac in self.macs:
print("mac is user assigned: " + mac)
exit(-1)
used_vlans = set(self.vlans + self.vlan_initiate)
if len(used_vlans) != len(set(self.all_vlans)):
print("unused vlans detected")
exit(-1)
for ref in used_vlans:
if ref not in self.all_vlans:
print("reference to unknown vlan: " + ref)
exit(-1)
def vlan_user(self, vlan, user):
"""indicate a vlan was used."""
self.vlans.append(vlan)
self.vlan_users.append(vlan + "." + user)
self.user_name.append(user)
def vlan_to_vlan(self, vlan_to):
"""VLAN to VLAN mappings."""
self.vlan_initiate.append(vlan_to)
def _get_mod(name):
"""import the module dynamically."""
return importlib.import_module("users." + name)
def _load_objs(name, typed):
mod = _get_mod(name)
for key in dir(mod):
obj = getattr(mod, key)
if not isinstance(obj, typed):
continue
yield obj
def _get_by_indicator(indicator):
"""get by a file type indicator."""
return [x for x in sorted(users.__all__) if x.startswith(indicator)]
def _common_call(common, method, entity):
"""make a common mod call."""
obj = entity
if common is not None and method in dir(common):
call = getattr(common, method)
if call is not None:
obj = call(obj)
return obj
def check_object(obj):
"""Check an object."""
return obj.check()
def _process(output):
"""process the composition of users."""
common_mod = None
try:
common_mod = _get_mod("common")
print("loaded common definitions...")
except Exception as e:
print("defaults only...")
vlans = None
meta = ConfigMeta()
for v_name in _get_by_indicator(VLAN_INDICATOR):
print("loading vlan..." + v_name)
for obj in _load_objs(v_name, users.__config__.VLAN):
if vlans is None:
vlans = {}
if not check_object(obj):
exit(-1)
num_str = str(obj.num)
for vk in vlans.keys():
if num_str == vlans[vk]:
print("vlan number defined multiple times...")
exit(-1)
vlans[obj.name] = num_str
if obj.initiate is not None and len(obj.initiate) > 0:
for init_to in obj.initiate:
meta.vlan_to_vlan(init_to)
if vlans is None:
raise Exception("missing required config settings...")
meta.all_vlans = vlans.keys()
store = Store()
for f_name in _get_by_indicator(USER_INDICATOR):
print("composing..." + f_name)
for obj in _load_objs(f_name, users.__config__.Assignment):
obj = _common_call(common_mod, 'ready', obj)
key = f_name.replace(USER_INDICATOR, "")
if not key.isalnum():
print("does not meet naming requirements...")
exit(-1)
vlan = obj.vlan
if vlan not in vlans:
raise Exception("no vlan defined for " + key)
store.add_vlan(vlan, vlans[vlan])
meta.vlan_user(vlan, key)
fqdn = vlan + "." + key
if not check_object(obj):
print("did not pass check...")
exit(-1)
if obj.disabled:
print("account is disabled")
continue
macs = sorted(obj.macs)
password = <PASSWORD>
bypassed = sorted(obj.bypassed())
owned = sorted(obj.owns)
# meta checks
meta.user_macs(macs)
if not obj.inherits:
meta.password(password)
meta.extra(bypassed)
meta.extra(owned)
store.add_user(fqdn, macs, password)
if obj.mab_only:
store.set_mab(fqdn)
if len(bypassed) > 0:
for m in bypassed:
store.add_mab(m, obj.bypass_vlan(m))
user_all = []
for l in [obj.macs, obj.owns, bypassed]:
user_all += list(l)
store.add_audit(fqdn, sorted(set(user_all)))
meta.verify()
# audit outputs
with open(output + "audit.csv", 'w') as f:
csv_writer = csv.writer(f, lineterminator=os.linesep)
for a in sorted(store.get_tag(store.audit)):
p = a[0].split(".")
for m in a[1]:
csv_writer.writerow([p[1], p[0], m])
# eap_users and preauth
manifest = []
with open(output + "eap_users", 'w') as f:
for u in store.get_eap_user():
f.write('"{}" {}\n\n'.format(u[0], AUTH_PHASE_ONE))
f.write('"{}" {} hash:{} [2]\n'.format(u[0], AUTH_PHASE_TWO, u[1]))
write_vlan(f, u[2])
for u in store.get_eap_mab():
up = u[0].upper()
f.write('"{}" MD5 "{}"\n'.format(up, up))
write_vlan(f, u[1])
manifest.append((u[0], u[0]))
for u in store.get_tag(store.umac):
manifest.append((u[0], u[1]))
with open(output + "manifest", 'w') as f:
for m in sorted(manifest):
f.write("{}.{}\n".format(m[0], m[1]).lower())
def write_vlan(f, vlan_id):
"""Write vlan assignment for login."""
f.write('radius_accept_attr=64:d:13\n')
f.write('radius_accept_attr=65:d:6\n')
f.write('radius_accept_attr=81:s:{}\n\n'.format(vlan_id))
class Store(object):
"""Storage object."""
def __init__(self):
"""Init the instance."""
self._data = []
self.umac = "UMAC"
self.pwd = "<PASSWORD>"
self.mac = "MAC"
self.audit = "AUDIT"
self._users = []
self._mab = []
self._macs = []
self._vlans = {}
def set_mab(self, username):
"""Set a user as MAB-only, no login set."""
self._mab.append(username)
def get_tag(self, tag):
"""Get tagged items."""
for item in self._data:
if item[0] == tag:
yield item[1:]
def add_vlan(self, vlan_name, vlan_id):
"""Add a vlan item."""
self._vlans[vlan_name] = vlan_id
def _add(self, tag, key, value):
"""Backing tagged add."""
self._data.append([tag, key, value])
def add_user(self, username, macs, password):
"""Add a user definition."""
if username in self._users:
raise Exception("{} already defined".format(username))
self._users.append(username)
for m in macs:
self._add(self.umac, username, m)
self._add(self.pwd, username, password)
def add_mab(self, mac, vlan):
"""Add a MAB."""
if mac in self._macs:
raise Exception("{} already defined".format(mac))
self._macs.append(mac)
self._add(self.mac, mac, vlan)
def add_audit(self, user, objs):
"""Add an audit entry."""
self._add(self.audit, user, objs)
def get_eap_mab(self):
"""Get eap entries for MAB."""
for m in self.get_tag(self.mac):
v = m[1]
if not isinstance(v, int):
v = self._get_vlan(v)
yield [m[0], v]
def get_eap_user(self):
"""Get eap users."""
for u in self.get_tag(self.pwd):
if u[0] in self._mab:
continue
vlan = u[0].split(".")[0]
yield [u[0], u[1], self._get_vlan(vlan)]
def _get_vlan(self, name):
"""Get vlans."""
return self._vlans[name]
def main():
"""main entry."""
success = False
try:
parser = argparse.ArgumentParser()
parser.add_argument("--output", type=str, required=True)
args = parser.parse_args()
_process(args.output)
success = True
except Exception as e:
print('unable to compose')
print(str(e))
if success:
print("success")
exit(0)
else:
print("failure")
exit(1)
if __name__ == "__main__":
main()
| 2.765625 | 3 |
twitter_scrapper.py | juanlucruz/SportEventLocator | 0 | 10956 | # Import the Twython class
from twython import Twython, TwythonStreamer
import json
# import pandas as pd
import csv
import datetime
def process_tweet(tweet):
# Filter out unwanted data
d = {}
d['hashtags'] = [hashtag['text'] for hashtag in tweet['entities']['hashtags']]
try:
for key in {
'created_at', 'id', 'text', 'source', 'truncated',
'in_reply_to_status_id', 'in_reply_to_user_id',
'in_reply_to_screen_name', 'user', 'coordinates',
'place', 'quoted_status_id', 'is_quote_status', 'quoted_status',
'retweeted_status', 'quote_count', 'reply_count', 'retweet_count',
'favorite_count', 'favorited', 'retweeted', 'entities', 'extended_entities',
'possibly_sensitive', 'filter_level', 'lang', 'matching_rules'}:
if key == 'user':
pass
elif key == 'place':
pass
elif key == 'quoted_status' or key == 'retweeted_status':
pass
elif key == 'entities':
pass
elif key == 'extended_entities':
pass
else:
d[key] = tweet[key]
except KeyError as e:
pass
# d['text'] = tweet['text']
# d['user'] = tweet['user']['screen_name']
# d['user_loc'] = tweet['user']['location']
# d['date'] = tweet['created_at']
return d
# Create a class that inherits TwythonStreamer
class MyStreamer(TwythonStreamer):
# Received data
def on_success(self, data):
# # Only collect tweets in English
# if data['lang'] == 'en':
# tweet_data = process_tweet(data)
print(datetime.datetime.now())
# self.save_to_csv(tweet_data)
self.save_to_json(data)
# Problem with the API
def on_error(self, status_code, data):
print(status_code, data)
self.disconnect()
# Save each tweet to csv file
def save_to_csv(self, tweet):
# with open(r'saved_tweets.csv', 'a') as out_file:
with open(r'saved_tweets_big.csv', 'a') as out_file:
writer = csv.writer(out_file)
writer.writerow(list(tweet.values()))
def save_to_json(self, tweet):
with open('saved_tweets_big.json', 'a') as out_file:
json.dump(tweet, out_file)
def main():
# Load credentials from json file
with open("twitter_credentials.json", "r") as tw_creds:
creds = json.load(tw_creds)
# Instantiate an object
# python_tweets = Twython(creds['CONSUMER_KEY'], creds['CONSUMER_SECRET'])
# Instantiate from our streaming class
stream = MyStreamer(creds['CONSUMER_KEY'], creds['CONSUMER_SECRET'],
creds['ACCESS_TOKEN'], creds['ACCESS_SECRET'])
# Start the stream
# stream.statuses.filter(track='madrid')
stream.statuses.filter(locations='-7.876154,37.460012,3.699873,43.374723')
# # Create our query
# query = {
# 'q': 'futbol',
# 'result_type': 'mixed',
# 'lang': 'es',
# 'count': '100',
# }
#
# dict_ = {'user': [], 'date': [], 'text': [], 'favorite_count': []}
# for status in python_tweets.search(**query)['statuses']:
# print(format(status))
# dict_['user'].append(status['user']['screen_name'])
# dict_['date'].append(status['created_at'])
# dict_['text'].append(status['text'])
# dict_['favorite_count'].append(status['favorite_count'])
#
# df = pd.DataFrame(dict_)
# df.sort_values(by='favorite_count', inplace=True, ascending=False)
# print(df.values)
if __name__ == "__main__":
main()
| 2.9375 | 3 |
tools/bin/filter_cassandra_attributes.py | fruch/scylla-tools-java | 0 | 10957 | #!/usr/bin/env python2
import sys;
from yaml import load, dump, load_all
from cassandra_attributes import *
def main():
attributes = dict()
for i in range(1, len(sys.argv)):
attributes.update(load(open(sys.argv[i], 'r')))
print dump(dict(filter(lambda (a, b): a in cassandra_attributes, attributes.items())))
if __name__ == "__main__":
main()
| 2.59375 | 3 |
ci/test_filename.py | climateamante/linode.docs | 0 | 10958 | import pytest
import itertools
# Cartesian product of file names and extensions
# e.g. README.txt, README.md, CHANGELOG.txt, CHANGELOG.md ...
file_extensions = ['txt', 'md']
names = ['README', 'CHANGELOG', 'CONTRIBUTING', 'LICENSE', 'CODE_OF_CONDUCT']
exempt_files = [('.'.join(x)) for x in itertools.product(names, file_extensions)]
def test_filename(md_filepath):
if any(e in md_filepath for e in exempt_files):
assert True
else:
assert md_filepath.islower() == True,'Filename should be lowercase'
| 2.53125 | 3 |
test/test_sshtransport.py | stribika/sshlabs | 76 | 10959 | import sys
import unittest
sys.path.append("../main")
from sshtransport import *
class FakeSocket(object):
def __init__(self):
self.recv_buffer = b""
self.send_buffer = b""
def recv(self, n):
resp = self.recv_buffer[:n]
self.recv_buffer = self.recv_buffer[n:]
return resp
def send(self, x):
self.send_buffer += x
class TestIdentificationString(unittest.TestCase):
def test_recv(self):
conn = FakeSocket()
conn.recv_buffer = b"SSH-2.00-SecureMcShellface_1.0\r\n"
idstr = IdentificationString(recvfrom=conn)
self.assertEqual(idstr.protoversion, "2.00")
self.assertEqual(idstr.softwareversion, "SecureMcShellface_1.0")
def test_send(self):
conn = FakeSocket()
idstr = IdentificationString(protoversion="2.00", softwareversion="SecureMcShellface_1.0")
idstr.send(conn)
self.assertEqual(conn.send_buffer, b"SSH-2.00-SecureMcShellface_1.0\r\n")
class TestBinaryPacket(unittest.TestCase):
def test_recv(self):
conn = FakeSocket()
conn.recv_buffer = b"\x00\x00\x00\x14\x07Hello World!\x00\x00\x00\x00\x00\x00\x00"
binpkt = BinaryPacket(recvfrom=conn)
self.assertEqual(binpkt.payload, b"Hello World!")
self.assertEqual(binpkt.mac, b"")
def test_send(self):
conn = FakeSocket()
binpkt = BinaryPacket(payload=b"Hello World!")
binpkt.send(conn)
self.assertEqual(conn.send_buffer, b"\x00\x00\x00\x14\x07Hello World!\x00\x00\x00\x00\x00\x00\x00")
| 2.828125 | 3 |
activity-classification/main_scenario_baseline.py | bstollnitz/grad-school-portfolio | 2 | 10960 | <gh_stars>1-10
import random
import time
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import torch
from torch.utils import data
from torch.utils.tensorboard import SummaryWriter
import utils_graph
import utils_io
import utils_nn
from feed_forward import FeedForward
from hyperparameters import Hyperparameters
from signal_data import SignalData
from signal_dataset import SignalDataset
PLOTS_FOLDER = 'plots'
USE_CUDA = torch.cuda.is_available()
def _train_ff_network(hyperparameter_dict: dict,
data: SignalData) -> Tuple[FeedForward, List, List, List, List]:
"""Trains a feed-forward network using the specified hyperparameters.
"""
# Ensure reproducibility by giving PyTorch the same seed every time we train.
torch.manual_seed(1)
# Print hyperparameters.
print(f'Hyperparameters: {hyperparameter_dict}')
# Get hyperparameters.
learning_rate = hyperparameter_dict['learning_rate']
batch_size = hyperparameter_dict['batch_size']
optimizer_str = hyperparameter_dict['optimizer']
# There are 6 labels, and Pytorch expects them to go from 0 to 5.
full_train_labels = data.train_labels - 1
# Get generators.
signal_dataset = SignalDataset(data.train_signals, full_train_labels)
(training_generator, validation_generator) = utils_nn.get_trainval_generators(
signal_dataset, batch_size, num_workers=0, training_fraction=0.8)
# Crete feed forward network.
input_size = data.num_timesteps * data.num_components
feed_forward = FeedForward(input_size, input_size, data.num_activity_labels)
print(feed_forward)
# Parameters should be moved to GPU before constructing the optimizer.
device = torch.device('cuda:0' if USE_CUDA else 'cpu')
feed_forward = feed_forward.to(device)
# Get optimizer.
optimizer = None
if optimizer_str == 'adam':
optimizer = torch.optim.Adam(feed_forward.parameters(), lr=learning_rate)
elif optimizer_str == 'sgd':
optimizer = torch.optim.SGD(feed_forward.parameters(), lr=learning_rate)
else:
raise Exception(f'Specified optimizer not valid: {optimizer_str}')
training_accuracy_list = []
training_loss_list = []
validation_accuracy_list = []
validation_loss_list = []
max_epochs = 10
for epoch in range(max_epochs):
print(f'Epoch {epoch}')
# Training data.
(training_accuracy, training_loss) = utils_nn.fit(feed_forward,
training_generator, optimizer, USE_CUDA)
training_accuracy_list.append(training_accuracy)
training_loss_list.append(training_loss)
# Validation data.
(validation_accuracy, validation_loss) = utils_nn.evaluate(feed_forward,
validation_generator, 'Validation', USE_CUDA)
validation_accuracy_list.append(validation_accuracy)
validation_loss_list.append(validation_loss)
return (feed_forward, training_accuracy_list, training_loss_list,
validation_accuracy_list, validation_loss_list)
def _get_ff_hyperparameters() -> Hyperparameters:
"""Returns hyperparameters used to tune the feed-forward network.
"""
# First pass:
hyperparameter_values = Hyperparameters({
'learning_rate': [0.1, 0.01, 0.001],
'batch_size': [32, 64, 128],
'optimizer': ['adam', 'sgd']
})
# Best:
# optimizer: sgd, batch size: 64, learning rate: 0.1
# Second pass:
hyperparameter_values = Hyperparameters({
'learning_rate': [0.05, 0.1, 0.2],
'batch_size': [16, 32, 64],
'optimizer': ['sgd']
})
# Best:
# optimizer: sgd, batch size: 16, learning rate: 0.1
return hyperparameter_values
def _tune_ff_hyperparameters(data: SignalData) -> None:
"""Classifies temporal signals using a feed-forward network.
"""
print(' Tuning hyperparameters.')
start_time = time.time()
# Hyperparameters to tune.
hyperparameter_values = _get_ff_hyperparameters()
hyperparameter_combinations = hyperparameter_values.sample_combinations()
# Create Tensorboard writer.
with SummaryWriter(f'runs/signals', filename_suffix='') as writer:
# Hyperparameter loop.
for hyperparameter_dict in hyperparameter_combinations:
(_, _, _, validation_accuracy_list, _) = _train_ff_network(
hyperparameter_dict, data)
writer.add_hparams(hyperparameter_dict,
{'hparam/signals/validation_accuracy': validation_accuracy_list[-1]})
utils_io.print_elapsed_time(start_time, time.time())
def _test_ff_network(feed_forward: FeedForward, signal_data: SignalData,
hyperparameter_dict: dict) -> Tuple[float, float]:
"""Returns accuracy and loss of specified network for specified test data
and specified hyperparameters.
"""
# There are 6 labels, and Pytorch expects them to go from 0 to 5.
test_labels = signal_data.test_labels - 1
# Get test generator.
batch_size = hyperparameter_dict['batch_size']
test_data = SignalDataset(signal_data.test_signals, test_labels)
params = {'batch_size': batch_size, 'shuffle': True, 'num_workers': 0}
test_generator = data.DataLoader(test_data, **params)
(test_avg_accuracy, test_avg_loss) = utils_nn.evaluate(feed_forward,
test_generator, 'Test', USE_CUDA)
return (test_avg_accuracy, test_avg_loss)
def _test_best_ff_hyperparameters(data: SignalData) -> None:
"""Use network with best hyperparameters to predict labels for test data.
Produces accuracy and loss graphs for training and validation data, as
well as accuracy and loss values for test data.
"""
hyperparameter_dict = {
'learning_rate': 0.1,
'batch_size': 16,
'optimizer': 'sgd',
}
(feed_forward, training_accuracy_list,
training_loss_list,
validation_accuracy_list,
validation_loss_list) = _train_ff_network(hyperparameter_dict,
data)
utils_graph.graph_nn_results(training_accuracy_list, validation_accuracy_list,
f'Training and validation accuracy of classification of temporal signals',
'Accuracy', PLOTS_FOLDER, f'signals_accuracy.html')
utils_graph.graph_nn_results(training_loss_list, validation_loss_list,
f'Training and validation loss of classification of temporal signals',
'Loss', PLOTS_FOLDER, f'signals_loss.html')
_test_ff_network(feed_forward, data, hyperparameter_dict)
with SummaryWriter(f'runs/signals', filename_suffix='') as writer:
num_epochs_train_val = len(training_accuracy_list)
for i in range(num_epochs_train_val):
writer.add_scalars(f'signals/accuracy', {
'training': training_accuracy_list[i],
'validation': validation_accuracy_list[i]
}, i)
writer.add_scalars(f'signals/loss', {
'training': training_loss_list[i],
'validation': validation_loss_list[i]
}, i)
# Test accuracy: 87.25%
# Test loss: 0.45
def scenario1(data: SignalData) -> None:
"""Uses a simple feed forward network to classify the raw signal.
"""
print('Scenario 1: feed forward network on raw signal')
# _tune_ff_hyperparameters(data)
_test_best_ff_hyperparameters(data) | 2.25 | 2 |
2020/day04/day4_part1.py | dstjacques/AdventOfCode | 0 | 10961 | <reponame>dstjacques/AdventOfCode<filename>2020/day04/day4_part1.py
input = """
ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
byr:1937 iyr:2017 cid:147 hgt:183cm
iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
hcl:#cfa07d byr:1929
hcl:#ae17e1 iyr:2013
eyr:2024
ecl:brn pid:760753108 byr:1931
hgt:179cm
hcl:#cfa07d eyr:2025 pid:166559648
iyr:2011 ecl:brn hgt:59in
"""
def validate(passport):
passport_fields = { "byr": False, "iyr": False, "eyr": False, "hgt": False, "hcl": False, "ecl": False, "pid": False }
for line in passport.split("\n"):
values = line.split(" ")
for value in values:
field = value.split(":")[0]
if field == "cid":
continue
passport_fields[field] = True
if False in passport_fields.values():
return False
return True
count = 0
for i in input.strip().split("\n\n"):
if validate(i):
count += 1
print(count) | 2.875 | 3 |
flit_core/flit_core/tests/test_common.py | rahul-deepsource/flit | 0 | 10962 | <filename>flit_core/flit_core/tests/test_common.py<gh_stars>0
import os.path as osp
from unittest import TestCase
import pytest
from flit_core.common import (
Module, get_info_from_module, InvalidVersion, NoVersionError, check_version,
normalize_file_permissions, Metadata
)
samples_dir = osp.join(osp.dirname(__file__), 'samples')
class ModuleTests(TestCase):
def test_package_importable(self):
i = Module('package1', samples_dir)
assert i.path == osp.join(samples_dir, 'package1')
assert i.file == osp.join(samples_dir, 'package1', '__init__.py')
assert i.is_package
def test_module_importable(self):
i = Module('module1', samples_dir)
assert i.path == osp.join(samples_dir, 'module1.py')
assert not i.is_package
def test_missing_name(self):
with self.assertRaises(ValueError):
i = Module('doesnt_exist', samples_dir)
def test_get_info_from_module(self):
info = get_info_from_module(Module('module1', samples_dir))
self.assertEqual(info, {'summary': 'Example module',
'version': '0.1'}
)
info = get_info_from_module(Module('module2', samples_dir))
self.assertEqual(info, {'summary': 'Docstring formatted like this.',
'version': '7.0'}
)
info = get_info_from_module(Module('package1', samples_dir))
self.assertEqual(info, {'summary': 'A sample package',
'version': '0.1'}
)
info = get_info_from_module(Module('moduleunimportable', samples_dir))
self.assertEqual(info, {'summary': 'A sample unimportable module',
'version': '0.1'}
)
info = get_info_from_module(Module('modulewithconstructedversion', samples_dir))
self.assertEqual(info, {'summary': 'This module has a __version__ that requires runtime interpretation',
'version': '1.2.3'}
)
with self.assertRaises(InvalidVersion):
get_info_from_module(Module('invalid_version1', samples_dir))
def test_version_raise(self):
with pytest.raises(InvalidVersion):
check_version('a.1.0.beta0')
with pytest.raises(InvalidVersion):
check_version('3!')
with pytest.raises(InvalidVersion):
check_version((1, 2))
with pytest.raises(NoVersionError):
check_version(None)
assert check_version('4.1.0beta1') == '4.1.0b1'
assert check_version('v1.2') == '1.2'
def test_normalize_file_permissions():
assert normalize_file_permissions(0o100664) == 0o100644 # regular file
assert normalize_file_permissions(0o40775) == 0o40755 # directory
@pytest.mark.parametrize(
("requires_python", "expected_result"),
[
("", True),
(">2.7", True),
("3", False),
(">= 3.7", False),
("<4, > 3.2", False),
(">3.4", False),
(">=2.7, !=3.0.*, !=3.1.*, !=3.2.*", True),
],
)
def test_supports_py2(requires_python, expected_result):
metadata = object.__new__(Metadata)
metadata.requires_python = requires_python
result = metadata.supports_py2
assert result == expected_result
| 2.296875 | 2 |
flumine/markets/market.py | jsphon/flumine | 0 | 10963 | import datetime
import logging
from typing import Optional
from betfairlightweight.resources.bettingresources import MarketBook, MarketCatalogue
from .blotter import Blotter
from ..events import events
logger = logging.getLogger(__name__)
class Market:
def __init__(
self,
flumine,
market_id: str,
market_book: MarketBook,
market_catalogue: MarketCatalogue = None,
):
self.flumine = flumine
self.market_id = market_id
self.closed = False
self.date_time_closed = None
self.market_book = market_book
self.market_catalogue = market_catalogue
self.context = {"simulated": {}} # data store (raceCard / scores etc)
self.blotter = Blotter(self)
def __call__(self, market_book: MarketBook):
self.market_book = market_book
def open_market(self) -> None:
self.closed = False
def close_market(self) -> None:
self.closed = True
self.date_time_closed = datetime.datetime.utcnow()
# order
def place_order(self, order, execute: bool = True) -> None:
order.place(self.market_book.publish_time)
if order.id not in self.blotter:
self.blotter[order.id] = order
if order.trade.market_notes is None:
order.trade.update_market_notes(self.market_book)
self.flumine.log_control(events.TradeEvent(order.trade)) # todo dupes?
else:
return # retry attempt so ignore?
if execute: # handles replaceOrder
self.blotter.pending_place.append(order)
def cancel_order(self, order, size_reduction: float = None) -> None:
order.cancel(size_reduction)
self.blotter.pending_cancel.append(order)
def update_order(self, order, new_persistence_type: str) -> None:
order.update(new_persistence_type)
self.blotter.pending_update.append(order)
def replace_order(self, order, new_price: float) -> None:
order.replace(new_price)
self.blotter.pending_replace.append(order)
@property
def event_type_id(self) -> str:
if self.market_book:
return self.market_book.market_definition.event_type_id
@property
def event_id(self) -> str:
if self.market_book:
return self.market_book.market_definition.event_id
@property
def seconds_to_start(self):
return (self.market_start_datetime - datetime.datetime.utcnow()).total_seconds()
@property
def elapsed_seconds_closed(self) -> Optional[float]:
if self.closed and self.date_time_closed:
return (datetime.datetime.utcnow() - self.date_time_closed).total_seconds()
@property
def market_start_datetime(self):
if self.market_catalogue:
return self.market_catalogue.market_start_time
elif self.market_book:
return self.market_book.market_definition.market_time
else:
return datetime.datetime.utcfromtimestamp(0)
| 2.25 | 2 |
{{cookiecutter.project_name}}/tests/conftest.py | nelsonHolic/common-fastapi-microservice | 1 | 10964 | <filename>{{cookiecutter.project_name}}/tests/conftest.py
import pytest
from fastapi.testclient import TestClient
from {{cookiecutter.project_name}}.app import app
@pytest.fixture()
def app_client() -> TestClient:
client = TestClient(app)
return client
| 1.570313 | 2 |
scripts/naive_search.py | simonbowly/lp-generators | 9 | 10965 | <filename>scripts/naive_search.py
import itertools
import multiprocessing
import json
import numpy as np
from tqdm import tqdm
from lp_generators.features import coeff_features, solution_features
from lp_generators.performance import clp_simplex_performance
from search_operators import lp_column_neighbour, lp_row_neighbour
from seeds import cli_seeds
from search_common import condition, objective, start_instance
def calculate_features(instance):
return dict(
**coeff_features(instance),
**solution_features(instance))
def generate_by_search(seed):
results = []
pass_condition = 0
step_change = 0
random_state = np.random.RandomState(seed)
current_instance = start_instance(random_state)
current_features = calculate_features(current_instance)
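    # Local-search loop: record features and solver performance every 100 steps,
    # alternate between row and column neighbour moves, and accept a neighbour
    # only when it satisfies the feasibility condition and improves the objective.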
for step in range(10001):
if (step % 100) == 0:
results.append(dict(
**coeff_features(current_instance),
**solution_features(current_instance),
**clp_simplex_performance(current_instance),
pass_condition=pass_condition,
step_change=step_change,
step=step, seed=seed))
if (step % 2) == 0:
new_instance = lp_row_neighbour(random_state, current_instance, 1)
else:
new_instance = lp_column_neighbour(random_state, current_instance, 1)
new_features = calculate_features(new_instance)
if condition(new_features):
pass_condition += 1
if objective(new_features) < objective(current_features):
step_change += 1
current_instance = new_instance
current_features = new_features
return results
@cli_seeds
def run(seed_values):
''' Generate the required number of instances and store feature results. '''
pool = multiprocessing.Pool()
mapper = pool.imap_unordered
print('Generating instances by naive search.')
features = list(tqdm(
mapper(generate_by_search, seed_values),
total=len(seed_values), smoothing=0))
features = list(itertools.chain(*features))
with open('data/naive_search.json', 'w') as outfile:
json.dump(features, outfile, indent=4, sort_keys=True)
run()
| 2.59375 | 3 |
tests/test_list_.py | aefalcon/iterable_collections | 4 | 10966 | import unittest
from iterable_collections import collect
class TestList_(unittest.TestCase):
def test_list(self):
c = collect(list(range(10))).list_()
self.assertEqual(c.iterable, list(list(range(10))))
def test_set(self):
c = collect(set(range(10))).list_()
self.assertEqual(c.iterable, list(set(range(10))))
def test_tuple(self):
c = collect(tuple(range(10))).list_()
self.assertEqual(c.iterable, list(tuple(range(10))))
def test_iterator(self):
c = collect(iter(range(10))).list_()
self.assertEqual(c.iterable, list(iter(range(10))))
def test_dict(self):
c = collect({'a': 1, 'b': 2}).list_()
self.assertEqual(c.iterable, list({'a': 1, 'b': 2}))
def test_dict_items(self):
c = collect({'a': 1, 'b': 2}.items()).list_()
self.assertEqual(c.iterable, list({'a': 1, 'b': 2}.items()))
def test_enumerate(self):
c = collect(list(range(10))).enumerate().list_()
self.assertEqual(c.iterable, list(enumerate(range(10))))
| 3.25 | 3 |
parcels/parcels/examples/example_peninsula.py | pdnooteboom/NA_forams | 1 | 10967 | <reponame>pdnooteboom/NA_forams<gh_stars>1-10
from parcels import FieldSet, ParticleSet, ScipyParticle, JITParticle, Variable
from parcels import AdvectionRK4, AdvectionEE, AdvectionRK45
from argparse import ArgumentParser
import numpy as np
import math # NOQA
import pytest
from datetime import timedelta as delta
ptype = {'scipy': ScipyParticle, 'jit': JITParticle}
method = {'RK4': AdvectionRK4, 'EE': AdvectionEE, 'RK45': AdvectionRK45}
def peninsula_fieldset(xdim, ydim, mesh='flat'):
"""Construct a fieldset encapsulating the flow field around an
idealised peninsula.
:param xdim: Horizontal dimension of the generated fieldset
:param xdim: Vertical dimension of the generated fieldset
:param mesh: String indicating the type of mesh coordinates and
units used during velocity interpolation:
1. spherical: Lat and lon in degree, with a
correction for zonal velocity U near the poles.
2. flat (default): No conversion, lat/lon are assumed to be in m.
The original test description can be found in Fig. 2.2.3 in:
<NAME>., <NAME>., <NAME>. (Eds). 2009. Manual of
recommended practices for modelling physical - biological
interactions during fish early life.
ICES Cooperative Research Report No. 295. 111 pp.
http://archimer.ifremer.fr/doc/00157/26792/24888.pdf
To avoid accuracy problems with interpolation from A-grid
to C-grid, we return NetCDF files that are on an A-grid.
"""
# Set Parcels FieldSet variables
# Generate the original test setup on A-grid in m
domainsizeX, domainsizeY = (1.e5, 5.e4)
dx, dy = domainsizeX / xdim, domainsizeY / ydim
La = np.linspace(dx, 1.e5-dx, xdim, dtype=np.float32)
Wa = np.linspace(dy, 5.e4-dy, ydim, dtype=np.float32)
u0 = 1
x0 = domainsizeX / 2
R = 0.32 * domainsizeX / 2
# Create the fields
x, y = np.meshgrid(La, Wa, sparse=True, indexing='xy')
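    # P is the stream function (scaled by 1e3) of a uniform flow u0 past a
    # cylinder of radius R centred at (x0, 0); U and V are the corresponding
    # horizontal velocity components derived from it.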
P = (u0*R**2*y/((x-x0)**2+y**2)-u0*y) / 1e3
U = u0-u0*R**2*((x-x0)**2-y**2)/(((x-x0)**2+y**2)**2)
V = -2*u0*R**2*((x-x0)*y)/(((x-x0)**2+y**2)**2)
# Set land points to NaN
landpoints = P >= 0.
P[landpoints] = np.nan
U[landpoints] = np.nan
V[landpoints] = np.nan
# Convert from m to lat/lon for spherical meshes
lon = La / 1852. / 60. if mesh == 'spherical' else La
lat = Wa / 1852. / 60. if mesh == 'spherical' else Wa
data = {'U': U, 'V': V, 'P': P}
dimensions = {'lon': lon, 'lat': lat}
return FieldSet.from_data(data, dimensions, mesh=mesh)
def UpdateP(particle, fieldset, time):
particle.p = fieldset.P[time, particle.depth, particle.lat, particle.lon]
def pensinsula_example(fieldset, npart, mode='jit', degree=1,
verbose=False, output=True, method=AdvectionRK4):
"""Example configuration of particle flow around an idealised Peninsula
:arg filename: Basename of the input fieldset
:arg npart: Number of particles to intialise"""
# First, we define a custom Particle class to which we add a
# custom variable, the initial stream function value p.
# We determine the particle base class according to mode.
class MyParticle(ptype[mode]):
# JIT compilation requires a-priori knowledge of the particle
# data structure, so we define additional variables here.
p = Variable('p', dtype=np.float32, initial=0.)
p_start = Variable('p_start', dtype=np.float32, initial=fieldset.P)
# Initialise particles
if fieldset.U.grid.mesh == 'flat':
x = 3000 # 3 km offset from boundary
else:
x = 3. * (1. / 1.852 / 60) # 3 km offset from boundary
y = (fieldset.U.lat[0] + x, fieldset.U.lat[-1] - x) # latitude range, including offsets
pset = ParticleSet.from_line(fieldset, size=npart, pclass=MyParticle,
start=(x, y[0]), finish=(x, y[1]), time=0)
if verbose:
print("Initial particle positions:\n%s" % pset)
# Advect the particles for 24h
time = delta(hours=24)
dt = delta(minutes=5)
k_adv = pset.Kernel(method)
k_p = pset.Kernel(UpdateP)
out = pset.ParticleFile(name="MyParticle", outputdt=delta(hours=1)) if output else None
print("Peninsula: Advecting %d particles for %s" % (npart, str(time)))
pset.execute(k_adv + k_p, runtime=time, dt=dt, output_file=out)
if verbose:
print("Final particle positions:\n%s" % pset)
return pset
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('mesh', ['flat', 'spherical'])
def test_peninsula_fieldset(mode, mesh):
"""Execute peninsula test from fieldset generated in memory"""
fieldset = peninsula_fieldset(100, 50, mesh)
pset = pensinsula_example(fieldset, 5, mode=mode, degree=1)
# Test advection accuracy by comparing streamline values
err_adv = np.array([abs(p.p_start - p.p) for p in pset])
assert(err_adv <= 1.e-3).all()
# Test Field sampling accuracy by comparing kernel against Field sampling
err_smpl = np.array([abs(p.p - pset.fieldset.P[0., p.depth, p.lat, p.lon]) for p in pset])
assert(err_smpl <= 1.e-3).all()
def fieldsetfile(mesh):
"""Generate fieldset files for peninsula test"""
filename = 'peninsula'
fieldset = peninsula_fieldset(100, 50, mesh=mesh)
fieldset.write(filename)
return filename
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('mesh', ['flat', 'spherical'])
def test_peninsula_file(mode, mesh):
"""Open fieldset files and execute"""
fieldset = FieldSet.from_parcels(fieldsetfile(mesh), extra_fields={'P': 'P'}, allow_time_extrapolation=True)
pset = pensinsula_example(fieldset, 5, mode=mode, degree=1)
# Test advection accuracy by comparing streamline values
err_adv = np.array([abs(p.p_start - p.p) for p in pset])
assert(err_adv <= 1.e-3).all()
# Test Field sampling accuracy by comparing kernel against Field sampling
err_smpl = np.array([abs(p.p - pset.fieldset.P[0., p.depth, p.lat, p.lon]) for p in pset])
assert(err_smpl <= 1.e-3).all()
if __name__ == "__main__":
p = ArgumentParser(description="""
Example of particle advection around an idealised peninsula""")
p.add_argument('mode', choices=('scipy', 'jit'), nargs='?', default='jit',
help='Execution mode for performing RK4 computation')
p.add_argument('-p', '--particles', type=int, default=20,
help='Number of particles to advect')
p.add_argument('-d', '--degree', type=int, default=1,
help='Degree of spatial interpolation')
p.add_argument('-v', '--verbose', action='store_true', default=False,
help='Print particle information before and after execution')
p.add_argument('-o', '--nooutput', action='store_true', default=False,
help='Suppress trajectory output')
p.add_argument('--profiling', action='store_true', default=False,
help='Print profiling information after run')
p.add_argument('-f', '--fieldset', type=int, nargs=2, default=None,
help='Generate fieldset file with given dimensions')
p.add_argument('-m', '--method', choices=('RK4', 'EE', 'RK45'), default='RK4',
help='Numerical method used for advection')
args = p.parse_args()
if args.fieldset is not None:
filename = 'peninsula'
fieldset = peninsula_fieldset(args.fieldset[0], args.fieldset[1], mesh='flat')
fieldset.write(filename)
# Open fieldset file set
fieldset = FieldSet.from_parcels('peninsula', extra_fields={'P': 'P'}, allow_time_extrapolation=True)
if args.profiling:
from cProfile import runctx
from pstats import Stats
runctx("pensinsula_example(fieldset, args.particles, mode=args.mode,\
degree=args.degree, verbose=args.verbose,\
output=not args.nooutput, method=method[args.method])",
globals(), locals(), "Profile.prof")
Stats("Profile.prof").strip_dirs().sort_stats("time").print_stats(10)
else:
pensinsula_example(fieldset, args.particles, mode=args.mode,
degree=args.degree, verbose=args.verbose,
output=not args.nooutput, method=method[args.method])
| 2.34375 | 2 |
TestBegin.py | FrankWangJQ/HttpRunner-master | 0 | 10968 | <filename>TestBegin.py<gh_stars>0
from httprunner import HttpRunner
import time
kwargs = {
"failfast":False,
#"dot_env_path": "/path/to/.env"
}
runner = HttpRunner(**kwargs)
# Entry point
runner.run("/Users/wangjianqing/PycharmProjects/HttpRunner-master/tests/testcases/Release/่ดฆๅท็ฎก็-่ฎพ็ฝฎ้กน.yml")
runner.gen_html_report(html_report_name="reportTestForBetaYunZS",html_report_template="/Users/wangjianqing/PycharmProjects/HttpRunner-master/httprunner/templates/default_report_template.html")
| 1.742188 | 2 |
pyaz/synapse/sql/pool/classification/recommendation/__init__.py | py-az-cli/py-az-cli | 0 | 10969 | <filename>pyaz/synapse/sql/pool/classification/recommendation/__init__.py
'''
Manage sensitivity classification recommendations.
'''
from ...... pyaz_utils import _call_az
def list(name, resource_group, workspace_name, filter=None, included_disabled=None, skip_token=None):
'''
List the recommended sensitivity classifications of a given SQL pool.
Required Parameters:
- name -- The SQL pool name.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- workspace_name -- The workspace name.
Optional Parameters:
- filter -- An OData filter expression that filters elements in the collection.
- included_disabled -- Indicates whether the result should include disabled recommendations
- skip_token -- An OData query option to indicate how many elements to skip in the collection.
'''
return _call_az("az synapse sql pool classification recommendation list", locals())
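# Illustrative call for the wrapper above (names are placeholders, not real resources):
#   list(name='sqlpool1', resource_group='my-rg', workspace_name='my-workspace')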
def enable(column, name, resource_group, schema, table, workspace_name):
'''
    Enable sensitivity recommendations for a given column (recommendations are enabled by default on all columns).
Required Parameters:
- column -- The name of column.
- name -- The SQL pool name.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- schema -- The name of schema.
- table -- The name of table.
- workspace_name -- The workspace name.
'''
return _call_az("az synapse sql pool classification recommendation enable", locals())
def disable(column, name, resource_group, schema, table, workspace_name):
'''
    Disable sensitivity recommendations for a given column (recommendations are enabled by default on all columns).
Required Parameters:
- column -- The name of column.
- name -- The SQL pool name.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- schema -- The name of schema.
- table -- The name of table.
- workspace_name -- The workspace name.
'''
return _call_az("az synapse sql pool classification recommendation disable", locals())
| 2.375 | 2 |
December Month Challenge/4KthfactorN.py | adesh-gadge/LeetCodePractice | 0 | 10970 | class Solution:
def kthFactor(self, n: int, k: int) -> int:
s1 = set()
s2 = set()
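        # Divisors come in pairs (i, n//i); scanning i up to sqrt(n) collects
        # both members of every pair, so sorting the union yields all factors.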
for i in range(1,int(n**0.5)+1):
if n%i ==0:
s1.add(i)
s2.add(int(n/i))
l = list(s1|s2)
l.sort()
if k > len(l):
return -1
return l[k-1] | 2.65625 | 3 |
dipy/utils/tests/test_arrfuncs.py | martcous/dipy | 0 | 10971 | """ Testing array utilities
"""
import sys
import numpy as np
from ..arrfuncs import as_native_array, pinv, eigh
from numpy.testing import (assert_array_almost_equal,
assert_array_equal)
from nose.tools import assert_true, assert_false, assert_equal, assert_raises
NATIVE_ORDER = '<' if sys.byteorder == 'little' else '>'
SWAPPED_ORDER = '>' if sys.byteorder == 'little' else '<'
def test_as_native():
arr = np.arange(5) # native
assert_equal(arr.dtype.byteorder, '=')
narr = as_native_array(arr)
assert_true(arr is narr)
sdt = arr.dtype.newbyteorder('s')
barr = arr.astype(sdt)
assert_equal(barr.dtype.byteorder, SWAPPED_ORDER)
narr = as_native_array(barr)
assert_false(barr is narr)
assert_array_equal(barr, narr)
assert_equal(narr.dtype.byteorder, NATIVE_ORDER)
def test_pinv():
arr = np.random.randn(4, 4, 4, 3, 7)
_pinv = pinv(arr)
for i in range(4):
for j in range(4):
for k in range(4):
assert_array_almost_equal(_pinv[i, j, k],
np.linalg.pinv(arr[i, j, k]))
def test_eigh():
for i in range(10):
arr = np.random.randn(7, 7)
evals1, evecs1 = eigh(arr)
evals2, evecs2 = np.linalg.eigh(arr)
assert_array_almost_equal(evals1, evals2)
assert_array_almost_equal(evecs1, evecs2)
arr = np.random.randn(4, 4, 4, 7, 7)
evals, evecs = eigh(arr)
for i in range(4):
for j in range(4):
for k in range(4):
evals_vox, evecs_vox = np.linalg.eigh(arr[i, j, k])
assert_array_almost_equal(evals[i, j, k], evals_vox)
assert_array_almost_equal(evecs[i, j, k], evecs_vox)
| 2.25 | 2 |
ProgressBar.py | ArisKots1992/Similar-World-News-Articles | 1 | 10972 | # -*- coding: utf-8 -*-
import time
import sys
import math
#HOMEMADE WITHOUT ONLINE CODE by Aris
#LICENCE BY ARIS
class ProgressBar:
def __init__(self,max_size=36):
ProgressBar.max_size = max_size
ProgressBar.tick = 20.0/max_size
ProgressBar.progress_counter = 0.0
ProgressBar.counter = 0
spaces = ' ' * 20
        hashes = '█' * 0
        sys.stdout.write("\rPercent: █{0}█{1}%".format(hashes + spaces, 0))
sys.stdout.flush()
def update(self):
ProgressBar.counter += 1
if ProgressBar.counter == ProgressBar.max_size:
            hashes = '█' * 20
            spaces = ' ' * 0
            sys.stdout.write("\rPercent: █{0}█{1}%".format(hashes + spaces, 100))
print
print "Finished Successfully!"
sys.stdout.flush()
return
elif ProgressBar.counter >= ProgressBar.max_size:
return
ProgressBar.progress_counter += ProgressBar.tick
        hashes = '█' * int(ProgressBar.progress_counter)
        spaces = ' ' * (20 - int(ProgressBar.progress_counter))
        percentage = int(round(ProgressBar.progress_counter * 5))
        sys.stdout.write("\rPercent: █{0}█{1}%".format(hashes + spaces, percentage))
sys.stdout.flush()
return
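# Example usage (sketch): bar = ProgressBar(max_size=50), then call bar.update()
# once per completed work item; the bar prints 100% after max_size calls.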
class SupportBar:
def __init__(self):
SupportBar.counter = 0
def increase(self):
SupportBar.counter += 1
def init(self):
SupportBar.counter = 0
def get(self):
return SupportBar.counter
| 3.25 | 3 |
src/vtra/plot/rail_network_map.py | GFDRR/vietnam-transport | 3 | 10973 | <filename>src/vtra/plot/rail_network_map.py
"""Rail network map
"""
import os
import sys
from collections import OrderedDict
import cartopy.crs as ccrs
import cartopy.io.shapereader as shpreader
import matplotlib.pyplot as plt
from vtra.utils import *
def main():
config = load_config()
output_file = os.path.join(config['paths']['figures'], 'rail-map.png')
rail_edge_file = os.path.join(
config['paths']['data'], 'post_processed_networks', 'rail_edges.shp')
rail_node_file = os.path.join(
config['paths']['data'], 'post_processed_networks', 'rail_nodes.shp')
color_by_type = {'Rail line': '#006d2c', 'Rail stop': '#000000'}
ax = get_axes()
plot_basemap(ax, config['paths']['data'],highlight_region=[])
scale_bar(ax, location=(0.8, 0.05))
plot_basemap_labels(ax, config['paths']['data'])
proj_lat_lon = ccrs.PlateCarree()
for record in shpreader.Reader(rail_edge_file).records():
geom = record.geometry
ax.add_geometries(
geom,
crs=proj_lat_lon,
linewidth=1.5,
edgecolor='#006d2c',
facecolor='none',
zorder=3,
label='Rail line'
)
# Stations
xs = []
ys = []
for record in shpreader.Reader(rail_node_file).records():
node_type = record.attributes['name']
if node_type != '0':
geom = record.geometry
x = geom.x
y = geom.y
xs.append(x)
ys.append(y)
name = record.attributes['name']
ax.scatter(xs, ys, transform=proj_lat_lon, facecolor='#000000',
s=4, zorder=5, label='Rail station')
# Legend
legend_handles = [
mpatches.Patch(color=color, label=line)
for line, color in color_by_type.items()
]
plt.legend(handles=legend_handles, loc='lower left')
save_fig(output_file)
if __name__ == '__main__':
main()
| 2.546875 | 3 |
ecommerce_project/apps/ecommerce/migrations/0001_initial.py | mlopezf2019/guadalupe_sowos_examen_3 | 0 | 10974 | <gh_stars>0
# Generated by Django 3.1.1 on 2020-09-27 20:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=200)),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('is_active', models.BooleanField(default=False)),
('is_deleted', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=200)),
('description', models.TextField()),
('quantity', models.IntegerField()),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
('category_id', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='ecommerce.category')),
],
),
migrations.CreateModel(
name='Purchase',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('iva', models.DecimalField(decimal_places=2, max_digits=5)),
('subtotal', models.DecimalField(decimal_places=2, max_digits=5)),
('total', models.DecimalField(decimal_places=2, max_digits=5)),
('customer_id', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='ecommerce.customer')),
],
),
migrations.CreateModel(
name='PurchaseProducts',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('quantity', models.IntegerField()),
('product_id', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='ecommerce.product')),
('purchase_id', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='ecommerce.purchase')),
],
),
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=200)),
('last_name', models.CharField(max_length=200)),
('user_id', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='users.user')),
],
),
migrations.AddField(
model_name='customer',
name='person_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='ecommerce.person'),
),
]
| 1.789063 | 2 |
pydl/pydlspec2d/tests/test_spec1d.py | jhennawi/pydl | 0 | 10975 | <reponame>jhennawi/pydl<filename>pydl/pydlspec2d/tests/test_spec1d.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
import os
from astropy.tests.helper import raises
from astropy.utils.data import get_pkg_data_filename
from .. import Pydlspec2dException
from ..spec1d import (HMF, findspec, spec_append, spec_path, template_metadata,
wavevector)
class TestSpec1d(object):
"""Test the functions in pydl.pydlspec2d.spec1d.
"""
def setup(self):
self.env = {'BOSS_SPECTRO_REDUX': '/boss/spectro/redux',
'SPECTRO_REDUX': '/sdss/spectro/redux',
'RUN2D': 'v1_2_3',
'RUN1D': 'v1_2_3'}
self.original_env = dict()
for key in self.env:
if key in os.environ:
self.original_env[key] = os.environ[key]
else:
self.original_env[key] = None
os.environ[key] = self.env[key]
def teardown(self):
for key in self.original_env:
if self.original_env[key] is None:
del os.environ[key]
else:
os.environ[key] = self.original_env[key]
def test_findspec(self):
"""This is just a placeholder for now.
"""
# slist = findspec(infile='file.in', sdss=True)
assert True
def test_hmf_init(self):
"""Test initialization of HMF object
"""
spec = np.random.random((20, 100))
invvar = np.random.random((20, 100))
hmf = HMF(spec, invvar)
assert hmf.K == 4
assert hmf.log.level == 20 # INFO
hmf = HMF(spec, invvar, K=6, verbose=True)
assert hmf.K == 6
assert hmf.log.level == 10 # DEBUG
def test_spec_append(self):
spec1 = np.array([[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1]])
spec2 = np.array([[2, 2, 2, 2, 2],
[2, 2, 2, 2, 2]])
s = spec_append(spec1, spec2)
assert (s == np.array([[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
[2, 2, 2, 2, 2]])).all()
spec2 = np.array([[2, 2, 2, 2],
[2, 2, 2, 2]])
s = spec_append(spec1, spec2)
assert (s == np.array([[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 0],
[2, 2, 2, 2, 0]])).all()
s = spec_append(spec1, spec2, 1)
assert (s == np.array([[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[0, 2, 2, 2, 2],
[0, 2, 2, 2, 2]])).all()
spec1 = np.array([[1, 1, 1],
[1, 1, 1]])
spec2 = np.array([[2, 2, 2, 2, 2],
[2, 2, 2, 2, 2]])
s = spec_append(spec1, spec2, -2)
assert (s == np.array([[0, 0, 1, 1, 1],
[0, 0, 1, 1, 1],
[2, 2, 2, 2, 2],
[2, 2, 2, 2, 2]])).all()
def test_spec_path(self):
bsr = self.env['BOSS_SPECTRO_REDUX']
run2d = self.env['RUN2D']
p = spec_path(123)
assert p[0] == os.path.join(bsr, run2d, '0123')
p = spec_path(1234)
assert p[0] == os.path.join(bsr, run2d, '1234')
p = spec_path(1234, topdir=bsr, run2d=run2d)
assert p[0] == os.path.join(bsr, run2d, '1234')
p = spec_path(np.array([1234, 5678]), topdir=bsr, run2d=run2d)
assert p[0] == os.path.join(bsr, run2d, '1234')
assert p[1] == os.path.join(bsr, run2d, '5678')
p = spec_path(1234, path=bsr)
assert p[0] == bsr
def test_template_metadata(self):
with raises(Pydlspec2dException):
slist, metadata = template_metadata('/no/such/file.par')
inputfile = get_pkg_data_filename('t/test_template_metadata.par')
slist, metadata = template_metadata(inputfile)
assert metadata['object'] == 'gal'
assert not metadata['nonnegative']
def test_wavevector(self):
l = wavevector(3, 4, binsz=0.1)
ll = np.array([3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 4.0])
assert np.allclose(l, ll)
l = wavevector(3, 4, wavemin=3, binsz=0.1)
ll = np.array([3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 4.0])
assert np.allclose(l, ll)
| 2.046875 | 2 |
final/good_evaluate.py | wuyuMk7/CSCI8980 | 0 | 10976 | <gh_stars>0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
from absl import flags
import numpy as np
import skimage.io as io
import cv2
import matplotlib.pyplot as plt
# import tensorflow as tf
# from psbody.mesh import Mesh
from smpl_webuser.serialization import load_model
import pyrender
import trimesh
from util import renderer as vis_util
from util import image as img_util
from flame import FLAME
from flame_config import get_config
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import torch.optim as optim
import MyRingnet
def renderMesh(vertices, faces, vertex_colors, total_lmks):
scene = pyrender.Scene()
mesh = trimesh.Trimesh(vertices, faces, vertex_colors=vertex_colors)
render_mesh = pyrender.Mesh.from_trimesh(mesh)
scene.add(render_mesh)
sm = trimesh.creation.uv_sphere(radius=0.005)
sm.visual.vertex_colors = [0.9, 0.1, 0.1, 1.0]
tfs = np.tile(np.eye(4), (len(total_lmks), 1, 1))
tfs[:, :3, 3] = total_lmks
joints_pcl = pyrender.Mesh.from_trimesh(sm, poses=tfs)
scene.add(joints_pcl)
pyrender.Viewer(scene, use_raymond_lighting=True)
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
# Input size: 2048 + 159, fc1_size: 512, fc2_size: 512, out_size: 159
class Regression(nn.Module):
def __init__(
self, input_size = 2048+159, fc1_size = 512,
fc2_size = 512, out_size = 159, iter = 8):
super().__init__()
self.fc1 = nn.Linear(input_size, fc1_size, bias=True)
self.relu1 = nn.ReLU()
self.dropout1 = nn.Dropout(p=0.2)
self.fc2 = nn.Linear(fc1_size, fc2_size, bias = True)
self.relu2 = nn.ReLU()
self.dropout2 = nn.Dropout(p=0.2)
self.fc3 = nn.Linear(fc2_size, out_size, bias=True)
# init.normal_(self.fc1, 0, 1)
# init.normal_(self.fc2, 0, 1)
# init.normal_(self.fc3, 0, 1)
def forward(self, x):
#x = self.dropout1(self.relu1(self.fc1(x)))
#x = self.dropout2(self.relu2(self.fc2(x)))
x = self.relu1(self.fc1(x))
x = self.relu2(self.fc2(x))
x = self.fc3(x)
return x
# if __name__ == '__main__':
#     config = get_config()
#     template_mesh = Mesh(filename='./flame_model/FLAME_sample.ply')
#     renderer = vis_util.SMPLRenderer(faces=template_mesh.f)
#     if not os.path.exists(config.out_folder):
#         os.makedirs(config.out_folder)
#     if not os.path.exists(config.out_folder + '/images'):
#         os.mkdir(config.out_folder + '/images')
#     main(config, template_mesh)
config_img_size = 244
if __name__ == '__main__':
# read images and scale
#input_img_path = "./training_set/NoW_Dataset/final_release_version/iphone_pictures/FaMoS_180424_03335_TA/multiview_neutral/IMG_0101.jpg"
#input_img_path = "./training_set/NoW_Dataset/final_release_version/iphone_pictures/FaMoS_180704_03355_TA/multiview_expressions/IMG_1948.jpg"
input_img_path = "./training_set/NoW_Dataset/final_release_version/iphone_pictures/FaMoS_180427_03338_TA/multiview_expressions/IMG_0230.jpg"
#input_img_path = "./training_set/NoW_Dataset/final_release_version/iphone_pictures/FaMoS_180502_00145_TA/multiview_expressions/IMG_0407.jpg"
openpose = np.load(input_img_path.replace("iphone_pictures", "openpose").replace("jpg", "npy"), allow_pickle=True, encoding='latin1')
img = io.imread(input_img_path)
if np.max(img.shape[:2]) != config_img_size:
# print('Resizing so the max image size is %d..' % self.config_img_size)
scale = (float(config_img_size) / np.max(img.shape[:2]))
else:
scale = 1.0#scaling_factor
center = np.round(np.array(img.shape[:2]) / 2).astype(int)
# image center in (x,y)
center = center[::-1]
crop, proc_param = img_util.scale_and_crop(
img, scale, center, config_img_size)
print(proc_param)
#exit(0)
crop = torch.tensor(crop)
crop = crop.permute(2, 0, 1)
crop = crop[None, :, :, :].float().cuda()
# print(crop)
# build model
resnet50 = torch.load("./good_resnet50.pkl")
resnet50.cuda()
resnet50.fc = Identity()
# print(resnet50)
regression = torch.load("./good_model.pkl")
regression.cuda()
config = get_config()
config.batch_size = 1
flamelayer = FLAME(config)
flamelayer.requires_grad_ = False
flamelayer.cuda()
# run the model
res_output = resnet50(crop)
# Empty estimates as the initial value for concatenation
regress_estimates = torch.zeros([ res_output.shape[0], MyRingnet.regress_out_size ]).cuda()
# Regression model
for _ in range(MyRingnet.regress_iteration_cnt):
# Preprocess regression input - concatenation
regress_input = torch.cat([res_output, regress_estimates], 1)
regress_estimates = regression(regress_input)
regress_output = regress_estimates
# FLAME model
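    # The 159-dimensional regression output packs, in order: 3 camera parameters,
    # 6 pose parameters, 100 FLAME shape coefficients and 50 expression coefficients.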
cam_params, pose_params = regress_output[0:, 0:3], regress_output[0:, 3:9]
shape_params, exp_params = regress_output[0:, 9:109], regress_output[0:, 109:159]
# pose_params[0,2] = 3.14/5
flame_vert, flame_lmk = flamelayer(shape_params, exp_params, pose_params)
# Render and display the mesh
print(flame_lmk, cam_params)
# flame_lmk[0]=cam_params[0]*-1
# a_params = cam_params[:,:]*-1
mesh_vertices, mesh_faces = flame_vert.detach().cpu().numpy().squeeze(), flamelayer.faces
mesh_vertices_colors = np.ones([mesh_vertices.shape[0], 4]) * [0.3, 0.3, 0.3, 0.8]
renderMesh(mesh_vertices, mesh_faces, mesh_vertices_colors, flame_lmk.detach().cpu().numpy().squeeze())
#renderMesh(mesh_vertices, mesh_faces, mesh_vertices_colors, cam_params[0])
# flame_lmk[:, :, 1] *= -1
# cam_params[:,1]*=-1
# cam_params[:, 0] = 2
# cam_params[:, 1] = 0.2
# print(flame_lmk)
center = torch.tensor(center.copy()).cuda()
print(cam_params)
new_cam = MyRingnet.transform_cam(cam_params, 1. / scale, config_img_size, center[None, :])
projected_lmks = MyRingnet.project_points(flame_lmk, new_cam)
#op_pts = openpose[0,:68,:]
#ground_truth_weights = ((op_pts[:,2] > 0.41).astype(float))
#print(ground_truth_weights)
#print(op_pts)
# print(projected_lmks)
# print(openpose)
plt.figure
plt.imshow(img)
count = 0
cpu_lmks = projected_lmks.cpu()
#print(img.shape)
for i in cpu_lmks[0]:
x = i[0].int()
y = i[1].int()
plt.annotate(str(count), xy=(x, y))
plt.scatter(x, y, s=50, c='red', marker='o')
count = count + 1
count = 0
#openpose[0] *= scale
for i in openpose[0]:
x = i[0]
y = i[1]
plt.annotate(str(count), xy=(x, y))
plt.scatter(x, y, s=50, c='blue', marker='o')
count = count + 1
plt.show()
renderer = vis_util.SMPLRenderer(faces=mesh_faces)
print(img.shape[:2])
cam_for_render, vert_shifted = vis_util.get_original(
#proc_param, mesh_vertices, new_cam.detach().cpu().numpy().squeeze(), img_size=img.shape[:2]
proc_param, mesh_vertices, cam_params.detach().cpu().numpy().squeeze(), img_size=img.shape[:2]
)
print(cam_params, new_cam, cam_for_render)
#exit(0)
# rend_img_overlay = renderer(
# #vert_shifted * 1.0, cam=new_cam.squeeze().detach().cpu().numpy(), img=img, do_alpha=True
# #vert_shifted * 1.0, cam=cam_for_render, img=img, do_alpha=True
# vert_shifted * 1.0, cam=cam_for_render, img=img, do_alpha=True
# )
rend_img_vp1 = renderer.rotated(
mesh_vertices, 30, cam=new_cam.squeeze().detach().cpu().numpy(), img_size=img.shape[:2]
#vert_shifted * 1.0, 30, cam=cam_for_render, img_size=img.shape[:2]
)
plt.imshow(rend_img_vp1)
plt.show()
| 1.710938 | 2 |
quick-scan.py | B3ND1X/py-air-script | 2 | 10977 | #!/usr/bin/python
import os
os.system("sudo ./scan.py")
os.system("sudo ./enable-wifi.py")
| 1.734375 | 2 |
src/classifier/classifier_tuning/tune_sklearn.py | krangelie/bias-in-german-nlg | 14 | 10978 | <filename>src/classifier/classifier_tuning/tune_sklearn.py
from sklearn.ensemble import RandomForestClassifier
import xgboost
def suggest_xgb(model_params, trial, xgb=None):
n_estimators = trial.suggest_int(
model_params.n_estimators.name,
model_params.n_estimators.lower,
model_params.n_estimators.upper,
model_params.n_estimators.step,
)
lr = trial.suggest_float(
model_params.learning_rate.name,
model_params.learning_rate.lower,
model_params.learning_rate.upper,
log=True,
)
max_depth = trial.suggest_int(
model_params.max_depth.name,
model_params.max_depth.lower,
model_params.max_depth.upper,
model_params.max_depth.step,
)
classifier = xgboost.XGBClassifier(
n_estimators=n_estimators,
learning_rate=lr,
max_depth=max_depth,
random_state=42,
use_label_encoder=False,
tree_method="gpu_hist",
gpu_id=0,
)
return classifier
def suggest_rf(model_params, trial):
n_estimators = trial.suggest_int(
model_params.n_estimators.name,
model_params.n_estimators.lower,
model_params.n_estimators.upper,
model_params.n_estimators.step,
)
max_depth = trial.suggest_int(
model_params.max_depth.name,
model_params.max_depth.lower,
model_params.max_depth.upper,
model_params.max_depth.step,
)
classifier = RandomForestClassifier(
n_estimators=n_estimators, max_depth=max_depth, random_state=42
)
return classifier
| 2.296875 | 2 |
textgenrnn/model.py | cosandr/textgenrnn | 0 | 10979 | from keras.optimizers import RMSprop
from keras.layers import Input, Embedding, Dense, LSTM, Bidirectional, GRU
from keras.layers import concatenate, Reshape, SpatialDropout1D
from keras.models import Model
from keras import backend as K
from .AttentionWeightedAverage import AttentionWeightedAverage
def textgenrnn_model(num_classes, cfg, context_size=None,
weights_path=None,
dropout=0.0,
optimizer=RMSprop(lr=4e-3, rho=0.99)):
'''
Builds the model architecture for textgenrnn and
loads the specified weights for the model.
'''
input = Input(shape=(cfg['max_length'],), name='input')
embedded = Embedding(num_classes, cfg['dim_embeddings'],
input_length=cfg['max_length'],
name='embedding')(input)
if dropout > 0.0:
embedded = SpatialDropout1D(dropout, name='dropout')(embedded)
rnn_layer_list = []
for i in range(cfg['rnn_layers']):
prev_layer = embedded if i == 0 else rnn_layer_list[-1]
if cfg.get('rnn_type') == 'gru':
rnn_layer_list.append(new_rnn_gru(cfg, i + 1)(prev_layer))
else:
rnn_layer_list.append(new_rnn(cfg, i + 1)(prev_layer))
seq_concat = concatenate([embedded] + rnn_layer_list, name='rnn_concat')
attention = AttentionWeightedAverage(name='attention')(seq_concat)
output = Dense(num_classes, name='output', activation='softmax')(attention)
if context_size is None:
model = Model(inputs=[input], outputs=[output])
if weights_path is not None:
model.load_weights(weights_path, by_name=True)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
else:
context_input = Input(
shape=(context_size,), name='context_input')
context_reshape = Reshape((context_size,),
name='context_reshape')(context_input)
merged = concatenate([attention, context_reshape], name='concat')
main_output = Dense(num_classes, name='context_output',
activation='softmax')(merged)
model = Model(inputs=[input, context_input],
outputs=[main_output, output])
if weights_path is not None:
model.load_weights(weights_path, by_name=True)
model.compile(loss='categorical_crossentropy', optimizer=optimizer,
loss_weights=[0.8, 0.2])
return model
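# A minimal cfg sketch for textgenrnn_model (illustrative values only; real
# values come from the textgenrnn configuration):
#   cfg = {'max_length': 40, 'dim_embeddings': 100, 'rnn_layers': 2,
#          'rnn_size': 128, 'rnn_bidirectional': False, 'rnn_type': 'lstm'}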
'''
Create a new LSTM layer per parameters. Unfortunately,
each combination of parameters must be hardcoded.
The normal LSTMs use sigmoid recurrent activations
for parity with CuDNNLSTM:
https://github.com/keras-team/keras/issues/8860
'''
def new_rnn(cfg, layer_num):
use_cudnnlstm = K.backend() == 'tensorflow' and len(K.tensorflow_backend._get_available_gpus()) > 0
if use_cudnnlstm:
from keras.layers import CuDNNLSTM
if cfg['rnn_bidirectional']:
return Bidirectional(CuDNNLSTM(cfg['rnn_size'],
return_sequences=True),
name='rnn_{}'.format(layer_num))
return CuDNNLSTM(cfg['rnn_size'],
return_sequences=True,
name='rnn_{}'.format(layer_num))
else:
if cfg['rnn_bidirectional']:
return Bidirectional(LSTM(cfg['rnn_size'],
return_sequences=True,
recurrent_activation='sigmoid'),
name='rnn_{}'.format(layer_num))
return LSTM(cfg['rnn_size'],
return_sequences=True,
recurrent_activation='sigmoid',
name='rnn_{}'.format(layer_num))
def new_rnn_gru(cfg, layer_num):
use_cudnngru = K.backend() == 'tensorflow' and len(K.tensorflow_backend._get_available_gpus()) > 0
if use_cudnngru:
from keras.layers import CuDNNGRU
if cfg['rnn_bidirectional']:
return Bidirectional(CuDNNGRU(cfg['rnn_size'],
return_sequences=True),
name='rnn_{}'.format(layer_num))
return CuDNNGRU(cfg['rnn_size'],
return_sequences=True,
name='rnn_{}'.format(layer_num))
else:
if cfg['rnn_bidirectional']:
return Bidirectional(GRU(cfg['rnn_size'],
return_sequences=True,
recurrent_activation='sigmoid',
reset_after=True),
name='rnn_{}'.format(layer_num))
return GRU(cfg['rnn_size'],
return_sequences=True,
recurrent_activation='sigmoid',
reset_after=True,
name='rnn_{}'.format(layer_num))
| 2.484375 | 2 |
tests/adapters/switches/brocade_test.py | FrancoisLopez/netman | 38 | 10980 | <gh_stars>10-100
# Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from flexmock import flexmock, flexmock_teardown
from hamcrest import assert_that, has_length, equal_to, is_, none, empty
from netaddr import IPNetwork
from netaddr.ip import IPAddress
from netman.adapters.switches import brocade_factory_ssh, brocade_factory_telnet
from netman.adapters.switches.brocade import Brocade, parse_if_ranges
from netman.adapters.switches.util import SubShell
from netman.core.objects.access_groups import IN, OUT
from netman.core.objects.exceptions import IPNotAvailable, UnknownVlan, UnknownIP, UnknownAccessGroup, BadVlanNumber, \
BadVlanName, UnknownInterface, TrunkVlanNotSet, UnknownVrf, VlanVrfNotSet, VrrpAlreadyExistsForVlan, BadVrrpPriorityNumber, BadVrrpGroupNumber, \
BadVrrpTimers, BadVrrpTracking, NoIpOnVlanForVrrp, VrrpDoesNotExistForVlan, UnknownDhcpRelayServer, DhcpRelayServerAlreadyExists, \
VlanAlreadyExist, InvalidAccessGroupName, IPAlreadySet
from netman.core.objects.interface_states import OFF, ON
from netman.core.objects.port_modes import ACCESS, TRUNK
from netman.core.objects.switch_descriptor import SwitchDescriptor
class BrocadeTest(unittest.TestCase):
def setUp(self):
self.switch = Brocade(SwitchDescriptor(model='brocade', hostname="my.hostname"), None)
SubShell.debug = True
self.shell_mock = flexmock()
self.switch.shell = self.shell_mock
def tearDown(self):
flexmock_teardown()
def test_switch_has_a_logger_configured_with_the_switch_name(self):
assert_that(self.switch.logger.name, is_(Brocade.__module__ + ".my.hostname"))
def test_ip_redirect_enable(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 999, name="Shizzle")
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 999").once().ordered().and_return([
"interface ve 999",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 999").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip redirect").once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice()
self.switch.set_vlan_icmp_redirects_state(1234, True)
def test_ip_redirect_disable(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 999, name="Shizzle")
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 999").once().ordered().and_return([
"interface ve 999",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 999").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no ip redirect").once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice()
self.switch.set_vlan_icmp_redirects_state(1234, False)
def test_set_vlan_icmp_redirects_state_without_interface_creates_it(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 999, name="Shizzle")
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 999").once().ordered().and_return([
"Error - ve 999 was not configured"
])
self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("interface ve 999").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no ip redirect").once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice()
self.switch.set_vlan_icmp_redirects_state(1234, False)
def test_set_vlan_icmp_redirects_state_unknown_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return([
"Error: vlan 1234 is not configured"
])
with self.assertRaises(UnknownVlan) as expect:
self.switch.set_vlan_icmp_redirects_state(1234, False)
assert_that(str(expect.exception), equal_to("Vlan 1234 not found"))
def test_get_vlans(self):
self.shell_mock.should_receive("do").with_args("show running-config vlan | begin vlan").once().ordered().and_return([
"vlan 1 name DEFAULT-VLAN",
""
" no untagged ethe 1/1 ethe 1/3 to 1/22",
"!",
"vlan 201",
" tagged ethe 1/1",
" router-interface ve 201",
"!",
"vlan 2222 name your-name-is-way-too-long-for-t",
" tagged ethe 1/1",
" untagged ethe 1/2",
"!",
"vlan 3333 name some-name",
"!",
"!"
])
self.shell_mock.should_receive("do").with_args("show running-config interface").once()\
.ordered().and_return([
'interface ve 428',
' port-name "My Awesome Port Name"',
' ip address 10.241.0.33/27',
' ip access-group ACL-IN in',
' ip access-group ACL-OUT out',
'!',
'interface ve 201',
' vrf forwarding SHIZZLE',
' ip address 1.1.1.1/24',
' ip address 2.1.1.1/27',
' ip address 1.1.1.9/24 secondary',
' ip helper-address 10.10.10.1',
' ip helper-address 10.10.10.2',
' ip vrrp-extended auth-type simple-text-auth VLAN201',
' ip vrrp-extended vrid 1',
' backup priority 110 track-priority 50',
' ip-address 1.1.1.2',
' hello-interval 5',
' dead-interval 15',
' advertise backup',
' track-port ethernet 1/1',
' activate',
' ip vrrp-extended vrid 2',
' backup priority 110 track-priority 50',
' ip-address 1.1.1.3',
' ip-address 1.1.1.4',
' hello-interval 5',
' dead-interval 15',
' advertise backup',
' track-port ethernet 1/1',
' activate',
' no ip redirect'
'!',
'interface ve 1203',
'!',
'interface ve 3993',
' port-name Another-port-name',
' ip address 4.4.4.0/27',
'!'])
vlan1, vlan201, vlan2222, vlan3333 = self.switch.get_vlans()
assert_that(vlan1.number, equal_to(1))
assert_that(vlan1.name, equal_to("default"))
assert_that(vlan1.ips, has_length(0))
assert_that(vlan1.vrf_forwarding, is_(none()))
assert_that(vlan201.number, equal_to(201))
assert_that(vlan201.name, equal_to(None))
assert_that(vlan201.ips, has_length(3))
assert_that(vlan201.vrf_forwarding, is_("SHIZZLE"))
assert_that(vlan201.icmp_redirects, equal_to(False))
assert_that(vlan2222.number, equal_to(2222))
assert_that(vlan2222.name, equal_to("your-name-is-way-too-long-for-t"))
assert_that(vlan2222.ips, has_length(0))
assert_that(vlan2222.icmp_redirects, equal_to(True))
assert_that(vlan3333.number, equal_to(3333))
assert_that(vlan3333.name, equal_to("some-name"))
assert_that(vlan3333.ips, has_length(0))
vrrp_group1, vrrp_group2 = vlan201.vrrp_groups
assert_that(len(vrrp_group1.ips), equal_to(1))
assert_that(vrrp_group1.ips[0], equal_to(IPAddress('1.1.1.2')))
assert_that(vrrp_group1.hello_interval, equal_to(5))
assert_that(vrrp_group1.dead_interval, equal_to(15))
assert_that(vrrp_group1.priority, equal_to(110))
assert_that(vrrp_group1.track_id, equal_to('ethernet 1/1'))
assert_that(vrrp_group1.track_decrement, equal_to(50))
assert_that(len(vrrp_group2.ips), equal_to(2))
assert_that(vrrp_group2.ips[0], equal_to(IPAddress('1.1.1.3')))
assert_that(vrrp_group2.ips[1], equal_to(IPAddress('1.1.1.4')))
assert_that(vrrp_group2.hello_interval, equal_to(5))
assert_that(vrrp_group2.dead_interval, equal_to(15))
assert_that(vrrp_group2.priority, equal_to(110))
assert_that(vrrp_group2.track_id, equal_to('ethernet 1/1'))
assert_that(vrrp_group2.track_decrement, equal_to(50))
assert_that(len(vlan201.dhcp_relay_servers), equal_to(2))
assert_that(str(vlan201.dhcp_relay_servers[0]), equal_to('10.10.10.1'))
assert_that(str(vlan201.dhcp_relay_servers[1]), equal_to('10.10.10.2'))
def test_get_vlan_with_no_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan 1750").once().ordered().and_return(
vlan_display(1750)
)
vlan = self.switch.get_vlan(1750)
assert_that(vlan.number, is_(1750))
assert_that(vlan.name, is_(None))
assert_that(vlan.access_groups[IN], is_(none()))
assert_that(vlan.access_groups[OUT], is_(none()))
assert_that(vlan.vrf_forwarding, is_(none()))
assert_that(vlan.ips, is_(empty()))
assert_that(vlan.vrrp_groups, is_(empty()))
assert_that(vlan.dhcp_relay_servers, is_(empty()))
def test_get_vlan_with_an_empty_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan 1750").once().ordered().and_return(
vlan_with_vif_display(1750, 999, name="Shizzle")
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 999").once().ordered().and_return([
"interface ve 999",
"!",
])
vlan = self.switch.get_vlan(1750)
assert_that(vlan.number, is_(1750))
assert_that(vlan.name, is_("Shizzle"))
assert_that(vlan.access_groups[IN], is_(none()))
assert_that(vlan.access_groups[OUT], is_(none()))
assert_that(vlan.vrf_forwarding, is_(none()))
assert_that(vlan.ips, is_(empty()))
assert_that(vlan.vrrp_groups, is_(empty()))
assert_that(vlan.dhcp_relay_servers, is_(empty()))
def test_get_vlan_with_a_full_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan 1750").once().ordered().and_return(
vlan_with_vif_display(1750, 1750, name="Shizzle")
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1750").once().ordered().and_return([
"interface ve 1750",
" vrf forwarding SHIZZLE",
" ip address 1.1.1.1/24",
" ip address 2.1.1.1/27",
" ip address 1.1.1.9/24 secondary",
" ip access-group ACL-IN in",
" ip access-group ACL-OUT out",
" ip helper-address 10.10.10.1",
" ip helper-address 10.10.10.2",
" ip vrrp-extended auth-type simple-text-auth VLAN201",
" ip vrrp-extended vrid 1",
" backup priority 110 track-priority 50",
" ip-address 1.1.1.2",
" hello-interval 5",
" dead-interval 15",
" advertise backup",
" track-port ethernet 1/1",
" activate",
" ip vrrp-extended vrid 2",
" backup priority 110 track-priority 50",
" ip-address 1.1.1.3",
" ip-address 1.1.1.4",
" hello-interval 5",
" dead-interval 15",
" advertise backup",
" track-port ethernet 1/1",
" activate",
"!",
])
vlan = self.switch.get_vlan(1750)
assert_that(vlan.number, is_(1750))
assert_that(vlan.name, is_("Shizzle"))
assert_that(vlan.access_groups[IN], is_("ACL-IN"))
assert_that(vlan.access_groups[OUT], is_("ACL-OUT"))
assert_that(vlan.vrf_forwarding, is_("SHIZZLE"))
assert_that(vlan.ips, has_length(3))
assert_that(vlan.icmp_redirects, equal_to(True))
vrrp_group1, vrrp_group2 = vlan.vrrp_groups
assert_that(len(vrrp_group1.ips), equal_to(1))
        assert_that(vrrp_group1.ips[0], equal_to(IPAddress('1.1.1.2')))
assert_that(vrrp_group1.hello_interval, equal_to(5))
assert_that(vrrp_group1.dead_interval, equal_to(15))
assert_that(vrrp_group1.priority, equal_to(110))
assert_that(vrrp_group1.track_id, equal_to('ethernet 1/1'))
assert_that(vrrp_group1.track_decrement, equal_to(50))
assert_that(len(vrrp_group2.ips), equal_to(2))
assert_that(vrrp_group2.ips[0], equal_to(IPAddress('1.1.1.3')))
assert_that(vrrp_group2.ips[1], equal_to(IPAddress('1.1.1.4')))
assert_that(vrrp_group2.hello_interval, equal_to(5))
assert_that(vrrp_group2.dead_interval, equal_to(15))
assert_that(vrrp_group2.priority, equal_to(110))
assert_that(vrrp_group2.track_id, equal_to('ethernet 1/1'))
assert_that(vrrp_group2.track_decrement, equal_to(50))
assert_that(len(vlan.dhcp_relay_servers), equal_to(2))
assert_that(str(vlan.dhcp_relay_servers[0]), equal_to('10.10.10.1'))
assert_that(str(vlan.dhcp_relay_servers[1]), equal_to('10.10.10.2'))
def test_get_vlan_interface_with_untagged_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan 1").once().ordered().and_return(
vlan_display(1, 'DEFAULT-VLAN', tagged_port_str="ethe 1/2 ethe 1/23 to 1/24")
)
vlan_interfaces = self.switch.get_vlan_interfaces(1)
assert_that(vlan_interfaces, equal_to(["ethernet 1/2", "ethernet 1/23", "ethernet 1/24"]))
def test_get_vlan_interface_with_untagged_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan 1").once().ordered().and_return(
vlan_display(1, 'DEFAULT-VLAN', untagged_port_str="ethe 1/2")
)
vlan_interfaces = self.switch.get_vlan_interfaces(1)
assert_that(vlan_interfaces, equal_to(["ethernet 1/2"]))
def test_get_vlan_interface_with_untagged_and_tagged_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan 1").once().ordered().and_return(
vlan_display(1, 'DEFAULT-VLAN', untagged_port_str="ethe 1/1", tagged_port_str="ethe 1/2 ethe 1/23 to 1/24")
)
vlan_interfaces = self.switch.get_vlan_interfaces(1)
assert_that(vlan_interfaces, equal_to(["ethernet 1/1", "ethernet 1/2", "ethernet 1/23", "ethernet 1/24"]))
def test_get_vlan_interface_unknown_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan inexistent").once().ordered().and_return([
"Error: vlan inexistent is not configured"
])
with self.assertRaises(UnknownVlan):
self.switch.get_vlan_interfaces("inexistent")
def test_get_vlan_unknown_interface_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 1750").once().ordered().and_return([
"Error: vlan 1750 is not configured"
])
with self.assertRaises(UnknownVlan) as expect:
self.switch.get_vlan(1750)
assert_that(str(expect.exception), equal_to("Vlan 1750 not found"))
def test_get_vlan_with_both_ip_and_ipv6_vrrp_groups_ipv6_is_ignored(self):
self.shell_mock.should_receive("do").with_args("show vlan 1750").once().ordered().and_return(
vlan_with_vif_display(1750, 1750, name="Shizzle")
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1750").once()\
.ordered().and_return([
'interface ve 1750',
'port-name vrrp-extended vrid 42',
' ip address 10.241.0.33/27',
' no ip redirect',
' ip helper-address 10.10.10.1',
' ip helper-address 10.10.10.2',
' ipv6 address fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b/64',
' ipv6 address fdf8:f53e:61e4::18/64',
' ipv6 nd suppress-ra',
' ip vrrp-extended vrid 42',
' backup priority 130 track-priority 20',
' ip-address 1.1.1.2',
' advertise backup',
' hello-interval 4',
' track-port ethernet 1/3',
' activate',
' ipv6 vrrp-extended vrid 43',
' backup priority 110 track-priority 50',
' ipv6-address fdf8:f53e:61e4::18',
' advertise backup',
' hello-interval 5',
' track-port ethernet 1/2',
' activate',
'!'])
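# Only the IPv4 "ip vrrp-extended" group (vrid 42) is expected to be parsed;
# the "ipv6 vrrp-extended" group (vrid 43) is ignored, as asserted below.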
vlan = self.switch.get_vlan(1750)
assert_that(vlan.number, is_(1750))
assert_that(vlan.ips, has_length(1))
assert_that(vlan.icmp_redirects, equal_to(False))
assert_that(vlan.vrrp_groups, has_length(1))
vrrp_group1 = vlan.vrrp_groups[0]
assert_that(len(vrrp_group1.ips), equal_to(1))
assert_that(vrrp_group1.ips[0], equal_to(IPAddress('1.1.1.2')))
assert_that(vrrp_group1.hello_interval, equal_to(4))
assert_that(vrrp_group1.priority, equal_to(130))
assert_that(vrrp_group1.track_id, equal_to('ethernet 1/3'))
assert_that(vrrp_group1.track_decrement, equal_to(20))
assert_that(len(vlan.dhcp_relay_servers), equal_to(2))
assert_that(str(vlan.dhcp_relay_servers[0]), equal_to('10.10.10.1'))
assert_that(str(vlan.dhcp_relay_servers[1]), equal_to('10.10.10.2'))
def test_get_vlan_with_both_ip_and_ipv6_in_the_same_vrrp_group(self):
self.shell_mock.should_receive("do").with_args("show vlan 1750").once().ordered().and_return(
vlan_with_vif_display(1750, 1750, name="Shizzle")
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1750").once() \
.ordered()\
.and_return(['interface ve 1750',
'port-name vrrp-extended vrid 42',
' ip address 10.241.0.33/27',
' no ip redirect',
' ip helper-address 10.10.10.1',
' ip helper-address 10.10.10.2',
' ipv6 address fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b/64',
' ipv6 address fdf8:f53e:61e4::18/64',
' ipv6 nd suppress-ra',
' ip vrrp-extended vrid 42',
' backup priority 130 track-priority 20',
' ip-address 1.1.1.2',
' advertise backup',
' hello-interval 4',
' track-port ethernet 1/3',
' activate',
' ipv6 vrrp-extended vrid 42',
' backup priority 170 track-priority 40',
' ipv6-address fdf8:f53e:61e4::18',
' advertise backup',
' hello-interval 400',
' track-port ethernet 4/6',
' activate',
'!'])
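# When an IPv4 and an IPv6 vrrp-extended group share the same vrid, only the
# IPv4 settings (priority 130, hello-interval 4, track-port ethernet 1/3) are
# expected in the parsed group, as asserted below.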
vlan = self.switch.get_vlan(1750)
assert_that(vlan.number, is_(1750))
assert_that(vlan.ips, has_length(1))
assert_that(vlan.icmp_redirects, equal_to(False))
vrrp_group = vlan.vrrp_groups[0]
assert_that(len(vrrp_group.ips), equal_to(1))
assert_that(vrrp_group.ips[0], equal_to(IPAddress('1.1.1.2')))
assert_that(vrrp_group.hello_interval, equal_to(4))
assert_that(vrrp_group.priority, equal_to(130))
assert_that(vrrp_group.track_id, equal_to('ethernet 1/3'))
assert_that(vrrp_group.track_decrement, equal_to(20))
def test_add_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").and_return([
"Error: vlan 2999 is not configured"
])
self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vlan 2999 name Gertrude").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.add_vlan(2999, name="Gertrude")
def test_add_vlan_bad_number(self):
self.shell_mock.should_receive("do").with_args("show vlan 5000").and_return([
"Error: vlan 5000 is not configured"
])
self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vlan 5000 name Gertrude").once().ordered().and_return([
"Error: vlan id 4091 is outside of allowed max of 4090"
])
self.shell_mock.should_receive("do").with_args("exit").once().ordered()
with self.assertRaises(BadVlanNumber) as expect:
self.switch.add_vlan(5000, name="Gertrude")
assert_that(str(expect.exception), equal_to("Vlan number is invalid"))
def test_add_vlan_bad_name(self):
self.shell_mock.should_receive("do").with_args("show vlan 5000").and_return([
"Error: vlan 5000 is not configured"
])
self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vlan 5000 name Gertr ude").once().ordered().and_return([
"Invalid input -> ude"
])
self.shell_mock.should_receive("do").with_args("exit").once().ordered()
with self.assertRaises(BadVlanName) as expect:
self.switch.add_vlan(5000, name="<NAME>")
assert_that(str(expect.exception), equal_to("Vlan name is invalid"))
def test_add_vlan_no_name(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").and_return([
"Error: vlan 2999 is not configured"
])
self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vlan 2999").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.add_vlan(2999)
def test_add_vlan_already_exist_fails(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").and_return(
vlan_display(2999)
)
with self.assertRaises(VlanAlreadyExist) as expect:
self.switch.add_vlan(2999)
assert_that(str(expect.exception), equal_to("Vlan 2999 already exists"))
def test_remove_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return(
vlan_display(2999)
)
self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no vlan 2999").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).once().ordered()
self.switch.remove_vlan(2999)
def test_remove_vlan_invalid_vlan_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return([
"Error: vlan 2999 is not configured"
])
self.shell_mock.should_receive("do").with_args("configure terminal").never()
with self.assertRaises(UnknownVlan) as expect:
self.switch.remove_vlan(2999)
assert_that(str(expect.exception), equal_to("Vlan 2999 not found"))
def test_set_access_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return(
vlan_display(2999)
)
self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vlan 2999").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("untagged ethernet 1/4").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.set_access_vlan("ethernet 1/4", vlan=2999)
def test_set_access_vlan_invalid_vlan_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return([
"Error: vlan 2999 is not configured"
])
with self.assertRaises(UnknownVlan) as expect:
self.switch.set_access_vlan("ethernet 1/4", vlan=2999)
assert_that(str(expect.exception), equal_to("Vlan 2999 not found"))
def test_set_access_vlan_invalid_interface_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return(
vlan_display(2999)
)
self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vlan 2999").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("untagged ethernet 9/999").once().ordered().and_return([
'Invalid input -> 9/999',
'Type ? for a list'
])
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
with self.assertRaises(UnknownInterface) as expect:
self.switch.set_access_vlan("ethernet 9/999", vlan=2999)
assert_that(str(expect.exception), equal_to("Unknown interface ethernet 9/999"))
def test_reset_interfaces_works(self):
self.shell_mock.should_receive("do").with_args("show vlan ethernet 1/4").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("no interface ethernet 1/4").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("exit").once().ordered()
self.switch.reset_interface("ethernet 1/4")
def test_reset_interfaces_on_invalid_input_raises_unknown_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan ethernet 9/999").once().ordered().and_return([
'Invalid input -> 9/999',
'Type ? for a list'])
with self.assertRaises(UnknownInterface):
self.switch.reset_interface("ethernet 9/999")
def test_reset_interfaces_on_invalid_interface_raises_unknown_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan ethernet 1/64").once().ordered().and_return([
'Error - invalid interface 1/64'])
with self.assertRaises(UnknownInterface):
self.switch.reset_interface("ethernet 1/64")
def test_reset_interfaces_on_invalid_slot_raises_unknown_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan ethernet 2/1").once().ordered().and_return([
'Error - interface 2/1 is not an ETHERNET interface'])
with self.assertRaises(UnknownInterface):
self.switch.reset_interface("ethernet 2/1")
def test_reset_interfaces_cleans_tagged_vlans(self):
self.shell_mock.should_receive("do").with_args("show vlan ethernet 1/4").and_return(['VLAN: 1200 Untagged',
'VLAN: 1201 Tagged'])
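# Both the untagged (1200) and tagged (1201) memberships must be removed
# before the interface itself is reset.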
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("vlan 1200").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("no untagged ethernet 1/4").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("vlan 1201").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("no tagged ethernet 1/4").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("exit").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no interface ethernet 1/4").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("exit").and_return([]).once().ordered()
self.switch.reset_interface("ethernet 1/4")
def test_unset_interface_access_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan brief | include ethe 1/4").once().ordered().and_return([
"1202 your-name- 1202 - Untagged Ports : ethe 1/10"
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("vlan 1202").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no untagged ethernet 1/4").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.unset_interface_access_vlan("ethernet 1/4")
def test_unset_interface_access_vlan_invalid_interface_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan brief | include ethe 9/999").once().ordered().and_return([])
with self.assertRaises(UnknownInterface) as expect:
self.switch.unset_interface_access_vlan("ethernet 9/999")
assert_that(str(expect.exception), equal_to("Unknown interface ethernet 9/999"))
def test_set_access_mode_does_nothing_if_nothing_is_set(self):
self.shell_mock.should_receive("do").with_args("show vlan ethernet 1/4").once().ordered().and_return([
"VLAN: 1 Untagged"
])
self.shell_mock.should_receive("do").with_args("configure terminal").never()
self.switch.set_access_mode("ethernet 1/4")
def test_set_access_mode_invalid_interface_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan ethernet 9/999").once().ordered().and_return([
'Invalid input -> 9/999',
'Type ? for a list'
])
with self.assertRaises(UnknownInterface) as expect:
self.switch.set_access_mode("ethernet 9/999")
assert_that(str(expect.exception), equal_to("Unknown interface ethernet 9/999"))
def test_set_access_mode_does_nothing_if_only_an_untagged_vlan_not_knowing_if_it_is_an_access_or_native(self):
self.shell_mock.should_receive("do").with_args("show vlan ethernet 1/4").once().ordered().and_return([
"VLAN: 123 Untagged"
])
self.shell_mock.should_receive("do").with_args("configure terminal").never()
self.switch.set_access_mode("ethernet 1/4")
def test_set_access_mode_removes_all_tagged_vlans_and_the_untagged_because_it_is_a_native_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan ethernet 1/4").once().ordered().and_return([
"VLAN: 100 Tagged",
"VLAN: 300 Untagged",
])
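# With both tagged and untagged memberships present, the untagged vlan is a
# native vlan and is removed along with the tagged one.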
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("vlan 100").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no tagged ethernet 1/4").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vlan 300").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no untagged ethernet 1/4").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.set_access_mode("ethernet 1/4")
def test_set_trunk_mode(self):
self.shell_mock.should_receive("do").with_args("show vlan ethernet 1/4").once().ordered().and_return([
"VLAN: 1 Untagged"
])
self.shell_mock.should_receive("do").with_args("configure terminal").never()
self.switch.set_trunk_mode("ethernet 1/4")
def test_set_trunk_mode_invalid_interface_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan ethernet 9/999").once().ordered().and_return([
'Invalid input -> 9/999',
'Type ? for a list'
])
with self.assertRaises(UnknownInterface) as expect:
self.switch.set_trunk_mode("ethernet 9/999")
assert_that(str(expect.exception), equal_to("Unknown interface ethernet 9/999"))
def test_add_trunk_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return(
vlan_display(2999)
)
self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vlan 2999").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("tagged ethernet 1/1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.add_trunk_vlan("ethernet 1/1", vlan=2999)
def test_add_trunk_vlan_invalid_interface_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return(
vlan_display(2999)
)
self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vlan 2999").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("tagged ethernet 9/999").once().ordered().and_return([
'Invalid input -> 9/999',
'Type ? for a list'
])
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
with self.assertRaises(UnknownInterface) as expect:
self.switch.add_trunk_vlan("ethernet 9/999", vlan=2999)
assert_that(str(expect.exception), equal_to("Unknown interface ethernet 9/999"))
def test_add_trunk_vlan_invalid_vlan_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return([
"Error: vlan 2999 is not configured"
])
self.shell_mock.should_receive("do").with_args("configure terminal").never()
with self.assertRaises(UnknownVlan) as expect:
self.switch.add_trunk_vlan("ethernet 1/1", vlan=2999)
assert_that(str(expect.exception), equal_to("Vlan 2999 not found"))
def test_remove_trunk_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return(
vlan_display(2999)
)
self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vlan 2999").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no tagged ethernet 1/11").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.remove_trunk_vlan("ethernet 1/11", vlan=2999)
def test_remove_trunk_vlan_invalid_vlan_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return([
"Error: vlan 2999 is not configured"
])
with self.assertRaises(UnknownVlan) as expect:
self.switch.remove_trunk_vlan("ethernet 1/2", vlan=2999)
assert_that(str(expect.exception), equal_to("Vlan 2999 not found"))
def test_remove_trunk_vlan_not_set_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return(
vlan_display(2999)
)
self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vlan 2999").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no tagged ethernet 1/14").and_return([
"Error: ports ethe 1/14 are not tagged members of vlan 2999"
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
with self.assertRaises(TrunkVlanNotSet) as expect:
self.switch.remove_trunk_vlan("ethernet 1/14", vlan=2999)
assert_that(str(expect.exception), equal_to("Trunk Vlan is not set on interface ethernet 1/14"))
def test_remove_trunk_vlan_invalid_interface_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return(
vlan_display(2999)
)
self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vlan 2999").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no tagged ethernet 9/999").and_return([
"Invalid input -> 1/99",
"Type ? for a list",
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
with self.assertRaises(UnknownInterface) as expect:
self.switch.remove_trunk_vlan("ethernet 9/999", vlan=2999)
assert_that(str(expect.exception), equal_to("Unknown interface ethernet 9/999"))
def test_set_interface_state_off(self):
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ethernet 1/4").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("disable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.set_interface_state("ethernet 1/4", OFF)
def test_set_interface_state_off_invalid_interface_raises(self):
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ethernet 9/999").once().ordered().and_return([
'Invalid input -> 9/999',
'Type ? for a list'
])
self.shell_mock.should_receive("do").with_args("exit").and_return([]).once().ordered()
with self.assertRaises(UnknownInterface) as expect:
self.switch.set_interface_state("ethernet 9/999", OFF)
assert_that(str(expect.exception), equal_to("Unknown interface ethernet 9/999"))
def test_set_interface_state_on(self):
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ethernet 1/4").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.set_interface_state("ethernet 1/4", ON)
def test_set_interface_state_on_invalid_interface_raises(self):
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ethernet 9/999").once().ordered().and_return([
'Invalid input -> 9/999',
'Type ? for a list'
])
self.shell_mock.should_receive("do").with_args("exit").and_return([]).once().ordered()
with self.assertRaises(UnknownInterface) as expect:
self.switch.set_interface_state("ethernet 9/999", ON)
assert_that(str(expect.exception), equal_to("Unknown interface ethernet 9/999"))
def test_set_interface_native_vlan_on_trunk(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return(
vlan_display(2999)
)
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("vlan 2999").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("untagged ethernet 1/4").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.set_interface_native_vlan("ethernet 1/4", vlan=2999)
def test_set_interface_native_vlan_on_trunk_invalid_interface_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return(
vlan_display(2999)
)
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("vlan 2999").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("untagged ethernet 9/999").once().ordered().and_return([
'Invalid input -> 9/999',
'Type ? for a list'
])
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
with self.assertRaises(UnknownInterface) as expect:
self.switch.set_interface_native_vlan("ethernet 9/999", vlan=2999)
assert_that(str(expect.exception), equal_to("Unknown interface ethernet 9/999"))
def test_set_interface_native_vlan_on_trunk_invalid_vlan_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return([
"Error: vlan 2999 is not configured"
])
self.shell_mock.should_receive("do").with_args("configure terminal").never()
with self.assertRaises(UnknownVlan) as expect:
self.switch.set_interface_native_vlan("ethernet 1/4", vlan=2999)
assert_that(str(expect.exception), equal_to("Vlan 2999 not found"))
def test_unset_interface_native_vlan_on_trunk(self):
self.shell_mock.should_receive("do").with_args("show vlan brief | include ethe 1/4").once().ordered().and_return([
"1202 your-name- 1202 - Untagged Ports : ethe 1/10"
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("vlan 1202").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no untagged ethernet 1/4").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.unset_interface_native_vlan("ethernet 1/4")
def test_unset_interface_native_vlan_on_trunk_invalid_interface_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan brief | include ethe 9/999").once().ordered().and_return([])
with self.assertRaises(UnknownInterface) as expect:
self.switch.unset_interface_native_vlan("ethernet 9/999")
assert_that(str(expect.exception), equal_to("Unknown interface ethernet 9/999"))
def test_add_ip_creates_router_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_display(1234)
)
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("vlan 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("router-interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip address 1.2.3.4/25").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.add_ip_to_vlan(1234, IPNetwork("1.2.3.4/25"))
def test_add_ip_doesnt_creates_router_interface_if_already_created(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 3333)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 3333").once().ordered().and_return([
"interface ve 3333",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 3333").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip address 1.2.3.4/25").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.add_ip_to_vlan(1234, IPNetwork("1.2.3.4/25"))
def test_add_ip_contained_in_a_subnet_already_present_requires_the_keyword_secondary(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/24",
"!",
])
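# 1.2.3.4/25 falls inside the already configured 1.2.3.1/24, so the driver is
# expected to append the "secondary" keyword.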
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip address 1.2.3.4/25 secondary").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.add_ip_to_vlan(1234, IPNetwork("1.2.3.4/25"))
def test_add_ip_already_defined_elsewhere_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip address 1.2.3.4/25").and_return([
"IP/Port: Errno(6) Duplicate ip address"
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
with self.assertRaises(IPNotAvailable) as expect:
self.switch.add_ip_to_vlan(1234, IPNetwork("1.2.3.4/25"))
assert_that(str(expect.exception), equal_to("IP 1.2.3.4/25 is not available in this vlan"))
def test_add_ip_already_a_subnet_of_another_ve(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip address 1.2.3.4/25").and_return([
"IP/Port: Errno(11) ip subnet overlap with another interface"
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
with self.assertRaises(IPNotAvailable) as expect:
self.switch.add_ip_to_vlan(1234, IPNetwork("1.2.3.4/25"))
assert_that(str(expect.exception), equal_to("IP 1.2.3.4/25 is not available in this vlan"))
def test_add_ip_already_in_this_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.4/24",
"!",
])
with self.assertRaises(IPAlreadySet) as expect:
self.switch.add_ip_to_vlan(1234, IPNetwork("1.2.3.4/25"))
assert_that(str(expect.exception), equal_to("IP 1.2.3.4/25 is already present in this vlan as None"))
def test_add_ip_already_in_this_interface_as_a_secondary(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.4/24",
" ip address 1.2.3.5/24 secondary",
"!",
])
with self.assertRaises(IPAlreadySet) as expect:
self.switch.add_ip_to_vlan(1234, IPNetwork("1.2.3.5/25"))
assert_that(str(expect.exception), equal_to("IP 1.2.3.5/25 is already present in this vlan as None"))
def test_add_ip_to_unknown_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return([
"Error: vlan 1234 is not configured"
])
with self.assertRaises(UnknownVlan) as expect:
self.switch.add_ip_to_vlan(1234, IPNetwork("1.2.3.5/25"))
assert_that(str(expect.exception), equal_to("Vlan 1234 not found"))
def test_remove_ip(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.4/24",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no ip address 1.2.3.4/24").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.remove_ip_from_vlan(1234, IPNetwork("1.2.3.4/24"))
def test_remove_secondary_ip(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.4/24",
" ip address 1.2.3.5/24 secondary",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no ip address 1.2.3.5/24").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.remove_ip_from_vlan(1234, IPNetwork("1.2.3.5/24"))
def test_remove_ip_that_has_secondary_ip(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.4/24",
" ip address 1.2.3.5/24 secondary",
" ip address 1.2.3.6/24 secondary",
"!",
])
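# Removing the primary address requires dropping every address and re-adding
# the remaining ones, promoting the first secondary (1.2.3.5) to primary, as
# the ordered expectations below show.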
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no ip address 1.2.3.5/24").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no ip address 1.2.3.6/24").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no ip address 1.2.3.4/24").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip address 1.2.3.5/24").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip address 1.2.3.6/24 secondary").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.remove_ip_from_vlan(1234, IPNetwork("1.2.3.4/24"))
def test_remove_unknown_ip_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.4/24",
" ip address 1.2.3.5/24 secondary",
" ip address 1.2.3.6/24 secondary",
"!",
])
with self.assertRaises(UnknownIP) as expect:
self.switch.remove_ip_from_vlan(1234, IPNetwork("5.5.5.5/25"))
assert_that(str(expect.exception), equal_to("IP 5.5.5.5/25 not found"))
def test_remove_known_ip_with_wrong_mask_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.4/24",
" ip address 1.2.3.5/24 secondary",
" ip address 1.2.3.6/24 secondary",
"!",
])
with self.assertRaises(UnknownIP) as expect:
self.switch.remove_ip_from_vlan(1234, IPNetwork("1.2.3.5/25"))
assert_that(str(expect.exception), equal_to("IP 1.2.3.5/25 not found"))
def test_remove_ip_fails_if_there_aint_even_a_router_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_display(1234)
)
with self.assertRaises(UnknownIP) as expect:
self.switch.remove_ip_from_vlan(1234, IPNetwork("1.2.3.4/24"))
assert_that(str(expect.exception), equal_to("IP 1.2.3.4/24 not found"))
def test_remove_ip_on_unknown_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return([
"Error: vlan 1234 is not configured"
])
with self.assertRaises(UnknownVlan) as expect:
self.switch.remove_ip_from_vlan(1234, IPNetwork("1.2.3.5/25"))
assert_that(str(expect.exception), equal_to("Vlan 1234 not found"))
def test_set_vlan_vrf_success(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return(
vlan_with_vif_display(2500, 2500)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 2500").and_return([
"interface ve 2500",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 2500").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vrf forwarding MYVRF").once().ordered().and_return([
"Warning: All IPv4 and IPv6 addresses (including link-local) on this interface have been removed"
])
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.set_vlan_vrf(2500, "MYVRF")
def test_set_vlan_vrf_incorrect_name(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return(
vlan_with_vif_display(2500, 2500)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 2500").and_return([
"interface ve 2500",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 2500").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vrf forwarding MYVRF").once().ordered().and_return([
"Error - VRF(MYVRF) does not exist or Route-Distinguisher not specified or Address Family not configured"
])
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
with self.assertRaises(UnknownVrf) as expect:
self.switch.set_vlan_vrf(2500, "MYVRF")
assert_that(str(expect.exception), equal_to("VRF name \"MYVRF\" was not configured."))
def test_set_vlan_vrf_without_interface_creates_it(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return(
vlan_display(2500)
)
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("vlan 2500").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("router-interface ve 2500").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("interface ve 2500").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vrf forwarding MYVRF").once().ordered().and_return([
"Warning: All IPv4 and IPv6 addresses (including link-local) on this interface have been removed"
])
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.set_vlan_vrf(2500, "MYVRF")
def test_set_vlan_vrf_unknown_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return([
"Error: vlan 2500 is not configured"
])
with self.assertRaises(UnknownVlan) as expect:
self.switch.set_vlan_vrf(2500, "MYVRF")
assert_that(str(expect.exception), equal_to("Vlan 2500 not found"))
def test_unset_vlan_vrf_success(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return(
vlan_with_vif_display(2500, 2500)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 2500").and_return([
"interface ve 2500",
" vrf forwarding MYVRF",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 2500").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no vrf forwarding MYVRF").once().ordered().and_return([
"Warning: All IPv4 and IPv6 addresses (including link-local) on this interface have been removed"
])
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.unset_vlan_vrf(2500)
def test_unset_vlan_vrf_not_set(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return(
vlan_with_vif_display(2500, 2500)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 2500").and_return([
"interface ve 2500",
"!",
])
with self.assertRaises(VlanVrfNotSet) as expect:
self.switch.unset_vlan_vrf(2500)
assert_that(str(expect.exception), equal_to("VRF is not set on vlan 2500"))
def test_unset_vlan_vrf_from_known_vlan_with_no_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return(
vlan_display(2500)
)
with self.assertRaises(VlanVrfNotSet) as expect:
self.switch.unset_vlan_vrf(2500)
assert_that(str(expect.exception), equal_to("VRF is not set on vlan 2500"))
def test_unset_vlan_vrf_from_unknown_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return([
"Error: vlan 2500 is not configured"
])
with self.assertRaises(UnknownVlan) as expect:
self.switch.unset_vlan_vrf(2500)
assert_that(str(expect.exception), equal_to("Vlan 2500 not found"))
def test_set_access_group_creates_router_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return(
vlan_display(2500)
)
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("vlan 2500").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("router-interface ve 2500").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("interface ve 2500").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip access-group TheAccessGroup in").and_return([
"Warning: An undefined or zero length ACL has been applied. "
"Filtering will not occur for the specified interface VE 2500 (outbound)."
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.set_vlan_access_group(2500, IN, "TheAccessGroup")
def test_set_access_group_doesnt_creates_router_interface_if_already_created(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return(
vlan_with_vif_display(2500, 3333)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 3333").once().ordered().and_return([
"interface ve 3333",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 3333").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip access-group TheAccessGroup out").and_return([
"Warning: An undefined or zero length ACL has been applied. "
"Filtering will not occur for the specified interface VE 2500 (outbound)."
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.set_vlan_access_group(2500, OUT, "TheAccessGroup")
def test_set_access_group_fails_if_switch_says_so(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return(
vlan_with_vif_display(2500, 3333)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 3333").once().ordered().and_return([
"interface ve 3333",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 3333").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip access-group TheAcc essGroup out").once().ordered().and_return([
"Invalid input -> sss out",
"Type ? for a list"
])
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
with self.assertRaises(InvalidAccessGroupName) as expect:
self.switch.set_vlan_access_group(2500, OUT, "TheAcc essGroup")
assert_that(str(expect.exception), equal_to("Access Group Name is invalid: TheAcc essGroup"))
def test_set_access_group_needs_to_remove_actual_access_group_to_override_it(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return(
vlan_with_vif_display(2500, 2500)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 2500").once().ordered().and_return([
"interface ve 2500",
" ip access-group helloThere! in",
"!",
])
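# An access-group is already bound inbound; it must be removed before the new
# one is applied.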
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 2500").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no ip access-group helloThere! in").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip access-group TheAccessGroup in").and_return([
"Warning: An undefined or zero length ACL has been applied. "
"Filtering will not occur for the specified interface VE 2500 (outbound)."
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.set_vlan_access_group(2500, IN, "TheAccessGroup")
def test_set_access_group_to_unknown_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return([
"Error: vlan 2500 is not configured"
])
with self.assertRaises(UnknownVlan) as expect:
self.switch.set_vlan_access_group(2500, IN, "TheAccessGroup")
assert_that(str(expect.exception), equal_to("Vlan 2500 not found"))
def test_remove_access_group(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return(
vlan_with_vif_display(2500, 2500)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 2500").once().ordered().and_return([
"interface ve 2500",
" ip access-group helloThere! in",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 2500").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no ip access-group helloThere! in").and_return([
"Warning: An undefined or zero length ACL has been applied. "
"Filtering will not occur for the specified interface VE 2500 (outbound)."
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.unset_vlan_access_group(2500, IN)
def test_remove_access_group_out(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return(
vlan_with_vif_display(2500, 2500)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 2500").once().ordered().and_return([
"interface ve 2500",
" ip access-group Waaaat out",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 2500").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no ip access-group Waaaat out").and_return([
"Warning: An undefined or zero length ACL has been applied. "
"Filtering will not occur for the specified interface VE 2500 (outbound)."
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.unset_vlan_access_group(2500, OUT)
def test_remove_access_group_unknown_access_group_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return(
vlan_with_vif_display(2500, 2500)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 2500").once().ordered().and_return([
"interface ve 2500",
" ip access-group Waaaat out",
"!",
])
with self.assertRaises(UnknownAccessGroup) as expect:
self.switch.unset_vlan_access_group(2500, IN)
assert_that(str(expect.exception), equal_to("Inbound IP access group not found"))
def test_remove_access_group_fails_if_there_aint_even_a_router_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return(
vlan_display(2500)
)
with self.assertRaises(UnknownAccessGroup) as expect:
self.switch.unset_vlan_access_group(2500, OUT)
assert_that(str(expect.exception), equal_to("Outgoing IP access group not found"))
def test_remove_access_group_on_unknown_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return([
"Error: vlan 2500 is not configured"
])
with self.assertRaises(UnknownVlan) as expect:
self.switch.unset_vlan_access_group(2500, OUT)
assert_that(str(expect.exception), equal_to("Vlan 2500 not found"))
def test_get_interfaces(self):
self.shell_mock.should_receive("do").with_args("show interfaces").once().ordered().and_return([
"GigabitEthernet1/1 is down, line protocol is down",
" Hardware is GigabitEthernet, address is 0000.0000.0000 (bia 0000.0000.0000,",
" Member of VLAN 1999 (untagged), port is in untagged mode, port state is Disabled",
" No port name",
"GigabitEthernet1/2 is disabled, line protocol is down",
" Hardware is GigabitEthernet, address is 0000.0000.0000 (bia 0000.0000.0000,",
" Member of VLAN 2999 (untagged), 3 L2 VLANS (tagged), port is in dual mode, port state is Disabled",
" Port name is hello",
"GigabitEthernet1/3 is down, line protocol is down",
" Hardware is GigabitEthernet, address is 0000.0000.0000 (bia 0000.0000.0000,",
" Member of VLAN 1 (untagged), port is in untagged mode, port state is Disabled",
" No port name",
"GigabitEthernet1/4 is disabled, line protocol is down",
" Hardware is GigabitEthernet, address is 0000.0000.0000 (bia 0000.0000.0000,",
" Member of VLAN 1 (untagged), 1 L2 VLANS (tagged), port is in dual mode (default vlan), port state is Disabled",
" No port name",
"GigabitEthernet1/5 is disabled, line protocol is down",
" Hardware is GigabitEthernet, address is 0000.0000.0000 (bia 0000.0000.0000,",
" Member of 1 L2 VLAN(S) (tagged), port is in tagged mode, port state is Disabled",
" No port name",
"GigabitEthernet1/6 is disabled, line protocol is down",
" Hardware is GigabitEthernet, address is 0000.0000.0000 (bia 0000.0000.0000,",
" Member of 1 L2 VLAN(S) (tagged), port is in tagged mode, port state is Disabled",
" No port name",
"Ve1000 is down, line protocol is down",
" Hardware is Virtual Ethernet, address is 0000.0000.0000 (bia 0000.0000.0000,",
" Port name is Salut",
" Vlan id: 1000",
" Internet address is 0.0.0.0/0, IP MTU 1500 bytes, encapsulation ethernet",
"Ve2000 is down, line protocol is down",
" Hardware is Virtual Ethernet, address is 0000.0000.0000 (bia 0000.0000.0000,",
" No port name",
" Vlan id: 2000",
" Internet address is 1.1.1.1/24, IP MTU 1500 bytes, encapsulation ethernet",
"Loopback1 is up, line protocol is up",
" Hardware is Loopback",
" Port name is LOOPBACK",
" Internet address is 172.16.17.32/32, IP MTU 1500 bytes, encapsulation LOOPBACK"
])
self.shell_mock.should_receive("do").with_args("show running-config vlan").once().ordered().and_return([
"spanning-tree",
"!",
"vlan 1 name DEFAULT-VLAN",
" no untagged ethe 1/3",
"!",
"vlan 100",
" tagged ethe 1/2 ethe 1/4 to 1/6",
"!",
"vlan 200",
" tagged ethe 1/2",
"!",
"vlan 300",
" tagged ethe 1/2",
"!",
"vlan 1999",
" untagged ethe 1/1",
"!",
"vlan 2999",
" untagged ethe 1/2",
"!",
"!"
])
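# Port mode, access vlan and trunk vlans are derived by combining the
# "show interfaces" output with the "show running-config vlan" output.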
result = self.switch.get_interfaces()
if1, if2, if3, if4, if5, if6 = result
assert_that(if1.name, equal_to("ethernet 1/1"))
assert_that(if1.shutdown, equal_to(False))
assert_that(if1.port_mode, equal_to(ACCESS))
assert_that(if1.access_vlan, equal_to(1999))
assert_that(if1.trunk_native_vlan, equal_to(None))
assert_that(if1.trunk_vlans, equal_to([]))
assert_that(if2.name, equal_to("ethernet 1/2"))
assert_that(if2.shutdown, equal_to(True))
assert_that(if2.port_mode, equal_to(TRUNK))
assert_that(if2.access_vlan, equal_to(None))
assert_that(if2.trunk_native_vlan, equal_to(2999))
assert_that(if2.trunk_vlans, equal_to([100, 200, 300]))
assert_that(if3.name, equal_to("ethernet 1/3"))
assert_that(if3.port_mode, equal_to(ACCESS))
assert_that(if3.access_vlan, equal_to(None))
assert_that(if3.trunk_native_vlan, equal_to(None))
assert_that(if3.trunk_vlans, equal_to([]))
assert_that(if4.name, equal_to("ethernet 1/4"))
assert_that(if4.port_mode, equal_to(TRUNK))
assert_that(if4.access_vlan, equal_to(None))
assert_that(if4.trunk_native_vlan, equal_to(None))
assert_that(if4.trunk_vlans, equal_to([100]))
assert_that(if5.trunk_vlans, equal_to([100]))
assert_that(if6.trunk_vlans, equal_to([100]))
def test_get_interface(self):
self.shell_mock.should_receive("do").with_args("show interfaces ethernet 1/2").once().ordered().and_return([
"GigabitEthernet1/2 is disabled, line protocol is down",
" Hardware is GigabitEthernet, address is 0000.0000.0000 (bia 0000.0000.0000,",
" Member of VLAN 2999 (untagged), 3 L2 VLANS (tagged), port is in dual mode, port state is Disabled",
" Port name is hello"
])
self.shell_mock.should_receive("do").with_args("show running-config vlan").once().ordered().and_return([
"spanning-tree",
"!",
"vlan 1 name DEFAULT-VLAN",
" no untagged ethe 1/3",
"!",
"vlan 100",
" tagged ethe 1/2 ethe 1/4 to 1/6",
"!",
"vlan 200",
" tagged ethe 1/2",
"!",
"vlan 300",
" tagged ethe 1/2",
"!",
"vlan 1999",
" untagged ethe 1/1",
"!",
"vlan 2999",
" untagged ethe 1/2",
"!",
"!"
])
interface = self.switch.get_interface("ethernet 1/2")
assert_that(interface.name, equal_to("ethernet 1/2"))
assert_that(interface.shutdown, equal_to(True))
assert_that(interface.port_mode, equal_to(TRUNK))
assert_that(interface.access_vlan, equal_to(None))
assert_that(interface.trunk_native_vlan, equal_to(2999))
assert_that(interface.trunk_vlans, equal_to([100, 200, 300]))
def test_get_nonexistent_interface_raises(self):
self.shell_mock.should_receive("do").with_args("show interfaces ethernet 1/1999").once().ordered().and_return([
"Invalid input -> 1/1999",
"Type ? for a list"
])
self.shell_mock.should_receive("do").with_args("show running-config vlan").never()
with self.assertRaises(UnknownInterface) as expect:
self.switch.get_interface("ethernet 1/1999")
assert_that(str(expect.exception), equal_to("Unknown interface ethernet 1/1999"))
def test_add_vrrp_success_single_ip(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
"!",
])
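# The vrrp-extended group is created with simple-text-auth keyed on the vlan
# name (VLAN1234) before the vrid itself is configured.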
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended auth-type simple-text-auth VLAN1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended vrid 1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("backup priority 110 track-priority 50").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip-address 1.2.3.4").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("hello-interval 5").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("dead-interval 15").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("advertise backup").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("track-port ethernet 1/1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("activate").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4")], priority=110, hello_interval=5, dead_interval=15,
track_id="ethernet 1/1", track_decrement=50)
def test_add_vrrp_success_multiple_ip(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended auth-type simple-text-auth VLAN1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended vrid 1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("backup priority 110 track-priority 50").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip-address 1.2.3.4").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip-address 1.2.3.5").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("hello-interval 5").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("dead-interval 15").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("advertise backup").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("track-port ethernet 1/1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("activate").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4"), IPAddress("1.2.3.5")], priority=110,
hello_interval=5, dead_interval=15, track_id="ethernet 1/1", track_decrement=50)
def test_add_vrrp_from_unknown_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return([
"Error: vlan 1234 is not configured"
])
with self.assertRaises(UnknownVlan) as expect:
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4")], priority=110, hello_interval=5, dead_interval=15,
track_id="ethernet 1/1", track_decrement=50)
assert_that(str(expect.exception), equal_to("Vlan 1234 not found"))
def test_add_existing_vrrp_to_same_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
" ip vrrp-extended auth-type simple-text-auth ********",
" ip vrrp-extended vrid 1",
" backup priority 110 track-priority 50",
" ip-address 1.2.3.4",
" hello-interval 5",
" dead-interval 15",
" advertise backup",
" track-port ethernet 1/1",
" activate",
"!",
])
with self.assertRaises(VrrpAlreadyExistsForVlan) as expect:
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4")], priority=110, hello_interval=5, dead_interval=15,
track_id="ethernet 1/1", track_decrement=50)
assert_that(str(expect.exception), equal_to("Vrrp group 1 is already in use on vlan 1234"))
def test_add_vrrp_to_vlan_with_another_vrrp(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
" ip vrrp-extended auth-type simple-text-auth ********",
" ip vrrp-extended vrid 1",
" backup priority 110 track-priority 50",
" ip-address 1.2.3.4",
" hello-interval 5",
" dead-interval 15",
" advertise backup",
" track-port ethernet 1/1",
" activate",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended vrid 2").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("backup priority 110 track-priority 50").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip-address 1.2.3.5").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("hello-interval 5").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("dead-interval 15").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("advertise backup").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("track-port ethernet 1/1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("activate").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.add_vrrp_group(1234, 2, ips=[IPAddress("1.2.3.5")], priority=110, hello_interval=5, dead_interval=15,
track_id="ethernet 1/1", track_decrement=50)
def test_add_vrrp_with_out_of_range_group_id(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended auth-type simple-text-auth VLAN1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended vrid 256").and_return([
"Error - 256 not between 1 and 255"
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
with self.assertRaises(BadVrrpGroupNumber) as expect:
self.switch.add_vrrp_group(1234, 256, ips=[IPAddress("1.2.3.4")], priority=110, hello_interval=5, dead_interval=15,
track_id="ethernet 1/1", track_decrement=50)
assert_that(str(expect.exception), equal_to("VRRP group number is invalid, must be contained between 1 and 255"))
def test_add_vrrp_with_bad_hello_interval(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended auth-type simple-text-auth VLAN1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended vrid 1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("backup priority 110 track-priority 50").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip-address 1.2.3.4").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("hello-interval 100").and_return([
"Error - 100 not between 1 and 84"
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).times(3).ordered().ordered().ordered()
with self.assertRaises(BadVrrpTimers) as expect:
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4")], priority=110, hello_interval=100, dead_interval=15,
track_id="ethernet 1/1", track_decrement=50)
assert_that(str(expect.exception), equal_to("VRRP timers values are invalid"))
def test_add_vrrp_with_bad_dead_interval(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended auth-type simple-text-auth VLAN1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended vrid 1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("backup priority 110 track-priority 50").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip-address 1.2.3.4").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("hello-interval 5").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("dead-interval 100").and_return([
"Error - 100 not between 1 and 84"
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).times(3).ordered().ordered().ordered()
with self.assertRaises(BadVrrpTimers) as expect:
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4")], priority=110, hello_interval=5, dead_interval=100,
track_id="ethernet 1/1", track_decrement=50)
assert_that(str(expect.exception), equal_to("VRRP timers values are invalid"))
def test_add_vrrp_with_bad_priority(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended auth-type simple-text-auth VLAN1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended vrid 1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("backup priority 256 track-priority 50").and_return([
"Error - 256 not between 1 and 255"
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).times(3).ordered().ordered().ordered()
with self.assertRaises(BadVrrpPriorityNumber) as expect:
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4")], priority=256, hello_interval=5, dead_interval=100,
track_id="ethernet 1/1", track_decrement=50)
assert_that(str(expect.exception), equal_to("VRRP priority value is invalid, must be contained between 1 and 255"))
def test_add_vrrp_with_bad_priority_type(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended auth-type simple-text-auth VLAN1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended vrid 1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("backup priority testvalue track-priority 50").and_return([
"Invalid input -> testvalue track-priority 50"
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).times(3).ordered().ordered().ordered()
with self.assertRaises(BadVrrpPriorityNumber) as expect:
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4")], priority='testvalue', hello_interval=5, dead_interval=15,
track_id="ethernet 1/1", track_decrement=50)
assert_that(str(expect.exception), equal_to("VRRP priority value is invalid, must be contained between 1 and 255"))
def test_add_vrrp_with_bad_track_decrement(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended auth-type simple-text-auth VLAN1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended vrid 1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("backup priority 110 track-priority 255").and_return([
"Error - 255 not between 1 and 254"
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).times(3).ordered().ordered().ordered()
with self.assertRaises(BadVrrpTracking) as expect:
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4")], priority=110, hello_interval=5, dead_interval=15,
track_id="ethernet 1/1", track_decrement=255)
assert_that(str(expect.exception), equal_to("VRRP tracking values are invalid"))
def test_add_vrrp_with_bad_track_decrement_type(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended auth-type simple-text-auth VLAN1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended vrid 1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("backup priority 110 track-priority testvalue").and_return([
"Invalid input -> testvalue"
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).times(3).ordered().ordered().ordered()
with self.assertRaises(BadVrrpTracking) as expect:
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4")], priority=110, hello_interval=5, dead_interval=15,
track_id="ethernet 1/1", track_decrement='testvalue')
assert_that(str(expect.exception), equal_to("VRRP tracking values are invalid"))
def test_add_vrrp_with_no_ip_on_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended auth-type simple-text-auth VLAN1234").and_return([
"error - please configure ip address before configuring vrrp-extended"
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).times(2).ordered().ordered()
with self.assertRaises(NoIpOnVlanForVrrp) as expect:
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4")], priority=110, hello_interval=5, dead_interval=100,
track_id="ethernet 1/1", track_decrement=50)
assert_that(str(expect.exception), equal_to("Vlan 1234 needs an IP before configuring VRRP"))
def test_add_vrrp_with_bad_tracking_id(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended auth-type simple-text-auth VLAN1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended vrid 1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("backup priority 110 track-priority 50").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip-address 1.2.3.4").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("hello-interval 5").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("dead-interval 15").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("advertise backup").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("track-port ethernet not_an_interface").and_return([
"Invalid input -> not_an_interface"
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).times(3).ordered().ordered().ordered()
with self.assertRaises(BadVrrpTracking) as expect:
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4")], priority=110, hello_interval=5, dead_interval=15,
track_id="ethernet not_an_interface", track_decrement=50)
assert_that(str(expect.exception), equal_to("VRRP tracking values are invalid"))
def test_remove_vrrp_success(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
" ip vrrp-extended auth-type simple-text-auth ********",
" ip vrrp-extended vrid 1",
" backup priority 110 track-priority 50",
" ip-address 1.1.1.1",
" hello-interval 5",
" dead-interval 15",
" advertise backup",
" track-port ethernet 1/1",
" activate",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no ip vrrp-extended vrid 1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended auth-type no-auth").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.remove_vrrp_group(1234, 1)
def test_remove_one_of_two_vrrp_success(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
" ip vrrp-extended auth-type simple-text-auth ********",
" ip vrrp-extended vrid 1",
" backup priority 110 track-priority 50",
" ip-address 1.1.1.1",
" hello-interval 5",
" dead-interval 15",
" advertise backup",
" track-port ethernet 1/1",
" activate",
" ip vrrp-extended vrid 2",
" backup priority 110 track-priority 50",
" ip-address 1.1.1.2",
" hello-interval 5",
" dead-interval 15",
" advertise backup",
" track-port ethernet 1/1",
" activate",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no ip vrrp-extended vrid 1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.remove_vrrp_group(1234, 1)
def test_remove_vrrp_with_invalid_group_id(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
" ip vrrp-extended auth-type simple-text-auth ********",
" ip vrrp-extended vrid 1",
" backup priority 110 track-priority 50",
" ip-address 1.1.1.1",
" hello-interval 5",
" dead-interval 15",
" advertise backup",
" track-port ethernet 1/1",
" activate",
"!",
])
with self.assertRaises(VrrpDoesNotExistForVlan) as expect:
self.switch.remove_vrrp_group(1234, 2)
assert_that(str(expect.exception), equal_to("Vrrp group 2 does not exist for vlan 1234"))
def test_remove_vrrp_from_unknown_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return([
"Error: vlan 1234 is not configured"
])
with self.assertRaises(UnknownVlan) as expect:
self.switch.remove_vrrp_group(1234, 2)
assert_that(str(expect.exception), equal_to("Vlan 1234 not found"))
def parse_range_test(self):
result = parse_if_ranges("")
assert_that(list(result), equal_to([]))
result = parse_if_ranges("ethe 1/2")
assert_that(list(result), equal_to(["ethe 1/2"]))
result = parse_if_ranges("ethe 1/1/2 to 1/1/5")
assert_that(list(result), equal_to(["ethe 1/1/2", "ethe 1/1/3", "ethe 1/1/4", "ethe 1/1/5"]))
result = parse_if_ranges("shizzle 1/1 shizzle 1/3 to 1/5 shizzle 1/7")
assert_that(list(result), equal_to(["shizzle 1/1", "shizzle 1/3", "shizzle 1/4", "shizzle 1/5", "shizzle 1/7"]))
@mock.patch("netman.adapters.switches.brocade.SshClient")
def test_connect(self, ssh_client_class_mock):
self.switch = brocade_factory_ssh(SwitchDescriptor(
hostname="my.hostname", username="the_user", password="<PASSWORD>", model="brocade", port=22), mock.Mock())
self.shell_mock = flexmock()
ssh_client_class_mock.return_value = self.shell_mock
self.shell_mock.should_receive("get_current_prompt").and_return("hostname>").once().ordered()
self.shell_mock.should_receive("do").with_args("enable", wait_for=":").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("the_password").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("skip-page-display").and_return([]).once().ordered()
self.switch.connect()
ssh_client_class_mock.assert_called_with(
host="my.hostname",
username="the_user",
password="<PASSWORD>",
port=22
)
@mock.patch("netman.adapters.switches.brocade.TelnetClient")
def test_connect_without_port_uses_default(self, telnet_client_class_mock):
self.switch = brocade_factory_telnet(SwitchDescriptor(
hostname="my.hostname", username="the_user", password="<PASSWORD>", model="brocade"), mock.Mock())
self.shell_mock = flexmock()
telnet_client_class_mock.return_value = self.shell_mock
self.shell_mock.should_receive("get_current_prompt").and_return("hostname>").once().ordered()
self.shell_mock.should_receive("do").with_args("enable", wait_for=":").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("the_password").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("skip-page-display").and_return([]).once().ordered()
self.switch.connect()
telnet_client_class_mock.assert_called_with(
host="my.hostname",
username="the_user",
password="<PASSWORD>"
)
@mock.patch("netman.adapters.switches.brocade.SshClient")
def test_auto_enabled_switch_doesnt_require_enable(self, ssh_client_class_mock):
self.switch = brocade_factory_ssh(SwitchDescriptor(hostname="my.hostname", username="the_user", password="<PASSWORD>", model="brocade", port=8000), mock.Mock())
self.shell_mock = flexmock()
ssh_client_class_mock.return_value = self.shell_mock
self.shell_mock.should_receive("get_current_prompt").and_return("hostname#").once().ordered()
self.shell_mock.should_receive("do").with_args("enable", wait_for=": ").never()
self.shell_mock.should_receive("do").with_args("skip-page-display").and_return([]).once().ordered()
self.switch.connect()
ssh_client_class_mock.assert_called_with(
host="my.hostname",
username="the_user",
password="<PASSWORD>",
port=8000
)
def test_disconnect(self):
logger = flexmock()
self.switch.logger = logger
logger.should_receive("debug")
self.shell_mock.should_receive("quit").with_args("exit").once().ordered()
logger.should_receive("info").with_args("FULL TRANSACTION LOG").once()
self.switch.shell.full_log = "FULL TRANSACTION LOG"
self.switch.disconnect()
def test_transactions_commit_write_memory(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").and_return([
"Error: vlan 2999 is not configured"
])
self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vlan 2999 name Gertrude").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("exit").once().ordered().and_return([])
self.switch.start_transaction()
self.switch.add_vlan(2999, name="Gertrude")
self.shell_mock.should_receive("do").with_args("write memory").once().ordered()
self.switch.commit_transaction()
self.switch.end_transaction()
def test_add_dhcp_relay_server(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip helper-address 10.10.10.1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.add_dhcp_relay_server(1234, IPAddress('10.10.10.1'))
def test_add_second_dhcp_relay_server(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
" ip helper-address 10.10.10.1",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip helper-address 10.10.10.2").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.add_dhcp_relay_server(1234, IPAddress('10.10.10.2'))
def test_add_same_dhcp_relay_server_fails(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 192.168.3.11/27",
" ip helper-address 10.10.10.1",
"!",
])
with self.assertRaises(DhcpRelayServerAlreadyExists) as expect:
self.switch.add_dhcp_relay_server(1234, IPAddress('10.10.10.1'))
assert_that(str(expect.exception), equal_to("DHCP relay server 10.10.10.1 already exists on VLAN 1234"))
def test_remove_dhcp_relay_server(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 192.168.3.11/27",
" ip helper-address 10.10.10.1",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no ip helper-address 10.10.10.1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.remove_dhcp_relay_server(1234, IPAddress('10.10.10.1'))
def test_remove_non_existent_dhcp_relay_server_fails(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 192.168.3.11/27",
"!",
])
with self.assertRaises(UnknownDhcpRelayServer) as expect:
self.switch.remove_dhcp_relay_server(1234, IPAddress('10.10.10.1'))
assert_that(str(expect.exception), equal_to("DHCP relay server 10.10.10.1 not found on VLAN 1234"))
def vlan_with_vif_display(vlan_id, vif_id, name="[None]"):
return vlan_display(vlan_id, name, vif_id=vif_id)
def vlan_display(vlan_id=9, vlan_name="[None]", tagged_port_str=None, untagged_port_str=None, vif_id=None):
ret = [
"PORT-VLAN {}, Name {}, Priority Level -, Priority Force 0, Creation Type STATIC".format(vlan_id, vlan_name),
"Topo HW idx : 81 Topo SW idx: 257 Topo next vlan: 0",
"L2 protocols : STP",
]
if untagged_port_str:
ret.append("Untagged Ports : {}".format(untagged_port_str))
if tagged_port_str:
ret.append("Statically tagged Ports : {}".format(tagged_port_str))
ret.extend([
"Associated Virtual Interface Id: {}".format(vif_id or "NONE"),
"----------------------------------------------------------",
"No ports associated with VLAN",
"Arp Inspection: 0",
"DHCP Snooping: 0",
"IPv4 Multicast Snooping: Disabled",
"IPv6 Multicast Snooping: Disabled",
])
if vif_id:
ret.extend([
"Ve{} is down, line protocol is down".format(vif_id),
" Type is Vlan (Vlan Id: {})".format(vlan_id),
" Hardware is Virtual Ethernet, address is 748e.f8a7.1b01 (bia 748e.f8a7.1b01)",
" No port name",
" Vlan id: {}".format(vlan_id),
" Internet address is 0.0.0.0/0, IP MTU 1500 bytes, encapsulation ethernet",
" Configured BW 0 kbps",
])
else:
ret.append("No Virtual Interfaces configured for this vlan")
return ret
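
# Illustrative note (not part of the original test module): the helpers above build the fake
# CLI output that the shell mock returns in the tests. For example, vlan_with_vif_display(1234, 1234)
# starts with
#   "PORT-VLAN 1234, Name [None], Priority Level -, Priority Force 0, Creation Type STATIC"
# and, because a vif_id is supplied, also includes the "Ve1234 is down, line protocol is down" block.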
| 1.6875 | 2 |
eahub/base/models.py | walambert/eahub.org | 36 | 10981 | <reponame>walambert/eahub.org
import uuid
from authtools import models as authtools_models
from django.core.validators import URLValidator
from django.db import models
from django.utils import timezone
from solo.models import SingletonModel
class User(authtools_models.AbstractEmailUser):
# django-allauth puts Google or EA.org SSO data in those fields only, not Profile
# because they have a slightly inflexible architecture
first_name = models.CharField(max_length=256, blank=True)
last_name = models.CharField(max_length=256, blank=True)
def has_profile(self) -> bool:
return hasattr(self, "profile")
class FeedbackURLConfig(SingletonModel):
site_url = models.TextField(
default="https://feedback.eahub.org", validators=[URLValidator()]
)
def __str__(self):
return "Feedback URL"
class Meta:
verbose_name = "Feedback URL"
class MessagingLog(models.Model):
USER = "USER"
GROUP = "GROUP"
RECIPIENT_TYPE_CHOICES = [
(USER, "User"),
(GROUP, "Group"),
]
sender_email = models.EmailField(max_length=254)
recipient_email = models.EmailField(max_length=254)
recipient_type = models.CharField(
max_length=5,
choices=RECIPIENT_TYPE_CHOICES,
default=USER,
)
send_action_uuid = models.UUIDField(default=uuid.uuid4)
time = models.DateTimeField(default=timezone.now)
| 2.234375 | 2 |
scripts/math/generate_matrix_test.py | chr15murray/ledger | 96 | 10982 | import numpy as np
types = ["int", "float", "double"]
def randi(*args):
return np.random.randint(-10, 10, size=args)
rngs = {"int": randi, "float": np.random.randn, "double": np.random.randn}
embodiments = {
"function": "R.%s(A,B).AllClose(C)",
"op": "(A %s B).AllClose(C)",
"inline_op": "(R = A, R %s B).AllClose(C)",
"inline_function": "( R = A, R.%s(B) ).AllClose(C)"
}
tests = {
'+': ("Addition", "Add", [], []),
'*': ("Multiplication", "Multiply", [], []),
'-': ("Subtraction", "Subtract", [], []),
'/': ("Division", "Divide", ["int"], []),
'dp': ("Dot product", "Dot", [], ["op", "inline_op"])
}
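
# Example of what the generator below emits (illustrative): for the "function" embodiment and the
# Addition entry above, `embodiments["function"] % "Add"` gives "R.Add(A,B).AllClose(C)", so one of
# the printed test lines is:
#   EXPECT( R.Add(A,B).AllClose(C) );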
for type in types:
rng = rngs[type]
for op, details in tests.iteritems():
test_title, function, exclude, ignore = details
if type in exclude:
            continue  # skip this operation for excluded element types
iop = op + "="
ifunction = "Inline" + function
names = {
"function": function,
"op": op,
"inline_op": iop,
"inline_function": ifunction
}
n = 7
m = 7
A = rng(n, m)
B = rng(n, m)
if op == "+":
C = A + B
elif op == "/":
C = A / B
elif op == "-":
C = A - B
elif op == "*":
C = A * B
elif op == "dp":
C = np.dot(A, B)
m1 = " ;\n".join([" ".join([str(y) for y in x]) for x in A])
m2 = " ;\n".join([" ".join([str(y) for y in x]) for x in B])
m3 = " ;\n".join([" ".join([str(y) for y in x]) for x in C])
print """
SCENARIO("%s") {
_M<%s> A,B,C,R;
R.Resize( %d, %d );
A = _M<%s>(R\"(\n%s\n)\");
B = _M<%s>(R\"(\n%s\n)\");
C = _M<%s>(R\"(\n%s\n)\");
""" % (test_title + " for " + type, type, n, m, type, m1, type, m2, type, m3)
for method, emb in embodiments.iteritems():
if method in ignore:
continue
name = names[method]
tt = emb % name
print "EXPECT( %s );" % tt
print "};"
print
| 2.890625 | 3 |
Lab 2/utils/inference_utils.py | davedecoder/aws-deepcomposer-samples | 6 | 10983 | <gh_stars>1-10
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import tensorflow as tf
import numpy as np
from utils import path_utils, midi_utils, display_utils
# --- local samples------------------------------------------------------------------
def load_melody_samples(n_sample=10):
"""Load the samples used for evaluation."""
sample_source_path = './dataset/eval.npy'
data = np.load(sample_source_path)
data = np.asarray(data, dtype=np.float32) # {-1, 1}
random_idx = np.random.choice(len(data), n_sample, replace=False)
sample_x = data[random_idx]
sample_z = tf.random.truncated_normal((n_sample, 2, 8, 512))
print("Loaded {} melody samples".format(len(sample_x)))
return sample_x, sample_z
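
# Usage sketch (illustrative; assumes ./dataset/eval.npy exists, as expected by the loader above):
#   sample_x, sample_z = load_melody_samples(n_sample=4)
#   # sample_x holds 4 pianoroll samples, sample_z the matching random noise vectors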
# --- Training ------------------------------------------------------------------
def generate_pianoroll(generator, conditioned_track, noise_vector=None):
    if noise_vector is None:
noise_vector = tf.random.truncated_normal((1, 2, 8, 512))
return generator((conditioned_track, noise_vector), training=False)
def generate_midi(generator, saveto_dir, input_midi_file='./Experiments/data/happy_birthday_easy.mid'):
conditioned_track = midi_utils.get_conditioned_track(midi=input_midi_file)
generated_pianoroll = generate_pianoroll(generator, conditioned_track)
destination_path = path_utils.new_temp_midi_path(saveto_dir=saveto_dir)
midi_utils.save_pianoroll_as_midi(generated_pianoroll.numpy(), destination_path=destination_path)
return destination_path
def show_generated_pianorolls(generator, eval_dir, input_midi_file='./Experiments/data/happy_birthday_easy.mid', n_pr = 4):
conditioned_track = midi_utils.get_conditioned_track(midi=input_midi_file)
for i in range(n_pr):
generated_pianoroll = generate_pianoroll(generator, conditioned_track)
display_utils.show_pianoroll(generated_pianoroll) | 1.820313 | 2 |
python/aisdk/player_movement.py | THUAI-Team/thuai2022-aisdk | 0 | 10984 | <gh_stars>0
from enum import Enum
from sys import stderr
class PlayerMovement(Enum):
STOPPED = 0
WALKING = 1
RUNNING = 2
SLIPPED = 3
def to_json_representation(self):
return (str(self).split('.')[1]).lower()
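
# Example (illustrative, not part of the original SDK):
#   PlayerMovement.SLIPPED.to_json_representation() is expected to return "slipped"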
class MovementNotAllowedError(ValueError):
def __init__(self, message):
super().__init__(message) | 2.953125 | 3 |
diagnosticApp/admin.py | LASI-UFPI/diagnostic-imaging | 0 | 10985 | from django.contrib import admin
from .models import Image
@admin.register(Image)
class ImageAdmin(admin.ModelAdmin):
list_display = ('image', 'predict_covid', 'predict_no_findings', 'predict_pneumonia', 'created_at', 'updated_at', 'activated_at')
| 1.648438 | 2 |
digraph/source/generator.py | addy1997/python-RRT | 11 | 10986 | <reponame>addy1997/python-RRT
#!/usr/bin/env python
# coding: utf-8
# In[2]:
from time import time
import networkx as nx
from source.DiGraph import DiGraph
from source.model.Edge import Edge
def text_to_dict(filename):
    in_file = open(filename, "r")
lines = in_file.read()
in_file.close()
open_bracket = lines.index("{")
close_bracket = lines.index("}")
    graph = eval(lines[open_bracket:close_bracket + 1])  # include the closing brace
return graph
def specify_vertices(graph):
vertices = []
for node in graph.keys():
vertices.append(node)
return vertices
def specify_edges(graph):
edges = []
    for node in graph.keys():
        for i in graph[node]:
            edges.append(Edge(node, i))
return edges
def design_graph_object(graph, G= None):
if not G:
G = DiGraph()
for node in graph.keys():
if (node not in G.get_vertices()):
G.add_node(node)
for z in graph[node]:
if (z not in G.get_vertices()):
G.add_node(z)
G.add_edge(node, z)
return G
def set_digraph_library(graph, G):
for nodes in graph.keys():
G.add_node(nodes)
for i in graph[nodes]:
G.add_edge(nodes, i)
return G
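
# Minimal usage sketch (illustrative; relies on the DiGraph class imported above):
#   graph = {"a": ["b", "c"], "b": ["c"], "c": []}
#   G = design_graph_object(graph)
#   G.get_vertices()   # expected to contain "a", "b" and "c"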
# In[ ]:
| 2.90625 | 3 |
[1] BEGINNER/1000 - Hello World!.py | tiago040/URI-SOLUTIONS | 1 | 10987 | '''
https://resources.urionlinejudge.com.br/gallery/images/problems/UOJ_1000.png
Welcome to the URI Online Judge!
Your first program in any programming language is usually "Hello World!". In this first problem, all you have to do is print this message on the screen.
Input
This problem has no input.
Output
You must print the message "Hello World!" as shown in the example below.
'''
print('Hello World!') | 3.890625 | 4 |
py/py_0668_square_root_smooth_numbers.py | lcsm29/project-euler | 0 | 10988 | # Solution of;
# Project Euler Problem 668: Square root smooth Numbers
# https://projecteuler.net/problem=668
#
# A positive integer is called square root smooth if all of its prime factors
# are strictly less than its square root. Including the number $1$, there are
# $29$ square root smooth numbers not exceeding $100$. How many square root
# smooth numbers are there not exceeding $10\,000\,000\,000$?
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 668
timed.caller(dummy, n, i, prob_id)
| 3 | 3 |
user/forms.py | Zidan-Kharisma-Sakana/uts-f02 | 0 | 10989 | from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.forms import ValidationError, EmailField
from user import models
class MyAuthenticationForm(AuthenticationForm):
""""
Overide method clean from AuthenticationForm to show that a user hasn't activate their account
"""
error_messages = {
'invalid_login': (
"Please enter a correct %(username)s and password. Note that both "
"fields may be case-sensitive."
),
        'inactive': ("This account hasn't been activated yet. Please check your email :)"),
}
def confirm_login_allowed(self, user):
if not user.is_active:
raise ValidationError(
self.error_messages['inactive'],
code='inactive',
)
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username is not None and password:
self.user_cache = authenticate(self.request, username=username, password=password)
if self.user_cache is None:
print(username)
try:
user_temp = User.objects.get(username=username)
except:
user_temp = None
print(user_temp)
if user_temp is not None:
self.confirm_login_allowed(user_temp)
else:
raise ValidationError(
self.error_messages['invalid_login'],
code='invalid_login',
params={'username': self.username_field.verbose_name},
)
return self.cleaned_data
class CreateUserForm(UserCreationForm):
""""
Override UserCreationForm to include email field
"""
email = EmailField(required=True, label='Email')
class Meta:
model = User
fields = ("username", "email", "password1", "<PASSWORD>")
error_messages = {
        'password_mismatch': ('The two password fields didn’t match.'),
'email_taken': 'Your email has been taken'
}
def clean_email(self):
"""
Check if the email had already been taken
"""
email = self.cleaned_data.get('email')
num = User.objects.filter(email=email)
if num.count() > 0:
raise ValidationError(
self.error_messages['email_taken'],
code='email_taken',
)
return email
    def save(self, commit=True):
user = super(CreateUserForm, self).save(commit=False)
email = self.cleaned_data.get('email')
user.email = email
user.is_active=False
if commit:
user.save()
return user
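
# Usage sketch (illustrative, not part of the original module): in a typical Django view,
#   form = CreateUserForm(request.POST)
#   if form.is_valid():
#       user = form.save()   # saved with is_active=False, pending email activation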
| 2.78125 | 3 |
data-structures-and-algorithms/examples/binary_tree_recursive.py | vinnyhoward/til | 0 | 10990 | class Node(object): # Similar to Linked List initial set-up
def __init__(self, value): # Constructor
self.value = value
self.left = None
self.right = None
class BinaryTree(object):
def __init__(self, root):
self.root = Node(root)
def print_tree(self, traversal_type):
if traversal_type == "preorder":
return self.preorder_print(tree.root, "") # init
elif traversal_type == "inorder":
return self.in_order_print(tree.root, "") # init
elif traversal_type == "postorder":
return self.post_order_print(tree.root, "") # init
else:
print("Traversal type " + str(traversal_type) + "not valid")
return False
def preorder_print(self, start, traversal):
# Root --> Left --> Right
if start:
traversal += (str(start.value) + "--")
traversal = self.preorder_print(start.left, traversal)
traversal = self.preorder_print(start.right, traversal)
return traversal
def in_order_print(self, start, traversal):
# Very Left --> Root --> Very Right
if start:
traversal = self.in_order_print(start.left, traversal)
traversal += (str(start.value) + '--')
traversal = self.in_order_print(start.right, traversal)
return traversal
def post_order_print(self, start, traversal):
# Very Left --> Very Right --> Root
if start:
traversal = self.post_order_print(start.left, traversal)
traversal = self.post_order_print(start.right, traversal)
traversal += (str(start.value) + '--')
return traversal
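
    # The three traversals above differ only in where the node's value is appended relative
    # to the recursive calls. For a small tree (root 1 with children 2 and 3):
    #   preorder  -> 1--2--3--
    #   inorder   -> 2--1--3--
    #   postorder -> 2--3--1--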
"""Try doing Post-Order tomorrow"""
# Visualization of Current Tree
# Pre-Order Output: 1--2--4--9--10--11--5--3--6--7--8--
# In-Order Output: 11--10--9--4--2--5--1--6--3--7--8--
# Post-Order Output: 11--10--9--4--5--2--6--8--7--3--1--
# 1
# / \
# 2 3
# / | / |
# 4 5 6 7
# / \
# 9 8
# /
# 10
# /
# 11
# Tree Set-Up
# Another implementation
# class BinaryTree(object):
# def __init__(self, root):
# self.root = Node(root)
# def search(self, find_val):
# return self.preorder_search(tree.root, find_val)
# def print_tree(self):
# return self.preorder_print(tree.root, "")[:-1]
# def preorder_search(self, start, find_val):
# if start:
# if start.value == find_val:
# return True
# else:
# return self.preorder_search(start.left, find_val) or self.preorder_search(start.right, find_val)
# return False
# def preorder_print(self, start, traversal):
# if start:
# traversal += (str(start.value) + "-")
# traversal = self.preorder_print(start.left, traversal)
# traversal = self.preorder_print(start.right, traversal)
# return traversal
tree = BinaryTree(1)
tree.root.left = Node(2)
tree.root.right = Node(3)
tree.root.left.left = Node(4)
tree.root.left.right = Node(5)
tree.root.right.left = Node(6)
tree.root.right.right = Node(7)
tree.root.right.right.right = Node(8)
tree.root.left.left.left = Node(9)
tree.root.left.left.left.left = Node(10)
tree.root.left.left.left.left.left = Node(11)
# print(tree.print_tree("preorder"))
# print(tree.print_tree("inorder"))
print(tree.print_tree("postorder"))
| 4.125 | 4 |
powerranger/files.py | clayboone/powerranger | 0 | 10991 | <reponame>clayboone/powerranger<gh_stars>0
import curses
import itertools
import os
from pathlib import Path
import stat
from typing import Optional, Union
import config
from colors import Colors
class Item:
"""An item inside of a Directory."""
def __init__(self, path: Union[Path, str]):
self._path = Path(path)
self._selected = False
@property
def name(self) -> str:
"""The name of the item, not including parents."""
return self._path.name
@property
def color(self) -> curses.color_pair:
"""An initialized ncurses color pair associated with the type of file
for this Item.
"""
if self.selected:
return Colors.black_on_white()
if self._path.is_dir():
return Colors.blue_on_black()
return Colors.default()
@property
def selected(self) -> Optional[bool]:
"""Return whether this item should appear as selected"""
return self._selected
@selected.setter
def selected(self, value: bool):
self._selected = value
def is_hidden(self) -> bool:
"""Return whether or not the file should be hidden."""
return self._has_hidden_attribute() or self._path.name.startswith(".")
def _has_hidden_attribute(self) -> bool:
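        # Note: st_file_attributes is only present on Windows stat results; on other
        # platforms this lookup raises AttributeError.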
return bool(os.stat(self._path.resolve()).st_file_attributes & stat.FILE_ATTRIBUTE_HIDDEN)
class Directory:
"""A list of items inside of a directory."""
def __init__(self, path: Union[Path, str]):
self.path = Path(path)
def __iter__(self):
elements = self.path.iterdir()
if config.SORT_FOLDERS_ON_TOP:
element1, element2 = itertools.tee(elements)
elements = itertools.chain(
(item for item in element1 if item.is_dir()),
(item for item in element2 if not item.is_dir()),
)
for element in elements:
item = Item(element)
if item.is_hidden() and not config.SHOW_HIDDEN_FILES:
continue
yield Item(element)
| 2.78125 | 3 |
parlai/mturk/webapp/run_mocks/mock_turk_manager.py | lizekang/ParlAI | 1 | 10992 | <filename>parlai/mturk/webapp/run_mocks/mock_turk_manager.py
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import logging
import os
import threading
import time
import uuid
from parlai.mturk.core.agents import AssignState
from parlai.mturk.core.socket_manager import Packet
from parlai.mturk.webapp.run_mocks.mock_turk_agent import MockTurkAgent
import parlai.mturk.core.data_model as data_model
import parlai.mturk.core.shared_utils as shared_utils
parent_dir = os.path.dirname(os.path.abspath(__file__))
class MockTurkManager():
"""Manages interactions between MTurk agents as well as direct interactions
between a world and the MTurk server.
"""
current_manager = None
def __init__(self, opt, mturk_agent_ids, is_test=False, use_db=False):
"""Fake an MTurk manager that has the functionality to run a task,
but not on mturk
"""
self.opt = opt
self.mturk_agent_ids = mturk_agent_ids
self.has_run = False
self.sandbox = True
self.db_logger = None
# Required lifecycle functions below
def setup_server(self, task_directory_path=None):
"""Noop, we aren't connecting to a server"""
print('[mock] setup_server called')
def start_new_run(self):
"""Initialize expected state to not cause crashes"""
self.run_id = str(int(time.time()))
self.task_group_id = '{}_{}'.format(self.opt['task'], self.run_id)
print('[mock] start_new_run called')
def ready_to_accept_workers(self, timeout_seconds=None):
"""No threads, as there is no sustained worker pool. Instead
we instantiate x MockTurkAgents in onboarding"""
self.id_to_agent = {
agent_id: MockTurkAgent(
self.opt, self, 'hit_id_{}'.format(agent_id),
'assignment_id_{}'.format(agent_id), agent_id,
) for agent_id in self.mturk_agent_ids
}
self.agents = list(self.id_to_agent.values())
MockTurkManager.current_manager = self
print('[mock] ready_to_accept_workers called')
def set_onboard_function(self, onboard_function):
self.onboard_function = onboard_function
print('[mock] set_onboard_function called')
def start_task(self, eligibility_function, assign_role_function,
task_function):
"""Handle running a task by checking to see when enough agents are
in the pool to start an instance of the task. Continue doing this
until the desired number of conversations is had.
"""
print('[mock] start_task called')
if callable(eligibility_function):
# Convert legacy eligibility_functions to the new format
eligibility_function = {
'multiple': False,
'func': eligibility_function,
}
else:
# Ensure the eligibility function is valid
if 'func' not in eligibility_function:
shared_utils.print_and_log(
logging.CRITICAL,
"eligibility_function has no 'func'. Cancelling."
)
raise Exception(
'eligibility_function dict must contain a `func` field '
'containing the actual function.'
)
elif not callable(eligibility_function['func']):
shared_utils.print_and_log(
logging.CRITICAL,
"eligibility_function['func'] not a function. Cancelling."
)
raise Exception(
"eligibility_function['func'] must contain a function. "
"If eligibility_function['multiple'] is set, it should "
"filter through the list of workers and only return those "
"that are currently eligible to participate. If it is not "
"set, it should take in a single worker and return whether"
" or not they are eligible."
)
if 'multiple' not in eligibility_function:
eligibility_function['multiple'] = False
valid_agents = [a for a in self.agents if a.mock_status == 'waiting']
needed_agents = len(self.mturk_agent_ids)
while len(valid_agents) < needed_agents:
valid_agents = [a for a in self.agents
if a.mock_status == 'waiting']
# Add the required number of valid agents to the conv
agents = [a for a in valid_agents[:needed_agents]]
assign_role_function(agents)
# Allow task creator to filter out agents and run
# versions of the task that require fewer agents
agents = [a for a in agents if a.id is not None]
for agent in agents:
agent.mock_status = AssignState.STATUS_IN_TASK
agent.set_status(AssignState.STATUS_IN_TASK)
agent.conversation_id = 'in_task'
task_function(mturk_manager=self, opt=self.opt, workers=agents)
for agent in agents:
agent.mock_status = AssignState.STATUS_DONE
agent.set_status(AssignState.STATUS_DONE)
agent.task_done = True
def shutdown(self, force=False):
"""No servers, nothing to clean up"""
print('[mock] shutdown called')
def move_agents_to_waiting(self, agents):
"""Mock moving to a waiting world"""
for agent in agents:
agent.mock_status = AssignState.STATUS_WAITING
agent.set_status(AssignState.STATUS_WAITING)
agent.conversation_id = 'waiting'
def disconnect_agent(self, worker_id, assignment_id):
"""Set an agent to status disconnect, and all other agents to
partner disconnect. send them the correct message. Mocks
MTurkManager._handle_agent_disconnect
"""
worker = self.id_to_agent[worker_id]
worker.disconnected = True
for agent in self.agents:
if not agent.disconnected:
agent.some_agent_disconnected = True
def worker_alive(self, worker_id, hit_id, assign_id):
"""Mocks baseline worker_alive status changes for mock agents"""
agent = self.id_to_agent[worker_id]
if agent.mock_status == AssignState.STATUS_NONE:
agent.status = AssignState.STATUS_ONBOARDING
agent.set_status(AssignState.STATUS_ONBOARDING)
self.onboard_new_agent(agent)
else:
if agent.status in [AssignState.STATUS_ONBOARDING,
AssignState.STATUS_IN_TASK]:
pass
elif (agent.status == AssignState.STATUS_DISCONNECT or
agent.status == AssignState.STATUS_DONE or
agent.status == AssignState.STATUS_EXPIRED or
agent.status == AssignState.STATUS_RETURNED or
agent.status == AssignState.STATUS_PARTNER_DISCONNECT):
# reconnect is an inactive command
data = agent.get_inactive_command_data()
self.send_command(worker_id, assign_id, data)
def on_new_message(self, worker_id, msg):
agent = self.id_to_agent[worker_id]
agent.put_data(msg.id, msg.data)
agent.append_message(msg.data)
def onboard_new_agent(self, agent):
"""Creates an onboarding thread for the given agent"""
# get state variable in question
worker_id = agent.worker_id
assignment_id = agent.assignment_id
def _onboard_function(agent):
"""Onboarding wrapper to set state to onboarding properly"""
if self.onboard_function:
agent.id = 'Onboarding'
self.onboard_function(agent)
# once onboarding is done, move into a waiting world
self.move_agents_to_waiting([agent])
# Start the onboarding thread and run it
onboard_thread = threading.Thread(
target=_onboard_function,
args=(agent,),
name='onboard-{}-{}'.format(worker_id, assignment_id)
)
onboard_thread.daemon = True
onboard_thread.start()
return True
# MTurk Agent Interaction Functions #
def send_message(self, receiver_id, assignment_id, data,
blocking=True, ack_func=None):
"""'Send' a message directly by updating the queue of messages not
yet recieved that the agent can pull from
"""
data = data.copy() # Ensure data packet is sent in current state
data['type'] = data_model.MESSAGE_TYPE_MESSAGE
# Force messages to have a unique ID
if 'message_id' not in data:
data['message_id'] = str(uuid.uuid4())
conversation_id = None
agent = self.id_to_agent[receiver_id]
conversation_id = agent.conversation_id
event_id = shared_utils.generate_event_id(receiver_id)
packet = Packet(
event_id,
Packet.TYPE_MESSAGE,
'world',
receiver_id,
assignment_id,
data,
conversation_id=conversation_id,
blocking=blocking,
ack_func=ack_func
)
shared_utils.print_and_log(
logging.INFO,
'Manager sending: {}'.format(packet),
should_print=self.opt['verbose']
)
# Push message to restore queue and incoming queue
agent.append_message(packet.data)
agent.unread_messages.append(packet)
return data['message_id']
def send_command(self, receiver_id, assignment_id, data, blocking=True,
ack_func=None):
"""Commands aren't actually sent this way, as state updates are read"""
return None
def timeout_all_agents(self):
"""Set all agent statuses to disconnect to kill the world"""
for agent in self.agents:
agent.disconnected = True
# BELOW ARE STUBS THAT EXIST TO HOPEFULLY MAKE RUN FILES NOT CRASH
# NONE OF THEM DO ANYTHING (though some return success values)
def mark_workers_done(self, workers):
pass
def free_workers(self, workers):
pass
def get_agent_work_status(self, assignment_id):
pass
def get_qualification_list(self, qualifications=None):
return []
def create_additional_hits(self, num_hits, qualifications=None):
return 'fake_page_url'
def create_hits(self, qualifications=None):
return 'fake_page_url'
def get_hit(self, hit_id):
pass
def get_assignment(self, assignment_id):
pass
def get_assignments_for_hit(self, hit_id):
pass
def expire_all_unassigned_hits(self):
pass
def approve_work(self, assignment_id, override_rejection=False):
print('[mock] Assignment {} approved'.format(assignment_id))
def reject_work(self, assignment_id, reason):
print('[mock] Assignment {} rejected for {}'.format(
assignment_id, reason))
def approve_assignments_for_hit(self, hit_id, override_rejection=False):
print('[mock] HIT {} approved'.format(hit_id))
def block_worker(self, worker_id, reason):
print('[mock] Worker {} blocked for reason {}'.format(
worker_id, reason))
def soft_block_worker(self, worker_id, qual='block_qualification'):
print('[mock] Worker {} given qual {}'.format(worker_id, qual))
def un_soft_block_worker(self, worker_id, qual='block_qualification'):
print('[mock] Worker {} revoked qual {}'.format(worker_id, qual))
def give_worker_qualification(self, worker_id, qual_name, qual_value=None):
print('[mock] Worker {} given qual {}'.format(worker_id, qual_name))
def remove_worker_qualification(self, worker_id, qual_name, reason=''):
print('[mock] Worker {} revoked qual {}'.format(worker_id, qual_name))
def create_qualification(self, qualification_name, description,
can_exist=True):
pass
def pay_bonus(self, worker_id, bonus_amount, assignment_id, reason,
unique_request_token):
print('[mock] Worker {} paid bonus {}'.format(worker_id, bonus_amount))
def email_worker(self, worker_id, subject, message_text):
print('[mock] Worker {} emailed {}'.format(worker_id, message_text))
return {'success': True}
| 2.03125 | 2 |
scripts/train_model.py | allenai/sledgehammer | 47 | 10993 | <gh_stars>10-100
#!/usr/bin/env python
import sys
import os
import random
import copy
import subprocess
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
# PYTHON_DIR="/".join(os.environ['CONDA_EXE'].split("/")[:-2])+'/envs/allennlp_0.8.4/bin/'
exit_threshold=0.9
def main():
parser = arg_parser()
args = parser.parse_args()
lrs = [2e-5, 3e-5, 5e-5]
dropout = [0.1, 0.1]
layer_indices = args.layer_indices
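    # Keep per-epoch checkpoints when classifiers for more than one layer are
    # trained; presumably they are needed for later per-layer analysis.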
dont_delete = len(layer_indices.split("_")) > 1
n_test = args.n_tests
dataset = args.dataset
start = args.start
is_lowercase = args.bert_type[-7:] == 'uncased'
cwd = os.getcwd()+"/"
training_config_file = cwd+"training_config/sledgehammer_bert_classification.jsonnet"
base_path = args.data_dir+"/text_cat/"
if args.nli:
training_config_file = cwd+"training_config/sledgehammer_bert_nli.jsonnet"
base_path = args.data_dir+"/nli/"
slurm = args.slurm
extra_args = ""
if slurm is None:
os.environ["BERT_TYPE"] = args.bert_type
os.environ["IS_LOWERCASE"] = str(is_lowercase).lower()
os.environ["TRAIN_PATH"] = base_path+dataset+"/train"
os.environ["DEV_PATH"] = base_path+dataset+"/dev"
os.environ["TEST_PATH"] = base_path+dataset+"/test"
os.environ["LAYER_INDICES"] = layer_indices
# @todo change me back to 0
os.environ["CUDA_DEVICE"] = str(args.cuda_device)
os.environ["SCALING_TEMPERATURE"] = "_".join(["1" for i in range(len(layer_indices.split("_")))])
os.environ["BATCH_SIZE"] = str(args.batch_size)
os.environ["MAX_PIECES"] = str(args.max_pieces)
os.environ["TEMPERATURE_THRESHOLD"] = str(exit_threshold)
os.environ["ADD_PREVIOUS_LAYER_LOGITS"] = 'false'
os.environ["MULTITASK"] = 'false'
os.environ["NUM_EPOCHS"] = str(args.num_epochs)
else:
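        # When running under SLURM, the same settings are passed to srun via
        # --export as environment variables instead of being set in os.environ.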
extra_args = "--export BERT_TYPE={},IS_LOWERCASE={},TRAIN_PATH={},DEV_PATH={},TEST_PATH={},LAYER_INDICES={},CUDA_DEVICE={},SCALING_TEMPERATURE={},BATCH_SIZE={},MAX_PIECES={},TEMPERATURE_THRESHOLD={},ADD_PREVIOUS_LAYER_LOGITS={},MULTITASK={},NUM_EPOCHS={}".format(args.bert_type,str(is_lowercase).lower(),base_path+dataset+"/train",base_path+dataset+"/dev",base_path+dataset+"/test","'"+layer_indices+"'",0,"'"+"_".join(["1" for i in range(len(layer_indices.split("_")))])+"'",args.batch_size,args.max_pieces,exit_threshold,'false','false',args.num_epochs)
for i in range(start, n_test):
#lr = str(10**random.uniform(lrs[0], lrs[1]))
        lr = str(random.choice(lrs))  # sample a learning rate uniformly from the grid
dr = str(random.uniform(dropout[0], dropout[1]))
seed = str(random.randint(0,100000))
local_dir = args.work_dir+args.bert_type+"/"+dataset+"/experiment_{}_{}/".format(layer_indices, i)
local_extra_args = copy.copy(extra_args)
allennlp_cmd = "allennlp train {} --serialization-dir {} --include-package allennlp_overrides -f".format(training_config_file, local_dir)
if slurm is None:
os.environ["SEED"] = seed
os.environ["PYTORCH_SEED"] = seed
os.environ["NUMPY_SEED"] = seed
os.environ["DROPOUT"] = dr
os.environ["LEARNING_RATE"] = lr
cmd = allennlp_cmd
else:
local_extra_args += ",SEED={},PYTORCH_SEED={},NUMPY_SEED={},DROPOUT={},LEARNING_RATE={}".format(seed,seed,seed,dr,lr)
cmd = "srun -p allennlp_hipri -w {} --gpus=1 {} {}".format(slurm, local_extra_args, allennlp_cmd)
print(cmd)
return_value = subprocess.call(cmd, shell=True)
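        # If the training command did not exit cleanly, remove any checkpoint
        # files it left behind (presumably to save disk space).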
if return_value != 0:
for j in range(200):
if not dont_delete:
f = "{}/model_state_epoch_{}.th".format(local_dir, j)
rm_if_exists(f)
f = "{}/training_state_epoch_{}.th".format(local_dir, j)
rm_if_exists(f)
f = local_dir+"/best.th"
rm_if_exists(f)
# If we are not deleting intermediate models, we don't need the final model.tar.gz file
if dont_delete:
f = local_dir+"/model.tar.gz"
rm_if_exists(f)
return 0
def rm_if_exists(f):
if os.path.exists(f):
os.remove(f)
return 1
else:
return 0
def arg_parser():
"""Extracting CLI arguments"""
p = ArgumentParser(add_help=False)
p.add_argument("-b", "--batch_size", help="Batch size", type=int, default=72)
p.add_argument("-s", "--start",
help="First experiment index to run",
type=int, default=0)
p.add_argument("-t", "--bert_type", help="Bert type (bert-{base,large}-{cased,uncased})", type=str,
default='bert-large-uncased')
p.add_argument("-n", "--n_tests", help="Number of grid search experiments to run", type=int, default=1)
p.add_argument("-x", "--max_pieces", help="Maximum number of word pieces for BERT", type=int, default=512)
p.add_argument("-c", "--num_epochs", help="Number of epochs to run", type=int, default=2)
p.add_argument("-l", "--layer_indices", help="Indices of layers to train classifiers for", type=str, default="23")
p.add_argument("-d", "--dataset", help="Dataset to work with", required=True)
p.add_argument("-i", "--nli", help="Is this an NLI experiment? (if not, it's text_cat)", action='store_true')
p.add_argument("-r", "--slurm", help="Run jobs on SLURM using this server", type=str)
p.add_argument('-w', '--work_dir', help="Working directory. Should contain a directory for the bert_type, which contains another directory for the dataset", type=str, default="")
p.add_argument('--data_dir', help="Dataset directory. Should contain 'text_cat' and/or 'nli' folders, containing a directory for the dataset, which contains three files: train, dev and test", type=str, required=True)
p.add_argument("-u", "--cuda_device", help="CUDA device (or -1 for CPU)", type=int, default=0)
return ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter,
parents=[p])
if __name__ == '__main__':
sys.exit(main())
| 2.078125 | 2 |
utils/gather_files.py | letsgo247/KFG | 0 | 10994 | <reponame>letsgo247/KFG
import os
import shutil
import time
def read_all_file(path):
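    # Recursively walk 'path' and return a flat list of every file path under it.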
output = os.listdir(path)
file_list = []
for i in output:
if os.path.isdir(path+"/"+i):
file_list.extend(read_all_file(path+"/"+i))
elif os.path.isfile(path+"/"+i):
file_list.append(path+"/"+i)
return file_list
def copy_all_file(file_list, new_path):
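    # Copy each collected file into 'new_path', flattening the directory
    # structure (files that share a name will overwrite each other).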
for src_path in file_list:
file = src_path.split("/")[-1]
shutil.copyfile(src_path, new_path+"/"+file)
# print("ํ์ผ {} ์์
์๋ฃ".format(file)) # ์์
ํ ํ์ผ๋ช
์ถ๋ ฅ
src_path = "C:\dev\KFG\Data/Korean/AFAD/AFAD-Lite" # ๊ธฐ์กด ํด๋ ๊ฒฝ๋ก
new_path = "C:\dev\KFG\Data/Korean/AFAD/AFAD_gathered" # ์ฎ๊ธธ ํด๋ ๊ฒฝ๋ก
start_time = time.time() # ์์
์์ ์๊ฐ
file_list = read_all_file(src_path)
copy_all_file(file_list, new_path)
print("=" * 40)
print("๋ฌ๋ ํ์ : {}".format(time.time() - start_time)) # ์ด ์์์๊ฐ ๊ณ์ฐ | 2.734375 | 3 |
code/diva_evaluation_cli/bin/commands/actev_get_system_subcommands/git_command.py | wenhel/Argus | 4 | 10995 | """Actev module: get-system git
Actev modules are used to parse actev commands in order to get arguments
before calling associated entry point methods to execute systems.
Warning: this file should not be modified: see src/entry_points to add your source code.
"""
from diva_evaluation_cli.bin.commands.actev_command import ActevCommand
class ActevGetSystemGit(ActevCommand):
"""Clones a git repository
Command Args:
* location or l: path to store the system
        * user or U: username to access the url
* password or p: password to access the url
* token or t: token to access the url
* install-cli or i: install the cli to use it
"""
def __init__(self):
super(ActevGetSystemGit, self).__init__('git', "get_git.sh")
def cli_parser(self, arg_parser):
"""Configure the description and the arguments (positional and optional) to parse.
Args:
arg_parser(:obj:`ArgParser`): Python arg parser to describe how to parse the command
"""
arg_parser.description= "Downloads a git repository"
required_named = arg_parser.add_argument_group('required named arguments')
arg_parser.add_argument("-U", "--user", help="username to access the url")
arg_parser.add_argument("-p", "--password", help="password to access the url"
"Warning: if password starts with \'-\', use this: --password=<your password>")
arg_parser.add_argument("-l", "--location", help="path to store the system")
arg_parser.add_argument("-t", "--token", help="token to access the url"
"Warning: if token starts with \'-\', use this: --token=<your token>",
type=str)
arg_parser.add_argument("-i", "--install-cli", help="install the cli to use it", action='store_true')
| 2.59375 | 3 |
1.8.first-promise.py | senpl/course-promises | 3 | 10996 | import re
textinput = widget_inputs["text1"]
comments = []
def commentizer(new):
if new not in comments:
comments.append(new)
is_correct = False
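# Check the free-text answer against a few expected keywords; each match
# appends feedback and updates the correctness flag.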
result = re.match(".*window.*", textinput, flags=re.IGNORECASE)
if result:
is_correct = True
commentizer("You're right, but there's a little more to it than that. Make sure you watch the solution video.")
result = re.match(".*global.*", textinput, flags=re.IGNORECASE)
if result:
is_correct = True
commentizer("Right! It's the global object.")
result = re.match(".*promise.*", textinput, flags=re.IGNORECASE)
if result:
is_correct = False
commentizer("It's not the Promise. Take another look!")
if not is_correct and len(comments) == 0:
commentizer("Not quite. Just log `this` somewhere in the Promise to see what happens.")
grade_result["comment"] = "\n\n".join(comments)
grade_result["correct"] = is_correct | 2.640625 | 3 |
test/test_markdown_parser.py | Asana/SGTM | 8 | 10997 | import unittest
from html import escape
from src.markdown_parser import convert_github_markdown_to_asana_xml
class TestConvertGithubMarkdownToAsanaXml(unittest.TestCase):
def test_basic_markdown(self):
md = """~~strike~~ **bold** _italic_ `code` [link](asana.com)"""
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(
xml,
'<s>strike</s> <strong>bold</strong> <em>italic</em> <code>code</code> <a href="asana.com">link</a>\n',
)
def test_ul_tag(self):
md = """* bullet one\n* bullet two"""
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(
xml, """<ul>\n<li>bullet one</li>\n<li>bullet two</li>\n</ul>\n""",
)
def test_ol_tag(self):
md = """1. bullet one\n2. bullet two"""
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(
xml, """<ol>\n<li>bullet one</li>\n<li>bullet two</li>\n</ol>\n""",
)
def test_paragraph(self):
md = "we don't wrap random text in p tags"
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(md + "\n", xml)
def test_block_quote(self):
md = "> block quote"
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(xml, "<em>> block quote\n</em>")
def test_horizontal_rule(self):
# Asana doesn't support <hr /> tags, so we just ignore them
md = "hello\n\n---\nworld\n"
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(xml, md) # unchanged
def test_auto_linking(self):
md = "https://asana.com/ [still works](www.test.com)"
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(
xml,
'<a href="https://asana.com/">https://asana.com/</a> <a href="www.test.com">still works</a>\n',
)
def test_converts_headings_to_bold(self):
md = "## heading"
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(xml, "\n<b>heading</b>\n")
def test_nested_code_within_block_quote(self):
md = "> abc `123`"
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(xml, "<em>> abc <code>123</code>\n</em>")
def test_removes_pre_tags_inline(self):
md = """```test```"""
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(xml, "<code>test</code>\n")
def test_removes_pre_tags_block(self):
md = """see:
```
function foo = () => null;
```
"""
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(xml, "see:\n<code>function foo = () => null;\n</code>\n")
def test_escapes_raw_html_mixed_with_markdown(self):
md = """## <img href="link" />still here <h3>header</h3>"""
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(
xml,
"\n<b>"
+ escape('<img href="link" />')
+ "still here "
+ escape("<h3>header</h3>")
+ "</b>\n",
)
def test_escapes_raw_html_on_own_lines(self):
md = """## blah blah blah
<img href="link">
still here <h3>header</h3>"""
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(
xml,
"\n<b>blah blah blah</b>\n"
+ escape('<img href="link">\n')
+ "still here "
+ escape("<h3>header</h3>"),
)
def test_escapes_raw_html(self):
md = """<img href="link" />still here <h3>header</h3>"""
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(
xml,
escape('<img href="link" />') + "still here " + escape("<h3>header</h3>\n"),
)
def test_removes_images(self):
md = """"""
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(xml, '<a href="https://image.com">image</a>\n')
if __name__ == "__main__":
from unittest import main as run_tests
run_tests()
| 3 | 3 |
ansible/my_env/lib/python2.7/site-packages/ansible/modules/network/ftd/ftd_file_upload.py | otus-devops-2019-02/yyashkin_infra | 1 | 10998 | #!/usr/bin/python
# Copyright (c) 2018 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: ftd_file_upload
short_description: Uploads files to Cisco FTD devices over HTTP(S)
description:
- Uploads files to Cisco FTD devices including disk files, backups, and upgrades.
version_added: "2.7"
author: "Cisco Systems, Inc."
options:
operation:
description:
- The name of the operation to execute.
- Only operations that upload file can be used in this module.
required: true
type: str
file_to_upload:
description:
- Absolute path to the file that should be uploaded.
required: true
type: path
version_added: "2.8"
register_as:
description:
- Specifies Ansible fact name that is used to register received response from the FTD device.
type: str
"""
EXAMPLES = """
- name: Upload disk file
ftd_file_upload:
operation: 'postuploaddiskfile'
file_to_upload: /tmp/test1.txt
"""
RETURN = """
msg:
description: The error message describing why the module failed.
returned: error
type: string
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.ftd.common import construct_ansible_facts, FtdServerError, HTTPMethod
from ansible.module_utils.network.ftd.fdm_swagger_client import OperationField
def is_upload_operation(op_spec):
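    # An upload operation is a POST endpoint whose response model is an UploadStatus.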
    return op_spec[OperationField.METHOD] == HTTPMethod.POST and 'UploadStatus' in op_spec[OperationField.MODEL_NAME]
def main():
fields = dict(
operation=dict(type='str', required=True),
file_to_upload=dict(type='path', required=True),
register_as=dict(type='str'),
)
module = AnsibleModule(argument_spec=fields,
supports_check_mode=True)
params = module.params
connection = Connection(module._socket_path)
op_spec = connection.get_operation_spec(params['operation'])
if op_spec is None:
module.fail_json(msg='Operation with specified name is not found: %s' % params['operation'])
if not is_upload_operation(op_spec):
module.fail_json(
msg='Invalid upload operation: %s. The operation must make POST request and return UploadStatus model.' %
params['operation'])
try:
if module.check_mode:
module.exit_json()
resp = connection.upload_file(params['file_to_upload'], op_spec[OperationField.URL])
module.exit_json(changed=True, response=resp, ansible_facts=construct_ansible_facts(resp, module.params))
except FtdServerError as e:
module.fail_json(msg='Upload request for %s operation failed. Status code: %s. '
'Server response: %s' % (params['operation'], e.code, e.response))
if __name__ == '__main__':
main()
| 1.898438 | 2 |
agent/indy_catalyst_agent/messaging/trustping/routes.py | nairobi222/indy-catalyst | 2 | 10999 | """Trust ping admin routes."""
from aiohttp import web
from aiohttp_apispec import docs
from ..connections.models.connection_record import ConnectionRecord
from .messages.ping import Ping
from ...storage.error import StorageNotFoundError
@docs(tags=["trustping"], summary="Send a trust ping to a connection")
async def connections_send_ping(request: web.BaseRequest):
"""
Request handler for sending a trust ping to a connection.
Args:
request: aiohttp request object
"""
context = request.app["request_context"]
connection_id = request.match_info["id"]
outbound_handler = request.app["outbound_message_router"]
try:
connection = await ConnectionRecord.retrieve_by_id(context, connection_id)
except StorageNotFoundError:
return web.HTTPNotFound()
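    # Only send a ping once the connection is active or awaiting a response.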
if connection.is_active or connection.state == connection.STATE_RESPONSE:
msg = Ping()
await outbound_handler(msg, connection_id=connection_id)
await connection.log_activity(context, "ping", connection.DIRECTION_SENT)
return web.HTTPOk()
async def register(app: web.Application):
"""Register routes."""
app.add_routes([web.post("/connections/{id}/send-ping", connections_send_ping)])
| 2.421875 | 2 |