max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M)
---|---|---|---|---|
ide/api/project.py | Ramonrlb/cloudpebble | 147 | 138404 |
import re
import json
import time
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.db import transaction, IntegrityError
from django.shortcuts import get_object_or_404
from django.views.decorators.http import require_safe, require_POST
from django.utils.translation import ugettext as _  # provides the _() used in the error messages below
from ide.models.build import BuildResult
from ide.models.project import Project, TemplateProject
from ide.models.files import SourceFile, ResourceFile
from ide.tasks.archive import create_archive, do_import_archive
from ide.tasks.build import run_compile
from ide.tasks.gist import import_gist
from ide.tasks.git import do_import_github
from utils.td_helper import send_td_event
from utils.jsonview import json_view, BadRequest
__author__ = 'katharine'
@require_safe
@login_required
@json_view
def project_info(request, project_id):
project = get_object_or_404(Project, pk=project_id, owner=request.user)
source_files = SourceFile.objects.filter(project=project).order_by('file_name')
resources = ResourceFile.objects.filter(project=project).order_by('file_name')
return {
'type': project.project_type,
'name': project.name,
'last_modified': str(project.last_modified),
'app_uuid': project.app_uuid or '',
'app_company_name': project.app_company_name,
'app_short_name': project.app_short_name,
'app_long_name': project.app_long_name,
'app_version_label': project.app_version_label,
'app_is_watchface': project.app_is_watchface,
'app_is_hidden': project.app_is_hidden,
'app_keys': json.loads(project.app_keys),
'parsed_app_keys': project.get_parsed_appkeys(),
'app_is_shown_on_communication': project.app_is_shown_on_communication,
'app_capabilities': project.app_capabilities,
'app_jshint': project.app_jshint,
'app_dependencies': project.get_dependencies(include_interdependencies=False),
'interdependencies': [p.id for p in project.project_dependencies.all()],
'sdk_version': project.sdk_version,
'app_platforms': project.app_platforms,
'app_modern_multi_js': project.app_modern_multi_js,
'menu_icon': project.menu_icon.id if project.menu_icon else None,
'source_files': [{
'name': f.file_name,
'id': f.id,
'target': f.target,
'file_path': f.project_path,
'lastModified': time.mktime(f.last_modified.utctimetuple())
} for f in source_files],
'resources': [{
'id': x.id,
'file_name': x.file_name,
'kind': x.kind,
'identifiers': [y.resource_id for y in x.identifiers.all()],
'extra': {y.resource_id: y.get_options_dict(with_id=False) for y in x.identifiers.all()},
'variants': [y.get_tags() for y in x.variants.all()],
} for x in resources],
'github': {
'repo': "github.com/%s" % project.github_repo if project.github_repo is not None else None,
'branch': project.github_branch if project.github_branch is not None else None,
'last_sync': str(project.github_last_sync) if project.github_last_sync is not None else None,
'last_commit': project.github_last_commit,
'auto_build': project.github_hook_build,
'auto_pull': project.github_hook_uuid is not None
},
'supported_platforms': project.supported_platforms
}
@require_POST
@login_required
@json_view
def compile_project(request, project_id):
project = get_object_or_404(Project, pk=project_id, owner=request.user)
build = BuildResult.objects.create(project=project)
task = run_compile.delay(build.id)
return {"build_id": build.id, "task_id": task.task_id}
@require_safe
@login_required
@json_view
def last_build(request, project_id):
project = get_object_or_404(Project, pk=project_id, owner=request.user)
try:
build = project.builds.order_by('-started')[0]
except (IndexError, BuildResult.DoesNotExist):
return {"build": None}
else:
b = {
'uuid': build.uuid,
'state': build.state,
'started': str(build.started),
'finished': str(build.finished) if build.finished else None,
'id': build.id,
'download': build.package_url if project.project_type == 'package' else build.pbw_url,
'log': build.build_log_url,
'build_dir': build.get_url(),
'sizes': build.get_sizes(),
}
return {"build": b}
@require_safe
@login_required
@json_view
def build_history(request, project_id):
project = get_object_or_404(Project, pk=project_id, owner=request.user)
try:
builds = project.builds.order_by('-started')[:10]
except (IndexError, BuildResult.DoesNotExist):
return {"build": None}
out = []
for build in builds:
out.append({
'uuid': build.uuid,
'state': build.state,
'started': str(build.started),
'finished': str(build.finished) if build.finished else None,
'id': build.id,
'download': build.package_url if project.project_type == 'package' else build.pbw_url,
'log': build.build_log_url,
'build_dir': build.get_url(),
'sizes': build.get_sizes()
})
return {"builds": out}
@require_safe
@login_required
@json_view
def build_log(request, project_id, build_id):
project = get_object_or_404(Project, pk=project_id, owner=request.user)
build = get_object_or_404(BuildResult, project=project, pk=build_id)
log = build.read_build_log()
send_td_event('cloudpebble_view_build_log', data={
'data': {
'build_state': build.state
}
}, request=request, project=project)
return {"log": log}
@require_POST
@login_required
@json_view
def create_project(request):
name = request.POST['name']
template_id = request.POST.get('template', None)
if template_id is not None:
template_id = int(template_id)
project_type = request.POST.get('type', 'native')
template_name = None
sdk_version = str(request.POST.get('sdk', '2'))
try:
with transaction.atomic():
app_keys = '{}' if sdk_version == '2' else '[]'
project = Project.objects.create(
name=name,
owner=request.user,
app_company_name=request.user.username,
app_short_name=name,
app_long_name=name,
app_version_label='1.0',
app_is_watchface=False,
app_capabilities='',
project_type=project_type,
sdk_version=sdk_version,
app_keys=app_keys
)
if template_id is not None and template_id != 0:
template = TemplateProject.objects.get(pk=template_id)
template_name = template.name
template.copy_into_project(project)
elif project_type == 'simplyjs':
f = SourceFile.objects.create(project=project, file_name="app.js")
f.save_text(open('{}/src/html/demo.js'.format(settings.SIMPLYJS_ROOT)).read())
elif project_type == 'pebblejs':
f = SourceFile.objects.create(project=project, file_name="app.js")
f.save_text(open('{}/src/js/app.js'.format(settings.PEBBLEJS_ROOT)).read())
# TODO: Default file for Rocky?
project.full_clean()
project.save()
except IntegrityError as e:
raise BadRequest(str(e))
else:
send_td_event('cloudpebble_create_project', {'data': {'template': {'id': template_id, 'name': template_name}}},
request=request, project=project)
return {"id": project.id}
@require_POST
@login_required
@json_view
def save_project_settings(request, project_id):
project = get_object_or_404(Project, pk=project_id, owner=request.user)
try:
with transaction.atomic():
project.name = request.POST['name']
project.app_uuid = request.POST['app_uuid']
project.app_company_name = request.POST['app_company_name']
project.app_short_name = request.POST['app_short_name']
project.app_long_name = request.POST['app_long_name']
project.app_version_label = request.POST['app_version_label']
project.app_is_watchface = bool(int(request.POST['app_is_watchface']))
project.app_is_hidden = bool(int(request.POST['app_is_hidden']))
project.app_is_shown_on_communication = bool(int(request.POST['app_is_shown_on_communication']))
project.app_capabilities = request.POST['app_capabilities']
project.app_keys = request.POST['app_keys']
project.app_jshint = bool(int(request.POST['app_jshint']))
project.sdk_version = request.POST['sdk_version']
project.app_platforms = request.POST['app_platforms']
project.app_modern_multi_js = bool(int(request.POST['app_modern_multi_js']))
menu_icon = request.POST['menu_icon']
old_icon = project.menu_icon
if menu_icon != '':
menu_icon = int(menu_icon)
if old_icon is not None:
old_icon.is_menu_icon = False
old_icon.save()
icon_resource = project.resources.filter(id=menu_icon)[0]
icon_resource.is_menu_icon = True
icon_resource.save()
elif old_icon is not None:
old_icon.is_menu_icon = False
old_icon.save()
project.save()
except IntegrityError as e:
        raise BadRequest(str(e))
else:
send_td_event('cloudpebble_save_project_settings', request=request, project=project)
@require_POST
@login_required
@json_view
def save_project_dependencies(request, project_id):
project = get_object_or_404(Project, pk=project_id, owner=request.user)
try:
project.set_dependencies(json.loads(request.POST['dependencies']))
project.set_interdependencies([int(x) for x in json.loads(request.POST['interdependencies'])])
return {'dependencies': project.get_dependencies()}
except (IntegrityError, ValueError) as e:
raise BadRequest(str(e))
else:
send_td_event('cloudpebble_save_project_settings', request=request, project=project)
@require_POST
@login_required
@json_view
def delete_project(request, project_id):
project = get_object_or_404(Project, pk=project_id, owner=request.user)
if not bool(request.POST.get('confirm', False)):
raise BadRequest(_("Not confirmed"))
project.delete()
send_td_event('cloudpebble_delete_project', request=request, project=project)
@login_required
@require_POST
@json_view
def begin_export(request, project_id):
project = get_object_or_404(Project, pk=project_id, owner=request.user)
result = create_archive.delay(project.id)
return {'task_id': result.task_id}
@login_required
@require_safe
@json_view
def get_projects(request):
""" Gets a list of all projects owned by the user.
Accepts one possible filter: '?libraries=[id]'. If given, the list of projects
is limited to packages, and each returned package includes a 'depended_on' attribute
which is true if it is depended on by the project where pk=[id].
"""
filters = {
'owner': request.user
}
exclusions = {}
parent_project = None
libraries_for_project = int(request.GET['libraries']) if 'libraries' in request.GET else None
if libraries_for_project:
filters['project_type'] = 'package'
parent_project = get_object_or_404(Project, pk=libraries_for_project, owner=request.user)
parent_project_dependencies = parent_project.project_dependencies.all()
exclusions['pk'] = libraries_for_project
projects = Project.objects.filter(**filters).exclude(**exclusions)
def process_project(project):
data = {
'name': project.name,
'package_name': project.npm_name,
'id': project.id,
'app_version_label': project.app_version_label,
'latest_successful_build': None
}
try:
data['latest_successful_build'] = str(BuildResult.objects.filter(project=project, state=BuildResult.STATE_SUCCEEDED).latest('id').finished)
except BuildResult.DoesNotExist:
pass
if parent_project:
data['depended_on'] = project in parent_project_dependencies
return data
return {
'projects': [process_project(project) for project in projects]
}
@login_required
@require_POST
@json_view
def import_zip(request):
zip_file = request.FILES['archive']
name = request.POST['name']
try:
project = Project.objects.create(owner=request.user, name=name)
except IntegrityError as e:
raise BadRequest(str(e))
task = do_import_archive.delay(project.id, zip_file.read(), delete_project=True)
return {'task_id': task.task_id, 'project_id': project.id}
@login_required
@require_POST
@json_view
def import_github(request):
name = request.POST['name']
repo = request.POST['repo']
branch = request.POST['branch']
add_remote = (request.POST['add_remote'] == 'true')
match = re.match(r'^(?:https?://|git@|git://)?(?:www\.)?github\.com[/:]([\w.-]+)/([\w.-]+?)(?:\.git|/|$)', repo)
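    # A few hypothetical inputs (not from the original source) that this pattern
    # accepts; each yields the same ('someuser', 'someproject') groups:
    #   https://github.com/someuser/someproject
    #   git@github.com:someuser/someproject.git
    #   github.com/someuser/someproject/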
if match is None:
raise BadRequest(_("Invalid Github URL."))
github_user = match.group(1)
github_project = match.group(2)
try:
project = Project.objects.create(owner=request.user, name=name)
except IntegrityError as e:
raise BadRequest(str(e))
if add_remote:
project.github_repo = "%s/%s" % (github_user, github_project)
project.github_branch = branch
project.save()
task = do_import_github.delay(project.id, github_user, github_project, branch, delete_project=True)
return {'task_id': task.task_id, 'project_id': project.id}
@login_required
@require_POST
@json_view
def do_import_gist(request):
task = import_gist.delay(request.user.id, request.POST['gist_id'])
return {'task_id': task.task_id}
|
test/completion/comprehensions.py | kirat-singh/jedi | 4,213 | 138472 |
# -----------------
# list comprehensions
# -----------------
# basics:
a = ['' for a in [1]]
#? str()
a[0]
#? ['insert']
a.insert
a = [a for a in [1]]
#? int()
a[0]
y = 1.0
# Should not leak.
[y for y in [3]]
#? float()
y
a = [a for a in (1, 2)]
#? int()
a[0]
a = [a for a,b in [(1,'')]]
#? int()
a[0]
a = [a for (a,b) in [(1,'')]]
#? int()
a[0]
arr = [1,'']
a = [a for a in arr]
#? int()
a[0]
#? str()
a[1]
#? int() str()
a[2]
a = [a if 1.0 else '' for a in [1] if [1.0]]
#? int() str()
a[0]
# name resolve should be correct
left, right = 'a', 'b'
left, right = [x for x in (left, right)]
#? str()
left
# with a dict literal
#? int()
[a for a in {1:'x'}][0]
# list comprehensions should also work in combination with functions
def _listen(arg):
for x in arg:
#? str()
x
_listen(['' for x in [1]])
#?
([str for x in []])[0]
# -----------------
# nested list comprehensions
# -----------------
b = [a for arr in [[1, 1.0]] for a in arr]
#? int()
b[0]
#? float()
b[1]
b = [arr for arr in [[1, 1.0]] for a in arr]
#? int()
b[0][0]
#? float()
b[1][1]
b = [a for arr in [[1]] if '' for a in arr if '']
#? int()
b[0]
b = [b for arr in [[[1.0]]] for a in arr for b in a]
#? float()
b[0]
#? str()
[x for x in 'chr'][0]
# From GitHub #26
#? list()
a = [[int(v) for v in line.strip().split() if v] for line in ["123", str(), "123"] if line]
#? list()
a[0]
#? int()
a[0][0]
# From GitHub #1524
#?
[nothing for nothing, _ in [1]][0]
# -----------------
# generator comprehensions
# -----------------
left, right = (i for i in (1, ''))
#? int()
left
#? str()
right
gen = (i for i in (1,))
#? int()
next(gen)
#?
gen[0]
gen = (a for arr in [[1.0]] for a in arr)
#? float()
next(gen)
#? int()
(i for i in (1,)).send()
# issues with different formats
left, right = (i for i in
('1', 2))
#? str()
left
#? int()
right
# -----------------
# name resolution in comprehensions.
# -----------------
def x():
"""Should not try to resolve to the if hio, which was a bug."""
#? 22
[a for a in h if hio]
if hio: pass
# -----------------
# slices
# -----------------
#? list()
foo = [x for x in [1, '']][:1]
#? int()
foo[0]
#? str()
foo[1]
# -----------------
# In class
# -----------------
class X():
def __init__(self, bar):
self.bar = bar
def foo(self):
x = [a for a in self.bar][0]
#? int()
x
return x
#? int()
X([1]).foo()
# -----------------
# dict comprehensions
# -----------------
#? int()
list({a - 1: 3 for a in [1]})[0]
d = {a - 1: b for a, b in {1: 'a', 3: 1.0}.items()}
#? int()
list(d)[0]
#? str() float()
d.values()[0]
#? str()
d[0]
#? float() str()
d[1]
#? float()
d[2]
# -----------------
# set comprehensions
# -----------------
#? set()
{a - 1 for a in [1]}
#? set()
{a for a in range(10)}
#? int()
[x for x in {a for a in range(10)}][0]
#? int()
{a for a in range(10)}.pop()
#? float() str()
{b for a in [[3.0], ['']] for b in a}.pop()
#? int()
next(iter({a for a in range(10)}))
#? int()
[a for a in {1, 2, 3}][0]
# -----------------
# syntax errors
# -----------------
# Issue #1146
#? ['list']
[int(str(x.value) for x in list
def reset_missing_bracket(): pass
# -----------------
# function calls
# -----------------
def foo(arg):
return arg
x = foo(x for x in [1])
#? int()
next(x)
#?
x[0]
# While it's illegal to have more than one argument, when a generator
# expression is involved, it's still a valid parse tree and Jedi should still
# work (and especially not raise Exceptions). It's debatable whether inferring
# values for invalid statements is a good idea, but not failing is a must.
#? int()
next(foo(x for x in [1], 1))
def bar(x, y):
return y
#? str()
next(bar(x for x in [1], x for x in ['']))
|
events/migrations/0047_participant_date_of_birth.py | horacexd/clist | 166 | 138502 |
# Generated by Django 2.2.10 on 2020-04-16 20:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0046_participant_addition_fields'),
]
operations = [
migrations.AddField(
model_name='participant',
name='date_of_birth',
field=models.DateField(blank=True, null=True),
),
]
|
bitcoinetl/cli/stream.py | BTCGPU/bitcoin-etl | 274 | 138557 |
# MIT License
#
# Copyright (c) 2018 <NAME>, <EMAIL>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import click
from bitcoinetl.enumeration.chain import Chain
from bitcoinetl.rpc.bitcoin_rpc import BitcoinRpc
from blockchainetl.logging_utils import logging_basic_config
from blockchainetl.streaming.streaming_utils import configure_logging, configure_signals
from blockchainetl.thread_local_proxy import ThreadLocalProxy
logging_basic_config()
@click.command(context_settings=dict(help_option_names=['-h', '--help']))
@click.option('-l', '--last-synced-block-file', default='last_synced_block.txt', type=str,
help='The file with the last synced block number.')
@click.option('--lag', default=0, type=int, help='The number of blocks to lag behind the network.')
@click.option('-p', '--provider-uri', default='http://user:pass@localhost:8332', type=str,
help='The URI of the remote Bitcoin node.')
@click.option('-o', '--output', type=str,
help='Google PubSub topic path e.g. projects/your-project/topics/bitcoin_blockchain. '
'If not specified will print to console.')
@click.option('-s', '--start-block', default=None, type=int, help='Start block.')
@click.option('-c', '--chain', default=Chain.BITCOIN, type=click.Choice(Chain.ALL), help='The type of chain.')
@click.option('--period-seconds', default=10, type=int, help='How many seconds to sleep between syncs.')
@click.option('-b', '--batch-size', default=2, type=int, help='How many blocks to batch in single request.')
@click.option('-B', '--block-batch-size', default=10, type=int, help='How many blocks to batch in single sync round.')
@click.option('-w', '--max-workers', default=5, type=int, help='The number of workers.')
@click.option('--log-file', default=None, type=str, help='Log file.')
@click.option('--pid-file', default=None, type=str, help='pid file.')
@click.option('--enrich', default=True, type=bool, help='Enable filling in transactions inputs fields.')
def stream(last_synced_block_file, lag, provider_uri, output, start_block, chain=Chain.BITCOIN,
period_seconds=10, batch_size=2, block_batch_size=10, max_workers=5, log_file=None, pid_file=None,
enrich=True):
"""Streams all data types to console or Google Pub/Sub."""
configure_logging(log_file)
configure_signals()
from bitcoinetl.streaming.streaming_utils import get_item_exporter
from bitcoinetl.streaming.btc_streamer_adapter import BtcStreamerAdapter
from blockchainetl.streaming.streamer import Streamer
streamer_adapter = BtcStreamerAdapter(
bitcoin_rpc=ThreadLocalProxy(lambda: BitcoinRpc(provider_uri)),
item_exporter=get_item_exporter(output),
chain=chain,
batch_size=batch_size,
enable_enrich=enrich,
max_workers=max_workers
)
streamer = Streamer(
blockchain_streamer_adapter=streamer_adapter,
last_synced_block_file=last_synced_block_file,
lag=lag,
start_block=start_block,
period_seconds=period_seconds,
block_batch_size=block_batch_size,
pid_file=pid_file,
)
streamer.stream()
|
tests/contrib/aiopg/py37/test.py | p7g/dd-trace-py | 308 | 138585 |
import aiopg
# project
from ddtrace import Pin
from ddtrace.contrib.aiopg.patch import patch
from ddtrace.contrib.aiopg.patch import unpatch
from tests.contrib.asyncio.utils import AsyncioTestCase
from tests.contrib.asyncio.utils import mark_asyncio
from tests.contrib.config import POSTGRES_CONFIG
TEST_PORT = str(POSTGRES_CONFIG["port"])
class AiopgTestCase(AsyncioTestCase):
# default service
TEST_SERVICE = "postgres"
def setUp(self):
super().setUp()
self._conn = None
patch()
def tearDown(self):
super().tearDown()
if self._conn and not self._conn.closed:
self._conn.close()
unpatch()
async def _get_conn_and_tracer(self):
conn = self._conn = await aiopg.connect(**POSTGRES_CONFIG)
Pin.get_from(conn).clone(tracer=self.tracer).onto(conn)
return conn, self.tracer
@mark_asyncio
async def test_async_generator(self):
conn, tracer = await self._get_conn_and_tracer()
cursor = await conn.cursor()
q = "select 'foobarblah'"
await cursor.execute(q)
rows = []
async for row in cursor:
rows.append(row)
assert rows == [("foobarblah",)]
spans = self.pop_spans()
assert len(spans) == 1
span = spans[0]
assert span.name == "postgres.query"
|
UVTextureConverter/__init__.py | shiba6v/UVTextureConverter | 115 | 138586 |
from .Atlas2Normal import Atlas2Normal
from .Normal2Atlas import Normal2Atlas
from .UVConverter import UVConverter
|
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/test/coding20731.py | brianherrera/lumberyard | 1,738 | 138595 |
#coding:latin1
|
sc2/generate_ids.py | Yousazoe/oxBot | 242 | 138599 |
import importlib
import json
import platform
import subprocess
import sys
from pathlib import Path
from loguru import logger
from sc2.game_data import AbilityData, GameData, UnitTypeData, UpgradeData
try:
from sc2.ids.id_version import ID_VERSION_STRING
except ImportError:
ID_VERSION_STRING = "4.11.4.78285"
class IdGenerator:
def __init__(self, game_data: GameData = None, game_version: str = None, verbose: bool = False):
self.game_data: GameData = game_data
self.game_version = game_version
self.verbose = verbose
self.HEADER = f'# DO NOT EDIT!\n# This file was automatically generated by "{Path(__file__).name}"\n'
self.PF = platform.system()
self.HOME_DIR = str(Path.home())
self.DATA_JSON = {
"Darwin": self.HOME_DIR + "/Library/Application Support/Blizzard/StarCraft II/stableid.json",
"Windows": self.HOME_DIR + "/Documents/StarCraft II/stableid.json",
"Linux": self.HOME_DIR + "/Documents/StarCraft II/stableid.json",
}
self.ENUM_TRANSLATE = {
"Units": "UnitTypeId",
"Abilities": "AbilityId",
"Upgrades": "UpgradeId",
"Buffs": "BuffId",
"Effects": "EffectId",
}
self.FILE_TRANSLATE = {
"Units": "unit_typeid",
"Abilities": "ability_id",
"Upgrades": "upgrade_id",
"Buffs": "buff_id",
"Effects": "effect_id",
}
def make_key(self, key):
if key[0].isdigit():
key = "_" + key
# In patch 5.0, the key has "@" character in it which is not possible with python enums
return key.upper().replace(" ", "_").replace("@", "")
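    # Worked example with a hypothetical key, following the rules above:
    #   make_key("5x5 Flying Lock @On") -> "_5X5_FLYING_LOCK_ON"
    # (leading digit gets a "_" prefix, spaces become "_", "@" is dropped, result is upper-cased)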
def parse_data(self, data):
# for d in data: # Units, Abilities, Upgrades, Buffs, Effects
units = self.parse_simple("Units", data)
upgrades = self.parse_simple("Upgrades", data)
effects = self.parse_simple("Effects", data)
buffs = self.parse_simple("Buffs", data)
abilities = {}
for v in data["Abilities"]:
key = v["buttonname"]
remapid = v.get("remapid")
if (not key) and (remapid is None):
assert v["buttonname"] == ""
continue
if not key:
if v["friendlyname"] != "":
key = v["friendlyname"]
else:
exit(f"Not mapped: {v !r}")
key = key.upper().replace(" ", "_").replace("@", "")
if "name" in v:
key = f'{v["name"].upper().replace(" ", "_")}_{key}'
if "friendlyname" in v:
key = v["friendlyname"].upper().replace(" ", "_")
if key[0].isdigit():
key = "_" + key
if key in abilities and v["index"] == 0:
print(f"{key} has value 0 and id {v['id']}, overwriting {key}: {abilities[key]}")
# Commented out to try to fix: 3670 is not a valid AbilityId
abilities[key] = v["id"]
elif key in abilities:
print(f"{key} has appeared a second time with id={v['id']}")
else:
abilities[key] = v["id"]
abilities["SMART"] = 1
enums = {}
enums["Units"] = units
enums["Abilities"] = abilities
enums["Upgrades"] = upgrades
enums["Buffs"] = buffs
enums["Effects"] = effects
return enums
def parse_simple(self, d, data):
units = {}
for v in data[d]:
key = v["name"]
if not key:
continue
key_to_insert = self.make_key(key)
if key_to_insert in units:
index = 2
tmp = f"{key_to_insert}_{index}"
while tmp in units:
index += 1
tmp = f"{key_to_insert}_{index}"
key_to_insert = tmp
units[key_to_insert] = v["id"]
return units
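    # Example of the duplicate handling above with hypothetical data: three entries
    # all named "Reactor" would come out as REACTOR, REACTOR_2 and REACTOR_3, each
    # mapped to its own id.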
def generate_python_code(self, enums):
assert {"Units", "Abilities", "Upgrades", "Buffs", "Effects"} <= enums.keys()
sc2dir = Path(__file__).parent
idsdir = sc2dir / "ids"
idsdir.mkdir(exist_ok=True)
with (idsdir / "__init__.py").open("w") as f:
initstring = f"__all__ = {[n.lower() for n in self.FILE_TRANSLATE.values()] !r}\n".replace("'", '"')
f.write("\n".join([self.HEADER, initstring]))
for name, body in enums.items():
class_name = self.ENUM_TRANSLATE[name]
code = [self.HEADER, "import enum", "\n", f"class {class_name}(enum.Enum):"]
for key, value in sorted(body.items(), key=lambda p: p[1]):
code.append(f" {key} = {value}")
# Add repr function to more easily dump enums to dict
code += ["\n", " def __repr__(self):", ' return f"' + class_name + '.{self.name}"']
code += [
"\n",
f"for item in {class_name}:",
# f" assert not item.name in globals()",
f" globals()[item.name] = item",
"",
]
ids_file_path = (idsdir / self.FILE_TRANSLATE[name]).with_suffix(".py")
with ids_file_path.open("w") as f:
f.write("\n".join(code))
            # Apply formatting
try:
subprocess.run(["black", "--line-length", "120", ids_file_path])
except FileNotFoundError:
print(
f"Black is not installed. Please use 'pip install black' to install black formatter.\nCould not autoformat file {ids_file_path}"
)
if self.game_version is not None:
version_path = Path(__file__).parent / "ids" / "id_version.py"
with open(version_path, "w") as f:
f.write(f'ID_VERSION_STRING = "{self.game_version}"\n')
def update_ids_from_stableid_json(self):
if self.game_version is None or ID_VERSION_STRING is None or ID_VERSION_STRING != self.game_version:
if self.verbose and self.game_version is not None and ID_VERSION_STRING is not None:
logger.info(
f"Game version is different (Old: {self.game_version}, new: {ID_VERSION_STRING}. Updating ids to match game version"
)
with open(self.DATA_JSON[self.PF], encoding="utf-8") as data_file:
data = json.loads(data_file.read())
self.generate_python_code(self.parse_data(data))
# Update game_data if this is a live game
if self.game_data is not None:
self.reimport_ids()
self.update_game_data()
def reimport_ids(self):
# Reload the newly written "id" files
# TODO This only re-imports modules, but if they haven't been imported, it will yield an error
from sc2.ids.ability_id import AbilityId
importlib.reload(sys.modules["sc2.ids.ability_id"])
importlib.reload(sys.modules["sc2.ids.unit_typeid"])
importlib.reload(sys.modules["sc2.ids.upgrade_id"])
importlib.reload(sys.modules["sc2.ids.effect_id"])
importlib.reload(sys.modules["sc2.ids.buff_id"])
# importlib.reload(sys.modules["sc2.ids.id_version"])
importlib.reload(sys.modules["sc2.constants"])
def update_game_data(self):
"""Re-generate the dicts from self.game_data.
This should be done after the ids have been reimported."""
from sc2.ids.ability_id import AbilityId
ids = set(a.value for a in AbilityId if a.value != 0)
self.game_data.abilities = {
a.ability_id: AbilityData(self.game_data, a)
for a in self.game_data._proto.abilities if a.ability_id in ids
}
# self.game_data.abilities = {
# a.ability_id: AbilityData(self.game_data, a) for a in self.game_data._proto.abilities
# }
self.game_data.units = {
u.unit_id: UnitTypeData(self.game_data, u)
for u in self.game_data._proto.units if u.available
}
self.game_data.upgrades = {u.upgrade_id: UpgradeData(self.game_data, u) for u in self.game_data._proto.upgrades}
self.game_data.unit_types = {}
if __name__ == "__main__":
updater = IdGenerator()
updater.update_ids_from_stableid_json()
|
dynamic_models/exceptions.py | wieczorek1990/django-dynamic-models | 122 | 138617 |
"""Provide exceptions to be raised by the `dynamic_models` app.
All exceptions inherit from a `DynamicModelError` base class.
"""
class DynamicModelError(Exception):
"""Base exception for use in dynamic models."""
class OutdatedModelError(DynamicModelError):
"""Raised when a model's schema is outdated on save."""
class NullFieldChangedError(DynamicModelError):
"""Raised when a field is attempted to be change from NULL to NOT NULL."""
class InvalidFieldNameError(DynamicModelError):
"""Raised when a field name is invalid."""
class UnsavedSchemaError(DynamicModelError):
"""
Raised when a model schema has not been saved to the db and a dynamic model
is attempted to be created.
"""
|
examples/cluster.py | adujardin/OpenIBL | 182 | 138650 |
from __future__ import print_function, absolute_import
import argparse
import os.path as osp
import random
import numpy as np
import sys, os
import math
import h5py
from sklearn.cluster import KMeans
import torch
from torch import nn
from torch.backends import cudnn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from ibl import datasets
from ibl import models
from ibl.evaluators import extract_features, pairwise_distance
from ibl.utils.data import get_transformer_train, get_transformer_test
from ibl.utils.data.sampler import SubsetRandomSampler
from ibl.utils.data.preprocessor import Preprocessor
from ibl.utils.logging import Logger
from ibl.utils.serialization import load_checkpoint, copy_state_dict
def get_data(args, nIm):
root = osp.join(args.data_dir, args.dataset)
dataset = datasets.create(args.dataset, root, scale='30k')
cluster_set = list(set(dataset.q_train) | set(dataset.db_train))
transformer = get_transformer_test(args.height, args.width)
sampler = SubsetRandomSampler(np.random.choice(len(cluster_set), nIm, replace=False))
cluster_loader = DataLoader(Preprocessor(cluster_set, root=dataset.images_dir, transform=transformer),
batch_size=args.batch_size, num_workers=args.workers, sampler=sampler,
shuffle=False, pin_memory=True)
return dataset, cluster_loader
def get_model(args):
model = models.create(args.arch, pretrained=True, cut_at_pooling=True, matconvnet='logs/vd16_offtheshelf_conv5_3_max.pth')
model.cuda()
model = nn.DataParallel(model)
return model
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
cudnn.deterministic = True
main_worker(args)
def main_worker(args):
cudnn.benchmark = True
print("==========\nArgs:{}\n==========".format(args))
nDescriptors = 50000
nPerImage = 100
nIm = math.ceil(nDescriptors/nPerImage)
# Create data loaders
dataset, data_loader = get_data(args, nIm)
# Create model
model = get_model(args)
encoder_dim = model.module.feature_dim
# Load from resume
if args.resume:
print('Loading weights from {}'.format(args.resume))
checkpoint = load_checkpoint(args.resume)
copy_state_dict(checkpoint['state_dict'], model)
if not osp.exists(osp.join(args.logs_dir)):
os.makedirs(osp.join(args.logs_dir))
initcache = osp.join(args.logs_dir, args.arch + '_' + args.dataset + '_' + str(args.num_clusters) + '_desc_cen.hdf5')
with h5py.File(initcache, mode='w') as h5:
with torch.no_grad():
model.eval()
print('====> Extracting Descriptors')
dbFeat = h5.create_dataset("descriptors",
[nDescriptors, encoder_dim],
dtype=np.float32)
for iteration, (input, _, _, _, _) in enumerate(data_loader, 1):
input = input.cuda()
image_descriptors = model(input)
# normalization is IMPORTANT!
image_descriptors = F.normalize(image_descriptors, p=2, dim=1).view(input.size(0), encoder_dim, -1).permute(0, 2, 1)
batchix = (iteration-1)*args.batch_size*nPerImage
for ix in range(image_descriptors.size(0)):
# sample different location for each image in batch
sample = np.random.choice(image_descriptors.size(1), nPerImage, replace=False)
startix = batchix + ix*nPerImage
dbFeat[startix:startix+nPerImage, :] = image_descriptors[ix, sample, :].detach().cpu().numpy()
if (iteration % args.print_freq == 0) or (len(data_loader) <= args.print_freq):
print("==> Batch ({}/{})".format(iteration, math.ceil(nIm/args.batch_size)), flush=True)
del input, image_descriptors
print('====> Clustering')
niter = 100
kmeans = KMeans(n_clusters=args.num_clusters, max_iter=niter, random_state=args.seed).fit(dbFeat[...])
print('====> Storing centroids', kmeans.cluster_centers_.shape)
h5.create_dataset('centroids', data=kmeans.cluster_centers_)
print('====> Done!')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="VLAD centers initialization clustering")
# data
parser.add_argument('-d', '--dataset', type=str, default='pitts',
choices=datasets.names())
parser.add_argument('-b', '--batch-size', type=int, default=256,
help="tuple numbers in a batch")
parser.add_argument('-j', '--workers', type=int, default=8)
parser.add_argument('--num-clusters', type=int, default=64)
parser.add_argument('--height', type=int, default=480, help="input height")
parser.add_argument('--width', type=int, default=640, help="input width")
parser.add_argument('--seed', type=int, default=43)
parser.add_argument('--print-freq', type=int, default=10)
# model
parser.add_argument('-a', '--arch', type=str, default='vgg16',
choices=models.names())
parser.add_argument('--resume', type=str, default='', metavar='PATH')
# path
working_dir = osp.dirname(osp.abspath(__file__))
parser.add_argument('--data-dir', type=str, metavar='PATH',
default=osp.join(working_dir, 'data'))
parser.add_argument('--logs-dir', type=str, metavar='PATH',
default=osp.join(working_dir, '..', 'logs'))
main()
|
arelle/PrototypeInstanceObject.py | DataFinnovation/Arelle | 292 | 138696 |
from arelle import XmlUtil
from arelle.ModelValue import QName
from arelle.ModelObject import ModelObject
Aspect = None
class FactPrototype(): # behaves like a fact for dimensional validity testing
def __init__(self, v, aspectValues=None):
global Aspect
if Aspect is None:
from arelle.ModelFormulaObject import Aspect
self.modelXbrl = v.modelXbrl
if aspectValues is None:
aspectValues = {}
self.aspectEntryObjectId = aspectValues.get("aspectEntryObjectId", None)
if Aspect.CONCEPT in aspectValues:
qname = aspectValues[Aspect.CONCEPT]
self.qname = qname
self.concept = v.modelXbrl.qnameConcepts.get(qname)
self.isItem = self.concept is not None and self.concept.isItem
self.isTuple = self.concept is not None and self.concept.isTuple
else:
self.qname = None # undefined concept
self.concept = None # undefined concept
self.isItem = False # don't block aspectMatches
self.isTuple = False
if Aspect.LOCATION in aspectValues:
self.parent = aspectValues[Aspect.LOCATION]
try:
self.isTuple = self.parent.isTuple
except AttributeError:
self.isTuple = False
else:
self.parent = v.modelXbrl.modelDocument.xmlRootElement
self.isNumeric = self.concept is not None and self.concept.isNumeric
self.context = ContextPrototype(v, aspectValues)
if Aspect.UNIT in aspectValues:
self.unit = UnitPrototype(v, aspectValues)
else:
self.unit = None
self.factObjectId = None
def clear(self):
if self.context is not None:
self.context.clear()
self.__dict__.clear() # delete local attributes
def objectId(self):
return "_factPrototype_" + str(self.qname)
def getparent(self):
return self.parent
@property
def propertyView(self):
dims = self.context.qnameDims
return (("concept", str(self.qname) if self.concept is not None else "not specified"),
("dimensions", "({0})".format(len(dims)),
tuple(dimVal.propertyView if dimVal is not None else (str(dim.qname),"None")
for dim,dimVal in sorted(dims.items(), key=lambda i:i[0])
if hasattr(dimVal,'propertyView')))
if dims else (),
)
@property
def viewConcept(self):
return self
class ContextPrototype(): # behaves like a context
def __init__(self, v, aspectValues):
self.modelXbrl = v.modelXbrl
self.segDimVals = {}
self.scenDimVals = {}
self.qnameDims = {}
self.entityIdentifierHash = self.entityIdentifier = None
self.isStartEndPeriod = self.isInstantPeriod = self.isForeverPeriod = False
for aspect, aspectValue in aspectValues.items():
if aspect == Aspect.PERIOD_TYPE:
if aspectValue == "forever":
self.isForeverPeriod = True
elif aspectValue == "instant":
self.isInstantPeriod = True
elif aspectValue == "duration":
self.isStartEndPeriod = True
elif aspect == Aspect.START:
self.isStartEndPeriod = True
self.startDatetime = aspectValue
elif aspect == Aspect.END:
self.isStartEndPeriod = True
self.endDatetime = aspectValue
elif aspect == Aspect.INSTANT:
self.isInstantPeriod = True
self.endDatetime = self.instantDatetime = aspectValue
elif isinstance(aspect, QName):
try: # if a DimVal, then it has a suggested context element
contextElement = aspectValue.contextElement
aspectValue = (aspectValue.memberQname or aspectValue.typedMember)
except AttributeError: # probably is a QName, not a dim value or dim prototype
contextElement = v.modelXbrl.qnameDimensionContextElement.get(aspect)
if v.modelXbrl.qnameDimensionDefaults.get(aspect) != aspectValue: # not a default
try:
dimConcept = v.modelXbrl.qnameConcepts[aspect]
dimValPrototype = DimValuePrototype(v, dimConcept, aspect, aspectValue, contextElement)
self.qnameDims[aspect] = dimValPrototype
if contextElement != "scenario": # could be segment, ambiguous, or no information
self.segDimVals[dimConcept] = dimValPrototype
else:
self.scenDimVals[dimConcept] = dimValPrototype
except KeyError:
pass
elif isinstance(aspectValue, ModelObject):
# these do not expect a string aspectValue, but the object model aspect value
if aspect == Aspect.PERIOD: # period xml object
context = aspectValue.getparent()
for contextPeriodAttribute in ("isForeverPeriod", "isStartEndPeriod", "isInstantPeriod",
"startDatetime", "endDatetime", "instantDatetime",
"periodHash"):
setattr(self, contextPeriodAttribute, getattr(context, contextPeriodAttribute, None))
                elif aspect == Aspect.ENTITY_IDENTIFIER: # entityIdentifier xml object
context = aspectValue.getparent().getparent()
for entityIdentAttribute in ("entityIdentifier", "entityIdentifierHash"):
setattr(self, entityIdentAttribute, getattr(context, entityIdentAttribute, None))
def clear(self):
try:
for dim in self.qnameDims.values():
# only clear if its a prototype, but not a 'reused' model object from other instance
if isinstance(dim, DimValuePrototype):
dim.clear()
except AttributeError:
pass
self.__dict__.clear() # delete local attributes
def dimValue(self, dimQname):
"""(ModelDimension or QName) -- ModelDimension object if dimension is reported (in either context element), or QName of dimension default if there is a default, otherwise None"""
try:
return self.qnameDims[dimQname]
except KeyError:
try:
return self.modelXbrl.qnameDimensionDefaults[dimQname]
except KeyError:
return None
def dimValues(self, contextElement, oppositeContextElement=False):
if not oppositeContextElement:
return self.segDimVals if contextElement == "segment" else self.scenDimVals
else:
return self.scenDimVals if contextElement == "segment" else self.segDimVals
def nonDimValues(self, contextElement):
return []
def isEntityIdentifierEqualTo(self, cntx2):
return self.entityIdentifierHash is None or self.entityIdentifierHash == cntx2.entityIdentifierHash
def isPeriodEqualTo(self, cntx2):
if self.isForeverPeriod:
return cntx2.isForeverPeriod
elif self.isStartEndPeriod:
if not cntx2.isStartEndPeriod:
return False
return self.startDatetime == cntx2.startDatetime and self.endDatetime == cntx2.endDatetime
elif self.isInstantPeriod:
if not cntx2.isInstantPeriod:
return False
return self.instantDatetime == cntx2.instantDatetime
else:
return False
class DimValuePrototype():
def __init__(self, v, dimConcept, dimQname, mem, contextElement):
from arelle.ModelValue import QName
if dimConcept is None: # note no concepts if modelXbrl.skipDTS:
dimConcept = v.modelXbrl.qnameConcepts.get(dimQname)
self.dimension = dimConcept
self.dimensionQname = dimQname
self.contextElement = contextElement
if isinstance(mem,QName):
self.isExplicit = True
self.isTyped = False
self.memberQname = mem
self.member = v.modelXbrl.qnameConcepts.get(mem)
self.typedMember = None
else:
self.isExplicit = False
self.isTyped = True
self.typedMember = mem
self.memberQname = None
self.member = None
def clear(self):
self.__dict__.clear() # delete local attributes
@property
def propertyView(self):
if self.isExplicit:
return (str(self.dimensionQname),str(self.memberQname))
else:
return (str(self.dimensionQname),
XmlUtil.xmlstring( self.typedMember, stripXmlns=True, prettyPrint=True )
if isinstance(self.typedMember, ModelObject) else "None" )
class UnitPrototype(): # behaves like a unit
def __init__(self, v, aspectValues):
self.modelXbrl = v.modelXbrl
self.hash = self.measures = self.isSingleMeasure = None
for aspect, aspectValue in aspectValues.items():
            if aspect == Aspect.UNIT: # unit xml object
for unitAttribute in ("measures", "hash", "isSingleMeasure", "isDivide"):
setattr(self, unitAttribute, getattr(aspectValue, unitAttribute, None))
def clear(self):
self.__dict__.clear() # delete local attributes
def isEqualTo(self, unit2):
if unit2 is None or unit2.hash != self.hash:
return False
return unit2 is self or self.measures == unit2.measures
@property
def propertyView(self):
measures = self.measures
if measures[1]:
return tuple(('mul',m) for m in measures[0]) + \
tuple(('div',d) for d in measures[1])
else:
return tuple(('measure',m) for m in measures[0])
class XbrlPrototype(): # behaves like ModelXbrl
def __init__(self, modelManager, uri, *arg, **kwarg):
self.modelManager = modelManager
self.errors = []
self.skipDTS = False
from arelle.PrototypeDtsObject import DocumentPrototype
self.modelDocument = DocumentPrototype(self, uri)
def close(self):
self.modelDocument.clear()
self.__dict__.clear() # delete local attributes
|
third-party/llvm/llvm-src/utils/docker/scripts/llvm_checksum/llvm_checksum.py | jhh67/chapel | 2,338 | 138697 |
#!/usr/bin/env python
""" A small program to compute checksums of LLVM checkout.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import logging
import re
import sys
from argparse import ArgumentParser
from project_tree import *
SVN_DATES_REGEX = re.compile(r"\$(Date|LastChangedDate)[^\$]+\$")
def main():
parser = ArgumentParser()
parser.add_argument(
"-v", "--verbose", action="store_true", help="enable debug logging")
parser.add_argument(
"-c",
"--check",
metavar="reference_file",
help="read checksums from reference_file and " +
"check they match checksums of llvm_path.")
parser.add_argument(
"--partial",
action="store_true",
help="ignore projects from reference_file " +
"that are not checked out in llvm_path.")
parser.add_argument(
"--multi_dir",
action="store_true",
help="indicates llvm_path contains llvm, checked out " +
"into multiple directories, as opposed to a " +
"typical single source tree checkout.")
parser.add_argument("llvm_path")
args = parser.parse_args()
if args.check is not None:
with open(args.check, "r") as f:
reference_checksums = ReadLLVMChecksums(f)
else:
reference_checksums = None
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
llvm_projects = CreateLLVMProjects(not args.multi_dir)
checksums = ComputeLLVMChecksums(args.llvm_path, llvm_projects)
if reference_checksums is None:
WriteLLVMChecksums(checksums, sys.stdout)
sys.exit(0)
if not ValidateChecksums(reference_checksums, checksums, args.partial):
sys.stdout.write("Checksums differ.\nNew checksums:\n")
WriteLLVMChecksums(checksums, sys.stdout)
sys.stdout.write("Reference checksums:\n")
WriteLLVMChecksums(reference_checksums, sys.stdout)
sys.exit(1)
else:
sys.stdout.write("Checksums match.")
def ComputeLLVMChecksums(root_path, projects):
"""Compute checksums for LLVM sources checked out using svn.
Args:
root_path: a directory of llvm checkout.
projects: a list of LLVMProject instances, which describe checkout paths,
relative to root_path.
Returns:
A dict mapping from project name to project checksum.
"""
hash_algo = hashlib.sha256
def collapse_svn_substitutions(contents):
# Replace svn substitutions for $Date$ and $LastChangedDate$.
# Unfortunately, these are locale-specific.
    return SVN_DATES_REGEX.sub(r"$\1$", contents)
def read_and_collapse_svn_subsitutions(file_path):
with open(file_path, "rb") as f:
contents = f.read()
new_contents = collapse_svn_substitutions(contents)
if contents != new_contents:
logging.debug("Replaced svn keyword substitutions in %s", file_path)
logging.debug("\n\tBefore\n%s\n\tAfter\n%s", contents, new_contents)
return new_contents
project_checksums = dict()
# Hash each project.
for proj in projects:
project_root = os.path.join(root_path, proj.relpath)
if not os.path.exists(project_root):
logging.info("Folder %s doesn't exist, skipping project %s", proj.relpath,
proj.name)
continue
files = list()
def add_file_hash(file_path):
if os.path.islink(file_path) and not os.path.exists(file_path):
content = os.readlink(file_path)
else:
content = read_and_collapse_svn_subsitutions(file_path)
hasher = hash_algo()
hasher.update(content)
file_digest = hasher.hexdigest()
logging.debug("Checksum %s for file %s", file_digest, file_path)
files.append((file_path, file_digest))
logging.info("Computing checksum for %s", proj.name)
WalkProjectFiles(root_path, projects, proj, add_file_hash)
# Compute final checksum.
files.sort(key=lambda x: x[0])
hasher = hash_algo()
for file_path, file_digest in files:
file_path = os.path.relpath(file_path, project_root)
hasher.update(file_path)
hasher.update(file_digest)
project_checksums[proj.name] = hasher.hexdigest()
return project_checksums
def WriteLLVMChecksums(checksums, f):
"""Writes checksums to a text file.
Args:
checksums: a dict mapping from project name to project checksum (result of
ComputeLLVMChecksums).
f: a file object to write into.
"""
for proj in sorted(checksums.keys()):
f.write("{} {}\n".format(checksums[proj], proj))
def ReadLLVMChecksums(f):
"""Reads checksums from a text file, produced by WriteLLVMChecksums.
Returns:
A dict, mapping from project name to project checksum.
"""
checksums = {}
while True:
line = f.readline()
if line == "":
break
checksum, proj = line.split()
checksums[proj] = checksum
return checksums
def ValidateChecksums(reference_checksums,
new_checksums,
allow_missing_projects=False):
"""Validates that reference_checksums and new_checksums match.
Args:
reference_checksums: a dict of reference checksums, mapping from a project
name to a project checksum.
new_checksums: a dict of checksums to be checked, mapping from a project
name to a project checksum.
allow_missing_projects:
When True, reference_checksums may contain more projects than
new_checksums. Projects missing from new_checksums are ignored.
When False, new_checksums and reference_checksums must contain checksums
for the same set of projects. If there is a project in
reference_checksums, missing from new_checksums, ValidateChecksums
will return False.
Returns:
True, if checksums match with regards to allow_missing_projects flag value.
False, otherwise.
"""
if not allow_missing_projects:
if len(new_checksums) != len(reference_checksums):
return False
for proj, checksum in new_checksums.items():
# We never computed a checksum for this project.
if proj not in reference_checksums:
return False
# Checksum did not match.
if reference_checksums[proj] != checksum:
return False
return True
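# Hypothetical usage sketch (not part of the original script):
#   ref = {"llvm": "abc", "clang": "def"}
#   new = {"llvm": "abc"}
#   ValidateChecksums(ref, new)                               # False: clang is missing
#   ValidateChecksums(ref, new, allow_missing_projects=True)  # True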
if __name__ == "__main__":
main()
|
gnocchi/tests/test_bin.py | Dmitry-Eremeev/gnocchi | 299 | 138712 |
# -*- encoding: utf-8 -*-
#
# Copyright © 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import subprocess
from gnocchi.tests import base
class BinTestCase(base.BaseTestCase):
def test_gnocchi_config_generator_run(self):
with open(os.devnull, 'w') as f:
subp = subprocess.Popen(['gnocchi-config-generator'], stdout=f)
self.assertEqual(0, subp.wait())
|
score_lib.py | jazzmozart/text_scalpel | 183 | 138724 |
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utility functions for computing evaluation metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from nltk.translate import bleu_score
import numpy as np
import tensorflow as tf
import sari_hook
import utils
from curLine_file import curLine
def read_data(
path,
lowercase):
"""Reads data from prediction TSV file.
The prediction file should contain 3 or more columns:
1: sources (concatenated)
2: prediction
3-n: targets (1 or more)
Args:
path: Path to the prediction file.
lowercase: Whether to lowercase the data (to compute case insensitive
scores).
Returns:
Tuple (list of sources, list of predictions, list of target lists)
"""
sources = []
predictions = []
target_lists = []
with tf.gfile.GFile(path) as f:
for line_id, line in enumerate(f):
if line_id == 0:
continue
source, pred, *targets = line.rstrip('\n').split('\t')
if lowercase:
source = source.lower()
pred = pred.lower()
targets = [t.lower() for t in targets]
sources.append(source)
predictions.append(pred)
target_lists.append(targets)
return sources, predictions, target_lists
def compute_exact_score(predictions,
target_lists):
"""Computes the Exact score (accuracy) of the predictions.
Exact score is defined as the percentage of predictions that match at least
one of the targets.
Args:
predictions: List of predictions.
target_lists: List of targets (1 or more per prediction).
Returns:
Exact score between [0, 1].
"""
num_matches = sum(any(pred == target for target in targets)
for pred, targets in zip(predictions, target_lists))
return num_matches / max(len(predictions), 0.1) # Avoids 0/0.
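# Minimal usage sketch with hypothetical data (not from the original module):
#   compute_exact_score(["a b", "c d"], [["a b", "x"], ["y"]]) == 0.5
# since the first prediction matches one of its targets and the second matches none.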
def bleu(hyps, refs_list):
"""
calculate bleu1, bleu2, bleu3
"""
bleu_1 = []
bleu_2 = []
for hyp, refs in zip(hyps, refs_list):
if len(hyp) <= 1:
# print("ignore hyp:%s, refs:" % hyp, refs)
bleu_1.append(0.0)
bleu_2.append(0.0)
continue
score = bleu_score.sentence_bleu(
refs, hyp,
smoothing_function=None, # bleu_score.SmoothingFunction().method7,
weights=[1, 0, 0, 0])
# input(curLine())
if score > 1.0:
print(curLine(), refs, hyp)
print(curLine(), "score=", score)
input(curLine())
bleu_1.append(score)
score = bleu_score.sentence_bleu(
refs, hyp,
smoothing_function=None, # bleu_score.SmoothingFunction().method7,
weights=[0.5, 0.5, 0, 0])
bleu_2.append(score)
bleu_1 = np.average(bleu_1)
bleu_2 = np.average(bleu_2)
bleu_average_score = (bleu_1 + bleu_2) * 0.5
print("bleu_1=%f, bleu_2=%f, bleu_average_score=%f" % (bleu_1, bleu_2, bleu_average_score))
return bleu_average_score
def compute_sari_scores(
sources,
predictions,
target_lists,
ignore_wikisplit_separators=True
):
"""Computes SARI scores.
Wraps the t2t implementation of SARI computation.
Args:
sources: List of sources.
predictions: List of predictions.
target_lists: List of targets (1 or more per prediction).
ignore_wikisplit_separators: Whether to ignore "<::::>" tokens, used as
sentence separators in Wikisplit, when evaluating. For the numbers
reported in the paper, we accidentally ignored those tokens. Ignoring them
does not affect the Exact score (since there's usually always a period
before the separator to indicate sentence break), but it decreases the
SARI score (since the Addition score goes down as the model doesn't get
points for correctly adding <::::> anymore).
Returns:
Tuple (SARI score, keep score, addition score, deletion score).
"""
sari_sum = 0
keep_sum = 0
add_sum = 0
del_sum = 0
for source, pred, targets in zip(sources, predictions, target_lists):
if ignore_wikisplit_separators:
source = re.sub(' <::::> ', ' ', source)
pred = re.sub(' <::::> ', ' ', pred)
targets = [re.sub(' <::::> ', ' ', t) for t in targets]
source_ids = list(source) # utils.get_token_list(source)
pred_ids = list(pred) # utils.get_token_list(pred)
list_of_targets = [list(t) for t in targets]
sari, keep, addition, deletion = sari_hook.get_sari_score(
source_ids, pred_ids, list_of_targets, beta_for_deletion=1)
sari_sum += sari
keep_sum += keep
add_sum += addition
del_sum += deletion
n = max(len(sources), 0.1) # Avoids 0/0.
return (sari_sum / n, keep_sum / n, add_sum / n, del_sum / n)
|
AzureEnhancedMonitor/ext/test/test_aem.py | shridpant/azure-linux-extensions | 266 | 138726 |
#!/usr/bin/env python
#
#CustomScript extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import json
import unittest
import env
import aem
from Utils.WAAgentUtil import waagent
TestPublicConfig = """\
{
"cfg": [{
"key": "vmsize",
"value": "Small (A1)"
},{
"key": "vm.roleinstance",
"value": "osupdate"
},{
"key": "vm.role",
"value": "IaaS"
},{
"key": "vm.deploymentid",
"value": "cd98461b43364478a908d03d0c3135a7"
},{
"key": "vm.memory.isovercommitted",
"value": 0
},{
"key": "vm.cpu.isovercommitted",
"value": 0
},{
"key": "script.version",
"value": "1.2.0.0"
},{
"key": "verbose",
"value": "0"
},{
"key": "osdisk.connminute",
"value": "asdf.minute"
},{
"key": "osdisk.connhour",
"value": "asdf.hour"
},{
"key": "osdisk.name",
"value": "osupdate-osupdate-2015-02-12.vhd"
},{
"key": "asdf.hour.uri",
"value": "https://asdf.table.core.windows.net/$metricshourprimarytransactionsblob"
},{
"key": "asdf.minute.uri",
"value": "https://asdf.table.core.windows.net/$metricsminuteprimarytransactionsblob"
},{
"key": "asdf.hour.name",
"value": "asdf"
},{
"key": "asdf.minute.name",
"value": "asdf"
},{
"key": "wad.name",
"value": "asdf"
},{
"key": "wad.isenabled",
"value": "1"
},{
"key": "wad.uri",
"value": "https://asdf.table.core.windows.net/wadperformancecounterstable"
}]
}
"""
TestPrivateConfig = """\
{
"cfg" : [{
"key" : "asdf.minute.key",
"value" : "qwer"
},{
"key" : "wad.key",
"value" : "qwer"
}]
}
"""
class TestAEM(unittest.TestCase):
def setUp(self):
waagent.LoggerInit("/dev/null", "/dev/stdout")
def test_config(self):
publicConfig = json.loads(TestPublicConfig)
privateConfig = json.loads(TestPrivateConfig)
config = aem.EnhancedMonitorConfig(publicConfig, privateConfig)
self.assertNotEquals(None, config)
self.assertEquals(".table.core.windows.net",
config.getStorageHostBase('asdf'))
self.assertEquals(".table.core.windows.net",
config.getLADHostBase())
return config
def test_static_datasource(self):
config = self.test_config()
dataSource = aem.StaticDataSource(config)
counters = dataSource.collect()
self.assertNotEquals(None, counters)
self.assertNotEquals(0, len(counters))
name = "Cloud Provider"
counter = next((c for c in counters if c.name == name))
self.assertNotEquals(None, counter)
self.assertEquals("Microsoft Azure", counter.value)
name = "Virtualization Solution Version"
counter = next((c for c in counters if c.name == name))
self.assertNotEquals(None, counter)
self.assertNotEquals(None, counter.value)
name = "Virtualization Solution"
counter = next((c for c in counters if c.name == name))
self.assertNotEquals(None, counter)
self.assertNotEquals(None, counter.value)
name = "Instance Type"
counter = next((c for c in counters if c.name == name))
self.assertNotEquals(None, counter)
self.assertEquals("Small (A1)", counter.value)
name = "Data Sources"
counter = next((c for c in counters if c.name == name))
self.assertNotEquals(None, counter)
self.assertEquals("wad", counter.value)
name = "Data Provider Version"
counter = next((c for c in counters if c.name == name))
self.assertNotEquals(None, counter)
self.assertEquals("2.0.0", counter.value)
name = "Memory Over-Provisioning"
counter = next((c for c in counters if c.name == name))
self.assertNotEquals(None, counter)
self.assertEquals("no", counter.value)
name = "CPU Over-Provisioning"
counter = next((c for c in counters if c.name == name))
self.assertNotEquals(None, counter)
self.assertEquals("no", counter.value)
def test_cpuinfo(self):
cpuinfo = aem.CPUInfo.getCPUInfo()
self.assertNotEquals(None, cpuinfo)
self.assertNotEquals(0, cpuinfo.getNumOfCoresPerCPU())
self.assertNotEquals(0, cpuinfo.getNumOfCores())
self.assertNotEquals(None, cpuinfo.getProcessorType())
self.assertEquals(float, type(cpuinfo.getFrequency()))
self.assertEquals(bool, type(cpuinfo.isHyperThreadingOn()))
percent = cpuinfo.getCPUPercent()
self.assertEquals(float, type(percent))
self.assertTrue(percent >= 0 and percent <= 100)
def test_meminfo(self):
meminfo = aem.MemoryInfo()
self.assertNotEquals(None, meminfo.getMemSize())
self.assertEquals(long, type(meminfo.getMemSize()))
percent = meminfo.getMemPercent()
self.assertEquals(float, type(percent))
self.assertTrue(percent >= 0 and percent <= 100)
def test_networkinfo(self):
netinfo = aem.NetworkInfo()
adapterIds = netinfo.getAdapterIds()
self.assertNotEquals(None, adapterIds)
self.assertNotEquals(0, len(adapterIds))
adapterId = adapterIds[0]
self.assertNotEquals(None, aem.getMacAddress(adapterId))
self.assertNotEquals(None, netinfo.getNetworkReadBytes())
self.assertNotEquals(None, netinfo.getNetworkWriteBytes())
self.assertNotEquals(None, netinfo.getNetworkPacketRetransmitted())
def test_hwchangeinfo(self):
netinfo = aem.NetworkInfo()
testHwInfoFile = "/tmp/HwInfo"
aem.HwInfoFile = testHwInfoFile
if os.path.isfile(testHwInfoFile):
os.remove(testHwInfoFile)
hwChangeInfo = aem.HardwareChangeInfo(netinfo)
self.assertNotEquals(None, hwChangeInfo.getLastHardwareChange())
        self.assertTrue(os.path.isfile(aem.HwInfoFile))
#No hardware change
lastChange = hwChangeInfo.getLastHardwareChange()
hwChangeInfo = aem.HardwareChangeInfo(netinfo)
self.assertEquals(lastChange, hwChangeInfo.getLastHardwareChange())
#Create mock hardware
waagent.SetFileContents(testHwInfoFile, ("0\nma-ca-sa-ds-02"))
hwChangeInfo = aem.HardwareChangeInfo(netinfo)
self.assertNotEquals(None, hwChangeInfo.getLastHardwareChange())
def test_linux_metric(self):
config = self.test_config()
metric = aem.LinuxMetric(config)
self.validate_cnm_metric(metric)
#Metric for CPU, network and memory
def validate_cnm_metric(self, metric):
self.assertNotEquals(None, metric.getCurrHwFrequency())
self.assertNotEquals(None, metric.getMaxHwFrequency())
self.assertNotEquals(None, metric.getCurrVMProcessingPower())
self.assertNotEquals(None, metric.getGuaranteedMemAssigned())
self.assertNotEquals(None, metric.getMaxVMProcessingPower())
self.assertNotEquals(None, metric.getNumOfCoresPerCPU())
self.assertNotEquals(None, metric.getNumOfThreadsPerCore())
self.assertNotEquals(None, metric.getPhysProcessingPowerPerVCPU())
self.assertNotEquals(None, metric.getProcessorType())
self.assertNotEquals(None, metric.getReferenceComputeUnit())
self.assertNotEquals(None, metric.getVCPUMapping())
self.assertNotEquals(None, metric.getVMProcessingPowerConsumption())
self.assertNotEquals(None, metric.getCurrMemAssigned())
self.assertNotEquals(None, metric.getGuaranteedMemAssigned())
self.assertNotEquals(None, metric.getMaxMemAssigned())
self.assertNotEquals(None, metric.getVMMemConsumption())
adapterIds = metric.getNetworkAdapterIds()
self.assertNotEquals(None, adapterIds)
self.assertNotEquals(0, len(adapterIds))
adapterId = adapterIds[0]
self.assertNotEquals(None, metric.getNetworkAdapterMapping(adapterId))
self.assertNotEquals(None, metric.getMaxNetworkBandwidth(adapterId))
self.assertNotEquals(None, metric.getMinNetworkBandwidth(adapterId))
self.assertNotEquals(None, metric.getNetworkReadBytes())
self.assertNotEquals(None, metric.getNetworkWriteBytes())
self.assertNotEquals(None, metric.getNetworkPacketRetransmitted())
self.assertNotEquals(None, metric.getLastHardwareChange())
def test_vm_datasource(self):
config = self.test_config()
config.configData["wad.isenabled"] = "0"
dataSource = aem.VMDataSource(config)
counters = dataSource.collect()
self.assertNotEquals(None, counters)
self.assertNotEquals(0, len(counters))
counterNames = [
"Current Hw Frequency",
"Current VM Processing Power",
"Guaranteed VM Processing Power",
"Max Hw Frequency",
"Max. VM Processing Power",
"Number of Cores per CPU",
"Number of Threads per Core",
"Phys. Processing Power per vCPU",
"Processor Type",
"Reference Compute Unit",
"vCPU Mapping",
"VM Processing Power Consumption",
"Current Memory assigned",
"Guaranteed Memory assigned",
"Max Memory assigned",
"VM Memory Consumption",
"Adapter Id",
"Mapping",
"Maximum Network Bandwidth",
"Minimum Network Bandwidth",
"Network Read Bytes",
"Network Write Bytes",
"Packets Retransmitted"
]
#print "\n".join(map(lambda c: str(c), counters))
for name in counterNames:
#print name
counter = next((c for c in counters if c.name == name))
self.assertNotEquals(None, counter)
self.assertNotEquals(None, counter.value)
def test_storagemetric(self):
metrics = mock_getStorageMetrics()
self.assertNotEquals(None, metrics)
stat = aem.AzureStorageStat(metrics)
self.assertNotEquals(None, stat.getReadBytes())
self.assertNotEquals(None, stat.getReadOps())
self.assertNotEquals(None, stat.getReadOpE2ELatency())
self.assertNotEquals(None, stat.getReadOpServerLatency())
self.assertNotEquals(None, stat.getReadOpThroughput())
self.assertNotEquals(None, stat.getWriteBytes())
self.assertNotEquals(None, stat.getWriteOps())
self.assertNotEquals(None, stat.getWriteOpE2ELatency())
self.assertNotEquals(None, stat.getWriteOpServerLatency())
self.assertNotEquals(None, stat.getWriteOpThroughput())
def test_disk_info(self):
config = self.test_config()
mapping = aem.DiskInfo(config).getDiskMapping()
self.assertNotEquals(None, mapping)
def test_get_storage_key_range(self):
startKey, endKey = aem.getStorageTableKeyRange()
self.assertNotEquals(None, startKey)
self.assertEquals(13, len(startKey))
self.assertNotEquals(None, endKey)
self.assertEquals(13, len(endKey))
def test_storage_datasource(self):
aem.getStorageMetrics = mock_getStorageMetrics
config = self.test_config()
dataSource = aem.StorageDataSource(config)
counters = dataSource.collect()
self.assertNotEquals(None, counters)
self.assertNotEquals(0, len(counters))
counterNames = [
"Phys. Disc to Storage Mapping",
"Storage ID",
"Storage Read Bytes",
"Storage Read Op Latency E2E msec",
"Storage Read Op Latency Server msec",
"Storage Read Ops",
"Storage Read Throughput E2E MB/sec",
"Storage Write Bytes",
"Storage Write Op Latency E2E msec",
"Storage Write Op Latency Server msec",
"Storage Write Ops",
"Storage Write Throughput E2E MB/sec"
]
#print "\n".join(map(lambda c: str(c), counters))
for name in counterNames:
#print name
counter = next((c for c in counters if c.name == name))
self.assertNotEquals(None, counter)
self.assertNotEquals(None, counter.value)
def test_writer(self):
testEventFile = "/tmp/Event"
if os.path.isfile(testEventFile):
os.remove(testEventFile)
writer = aem.PerfCounterWriter()
counters = [aem.PerfCounter(counterType = 0,
category = "test",
name = "test",
value = "test",
unit = "test")]
writer.write(counters, eventFile = testEventFile)
with open(testEventFile) as F:
content = F.read()
self.assertEquals(str(counters[0]), content)
testEventFile = "/dev/console"
print("==============================")
print("The warning below is expected.")
self.assertRaises(IOError, writer.write, counters, 2, testEventFile)
print("==============================")
def test_easyHash(self):
hashVal = aem.easyHash('a')
self.assertEquals(97, hashVal)
hashVal = aem.easyHash('ab')
self.assertEquals(87, hashVal)
hashVal = aem.easyHash(("ciextension-SUSELinuxEnterpriseServer11SP3"
"___role1___"
"ciextension-SUSELinuxEnterpriseServer11SP3"))
self.assertEquals(5, hashVal)
def test_get_ad_key_range(self):
startKey, endKey = aem.getAzureDiagnosticKeyRange()
print(startKey)
print(endKey)
def test_get_mds_timestamp(self):
date = datetime.datetime(2015, 1, 26, 3, 54)
epoch = datetime.datetime.utcfromtimestamp(0)
unixTimestamp = (int((date - epoch).total_seconds()))
mdsTimestamp = aem.getMDSTimestamp(unixTimestamp)
self.assertEquals(635578412400000000, mdsTimestamp)
def test_get_storage_timestamp(self):
date = datetime.datetime(2015, 1, 26, 3, 54)
epoch = datetime.datetime.utcfromtimestamp(0)
unixTimestamp = (int((date - epoch).total_seconds()))
storageTimestamp = aem.getStorageTimestamp(unixTimestamp)
self.assertEquals("20150126T0354", storageTimestamp)
def mock_getStorageMetrics(*args, **kwargs):
with open(os.path.join(env.test_dir, "storage_metrics")) as F:
test_data = F.read()
jsonObjs = json.loads(test_data)
class ObjectView(object):
def __init__(self, data):
self.__dict__ = data
metrics = map(lambda x : ObjectView(x), jsonObjs)
return metrics
if __name__ == '__main__':
unittest.main()
|
chrome/browser/resources/settings/people_page/compiled_resources2.gyp
|
google-ar/chromium
| 777 |
138727
|
<filename>chrome/browser/resources/settings/people_page/compiled_resources2.gyp
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'camera',
'dependencies': [
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:load_time_data',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:util',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'change_picture',
'dependencies': [
'<(DEPTH)/third_party/polymer/v1_0/components-chromium/iron-selector/compiled_resources2.gyp:iron-selector-extracted',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:i18n_behavior',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:load_time_data',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:util',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:web_ui_listener_behavior',
'../compiled_resources2.gyp:route',
'camera',
'change_picture_browser_proxy',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'change_picture_browser_proxy',
'dependencies': [
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:cr',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'easy_unlock_browser_proxy',
'dependencies': [
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:cr',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'easy_unlock_turn_off_dialog',
'dependencies': [
'<(DEPTH)/ui/webui/resources/cr_elements/cr_dialog/compiled_resources2.gyp:cr_dialog',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:cr',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:i18n_behavior',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:web_ui_listener_behavior',
'easy_unlock_browser_proxy',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'import_data_browser_proxy',
'dependencies': [
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:cr',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'manage_profile',
'dependencies': [
'../compiled_resources2.gyp:route',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:cr',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:web_ui_listener_behavior',
'manage_profile_browser_proxy',
'sync_browser_proxy',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'manage_profile_browser_proxy',
'dependencies': [
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:cr',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'password_prompt_dialog',
'dependencies': [
'../compiled_resources2.gyp:route',
'<(EXTERNS_GYP):quick_unlock_private',
'lock_screen_constants',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'people_page',
'dependencies': [
'../compiled_resources2.gyp:route',
'../settings_page/compiled_resources2.gyp:settings_animated_pages',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:assert',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:i18n_behavior',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:icon',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:load_time_data',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:web_ui_listener_behavior',
'easy_unlock_browser_proxy',
'easy_unlock_turn_off_dialog',
'lock_screen_constants',
'lock_state_behavior',
'profile_info_browser_proxy',
'sync_browser_proxy',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'profile_info_browser_proxy',
'dependencies': [
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:cr',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'lock_state_behavior',
'dependencies': [
'../compiled_resources2.gyp:route',
'<(EXTERNS_GYP):quick_unlock_private',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'lock_screen_constants',
'dependencies': [
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:cr',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'lock_screen',
'dependencies': [
'../compiled_resources2.gyp:route',
'lock_screen_constants',
'lock_state_behavior',
'password_prompt_dialog',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:i18n_behavior',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'setup_pin_dialog',
'dependencies': [
'../compiled_resources2.gyp:route',
'lock_screen_constants',
'password_prompt_dialog',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:i18n_behavior',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'sync_page',
'dependencies': [
'../compiled_resources2.gyp:route',
'../settings_page/compiled_resources2.gyp:settings_animated_pages',
'sync_browser_proxy',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:assert',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:load_time_data',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:web_ui_listener_behavior',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'sync_browser_proxy',
'dependencies': [
'<(DEPTH)/third_party/closure_compiler/externs/compiled_resources2.gyp:metrics_private',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:cr',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:load_time_data',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'user_list',
'dependencies': [
'../compiled_resources2.gyp:route',
'<(EXTERNS_GYP):settings_private',
'<(EXTERNS_GYP):users_private',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'users_add_user_dialog',
'dependencies': [
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:assert',
'<(EXTERNS_GYP):users_private',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'users_page',
'dependencies': [
'user_list',
'users_add_user_dialog',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'import_data_dialog',
'dependencies': [
'../prefs/compiled_resources2.gyp:prefs_behavior',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:i18n_behavior',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:web_ui_listener_behavior',
'import_data_browser_proxy',
],
'includes': ['../../../../../third_party/closure_compiler/compile_js2.gypi'],
},
],
}
|
exps/losses/customize.py
|
YacobBY/DFF
| 196 |
138731
|
<gh_stars>100-1000
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Created by: <NAME>
# Email: <EMAIL>
# Copyright (c) 2019
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
"""Calculate Multi-label Loss (Semantic Loss)"""
import torch
from torch.nn.modules.loss import _Loss
torch_ver = torch.__version__[:3]
__all__ = ['EdgeDetectionReweightedLosses', 'EdgeDetectionReweightedLosses_CPU']
class WeightedCrossEntropyWithLogits(_Loss):
def __init__(self, weight=None, size_average=None, reduce=None, reduction='elementwise_mean'):
super(WeightedCrossEntropyWithLogits, self).__init__(size_average, reduce, reduction)
def forward(self, inputs, targets):
loss_total = 0
for i in range(targets.size(0)): # iterate for batch size
pred = inputs[i]
target = targets[i]
pad_mask = target[0,:,:]
target = target[1:,:,:]
target_nopad = torch.mul(target, pad_mask) # zero out the padding area
num_pos = torch.sum(target_nopad) # true positive number
num_total = torch.sum(pad_mask) # true total number
num_neg = num_total - num_pos
pos_weight = (num_neg / num_pos).clamp(min=1, max=num_total) # compute a pos_weight for each image
max_val = (-pred).clamp(min=0)
log_weight = 1 + (pos_weight - 1) * target
loss = pred - pred * target + log_weight * (max_val + ((-max_val).exp() + (-pred - max_val).exp()).log())
loss = loss * pad_mask
loss = loss.mean()
loss_total = loss_total + loss
loss_total = loss_total / targets.size(0)
return loss_total
class EdgeDetectionReweightedLosses(WeightedCrossEntropyWithLogits):
"""docstring for EdgeDetectionReweightedLosses"""
def __init__(self, weight=None, side5_weight=1, fuse_weight=1):
super(EdgeDetectionReweightedLosses, self).__init__(weight=weight)
self.side5_weight = side5_weight
self.fuse_weight = fuse_weight
def forward(self, *inputs):
side5, fuse, target = tuple(inputs)
loss_side5 = super(EdgeDetectionReweightedLosses, self).forward(side5, target)
loss_fuse = super(EdgeDetectionReweightedLosses, self).forward(fuse, target)
loss = loss_side5 * self.side5_weight + loss_fuse * self.fuse_weight
return loss
class EdgeDetectionReweightedLosses_CPU(WeightedCrossEntropyWithLogits):
"""docstring for EdgeDetectionReweightedLosses"""
"""CPU version used to dubug"""
def __init__(self, weight=None, side5_weight=1, fuse_weight=1):
super(EdgeDetectionReweightedLosses_CPU, self).__init__(weight=weight)
self.side5_weight = side5_weight
self.fuse_weight = fuse_weight
def forward(self, *inputs):
pred, target = tuple(inputs)
loss_side5 = super(EdgeDetectionReweightedLosses_CPU, self).forward(pred[0], target)
loss_fuse = super(EdgeDetectionReweightedLosses_CPU, self).forward(pred[1], target)
loss = loss_side5 * self.side5_weight + loss_fuse * self.fuse_weight
return loss
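# ---------------------------------------------------------------------------
# Illustrative usage sketch added for clarity (not part of the upstream file).
# Shapes are assumptions: a batch of 2 predictions with 19 edge classes, and a
# target whose channel 0 is the padding mask and whose remaining channels are
# the binary edge labels, matching WeightedCrossEntropyWithLogits.forward.
if __name__ == '__main__':
    criterion = EdgeDetectionReweightedLosses(side5_weight=1, fuse_weight=1)
    side5 = torch.randn(2, 19, 32, 32)             # side-output logits
    fuse = torch.randn(2, 19, 32, 32)              # fused-output logits
    pad_mask = torch.ones(2, 1, 32, 32)            # 1 marks valid (non-padded) pixels
    labels = torch.randint(0, 2, (2, 19, 32, 32)).float()
    target = torch.cat([pad_mask, labels], dim=1)  # channel 0 is the pad mask
    loss = criterion(side5, fuse, target)
    print('example loss:', loss.item())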
|
jupyter_ascending/requests/sync.py
|
nikvdp/jupyter_ascending
| 178 |
138734
|
import argparse
from pathlib import Path
from loguru import logger
from jupyter_ascending._environment import SYNC_EXTENSION
from jupyter_ascending.json_requests import SyncRequest
from jupyter_ascending.logger import setup_logger
from jupyter_ascending.requests.client_lib import request_notebook_command
@logger.catch
def send(file_name: str):
if f".{SYNC_EXTENSION}.py" not in file_name:
return
logger.info(f"Syncing File: {file_name}...")
file_name = str(Path(file_name).absolute())
with open(file_name, "r") as reader:
raw_result = reader.read()
request_obj = SyncRequest(file_name=file_name, contents=raw_result)
request_notebook_command(request_obj)
logger.info("... Complete")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
setup_logger()
parser.add_argument("--filename", help="Filename to send")
arguments = parser.parse_args()
send(arguments.filename)
|
turbo/helper.py
|
wecatch/app-turbo
| 157 |
138740
|
from __future__ import absolute_import, division, print_function, with_statement
import sys
from turbo.log import helper_log
from turbo.util import import_object, camel_to_underscore
class _HelperObjectDict(dict):
def __setitem__(self, name, value):
return super(_HelperObjectDict, self).setdefault(name, value)
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise ValueError(name)
def install_helper(installing_helper_list, package_space):
for item in installing_helper_list:
# db model package
package = import_object('.'.join(['helpers', item]), package_space)
package_space[item] = _HelperObjectDict()
# all py files included by package
all_modules = getattr(package, '__all__', [])
for m in all_modules:
try:
module = import_object(
'.'.join(['helpers', item, m]), package_space)
except:
helper_log.error('module helpers.%s.%s Import Error' %
(item, m), exc_info=True)
sys.exit(0)
for model_name in getattr(module, 'MODEL_SLOTS', []):
model = getattr(module, model_name, None)
if model:
camel_name = model.__name__
underscore_name = camel_to_underscore(camel_name)
package_space[item][underscore_name] = model()
package_space[item][camel_name] = model
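# ---------------------------------------------------------------------------
# Illustrative sketch added for clarity (not part of the upstream module).
# Every name below is hypothetical; it only shows the package layout that
# install_helper() expects:
#
#   helpers/
#       __init__.py
#       db/
#           __init__.py      # defines __all__ = ['account']
#           account.py       # defines MODEL_SLOTS = ['Account'] and class Account
#
# Called from the package that owns `helpers`, this registers both the model
# instance (underscore name) and the class (camel-case name):
#
#   install_helper(['db'], globals())
#   record = db.account          # instance of Account
#   cls = db.Account             # the Account class itself
# ---------------------------------------------------------------------------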
|
gpustat_web/app.py
|
Linyxus/gpustat-web
| 234 |
138753
|
<filename>gpustat_web/app.py
"""
gpustat.web
MIT License
Copyright (c) 2018-2020 <NAME> (@wookayin)
"""
from typing import List, Tuple, Optional
import os
import sys
import traceback
import urllib
import ssl
import asyncio
import asyncssh
import aiohttp
from datetime import datetime
from collections import OrderedDict, Counter
from termcolor import cprint, colored
from aiohttp import web
import aiohttp_jinja2 as aiojinja2
__PATH__ = os.path.abspath(os.path.dirname(__file__))
DEFAULT_GPUSTAT_COMMAND = "gpustat --color --gpuname-width 25"
###############################################################################
# Background workers to collect information from nodes
###############################################################################
class Context(object):
'''The global context object.'''
def __init__(self):
self.host_status = OrderedDict()
self.interval = 5.0
def host_set_message(self, hostname: str, msg: str):
self.host_status[hostname] = colored(f"({hostname}) ", 'white') + msg + '\n'
context = Context()
async def run_client(hostname: str, exec_cmd: str, *, port=22,
poll_delay=None, timeout=30.0,
name_length=None, verbose=False):
    '''An async handler to collect gpustat through an SSH channel.'''
L = name_length or 0
if poll_delay is None:
poll_delay = context.interval
async def _loop_body():
        # establish an SSH connection.
async with asyncssh.connect(hostname, port=port) as conn:
cprint(f"[{hostname:<{L}}] SSH connection established!", attrs=['bold'])
while True:
if False: #verbose: XXX DEBUG
print(f"[{hostname:<{L}}] querying... ")
result = await asyncio.wait_for(conn.run(exec_cmd), timeout=timeout)
now = datetime.now().strftime('%Y/%m/%d-%H:%M:%S.%f')
if result.exit_status != 0:
cprint(f"[{now} [{hostname:<{L}}] Error, exitcode={result.exit_status}", color='red')
cprint(result.stderr or '', color='red')
stderr_summary = (result.stderr or '').split('\n')[0]
context.host_set_message(hostname, colored(f'[exitcode {result.exit_status}] {stderr_summary}', 'red'))
else:
if verbose:
cprint(f"[{now} [{hostname:<{L}}] OK from gpustat ({len(result.stdout)} bytes)", color='cyan')
# update data
context.host_status[hostname] = result.stdout
# wait for a while...
await asyncio.sleep(poll_delay)
while True:
try:
# start SSH connection, or reconnect if it was disconnected
await _loop_body()
except asyncio.CancelledError:
cprint(f"[{hostname:<{L}}] Closed as being cancelled.", attrs=['bold'])
break
except (asyncio.TimeoutError) as ex:
# timeout (retry)
cprint(f"Timeout after {timeout} sec: {hostname}", color='red')
context.host_set_message(hostname, colored(f"Timeout after {timeout} sec", 'red'))
except (asyncssh.misc.DisconnectError, asyncssh.misc.ChannelOpenError, OSError) as ex:
# error or disconnected (retry)
cprint(f"Disconnected : {hostname}, {str(ex)}", color='red')
context.host_set_message(hostname, colored(str(ex), 'red'))
except Exception as e:
# A general exception unhandled, throw
cprint(f"[{hostname:<{L}}] {e}", color='red')
context.host_set_message(hostname, colored(f"{type(e).__name__}: {e}", 'red'))
cprint(traceback.format_exc())
raise
# retry upon timeout/disconnected, etc.
cprint(f"[{hostname:<{L}}] Disconnected, retrying in {poll_delay} sec...", color='yellow')
await asyncio.sleep(poll_delay)
async def spawn_clients(hosts: List[str], exec_cmd: str, *,
default_port: int, verbose=False):
'''Create a set of async handlers, one per host.'''
def _parse_host_string(netloc: str) -> Tuple[str, Optional[int]]:
"""Parse a connection string (netloc) in the form of `HOSTNAME[:PORT]`
and returns (HOSTNAME, PORT)."""
pr = urllib.parse.urlparse('ssh://{}/'.format(netloc))
assert pr.hostname is not None, netloc
return (pr.hostname, pr.port)
try:
host_names, host_ports = zip(*(_parse_host_string(host) for host in hosts))
# initial response
for hostname in host_names:
context.host_set_message(hostname, "Loading ...")
name_length = max(len(hostname) for hostname in host_names)
# launch all clients parallel
await asyncio.gather(*[
run_client(hostname, exec_cmd, port=port or default_port,
verbose=verbose, name_length=name_length)
for (hostname, port) in zip(host_names, host_ports)
])
except Exception as ex:
# TODO: throw the exception outside and let aiohttp abort startup
traceback.print_exc()
cprint(colored("Error: An exception occured during the startup.", 'red'))
###############################################################################
# webserver handlers.
###############################################################################
# monkey-patch ansi2html scheme. TODO: better color codes
import ansi2html
scheme = 'solarized'
ansi2html.style.SCHEME[scheme] = list(ansi2html.style.SCHEME[scheme])
ansi2html.style.SCHEME[scheme][0] = '#555555'
ansi_conv = ansi2html.Ansi2HTMLConverter(dark_bg=True, scheme=scheme)
def render_gpustat_body():
body = ''
for host, status in context.host_status.items():
if not status:
continue
body += status
return ansi_conv.convert(body, full=False)
async def handler(request):
'''Renders the html page.'''
data = dict(
ansi2html_headers=ansi_conv.produce_headers().replace('\n', ' '),
http_host=request.host,
interval=int(context.interval * 1000)
)
response = aiojinja2.render_template('index.html', request, data)
response.headers['Content-Language'] = 'en'
return response
async def websocket_handler(request):
print("INFO: Websocket connection from {} established".format(request.remote))
ws = web.WebSocketResponse()
await ws.prepare(request)
async def _handle_websocketmessage(msg):
if msg.data == 'close':
await ws.close()
else:
# send the rendered HTML body as a websocket message.
body = render_gpustat_body()
await ws.send_str(body)
async for msg in ws:
if msg.type == aiohttp.WSMsgType.CLOSE:
break
elif msg.type == aiohttp.WSMsgType.TEXT:
await _handle_websocketmessage(msg)
elif msg.type == aiohttp.WSMsgType.ERROR:
cprint("Websocket connection closed with exception %s" % ws.exception(), color='red')
print("INFO: Websocket connection from {} closed".format(request.remote))
return ws
###############################################################################
# app factory and entrypoint.
###############################################################################
def create_app(loop, *,
hosts=['localhost'],
default_port: int = 22,
ssl_certfile: Optional[str] = None,
ssl_keyfile: Optional[str] = None,
exec_cmd: Optional[str] = None,
verbose=True):
if not exec_cmd:
exec_cmd = DEFAULT_GPUSTAT_COMMAND
app = web.Application()
app.router.add_get('/', handler)
app.add_routes([web.get('/ws', websocket_handler)])
async def start_background_tasks(app):
clients = spawn_clients(
hosts, exec_cmd, default_port=default_port, verbose=verbose)
app['tasks'] = loop.create_task(clients)
await asyncio.sleep(0.1)
app.on_startup.append(start_background_tasks)
async def shutdown_background_tasks(app):
cprint(f"... Terminating the application", color='yellow')
app['tasks'].cancel()
app.on_shutdown.append(shutdown_background_tasks)
# jinja2 setup
import jinja2
aiojinja2.setup(app,
loader=jinja2.FileSystemLoader(
os.path.join(__PATH__, 'template'))
)
# SSL setup
if ssl_certfile and ssl_keyfile:
ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_context.load_cert_chain(certfile=ssl_certfile,
keyfile=ssl_keyfile)
cprint(f"Using Secure HTTPS (SSL/TLS) server ...", color='green')
else:
ssl_context = None # type: ignore
return app, ssl_context
def main():
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('hosts', nargs='*',
help='List of nodes. Syntax: HOSTNAME[:PORT]')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--port', type=int, default=48109,
help="Port number the web application will listen to. (Default: 48109)")
parser.add_argument('--ssh-port', type=int, default=22,
help="Default SSH port to establish connection through. (Default: 22)")
parser.add_argument('--interval', type=float, default=5.0,
help="Interval (in seconds) between two consecutive requests.")
parser.add_argument('--ssl-certfile', type=str, default=None,
help="Path to the SSL certificate file (Optional, if want to run HTTPS server)")
parser.add_argument('--ssl-keyfile', type=str, default=None,
help="Path to the SSL private key file (Optional, if want to run HTTPS server)")
parser.add_argument('--exec', type=str,
default=DEFAULT_GPUSTAT_COMMAND,
help="command-line to execute (e.g. gpustat --color --gpuname-width 25)")
args = parser.parse_args()
hosts = args.hosts or ['localhost']
cprint(f"Hosts : {hosts}", color='green')
cprint(f"Cmd : {args.exec}", color='yellow')
if args.interval > 0.1:
context.interval = args.interval
loop = asyncio.get_event_loop()
app, ssl_context = create_app(
loop, hosts=hosts, default_port=args.ssh_port,
ssl_certfile=args.ssl_certfile, ssl_keyfile=args.ssl_keyfile,
exec_cmd=args.exec,
verbose=args.verbose)
web.run_app(app, host='0.0.0.0', port=args.port,
ssl_context=ssl_context)
if __name__ == '__main__':
main()
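# ---------------------------------------------------------------------------
# Illustrative invocation sketch added for clarity (not part of the upstream
# file). Host names are placeholders; it assumes the package is importable so
# this module can be run with `python -m`:
#
#   python -m gpustat_web.app gpu-node1 gpu-node2:2222 \
#       --port 48109 --ssh-port 22 --interval 5.0 \
#       --exec "gpustat --color --gpuname-width 25"
#
# Passing --ssl-certfile and --ssl-keyfile switches the server to HTTPS, as
# handled in create_app() above.
# ---------------------------------------------------------------------------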
|
pandas_ml/skaccessors/pipeline.py
|
matsavage/pandas-ml
| 305 |
138782
|
#!/usr/bin/env python
from pandas_ml.core.accessor import _AccessorMethods
class PipelineMethods(_AccessorMethods):
"""
Accessor to ``sklearn.pipeline``.
"""
_module_name = 'sklearn.pipeline'
@property
def make_pipeline(self):
"""``sklearn.pipeline.make_pipeline``"""
# not included in __all__
return self._module.make_pipeline
@property
def make_union(self):
"""``sklearn.pipeline.make_union``"""
# not included in __all__
return self._module.make_union
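# ---------------------------------------------------------------------------
# Illustrative sketch added for clarity (not part of the upstream file). The
# `.pipeline` attribute name on the host frame is an assumption here;
# _AccessorMethods subclasses are normally attached to a pandas-ml ModelFrame
# (`mf` below is such a frame, assumed):
#
#   from sklearn.preprocessing import StandardScaler
#   from sklearn.svm import SVC
#   pipe = mf.pipeline.make_pipeline(StandardScaler(), SVC())
# ---------------------------------------------------------------------------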
|
utests/specs/test.py
|
linarnan/ptf
| 113 |
138792
|
import ptf
from ptf.base_tests import BaseTest
from ptf import testutils
class TestParamsGet(BaseTest):
def setUp(self):
BaseTest.setUp(self)
def runTest(self):
params = testutils.test_params_get(default=None)
if params is None:
print(">>>None")
else:
for k, v in params.items():
print(">>>{}={}".format(k, v))
class TestParamGet(BaseTest):
def setUp(self):
BaseTest.setUp(self)
def runTest(self):
v = testutils.test_param_get('k1', default=-1)
if v is None:
print(">>>None")
else:
print(">>>k1={}".format(v))
|
alipay/aop/api/domain/AlipayUserSafeboxRecordSaveModel.py
|
snowxmas/alipay-sdk-python-all
| 213 |
138794
|
<reponame>snowxmas/alipay-sdk-python-all<filename>alipay/aop/api/domain/AlipayUserSafeboxRecordSaveModel.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayUserSafeboxRecordSaveModel(object):
def __init__(self):
self._content = None
self._key_version = None
self._scene_code = None
self._title = None
self._unique_id = None
self._user_id = None
@property
def content(self):
return self._content
@content.setter
def content(self, value):
self._content = value
@property
def key_version(self):
return self._key_version
@key_version.setter
def key_version(self, value):
self._key_version = value
@property
def scene_code(self):
return self._scene_code
@scene_code.setter
def scene_code(self, value):
self._scene_code = value
@property
def title(self):
return self._title
@title.setter
def title(self, value):
self._title = value
@property
def unique_id(self):
return self._unique_id
@unique_id.setter
def unique_id(self, value):
self._unique_id = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.content:
if hasattr(self.content, 'to_alipay_dict'):
params['content'] = self.content.to_alipay_dict()
else:
params['content'] = self.content
if self.key_version:
if hasattr(self.key_version, 'to_alipay_dict'):
params['key_version'] = self.key_version.to_alipay_dict()
else:
params['key_version'] = self.key_version
if self.scene_code:
if hasattr(self.scene_code, 'to_alipay_dict'):
params['scene_code'] = self.scene_code.to_alipay_dict()
else:
params['scene_code'] = self.scene_code
if self.title:
if hasattr(self.title, 'to_alipay_dict'):
params['title'] = self.title.to_alipay_dict()
else:
params['title'] = self.title
if self.unique_id:
if hasattr(self.unique_id, 'to_alipay_dict'):
params['unique_id'] = self.unique_id.to_alipay_dict()
else:
params['unique_id'] = self.unique_id
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayUserSafeboxRecordSaveModel()
if 'content' in d:
o.content = d['content']
if 'key_version' in d:
o.key_version = d['key_version']
if 'scene_code' in d:
o.scene_code = d['scene_code']
if 'title' in d:
o.title = d['title']
if 'unique_id' in d:
o.unique_id = d['unique_id']
if 'user_id' in d:
o.user_id = d['user_id']
return o
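# ---------------------------------------------------------------------------
# Illustrative round-trip sketch added for clarity (not part of the generated
# SDK file); all field values are placeholders.
if __name__ == '__main__':
    model = AlipayUserSafeboxRecordSaveModel()
    model.title = 'backup codes'
    model.scene_code = 'SAFEBOX_DEMO'
    model.unique_id = 'record-001'
    payload = model.to_alipay_dict()  # plain dict, ready to be serialized for the request
    clone = AlipayUserSafeboxRecordSaveModel.from_alipay_dict(payload)
    print(json.dumps(payload), clone.title)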
|
machina/algos/qtopt.py
|
krish-dx/machina
| 302 |
138797
|
<reponame>krish-dx/machina
"""
This is an implementation of QT-Opt.
https://arxiv.org/abs/1806.10293
"""
from machina import loss_functional as lf
from machina import logger
def train(traj,
qf, lagged_qf, targ_qf1, targ_qf2,
optim_qf,
epoch, batch_size, # optimization hypers
tau=0.9999, gamma=0.9, # advantage estimation
loss_type='mse',
log_enable=True,
):
"""
Train function for qtopt
Parameters
----------
traj : Traj
Off policy trajectory.
qf : SAVfunction
Q function.
lagged_qf : SAVfunction
Lagged Q function.
targ_qf1 : CEMSAVfunction
Target Q function.
targ_qf2 : CEMSAVfunction
Lagged Target Q function.
optim_qf : torch.optim.Optimizer
Optimizer for Q function.
epoch : int
Number of iteration.
batch_size : int
Number of batches.
tau : float
Target updating rate.
gamma : float
Discounting rate.
loss_type : string
        Type of Bellman loss.
log_enable: bool
If True, enable logging
Returns
-------
result_dict : dict
Dictionary which contains losses information.
"""
qf_losses = []
if log_enable:
logger.log("Optimizing...")
iterator = traj.random_batch(batch_size, epoch)
for batch in iterator:
qf_bellman_loss = lf.clipped_double_bellman(
qf, targ_qf1, targ_qf2, batch, gamma, loss_type=loss_type)
optim_qf.zero_grad()
qf_bellman_loss.backward()
optim_qf.step()
for q, targ_q1 in zip(qf.parameters(), targ_qf1.parameters()):
targ_q1.detach().copy_((1 - tau) * targ_q1.detach() + tau * q.detach())
for lagged_q, targ_q2 in zip(lagged_qf.parameters(), targ_qf2.parameters()):
targ_q2.detach().copy_((1 - tau) * targ_q2.detach() + tau * lagged_q.detach())
qf_losses.append(qf_bellman_loss.detach().cpu().numpy())
if log_enable:
logger.log("Optimization finished!")
return {'QfLoss': qf_losses}
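# ---------------------------------------------------------------------------
# Illustrative call sketch added for clarity (not part of the upstream module).
# The objects below are placeholders the caller is expected to build with the
# machina primitives named in the docstring (an off-policy Traj, Q functions,
# and a torch optimizer over qf's parameters):
#
#   result = train(traj,
#                  qf, lagged_qf, targ_qf1, targ_qf2,
#                  optim_qf,
#                  epoch=100, batch_size=32,
#                  tau=0.9999, gamma=0.9,
#                  loss_type='mse')
#   mean_qf_loss = sum(result['QfLoss']) / len(result['QfLoss'])
# ---------------------------------------------------------------------------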
|
internetdefense/apps/include/management/commands/clearcache.py
|
gnubrasil/idl-members
| 175 |
138805
|
<reponame>gnubrasil/idl-members
from django.core.cache import cache
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = 'Flush the entire cache'
def handle(self, *args, **options):
cache.clear()
self.stdout.write('Cache cleared\n')
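# ---------------------------------------------------------------------------
# Illustrative usage added for clarity (not part of the upstream file). As a
# standard Django management command, it is invoked through manage.py:
#
#   python manage.py clearcache
# ---------------------------------------------------------------------------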
|
veles/external/manhole.py
|
AkshayJainG/veles
| 1,007 |
138810
|
<gh_stars>1000+
from __future__ import print_function
from logging import getLogger
logger = getLogger(__name__)
import traceback
import socket
import struct
import sys
import os
import atexit
import signal
import errno
import platform
import weakref
try:
import signalfd
except ImportError:
signalfd = None
try:
string = basestring
except NameError: # python 3
string = str
try:
InterruptedError = InterruptedError
except NameError: # python <= 3.2
InterruptedError = OSError
if hasattr(sys, 'setswitchinterval'):
setinterval = sys.setswitchinterval
getinterval = sys.getswitchinterval
else:
setinterval = sys.setcheckinterval
getinterval = sys.getcheckinterval
def _get_original(qual_name):
mod, name = qual_name.split('.')
original = getattr(__import__(mod), name)
try:
from gevent.monkey import get_original
original = get_original(mod, name)
except (ImportError, SyntaxError):
pass
try:
from eventlet.patcher import original
original = getattr(original(mod), name)
except (ImportError, SyntaxError):
pass
return original
_ORIGINAL_SOCKET = _get_original('socket.socket')
_ORIGINAL_FDOPEN = _get_original('os.fdopen')
try:
_ORIGINAL_ALLOCATE_LOCK = _get_original('thread.allocate_lock')
except ImportError: # python 3
_ORIGINAL_ALLOCATE_LOCK = _get_original('_thread.allocate_lock')
_ORIGINAL_THREAD = _get_original('threading.Thread')
_ORIGINAL_EVENT = _get_original('threading.Event')
_ORIGINAL__ACTIVE = _get_original('threading._active')
PY3 = sys.version_info[0] == 3
PY26 = sys.version_info[:2] == (2, 6)
VERBOSE = True
START_TIMEOUT = None
try:
import ctypes
import ctypes.util
libpthread_path = ctypes.util.find_library("pthread")
if not libpthread_path:
raise ImportError
libpthread = ctypes.CDLL(libpthread_path)
if not hasattr(libpthread, "pthread_setname_np"):
raise ImportError
_pthread_setname_np = libpthread.pthread_setname_np
_pthread_setname_np.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
_pthread_setname_np.restype = ctypes.c_int
pthread_setname_np = lambda ident, name: _pthread_setname_np(ident, name[:15].encode('utf8'))
except ImportError:
pthread_setname_np = lambda ident, name: None
# OS X getsockopt(2) defines (may work for BSD too?)
SOL_LOCAL = 0
LOCAL_PEERCRED = 1
SO_PEERCRED = 17
COLOR_HIGHLIGHT = "\033[1;35m" if sys.stdout.isatty() else ""
COLOR_RESET = "\033[0m" if sys.stdout.isatty() else ""
system = platform.system()
def cry(message):
"""
Fail-ignorant logging function.
"""
if VERBOSE:
try:
_STDERR.write("%sMANHOLE:%s%s\n" % (COLOR_HIGHLIGHT, COLOR_RESET,
message))
except: # pylint: disable=W0702
pass
def get_peercred(sock):
"""Gets the (pid, uid, gid) for the client on the given *connected* socket."""
if system == 'Darwin':
return struct.unpack('3i', sock.getsockopt(
SOL_LOCAL, LOCAL_PEERCRED, struct.calcsize('3i')
))
else:
return struct.unpack('3i', sock.getsockopt(
socket.SOL_SOCKET, SO_PEERCRED, struct.calcsize('3i')
))
class SuspiciousClient(Exception):
pass
class Manhole(_ORIGINAL_THREAD):
"""
Thread that runs the infamous "Manhole".
"""
def __init__(self, sigmask, start_timeout, workflow):
super(Manhole, self).__init__()
self.daemon = True
self.name = "Manhole"
self.sigmask = sigmask
self.serious = _ORIGINAL_EVENT()
# time to wait for the manhole to get serious (to have a complete start)
# see: http://emptysqua.re/blog/dawn-of-the-thread/
self.start_timeout = start_timeout
self.workflow = workflow
@property
def workflow(self):
return self._workflow()
@workflow.setter
def workflow(self, value):
self._workflow = weakref.ref(value)
def start(self):
super(Manhole, self).start()
if not self.serious.wait(self.start_timeout) and not PY26:
cry("WARNING: Waited %s seconds but Manhole thread didn't start yet :(" % self.start_timeout)
@staticmethod
def get_socket():
sock = _ORIGINAL_SOCKET(socket.AF_UNIX, socket.SOCK_STREAM)
pid = os.getpid()
name = "/tmp/manhole-%s" % pid
if os.path.exists(name):
os.unlink(name)
sock.bind(name)
sock.listen(5)
cry("Manhole UDS path: nc -U %s" % name)
return sock, pid
def interact(self):
dump_stacktraces()
from veles.interaction import Shell
Shell.fix_netcat_colors()
shell = Shell(self.workflow, name="Manhole")
shell.thread_pool.pause()
shell.initialize()
try:
shell.interact({
'dump_stacktraces': dump_stacktraces,
'sys': sys,
'os': os,
'socket': socket,
'traceback': traceback,
'pause': shell.thread_pool.pause,
'resume': shell.thread_pool.resume,
})
except (EOFError, BrokenPipeError):
cry("Client has been dropped.")
finally:
shell.workflow.del_ref(shell)
shell.thread_pool.resume()
"""
import code
code.InteractiveConsole({
'dump_stacktraces': dump_stacktraces,
'sys': sys,
'os': os,
'socket': socket,
'traceback': traceback,
}).interact()
"""
def run(self):
self.serious.set()
if signalfd and self.sigmask:
signalfd.sigprocmask(signalfd.SIG_BLOCK, self.sigmask)
pthread_setname_np(self.ident, self.name)
sock, pid = self.get_socket()
while True:
cry("Waiting for a new connection (in pid %s) ..." % pid)
try:
client = ManholeConnection(sock.accept()[0], self.sigmask)
client.start()
client.join()
except (InterruptedError, socket.error) as e:
if e.errno != errno.EINTR:
raise
continue
finally:
client = None
class ManholeConnection(_ORIGINAL_THREAD):
def __init__(self, client, sigmask):
super(ManholeConnection, self).__init__()
self.daemon = False
self.client = client
self.name = "ManholeConnection"
self.sigmask = sigmask
def run(self):
cry('Started ManholeConnection thread. Checking credentials ...')
if signalfd and self.sigmask:
signalfd.sigprocmask(signalfd.SIG_BLOCK, self.sigmask)
pthread_setname_np(self.ident, "Manhole ----")
pid, _, _ = self.check_credentials(self.client)
pthread_setname_np(self.ident, "Manhole %s" % pid)
self.handle(self.client)
@staticmethod
def check_credentials(client):
pid, uid, gid = get_peercred(client)
euid = os.geteuid()
client_name = "PID:%s UID:%s GID:%s" % (pid, uid, gid)
if uid not in (0, euid):
raise SuspiciousClient("Can't accept client with %s. It doesn't match the current EUID:%s or ROOT." % (
client_name, euid
))
cry("Accepted connection %s from %s" % (client, client_name))
return pid, uid, gid
@staticmethod
def handle(client):
client.settimeout(None)
# # disable this till we have evidence that it's needed
# client.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 0)
# # Note: setting SO_RCVBUF on UDS has no effect, see: http://man7.org/linux/man-pages/man7/unix.7.html
backup = []
old_interval = getinterval()
try:
try:
client_fd = client.fileno()
for mode, names in (
('w', (
'stderr',
'stdout',
'__stderr__',
'__stdout__'
)),
('r', (
'stdin',
'__stdin__'
))
):
for name in names:
backup.append((name, getattr(sys, name)))
setattr(sys, name, _ORIGINAL_FDOPEN(client_fd, mode, 1 if PY3 else 0))
_INST.interact()
cry("Finished interaction.")
finally:
try:
# Change the switch/check interval to something ridiculous. We don't want to have other thread try
# to write to the redirected sys.__std*/sys.std* - it would fail horribly.
setinterval(2147483647)
client.close() # close before it's too late. it may already be dead
junk = [] # keep the old file objects alive for a bit
for name, fh in backup:
junk.append(getattr(sys, name))
setattr(sys, name, fh)
del backup
for fh in junk:
try:
fh.close()
except IOError:
pass
del fh
del junk
finally:
setinterval(old_interval)
cry("Cleaned up.")
except:
cry("ManholeConnection thread failed:")
cry(traceback.format_exc())
def _handle_oneshot(_signum, _frame):
try:
sock, pid = Manhole.get_socket()
cry("Waiting for new connection (in pid:%s) ..." % pid)
client, _ = sock.accept()
ManholeConnection.check_credentials(client)
ManholeConnection.handle(client)
except: # pylint: disable=W0702
        # we don't want to let any exception out, it might make the application misbehave
cry("Manhole oneshot connection failed:")
cry(traceback.format_exc())
finally:
_remove_manhole_uds()
def _remove_manhole_uds():
name = "/tmp/manhole-%s" % os.getpid()
if os.path.exists(name):
os.unlink(name)
_INST_LOCK = _ORIGINAL_ALLOCATE_LOCK()
_STDERR = _INST = _ORIGINAL_OS_FORK = _ORIGINAL_OS_FORKPTY = _SHOULD_RESTART = None
def _patched_fork():
"""Fork a child process."""
pid = _ORIGINAL_OS_FORK()
if not pid:
cry('Fork detected. Reinstalling Manhole.')
reinstall()
return pid
def _patched_forkpty():
"""Fork a new process with a new pseudo-terminal as controlling tty."""
pid, master_fd = _ORIGINAL_OS_FORKPTY()
if not pid:
cry('Fork detected. Reinstalling Manhole.')
reinstall()
return pid, master_fd
def _patch_os_fork_functions():
global _ORIGINAL_OS_FORK, _ORIGINAL_OS_FORKPTY # pylint: disable=W0603
if not _ORIGINAL_OS_FORK:
_ORIGINAL_OS_FORK, os.fork = os.fork, _patched_fork
if not _ORIGINAL_OS_FORKPTY:
_ORIGINAL_OS_FORKPTY, os.forkpty = os.forkpty, _patched_forkpty
cry("Patched %s and %s." % (_ORIGINAL_OS_FORK, _ORIGINAL_OS_FORKPTY))
def _activate_on_signal(_signum, _frame):
assert _INST, "Manhole wasn't installed !"
_INST.start()
ALL_SIGNALS = [
getattr(signal, sig) for sig in dir(signal)
if sig.startswith('SIG') and '_' not in sig
]
def install(verbose=True, patch_fork=True, activate_on=None,
sigmask=ALL_SIGNALS, oneshot_on=None, start_timeout=0.5,
workflow=None):
global _STDERR, _INST, _SHOULD_RESTART, VERBOSE, START_TIMEOUT # pylint: disable=W0603
with _INST_LOCK:
VERBOSE = verbose
START_TIMEOUT = start_timeout
_STDERR = sys.__stderr__
if _INST is None:
_INST = Manhole(sigmask, start_timeout, workflow)
if oneshot_on is not None:
oneshot_on = getattr(signal, 'SIG' + oneshot_on) if isinstance(oneshot_on, string) else oneshot_on
signal.signal(oneshot_on, _handle_oneshot)
if activate_on is None:
if oneshot_on is None:
_INST.start()
_SHOULD_RESTART = True
else:
activate_on = getattr(signal, 'SIG' + activate_on) if isinstance(activate_on, string) else activate_on
if activate_on == oneshot_on:
raise RuntimeError('You cannot do activation of the Manhole thread on the same signal '
'that you want to do oneshot activation !')
signal.signal(activate_on, _activate_on_signal)
atexit.register(_remove_manhole_uds)
if patch_fork:
if activate_on is None and oneshot_on is None:
_patch_os_fork_functions()
else:
if activate_on:
cry("Not patching os.fork and os.forkpty. Activation is done by signal %s" % activate_on)
elif oneshot_on:
cry("Not patching os.fork and os.forkpty. Oneshot activation is done by signal %s" % oneshot_on)
def reinstall():
global _INST # pylint: disable=W0603
assert _INST
with _INST_LOCK:
if not (_INST.is_alive() and _INST in _ORIGINAL__ACTIVE):
_INST = Manhole(_INST.sigmask, START_TIMEOUT)
if _SHOULD_RESTART:
_INST.start()
def dump_stacktraces():
import threading
lines = []
tmap = {thr.ident: thr.name for thr in threading.enumerate()}
for thread_id, stack in sys._current_frames().items(): # pylint: disable=W0212
lines.append("\n## ProcessID=%s, ThreadID=%s \"%s\" ##" % (
os.getpid(), thread_id, tmap.get(thread_id, "<unknown>")
))
for filename, lineno, name, line in traceback.extract_stack(stack):
lines.append('File: "%s", line %d, in %s' % (filename, lineno, name))
if line:
lines.append(" %s" % (line.strip()))
lines.append('#' * 80 + "\n\n")
print('\n'.join(lines), file=sys.stderr)
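# ---------------------------------------------------------------------------
# Illustrative sketch added for clarity (not part of the upstream module).
# `my_workflow` is a placeholder for the veles workflow object that
# Manhole.interact() expects:
#
#   install(verbose=True, oneshot_on='USR2', workflow=my_workflow)
#
# After sending SIGUSR2 to the process, a shell can be attached from another
# terminal through the Unix socket created by get_socket():
#
#   nc -U /tmp/manhole-<pid>
# ---------------------------------------------------------------------------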
|
ocs_ci/utility/baremetal.py
|
annagitel/ocs-ci
| 130 |
138815
|
import logging
import pyipmi
import pyipmi.interfaces
from ocs_ci.ocs import constants, defaults
from ocs_ci.ocs.constants import VM_POWERED_OFF, VM_POWERED_ON
from ocs_ci.ocs.exceptions import UnexpectedBehaviour
from ocs_ci.ocs.node import wait_for_nodes_status, get_worker_nodes, get_master_nodes
from ocs_ci.ocs.ocp import OCP, wait_for_cluster_connectivity
from ocs_ci.utility.utils import TimeoutSampler, load_auth_config, exec_cmd
logger = logging.getLogger(__name__)
class BAREMETAL(object):
"""
wrapper for Baremetal
"""
def __init__(self):
"""
Initialize the variables required
"""
self.mgmt_details = load_auth_config()["ipmi"]
def get_ipmi_ctx(self, host, user, password):
"""
Function to get ipmi handler
Args:
host (str): Host mgmt address
user (str): User Name for accessing mgmt console
password (str): Password for accessing mgmt console
Returns (object): ipmi handler
"""
interface = pyipmi.interfaces.create_interface(
"ipmitool", interface_type=defaults.IPMI_INTERFACE_TYPE
)
ipmi = pyipmi.create_connection(interface)
ipmi.session.set_session_type_rmcp(host, port=defaults.IPMI_RMCP_PORT)
ipmi.session.set_auth_type_user(user, password)
ipmi.session.establish()
ipmi.target = pyipmi.Target(ipmb_address=defaults.IPMI_IPMB_ADDRESS)
return ipmi
def get_power_status(self, ipmi_ctx):
"""
Get BM Power status
Args:
ipmi_ctx (object) : Ipmi host handler
Returns: (bool): bm power status
"""
chassis_status = ipmi_ctx.get_chassis_status()
return VM_POWERED_ON if chassis_status.power_on else VM_POWERED_OFF
def verify_machine_is_down(self, node):
"""
        Verify that the Baremetal machine is completely powered off
Args:
node (object): Node objects
Returns:
bool: True if machine is down, False otherwise
"""
result = exec_cmd(cmd=f"ping {node.name} -c 10", ignore_error=True)
if result.returncode == 0:
return False
else:
return True
def stop_baremetal_machines(self, baremetal_machine, force=True):
"""
Stop Baremetal Machines
Args:
baremetal_machine (list): BM objects
force (bool): True for BM ungraceful power off, False for
graceful BM shutdown
Raises:
UnexpectedBehaviour: If baremetal machine is still up
"""
for node in baremetal_machine:
if force:
if self.mgmt_details[node.name]:
ipmi_ctx = self.get_ipmi_ctx(
host=self.mgmt_details[node.name]["mgmt_console"],
user=self.mgmt_details[node.name]["mgmt_username"],
password=self.mgmt_details[node.name]["mgmt_password"],
)
logger.info(f"Powering Off {node.name}")
ipmi_ctx.chassis_control_power_down()
else:
ocp = OCP(kind="node")
ocp.exec_oc_debug_cmd(
node=node.name, cmd_list=["shutdown now"], timeout=60
)
if self.mgmt_details[node.name]:
ipmi_ctx = self.get_ipmi_ctx(
host=self.mgmt_details[node.name]["mgmt_console"],
user=self.mgmt_details[node.name]["mgmt_username"],
password=self.mgmt_details[node.name]["mgmt_password"],
)
for status in TimeoutSampler(
600, 5, self.get_power_status, ipmi_ctx
):
logger.info(
f"Waiting for Baremetal Machine {node.name} to power off"
f"Current Baremetal status: {status}"
)
if status == VM_POWERED_OFF:
logger.info(
f"Baremetal Machine {node.name} reached poweredOff status"
)
break
logger.info("Verifing machine is down")
ret = TimeoutSampler(
timeout=300,
sleep=3,
func=self.verify_machine_is_down,
node=node,
)
logger.info(ret)
if not ret.wait_for_func_status(result=True):
raise UnexpectedBehaviour("Machine {node.name} is still Running")
def start_baremetal_machines_with_ipmi_ctx(self, ipmi_ctxs, wait=True):
"""
Start Baremetal Machines using Ipmi ctx
Args:
ipmi_ctxs (list): List of BM ipmi_ctx
wait (bool): Wait for BMs to start
"""
for ipmi_ctx in ipmi_ctxs:
ipmi_ctx.chassis_control_power_up()
if wait:
for ipmi_ctx in ipmi_ctxs:
for status in TimeoutSampler(600, 5, self.get_power_status, ipmi_ctx):
logger.info(
f"Waiting for Baremetal Machine to power on. "
f"Current Baremetal status: {status}"
)
if status == VM_POWERED_ON:
logger.info("Baremetal Machine reached poweredOn status")
break
wait_for_cluster_connectivity(tries=400)
wait_for_nodes_status(
node_names=get_master_nodes(), status=constants.NODE_READY, timeout=800
)
wait_for_nodes_status(
node_names=get_worker_nodes(), status=constants.NODE_READY, timeout=800
)
def start_baremetal_machines(self, baremetal_machine, wait=True):
"""
Start Baremetal Machines
Args:
baremetal_machine (list): BM objects
wait (bool): Wait for BMs to start
"""
for node in baremetal_machine:
if self.mgmt_details[node.name]:
ipmi_ctx = self.get_ipmi_ctx(
host=self.mgmt_details[node.name]["mgmt_console"],
user=self.mgmt_details[node.name]["mgmt_username"],
password=self.mgmt_details[node.name]["mgmt_password"],
)
logger.info(f"Powering On {node.name}")
ipmi_ctx.chassis_control_power_up()
if wait:
if self.mgmt_details[node.name]:
ipmi_ctx = self.get_ipmi_ctx(
host=self.mgmt_details[node.name]["mgmt_console"],
user=self.mgmt_details[node.name]["mgmt_username"],
password=self.mgmt_details[node.name]["mgmt_password"],
)
for status in TimeoutSampler(
600, 5, self.get_power_status, ipmi_ctx
):
logger.info(
f"Waiting for Baremetal Machine {node.name} to power on. "
f"Current Baremetal status: {status}"
)
if status == VM_POWERED_ON:
logger.info(
f"Baremetal Machine {node.name} reached poweredOn status"
)
ipmi_ctx.session.close()
break
wait_for_cluster_connectivity(tries=400)
wait_for_nodes_status(
node_names=get_master_nodes(), status=constants.NODE_READY, timeout=800
)
wait_for_nodes_status(
node_names=get_worker_nodes(), status=constants.NODE_READY, timeout=800
)
def restart_baremetal_machines(self, baremetal_machine, force=True):
"""
Restart Baremetal Machines
Args:
baremetal_machine (list): BM objects
force (bool): True for BM ungraceful power off, False for
graceful BM shutdown
"""
self.stop_baremetal_machines(baremetal_machine, force=force)
self.start_baremetal_machines(baremetal_machine)
def get_nodes_ipmi_ctx(self, baremetal_machine):
"""
Get Node Ipmi handler
Args:
baremetal_machine: BM objects
"""
node_ipmi_ctx = list()
for node in baremetal_machine:
if self.mgmt_details[node.name]:
ipmi_ctx = self.get_ipmi_ctx(
host=self.mgmt_details[node.name]["mgmt_console"],
user=self.mgmt_details[node.name]["mgmt_username"],
password=self.mgmt_details[node.name]["mgmt_password"],
)
node_ipmi_ctx.append(ipmi_ctx)
return node_ipmi_ctx
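# ---------------------------------------------------------------------------
# Illustrative usage sketch added for clarity (not part of the upstream
# module). `worker_nodes` is a placeholder for node objects whose names appear
# in the ipmi section of the auth config loaded in __init__:
#
#   bm = BAREMETAL()
#   bm.restart_baremetal_machines(worker_nodes, force=True)
#   ipmi_ctxs = bm.get_nodes_ipmi_ctx(worker_nodes)
#   bm.start_baremetal_machines_with_ipmi_ctx(ipmi_ctxs, wait=True)
# ---------------------------------------------------------------------------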
|
opcode/opcode/__init__.py
|
MaxTurchin/pycopy-lib
| 126 |
138819
|
<gh_stars>100-1000
# This file is part of the standard library of Pycopy project, minimalist
# and lightweight Python implementation.
#
# https://github.com/pfalcon/pycopy
# https://github.com/pfalcon/pycopy-lib
#
# The MIT License (MIT)
#
# Copyright (c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from .upyopcodes import *
from .upyopmap import *
def stack_effect(opcode, *args):
delta = op_stack_effect[opcode]
if delta is not None:
return delta
if opcode == opmap["CALL_FUNCTION"]:
return -(1 + args[0] + args[1] * 2) + 1
if opcode == opmap["CALL_FUNCTION_VAR_KW"]:
return -(1 + args[0] + args[1] * 2 + 2) + 1
if opcode == opmap["CALL_METHOD"]:
return -(2 + args[0] + args[1] * 2) + 1
if opcode == opmap["CALL_METHOD_VAR_KW"]:
return -(2 + args[0] + args[1] * 2 + 2) + 1
if opcode == opmap["MAKE_CLOSURE"]:
return -args[1] + 1
if opcode == opmap["MAKE_CLOSURE_DEFARGS"]:
return -args[1] -2 + 1
if opcode in (opmap["BUILD_TUPLE"], opmap["BUILD_LIST"], opmap["BUILD_SET"], opmap["BUILD_SLICE"]):
return -args[0] + 1
if opcode == opmap["STORE_COMP"]:
if args[0] & 3 == 1:
return -2
else:
return -1
if opcode == opmap["RAISE_VARARGS"]:
return -args[0]
print(opcode, *args)
assert 0, opname[opcode]
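# ---------------------------------------------------------------------------
# Illustrative sketch added for clarity (not part of the upstream module),
# assuming op_stack_effect marks BUILD_TUPLE as arg-dependent (None) so the
# branch above is taken: building a 3-tuple pops three values and pushes one.
#
#   stack_effect(opmap["BUILD_TUPLE"], 3)   # -> -3 + 1 == -2
# ---------------------------------------------------------------------------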
|
src/db-up/azext_db_up/vendored_sdks/azure_mgmt_sql/sql/models/server.py
|
Mannan2812/azure-cli-extensions
| 207 |
138846
|
<filename>src/db-up/azext_db_up/vendored_sdks/azure_mgmt_sql/sql/models/server.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .tracked_resource import TrackedResource
class Server(TrackedResource):
"""An Azure SQL Database server.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param identity: The Azure Active Directory identity of the server.
:type identity: ~azure.mgmt.sql.models.ResourceIdentity
:ivar kind: Kind of sql server. This is metadata used for the Azure portal
experience.
:vartype kind: str
:param administrator_login: Administrator username for the server. Once
created it cannot be changed.
:type administrator_login: str
:param administrator_login_password: The administrator login password
(required for server creation).
:type administrator_login_password: str
:param version: The version of the server.
:type version: str
:ivar state: The state of the server.
:vartype state: str
:ivar fully_qualified_domain_name: The fully qualified domain name of the
server.
:vartype fully_qualified_domain_name: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'kind': {'readonly': True},
'state': {'readonly': True},
'fully_qualified_domain_name': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'identity': {'key': 'identity', 'type': 'ResourceIdentity'},
'kind': {'key': 'kind', 'type': 'str'},
'administrator_login': {'key': 'properties.administratorLogin', 'type': 'str'},
'administrator_login_password': {'key': 'properties.administratorLoginPassword', 'type': 'str'},
'version': {'key': 'properties.version', 'type': 'str'},
'state': {'key': 'properties.state', 'type': 'str'},
'fully_qualified_domain_name': {'key': 'properties.fullyQualifiedDomainName', 'type': 'str'},
}
def __init__(self, **kwargs):
super(Server, self).__init__(**kwargs)
self.identity = kwargs.get('identity', None)
self.kind = None
self.administrator_login = kwargs.get('administrator_login', None)
self.administrator_login_password = kwargs.get('administrator_login_password', None)
self.version = kwargs.get('version', None)
self.state = None
self.fully_qualified_domain_name = None
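# Illustrative sketch (not part of the generated file): constructing the model
# with keyword arguments, as handled by __init__ above. All values below are
# placeholders, not real credentials or endpoints.
def _example_server():
    return Server(
        location='eastus',
        tags={'env': 'dev'},
        administrator_login='sqladmin',
        administrator_login_password='<placeholder-password>',
        version='12.0',
    )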
|
src/data_gen/ohem.py
|
kehuaWangfff/FashionAI_KeyPoint_Detection_Challenge_Keras
| 169 |
138848
|
<filename>src/data_gen/ohem.py
import sys
import numpy as np
sys.path.insert(0, "../unet/")
from keras.models import *
from keras.layers import *
from utils import np_euclidean_l2
from dataset import getKpNum
def generate_topk_mask_ohem(input_data, gthmap, keras_model, graph, topK, image_category, dynamicFlag=False):
    '''
    :param input_data: (image, mask) input pair
    :param gthmap: ground truth heatmap
    :param keras_model: keras model
    :param graph: tf graph, used to work around a threading issue
    :param topK: number of keypoints selected
    :param image_category: image category, used when dynamicFlag is True
    :param dynamicFlag: if True, derive topK from the category's keypoint count
    :return: (ohem_mask, ohem_gthmap)
    '''
# do inference, and calculate loss of each channel
mimg, mmask = input_data
ximg = mimg[np.newaxis,:,:,:]
xmask = mmask[np.newaxis,:,:,:]
if len(keras_model.input_layers) == 3:
# use original mask as ohem_mask
inputs = [ximg, xmask, xmask]
else:
inputs = [ximg, xmask]
with graph.as_default():
keras_output = keras_model.predict(inputs)
# heatmap of last stage
outhmap = keras_output[-1]
channel_num = gthmap.shape[-1]
# calculate loss
mloss = list()
for i in range(channel_num):
_dtmap = outhmap[0, :, :, i]
_gtmap = gthmap[:, :, i]
loss = np_euclidean_l2(_dtmap, _gtmap)
mloss.append(loss)
# refill input_mask, set topk as 1.0 and fill 0.0 for rest
    # fixme: topK may differ between categories
if dynamicFlag:
topK = getKpNum(image_category)//2
    ohem_mask = adjust_mask(mloss, mmask, topK)
ohem_gthmap = ohem_mask * gthmap
return ohem_mask, ohem_gthmap
def adjust_mask(loss, input_mask, topk):
    # pick topk loss from losses
    # fill topk with 1.0 and fill the rest as 0.0
    assert (len(loss) == input_mask.shape[-1]), \
        "shape should be same" + str(len(loss)) + " vs " + str(input_mask.shape)
    outmask = np.zeros(input_mask.shape, dtype=float)
topk_index = sorted(range(len(loss)), key=lambda i:loss[i])[-topk:]
for i in range(len(loss)):
if i in topk_index:
outmask[:,:,i] = 1.0
return outmask
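# Illustrative sketch (not part of the original file): with per-channel losses
# [0.1, 0.5, 0.3] and topk=2, adjust_mask keeps the two highest-loss channels
# (indices 1 and 2) and zeroes the rest.
if __name__ == "__main__":
    toy_mask = np.ones((4, 4, 3))
    toy_loss = [0.1, 0.5, 0.3]
    selected = adjust_mask(toy_loss, toy_mask, topk=2)
    assert selected[:, :, 0].sum() == 0.0
    assert selected[:, :, 1].sum() == 16.0 and selected[:, :, 2].sum() == 16.0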
|
cortex/mapper/samplers.py
|
mvdoc/pycortex
| 423 |
138849
|
<filename>cortex/mapper/samplers.py
import numpy as np
def collapse(j, data):
"""Collapses samples into a single row"""
uniques = np.unique(j)
return uniques, np.array([data[j == u].sum() for u in uniques])
def nearest(coords, shape, **kwargs):
valid = ~(np.isnan(coords).all(1))
valid = np.logical_and(valid, np.logical_and(coords[:,0] > -.5, coords[:,0] < shape[2]+.5))
valid = np.logical_and(valid, np.logical_and(coords[:,1] > -.5, coords[:,1] < shape[1]+.5))
valid = np.logical_and(valid, np.logical_and(coords[:,2] > -.5, coords[:,2] < shape[0]+.5))
rcoords = coords[valid].round().astype(int)
j = np.ravel_multi_index(rcoords.T[::-1], shape, mode='clip')
#return np.nonzero(valid)[0], j, (rcoords > 0).all(1) #np.ones((valid.sum(),))
return np.nonzero(valid)[0], j, np.ones((valid.sum(),))
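# Illustrative sketch (not part of the original file): nearest() takes voxel
# coordinates in (x, y, z) order and a volume shape (nZ, nY, nX), drops NaN or
# out-of-volume rows, and returns (row indices, flat voxel indices, weights).
if __name__ == "__main__":
    shape = (4, 5, 6)                               # (nZ, nY, nX)
    coords = np.array([[1.2, 2.7, 0.4],             # rounds to x=1, y=3, z=0
                       [np.nan, np.nan, np.nan]])   # dropped as invalid
    rows, flat_idx, weights = nearest(coords, shape)
    assert list(rows) == [0]
    assert flat_idx[0] == np.ravel_multi_index((0, 3, 1), shape)  # (z, y, x)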
def trilinear(coords, shape, **kwargs):
#trilinear interpolation equation from http://paulbourke.net/miscellaneous/interpolation/
valid = ~(np.isnan(coords).all(1))
(x, y, z), floor = np.modf(coords[valid].T)
floor = floor.astype(int)
ceil = floor + 1
x[x < 0] = 0
y[y < 0] = 0
z[z < 0] = 0
i000 = np.array([floor[2], floor[1], floor[0]])
i100 = np.array([floor[2], floor[1], ceil[0]])
i010 = np.array([floor[2], ceil[1], floor[0]])
i001 = np.array([ ceil[2], floor[1], floor[0]])
i101 = np.array([ ceil[2], floor[1], ceil[0]])
i011 = np.array([ ceil[2], ceil[1], floor[0]])
i110 = np.array([floor[2], ceil[1], ceil[0]])
i111 = np.array([ ceil[2], ceil[1], ceil[0]])
v000 = (1-x)*(1-y)*(1-z)
v100 = x*(1-y)*(1-z)
v010 = (1-x)*y*(1-z)
v110 = x*y*(1-z)
v001 = (1-x)*(1-y)*z
v101 = x*(1-y)*z
v011 = (1-x)*y*z
v111 = x*y*z
i = np.tile(np.nonzero(valid)[0], [1, 8]).ravel()
j = np.hstack([i000, i100, i010, i001, i101, i011, i110, i111])
data = np.vstack([v000, v100, v010, v001, v101, v011, v110, v111]).ravel()
return i, np.ravel_multi_index(j, shape, mode='clip'), data
def distance_func(func, coords, shape, renorm=True, mp=True):
"""Generates masks for seperable distance functions"""
nZ, nY, nX = shape
dx = coords[:,0] - np.atleast_2d(np.arange(nX)).T
dy = coords[:,1] - np.atleast_2d(np.arange(nY)).T
dz = coords[:,2] - np.atleast_2d(np.arange(nZ)).T
Lx, Ly, Lz = func(dx), func(dy), func(dz)
ix, jx = np.nonzero(Lx)
iy, jy = np.nonzero(Ly)
iz, jz = np.nonzero(Lz)
ba = np.broadcast_arrays
def func(v):
mx, my, mz = ix[jx == v], iy[jy == v], iz[jz == v]
idx, idy, idz = [i.ravel() for i in ba(*np.ix_(mx, my, mz))]
vx, vy, vz = [i.ravel() for i in ba(*np.ix_(Lx[mx, v], Ly[my, v], Lz[mz, v]))]
i = v * np.ones((len(idx,)))
j = np.ravel_multi_index((idz, idy, idx), shape, mode='clip')
data = vx*vy*vz
if renorm:
data /= data.sum()
return i, j, data
if mp:
from .. import mp
ijdata = mp.map(func, range(len(coords)))
else:
#ijdata = map(func, range(len(coords)))
ijdata = [func(x) for x in range(len(coords))]
return np.hstack(ijdata)
def gaussian(coords, shape, sigma=1, window=3, **kwargs):
raise NotImplementedError
def gaussian(x):
pass
return distance_func(gaussian, coords, shape, **kwargs)
def lanczos(coords, shape, window=3, **kwargs):
def lanczos(x):
out = np.zeros_like(x)
sel = np.abs(x)<window
selx = x[sel]
out[sel] = np.sin(np.pi * selx) * np.sin(np.pi * selx / window) * (window / (np.pi**2 * selx**2))
return out
return distance_func(lanczos, coords, shape, **kwargs)
|
recipes/Python/499350_NonLinear_Units/recipe-499350.py
|
tdiprima/code
| 2,023 |
138903
|
class DB:
'''
    Convenience class for the decibel scale. Other non-linear scales such as the Richter scale could be handled similarly.
Usage:
dB = DB()
.
. (later)
.
gain = 15 * dB
'''
def __rmul__(self, val):
'''
        Only allow multiplication from the right to avoid confusing situations
like: 15 * dB * 10
'''
return 10 ** (val / 10.)
def __test__():
dB = DB()
gain = 10 * dB
assert abs(gain - 10) < 1e-8
try:
gain2 = dB * 10
raise Exception('Should raise a type error!')
except TypeError:
pass
__test__()
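# Illustrative sketch (not part of the original recipe): negative values work
# as well, since __rmul__ computes 10 ** (val / 10.) for any real value.
def __attenuation_example__():
    dB = DB()
    loss = -3 * dB # 10 ** (-0.3), roughly a factor of 0.5
    assert abs(loss - 0.501187) < 1e-5
__attenuation_example__()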
|
cortex_DIM/nn_modules/convnet.py
|
Soapy-Salted-Fish-King/DIM
| 749 |
138905
|
<reponame>Soapy-Salted-Fish-King/DIM<filename>cortex_DIM/nn_modules/convnet.py
'''Convnet encoder module.
'''
import copy
import torch
import torch.nn as nn
from cortex.built_ins.networks.utils import get_nonlinearity
from cortex_DIM.nn_modules.misc import Expand2d, Fold, Unfold, View
class Convnet(nn.Module):
'''Basic convnet convenience class.
Attributes:
layers: nn.Sequential of layers with batch norm,
dropout, nonlinearity, etc.
        shapes: list of output shapes for every layer.
'''
_supported_types = ('linear', 'conv', 'tconv', 'reshape', 'flatten', None)
def __init__(self, *args, **kwargs):
super().__init__()
self.create_layers(*args, **kwargs)
def create_layers(self, shape, layers=None):
'''Creates layers
Args:
shape: Shape of input.
layers: list of layer arguments.
'''
self.layers, self.shapes = self.create_sequential(shape, layers=layers)
def create_sequential(self, shape, layers=None):
'''Creates a sequence of layers.
Args:
shape: Input shape.
layers: list of layer arguments.
Returns:
nn.Sequential: a sequence of convolutional layers.
'''
modules = nn.Sequential()
layers = layers or []
layers = copy.deepcopy(layers)
shapes = []
for i, layer in enumerate(layers):
layer_type = layer.pop('layer', None)
name = 'layer{}'.format(i)
block = nn.Sequential()
shape = self.handle_layer(block, shape, layer, layer_type)
shape = self.finish_block(block, shape, **layer)
if len(block) == 1:
block = block[0]
shapes.append(shape)
modules.add_module(name, block)
return modules, shapes
def handle_layer(self, block, shape, layer, layer_type):
'''Handles the layer arguments and adds layer to the block.
Args:
block: nn.Sequential to add modules to.
shape: Shape of the input.
layer: Layer arguments.
layer_type: Type of layer.
Returns:
tuple: Output shape.
'''
args = layer.pop('args', None)
if layer_type == 'linear':
if len(shape) == 3:
dim_x, dim_y, dim_out = shape
shape = (dim_x * dim_y * dim_out,)
block.add_module('flatten', View(-1, shape[0]))
bn = layer.get('bn', False)
bias = layer.pop('bias', None)
init = layer.pop('init', None)
init_args = layer.pop('init_args', {})
shape = self.add_linear_layer(block, shape, args=args, bn=bn, bias=bias, init=init, init_args=init_args)
elif layer_type == 'conv':
if len(shape) == 1:
shape = (1, 1, shape[0])
block.add_module('expand', Expand2d())
bn = layer.get('bn', False)
bias = layer.pop('bias', None)
init = layer.pop('init', None)
init_args = layer.pop('init_args', {})
shape = self.add_conv_layer(block, shape, args=args, bn=bn, bias=bias, init=init, init_args=init_args)
elif layer_type == 'tconv':
if len(shape) == 1:
raise ValueError('Transpose conv needs 4d input')
bn = layer.get('bn', False)
bias = layer.pop('bias', True)
shape = self.add_tconv_layer(block, shape, args=args, bn=bn, bias=bias)
elif layer_type == 'flatten':
if len(shape) == 3:
dim_x, dim_y, dim_out = shape
shape = (dim_x * dim_y * dim_out,)
block.add_module(layer_type, View(-1, shape[0]))
elif layer_type == 'reshape':
if args is None:
raise ValueError('reshape needs args')
new_shape = args
dim_new = 1
dim_out = 1
for s in new_shape:
dim_new *= s
for s in shape:
dim_out *= s
if dim_new != dim_out:
raise ValueError('New shape {} not compatible with old shape {}.'
.format(new_shape, shape))
block.add_module(layer_type, View((-1,) + new_shape))
shape = new_shape[::-1]
elif layer_type is None:
pass
else:
raise NotImplementedError(
'Layer {} not supported. Use {}'.format(layer_type, self._supported_types))
return shape
def add_conv_layer(self, block, shape, args=None, bn=False, bias=None, init=None, init_args=None):
'''Adds a convolutional layer to the block.
Args:
block: nn.Sequential to add conv layer to.
shape: Shape of the input.
args: conv layer arguments (n_units, filter size, stride, padding)
bn (bool): Batch normalization.
bias (bool): Controls bias in layer.
init: Initialization of layer.
init_args: Arguments for initialization.
Returns:
tuple: Output shape.
'''
dim_x, dim_y, dim_in = shape
try:
dim_out, f, s, p = args
        except (TypeError, ValueError):
raise ValueError('args must be provided for conv layer and in format '
'`(depth, kernel size, stride, padding)`')
if bias is None:
bias = not (bn)
conv = nn.Conv2d(dim_in, dim_out, kernel_size=f, stride=s, padding=p, bias=bias)
if init:
init = getattr(nn.init, init)
init(conv.weight, **init_args)
block.add_module('conv', conv)
dim_x, dim_y = self.next_conv_size(dim_x, dim_y, f, s, p)
return (dim_x, dim_y, dim_out)
def add_tconv_layer(self, block, shape, args=None, bn=False, bias=None):
'''Adds a transpose convolutional layer to the block.
Args:
block: nn.Sequential to add tconv layer to.
shape: Shape of the input.
args: tconv layer arguments (n_units, filter size, stride, padding)
bn (bool): Batch normalization.
bias (bool): Controls bias in layer.
Returns:
tuple: Output shape.
'''
dim_x, dim_y, dim_in = shape
try:
dim_out, f, s, p = args
        except (TypeError, ValueError):
raise ValueError('args must be provided for tconv layer and in format '
'`(depth, kernel size, stride, padding)`')
if bias is None:
bias = not (bn)
tconv = nn.ConvTranspose2d(dim_in, dim_out, kernel_size=f, stride=s, padding=p, bias=bias)
block.add_module('tconv', tconv)
dim_x, dim_y = self.next_tconv_size(dim_x, dim_y, f, s, p)
return (dim_x, dim_y, dim_out)
def add_linear_layer(self, block, shape, args=None, bn=False, bias=None, init=None, init_args=None):
'''Adds a linear layer
Args:
block: nn.Sequential to add linear layer to.
shape: Shape of the input.
args: linear layer arguments (n_units,)
bn (bool): Batch normalization.
bias (bool): Controls bias in layer.
init: Initialization of layer.
init_args: Arguments for initialization.
Returns:
tuple: Output shape.
'''
try:
dim_out, = args
        except (TypeError, ValueError):
raise ValueError('args must be provided for fully-connected layer and in format '
'`(depth,)`')
dim_in, = shape
if bias is None:
bias = not (bn)
layer = nn.Linear(dim_in, dim_out, bias=bias)
if init:
init = getattr(nn.init, init)
init(layer.weight, **init_args)
block.add_module('fc', layer)
return (dim_out,)
def finish_block(self, block, shape, bn=False, ln=False, do=False, act=None, pool=None):
'''Finishes a block.
Adds batch norm, dropout, activation, pooling.
Args:
block (nn.Sequential): Block to add conv layer to.
shape (tuple): Shape of the input.
bn (bool): Batch normalization.
ln (bool): Layer normalization.
do (float): Dropout.
act (str): Activation.
pool (tuple): Pooling. In format (pool type, kernel size, stride).
Returns:
'''
if len(shape) == 1:
BN = nn.BatchNorm1d
DO = nn.Dropout
elif len(shape) == 3:
BN = nn.BatchNorm2d
DO = nn.Dropout2d
else:
raise NotImplementedError('Shape {} not supported'.format(shape))
LN = nn.LayerNorm
if ln and bn:
raise ValueError('Use only one sort of normalization.')
dim_out = shape[-1]
if do:
block.add_module('do', DO(p=do))
if bn:
block.add_module('bn', BN(dim_out))
if ln:
block.add_module('ln', LN(dim_out))
if act:
nonlinearity = get_nonlinearity(act)
block.add_module(nonlinearity.__class__.__name__, nonlinearity)
if pool:
if len(shape) == 1:
raise ValueError('Cannot pool on 1d tensor.')
(pool_type, kernel, stride) = pool
Pool = getattr(nn, pool_type)
block.add_module('pool', Pool(kernel_size=kernel, stride=stride))
dim_x, dim_y, dim_out = shape
dim_x, dim_y = self.next_conv_size(dim_x, dim_y, kernel, stride, 0)
shape = (dim_x, dim_y, dim_out)
return shape
def next_conv_size(self, dim_x, dim_y, k, s, p):
'''Infers the next size of a convolutional layer.
Args:
dim_x: First dimension.
dim_y: Second dimension.
k: Kernel size.
s: Stride.
p: Padding.
Returns:
(int, int): (First output dimension, Second output dimension)
'''
def infer_conv_size(w, k, s, p):
'''Infers the next size after convolution.
Args:
w: Input size.
k: Kernel size.
s: Stride.
p: Padding.
Returns:
int: Output size.
'''
x = (w - k + 2 * p) // s + 1
return x
if isinstance(k, int):
kx, ky = (k, k)
else:
kx, ky = k
if isinstance(s, int):
sx, sy = (s, s)
else:
sx, sy = s
if isinstance(p, int):
px, py = (p, p)
else:
px, py = p
return (infer_conv_size(dim_x, kx, sx, px),
infer_conv_size(dim_y, ky, sy, py))
def next_tconv_size(self, dim_x, dim_y, k, s, p):
'''Infers the next size of a transpose convolutional layer.
Args:
dim_x: First dimension.
dim_y: Second dimension.
k: Kernel size.
s: Stride.
p: Padding.
Returns:
(int, int): (First output dimension, Second output dimension)
'''
def infer_conv_size(w, k, s, p):
'''Infers the next size after convolution.
Args:
w: Input size.
k: Kernel size.
s: Stride.
p: Padding.
Returns:
int: Output size.
'''
x = s * (w - 1) - 2 * p + k
return x
if isinstance(k, int):
kx, ky = (k, k)
else:
kx, ky = k
if isinstance(s, int):
sx, sy = (s, s)
else:
sx, sy = s
if isinstance(p, int):
px, py = (p, p)
else:
px, py = p
return (infer_conv_size(dim_x, kx, sx, px),
infer_conv_size(dim_y, ky, sy, py))
def forward(self, x: torch.Tensor, return_full_list=False, clip_grad=False):
'''Forward pass
Args:
x: Input.
            return_full_list: Optional, returns all layer outputs.
            clip_grad: Optional, clamps gradients flowing through each layer to [-clip_grad, clip_grad].
Returns:
torch.Tensor or list of torch.Tensor.
'''
def _clip_grad(v, min, max):
v_tmp = v.expand_as(v)
v_tmp.register_hook(lambda g: g.clamp(min, max))
return v_tmp
out = []
for layer in self.layers:
x = layer(x)
if clip_grad:
x = _clip_grad(x, -clip_grad, clip_grad)
out.append(x)
if not return_full_list:
out = out[-1]
return out
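# Illustrative sketch (not part of the original module): building a small
# encoder with the layer-dict format handled by create_sequential above. The
# activation string is an assumption and must be one that get_nonlinearity
# accepts (e.g. 'ReLU').
def _example_convnet():
    layers = [
        dict(layer='conv', args=(64, 5, 2, 2), bn=True, act='ReLU'),
        dict(layer='conv', args=(128, 5, 2, 2), bn=True, act='ReLU'),
        dict(layer='flatten'),
        dict(layer='linear', args=(10,)),
    ]
    net = Convnet((32, 32, 3), layers=layers)
    # net.shapes records each block's output shape, e.g. (16, 16, 64) after
    # the first conv for a (32, 32, 3) input.
    return net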
class FoldedConvnet(Convnet):
'''Convnet with strided crop input.
'''
_supported_types = ('linear', 'conv', 'tconv', 'flatten', 'fold', 'unfold', None)
def create_layers(self, shape, crop_size=8, layers=None):
'''Creates layers
Args:
shape: Shape of input.
crop_size: Size of crops
layers: list of layer arguments.
'''
self.crop_size = crop_size
self.layers, self.shapes = self.create_sequential(shape, layers=layers)
def create_sequential(self, shape, layers=None):
'''Creates a sequence of layers.
Args:
shape: Input shape.
layers: list of layer arguments.
Returns:
nn.Sequential: a sequence of convolutional layers.
'''
self.final_size = None
return super().create_sequential(shape, layers=layers)
def handle_layer(self, block, shape, layer, layer_type):
'''Handles the layer arguments and adds layer to the block.
Args:
block: nn.Sequential to add modules to.
shape: Shape of the input.
layer: Layer arguments.
layer_type: Type of layer.
Returns:
tuple: Output shape.
'''
if layer_type == 'unfold':
dim_x, dim_y, dim_out = shape
self.final_size = 2 * (dim_x // self.crop_size) - 1
block.add_module(layer_type, Unfold(dim_x, self.crop_size))
shape = (self.crop_size, self.crop_size, dim_out)
elif layer_type == 'fold':
if self.final_size is None:
raise ValueError('Cannot fold without unfolding first.')
dim_out = shape[2]
block.add_module(layer_type, Fold(self.final_size))
shape = (self.final_size, self.final_size, dim_out)
elif layer_type is None:
pass
else:
shape = super().handle_layer(block, shape, layer, layer_type)
return shape
|
internal/core/build-support/run_cpplint.py
|
chriswarnock/milvus
| 10,504 |
138970
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import lintutils
from subprocess import PIPE, STDOUT
import argparse
import multiprocessing as mp
import sys
import platform
from functools import partial
# NOTE(wesm):
#
# * readability/casting is disabled as it aggressively warns about functions
# with names like "int32", so "int32(x)", where int32 is a function name,
# warns with
_filters = '''
-whitespace/comments
-readability/casting
-readability/todo
-readability/alt_tokens
-build/header_guard
-build/c++11
-runtime/references
-build/include_order
'''.split()
def _get_chunk_key(filenames):
# lists are not hashable so key on the first filename in a chunk
return filenames[0]
def _check_some_files(completed_processes, filenames):
# cpplint outputs complaints in '/path:line_number: complaint' format,
# so we can scan its output to get a list of files to fix
result = completed_processes[_get_chunk_key(filenames)]
return lintutils.stdout_pathcolonline(result, filenames)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Runs cpplint on all of the source files.")
parser.add_argument("--cpplint_binary",
required=True,
help="Path to the cpplint binary")
parser.add_argument("--exclude_globs",
help="Filename containing globs for files "
"that should be excluded from the checks")
parser.add_argument("--source_dir",
required=True,
help="Root directory of the source code")
parser.add_argument("--quiet", default=False,
action="store_true",
help="If specified, only print errors")
arguments = parser.parse_args()
exclude_globs = []
if arguments.exclude_globs:
for line in open(arguments.exclude_globs):
exclude_globs.append(line.strip())
linted_filenames = []
for path in lintutils.get_sources(arguments.source_dir, exclude_globs):
linted_filenames.append(str(path))
cmd = [
arguments.cpplint_binary,
'--verbose=2',
'--linelength=120',
'--filter=' + ','.join(_filters)
]
if (arguments.cpplint_binary.endswith('.py') and
platform.system() == 'Windows'):
# Windows doesn't support executable scripts; execute with
# sys.executable
cmd.insert(0, sys.executable)
if arguments.quiet:
cmd.append('--quiet')
else:
print("\n".join(map(lambda x: "Linting {}".format(x),
linted_filenames)))
# lint files in chunks: each invocation of cpplint will process 16 files
chunks = lintutils.chunk(linted_filenames, 16)
cmds = [cmd + some for some in chunks]
results = lintutils.run_parallel(cmds, stdout=PIPE, stderr=STDOUT)
error = False
# record completed processes (keyed by the first filename in the input
# chunk) for lookup in _check_some_files
completed_processes = {
_get_chunk_key(filenames): result
for filenames, result in zip(chunks, results)
}
checker = partial(_check_some_files, completed_processes)
pool = mp.Pool()
try:
# scan the outputs of various cpplint invocations in parallel to
# distill a list of problematic files
for problem_files, stdout in pool.imap(checker, chunks):
if problem_files:
if isinstance(stdout, bytes):
stdout = stdout.decode('utf8')
print(stdout, file=sys.stderr)
error = True
except Exception:
error = True
raise
finally:
pool.terminate()
pool.join()
sys.exit(1 if error else 0)
|
h5Nastran/h5Nastran/h5/nastran/__init__.py
|
ACea15/pyNastran
| 293 |
138971
|
<reponame>ACea15/pyNastran
from .nastran_node import NastranNode
|
tests/test_data_structures.py
|
proudzhu/algorithms
| 2,545 |
139043
|
<reponame>proudzhu/algorithms
from random import shuffle
import unittest
from algorithms.data_structures import (
binary_search_tree,
digraph,
queue,
singly_linked_list,
stack,
undirected_graph,
union_find,
union_find_by_rank,
union_find_with_path_compression,
lcp_array
)
class TestBinarySearchTree(unittest.TestCase):
"""
Test Binary Search Tree Implementation
"""
key_val = [
("a", 1), ("b", 2), ("c", 3),
("d", 4), ("e", 5), ("f", 6),
("g", 7), ("h", 8), ("i", 9)
]
def shuffle_list(self, ls):
shuffle(ls)
return ls
def test_size(self):
# Size starts at 0
self.bst = binary_search_tree.BinarySearchTree()
self.assertEqual(self.bst.size(), 0)
# Doing a put increases the size to 1
self.bst.put("one", 1)
self.assertEqual(self.bst.size(), 1)
# Putting a key that is already in doesn't change size
self.bst.put("one", 1)
self.assertEqual(self.bst.size(), 1)
self.bst.put("one", 2)
self.assertEqual(self.bst.size(), 1)
self.bst = binary_search_tree.BinarySearchTree()
size = 0
for pair in self.key_val:
k, v = pair
self.bst.put(k, v)
size += 1
self.assertEqual(self.bst.size(), size)
shuffled = self.shuffle_list(self.key_val[:])
self.bst = binary_search_tree.BinarySearchTree()
size = 0
for pair in shuffled:
k, v = pair
self.bst.put(k, v)
size += 1
self.assertEqual(self.bst.size(), size)
def test_is_empty(self):
self.bst = binary_search_tree.BinarySearchTree()
self.assertTrue(self.bst.is_empty())
self.bst.put("a", 1)
self.assertFalse(self.bst.is_empty())
def test_get(self):
self.bst = binary_search_tree.BinarySearchTree()
# Getting a key not in BST returns None
self.assertEqual(self.bst.get("one"), None)
# Get with a present key returns proper value
self.bst.put("one", 1)
self.assertEqual(self.bst.get("one"), 1)
self.bst = binary_search_tree.BinarySearchTree()
for pair in self.key_val:
k, v = pair
self.bst.put(k, v)
self.assertEqual(self.bst.get(k), v)
shuffled = self.shuffle_list(self.key_val[:])
self.bst = binary_search_tree.BinarySearchTree()
for pair in shuffled:
k, v = pair
self.bst.put(k, v)
self.assertEqual(self.bst.get(k), v)
def test_contains(self):
self.bst = binary_search_tree.BinarySearchTree()
self.assertFalse(self.bst.contains("a"))
self.bst.put("a", 1)
self.assertTrue(self.bst.contains("a"))
def test_put(self):
self.bst = binary_search_tree.BinarySearchTree()
# When BST is empty first put becomes root
self.bst.put("bbb", 1)
self.assertEqual(self.bst.root.key, "bbb")
self.assertEqual(self.bst.root.left, None)
# Adding a key greater than root doesn't update the left tree
# but does update the right
self.bst.put("ccc", 2)
self.assertEqual(self.bst.root.key, "bbb")
self.assertEqual(self.bst.root.left, None)
self.assertEqual(self.bst.root.right.key, "ccc")
self.bst = binary_search_tree.BinarySearchTree()
self.bst.put("bbb", 1)
# Adding a key less than root doesn't update the right tree
# but does update the left
self.bst.put("aaa", 2)
self.assertEqual(self.bst.root.key, "bbb")
self.assertEqual(self.bst.root.right, None)
self.assertEqual(self.bst.root.left.key, "aaa")
self.bst = binary_search_tree.BinarySearchTree()
size = 0
for pair in self.key_val:
k, v = pair
self.bst.put(k, v)
size += 1
self.assertEqual(self.bst.get(k), v)
self.assertEqual(self.bst.size(), size)
self.bst = binary_search_tree.BinarySearchTree()
shuffled = self.shuffle_list(self.key_val[:])
size = 0
for pair in shuffled:
k, v = pair
self.bst.put(k, v)
size += 1
self.assertEqual(self.bst.get(k), v)
self.assertEqual(self.bst.size(), size)
def test_min_key(self):
self.bst = binary_search_tree.BinarySearchTree()
for pair in self.key_val[::-1]:
k, v = pair
self.bst.put(k, v)
self.assertEqual(self.bst.min_key(), k)
shuffled = self.shuffle_list(self.key_val[:])
self.bst = binary_search_tree.BinarySearchTree()
for pair in shuffled:
k, v = pair
self.bst.put(k, v)
self.assertEqual(self.bst.min_key(), "a")
def test_max_key(self):
self.bst = binary_search_tree.BinarySearchTree()
for pair in self.key_val:
k, v = pair
self.bst.put(k, v)
self.assertEqual(self.bst.max_key(), k)
shuffled = self.shuffle_list(self.key_val[:])
self.bst = binary_search_tree.BinarySearchTree()
for pair in shuffled:
k, v = pair
self.bst.put(k, v)
self.assertEqual(self.bst.max_key(), "i")
def test_floor_key(self):
self.bst = binary_search_tree.BinarySearchTree()
self.bst.put("a", 1)
self.bst.put("c", 3)
self.bst.put("e", 5)
self.bst.put("g", 7)
self.assertEqual(self.bst.floor_key("a"), "a")
self.assertEqual(self.bst.floor_key("b"), "a")
self.assertEqual(self.bst.floor_key("g"), "g")
self.assertEqual(self.bst.floor_key("h"), "g")
self.bst = binary_search_tree.BinarySearchTree()
self.bst.put("c", 3)
self.bst.put("e", 5)
self.bst.put("a", 1)
self.bst.put("g", 7)
self.assertEqual(self.bst.floor_key("a"), "a")
self.assertEqual(self.bst.floor_key("b"), "a")
self.assertEqual(self.bst.floor_key("g"), "g")
self.assertEqual(self.bst.floor_key("h"), "g")
def test_ceiling_key(self):
self.bst = binary_search_tree.BinarySearchTree()
self.bst.put("a", 1)
self.bst.put("c", 3)
self.bst.put("e", 5)
self.bst.put("g", 7)
self.assertEqual(self.bst.ceiling_key("a"), "a")
self.assertEqual(self.bst.ceiling_key("b"), "c")
self.assertEqual(self.bst.ceiling_key("g"), "g")
self.assertEqual(self.bst.ceiling_key("f"), "g")
self.bst = binary_search_tree.BinarySearchTree()
self.bst.put("c", 3)
self.bst.put("e", 5)
self.bst.put("a", 1)
self.bst.put("g", 7)
self.assertEqual(self.bst.ceiling_key("a"), "a")
self.assertEqual(self.bst.ceiling_key("b"), "c")
self.assertEqual(self.bst.ceiling_key("g"), "g")
self.assertEqual(self.bst.ceiling_key("f"), "g")
def test_select_key(self):
shuffled = self.shuffle_list(self.key_val[:])
self.bst = binary_search_tree.BinarySearchTree()
for pair in shuffled:
k, v = pair
self.bst.put(k, v)
self.assertEqual(self.bst.select_key(0), "a")
self.assertEqual(self.bst.select_key(1), "b")
self.assertEqual(self.bst.select_key(2), "c")
def test_rank(self):
self.bst = binary_search_tree.BinarySearchTree()
for pair in self.key_val:
k, v = pair
self.bst.put(k, v)
self.assertEqual(self.bst.rank("a"), 0)
self.assertEqual(self.bst.rank("b"), 1)
self.assertEqual(self.bst.rank("c"), 2)
self.assertEqual(self.bst.rank("d"), 3)
shuffled = self.shuffle_list(self.key_val[:])
self.bst = binary_search_tree.BinarySearchTree()
for pair in shuffled:
k, v = pair
self.bst.put(k, v)
self.assertEqual(self.bst.rank("a"), 0)
self.assertEqual(self.bst.rank("b"), 1)
self.assertEqual(self.bst.rank("c"), 2)
self.assertEqual(self.bst.rank("d"), 3)
def test_delete_min(self):
self.bst = binary_search_tree.BinarySearchTree()
for pair in self.key_val:
k, v = pair
self.bst.put(k, v)
for i in range(self.bst.size() - 1):
self.bst.delete_min()
self.assertEqual(self.bst.min_key(), self.key_val[i+1][0])
self.bst.delete_min()
self.assertEqual(self.bst.min_key(), None)
shuffled = self.shuffle_list(self.key_val[:])
self.bst = binary_search_tree.BinarySearchTree()
for pair in shuffled:
k, v = pair
self.bst.put(k, v)
for i in range(self.bst.size() - 1):
self.bst.delete_min()
self.assertEqual(self.bst.min_key(), self.key_val[i+1][0])
self.bst.delete_min()
self.assertEqual(self.bst.min_key(), None)
def test_delete_max(self):
self.bst = binary_search_tree.BinarySearchTree()
for pair in self.key_val:
k, v = pair
self.bst.put(k, v)
for i in range(self.bst.size() - 1, 0, -1):
self.bst.delete_max()
self.assertEqual(self.bst.max_key(), self.key_val[i-1][0])
self.bst.delete_max()
self.assertEqual(self.bst.max_key(), None)
shuffled = self.shuffle_list(self.key_val[:])
for pair in shuffled:
k, v = pair
self.bst.put(k, v)
for i in range(self.bst.size() - 1, 0, -1):
self.bst.delete_max()
self.assertEqual(self.bst.max_key(), self.key_val[i-1][0])
self.bst.delete_max()
self.assertEqual(self.bst.max_key(), None)
def test_delete(self):
# delete key from an empty bst
self.bst = binary_search_tree.BinarySearchTree()
self.bst.delete("a")
self.assertEqual(self.bst.root, None)
self.assertEqual(self.bst.size(), 0)
# delete key not present in bst
self.bst = binary_search_tree.BinarySearchTree()
self.bst.put("a", 1)
self.bst.delete("b")
self.assertEqual(self.bst.root.key, "a")
self.assertEqual(self.bst.size(), 1)
# delete key when bst only contains one key
self.bst = binary_search_tree.BinarySearchTree()
self.bst.put("a", 1)
self.assertEqual(self.bst.root.key, "a")
self.bst.delete("a")
self.assertEqual(self.bst.root, None)
self.assertEqual(self.bst.size(), 0)
# delete parent key when it only has a left child
self.bst = binary_search_tree.BinarySearchTree()
self.bst.put("b", 2)
self.bst.put("a", 1)
self.assertEqual(self.bst.root.left.key, "a")
self.bst.delete("b")
self.assertEqual(self.bst.root.key, "a")
self.assertEqual(self.bst.size(), 1)
# delete parent key when it only has a right child
self.bst = binary_search_tree.BinarySearchTree()
self.bst.put("a", 1)
self.bst.put("b", 2)
self.assertEqual(self.bst.root.right.key, "b")
self.bst.delete("a")
self.assertEqual(self.bst.root.key, "b")
self.assertEqual(self.bst.size(), 1)
# delete left child key
self.bst = binary_search_tree.BinarySearchTree()
self.bst.put("b", 2)
self.bst.put("a", 1)
self.assertEqual(self.bst.root.left.key, "a")
self.bst.delete("a")
self.assertEqual(self.bst.root.key, "b")
self.assertEqual(self.bst.size(), 1)
# delete right child key
self.bst = binary_search_tree.BinarySearchTree()
self.bst.put("a", 1)
self.bst.put("b", 2)
self.assertEqual(self.bst.root.right.key, "b")
self.bst.delete("b")
self.assertEqual(self.bst.root.key, "a")
self.assertEqual(self.bst.size(), 1)
# delete parent key when it has a left and right child
self.bst = binary_search_tree.BinarySearchTree()
self.bst.put("b", 2)
self.bst.put("a", 1)
self.bst.put("c", 3)
self.bst.delete("b")
self.assertEqual(self.bst.root.key, "c")
self.assertEqual(self.bst.size(), 2)
def test_keys(self):
self.bst = binary_search_tree.BinarySearchTree()
for pair in self.key_val:
k, v = pair
self.bst.put(k, v)
self.assertEqual(
self.bst.keys(),
["a", "b", "c", "d", "e", "f", "g", "h", "i"]
)
class TestDirectedGraph(unittest.TestCase):
"""
    Test Directed Graph Implementation
"""
def test_directed_graph(self):
# init
self.dg0 = digraph.Digraph()
self.dg1 = digraph.Digraph()
self.dg2 = digraph.Digraph()
self.dg3 = digraph.Digraph()
# populating
self.dg1.add_edge(1, 2)
self.dg1_rev = self.dg1.reverse() # reverse
self.dg2.add_edge(1, 2)
self.dg2.add_edge(1, 2)
self.dg3.add_edge(1, 2)
self.dg3.add_edge(1, 2)
self.dg3.add_edge(3, 1)
# test adj
self.assertTrue(2 in self.dg1.adj(1))
self.assertEqual(len(self.dg1.adj(1)), 1)
self.assertTrue(1 not in self.dg1.adj(2))
self.assertEqual(len(self.dg1.adj(2)), 0)
self.assertTrue(1 in self.dg1_rev.adj(2))
self.assertEqual(len(self.dg1_rev.adj(2)), 1)
self.assertTrue(2 not in self.dg1_rev.adj(1))
self.assertEqual(len(self.dg1_rev.adj(1)), 0)
self.assertTrue(2 in self.dg2.adj(1))
self.assertEqual(len(self.dg2.adj(1)), 2)
self.assertTrue(1 not in self.dg2.adj(2))
self.assertEqual(len(self.dg2.adj(2)), 0)
self.assertTrue(2 in self.dg3.adj(1))
self.assertTrue(1 in self.dg3.adj(3))
self.assertEqual(len(self.dg3.adj(1)), 2)
self.assertTrue(1 not in self.dg3.adj(2))
self.assertEqual(len(self.dg3.adj(2)), 0)
self.assertTrue(3 not in self.dg3.adj(1))
self.assertEqual(len(self.dg3.adj(3)), 1)
# test degree
self.assertEqual(self.dg1.outdegree(1), 1)
self.assertEqual(self.dg1.outdegree(2), 0)
self.assertEqual(self.dg1_rev.outdegree(2), 1)
self.assertEqual(self.dg1_rev.outdegree(1), 0)
self.assertEqual(self.dg2.outdegree(1), 2)
self.assertEqual(self.dg2.outdegree(2), 0)
self.assertEqual(self.dg3.outdegree(1), 2)
self.assertEqual(self.dg3.outdegree(2), 0)
self.assertEqual(self.dg3.outdegree(3), 1)
# test vertices
self.assertEqual(list(self.dg0.vertices()), [])
self.assertEqual(len(self.dg0.vertices()), 0)
self.assertTrue(1 in self.dg1.vertices())
self.assertTrue(2 in self.dg1.vertices())
self.assertEqual(len(self.dg1.vertices()), 2)
self.assertTrue(2 in self.dg1_rev.vertices())
self.assertTrue(1 in self.dg1_rev.vertices())
self.assertEqual(len(self.dg1_rev.vertices()), 2)
self.assertTrue(1 in self.dg2.vertices())
self.assertTrue(2 in self.dg2.vertices())
self.assertEqual(len(self.dg2.vertices()), 2)
self.assertTrue(1 in self.dg3.vertices())
self.assertTrue(2 in self.dg3.vertices())
self.assertTrue(3 in self.dg3.vertices())
self.assertEqual(len(self.dg3.vertices()), 3)
# test vertex_count
self.assertEqual(self.dg0.vertex_count(), 0)
self.assertEqual(self.dg1.vertex_count(), 2)
self.assertEqual(self.dg1_rev.vertex_count(), 2)
self.assertEqual(self.dg2.vertex_count(), 2)
self.assertEqual(self.dg3.vertex_count(), 3)
# test edge_count
self.assertEqual(self.dg0.edge_count(), 0)
self.assertEqual(self.dg1.edge_count(), 1)
self.assertEqual(self.dg1_rev.edge_count(), 1)
self.assertEqual(self.dg2.edge_count(), 2)
self.assertEqual(self.dg3.edge_count(), 3)
class TestQueue(unittest.TestCase):
"""
Test Queue Implementation
"""
def test_queue(self):
self.que = queue.Queue()
self.que.add(1)
self.que.add(2)
self.que.add(8)
self.que.add(5)
self.que.add(6)
self.assertEqual(self.que.remove(), 1)
self.assertEqual(self.que.size(), 4)
self.assertEqual(self.que.remove(), 2)
self.assertEqual(self.que.remove(), 8)
self.assertEqual(self.que.remove(), 5)
self.assertEqual(self.que.remove(), 6)
self.assertEqual(self.que.is_empty(), True)
class TestSinglyLinkedList(unittest.TestCase):
"""
Test Singly Linked List Implementation
"""
def test_singly_linked_list(self):
self.sl = singly_linked_list.SinglyLinkedList()
self.sl.add(10)
self.sl.add(5)
self.sl.add(30)
self.sl.remove(30)
self.assertEqual(self.sl.size, 2)
self.assertEqual(self.sl.search(30), False)
self.assertEqual(self.sl.search(5), True)
self.assertEqual(self.sl.search(10), True)
self.assertEqual(self.sl.remove(5), True)
self.assertEqual(self.sl.remove(10), True)
self.assertEqual(self.sl.size, 0)
class TestStack(unittest.TestCase):
"""
Test Stack Implementation
"""
def test_stack(self):
self.sta = stack.Stack()
self.sta.add(5)
self.sta.add(8)
self.sta.add(10)
self.sta.add(2)
self.assertEqual(self.sta.remove(), 2)
self.assertEqual(self.sta.is_empty(), False)
self.assertEqual(self.sta.size(), 3)
class TestUndirectedGraph(unittest.TestCase):
"""
Test Undirected Graph Implementation
"""
def test_undirected_graph(self):
# init
self.ug0 = undirected_graph.Undirected_Graph()
self.ug1 = undirected_graph.Undirected_Graph()
self.ug2 = undirected_graph.Undirected_Graph()
self.ug3 = undirected_graph.Undirected_Graph()
# populating
self.ug1.add_edge(1, 2)
self.ug2.add_edge(1, 2)
self.ug2.add_edge(1, 2)
self.ug3.add_edge(1, 2)
self.ug3.add_edge(1, 2)
self.ug3.add_edge(3, 1)
# test adj
self.assertTrue(2 in self.ug1.adj(1))
self.assertEqual(len(self.ug1.adj(1)), 1)
self.assertTrue(1 in self.ug1.adj(2))
        self.assertEqual(len(self.ug1.adj(2)), 1)
self.assertTrue(2 in self.ug2.adj(1))
self.assertEqual(len(self.ug2.adj(1)), 2)
self.assertTrue(1 in self.ug2.adj(2))
        self.assertEqual(len(self.ug2.adj(2)), 2)
self.assertTrue(2 in self.ug3.adj(1))
self.assertTrue(3 in self.ug3.adj(1))
self.assertEqual(len(self.ug3.adj(1)), 3)
self.assertTrue(1 in self.ug3.adj(2))
self.assertEqual(len(self.ug3.adj(2)), 2)
self.assertTrue(1 in self.ug3.adj(3))
self.assertEqual(len(self.ug3.adj(3)), 1)
# test degree
self.assertEqual(self.ug1.degree(1), 1)
self.assertEqual(self.ug1.degree(2), 1)
self.assertEqual(self.ug2.degree(1), 2)
self.assertEqual(self.ug2.degree(2), 2)
self.assertEqual(self.ug3.degree(1), 3)
self.assertEqual(self.ug3.degree(2), 2)
self.assertEqual(self.ug3.degree(3), 1)
# test vertices
self.assertEqual(list(self.ug0.vertices()), [])
self.assertEqual(len(self.ug0.vertices()), 0)
self.assertTrue(1 in self.ug1.vertices())
self.assertTrue(2 in self.ug1.vertices())
self.assertEqual(len(self.ug1.vertices()), 2)
self.assertTrue(1 in self.ug2.vertices())
self.assertTrue(2 in self.ug2.vertices())
self.assertEqual(len(self.ug2.vertices()), 2)
self.assertTrue(1 in self.ug3.vertices())
self.assertTrue(2 in self.ug3.vertices())
self.assertTrue(3 in self.ug3.vertices())
self.assertEqual(len(self.ug3.vertices()), 3)
# test vertex_count
self.assertEqual(self.ug0.vertex_count(), 0)
self.assertEqual(self.ug1.vertex_count(), 2)
self.assertEqual(self.ug2.vertex_count(), 2)
self.assertEqual(self.ug3.vertex_count(), 3)
# test edge_count
self.assertEqual(self.ug0.edge_count(), 0)
self.assertEqual(self.ug1.edge_count(), 1)
self.assertEqual(self.ug2.edge_count(), 2)
self.assertEqual(self.ug3.edge_count(), 3)
class TestUnionFind(unittest.TestCase):
"""
Test Union Find Implementation
"""
def test_union_find(self):
self.uf = union_find.UnionFind(4)
self.uf.make_set(4)
self.uf.union(1, 0)
self.uf.union(3, 4)
self.assertEqual(self.uf.find(1), 0)
self.assertEqual(self.uf.find(3), 4)
self.assertEqual(self.uf.is_connected(0, 1), True)
self.assertEqual(self.uf.is_connected(3, 4), True)
class TestUnionFindByRank(unittest.TestCase):
"""
Test Union Find Implementation
"""
def test_union_find_by_rank(self):
self.uf = union_find_by_rank.UnionFindByRank(6)
self.uf.make_set(6)
self.uf.union(1, 0)
self.uf.union(3, 4)
self.uf.union(2, 4)
self.uf.union(5, 2)
self.uf.union(6, 5)
self.assertEqual(self.uf.find(1), 1)
self.assertEqual(self.uf.find(3), 3)
# test tree is created by rank
self.uf.union(5, 0)
self.assertEqual(self.uf.find(2), 3)
self.assertEqual(self.uf.find(5), 3)
self.assertEqual(self.uf.find(6), 3)
self.assertEqual(self.uf.find(0), 3)
self.assertEqual(self.uf.is_connected(0, 1), True)
self.assertEqual(self.uf.is_connected(3, 4), True)
self.assertEqual(self.uf.is_connected(5, 3), True)
class TestUnionFindWithPathCompression(unittest.TestCase):
"""
Test Union Find Implementation
"""
def test_union_find_with_path_compression(self):
self.uf = (
union_find_with_path_compression
.UnionFindWithPathCompression(5)
)
self.uf.make_set(5)
self.uf.union(0, 1)
self.uf.union(2, 3)
self.uf.union(1, 3)
self.uf.union(4, 5)
self.assertEqual(self.uf.find(1), 0)
self.assertEqual(self.uf.find(3), 0)
self.assertEqual(self.uf.parent(3), 2)
self.assertEqual(self.uf.parent(5), 4)
self.assertEqual(self.uf.is_connected(3, 5), False)
self.assertEqual(self.uf.is_connected(4, 5), True)
self.assertEqual(self.uf.is_connected(2, 3), True)
# test tree is created by path compression
self.uf.union(5, 3)
self.assertEqual(self.uf.parent(3), 0)
self.assertEqual(self.uf.is_connected(3, 5), True)
class TestLCPSuffixArrays(unittest.TestCase):
def setUp(self):
super(TestLCPSuffixArrays, self).setUp()
self.case_1 = "aaaaaa"
self.s_array_1 = [5, 4, 3, 2, 1, 0]
self.rank_1 = [5, 4, 3, 2, 1, 0]
self.lcp_1 = [1, 2, 3, 4, 5, 0]
self.case_2 = "abcabcdd"
self.s_array_2 = [0, 2, 4, 1, 3, 5, 7, 6]
self.rank_2 = [0, 3, 1, 4, 2, 5, 7, 6]
self.lcp_2 = [3, 0, 2, 0, 1, 0, 1, 0]
self.case_3 = "kmckirrrmppp"
self.s_array_3 = [3, 4, 0, 2, 1, 11, 10, 9, 5, 8, 7, 6]
self.rank_3 = [2, 4, 3, 0, 1, 8, 11, 10, 9, 7, 6, 5]
self.lcp_3 = [0, 0, 1, 0, 1, 0, 1, 2, 0, 1, 2, 0]
def test_lcp_array(self):
lcp = lcp_array.lcp_array(self.case_1, self.s_array_1, self.rank_1)
self.assertEqual(lcp, self.lcp_1)
lcp = lcp_array.lcp_array(self.case_2, self.s_array_2, self.rank_2)
self.assertEqual(lcp, self.lcp_2)
lcp = lcp_array.lcp_array(self.case_3, self.s_array_3, self.rank_3)
self.assertEqual(lcp, self.lcp_3)
def test_suffix_array(self):
s_array, rank = lcp_array.suffix_array(self.case_1)
self.assertEqual(s_array, self.s_array_1)
self.assertEqual(rank, self.rank_1)
s_array, rank = lcp_array.suffix_array(self.case_2)
self.assertEqual(s_array, self.s_array_2)
self.assertEqual(rank, self.rank_2)
s_array, rank = lcp_array.suffix_array(self.case_3)
self.assertEqual(s_array, self.s_array_3)
self.assertEqual(rank, self.rank_3)
|
tests/example/reflexes/example_reflex.py
|
FarhanAliRaza/django-sockpuppet
| 371 |
139049
|
<reponame>FarhanAliRaza/django-sockpuppet
from sockpuppet.reflex import Reflex
class ExampleReflex(Reflex):
def increment(self, step=1):
self.session['count'] = int(self.element.dataset['count']) + step
class DecrementReflex(Reflex):
def decrement(self, step=1):
self.session['otherCount'] = int(self.element.dataset['count']) - step
class ParamReflex(Reflex):
def change_word(self):
self.word = 'space'
self.success = True
class FormReflex(Reflex):
def submit(self):
self.text_output = self.request.POST['text-input']
class ErrorReflex(Reflex):
def increment(self, step=1):
raise Exception('error happened')
class UserReflex(Reflex):
def get_user(self):
context = self.get_context_data()
self.user_reveal = context['object']
|
PhysicsTools/PatExamples/bin/PatBasicFWLiteJetAnalyzer_Selector_cfg.py
|
ckamtsikis/cmssw
| 852 |
139050
|
<filename>PhysicsTools/PatExamples/bin/PatBasicFWLiteJetAnalyzer_Selector_cfg.py<gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
process = cms.Process("FWLitePlots")
#input stuff for Run/Lumi selection with the "JSON"-formatted files from the PVT group
import FWCore.PythonUtilities.LumiList as LumiList
# setup process
process = cms.Process("FWLitePlots")
# get JSON file correctly parsed
JSONfile = 'DCSTRONLY_132440-140388'
myList = LumiList.LumiList (filename = JSONfile).getCMSSWString().split(',')
# Set up the parameters for the calo jet analyzer
process.jetStudies = cms.PSet(
# input parameter sets
jetSrc = cms.InputTag('selectedPatJets'),
pfJetSrc = cms.InputTag('selectedPatJetsAK5PF'),
metSrc = cms.InputTag('patMETs'),
pfMetSrc = cms.InputTag('patMETsPF'),
useCalo = cms.bool(True)
)
# Set up the parameters for the PF jet analyzer
process.pfJetStudies = process.jetStudies.clone( useCalo = cms.bool(False) )
process.load('PhysicsTools.SelectorUtils.pfJetIDSelector_cfi')
process.load('PhysicsTools.SelectorUtils.jetIDSelector_cfi')
process.plotParameters = cms.PSet (
doTracks = cms.bool(False),
useMC = cms.bool(False)
)
process.inputs = cms.PSet (
fileNames = cms.vstring(
'reco_7TeV_380_pat.root'
),
lumisToProcess = cms.untracked.VLuminosityBlockRange( myList )
)
process.outputs = cms.PSet (
outputName = cms.string('jetPlots.root')
)
|
src/ZODB/tests/testConfig.py
|
timgates42/ZODB
| 514 |
139057
|
##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import doctest
import tempfile
import unittest
import transaction
import ZODB.config
import ZODB.tests.util
from ZODB.POSException import ReadOnlyError
class ConfigTestBase(ZODB.tests.util.TestCase):
def _opendb(self, s):
return ZODB.config.databaseFromString(s)
def tearDown(self):
ZODB.tests.util.TestCase.tearDown(self)
if getattr(self, "storage", None) is not None:
self.storage.cleanup()
def _test(self, s):
db = self._opendb(s)
try:
self.storage = db._storage
# Do something with the database to make sure it works
cn = db.open()
rt = cn.root()
rt["test"] = 1
transaction.commit()
finally:
db.close()
class ZODBConfigTest(ConfigTestBase):
def test_map_config1(self):
self._test(
"""
<zodb>
<mappingstorage/>
</zodb>
""")
def test_map_config2(self):
self._test(
"""
<zodb>
<mappingstorage/>
cache-size 1000
</zodb>
""")
def test_file_config1(self):
path = tempfile.mktemp()
self._test(
"""
<zodb>
<filestorage>
path %s
</filestorage>
</zodb>
""" % path)
def test_file_config2(self):
path = tempfile.mktemp()
# first pass to actually create database file
self._test(
"""
<zodb>
<filestorage>
path %s
</filestorage>
</zodb>
""" % path)
# write operations must be disallowed on read-only access
cfg = """
<zodb>
<filestorage>
path %s
create false
read-only true
</filestorage>
</zodb>
""" % path
self.assertRaises(ReadOnlyError, self._test, cfg)
def test_demo_config(self):
cfg = """
<zodb unused-name>
<demostorage>
name foo
<mappingstorage/>
</demostorage>
</zodb>
"""
self._test(cfg)
def database_xrefs_config():
r"""
>>> db = ZODB.config.databaseFromString(
... "<zodb>\n<mappingstorage>\n</mappingstorage>\n</zodb>\n")
>>> db.xrefs
True
>>> db = ZODB.config.databaseFromString(
... "<zodb>\nallow-implicit-cross-references true\n"
... "<mappingstorage>\n</mappingstorage>\n</zodb>\n")
>>> db.xrefs
True
>>> db = ZODB.config.databaseFromString(
... "<zodb>\nallow-implicit-cross-references false\n"
... "<mappingstorage>\n</mappingstorage>\n</zodb>\n")
>>> db.xrefs
False
"""
def multi_databases():
    r"""If there are multiple zodb sections -> multidatabase
>>> db = ZODB.config.databaseFromString('''
... <zodb>
... <mappingstorage>
... </mappingstorage>
... </zodb>
... <zodb Foo>
... <mappingstorage>
... </mappingstorage>
... </zodb>
... <zodb>
... database-name Bar
... <mappingstorage>
... </mappingstorage>
... </zodb>
... ''')
>>> sorted(db.databases)
['', 'Bar', 'foo']
>>> db.database_name
''
>>> db.databases[db.database_name] is db
True
>>> db.databases['foo'] is not db
True
>>> db.databases['Bar'] is not db
True
>>> db.databases['Bar'] is not db.databases['foo']
True
Can't have repeats:
>>> ZODB.config.databaseFromString('''
... <zodb 1>
... <mappingstorage>
... </mappingstorage>
... </zodb>
... <zodb 1>
... <mappingstorage>
... </mappingstorage>
... </zodb>
... <zodb 1>
... <mappingstorage>
... </mappingstorage>
... </zodb>
... ''') # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ConfigurationSyntaxError:
section names must not be re-used within the same container:'1' (line 9)
>>> ZODB.config.databaseFromString('''
... <zodb>
... <mappingstorage>
... </mappingstorage>
... </zodb>
... <zodb>
... <mappingstorage>
... </mappingstorage>
... </zodb>
... ''') # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: database_name '' already in databases
"""
def test_suite():
suite = unittest.TestSuite()
suite.addTest(doctest.DocTestSuite(
setUp=ZODB.tests.util.setUp,
tearDown=ZODB.tests.util.tearDown,
checker=ZODB.tests.util.checker))
suite.addTest(unittest.makeSuite(ZODBConfigTest))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
src/ostorlab/runtimes/cloud/runtime.py
|
bbhunter/ostorlab
| 113 |
139082
|
"""Remote runtime runs on Ostorlab cloud.
The remote runtime provides capabilities identical to the local runtime with extra features, like data persistence,
richer data visualization, automated scaling for better performance, an improved data warehouse for better
detection, and several other improvements.
"""
from typing import List, Optional, Dict, Union
import click
import markdownify
import rich
from rich import markdown, panel
from ostorlab import configuration_manager
from ostorlab.apis import agent_details
from ostorlab.apis import agent_group
from ostorlab.apis import assets as api_assets
from ostorlab.apis import create_agent_scan
from ostorlab.apis import scan_list
from ostorlab.apis import scan_stop
from ostorlab.apis import vulnz_describe
from ostorlab.apis import vulnz_list
from ostorlab.apis.runners import authenticated_runner
from ostorlab.apis.runners import runner
from ostorlab.assets import asset as base_asset
from ostorlab.cli import console as cli_console
from ostorlab.cli import dumpers
from ostorlab.runtimes import definitions
from ostorlab.runtimes import runtime
from ostorlab.utils import styles
AgentType = Dict[str, Union[str, List]]
console = cli_console.Console()
class CloudRuntime(runtime.Runtime):
"""Cloud runtime runs agents from Ostorlab Cloud."""
def __init__(self, *args, **kwargs) -> None:
"""cloud runtime instance running on Ostorlab cloud.
cloud runtime communicates over a GraphQL API.
"""
super().__init__()
del args, kwargs
def can_run(self, agent_group_definition: definitions.AgentGroupDefinition) -> bool:
"""Checks if the runtime is capable of running the provided agent run definition.
Args:
agent_group_definition: The agent group definition of a set of agents.
Returns:
True if can run, false otherwise.
"""
try:
config_manager = configuration_manager.ConfigurationManager()
if not config_manager.is_authenticated:
console.error('You need to be authenticated before using the cloud runtime.')
return False
do_agents_exist = self._check_agents_exist(agent_group_definition)
return do_agents_exist
except runner.ResponseError as error_msg:
console.error(error_msg)
return False
def scan(
self,
title: Optional[str],
agent_group_definition: definitions.AgentGroupDefinition,
assets: Optional[List[base_asset.Asset]]
) -> None:
"""Triggers a scan using the provided agent group definition and asset target.
Args:
title: The title of the scan.
agent_group_definition: The agent group definition of a set of agents.
assets: The scan target asset.
Returns:
None
"""
try:
if len(assets) > 1:
raise NotImplementedError()
else:
asset = assets[0]
                # the local runtime supports multiple assets; the cloud runtime currently handles a single asset.
api_runner = authenticated_runner.AuthenticatedAPIRunner()
agents = self._agents_from_agent_group_def(api_runner, agent_group_definition)
name = agent_group_definition.name
description = agent_group_definition.description
console.info('Creating agent group')
agent_group_id = self._create_agent_group(api_runner, name, description, agents)
console.info('Creating asset')
asset_id = self._create_asset(api_runner, asset)
console.info('Creating scan')
self._create_scan(api_runner, asset_id, agent_group_id, title)
console.success('Scan created successfully.')
except runner.ResponseError as error_msg:
console.error(error_msg)
def stop(self, scan_id: int) -> None:
"""Stops a scan.
Args:
scan_id: The id of the scan to stop.
"""
try:
api_runner = authenticated_runner.AuthenticatedAPIRunner()
response = api_runner.execute(scan_stop.ScanStopAPIRequest(scan_id))
if response.get('errors') is not None:
console.error(f'Scan with id {scan_id} not found')
else:
console.success('Scan stopped successfully')
except runner.Error:
console.error('Could not stop scan.')
def list(self, page: int = 1, number_elements: int = 10) -> List[runtime.Scan]:
"""Lists scans managed by runtime.
Args:
page: Page number for list pagination (default 1).
number_elements: count of elements to show in the listed page (default 10).
Returns:
List of scan objects.
"""
try:
api_runner = authenticated_runner.AuthenticatedAPIRunner()
response = api_runner.execute(scan_list.ScansListAPIRequest(page, number_elements))
scans = response['data']['scans']['scans']
return [
runtime.Scan(
id=scan['id'],
asset=scan['assetType'],
created_time=scan['createdTime'],
progress=scan['progress'],
) for scan in scans
]
except runner.Error:
console.error('Could not fetch scans.')
def install(self) -> None:
"""No installation action.
Returns:
None
"""
pass
def list_vulnz(self, scan_id: int, page: int = 1, number_elements: int = 10):
"""List vulnz from the cloud using and render them in a table.
Args:
scan_id: scan id to list vulnz from.
page: optional page number.
number_elements: optional number of elements per page.
"""
try:
api_runner = authenticated_runner.AuthenticatedAPIRunner()
response = api_runner.execute(
vulnz_list.VulnzListAPIRequest(scan_id=scan_id, number_elements=number_elements, page=page))
vulnerabilities = response['data']['scan']['vulnerabilities']['vulnerabilities']
vulnz_list_table = []
for vulnerability in vulnerabilities:
vulnz_list_table.append({
'id': str(vulnerability['id']),
'risk_rating': styles.style_risk(vulnerability['detail']['riskRating'].upper()),
'cvss_v3_vector': vulnerability['detail']['cvssV3Vector'],
'title': vulnerability['detail']['title'],
'short_description': markdown.Markdown(vulnerability['detail']['shortDescription']),
})
columns = {
'Id': 'id',
'Title': 'title',
'Risk Rating': 'risk_rating',
'CVSS V3 Vector': 'cvss_v3_vector',
'Short Description': 'short_description',
}
title = f'Scan {scan_id}: Found {len(vulnz_list_table)} vulnerabilities.'
console.table(columns=columns, data=vulnz_list_table, title=title)
has_next_page: bool = response['data']['scan']['vulnerabilities']['pageInfo']['hasNext']
num_pages = response['data']['scan']['vulnerabilities']['pageInfo']['numPages']
if has_next_page is True:
console.info('Fetch next page?')
page = page + 1
                    if click.confirm(f'page {page} of {num_pages}'):
self.list_vulnz(scan_id=scan_id, page=page, number_elements=number_elements)
except runner.Error:
console.error(f'scan with id {scan_id} does not exist.')
def _print_vulnerability(self, vulnerability):
"""Print vulnerability details"""
if vulnerability is None:
return
vulnz_list_data = [
{'id': str(vulnerability['id']),
'risk_rating': styles.style_risk(vulnerability['customRiskRating'].upper()),
'cvss_v3_vector': vulnerability['detail']['cvssV3Vector'],
'title': vulnerability['detail']['title'],
'short_description': markdown.Markdown(vulnerability['detail']['shortDescription']),
}
]
columns = {
'Id': 'id',
'Title': 'title',
'Risk Rating': 'risk_rating',
'CVSSv3 Vector': 'cvss_v3_vector',
'Short Description': 'short_description',
}
title = f'Describing vulnerability {vulnerability["id"]}'
console.table(columns=columns, data=vulnz_list_data, title=title)
rich.print(panel.Panel(markdown.Markdown(vulnerability['detail']['description']), title='Description'))
rich.print(panel.Panel(markdown.Markdown(vulnerability['detail']['recommendation']), title='Recommendation'))
if vulnerability['technicalDetailFormat'] == 'HTML':
rich.print(panel.Panel(markdown.Markdown(markdownify.markdownify(vulnerability['technicalDetail'])),
title='Technical details'))
else:
rich.print(panel.Panel(markdown.Markdown(vulnerability['technicalDetail']), title='Technical details'))
def describe_vuln(self, scan_id: int, vuln_id: int, page: int = 1, number_elements: int = 10):
"""Fetch and show the full details of specific vuln from the cloud, or all the vulnz for a specific scan.
Args:
scan_id: scan id to show all vulnerabilities.
vuln_id: optional vuln id to describe.
page: page number.
number_elements: number of items to show per page.
"""
try:
if vuln_id is None:
raise click.BadParameter('You should at least provide --vuln_id or --scan_id.')
api_runner = authenticated_runner.AuthenticatedAPIRunner()
if scan_id is not None:
response = api_runner.execute(
vulnz_describe.ScanVulnzDescribeAPIRequest(scan_id=scan_id,
vuln_id=vuln_id,
page=page,
number_elements=number_elements))
vulnerabilities = response['data']['scan']['vulnerabilities']['vulnerabilities']
for v in vulnerabilities:
self._print_vulnerability(v)
num_pages = response['data']['scan']['vulnerabilities']['pageInfo']['numPages']
console.success(f'Vulnerabilities listed successfully. page {page} of {num_pages} pages')
has_next_page: bool = response['data']['scan']['vulnerabilities']['pageInfo']['hasNext']
if has_next_page is True:
console.info('Fetch next page?')
page = page + 1
if click.confirm(f'page {page} of {num_pages}'):
self.describe_vuln(scan_id=scan_id, vuln_id=vuln_id, page=page, number_elements=number_elements)
except runner.ResponseError:
console.error('Vulnerability / scan not found.')
def _fetch_scan_vulnz(self, scan_id: int, page: int = 1, number_elements: int = 10):
api_runner = authenticated_runner.AuthenticatedAPIRunner()
return api_runner.execute(
vulnz_list.VulnzListAPIRequest(scan_id=scan_id, number_elements=number_elements, page=page))
def dump_vulnz(self, scan_id: int, dumper: dumpers.VulnzDumper, page: int = 1, number_elements: int = 10):
"""fetch vulnz from the cloud runtime.
fetching all vulnz for a specific scan and saving in a specific format, in order to fetch all vulnerabilities
{number_elements|10} for each request, this function run in an infinity loop (recursive)
Args:
dumper: VulnzDumper class
scan_id: scan id to dump vulnz from.
page: page number
number_elements: number of elements per reach page
"""
has_next_page = True
with console.status(f'fetching vulnerabilities for scan scan-id={scan_id}'):
while has_next_page:
response = self._fetch_scan_vulnz(scan_id, page=page, number_elements=number_elements)
has_next_page: bool = response['data']['scan']['vulnerabilities']['pageInfo']['hasNext'] or False
page = page + 1
vulnerabilities = response['data']['scan']['vulnerabilities']['vulnerabilities']
vulnz_list_table = []
for vulnerability in vulnerabilities:
vuln = {
'id': str(vulnerability['id']),
'risk_rating': vulnerability['detail']['riskRating'],
'cvss_v3_vector': vulnerability['detail']['cvssV3Vector'],
'title': vulnerability['detail']['title'],
'short_description': vulnerability['detail']['shortDescription'],
'description': vulnerability['detail']['description'],
'recommendation': vulnerability['detail']['recommendation'],
'technical_detail': vulnerability['technicalDetail'],
}
vulnz_list_table.append(vuln)
dumper.dump(vulnz_list_table)
console.success(f'{len(vulnerabilities)} Vulnerabilities saved to {dumper.output_path}')
def _check_agents_exist(self, agent_group_definition: definitions.AgentGroupDefinition) -> bool:
"""Send API requests to check if agents exist."""
api_runner = authenticated_runner.AuthenticatedAPIRunner()
for agent in agent_group_definition.agents:
response = api_runner.execute(agent_details.AgentDetailsAPIRequest(agent.key))
if response.get('errors') is not None:
console.error(f'The agent {agent.key} does not exist.')
return False
return True
def _agents_from_agent_group_def(self,
api_runner: runner.APIRunner,
agent_group_definition: definitions.AgentGroupDefinition) -> List[AgentType]:
"""Creates list of agents dicts from an agent group definition."""
agents = []
for agent_def in agent_group_definition.agents:
agent_detail = api_runner.execute(agent_details.AgentDetailsAPIRequest(agent_def.key))
agent_version = agent_detail['data']['agent']['versions']['versions'][0]['version']
agent = {}
agent['agentKey'] = agent_def.key
agent['version'] = agent_version
agent_args = []
for arg in agent_def.args:
agent_args.append({
'name': arg.name,
'type': arg.type,
'value': arg.value
})
agent['args'] = agent_args
agents.append(agent)
return agents
def _create_agent_group(self, api_runner: runner.APIRunner, name: str, description: str, agents: List[AgentType]):
"""Sends an API request to create an agent group.
Returns:
id of the created agent group.
"""
request = agent_group.CreateAgentGroupAPIRequest(name, description, agents)
response = api_runner.execute(request)
agent_group_id = response['data']['publishAgentGroup']['agentGroup']['id']
return agent_group_id
def _create_asset(self, api_runner: runner.APIRunner, asset: base_asset.Asset):
"""Sends an API request to create an asset.
Returns:
id of the created asset.
"""
request = api_assets.CreateAssetAPIRequest(asset)
response = api_runner.execute(request)
asset_id = response['data']['createAsset']['asset']['id']
return asset_id
def _create_scan(self, api_runner: runner.APIRunner, asset_id: int, agent_group_id: int, title: str):
"""Sends an API request to create a scan."""
request = create_agent_scan.CreateAgentScanAPIRequest(title, asset_id, agent_group_id)
_ = api_runner.execute(request)
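# Illustrative note (added, not part of the original module): the private helpers
# above compose into the scan-creation flow, assuming an authenticated runner:
#   1. _check_agents_exist() validates every agent key in the group definition.
#   2. _agents_from_agent_group_def() expands the definition into agent dicts
#      (key, latest version, args).
#   3. _create_agent_group() publishes the group and returns its id.
#   4. _create_asset() registers the asset and returns its id.
#   5. _create_scan() ties the asset id and agent group id together with a title.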
|
scripts/external_libs/pyzmq-14.5.0/python3/ucs4/64bit/zmq/backend/cffi/_cffi.py
|
alialnu/trex-core
| 652 |
139139
|
<gh_stars>100-1000
# coding: utf-8
"""The main CFFI wrapping of libzmq"""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import json
import os
from os.path import dirname, join
from cffi import FFI
from zmq.utils.constant_names import all_names, no_prefix
base_zmq_version = (3,2,2)
def load_compiler_config():
"""load pyzmq compiler arguments"""
import zmq
zmq_dir = dirname(zmq.__file__)
zmq_parent = dirname(zmq_dir)
fname = join(zmq_dir, 'utils', 'compiler.json')
if os.path.exists(fname):
with open(fname) as f:
cfg = json.load(f)
else:
cfg = {}
cfg.setdefault("include_dirs", [])
cfg.setdefault("library_dirs", [])
cfg.setdefault("runtime_library_dirs", [])
cfg.setdefault("libraries", ["zmq"])
# cast to str, because cffi can't handle unicode paths (?!)
cfg['libraries'] = [str(lib) for lib in cfg['libraries']]
for key in ("include_dirs", "library_dirs", "runtime_library_dirs"):
# interpret paths relative to parent of zmq (like source tree)
abs_paths = []
for p in cfg[key]:
if p.startswith('zmq'):
p = join(zmq_parent, p)
abs_paths.append(str(p))
cfg[key] = abs_paths
return cfg
def zmq_version_info():
"""Get libzmq version as tuple of ints"""
major = ffi.new('int*')
minor = ffi.new('int*')
patch = ffi.new('int*')
C.zmq_version(major, minor, patch)
return (int(major[0]), int(minor[0]), int(patch[0]))
cfg = load_compiler_config()
ffi = FFI()
def _make_defines(names):
_names = []
for name in names:
define_line = "#define %s ..." % (name)
_names.append(define_line)
return "\n".join(_names)
c_constant_names = []
for name in all_names:
if no_prefix(name):
c_constant_names.append(name)
else:
c_constant_names.append("ZMQ_" + name)
# load ffi definitions
here = os.path.dirname(__file__)
with open(os.path.join(here, '_cdefs.h')) as f:
_cdefs = f.read()
with open(os.path.join(here, '_verify.c')) as f:
_verify = f.read()
ffi.cdef(_cdefs)
ffi.cdef(_make_defines(c_constant_names))
try:
C = ffi.verify(_verify,
modulename='_cffi_ext',
libraries=cfg['libraries'],
include_dirs=cfg['include_dirs'],
library_dirs=cfg['library_dirs'],
runtime_library_dirs=cfg['runtime_library_dirs'],
)
_version_info = zmq_version_info()
except Exception as e:
raise ImportError("PyZMQ CFFI backend couldn't find zeromq: %s\n"
"Please check that you have zeromq headers and libraries." % e)
if _version_info < (3,2,2):
raise ImportError("PyZMQ CFFI backend requires zeromq >= 3.2.2,"
" but found %i.%i.%i" % _version_info
)
nsp = new_sizet_pointer = lambda length: ffi.new('size_t*', length)
new_uint64_pointer = lambda: (ffi.new('uint64_t*'),
nsp(ffi.sizeof('uint64_t')))
new_int64_pointer = lambda: (ffi.new('int64_t*'),
nsp(ffi.sizeof('int64_t')))
new_int_pointer = lambda: (ffi.new('int*'),
nsp(ffi.sizeof('int')))
new_binary_data = lambda length: (ffi.new('char[%d]' % (length)),
nsp(ffi.sizeof('char') * length))
value_uint64_pointer = lambda val : (ffi.new('uint64_t*', val),
ffi.sizeof('uint64_t'))
value_int64_pointer = lambda val: (ffi.new('int64_t*', val),
ffi.sizeof('int64_t'))
value_int_pointer = lambda val: (ffi.new('int*', val),
ffi.sizeof('int'))
value_binary_data = lambda val, length: (ffi.new('char[%d]' % (length + 1), val),
ffi.sizeof('char') * length)
IPC_PATH_MAX_LEN = C.get_ipc_path_max_len()
|
Python-Regular-Expressions/emails.py
|
sagarsaliya/code_snippets
| 9,588 |
139151
|
import re
emails = '''
<EMAIL>
<EMAIL>
<EMAIL>
'''
pattern = re.compile(r'[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+')
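# Pattern breakdown (comment added for clarity): one or more local-part characters
# ([a-zA-Z0-9_.+-]+), a literal '@', one or more domain-label characters
# ([a-zA-Z0-9-]+), then a literal '.' followed by the rest of the domain
# ([a-zA-Z0-9-.]+), e.g. 'name.surname+tag@example-mail.co.uk'.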
matches = pattern.finditer(emails)
for match in matches:
print(match)
|
tests/unit/test_curved_polygon.py
|
kianmeng/bezier
| 165 |
139198
|
<filename>tests/unit/test_curved_polygon.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest.mock
import numpy as np
from tests.unit import utils
class TestCurvedPolygon(utils.NumPyTestCase):
NODES0 = np.asfortranarray([[0.0, 0.5, 1.0], [0.0, -1.0, 0.0]])
NODES1 = np.asfortranarray([[1.0, 0.5, 0.0], [0.0, 1.0, 0.0]])
COLOR = (0.125, 0.125, 0.0)
@staticmethod
def _get_target_class():
from bezier import curved_polygon
return curved_polygon.CurvedPolygon
def _make_one(self, *args, **kwargs):
klass = self._get_target_class()
return klass(*args, **kwargs)
def _make_default(self):
import bezier
edge0 = bezier.Curve(self.NODES0, 2)
edge1 = bezier.Curve(self.NODES1, 2)
return self._make_one(edge0, edge1)
def test_constructor(self):
import bezier
edge0 = bezier.Curve(self.NODES0, 2)
edge1 = bezier.Curve(self.NODES1, 2)
curved_poly = self._make_one(edge0, edge1)
self.assertEqual(curved_poly._edges, (edge0, edge1))
self.assertEqual(curved_poly._num_sides, 2)
self.assertIsNone(curved_poly._metadata)
def test_constructor_without_verify(self):
import bezier
edge0 = bezier.Curve(self.NODES0, 2)
with self.assertRaises(ValueError):
self._make_one(edge0)
curved_poly = self._make_one(edge0, verify=False)
self.assertEqual(curved_poly._edges, (edge0,))
self.assertEqual(curved_poly._num_sides, 1)
self.assertIsNone(curved_poly._metadata)
def test_constructor_with_metadata(self):
import bezier
edge0 = bezier.Curve(self.NODES0, 2)
edge1 = bezier.Curve(self.NODES1, 2)
metadata = ((0, 0.0, 0.5), (4, 0.5, 1.0))
curved_poly = self._make_one(edge0, edge1, metadata=metadata)
self.assertEqual(curved_poly._edges, (edge0, edge1))
self.assertEqual(curved_poly._num_sides, 2)
self.assertEqual(curved_poly._metadata, metadata)
def test__verify_too_few(self):
with self.assertRaises(ValueError):
self._make_one()
with self.assertRaises(ValueError):
self._make_one(None)
def test__verify_bad_dimension(self):
import bezier
nodes0 = np.asfortranarray([[1.0, 2.0], [1.0, 2.0]])
edge0 = bezier.Curve(nodes0, 1)
edge1 = bezier.Curve(self.NODES1, 2)
with self.assertRaises(ValueError):
self._make_one(edge0, edge1)
def test__verify_not_aligned(self):
import bezier
edge0 = bezier.Curve(np.asfortranarray([[0.0, 0.0]]), 1)
edge1 = bezier.Curve(self.NODES1, 2)
with self.assertRaises(ValueError):
self._make_one(edge0, edge1)
def test_num_sides_property(self):
curved_poly = self._make_default()
self.assertIs(curved_poly.num_sides, 2)
def test___dict___property(self):
curved_poly = self._make_default()
props_dict = curved_poly.__dict__
expected = {
"_edges": curved_poly._edges,
"_num_sides": curved_poly._num_sides,
}
self.assertEqual(props_dict, expected)
# Check that modifying ``props_dict`` won't modify ``curved_poly``.
expected["_num_sides"] = 5
self.assertNotEqual(curved_poly._num_sides, expected["_num_sides"])
def test_area(self):
curved_poly = self._make_default()
self.assertEqual(curved_poly.area, 2.0 / 3.0)
def test___repr__(self):
curved_poly = self._make_default()
self.assertEqual(repr(curved_poly), "<CurvedPolygon (num_sides=2)>")
@unittest.mock.patch("bezier._plot_helpers.new_axis")
@unittest.mock.patch("bezier._plot_helpers.add_patch")
def test_plot_defaults(self, add_patch_mock, new_axis_mock):
ax = unittest.mock.Mock(spec=[])
new_axis_mock.return_value = ax
curved_poly = self._make_default()
pts_per_edge = 16
result = curved_poly.plot(pts_per_edge)
self.assertIs(result, ax)
# Verify mocks.
new_axis_mock.assert_called_once_with()
add_patch_mock.assert_called_once_with(
ax, None, pts_per_edge, *curved_poly._edges
)
@unittest.mock.patch("bezier._plot_helpers.new_axis")
@unittest.mock.patch("bezier._plot_helpers.add_patch")
def test_plot_explicit(self, add_patch_mock, new_axis_mock):
ax = unittest.mock.Mock(spec=[])
color = (0.5, 0.5, 0.5)
curved_poly = self._make_default()
pts_per_edge = 16
result = curved_poly.plot(pts_per_edge, color=color, ax=ax)
self.assertIs(result, ax)
# Verify mocks.
new_axis_mock.assert_not_called()
add_patch_mock.assert_called_once_with(
ax, color, pts_per_edge, *curved_poly._edges
)
|
skdist/distribute/search.py
|
synapticarbors/sk-dist
| 292 |
139203
|
<gh_stars>100-1000
"""
Distributed grid search meta-estimators
"""
import time
import numbers
import random
import warnings
import numpy as np
import pandas as pd
from copy import copy
from abc import ABCMeta
from joblib import Parallel, delayed
from joblib import logger  # provides short_format_time used in _fit_and_score
from sklearn.model_selection import (
ParameterGrid,
GridSearchCV,
RandomizedSearchCV,
ParameterSampler,
check_cv,
)
from sklearn.metrics import check_scoring
from sklearn.exceptions import FitFailedWarning, NotFittedError
from sklearn.base import BaseEstimator, is_classifier
from sklearn.utils.metaestimators import if_delegate_has_method
from sklearn.utils.validation import indexable
from functools import partial
from numpy.ma import MaskedArray
from scipy.stats import rankdata
from scipy.sparse import issparse
from itertools import product
from collections import defaultdict
from .validation import (
_check_estimator,
_check_base_estimator,
_validate_params,
_validate_models,
_validate_names,
_validate_estimators,
_check_n_iter,
_is_arraylike,
_num_samples,
_safe_indexing,
_check_is_fitted,
)
from .utils import (
_multimetric_score,
_num_samples,
_aggregate_score_dicts,
_score,
_check_multimetric_scoring,
_safe_split,
_dict_slice_remove,
)
from .base import _clone, _get_value, _parse_partitions
__all__ = ["DistGridSearchCV", "DistRandomizedSearchCV", "DistMultiModelSearch"]
def _sample_one(n_iter, param_distributions, random_state=None):
""" Sample from param distributions for one model """
return list(
ParameterSampler(
param_distributions,
n_iter=_check_n_iter(n_iter, param_distributions),
random_state=random_state,
)
)
def _raw_sampler(models, n_params=None, n=None, random_state=None):
""" Sample from param distributions for each model """
if n_params is None:
if n is None:
raise Exception("Must supply either 'n_params' or 'n' as arguments")
else:
n_params = [n] * len(models)
param_sets = []
for index in range(len(models)):
sampler = _sample_one(
n_params[index], models[index][2], random_state=random_state
)
for sample_index in range(len(sampler)):
param_set = {
"model_index": index,
"params_index": sample_index,
"param_set": sampler[sample_index],
}
param_sets.append(param_set)
return param_sets
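# Illustrative sketch (added; the estimators and grids below are hypothetical,
# not part of sk-dist): shows the ('name', estimator, param_set) tuples expected
# by _raw_sampler and the shape of the parameter sets it returns.
def _example_raw_sampler():
    from sklearn.linear_model import LogisticRegression
    from sklearn.tree import DecisionTreeClassifier

    models = [
        ("lr", LogisticRegression(), {"C": [0.1, 1.0, 10.0]}),
        ("dt", DecisionTreeClassifier(), {"max_depth": [3, 5, 10]}),
    ]
    # Two samples per model -> four dicts, each carrying model_index,
    # params_index and the sampled param_set.
    return _raw_sampler(models, n=2, random_state=0)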
def _fit_one_fold(fit_set, models, X, y, scoring, fit_params):
"""
Fits the given estimator on one fold of training data.
Scores the fitted estimator against the test fold.
"""
train = fit_set[0][0]
test = fit_set[0][1]
estimator_ = _clone(models[fit_set[1]["model_index"]][1])
parameters = fit_set[1]["param_set"]
X_train, y_train = _safe_split(estimator_, X, y, train)
X_test, y_test = _safe_split(estimator_, X, y, test, train)
if parameters is not None:
estimator_.set_params(**parameters)
estimator_.fit(X_train, y_train, **fit_params)
scorer = check_scoring(estimator_, scoring=scoring)
is_multimetric = not callable(scorer)
out_dct = fit_set[1]
out_dct["score"] = _score(estimator_, X_test, y_test, scorer, is_multimetric)
return out_dct
def _fit_batch(
X,
y,
folds,
param_sets,
models,
n,
scoring,
fit_params,
random_state=None,
sc=None,
partitions="auto",
n_jobs=None,
):
"""
Fits a batch of combinations of parameter sets, models
and cross-validation folds. Returns the results as pandas
DataFrames.
"""
fit_sets = product(folds, param_sets)
if sc is None:
scores = Parallel(n_jobs=n_jobs)(
delayed(_fit_one_fold)(x, models, X, y, scoring, fit_params)
for x in fit_sets
)
else:
fit_sets = list(fit_sets)
partitions = _parse_partitions(partitions, len(fit_sets))
scores = (
sc.parallelize(fit_sets, numSlices=partitions)
.map(lambda x: _fit_one_fold(x, models, X, y, scoring, fit_params))
.collect()
)
param_results = _get_results(scores)
model_results = (
param_results.groupby(["model_index"])["score"]
.max()
.reset_index()
.sort_values("model_index")
)
return param_results, model_results
def _get_results(scores):
""" Converts 'scores' list to pandas DataFrame """
cols = ["model_index", "params_index", "param_set", "score"]
df = pd.DataFrame(scores, columns=cols).sort_values(["model_index", "params_index"])
if len(df) == 0:
return pd.DataFrame(columns=cols)
return (
df.groupby(["model_index", "params_index"])
.agg({"score": "mean", "param_set": "first"})
.reset_index()
.sort_values(["model_index", "params_index"])[cols]
)
def _index_param_value(X, v, indices):
""" Private helper function for parameter value indexing """
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
return v
if issparse(v):
v = v.tocsr()
return _safe_indexing(v, indices)
def _fit_and_score(
estimator,
X,
y,
scorer,
train,
test,
verbose,
parameters,
fit_params,
return_train_score=False,
return_parameters=False,
return_n_test_samples=False,
return_times=False,
error_score="raise",
):
""" Fit estimator and compute scores for a given dataset split """
estimator_ = _clone(_get_value(estimator))
if verbose > 1:
if parameters is None:
msg = ""
else:
msg = "%s" % (
", ".join("%s=%s" % (k, v) for k, v in list(parameters.items()))
)
print(("[CV] %s %s" % (msg, (64 - len(msg)) * ".")))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict(
[(k, _index_param_value(X, v, train)) for k, v in list(fit_params.items())]
)
test_scores = {}
train_scores = {}
if parameters is not None:
estimator_.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator_, X, y, train)
X_test, y_test = _safe_split(estimator_, X, y, test, train)
is_multimetric = not callable(scorer)
n_scorers = len(list(scorer.keys())) if is_multimetric else 1
try:
if y_train is None:
estimator_.fit(X_train, **fit_params)
else:
estimator_.fit(X_train, y_train, **fit_params)
except Exception as e:
# Note fit time as time until error
fit_time = time.time() - start_time
score_time = 0.0
if error_score == "raise":
raise
elif isinstance(error_score, numbers.Number):
score_dict = dict(list(zip(list(scorer.keys()), [error_score] * n_scorers)))
if is_multimetric:
test_scores = score_dict
if return_train_score:
train_scores = score_dict
else:
test_scores = error_score
if return_train_score:
train_scores = error_score
warnings.warn(
"Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e),
FitFailedWarning,
)
else:
raise ValueError(
"error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
fit_time = time.time() - start_time
# _score will return dict if is_multimetric is True
test_scores = _score(estimator_, X_test, y_test, scorer, is_multimetric)
score_time = time.time() - start_time - fit_time
if return_train_score:
train_scores = _score(estimator_, X_train, y_train, scorer, is_multimetric)
if verbose > 2:
if is_multimetric:
for scorer_name, score in list(test_scores.items()):
msg += ", %s=%s" % (scorer_name, score)
else:
msg += ", score=%s" % test_scores
if verbose > 1:
total_time = score_time + fit_time
end_msg = "%s, total=%s" % (msg, logger.short_format_time(total_time))
print(("[CV] %s %s" % ((64 - len(end_msg)) * ".", end_msg)))
ret = [train_scores, test_scores] if return_train_score else [test_scores]
if return_n_test_samples:
ret.append(_num_samples(X_test))
if return_times:
ret.extend([fit_time, score_time])
if return_parameters:
ret.append(parameters)
return ret
class DistBaseSearchCV(BaseEstimator, metaclass=ABCMeta):
"""
Same as sklearn `BaseSearchCV` but with distributed
training using spark
Args:
estimator (estimator object):
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
sc (sparkContext): Spark context for spark broadcasting and rdd operations.
partitions (int or 'auto'): default 'auto'
Number of partitions to use for parallelization of parameter
search space. Integer values or None will be used directly for `numSlices`,
while 'auto' will set `numSlices` to the number required fits.
preds (bool): keep predictions as attribute
"""
def __init__(self, estimator, sc=None, partitions="auto", preds=False):
self.estimator = estimator
self.sc = sc
self.partitions = partitions
self.preds = preds
def fit(self, X, y=None, groups=None, **fit_params):
"""
Run fit with all sets of parameters. Parallelize fit operations
using spark.
Args:
X (array-like, shape = [n_samples, n_features]): training vector,
where n_samples is the number of samples and
n_features is the number of features
y (array-like, shape = [n_samples] or [n_samples, n_output]): target
relative to X for classification or regression
groups (array-like, with shape (n_samples,)): group labels for
the samples used while splitting the dataset into
train/test set
**fit_params (dict of string -> object): parameters passed
to the ``fit`` method of the estimator
"""
_check_estimator(self, verbose=self.verbose)
estimator = self.estimator
cv = check_cv(self.cv, y, classifier=is_classifier(estimator))
scorers, self.multimetric_ = _check_multimetric_scoring(
self.estimator, scoring=self.scoring
)
if self.multimetric_:
if self.refit is not False and (
not isinstance(self.refit, str)
or
# This will work for both dict / list (tuple)
self.refit not in scorers
):
raise ValueError(
"For multi-metric scoring, the parameter "
"refit must be set to a scorer key "
"to refit an estimator with the best "
"parameter setting on the whole data and "
"make the best_* attributes "
"available for that metric. If this is not "
"needed, refit should be set to False "
"explicitly. %r was passed." % self.refit
)
else:
refit_metric = self.refit
else:
refit_metric = "score"
X, y, groups = indexable(X, y, groups)
n_splits = cv.get_n_splits(X, y, groups)
# Regenerate parameter iterable for each fit
candidate_params = list(self._get_param_iterator())
n_candidates = len(candidate_params)
if self.verbose > 0:
print(
(
"Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(n_splits, n_candidates, n_candidates * n_splits)
)
)
base_estimator = _clone(self.estimator)
pre_dispatch = self.pre_dispatch
fit_sets = []
cv_splitted = list(cv.split(X, y, groups))
count = -1
for fit_set in product(candidate_params, cv_splitted):
count += 1
fit_sets.append((count,) + fit_set)
verbose = self.verbose
return_train_score = self.return_train_score
error_score = self.error_score
if self.sc is None:
base_estimator_ = base_estimator
out = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_and_score)(
base_estimator_,
X,
y,
scorers,
x[2][0],
x[2][1],
verbose,
x[1],
fit_params=fit_params,
return_train_score=return_train_score,
return_n_test_samples=True,
return_times=True,
return_parameters=False,
error_score=error_score,
)
for x in fit_sets
)
out = [[fit_sets[ind][0], out[ind]] for ind in range(len(fit_sets))]
else:
base_estimator_ = self.sc.broadcast(base_estimator)
partitions = _parse_partitions(self.partitions, len(fit_sets))
out = (
self.sc.parallelize(fit_sets, numSlices=partitions)
.map(
lambda x: [
x[0],
_fit_and_score(
base_estimator_,
X,
y,
scorers,
x[2][0],
x[2][1],
verbose,
x[1],
fit_params=fit_params,
return_train_score=return_train_score,
return_n_test_samples=True,
return_times=True,
return_parameters=False,
error_score=error_score,
),
]
)
.collect()
)
out = [out[i][1] for i in np.argsort([x[0] for x in out])]
# if one choose to see train score, "out" will contain train score info
if self.return_train_score:
(
train_score_dicts,
test_score_dicts,
test_sample_counts,
fit_time,
score_time,
) = list(zip(*out))
else:
(test_score_dicts, test_sample_counts, fit_time, score_time) = list(
zip(*out)
)
# test_score_dicts and train_score dicts are lists of dictionaries and
# we make them into dict of lists
test_scores = _aggregate_score_dicts(test_score_dicts)
if self.return_train_score:
train_scores = _aggregate_score_dicts(train_score_dicts)
results = {}
def _store(key_name, array, weights=None, splits=False, rank=False):
""" A small helper to store the scores/times to the cv_results_ """
array = np.array(array, dtype=np.float64).reshape(n_candidates, n_splits)
if splits:
for split_i in range(n_splits):
# Uses closure to alter the results
results["split%d_%s" % (split_i, key_name)] = array[:, split_i]
array_means = np.average(array, axis=1, weights=weights)
results["mean_%s" % key_name] = array_means
# Weighted std is not directly available in numpy
array_stds = np.sqrt(
np.average(
(array - array_means[:, np.newaxis]) ** 2, axis=1, weights=weights
)
)
results["std_%s" % key_name] = array_stds
if rank:
results["rank_%s" % key_name] = np.asarray(
rankdata(-array_means, method="min"), dtype=np.int32
)
_store("fit_time", fit_time)
_store("score_time", score_time)
param_results = defaultdict(
partial(
MaskedArray,
np.empty(
n_candidates,
),
mask=True,
dtype=object,
)
)
for cand_i, params in enumerate(candidate_params):
for name, value in list(params.items()):
# An all masked empty array gets created for the key
# `"param_%s" % name` at the first occurence of `name`.
# Setting the value at an index also unmasks that index
param_results["param_%s" % name][cand_i] = value
results.update(param_results)
# Store a list of param dicts at the key 'params'
results["params"] = candidate_params
# NOTE test_sample counts (weights) remain the same for all candidates
test_sample_counts = np.array(test_sample_counts[:n_splits], dtype=int)
for scorer_name in list(scorers.keys()):
# Computed the (weighted) mean and std for test scores alone
_store(
"test_%s" % scorer_name,
test_scores[scorer_name],
splits=True,
rank=True,
weights=test_sample_counts if self.iid else None,
)
if self.return_train_score:
prev_keys = set(results.keys())
_store("train_%s" % scorer_name, train_scores[scorer_name], splits=True)
if self.return_train_score == "warn":
for key in set(results.keys()) - prev_keys:
message = (
"You are accessing a training score ({!r}), "
"which will not be available by default "
"any more in 0.21. If you need training scores, "
"please set return_train_score=True"
).format(key)
# warn on key access
results.add_warning(key, message, FutureWarning)
# For multi-metric evaluation, store the best_index_, best_params_ and
# best_score_ iff refit is one of the scorer names
# In single metric evaluation, refit_metric is "score"
if self.refit or not self.multimetric_:
self.best_index_ = results["rank_test_%s" % refit_metric].argmin()
self.best_params_ = candidate_params[self.best_index_]
self.best_score_ = results["mean_test_%s" % refit_metric][self.best_index_]
if self.refit:
self.best_estimator_ = _clone(base_estimator).set_params(
**self.best_params_
)
if y is not None:
self.best_estimator_.fit(X, y, **fit_params)
else:
self.best_estimator_.fit(X, **fit_params)
if self.preds:
preds = []
for train_index, test_index in cv_splitted:
estimator_ = _clone(base_estimator).set_params(**self.best_params_)
estimator_.fit(X[train_index], y[train_index])
try:
preds.append(estimator_.predict_proba(X[test_index]))
except:
preds.append(estimator_.predict(X[test_index]))
self.preds_ = np.vstack(preds)
# Store the only scorer not as a dict for single metric evaluation
self.scorer_ = scorers if self.multimetric_ else scorers["score"]
self.cv_results_ = results
self.n_splits_ = n_splits
del self.sc
if hasattr(self.estimator, "sc"):
del self.estimator.sc
return self
def get_preds(self):
""" Get CV predictions """
if hasattr(self, "preds_"):
return self.preds_
def drop_preds(self):
""" Remove preds_ attribute """
if hasattr(self, "preds_"):
del self.preds_
class DistGridSearchCV(DistBaseSearchCV, GridSearchCV):
"""
Same as sklearn `GridSearchCV` but with distributed
training using spark.
Args:
estimator (estimator object):
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid (dict or list of dictionaries):
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
sc (sparkContext): Spark context for spark broadcasting and rdd operations.
partitions (int or 'auto'): default 'auto'
Number of partitions to use for parallelization of parameter
search space. Integer values or None will be used directly for `numSlices`,
while 'auto' will set `numSlices` to the number required fits.
preds (bool): keep predictions as attribute
"""
def __init__(
self,
estimator,
param_grid,
sc=None,
partitions="auto",
preds=False,
scoring=None,
n_jobs=None,
iid="warn",
refit=True,
cv=5,
verbose=0,
pre_dispatch="2*n_jobs",
error_score="raise-deprecating",
return_train_score=False,
):
GridSearchCV.__init__(
self,
estimator,
param_grid,
scoring=scoring,
n_jobs=n_jobs,
iid=iid,
refit=refit,
cv=cv,
verbose=verbose,
pre_dispatch=pre_dispatch,
error_score=error_score,
return_train_score=return_train_score,
)
self.sc = sc
self.partitions = partitions
self.preds = preds
def _get_param_iterator(self):
""" Return ParameterGrid instance for the given param_grid """
return ParameterGrid(self.param_grid)
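# Illustrative sketch (added, not part of sk-dist): typical local usage of
# DistGridSearchCV. With sc=None the search falls back to joblib; in a real
# Spark job a SparkContext would be passed instead (hypothetical here).
def _example_dist_grid_search():
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression

    X, y = load_iris(return_X_y=True)
    search = DistGridSearchCV(
        LogisticRegression(max_iter=200),
        param_grid={"C": [0.1, 1.0, 10.0]},
        sc=None,  # no SparkContext: joblib parallelism is used
        cv=3,
        scoring="accuracy",
    )
    search.fit(X, y)
    return search.best_params_, search.best_score_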
class DistRandomizedSearchCV(DistBaseSearchCV, RandomizedSearchCV):
"""
Same as sklearn `RandomizedSearchCV` but with distributed
training using spark.
Args:
estimator (estimator object):
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions (dict):
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
sc (sparkContext): Spark context for spark broadcasting and rdd operations.
partitions (int or 'auto'): default 'auto'
Number of partitions to use for parallelization of parameter
search space. Integer values or None will be used directly for `numSlices`,
while 'auto' will set `numSlices` to the number required fits.
preds (bool): keep predictions as attribute
"""
def __init__(
self,
estimator,
param_distributions,
sc=None,
partitions="auto",
preds=False,
n_iter=10,
scoring=None,
n_jobs=None,
iid="warn",
refit=True,
cv=5,
verbose=0,
pre_dispatch="2*n_jobs",
random_state=None,
error_score="raise-deprecating",
return_train_score=False,
):
RandomizedSearchCV.__init__(
self,
estimator,
param_distributions,
n_iter=n_iter,
scoring=scoring,
n_jobs=n_jobs,
iid=iid,
refit=refit,
cv=cv,
verbose=verbose,
pre_dispatch=pre_dispatch,
random_state=random_state,
error_score=error_score,
return_train_score=return_train_score,
)
self.sc = sc
self.partitions = partitions
self.preds = preds
def _get_param_iterator(self):
""" Return ParameterSampler instance for the given distributions """
return ParameterSampler(
self.param_distributions, self.n_iter, random_state=self.random_state
)
class DistMultiModelSearch(BaseEstimator, metaclass=ABCMeta):
"""
Distributed multi-model search meta-estimator. Similar to
`DistRandomizedSearchCV` but with handling for multiple models.
Takes a `models` input containing a list of tuples, each with a
string name, instantiated estimator object, and parameter set
dictionary for random search.
The fit method will compute a cross validation score for each
estimator/parameter set combination after randomly sampling from
the parameter set for each estimator. The best estimator/parameter
set combination will be refit if appropriate. The process is distributed
with spark if a sparkContext is provided; otherwise joblib is used.
Args:
models (array-like): List of tuples containing estimator and parameter
set information to generate candidates. Each tuple is of the form:
('name' <str>, 'estimator' <sklearn Estimator>, 'param_set' <dict>)
For example: ('rf', RandomForestClassifier(), {'max_depth': [5,10]})
sc (sparkContext): Spark context for spark broadcasting and rdd operations.
partitions (int or 'auto'): default 'auto'
Number of partitions to use for parallelization of parameter
search space. Integer values or None will be used directly for `numSlices`,
while 'auto' will set `numSlices` to the number required fits.
n (int): Number of parameter sets to sample from parameter space for each
estimator.
cv (int, cross-validation generator or an iterable): Determines the
cross-validation splitting strategy.
scoring (string, callable, list/tuple, dict or None): A single string or a
callable to evaluate the predictions on the test set. If None,
the estimator's score method is used.
random_state (int): Random state used throughout to ensure consistent runs.
verbose (int, bool): Used to indicate level of stdout logging.
refit (bool): Refits best estimator at the end of fit method.
n_jobs (int): Number of jobs to run in parallel. Only used if sc=None.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors.
pre_dispatch (int): Controls the number of jobs that get dispatched
during parallel execution. Reducing this number can be useful
to avoid an explosion of memory consumption when more jobs
get dispatched than CPUs can process. Only used if sc=None.
"""
def __init__(
self,
models,
sc=None,
partitions="auto",
n=5,
cv=5,
scoring=None,
random_state=None,
verbose=0,
refit=True,
n_jobs=None,
pre_dispatch="2*n_jobs",
):
self.models = models
self.sc = sc
self.partitions = partitions
self.n = n
self.cv = cv
self.scoring = scoring
self.random_state = random_state
self.verbose = verbose
self.refit = refit
self.n_jobs = n_jobs
self.pre_dispatch = pre_dispatch
def fit(self, X, y=None, groups=None, **fit_params):
"""
Run fit with all sets of parameters. Parallelize fit operations
using spark.
Args:
X (array-like, shape = [n_samples, n_features]): training vector,
where n_samples is the number of samples and
n_features is the number of features
y (array-like, shape = [n_samples] or [n_samples, n_output]): target
relative to X for classification or regression
groups (array-like, with shape (n_samples,)): group labels for
the samples used while splitting the dataset into
train/test set
**fit_params (dict of string -> object): parameters passed
to the ``fit`` method of the estimator
"""
_check_estimator(self, verbose=self.verbose)
models = _validate_models(self.models, self)
cv = check_cv(self.cv, y, classifier=is_classifier(models[0][1]))
folds = list(cv.split(X, y, groups))
results = pd.DataFrame()
def _sample_generator(models, n, random_state):
yield _raw_sampler(models, n=n, random_state=random_state)
sample_gen = _sample_generator(models, n=self.n, random_state=self.random_state)
param_sets = list(sample_gen)[0]
results, model_results = _fit_batch(
X,
y,
folds,
param_sets,
models,
self.n,
self.scoring,
fit_params,
sc=self.sc,
n_jobs=self.n_jobs,
partitions=self.partitions,
random_state=self.random_state,
)
if self.verbose:
print(model_results)
best_index = np.argmax(results["score"].values)
self.best_model_index_ = results.iloc[best_index]["model_index"]
self.best_model_name_ = models[self.best_model_index_][0]
self.best_params_ = results.iloc[best_index]["param_set"]
self.best_score_ = results.iloc[best_index]["score"]
worst_index = np.argmin(results["score"].values)
self.worst_score_ = results.iloc[worst_index]["score"]
results["rank_test_score"] = np.asarray(
rankdata(-results["score"].values), dtype=np.int32
)
results["mean_test_score"] = results["score"]
results["params"] = results["param_set"]
results["model_name"] = results["model_index"].apply(lambda x: models[x][0])
result_cols = [
"model_index",
"model_name",
"params",
"rank_test_score",
"mean_test_score",
]
self.cv_results_ = results[result_cols].to_dict(orient="list")
if self.refit:
self.best_estimator_ = _clone(models[self.best_model_index_][1])
self.best_estimator_.set_params(**self.best_params_)
self.best_estimator_.fit(X, y, **fit_params)
del self.sc
return self
def _check_is_fitted(self):
if not self.refit:
raise NotFittedError(
"This %s instance was initialized "
"with refit=False. The method is "
"available only after refitting on the best "
"parameters. You can refit an estimator "
"manually using the ``best_params_`` "
"attribute" % (type(self).__name__)
)
else:
_check_is_fitted(self, "best_estimator_")
@if_delegate_has_method(delegate=("best_estimator_", "estimator"))
def predict(self, X):
self._check_is_fitted()
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate=("best_estimator_", "estimator"))
def predict_proba(self, X):
self._check_is_fitted()
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate=("best_estimator_", "estimator"))
def predict_log_proba(self, X):
self._check_is_fitted()
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate=("best_estimator_", "estimator"))
def decision_function(self, X):
self._check_is_fitted()
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate=("best_estimator_", "estimator"))
def transform(self, X):
self._check_is_fitted()
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate=("best_estimator_", "estimator"))
def inverse_transform(self, Xt):
self._check_is_fitted()
return self.best_estimator_.inverse_transform(Xt)
@property
def classes_(self):
self._check_is_fitted()
return self.best_estimator_.classes_
|
python/121_Best_Time_to_Buy_and_Sell_Stock.py
|
dvlpsh/leetcode-1
| 4,416 |
139227
|
<gh_stars>1000+
class Solution(object):
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
length = len(prices)
if length == 0:
return 0
max_profit, low = 0, prices[0]
for i in range(1, length):
if low > prices[i]:
low = prices[i]
else:
temp = prices[i] - low
if temp > max_profit:
max_profit = temp
return max_profit
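# Quick check (added for illustration): buy at 1, sell at 6 in the classic
# example; a strictly decreasing price list yields no profit.
if __name__ == '__main__':
    assert Solution().maxProfit([7, 1, 5, 3, 6, 4]) == 5
    assert Solution().maxProfit([7, 6, 4, 3, 1]) == 0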
|
saleor/graphql/invoice/types.py
|
siyoola/saleor
| 1,392 |
139275
|
import graphene
from ...invoice import models
from ..core.types import Job, ModelObjectType
from ..meta.types import ObjectWithMetadata
class Invoice(ModelObjectType):
number = graphene.String()
external_url = graphene.String()
created_at = graphene.DateTime(required=True)
updated_at = graphene.DateTime(required=True)
message = graphene.String()
url = graphene.String(description="URL to download an invoice.")
class Meta:
description = "Represents an Invoice."
interfaces = [ObjectWithMetadata, Job, graphene.relay.Node]
model = models.Invoice
|
tests/test_bert.py
|
clarencechen/keras-transformer
| 555 |
139277
|
<reponame>clarencechen/keras-transformer<filename>tests/test_bert.py
import random
from itertools import islice
import numpy as np
from keras_transformer.bert import BatchGeneratorForBERT
def test_bert_sample_generator():
token_ids = list(range(3, 1000))
def sampler(size):
start = random.randint(0, len(token_ids) - size - 1)
return token_ids[start: start + size]
gen = BatchGeneratorForBERT(
sampler, len(token_ids), sep_token_id=0, cls_token_id=1,
mask_token_id=2, first_normal_token_id=3,
last_normal_token_id=token_ids[-1],
sequence_length=128, batch_size=16)
for has_next, output_mask, sequence, section_id, masked_sequence in islice(
gen.generate_samples(), 10):
assert sequence[0] == gen.cls_token_id
assert sequence[-1] == gen.sep_token_id
assert len(sequence) == gen.sequence_length
assert masked_sequence != sequence
assert len(section_id) == gen.sequence_length
assert np.sum(section_id == 0) > 1
assert np.sum(section_id == 1) > 1
assert (np.sum(section_id == 1) + np.sum(section_id == 0)
== gen.sequence_length)
first_sep = sequence.index(gen.sep_token_id)
if has_next:
# checking that the second sentence is truly a continuation
assert sequence[first_sep - 1] == sequence[first_sep + 1] - 1
else:
assert sequence[first_sep - 1] != sequence[first_sep + 1] - 1
# Checking that output_mask correctly marks the changes
for i, (s, ms) in enumerate(zip(sequence, masked_sequence)):
if s != ms:
assert output_mask[i] == 1
if output_mask[i] == 0:
assert s == ms
else:
assert ms not in (gen.cls_token_id, gen.sep_token_id)
# Checking batch generator
batches = gen.generate_batches()
batch = next(batches)
x, y = batch
assert isinstance(x[0], np.ndarray)
assert isinstance(x[1], np.ndarray)
assert x[0].shape == (gen.batch_size, gen.sequence_length)
assert x[1].shape == (gen.batch_size, gen.sequence_length)
assert isinstance(y, list)
assert isinstance(y[0], np.ndarray)
assert y[0].shape == (gen.batch_size, gen.sequence_length, 2)
assert len(y[1]) == gen.batch_size
|
lingvo/tasks/car/calibration_processing.py
|
allenwang28/lingvo
| 2,611 |
139301
|
<filename>lingvo/tasks/car/calibration_processing.py
# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for calculating calibration on a prediction."""
from lingvo import compat as tf
from lingvo.core import plot
import numpy as np
def ExpectedCalibrationError(confidence,
empirical_accuracy,
num_examples,
min_confidence=None):
"""Calculate the expected calibration error.
Args:
confidence: 1-D np.array of float32 binned confidence scores with one number
per bin
empirical_accuracy: 1-D np.array of float32 binned empirical accuracies with
one number per bin
num_examples: 1-D np.array of int for the number of examples within a bin.
min_confidence: float32 of minimum confidence score to use in the
calculation. If None, no filtering is applied.
Returns:
float32 of expected calibration error
"""
assert confidence.shape[0] == empirical_accuracy.shape[0]
assert empirical_accuracy.shape[0] == num_examples.shape[0]
ece = np.abs(empirical_accuracy - confidence) * num_examples
if min_confidence:
bin_indices = np.where(confidence > min_confidence)
ece = ece[bin_indices]
num_examples = num_examples[bin_indices]
ece = np.sum(ece)
total_num_examples = np.sum(num_examples)
if total_num_examples != 0:
ece /= total_num_examples
else:
ece = 0.0
return ece
def CalibrationCurve(scores, hits, num_bins):
"""Compute data for calibration reliability diagrams.
Args:
scores: 1-D np.array of float32 confidence scores
hits: 1-D np.array of int32 (either 0 or 1) indicating whether predicted
label matches the ground truth label
num_bins: int for the number of calibration bins
Returns:
A tuple containing:
- mean_predicted_accuracies: np.array of mean predicted accuracy for each
bin
- mean_empirical_accuracies: np.array of mean empirical accuracy for each
bin
- num_examples: np.array of the number of examples in each bin
"""
mean_predicted_accuracies = []
mean_empirical_accuracies = []
num_examples = []
# Bin the hits and scores based on the scores.
edges = np.linspace(0.0, 1.0, num_bins + 1)
bin_indices = np.digitize(scores, edges, right=True)
# Put examples with score equal to 0 in bin 1 because we will skip bin 0.
bin_indices = np.where(scores == 0.0, 1, bin_indices)
for j in range(num_bins + 1):
if j == 0:
continue
indices = np.where(bin_indices == j)[0]
# pylint: disable=g-explicit-length-test
if len(indices) > 0:
mean_predicted_accuracy = np.mean(scores[indices])
mean_empirical_accuracy = np.mean(hits[indices])
num_example = len(indices)
else:
mean_predicted_accuracy = (edges[j - 1] + edges[j]) / 2.0
mean_empirical_accuracy = 0.0
num_example = 0
# pylint: enable=g-explicit-length-test
mean_predicted_accuracies.append(mean_predicted_accuracy)
mean_empirical_accuracies.append(mean_empirical_accuracy)
num_examples.append(num_example)
mean_predicted_accuracies = np.array(mean_predicted_accuracies)
mean_empirical_accuracies = np.array(mean_empirical_accuracies)
num_examples = np.array(num_examples)
return mean_predicted_accuracies, mean_empirical_accuracies, num_examples
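# Illustrative sketch (added; the toy scores below are made up): bin a handful of
# confidence scores into a reliability curve and compute its expected
# calibration error.
def _example_calibration():
    scores = np.array([0.1, 0.35, 0.4, 0.8, 0.9, 0.95], dtype=np.float32)
    hits = np.array([0, 0, 1, 1, 1, 1], dtype=np.int32)
    mean_pred, mean_emp, counts = CalibrationCurve(scores, hits, num_bins=5)
    # Weighted |empirical accuracy - confidence| gap across the populated bins.
    return ExpectedCalibrationError(mean_pred, mean_emp, counts)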
class CalibrationCalculator:
"""Base class for calculating calibration on a prediction."""
def __init__(self, metadata):
self._metadata = metadata
self._num_calibration_bins = self._metadata.NumberOfCalibrationBins()
self._calibration_by_class = None
self._classnames = self._metadata.ClassNames()
self._classids = self._metadata.EvalClassIndices()
def Calculate(self, metrics):
"""Calculate metrics for calibration.
Args:
metrics: A dict. Each entry in the dict is a list of C (number of classes)
dicts containing mapping from metric names to individual results.
Individual entries may be the following items:
- scalars: A list of C (number of classes) dicts mapping metric names to
scalar values.
- curves: A list of C dicts mapping metrics names to np.float32 arrays of
shape [NumberOfPrecisionRecallPoints()+1, 2]. In the last dimension, 0
indexes precision and 1 indexes recall.
- calibrations: A list of C dicts mapping metrics names to np.float32
arrays of shape [number of predictions, 2]. The first column is the
predicted probability and the second column is 0 or 1 indicating that
the prediction matched a ground truth item.
Returns:
nothing
"""
if 'calibrations' not in metrics:
tf.logging.info(
'CalibrationProcessing invoked but no metrics available '
'for calculating calibration.')
return
self._calibration_by_class = {}
for i, c in enumerate(metrics['calibrations']):
classid = self._classids[i]
classname = self._classnames[classid]
if np.all(np.isnan(c['calibrations'])) or c['calibrations'].size == 0:
tf.logging.info(
'Skipping %s for calibration calculation because no '
'output provided.' % classname)
continue
tf.logging.info('Calculating calibration for %s: %d items.' %
(classname, len(c['calibrations'])))
# Ensure that all counts are greater than zero and less than or equal
# to 1.0 to guarantee that all scores are counted.
scores_and_hits = np.clip(c['calibrations'], 1e-10, 1.0)
scores = scores_and_hits[:, 0]
hits = scores_and_hits[:, 1]
curve_data = CalibrationCurve(scores, hits, self._num_calibration_bins)
self._calibration_by_class[classname] = np.array(curve_data[0:3])
tf.logging.info('Finished calculating calibration for %s.' %
classname)
def Summary(self, name):
"""Generate tf summaries for calibration.
Args:
name: str, name of summary.
Returns:
list of tf.Summary
"""
summaries = []
for class_id in self._metadata.EvalClassIndices():
classname = self._metadata.ClassNames()[class_id]
tag_str = '{}/{}/calibration'.format(name, classname)
if classname not in self._calibration_by_class:
continue
# Extract the data.
mean_predicted_accuracy = self._calibration_by_class[classname][0, :]
mean_empirical_accuracy = self._calibration_by_class[classname][1, :]
num_examples_per_bin = self._calibration_by_class[classname][-1, :]
total_examples = np.sum(num_examples_per_bin)
legend = ['%s (%d)' % (classname, total_examples)]
def _CalibrationSetter(fig, axes):
"""Configure the plot for calibration."""
ticks = np.arange(0, 1.05, 0.1)
axes.grid(b=False)
axes.set_xlabel('Predicted accuracy')
axes.set_xticks(ticks)
axes.set_ylabel('Empirical accuracy')
axes.set_yticks(ticks)
axes.legend(legend, numpoints=1) # pylint: disable=cell-var-from-loop
fig.tight_layout()
calibration_curve_summary = plot.Curve(
name=tag_str,
figsize=(10, 8),
xs=mean_predicted_accuracy,
ys=mean_empirical_accuracy,
setter=_CalibrationSetter,
marker='.',
markersize=14,
linestyle='-',
linewidth=2,
alpha=0.5)
ece = ExpectedCalibrationError(mean_predicted_accuracy,
mean_empirical_accuracy,
num_examples_per_bin)
ece_summary = tf.Summary(value=[
tf.Summary.Value(
tag='{}/{}/calibration_ece'.format(name, classname),
simple_value=ece)
])
summaries.extend([calibration_curve_summary, ece_summary])
return summaries
|
core/lib/constant.py
|
duruyi/OnlineSchemaChange
| 949 |
139316
|
#!/usr/bin/env python3
"""
Copyright (c) 2017-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
"""
PREFIX = "__osc_"
OUTFILE_TABLE = "__osc_tbl_"
OUTFILE_EXCLUDE_ID = "__osc_ex_"
OUTFILE_INCLUDE_ID = "__osc_in_"
NEW_TABLE_PREFIX = "__osc_new_"
DELTA_TABLE_PREFIX = "__osc_chg_"
RENAMED_TABLE_PREFIX = "__osc_old_"
INSERT_TRIGGER_PREFIX = "__osc_ins_"
UPDATE_TRIGGER_PREFIX = "__osc_upd_"
DELETE_TRIGGER_PREFIX = "__osc_del_"
# tables with 64 character length names need a generic place-holder name
GENERIC_TABLE_NAME = "online_schema_change_temp_tbl"
# Special prefixes for tables that have longer table names
SHORT_NEW_TABLE_PREFIX = "n!"
SHORT_DELTA_TABLE_PREFIX = "c!"
SHORT_RENAMED_TABLE_PREFIX = "o!"
SHORT_INSERT_TRIGGER_PREFIX = "i!"
SHORT_UPDATE_TRIGGER_PREFIX = "u!"
SHORT_DELETE_TRIGGER_PREFIX = "d!"
OSC_LOCK_NAME = "OnlineSchemaChange"
CHUNK_BYTES = 2 * 1024 * 1024
REPLAY_DEFAULT_TIMEOUT = 5 # replay until we can finish in 5 seconds
DEFAULT_BATCH_SIZE = 500
DEFAULT_REPLAY_ATTEMPT = 10
DEFAULT_RESERVED_SPACE_PERCENT = 1
LONG_TRX_TIME = 30
MAX_RUNNING_BEFORE_DDL = 200
DDL_GUARD_ATTEMPTS = 600
LOCK_MAX_ATTEMPTS = 3
LOCK_MAX_WAIT_BEFORE_KILL_SECONDS = 0.5
SESSION_TIMEOUT = 600
DEFAULT_REPLAY_GROUP_SIZE = 200
PK_COVERAGE_SIZE_THRESHOLD = 500 * 1024 * 1024
MAX_WAIT_FOR_SLOW_QUERY = 100
MAX_TABLE_LENGTH = 64
MAX_REPLAY_BATCH_SIZE = 500000
MAX_REPLAY_CHANGES = 2146483647
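# Illustrative note (added; the exact composition below is an assumption, not
# taken from this module): the prefixes above are prepended to a table name,
# e.g. "__osc_new_" + "mytable" -> "__osc_new_mytable" for the shadow copy.
# When the prefixed name would exceed MAX_TABLE_LENGTH (64), the short
# "n!"/"c!"/"o!" prefixes and GENERIC_TABLE_NAME serve as fallbacks.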
|
frontend/api/tasks/frontend_app.py
|
quarkslab/irma
| 248 |
139321
|
#
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import logging
import humanfriendly
import celery
from celery.utils.log import get_task_logger
import api.files.services as file_ctrl
import api.scans.services as scan_ctrl
import config.parser as config
from irma.common.base.exceptions import IrmaDatabaseError, IrmaFileSystemError
import api.common.ftp as ftp_ctrl
import api.tasks.braintasks as celery_brain
from api.files_ext.models import FileExt
from api.scans.models import Scan
from irma.common.base.exceptions import IrmaFtpError
from irma.common.base.utils import IrmaScanStatus
from api.common.sessions import session_transaction
log = get_task_logger(__name__)
# declare a new application
frontend_app = celery.Celery('frontend_app')
config.conf_frontend_celery(frontend_app)
config.configure_syslog(frontend_app)
# IRMA-specific debug messages are enabled through
# the config file (Section: log / Key: debug)
if config.debug_enabled():
def after_setup_logger_handler(sender=None, logger=None, loglevel=None,
logfile=None, format=None,
colorize=None, **kwds):
config.setup_debug_logger(logging.getLogger(__name__))
log.debug("debug is enabled")
celery.signals.after_setup_logger.connect(after_setup_logger_handler)
celery.signals.after_setup_task_logger.connect(after_setup_logger_handler)
@frontend_app.task(acks_late=True)
def scan_launch(scan_id):
with session_transaction() as session:
scan = None
try:
log.debug("scan: %s launching", scan_id)
# Part for common action for whole scan
scan = Scan.load_from_ext_id(scan_id, session)
scan_request = scan_ctrl._create_scan_request(
scan.files_ext,
scan.get_probelist(),
scan.mimetype_filtering)
scan_request = scan_ctrl._add_empty_results(
scan.files_ext,
scan_request,
scan, session)
# Nothing to do
if scan_request.nb_files == 0:
scan.set_status(IrmaScanStatus.finished)
log.warning("scan %s: finished nothing to do", scan_id)
return
# Part for action file_ext by file_ext
file_ext_id_list = [file.external_id for file in scan.files_ext]
celery.group(scan_launch_file_ext.si(file_ext_id)
for file_ext_id in file_ext_id_list)()
scan.set_status(IrmaScanStatus.launched)
session.commit()
log.info("scan %s: launched", scan_id)
return
except Exception as e:
log.exception(type(e).__name__ + " : " + str(e))
if scan is not None:
scan.set_status(IrmaScanStatus.error)
@frontend_app.task(acks_late=True)
def scan_launch_file_ext(file_ext_id):
file_ext = None
with session_transaction() as session:
try:
file_ext = FileExt.load_from_ext_id(file_ext_id, session)
scan_id = file_ext.scan.external_id
log.debug("scan %s: launch scan for file_ext: %s",
scan_id, file_ext_id)
ftp_ctrl.upload_file(file_ext_id, file_ext.file.path)
# launch new celery scan task on brain
celery_brain.scan_launch(file_ext_id, file_ext.probes, scan_id)
except IrmaFtpError as e:
log.error("file_ext %s: ftp upload error %s", file_ext_id, str(e))
if file_ext is not None:
file_ext.scan.set_status(IrmaScanStatus.error_ftp_upload)
except Exception as e:
log.exception(type(e).__name__ + " : " + str(e))
@frontend_app.task(bind=True, acks_late=True)
def scan_result(self, file_ext_id, probe, result):
max_retries = 3
try:
log.debug("file_ext: %s result from probe %s retry: %d",
file_ext_id, probe, self.request.retries)
scan_ctrl.handle_output_files(file_ext_id, result)
scan_ctrl.set_result(file_ext_id, probe, result)
except IrmaDatabaseError as e:
log.exception(type(e).__name__ + " : " + str(e))
raise self.retry(countdown=2, max_retries=max_retries, exc=e)
@frontend_app.task(bind=True, acks_late=True)
def scan_result_error(self, parent_taskid, file_ext_id, probe, result):
log.debug("Error result file_ext: %s probe %s", file_ext_id, probe)
result["error"] = "Error raised during result insert"
result["status"] = -1
scan_ctrl.handle_output_files(file_ext_id, result, error_case=True)
scan_ctrl.set_result(file_ext_id, probe, result)
@frontend_app.task()
def clean_fs_age():
try:
cron_cfg = config.frontend_config['cron_clean_file_age']
max_age_file = cron_cfg['clean_fs_max_age']
# 0 means disabled
if max_age_file == "0":
log.debug("disabled by config")
return 0
# convert to seconds
max_age_secs = int(humanfriendly.parse_timespan(max_age_file))
nb_files = file_ctrl.remove_files(max_age_secs)
log.info("removed %d files (older than %s)", nb_files, max_age_file)
return nb_files
except (IrmaDatabaseError, IrmaFileSystemError) as e:
log.exception(type(e).__name__ + " : " + str(e))
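# Illustrative note (added): 'clean_fs_max_age' is a humanfriendly timespan such
# as "7 days" (parse_timespan("7 days") -> 604800.0 seconds) and
# 'clean_fs_max_size' below is a humanfriendly size such as "2G"
# (parse_size("2G", binary=True) -> 2147483648 bytes); "0" disables either cron.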
@frontend_app.task()
def clean_fs_size():
try:
cron_cfg = config.frontend_config['cron_clean_file_size']
max_size = cron_cfg['clean_fs_max_size']
# 0 means disabled
if max_size == '0':
log.debug("disabled by config")
return 0
max_size_bytes = humanfriendly.parse_size(max_size, binary=True)
nb_files = file_ctrl.remove_files_size(max_size_bytes)
log.info("removed %d files", nb_files)
return nb_files
except (IrmaDatabaseError, IrmaFileSystemError) as e:
log.exception(type(e).__name__ + " : " + str(e))
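# Illustration of the parsing above (humanfriendly semantics, values hypothetical):
# humanfriendly.parse_timespan("2 days") == 172800.0 seconds for clean_fs_age, and
# humanfriendly.parse_size("10 GB", binary=True) == 10 * 1024 ** 3 bytes for clean_fs_size,
# which are the thresholds handed to file_ctrl.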
########################
# command line launcher
########################
if __name__ == '__main__': # pragma: no cover
options = config.get_celery_options("api.tasks.frontend_app",
"frontend_app")
frontend_app.worker_main(options)
|
convlab/lib/util.py
|
ngduyanhece/ConvLab
| 405 |
139439
|
# Modified by Microsoft Corporation.
# Licensed under the MIT license.
import json
import operator
import os
import pickle
import subprocess
import sys
import time
from collections import deque
from contextlib import contextmanager
from datetime import datetime
from importlib import reload
from pprint import pformat
import numpy as np
import pandas as pd
import pydash as ps
import regex as re
import torch
import torch.multiprocessing as mp
import ujson
import yaml
from convlab import ROOT_DIR, EVAL_MODES
NUM_CPUS = mp.cpu_count()
FILE_TS_FORMAT = '%Y_%m_%d_%H%M%S'
RE_FILE_TS = re.compile(r'(\d{4}_\d{2}_\d{2}_\d{6})')
class LabJsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, (np.ndarray, pd.Series)):
return obj.tolist()
else:
return str(obj)
def batch_get(arr, idxs):
'''Get multi-idxs from an array depending if it's a python list or np.array'''
if isinstance(arr, (list, deque)):
return np.array(operator.itemgetter(*idxs)(arr))
else:
return arr[idxs]
def calc_srs_mean_std(sr_list):
'''Given a list of series, calculate their mean and std'''
cat_df = pd.DataFrame(dict(enumerate(sr_list)))
mean_sr = cat_df.mean(axis=1)
std_sr = cat_df.std(axis=1)
return mean_sr, std_sr
def calc_ts_diff(ts2, ts1):
'''
    Calculate the time difference from timestamp ts1 to ts2
@param {str} ts2 Later ts in the FILE_TS_FORMAT
@param {str} ts1 Earlier ts in the FILE_TS_FORMAT
@returns {str} delta_t in %H:%M:%S format
@example
ts1 = '2017_10_17_084739'
ts2 = '2017_10_17_084740'
ts_diff = util.calc_ts_diff(ts2, ts1)
# => '0:00:01'
'''
delta_t = datetime.strptime(ts2, FILE_TS_FORMAT) - datetime.strptime(ts1, FILE_TS_FORMAT)
return str(delta_t)
def cast_df(val):
'''missing pydash method to cast value as DataFrame'''
if isinstance(val, pd.DataFrame):
return val
return pd.DataFrame(val)
def cast_list(val):
'''missing pydash method to cast value as list'''
if ps.is_list(val):
return val
else:
return [val]
def clear_periodic_ckpt(prepath):
'''Clear periodic (with -epi) ckpt files in prepath'''
if '-epi' in prepath:
run_cmd(f'rm {prepath}*')
def concat_batches(batches):
'''
Concat batch objects from body.memory.sample() into one batch, when all bodies experience similar envs
Also concat any nested epi sub-batches into flat batch
{k: arr1} + {k: arr2} = {k: arr1 + arr2}
'''
# if is nested, then is episodic
is_episodic = isinstance(batches[0]['dones'][0], (list, np.ndarray))
concat_batch = {}
for k in batches[0]:
datas = []
for batch in batches:
data = batch[k]
if is_episodic: # make into plain batch instead of nested
data = np.concatenate(data)
datas.append(data)
concat_batch[k] = np.concatenate(datas)
return concat_batch
def downcast_float32(df):
'''Downcast any float64 col to float32 to allow safer pandas comparison'''
for col in df.columns:
if df[col].dtype == 'float':
df[col] = df[col].astype('float32')
return df
def epi_done(done):
'''
General method to check if episode is done for both single and vectorized env
Only return True for singleton done since vectorized env does not have a natural episode boundary
'''
return np.isscalar(done) and done
def find_ckpt(prepath):
'''Find the ckpt-lorem-ipsum in a string and return lorem-ipsum'''
if 'ckpt' in prepath:
ckpt_str = ps.find(prepath.split('_'), lambda s: s.startswith('ckpt'))
ckpt = ckpt_str.replace('ckpt-', '')
else:
ckpt = None
return ckpt
def frame_mod(frame, frequency, num_envs):
'''
Generic mod for (frame % frequency == 0) for when num_envs is 1 or more,
since frame will increase multiple ticks for vector env, use the remainder'''
remainder = num_envs or 1
return (frame % frequency < remainder)
def flatten_dict(obj, delim='.'):
'''Missing pydash method to flatten dict'''
nobj = {}
for key, val in obj.items():
if ps.is_dict(val) and not ps.is_empty(val):
strip = flatten_dict(val, delim)
for k, v in strip.items():
nobj[key + delim + k] = v
elif ps.is_list(val) and not ps.is_empty(val) and ps.is_dict(val[0]):
for idx, v in enumerate(val):
nobj[key + delim + str(idx)] = v
if ps.is_object(v):
nobj = flatten_dict(nobj, delim)
else:
nobj[key] = val
return nobj
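# A minimal sketch of flatten_dict on hypothetical input, following the logic above:
# flatten_dict({'a': {'b': 1}, 'c': [{'d': 2}]})
# # => {'a.b': 1, 'c.0.d': 2}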
def get_class_name(obj, lower=False):
'''Get the class name of an object'''
class_name = obj.__class__.__name__
if lower:
class_name = class_name.lower()
return class_name
def get_class_attr(obj):
'''Get the class attr of an object as dict'''
attr_dict = {}
for k, v in obj.__dict__.items():
if hasattr(v, '__dict__') or ps.is_tuple(v):
val = str(v)
else:
val = v
attr_dict[k] = val
return attr_dict
def get_file_ext(data_path):
'''get the `.ext` of file.ext'''
return os.path.splitext(data_path)[-1]
def get_fn_list(a_cls):
'''
Get the callable, non-private functions of a class
@returns {[*str]} A list of strings of fn names
'''
fn_list = ps.filter_(dir(a_cls), lambda fn: not fn.endswith('__') and callable(getattr(a_cls, fn)))
return fn_list
def get_git_sha():
return subprocess.check_output(['git', 'rev-parse', 'HEAD'], close_fds=True, cwd=ROOT_DIR).decode().strip()
def get_lab_mode():
return os.environ.get('lab_mode')
def get_prepath(spec, unit='experiment'):
spec_name = spec['name']
meta_spec = spec['meta']
predir = f'output/{spec_name}_{meta_spec["experiment_ts"]}'
prename = f'{spec_name}'
trial_index = meta_spec['trial']
session_index = meta_spec['session']
t_str = '' if trial_index is None else f'_t{trial_index}'
s_str = '' if session_index is None else f'_s{session_index}'
if unit == 'trial':
prename += t_str
elif unit == 'session':
prename += f'{t_str}{s_str}'
ckpt = meta_spec['ckpt']
if ckpt is not None:
prename += f'_ckpt-{ckpt}'
prepath = f'{predir}/{prename}'
return prepath
def get_ts(pattern=FILE_TS_FORMAT):
'''
Get current ts, defaults to format used for filename
@param {str} pattern To format the ts
@returns {str} ts
@example
util.get_ts()
# => '2017_10_17_084739'
'''
ts_obj = datetime.now()
ts = ts_obj.strftime(pattern)
assert RE_FILE_TS.search(ts)
return ts
def insert_folder(prepath, folder):
'''Insert a folder into prepath'''
split_path = prepath.split('/')
prename = split_path.pop()
split_path += [folder, prename]
return '/'.join(split_path)
def in_eval_lab_modes():
'''Check if lab_mode is one of EVAL_MODES'''
return get_lab_mode() in EVAL_MODES
def is_jupyter():
'''Check if process is in Jupyter kernel'''
try:
get_ipython().config
return True
except NameError:
return False
return False
@contextmanager
def ctx_lab_mode(lab_mode):
'''
Creates context to run method with a specific lab_mode
@example
with util.ctx_lab_mode('eval'):
foo()
@util.ctx_lab_mode('eval')
def foo():
...
'''
prev_lab_mode = os.environ.get('lab_mode')
os.environ['lab_mode'] = lab_mode
yield
if prev_lab_mode is None:
del os.environ['lab_mode']
else:
os.environ['lab_mode'] = prev_lab_mode
def monkey_patch(base_cls, extend_cls):
'''Monkey patch a base class with methods from extend_cls'''
ext_fn_list = get_fn_list(extend_cls)
for fn in ext_fn_list:
setattr(base_cls, fn, getattr(extend_cls, fn))
def parallelize(fn, args, num_cpus=NUM_CPUS):
'''
Parallelize a method fn, args and return results with order preserved per args.
args should be a list of tuples.
@returns {list} results Order preserved output from fn.
'''
pool = mp.Pool(num_cpus, maxtasksperchild=1)
results = pool.starmap(fn, args)
pool.close()
pool.join()
return results
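# Usage sketch (hypothetical add function; it must be module-level so mp.Pool can pickle it):
# def add(a, b):
#     return a + b
# parallelize(add, [(1, 2), (3, 4)])
# # => [3, 7]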
def prepath_split(prepath):
'''
Split prepath into useful names. Works with predir (prename will be None)
prepath: output/dqn_pong_2018_12_02_082510/dqn_pong_t0_s0
predir: output/dqn_pong_2018_12_02_082510
prefolder: dqn_pong_2018_12_02_082510
prename: dqn_pong_t0_s0
spec_name: dqn_pong
experiment_ts: 2018_12_02_082510
ckpt: ckpt-best of dqn_pong_t0_s0_ckpt-best if available
'''
prepath = prepath.strip('_')
tail = prepath.split('output/')[-1]
ckpt = find_ckpt(tail)
if ckpt is not None: # separate ckpt
tail = tail.replace(f'_ckpt-{ckpt}', '')
if '/' in tail: # tail = prefolder/prename
prefolder, prename = tail.split('/', 1)
else:
prefolder, prename = tail, None
predir = f'output/{prefolder}'
spec_name = RE_FILE_TS.sub('', prefolder).strip('_')
experiment_ts = RE_FILE_TS.findall(prefolder)[0]
return predir, prefolder, prename, spec_name, experiment_ts, ckpt
def prepath_to_idxs(prepath):
'''Extract trial index and session index from prepath if available'''
_, _, prename, spec_name, _, _ = prepath_split(prepath)
idxs_tail = prename.replace(spec_name, '').strip('_')
idxs_strs = ps.compact(idxs_tail.split('_')[:2])
if ps.is_empty(idxs_strs):
return None, None
tidx = idxs_strs[0]
assert tidx.startswith('t')
trial_index = int(tidx.strip('t'))
    if len(idxs_strs) == 1:  # only a trial index, no session index
session_index = None
else:
sidx = idxs_strs[1]
assert sidx.startswith('s')
session_index = int(sidx.strip('s'))
return trial_index, session_index
def prepath_to_spec(prepath):
'''
    Given a prepath, read the correct spec and recover the meta_spec that will return the same prepath for eval lab modes
example: output/a2c_cartpole_2018_06_13_220436/a2c_cartpole_t0_s0
'''
predir, _, prename, _, experiment_ts, ckpt = prepath_split(prepath)
    sidx_res = re.search(r'_s\d+', prename)
if sidx_res: # replace the _s0 if any
prename = prename.replace(sidx_res[0], '')
spec_path = f'{predir}/{prename}_spec.json'
# read the spec of prepath
spec = read(spec_path)
# recover meta_spec
trial_index, session_index = prepath_to_idxs(prepath)
meta_spec = spec['meta']
meta_spec['experiment_ts'] = experiment_ts
meta_spec['ckpt'] = ckpt
meta_spec['experiment'] = 0
meta_spec['trial'] = trial_index
meta_spec['session'] = session_index
check_prepath = get_prepath(spec, unit='session')
assert check_prepath in prepath, f'{check_prepath}, {prepath}'
return spec
def read(data_path, **kwargs):
'''
Universal data reading method with smart data parsing
- {.csv} to DataFrame
- {.json} to dict, list
- {.yml} to dict
- {*} to str
@param {str} data_path The data path to read from
@returns {data} The read data in sensible format
@example
data_df = util.read('test/fixture/lib/util/test_df.csv')
# => <DataFrame>
data_dict = util.read('test/fixture/lib/util/test_dict.json')
data_dict = util.read('test/fixture/lib/util/test_dict.yml')
# => <dict>
data_list = util.read('test/fixture/lib/util/test_list.json')
# => <list>
data_str = util.read('test/fixture/lib/util/test_str.txt')
# => <str>
'''
data_path = smart_path(data_path)
try:
assert os.path.isfile(data_path)
except AssertionError:
raise FileNotFoundError(data_path)
ext = get_file_ext(data_path)
if ext == '.csv':
data = read_as_df(data_path, **kwargs)
elif ext == '.pkl':
data = read_as_pickle(data_path, **kwargs)
else:
data = read_as_plain(data_path, **kwargs)
return data
def read_as_df(data_path, **kwargs):
'''Submethod to read data as DataFrame'''
ext = get_file_ext(data_path)
data = pd.read_csv(data_path, **kwargs)
return data
def read_as_pickle(data_path, **kwargs):
'''Submethod to read data as pickle'''
with open(data_path, 'rb') as f:
data = pickle.load(f)
return data
def read_as_plain(data_path, **kwargs):
'''Submethod to read data as plain type'''
open_file = open(data_path, 'r')
ext = get_file_ext(data_path)
if ext == '.json':
data = ujson.load(open_file, **kwargs)
elif ext == '.yml':
data = yaml.load(open_file, **kwargs)
else:
data = open_file.read()
open_file.close()
return data
def run_cmd(cmd):
'''Run shell command'''
print(f'+ {cmd}')
proc = subprocess.Popen(cmd, cwd=ROOT_DIR, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
return proc
def run_cmd_wait(proc):
'''Wait on a running process created by util.run_cmd and print its stdout'''
for line in proc.stdout:
print(line.decode(), end='')
output = proc.communicate()[0]
if proc.returncode != 0:
        raise subprocess.CalledProcessError(proc.returncode, proc.args, output)
else:
return output
def self_desc(cls):
'''Method to get self description, used at init.'''
desc_list = [f'{get_class_name(cls)}:']
for k, v in get_class_attr(cls).items():
if k == 'spec':
desc_v = v['name']
elif ps.is_dict(v) or ps.is_dict(ps.head(v)):
desc_v = pformat(v)
else:
desc_v = v
desc_list.append(f'- {k} = {desc_v}')
desc = '\n'.join(desc_list)
return desc
def set_attr(obj, attr_dict, keys=None):
'''Set attribute of an object from a dict'''
if keys is not None:
attr_dict = ps.pick(attr_dict, keys)
for attr, val in attr_dict.items():
setattr(obj, attr, val)
return obj
def set_cuda_id(spec):
'''Use trial and session id to hash and modulo cuda device count for a cuda_id to maximize device usage. Sets the net_spec for the base Net class to pick up.'''
# Don't trigger any cuda call if not using GPU. Otherwise will break multiprocessing on machines with CUDA.
# see issues https://github.com/pytorch/pytorch/issues/334 https://github.com/pytorch/pytorch/issues/3491 https://github.com/pytorch/pytorch/issues/9996
for agent_spec in spec['agent']:
if 'net' not in agent_spec or not agent_spec['net'].get('gpu'):
return
meta_spec = spec['meta']
trial_idx = meta_spec['trial'] or 0
session_idx = meta_spec['session'] or 0
if meta_spec['distributed'] == 'shared': # shared hogwild uses only global networks, offset them to idx 0
session_idx = 0
job_idx = trial_idx * meta_spec['max_session'] + session_idx
job_idx += meta_spec['cuda_offset']
device_count = torch.cuda.device_count()
cuda_id = None if not device_count else job_idx % device_count
for agent_spec in spec['agent']:
agent_spec['net']['cuda_id'] = cuda_id
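# Worked example of the mapping above (hypothetical spec values): with trial=1, session=2,
# max_session=4, cuda_offset=0 and 2 visible devices, job_idx = 1 * 4 + 2 + 0 = 6 and
# cuda_id = 6 % 2 = 0, so that session's networks are placed on GPU 0.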
def set_logger(spec, logger, unit=None):
    '''Set the logger for a lab unit given its spec'''
os.environ['LOG_PREPATH'] = insert_folder(get_prepath(spec, unit=unit), 'log')
reload(logger) # to set session-specific logger
def set_random_seed(spec):
'''Generate and set random seed for relevant modules, and record it in spec.meta.random_seed'''
torch.set_num_threads(1) # prevent multithread slowdown, set again for hogwild
trial = spec['meta']['trial']
session = spec['meta']['session']
random_seed = int(1e5 * (trial or 0) + 1e3 * (session or 0) + time.time())
torch.cuda.manual_seed_all(random_seed)
torch.manual_seed(random_seed)
np.random.seed(random_seed)
spec['meta']['random_seed'] = random_seed
return random_seed
def _sizeof(obj, seen=None):
'''Recursively finds size of objects'''
size = sys.getsizeof(obj)
if seen is None:
seen = set()
obj_id = id(obj)
if obj_id in seen:
return 0
# Important mark as seen *before* entering recursion to gracefully handle
# self-referential objects
seen.add(obj_id)
if isinstance(obj, dict):
size += sum([_sizeof(v, seen) for v in obj.values()])
size += sum([_sizeof(k, seen) for k in obj.keys()])
elif hasattr(obj, '__dict__'):
size += _sizeof(obj.__dict__, seen)
elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):
size += sum([_sizeof(i, seen) for i in obj])
return size
def sizeof(obj, divisor=1e6):
'''Return the size of object, in MB by default'''
return _sizeof(obj) / divisor
def smart_path(data_path, as_dir=False):
'''
Resolve data_path into abspath with fallback to join from ROOT_DIR
@param {str} data_path The input data path to resolve
@param {bool} as_dir Whether to return as dirname
@returns {str} The normalized absolute data_path
@example
util.smart_path('convlab/lib')
# => '/Users/ANON/Documents/convlab/convlab/lib'
util.smart_path('/tmp')
# => '/tmp'
'''
if not os.path.isabs(data_path):
abs_path = os.path.abspath(data_path)
if os.path.exists(abs_path):
data_path = abs_path
else:
data_path = os.path.join(ROOT_DIR, data_path)
if as_dir:
data_path = os.path.dirname(data_path)
return os.path.normpath(data_path)
def split_minibatch(batch, mb_size):
'''Split a batch into minibatches of mb_size or smaller, without replacement'''
size = len(batch['rewards'])
assert mb_size < size, f'Minibatch size {mb_size} must be < batch size {size}'
idxs = np.arange(size)
np.random.shuffle(idxs)
chunks = int(size / mb_size)
nested_idxs = np.array_split(idxs, chunks)
mini_batches = []
for minibatch_idxs in nested_idxs:
minibatch = {k: v[minibatch_idxs] for k, v in batch.items()}
mini_batches.append(minibatch)
return mini_batches
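# Sketch with hypothetical sizes: a batch of 12 transitions and mb_size=4 gives
# int(12 / 4) = 3 chunks, so np.array_split yields three shuffled minibatches of 4 each.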
def to_json(d, indent=2):
'''Shorthand method for stringify JSON with indent'''
return json.dumps(d, indent=indent, cls=LabJsonEncoder)
def to_render():
return get_lab_mode() in ('dev', 'enjoy') and os.environ.get('RENDER', 'true') == 'true'
def to_torch_batch(batch, device, is_episodic):
'''Mutate a batch (dict) to make its values from numpy into PyTorch tensor'''
for k in batch:
if is_episodic: # for episodic format
batch[k] = np.concatenate(batch[k])
elif ps.is_list(batch[k]):
batch[k] = np.array(batch[k])
batch[k] = torch.from_numpy(batch[k].astype(np.float32)).to(device)
return batch
def write(data, data_path):
'''
Universal data writing method with smart data parsing
- {.csv} from DataFrame
- {.json} from dict, list
- {.yml} from dict
- {*} from str(*)
@param {*} data The data to write
@param {str} data_path The data path to write to
@returns {data_path} The data path written to
@example
data_path = util.write(data_df, 'test/fixture/lib/util/test_df.csv')
data_path = util.write(data_dict, 'test/fixture/lib/util/test_dict.json')
data_path = util.write(data_dict, 'test/fixture/lib/util/test_dict.yml')
data_path = util.write(data_list, 'test/fixture/lib/util/test_list.json')
data_path = util.write(data_str, 'test/fixture/lib/util/test_str.txt')
'''
data_path = smart_path(data_path)
data_dir = os.path.dirname(data_path)
os.makedirs(data_dir, exist_ok=True)
ext = get_file_ext(data_path)
if ext == '.csv':
write_as_df(data, data_path)
elif ext == '.pkl':
write_as_pickle(data, data_path)
else:
write_as_plain(data, data_path)
return data_path
def write_as_df(data, data_path):
'''Submethod to write data as DataFrame'''
df = cast_df(data)
ext = get_file_ext(data_path)
df.to_csv(data_path, index=False)
return data_path
def write_as_pickle(data, data_path):
'''Submethod to write data as pickle'''
with open(data_path, 'wb') as f:
pickle.dump(data, f)
return data_path
def write_as_plain(data, data_path):
'''Submethod to write data as plain type'''
open_file = open(data_path, 'w')
ext = get_file_ext(data_path)
if ext == '.json':
json.dump(data, open_file, indent=2, cls=LabJsonEncoder)
elif ext == '.yml':
yaml.dump(data, open_file)
else:
open_file.write(str(data))
open_file.close()
return data_path
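# Round-trip sketch (hypothetical path): write({'a': 1}, 'data/example.json') resolves the
# path via smart_path, creates the parent directory and serializes with LabJsonEncoder;
# read('data/example.json') then parses it back into a dict.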
|
pymagnitude/third_party/allennlp/tests/data/dataset_readers/penn_tree_bank_reader_test.py
|
tpeng/magnitude
| 1,520 |
139443
|
# pylint: disable=no-self-use,invalid-name,protected-access
from __future__ import division
from __future__ import absolute_import
from nltk.tree import Tree
from allennlp.data.dataset_readers import PennTreeBankConstituencySpanDatasetReader
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.dataset_readers.dataset_utils.span_utils import enumerate_spans
try:
from itertools import izip
except ImportError:
izip = zip
class TestPennTreeBankConstituencySpanReader(AllenNlpTestCase):
def setUp(self):
super(TestPennTreeBankConstituencySpanReader, self).setUp()
self.span_width = 5
def test_read_from_file(self):
ptb_reader = PennTreeBankConstituencySpanDatasetReader()
instances = ptb_reader.read(unicode(self.FIXTURES_ROOT / u'data' / u'example_ptb.trees'))
assert len(instances) == 2
fields = instances[0].fields
tokens = [x.text for x in fields[u"tokens"].tokens]
pos_tags = fields[u"pos_tags"].labels
spans = [(x.span_start, x.span_end) for x in fields[u"spans"].field_list]
span_labels = fields[u"span_labels"].labels
assert tokens == [u'Also', u',', u'because', u'UAL', u'Chairman', u'Stephen', u'Wolf',
u'and', u'other', u'UAL', u'executives', u'have', u'joined', u'the',
u'pilots', u"'", u'bid', u',', u'the', u'board', u'might', u'be', u'forced',
u'to', u'exclude', u'him', u'from', u'its', u'deliberations', u'in',
u'order', u'to', u'be', u'fair', u'to', u'other', u'bidders', u'.']
assert pos_tags == [u'RB', u',', u'IN', u'NNP', u'NNP', u'NNP', u'NNP', u'CC', u'JJ', u'NNP',
u'NNS', u'VBP', u'VBN', u'DT', u'NNS', u'POS', u'NN', u',', u'DT', u'NN',
u'MD', u'VB', u'VBN', u'TO', u'VB', u'PRP', u'IN', u'PRP$',
u'NNS', u'IN', u'NN', u'TO', u'VB', u'JJ', u'TO', u'JJ', u'NNS', u'.']
assert spans == enumerate_spans(tokens)
gold_tree = Tree.fromstring(u"(S(ADVP(RB Also))(, ,)(SBAR(IN because)"
u"(S(NP(NP(NNP UAL)(NNP Chairman)(NNP Stephen)(NNP Wolf))"
u"(CC and)(NP(JJ other)(NNP UAL)(NNS executives)))(VP(VBP have)"
u"(VP(VBN joined)(NP(NP(DT the)(NNS pilots)(POS '))(NN bid))))))"
u"(, ,)(NP(DT the)(NN board))(VP(MD might)(VP(VB be)(VP(VBN "
u"forced)(S(VP(TO to)(VP(VB exclude)(NP(PRP him))(PP(IN from)"
u"(NP(PRP$ its)(NNS deliberations)))(SBAR(IN in)(NN order)(S("
u"VP(TO to)(VP(VB be)(ADJP(JJ fair)(PP(TO to)(NP(JJ other)(NNS "
u"bidders))))))))))))))(. .))")
assert fields[u"metadata"].metadata[u"gold_tree"] == gold_tree
assert fields[u"metadata"].metadata[u"tokens"] == tokens
correct_spans_and_labels = {}
ptb_reader._get_gold_spans(gold_tree, 0, correct_spans_and_labels)
for span, label in izip(spans, span_labels):
if label != u"NO-LABEL":
assert correct_spans_and_labels[span] == label
fields = instances[1].fields
tokens = [x.text for x in fields[u"tokens"].tokens]
pos_tags = fields[u"pos_tags"].labels
spans = [(x.span_start, x.span_end) for x in fields[u"spans"].field_list]
span_labels = fields[u"span_labels"].labels
assert tokens == [u'That', u'could', u'cost', u'him', u'the', u'chance',
u'to', u'influence', u'the', u'outcome', u'and', u'perhaps',
u'join', u'the', u'winning', u'bidder', u'.']
assert pos_tags == [u'DT', u'MD', u'VB', u'PRP', u'DT', u'NN',
u'TO', u'VB', u'DT', u'NN', u'CC', u'RB', u'VB', u'DT',
u'VBG', u'NN', u'.']
assert spans == enumerate_spans(tokens)
gold_tree = Tree.fromstring(u"(S(NP(DT That))(VP(MD could)(VP(VB cost)(NP(PRP him))"
u"(NP(DT the)(NN chance)(S(VP(TO to)(VP(VP(VB influence)(NP(DT the)"
u"(NN outcome)))(CC and)(VP(ADVP(RB perhaps))(VB join)(NP(DT the)"
u"(VBG winning)(NN bidder)))))))))(. .))")
assert fields[u"metadata"].metadata[u"gold_tree"] == gold_tree
assert fields[u"metadata"].metadata[u"tokens"] == tokens
correct_spans_and_labels = {}
ptb_reader._get_gold_spans(gold_tree, 0, correct_spans_and_labels)
for span, label in izip(spans, span_labels):
if label != u"NO-LABEL":
assert correct_spans_and_labels[span] == label
def test_strip_functional_tags(self):
ptb_reader = PennTreeBankConstituencySpanDatasetReader()
# Get gold spans should strip off all the functional tags.
tree = Tree.fromstring(u"(S (NP=PRP (D the) (N dog)) (VP-0 (V chased) (NP|FUN-TAGS (D the) (N cat))))")
ptb_reader._strip_functional_tags(tree)
assert tree == Tree.fromstring(u"(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
def test_get_gold_spans_correctly_extracts_spans(self):
ptb_reader = PennTreeBankConstituencySpanDatasetReader()
tree = Tree.fromstring(u"(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))")
span_dict = {}
ptb_reader._get_gold_spans(tree, 0, span_dict)
spans = list(span_dict.items()) # pylint: disable=protected-access
assert spans == [((0, 1), u'NP'), ((3, 4), u'NP'), ((2, 4), u'VP'), ((0, 4), u'S')]
def test_get_gold_spans_correctly_extracts_spans_with_nested_labels(self):
ptb_reader = PennTreeBankConstituencySpanDatasetReader()
# Here we have a parse with several nested labels - particularly the (WHNP (WHNP (WP What)))
# fragment. These should be concatenated into a single label by get_gold_spans.
tree = Tree.fromstring(u"""
(S
(`` ``)
(S-TPC
(NP-SBJ (PRP We))
(VP
(VBP have)
(S
(VP
(TO to)
(VP
(VP
(VB clear)
(PRT (RP up))
(NP (DT these) (NNS issues)))
(CC and)
(VP
(VB find)
(PRT (RP out))
(SBAR-NOM
(WHNP (WHNP (WP what)))
(S
(VP
(VBZ is)
(ADJP-PRD (JJ present))
(SBAR
(WHNP (WDT that))
(S
(VP
(VBZ is)
(VP
(VBG creating)
(NP (JJ artificial) (NN volatility)))))))))))))))
(, ,)
('' '')
(NP-SBJ (NNP Mr.) (NNP Fisher))
(VP (VBD said))
(. .))
""")
span_dict = {}
ptb_reader._strip_functional_tags(tree) # pylint: disable=protected-access
ptb_reader._get_gold_spans(tree, 0, span_dict) # pylint: disable=protected-access
assert span_dict == {(1, 1): u'NP', (5, 5): u'PRT', (6, 7): u'NP', (4, 7): u'VP', (10, 10): u'PRT',
(11, 11): u'WHNP-WHNP', (13, 13): u'ADJP', (14, 14): u'WHNP', (17, 18): u'NP',
(16, 18): u'VP', (15, 18): u'S-VP', (14, 18): u'SBAR', (12, 18): u'S-VP',
(11, 18): u'SBAR', (9, 18): u'VP', (4, 18): u'VP', (3, 18): u'S-VP',
(2, 18): u'VP', (1, 18): u'S', (21, 22): u'NP', (23, 23): u'VP', (0, 24): u'S'}
|
blesuite/cli/blesuite_cli.py
|
jreynders/BLESuite-1
| 198 |
139449
|
import argparse
from blesuite.connection_manager import BLEConnectionManager
from blesuite_wrapper import ble_service_read, ble_service_read_async, ble_service_write, \
ble_handle_subscribe, ble_service_scan, ble_service_write_async, ble_run_smart_scan
from blesuite import utils
from blesuite.utils.print_helper import print_data_and_hex
from blesuite.utils import validators
import logging
__version__ = "2.0"
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def parse_command():
"""
Creates parser and parses command line tool call.
:return: parsed arguments
"""
global __version__
#Dictionary of available commands. Place new commands here
cmd_choices = {'scan': "Scan for BTLE devices",
'smartscan': "Scan specified BTLE device for device information, services, characteristics "
"(including associated descriptors). Note: This scan takes longer than the service scan",
'servicescan': 'Scan specified address for all services, characteristics, and descriptors. ',
'read': "Read value from specified device and handle",
'write': "Write value to specific handle on a device. Specify the --data or --files options"
"to set the payload data. Only data or file data can be specified, not both"
"(data submitted using the data flag takes precedence over data in files).",
'subscribe': "Write specified value (0000,0100,0200,0300) to chosen handle and initiate listener.",
'spoof': 'Modify your Bluetooth adapter\'s BT_ADDR. Use --address to set the address. Some chipsets'
' may not be supported.'}
address_type_choices = ['public', 'random']
parser = argparse.ArgumentParser(prog="blesuite",
                                     description='Bluetooth Low Energy (BTLE) tool set for communicating and '
'testing BTLE devices on the application layer.') # ,
# formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('command', metavar='command', type=str, nargs=1,
action='store', choices=cmd_choices.keys(),
help='BLESuite command you would like to execute.' +
                             ' The following are the currently supported commands:\n' +
'\n'.join(['\033[1m{}\033[0m: {}'.format(k, v) for k, v in cmd_choices.iteritems()]))
parser.add_argument('--async', action='store_true', help='\033[1m<read, write>\033[0m '
'Enable asynchronous writing/reading. Any output'
                                                              ' will be displayed when received. This prevents'
                                                              ' blocking.')
parser.add_argument('--skip-device-info-query', action='store_true', help='\033[1m<smartscan>\033[0m '
'When scanning a device, specify this flag'
                                                                              ' to force smartscan to skip querying the device'
                                                                              ' for common information such as device name. This'
                                                                              ' is helpful when devices do not implement these services.')
parser.add_argument('--smart-read', action='store_true', help='\033[1m<smartscan>\033[0m '
'When scanning a device, specify this flag'
                                                                   ' to force smartscan to attempt to read'
                                                                   ' from each discovered characteristic descriptor.'
                                                                   ' Note: This will increase scan time to handle'
                                                                   ' each read operation.')
parser.add_argument('-m', '--mode', metavar='mode', default=[1],
type=int, nargs=1, required=False,
action='store', help='\033[1m<subscribe>\033[0m '
'Selects which configuration to set'
                                             ' for a characteristic configuration descriptor.'
                                             ' 0=off,1=notifications,2=indications,'
                                             '3=notifications and indications')
parser.add_argument('--timeout', metavar='timeout', default=[5],
type=int, nargs=1,
required=False, action='store',
help='\033[1m<lescan, read, write>\033[0m '
'Timeout (in seconds) for attempting to retrieve data from a device '
'(ie reading from a descriptor handle). (Default: 5 seconds)')
parser.add_argument('--subscribe-timeout', metavar='subscribe-timeout', default=[None],
type=int, nargs=1,
required=False, action='store',
help='\033[1m<subscribe>\033[0m '
'Time (in seconds) for attempting to retrieve data from a device '
'when listening for notifications or indications. (Default: Indefinite)')
# Device for discovery service can be specified
parser.add_argument('-i', '--adapter', metavar='adapter', default=[0],
type=int, nargs=1,
required=False, action='store',
help='\033[1m<all commands>\033[0m '
'Specify which Bluetooth adapter should be used. '
'These can be found by running (hcitool dev).')
parser.add_argument('-d', '--address', metavar='address', type=validators.validate_bluetooth_address_cli, nargs=1,
required=False, action='store',
help='\033[1m<all commands>\033[0m '
'Bluetooth address (BD_ADDR) of the target Bluetooth device')
parser.add_argument('-a', '--handles', metavar='handles', type=str, nargs="+",
required=False, action='store', default=[],
help='\033[1m<read, write>\033[0m '
                             'Hexadecimal handle list of characteristics to access (ex: 005a 006b). If '
'you want to access the value of a characteristic, use the handle_value '
'value from the service scan.')
parser.add_argument('-u', '--uuids', metavar='uuids', type=str, nargs="+",
required=False, action='store', default=[],
help='\033[1m<read>\033[0m '
'UUID list of characteristics to access. If '
'you want to access the value of a characteristic, use the UUID '
'value from the service scan.')
parser.add_argument('--data', metavar='data', type=str, nargs="+",
required=False, action='store', default=[],
help='\033[1m<write>\033[0m '
'Strings that you want to write to a handle (separated by spaces).')
parser.add_argument('--files', metavar='files', type=str, nargs="+",
required=False, action='store', default=[],
help='\033[1m<write>\033[0m '
'Files that contain data to write to handle (separated by spaces)')
parser.add_argument('--payload-delimiter', metavar='payload-delimiter', type=str, nargs=1,
required=False, action='store', default=["EOF"],
help='\033[1m<write>\033[0m '
'Specify a delimiter (string) to use when specifying data for BLE payloads.'
                             ' For instance, if I want to send packets with payloads in a file separated'
                             ' by a comma, supply \'--payload-delimiter ,\'. Supply EOF if you want the entire contents'
                             ' of a file sent. (Default: EOF)')
parser.add_argument("-t", '--address-type', metavar='address-type', type=str, nargs=1,
required=False, action='store', default=['public'], choices=address_type_choices,
help='\033[1m<all commands>\033[0m '
'Type of BLE address you want to connect to [public | random].')
parser.add_argument('--version', action='version', version='%(prog)s ' + __version__)
parser.add_argument('--debug', action='store_true', help='\033[1m<all commands>\033[0m '
'Enable logging for debug statements.')
return parser.parse_args()
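# Illustrative invocations of the parser above (addresses and handles are hypothetical):
#   blesuite scan -i 0 --timeout 5
#   blesuite read -d AA:BB:CC:DD:EE:FF -a 005a 006b
#   blesuite write -d AA:BB:CC:DD:EE:FF -a 005a --data hello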
def process_args(args):
"""
Process command line tool arguments parsed by argparse
and call appropriate bleSuite functions.
:param args: parser.parse_args()
:return:
"""
command = args.command[0]
if args.debug:
logging.basicConfig(level=logging.DEBUG)
timeout = args.timeout[0] * 1000 # convert seconds to ms
if command == 'spoof':
import bdaddr
if args.address[0] == "":
print "Please specify an address to spoof."
else:
logger.debug("About to spoof to address %s for adapter %s" % (args.address[0], args.adapter[0]))
ret = bdaddr.bdaddr(("hci"+str(args.adapter[0])), args.address[0])
if ret == -1:
raise ValueError('Spoofing failed. Your device may not be supported.')
if command == 'scan':
print "BTLE Scan beginning"
with BLEConnectionManager(args.adapter[0], 'central') as connection_manager:
discovered = connection_manager.scan(timeout)
print "Discovered:"
for i in discovered.keys():
print "\t", i, "(public)" if discovered[i][0] == 0 else "(random)"
for h, j in enumerate(discovered[i][1]):
gap = connection_manager.decode_gap_data(str(discovered[i][1][h]))
info = connection_manager.generate_gap_data_dict(gap)
for k in info.keys():
print "\t\t", k + ":"
print "\t\t\t", info[k]
if command == 'smartscan':
print "BTLE Smart Scan beginning"
device = ble_run_smart_scan(args.address[0], args.adapter[0],
args.address_type[0], skip_device_info_query=args.skip_device_info_query,
attempt_read=args.smart_read,
timeout=timeout)
if command == 'servicescan':
print "BTLE Scanning Services"
ble_service_scan(args.address[0], args.adapter[0],
args.address_type[0])
if command == 'read':
if len(args.handles) <= 0 and len(args.uuids) <= 0:
print "ERROR: No handles or UUIDs supplied for read operation."
return
print "Reading value from handle or UUID"
if args.async:
uuidData, handleData = ble_service_read_async(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, args.uuids,
timeout=timeout)
for dataTuple in handleData:
print "\nHandle:", "0x" + dataTuple[0]
print_data_and_hex(dataTuple[1], False)
'''
if isinstance(dataTuple[1][0], str):
utils.print_helper.print_data_and_hex(dataTuple[1], False)
else:
utils.print_helper.print_data_and_hex(dataTuple[1][1], False)'''
for dataTuple in uuidData:
print "\nUUID:", dataTuple[0]
print_data_and_hex(dataTuple[1], False)
'''
if isinstance(dataTuple[1][0], str):
utils.print_helper.print_data_and_hex(dataTuple[1], False)
else:
utils.print_helper.print_data_and_hex(dataTuple[1][1].received(), True)'''
else:
uuidData, handleData = ble_service_read(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, args.uuids, timeout=timeout)
for dataTuple in handleData:
print "\nHandle:", "0x" + dataTuple[0]
print_data_and_hex(dataTuple[1], False)
for dataTuple in uuidData:
print "\nUUID:", dataTuple[0]
print_data_and_hex(dataTuple[1], False)
if command == 'write':
if len(args.handles) <= 0:
print "ERROR: No handles supplied for write operation. Note: Write operation does not support use of UUIDs."
return
print "Writing value to handle"
if args.async:
logger.debug("Async Write")
if len(args.data) > 0:
handleData = ble_service_write_async(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, args.data,
timeout=timeout)
elif args.payload_delimiter[0] == 'EOF':
logger.debug("Payload Delimiter: EOF")
dataSet = []
for dataFile in args.files:
if dataFile is None:
continue
logger.debug("Reading file: %s", dataFile)
f = open(dataFile, 'r')
dataSet.append(f.read())
f.close()
logger.debug("Sending data set: %s" % dataSet)
                handleData = ble_service_write_async(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, dataSet,
timeout=timeout)
logger.debug("Received data: %s" % handleData)
'''for dataTuple in handleData:
print "\nHandle:", "0x" + dataTuple[0]
utils.print_helper.print_data_and_hex(dataTuple[1], False)'''
else:
logger.debug("Payload Delimiter: %s", args.payload_delimiter[0])
dataSet = []
for dataFile in args.files:
if dataFile is None:
continue
f = open(dataFile, 'r')
data = f.read()
f.close()
data = data.split(args.payload_delimiter[0])
dataSet.extend(data)
logger.debug("Sending dataSet: %s" % dataSet)
handleData = ble_service_write_async(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, dataSet,
timeout=timeout)
for dataTuple in handleData:
print "\nHandle:", "0x" + dataTuple[0]
print "Input:"
utils.print_helper.print_data_and_hex(dataTuple[2], False, prefix="\t")
print "Output:"
#if tuple[1][0] is a string, it means our cmdLineToolWrapper removed the GattResponse object
#due to a timeout, else we grab the GattResponse and its response data
if isinstance(dataTuple[1][0], str):
utils.print_helper.print_data_and_hex(dataTuple[1], False, prefix="\t")
else:
utils.print_helper.print_data_and_hex(dataTuple[1][1].received(), False, prefix="\t")
else:
logger.debug("Sync Write")
print args.data
if len(args.data) > 0:
handleData = ble_service_write(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, args.data, timeout=timeout)
'''for dataTuple in handleData:
print "\nHandle:", "0x" + dataTuple[0]
utils.print_helper.print_data_and_hex(dataTuple[1], False)'''
elif args.payload_delimiter[0] == 'EOF':
logger.debug("Payload Delimiter: EOF")
dataSet = []
for dataFile in args.files:
if dataFile is None:
continue
logger.debug("Reading file: %s", dataFile)
f = open(dataFile, 'r')
dataSet.append(f.read())
f.close()
logger.debug("Sending data set: %s" % dataSet)
handleData = ble_service_write(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, dataSet, timeout=timeout)
logger.debug("Received data: %s" % handleData)
'''for dataTuple in handleData:
print "\nHandle:", "0x" + dataTuple[0]
utils.print_helper.print_data_and_hex(dataTuple[1], False)'''
else:
logger.debug("Payload Delimiter: %s", args.payload_delimiter[0])
dataSet = []
for dataFile in args.files:
if dataFile is None:
continue
f = open(dataFile, 'r')
data = f.read()
f.close()
data = data.split(args.payload_delimiter[0])
dataSet.extend(data)
logger.debug("Sending dataSet: %s" % dataSet)
handleData = ble_service_write(args.address[0], args.adapter[0],
args.address_type[0],
args.handles, dataSet, timeout=timeout)
for dataTuple in handleData:
print "\nHandle:", "0x" + dataTuple[0]
print "Input:"
print_data_and_hex([dataTuple[2]], False, prefix="\t")
print "Output:"
print_data_and_hex(dataTuple[1], False, prefix="\t")
if command == 'subscribe':
print "Subscribing to device"
if args.subscribe_timeout[0] is not None:
timeout = args.subscribe_timeout[0] * 1000
else:
timeout = None
ble_handle_subscribe(args.address[0], args.handles, args.adapter[0],
args.address_type[0], args.mode[0], timeout)
return
def main():
"""
Main loop for BLESuite command line tool.
:return:
"""
args = parse_command()
process_args(args)
logger.debug("Args: %s" % args)
|
tools/sandbox/c7n_autodoc/setup.py
|
al3pht/cloud-custodian
| 2,415 |
139461
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from setuptools import setup
import os
description = ""
if os.path.exists('README.md'):
description = open('README.md', 'r').read()
setup(
name="c7n_autodoc",
version='0.3',
description="Cloud Custodian - Automated Policy Documentation",
classifiers=[
"Topic :: System :: Systems Administration",
"Topic :: System :: Distributed Computing"
],
url="https://github.com/cloud-custodian/cloud-custodian",
long_description=description,
long_description_content_type='text/markdown',
author="<NAME>",
author_email="<EMAIL>",
license="Apache-2.0",
py_modules=["c7n_autodoc"],
install_requires=["c7n", "pyyaml>=4.2b4", "boto3", "jinja2>=2.11.3", "jsonschema"]
)
|
examples/mnist/keras/mnist_inference.py
|
tornado12345/TensorFlowOnSpark
| 4,363 |
139466
|
# Copyright 2018 Yahoo Inc.
# Licensed under the terms of the Apache 2.0 license.
# Please see LICENSE file in the project root for terms.
# This example demonstrates how to leverage Spark for parallel inferencing from a SavedModel.
#
# Normally, you can use TensorFlowOnSpark to just form a TensorFlow cluster for training and inferencing.
# However, in some situations, you may have a SavedModel without the original code for defining the inferencing
# graph. In these situations, we can use Spark to instantiate a single-node TensorFlow instance on each executor,
# where each executor can independently load the model and inference on input data.
#
# Note: this particular example demonstrates use of `tf.data.Dataset` to read the input data for inferencing,
# but it could also be adapted to just use an RDD of TFRecords from Spark.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import tensorflow as tf
def inference(args, ctx):
# load saved_model
saved_model = tf.saved_model.load(args.export_dir, tags='serve')
predict = saved_model.signatures['serving_default']
# parse function for TFRecords
def parse_tfr(example_proto):
feature_def = {"label": tf.io.FixedLenFeature(1, tf.int64),
"image": tf.io.FixedLenFeature(784, tf.int64)}
features = tf.io.parse_single_example(serialized=example_proto, features=feature_def)
image = tf.cast(features['image'], dtype=tf.float32) / 255.0
image = tf.reshape(image, [28, 28, 1])
label = tf.cast(features['label'], dtype=tf.float32)
return (image, label)
# define a new tf.data.Dataset (for inferencing)
ds = tf.data.Dataset.list_files("{}/part-*".format(args.images_labels), shuffle=False)
ds = ds.shard(ctx.num_workers, ctx.worker_num)
ds = ds.interleave(tf.data.TFRecordDataset)
ds = ds.map(parse_tfr)
ds = ds.batch(10)
# create an output file per spark worker for the predictions
tf.io.gfile.makedirs(args.output)
output_file = tf.io.gfile.GFile("{}/part-{:05d}".format(args.output, ctx.worker_num), mode='w')
for batch in ds:
predictions = predict(conv2d_input=batch[0])
    labels = np.reshape(batch[1], -1).astype(int)
preds = np.argmax(predictions['dense_1'], axis=1)
for x in zip(labels, preds):
output_file.write("{} {}\n".format(x[0], x[1]))
output_file.close()
if __name__ == '__main__':
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
from tensorflowonspark import TFParallel
sc = SparkContext(conf=SparkConf().setAppName("mnist_inference"))
executors = sc._conf.get("spark.executor.instances")
num_executors = int(executors) if executors is not None else 1
parser = argparse.ArgumentParser()
parser.add_argument("--cluster_size", help="number of nodes in the cluster (for S with labelspark Standalone)", type=int, default=num_executors)
parser.add_argument('--images_labels', type=str, help='Directory for input images with labels')
parser.add_argument("--export_dir", help="HDFS path to export model", type=str, default="mnist_export")
parser.add_argument("--output", help="HDFS path to save predictions", type=str, default="predictions")
args, _ = parser.parse_known_args()
print("args: {}".format(args))
# Running single-node TF instances on each executor
TFParallel.run(sc, inference, args, args.cluster_size)
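# Example submission (cluster settings and HDFS paths are hypothetical):
#   spark-submit --master yarn --deploy-mode client --num-executors 4 \
#     mnist_inference.py --cluster_size 4 \
#     --images_labels mnist/tfrecords/test --export_dir mnist_export --output predictions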
|
bindings/python/setup.py
|
wangjia3015/marisa-trie
| 388 |
139471
|
from distutils.core import setup, Extension
marisa_module = Extension("_marisa",
sources=["marisa-swig_wrap.cxx", "marisa-swig.cxx"],
libraries=["marisa"])
setup(name = "marisa",
ext_modules = [marisa_module],
py_modules = ["marisa"])
|
pyeit/mesh/wrapper.py
|
ccfbeltran/pyEIT
| 107 |
139488
|
# coding: utf-8
# pylint: disable=invalid-name, no-member, too-many-arguments
""" wrapper function of distmesh for EIT """
# Copyright (c) <NAME>. All rights reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division, absolute_import, print_function
import numpy as np
from .distmesh import build
from .mesh_circle import MeshCircle
from .utils import check_order
from .shape import circle, area_uniform, ball, thorax, L_shaped
from .shape import fix_points_fd, fix_points_ball
def create(n_el=16, fd=None, fh=area_uniform, h0=0.1, p_fix=None, bbox=None):
"""
Generating 2D/3D meshes using distmesh (pyEIT built-in)
Parameters
----------
n_el: int
number of electrodes (point-type electrode)
fd: function
distance function (circle in 2D, ball in 3D)
fh: function
mesh size quality control function
p_fix: NDArray
fixed points
bbox: NDArray
bounding box
h0: float
initial mesh size, default=0.1
Returns
-------
mesh_obj: dict
{'element', 'node', 'perm'}
"""
# test conditions if fd or/and bbox are none
if bbox is None:
if fd != ball:
bbox = np.array([[-1, -1], [1, 1]])
else:
bbox = [[-1.2, -1.2, -1.2], [1.2, 1.2, 1.2]]
    bbox = np.array(
        bbox
    )  # convert the list to a NumPy array so its shape can be queried below
    n_dim = bbox.shape[1]  # spatial dimension of the mesh
# infer dim
if fd is None:
if n_dim == 2:
fd = circle
elif n_dim == 3:
fd = ball
if n_dim not in [2, 3]:
raise TypeError("distmesh only supports 2D or 3D")
if bbox.shape[0] != 2:
raise TypeError("please specify lower and upper bound of bbox")
if p_fix is None:
if n_dim == 2:
if fd == thorax:
# thorax shape is generated so far without fixed points (to be updated later)
p_fix = [
(-0.098, -0.6463),
(-0.4181, -0.6074),
(-0.7207, -0.4946),
(-0.933, -0.2647),
(-0.9147, 0.0543),
(-0.8022, 0.3565),
(-0.5791, 0.5864),
(-0.1653, 0.6819),
(0.1564, 0.6571),
(0.5814, 0.6353),
(0.8298, 0.433),
(0.9698, 0.1431),
(0.9914, -0.1767),
(0.8359, -0.449),
(0.5419, -0.5833),
(0.2243, -0.6456),
]
p_fix = np.array(p_fix)
elif fd == L_shaped:
p_fix = [
[1, 0],
[1, -1],
[0, -1],
[-1, -1],
[-1, 0],
[-1, 1],
[0, 1],
[0, 0],
] # values brought from distmesh2D L shaped mesh example
p_fix = np.array(p_fix)
h0 = 0.15
else:
p_fix = fix_points_fd(fd, n_el=n_el)
elif n_dim == 3:
p_fix = fix_points_ball(n_el=n_el)
# 1. build mesh
p, t = build(fd, fh, pfix=p_fix, bbox=bbox, h0=h0)
# 2. check whether t is counter-clock-wise, otherwise reshape it
t = check_order(p, t)
# 3. generate electrodes, the same as p_fix (top n_el)
el_pos = np.arange(n_el)
# 4. init uniform element permittivity (sigma)
    perm = np.ones(t.shape[0], dtype=float)
# 5. build output structure
mesh = {"element": t, "node": p, "perm": perm}
return mesh, el_pos
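# Usage sketch (2D defaults): mesh, el_pos = create(n_el=16) builds a unit-circle mesh and
# returns {"element", "node", "perm"} with a uniform permittivity of ones, together with
# the indices of the 16 electrode nodes (the first n_el fixed points).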
def set_perm(mesh, anomaly=None, background=None):
"""wrapper for pyEIT interface
Note
----
update permittivity of mesh, if specified.
Parameters
----------
mesh: dict
mesh structure
anomaly: dict, optional
anomaly is a dictionary (or arrays of dictionary) contains,
{'x': val, 'y': val, 'd': val, 'perm': val}
all permittivity on triangles whose distance to (x,y) are less than (d)
will be replaced with a new value, 'perm' may be a complex value.
background: float, optional
set background permittivity
Returns
-------
mesh_obj: dict
updated mesh structure, {'element', 'node', 'perm'}
"""
pts = mesh["element"]
tri = mesh["node"]
perm = mesh["perm"].copy()
tri_centers = np.mean(tri[pts], axis=1)
# this code is equivalent to:
# >>> N = np.shape(tri)[0]
# >>> for i in range(N):
# >>> tri_centers[i] = np.mean(pts[tri[i]], axis=0)
# >>> plt.plot(tri_centers[:,0], tri_centers[:,1], 'kx')
n = np.size(mesh["perm"])
# reset background if needed
if background is not None:
perm = background * np.ones(n)
# change dtype to 'complex' for complex-valued permittivity
if anomaly is not None:
for attr in anomaly:
if np.iscomplex(attr["perm"]):
perm = perm.astype("complex")
break
# assign anomaly values (for elements in regions)
if anomaly is not None:
for _, attr in enumerate(anomaly):
d = attr["d"]
# find elements whose distance to (cx,cy) is smaller than d
if "z" in attr:
index = (
np.sqrt(
(tri_centers[:, 0] - attr["x"]) ** 2
+ (tri_centers[:, 1] - attr["y"]) ** 2
+ (tri_centers[:, 2] - attr["z"]) ** 2
)
< d
)
else:
index = (
np.sqrt(
(tri_centers[:, 0] - attr["x"]) ** 2
+ (tri_centers[:, 1] - attr["y"]) ** 2
)
< d
)
# update permittivity within indices
perm[index] = attr["perm"]
mesh_new = {"node": tri, "element": pts, "perm": perm}
return mesh_new
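# Sketch with a hypothetical anomaly: set_perm(mesh, anomaly=[{"x": 0.4, "y": 0.4, "d": 0.2, "perm": 10.0}])
# assigns perm=10.0 to every triangle whose center lies within distance 0.2 of (0.4, 0.4)
# and leaves the rest of the mesh unchanged.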
def layer_circle(n_el=16, n_fan=8, n_layer=8):
"""generate mesh on unit-circle"""
model = MeshCircle(n_fan=n_fan, n_layer=n_layer, n_el=n_el)
p, e, el_pos = model.create()
perm = np.ones(e.shape[0])
mesh = {"element": e, "node": p, "perm": perm}
return mesh, el_pos
|
cupy/_indexing/indexing.py
|
prkhrsrvstv1/cupy
| 6,180 |
139509
|
import cupy
from cupy._core import internal
def take(a, indices, axis=None, out=None):
"""Takes elements of an array at specified indices along an axis.
This is an implementation of "fancy indexing" at single axis.
This function does not support ``mode`` option.
Args:
a (cupy.ndarray): Array to extract elements.
indices (int or array-like): Indices of elements that this function
takes.
axis (int): The axis along which to select indices. The flattened input
is used by default.
out (cupy.ndarray): Output array. If provided, it should be of
appropriate shape and dtype.
Returns:
cupy.ndarray: The result of fancy indexing.
.. seealso:: :func:`numpy.take`
"""
# TODO(okuta): check type
return a.take(indices, axis, out)
def take_along_axis(a, indices, axis):
"""Take values from the input array by matching 1d index and data slices.
Args:
a (cupy.ndarray): Array to extract elements.
indices (cupy.ndarray): Indices to take along each 1d slice of ``a``.
axis (int): The axis to take 1d slices along.
Returns:
cupy.ndarray: The indexed result.
.. seealso:: :func:`numpy.take_along_axis`
"""
if indices.dtype.kind not in ('i', 'u'):
raise IndexError('`indices` must be an integer array')
if axis is None:
a = a.ravel()
axis = 0
ndim = a.ndim
axis = internal._normalize_axis_index(axis, ndim)
if ndim != indices.ndim:
raise ValueError(
'`indices` and `a` must have the same number of dimensions')
fancy_index = []
for i, n in enumerate(a.shape):
if i == axis:
fancy_index.append(indices)
else:
ind_shape = (1,) * i + (-1,) + (1,) * (ndim - i - 1)
fancy_index.append(cupy.arange(n).reshape(ind_shape))
return a[tuple(fancy_index)]
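# Usage sketch mirroring numpy.take_along_axis semantics:
#   a = cupy.asarray([[10, 30, 20]])
#   idx = cupy.argsort(a, axis=1)
#   take_along_axis(a, idx, axis=1)   # => array([[10, 20, 30]])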
def choose(a, choices, out=None, mode='raise'):
return a.choose(choices, out, mode)
def compress(condition, a, axis=None, out=None):
"""Returns selected slices of an array along given axis.
Args:
condition (1-D array of bools): Array that selects which entries to
return. If len(condition) is less than the size of a along the
given axis, then output is truncated to the length of the condition
array.
a (cupy.ndarray): Array from which to extract a part.
axis (int): Axis along which to take slices. If None (default), work
on the flattened array.
out (cupy.ndarray): Output array. If provided, it should be of
appropriate shape and dtype.
Returns:
cupy.ndarray: A copy of a without the slices along axis for which
condition is false.
.. warning::
This function may synchronize the device.
.. seealso:: :func:`numpy.compress`
"""
return a.compress(condition, axis, out)
def diagonal(a, offset=0, axis1=0, axis2=1):
"""Returns specified diagonals.
This function extracts the diagonals along two specified axes. The other
axes are not changed. This function returns a writable view of this array
as NumPy 1.10 will do.
Args:
a (cupy.ndarray): Array from which the diagonals are taken.
offset (int): Index of the diagonals. Zero indicates the main
diagonals, a positive value upper diagonals, and a negative value
lower diagonals.
axis1 (int): The first axis to take diagonals from.
axis2 (int): The second axis to take diagonals from.
Returns:
cupy.ndarray: A view of the diagonals of ``a``.
.. seealso:: :func:`numpy.diagonal`
"""
# TODO(okuta): check type
return a.diagonal(offset, axis1, axis2)
def extract(condition, a):
"""Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``.
If ``condition`` is boolean, ``np.extract`` is equivalent to
``arr[condition]``.
Args:
condition (int or array_like): An array whose nonzero or True entries
indicate the elements of array to extract.
a (cupy.ndarray): Input array of the same size as condition.
Returns:
cupy.ndarray: Rank 1 array of values from arr where condition is True.
.. warning::
This function may synchronize the device.
.. seealso:: :func:`numpy.extract`
"""
if not isinstance(a, cupy.ndarray):
raise TypeError('extract requires input array to be cupy.ndarray')
if not isinstance(condition, cupy.ndarray):
condition = cupy.array(condition)
a = a.ravel()
condition = condition.ravel()
return a.take(condition.nonzero()[0])
def select(condlist, choicelist, default=0):
"""Return an array drawn from elements in choicelist, depending on conditions.
Args:
condlist (list of bool arrays): The list of conditions which determine
from which array in `choicelist` the output elements are taken.
When multiple conditions are satisfied, the first one encountered
in `condlist` is used.
choicelist (list of cupy.ndarray): The list of arrays from which the
output elements are taken. It has to be of the same length
as `condlist`.
default (scalar) : If provided, will fill element inserted in `output`
when all conditions evaluate to False. default value is 0.
Returns:
cupy.ndarray: The output at position m is the m-th element of the
array in `choicelist` where the m-th element of the corresponding
array in `condlist` is True.
.. seealso:: :func:`numpy.select`
"""
if len(condlist) != len(choicelist):
raise ValueError(
'list of cases must be same length as list of conditions')
if len(condlist) == 0:
raise ValueError("select with an empty condition list is not possible")
if not cupy.isscalar(default):
raise TypeError("default only accepts scalar values")
for i in range(len(choicelist)):
if not isinstance(choicelist[i], cupy.ndarray):
raise TypeError("choicelist only accepts lists of cupy ndarrays")
cond = condlist[i]
if cond.dtype.type is not cupy.bool_:
raise ValueError(
'invalid entry {} in condlist: should be boolean ndarray'
.format(i))
dtype = cupy.result_type(*choicelist)
condlist = cupy.broadcast_arrays(*condlist)
choicelist = cupy.broadcast_arrays(*choicelist, default)
if choicelist[0].ndim == 0:
result_shape = condlist[0].shape
else:
result_shape = cupy.broadcast_arrays(condlist[0],
choicelist[0])[0].shape
result = cupy.empty(result_shape, dtype)
cupy.copyto(result, default)
choicelist = choicelist[-2::-1]
condlist = condlist[::-1]
for choice, cond in zip(choicelist, condlist):
cupy.copyto(result, choice, where=cond)
return result
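# Sketch of select semantics (mirrors numpy.select): with x = cupy.arange(6),
# select([x < 3, x > 3], [x, x ** 2], default=0) returns [0, 1, 2, 0, 16, 25],
# taking the first matching condition per element and the default where none match.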
|
gemini/gemini_main.py
|
bgruening/gemini
| 221 |
139515
|
#!/usr/bin/env python
import os.path
import sys
import tempfile
import argparse
import gemini.version
def add_inheritance_args(parser, min_kindreds=1, depth=True, gt_ll=False,
allow_unaffected=True, lenient=True, gq=True):
"""Common arguments added to various sub-parsers"""
parser.add_argument('db',
metavar='db',
help='The name of the database to be queried.')
parser.add_argument('--columns',
dest='columns',
metavar='STRING',
help='A list of columns that you would like returned. Def. = "*"',
)
parser.add_argument('--filter',
dest='filter',
metavar='STRING',
help='Restrictions to apply to variants (SQL syntax)')
parser.add_argument('--min-kindreds',
dest='min_kindreds',
type=int,
default=min_kindreds,
help='The min. number of kindreds that must have a candidate variant in a gene.')
parser.add_argument('--families',
dest='families',
                        help='Restrict analysis to a specific set of 1 or more (comma separated) families',
default=None)
if lenient:
parser.add_argument("--lenient",
default=False,
action="store_true",
help="Loosen the restrictions on family structure")
if allow_unaffected:
parser.add_argument('--allow-unaffected',
action='store_true',
help='Report candidates that also impact samples labeled as unaffected.',
default=False)
# this is for comp_het, eventually, we could add depth support to that tool.
if depth:
parser.add_argument('-d',
dest='min_sample_depth',
type=int,
help="The minimum aligned\
sequence depth required for\
each sample in a family (default = 0)",
default=0)
if gq:
parser.add_argument('--min-gq',
type=int,
help="The minimum genotype quality required for \
each sample in a family (default = 0)",
default=0)
if gt_ll:
parser.add_argument('--gt-pl-max',
dest='gt_phred_ll',
type=int,
help="The maximum phred-scaled genotype likelihod"
" (PL) allowed for each sample in a family.",
default=None)
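# Illustrative wiring (hypothetical sub-parser name): an inheritance tool such as comp_hets
# would call add_inheritance_args(parser_comp_hets, gt_ll=True) to pick up db, --columns,
# --filter, --min-kindreds and --families plus the optional depth/GQ/PL thresholds above.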
def examples(parser, args):
print("")
print( "[load] - load a VCF file into a gemini database:")
print( " gemini load -v my.vcf my.db")
print( " gemini load -v my.vcf -t snpEff my.db")
print( " gemini load -v my.vcf -t VEP my.db")
print("")
print( "[stats] - report basic statistics about your variants:")
print( " gemini stats --tstv my.db")
print( " gemini stats --tstv-coding my.db")
print( " gemini stats --sfs my.db")
print( " gemini stats --snp-counts my.db")
print("")
print( "[query] - explore the database with ad hoc queries:")
print( " gemini query -q \"select * from variants where is_lof = 1 and aaf <= 0.01\" my.db")
print( " gemini query -q \"select chrom, pos, gt_bases.NA12878 from variants\" my.db")
print( " gemini query -q \"select chrom, pos, in_omim, clin_sigs from variants\" my.db")
print("")
print( "[dump] - convenient \"data dumps\":")
print( " gemini dump --variants my.db")
print( " gemini dump --genotypes my.db")
print( " gemini dump --samples my.db")
print("")
print( "[region] - access variants in specific genomic regions:")
print( " gemini region --reg chr1:100-200 my.db")
print( " gemini region --gene TP53 my.db")
print("")
print( "[tools] - there are also many specific tools available")
print( " 1. Find compound heterozygotes.")
print( " gemini comp_hets my.db")
print("")
exit()
def main():
#########################################
# create the top-level parser
#########################################
parser = argparse.ArgumentParser(prog='gemini', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-v", "--version", help="Installed gemini version",
action="version",
version="%(prog)s " + str(gemini.version.__version__))
parser.add_argument('--annotation-dir', dest='annotation_dir',
help='Path to the annotation database.\n'
'This argument is optional and if given will take precedence over the default location stored in the gemini config file.')
subparsers = parser.add_subparsers(title='[sub-commands]', dest='command')
#########################################
# $ gemini examples
#########################################
parser_examples = subparsers.add_parser('examples',
help='show usage examples')
parser_examples.set_defaults(func=examples)
#########################################
# $ gemini load
#########################################
parser_load = subparsers.add_parser('load',
help='load a VCF file into a gemini database')
parser_load.add_argument('db', metavar='db',
help='The name of the database to be created.')
parser_load.add_argument('-v', dest='vcf',
help='The VCF file to be loaded.')
parser_load.add_argument('-t', dest='anno_type',
default=None, choices=["snpEff", "VEP", "BCFT", "all"],
help="The annotations to be used with the input vcf.")
parser_load.add_argument('-p', dest='ped_file',
help='Sample information file in PED+ format.',
default=None)
parser_load.add_argument('--skip-gerp-bp',
dest='skip_gerp_bp',
action='store_true',
help='Do not load GERP scores at base pair resolution. Loaded by default.',
default=False)
parser_load.add_argument('--skip-cadd',
dest='skip_cadd',
action='store_true',
help='Do not load CADD scores. Loaded by default',
default=False)
parser_load.add_argument('--skip-gene-tables',
dest='skip_gene_tables',
action='store_true',
help='Do not load gene tables. Loaded by default.',
default=False)
parser_load.add_argument('--save-info-string',
dest='skip_info_string',
action='store_false',
help='Load INFO string from VCF file. Not loaded by default',
default=True)
parser_load.add_argument('--no-load-genotypes',
dest='no_load_genotypes',
action='store_true',
help='Genotypes exist in the file, but should not be stored.',
default=False)
parser_load.add_argument('--no-genotypes',
dest='no_genotypes',
action='store_true',
help='There are no genotypes in the file (e.g. some 1000G VCFs)',
default=False)
parser_load.add_argument('--cores', dest='cores',
default=1,
type=int,
help="Number of cores to use to load in parallel.")
parser_load.add_argument('--scheduler', dest='scheduler', default=None,
choices=["lsf", "sge", "slurm", "torque"],
help='Cluster scheduler to use.')
parser_load.add_argument('--queue', dest='queue',
default=None, help='Cluster queue to use.')
parser_load.add_argument('--tempdir', dest='tempdir',
default=tempfile.gettempdir(),
help='Temp directory for storing intermediate files when loading in parallel.')
parser_load.add_argument('--passonly',
dest='passonly',
default=False,
action='store_true',
help="Keep only variants that pass all filters.")
parser_load.add_argument('--test-mode',
dest='test_mode',
action='store_true',
help='Load in test mode (faster)',
default=False)
parser_load.add_argument('--skip-pls',
action='store_true',
help='do not create columns for phred-scaled genotype likelihoods',
default=False)
def load_fn(parser, args):
from gemini import gemini_load
if args.vcf != "-":
args.vcf = os.path.abspath(args.vcf)
gemini_load.load(parser, args)
parser_load.set_defaults(func=load_fn)
#########################################
# $ gemini amend
#########################################
parser_amend = subparsers.add_parser('amend',
help="Amend an already loaded GEMINI database.")
parser_amend.add_argument('db',
metavar='db',
help='The name of the database to be amended.')
parser_amend.add_argument('--sample',
metavar='sample',
default=None,
help='New sample information file to load')
parser_amend.add_argument('--clear',
default=False,
action="store_true",
help='Set all values in this column to NULL before loading.')
def amend_fn(parser, args):
from gemini import gemini_amend
gemini_amend.amend(parser, args)
parser_amend.set_defaults(func=amend_fn)
#########################################
# $ gemini load_chunk
#########################################
parser_loadchunk = subparsers.add_parser('load_chunk',
help='load a VCF file into a gemini database')
parser_loadchunk.add_argument('db',
metavar='db',
help='The name of the database to be created.')
parser_loadchunk.add_argument('-v',
dest='vcf',
help='The VCF file to be loaded.')
parser_loadchunk.add_argument('-t',
dest='anno_type',
default=None,
metavar='STRING',
help="The annotations to be used with the input vcf. Options are:\n"
" snpEff - Annotations as reported by snpEff.\n"
" BCFT - Annotations as reported by bcftools.\n"
" VEP - Annotations as reported by VEP.\n"
)
parser_loadchunk.add_argument('-o',
dest='offset',
help='The starting number for the variant_ids',
default=None)
parser_loadchunk.add_argument('-p',
dest='ped_file',
help='Sample information file in PED+ format.',
default=None)
parser_loadchunk.add_argument('--no-load-genotypes',
dest='no_load_genotypes',
action='store_true',
help='Genotypes exist in the file, but should not be stored.',
default=False)
parser_loadchunk.add_argument('--no-genotypes',
dest='no_genotypes',
action='store_true',
help='There are no genotypes in the file (e.g. some 1000G VCFs)',
default=False)
parser_loadchunk.add_argument('--skip-gerp-bp',
dest='skip_gerp_bp',
action='store_true',
help='Do not load GERP scores at base pair resolution. Loaded by default.',
default=False)
parser_loadchunk.add_argument('--skip-cadd',
dest='skip_cadd',
action='store_true',
help='Do not load CADD scores. Loaded by default',
default=False)
parser_loadchunk.add_argument('--skip-gene-tables',
dest='skip_gene_tables',
action='store_true',
help='Do not load gene tables. Loaded by default.',
default=False)
parser_loadchunk.add_argument('--skip-info-string',
dest='skip_info_string',
action='store_true',
help='Do not load INFO string from VCF file to reduce DB size. Loaded by default',
default=False)
parser_loadchunk.add_argument('--passonly',
dest='passonly',
default=False,
action='store_true',
help="Keep only variants that pass all filters.")
parser_loadchunk.add_argument('--test-mode',
dest='test_mode',
action='store_true',
help='Load in test mode (faster)',
default=False)
parser_loadchunk.add_argument('--skip-pls',
action='store_true',
help='do not create columns for phred-scaled genotype likelihoods',
default=False)
parser_loadchunk.add_argument('--tempdir', dest='tempdir',
default=tempfile.gettempdir(),
help='Local (non-NFS) temp directory to use for working around SQLite locking issues '
'on NFS drives.')
def loadchunk_fn(parser, args):
from gemini import gemini_load_chunk
if args.vcf != "-":
args.vcf = os.path.abspath(args.vcf)
gemini_load_chunk.load(parser, args)
parser_loadchunk.set_defaults(func=loadchunk_fn)
#########################################
# $ gemini merge_chunks
#########################################
parser_mergechunks = subparsers.add_parser('merge_chunks',
help='combine intermediate db files into the final gemini database')
parser_mergechunks.add_argument('--db',
dest='db',
help='The name of the final database to be loaded.')
parser_mergechunks.add_argument('--vcf',
dest='vcf',
help='Original VCF file, for retrieving extra annotation fields.')
parser_mergechunks.add_argument('-t', dest='anno_type',
default=None, choices=["snpEff", "VEP", "BCFT", "all"],
help="The annotations to be used with the input vcf.")
parser_mergechunks.add_argument('--chunkdb',
nargs='*',
dest='chunkdbs',
action='append')
parser_mergechunks.add_argument('--tempdir', dest='tempdir',
default=tempfile.gettempdir(),
help='Local (non-NFS) temp directory to use for working around SQLite locking issues on NFS drives.')
parser_mergechunks.add_argument('--index', dest='index',
action='store_true',
help='Create all database indexes. If multiple merges are used to create a database, only the last merge '
'should create the indexes.')
parser_mergechunks.add_argument('--skip-pls',
action='store_true',
help='do not create columns for phred-scaled genotype likelihoods',
default=False)
def mergechunk_fn(parser, args):
from gemini import gemini_merge_chunks
gemini_merge_chunks.merge_chunks(parser, args)
parser_mergechunks.set_defaults(func=mergechunk_fn)
#########################################
# $ gemini query
#########################################
parser_query = subparsers.add_parser('query',
help='issue ad hoc SQL queries to the DB')
parser_query.add_argument('db',
metavar='db',
help='The name of the database to be queried.')
parser_query.add_argument('-q',
dest='query',
metavar='QUERY_STR',
help='The query to be issued to the database')
parser_query.add_argument('--gt-filter',
dest='gt_filter',
metavar='STRING',
help='Restrictions to apply to genotype values')
parser_query.add_argument('--show-samples',
dest='show_variant_samples',
action='store_true',
default=False,
help=('Add a column of all sample names with a variant to each '
'variant.'))
parser_query.add_argument('--show-families',
dest='show_families',
action='store_true',
default=False,
help=('Add a column listing all of the families '
'with a variant to each variant.'))
parser_query.add_argument('--family-wise',
dest='family_wise',
default=False,
action='store_true',
help=('Perform the sample-filter on a family-wise '
'basis.'))
parser_query.add_argument('--min-kindreds',
dest='min_kindreds',
default=1,
type=int,
help=('Minimum number of families for a variant passing '
'a family-wise filter to be in.'))
parser_query.add_argument('--sample-delim',
dest='sample_delim',
metavar='STRING',
help='The delimiter to be used with the --show-samples option.',
default=',')
parser_query.add_argument('--header',
dest='use_header',
action='store_true',
help='Add a header of column names to the output.',
default=False)
parser_query.add_argument('--sample-filter',
dest='sample_filter',
help='SQL filter to use to filter the sample table',
default=None)
parser_query.add_argument('--in',
dest='in_subject',
nargs='*',
help=('A variant must be in either all, none or any '
'samples passing the --sample-query filter.'),
choices=['all', 'none', 'any', 'only', 'not'],
default=['any'])
parser_query.add_argument('--format',
dest='format',
default='default',
help='Format of output (JSON, TPED or default)')
parser_query.add_argument('--region',
dest='region',
default=None,
help=('Restrict query to this region, '
'e.g. chr1:10-20.'))
parser_query.add_argument('--carrier-summary-by-phenotype',
dest='carrier_summary',
default=None,
help=('Output columns of counts of carriers and '
'non-carriers stratified by the given '
'sample phenotype column'))
parser_query.add_argument('--dgidb',
dest='dgidb',
action='store_true',
help='Request drug-gene interaction info from DGIdb.',
default=False)
parser_query.add_argument('--use-bcolz',
dest='bcolz',
action='store_true',
help='use a (previously created) bcolz index to speed genotype queries',
default=False)
def query_fn(parser, args):
from gemini import gemini_query
gemini_query.query(parser, args)
parser_query.set_defaults(func=query_fn)
#########################################
# $ gemini dump
#########################################
parser_dump = subparsers.add_parser('dump',
help='shortcuts for extracting data from the DB')
parser_dump.add_argument('db',
metavar='db',
help='The name of the database to be queried.')
parser_dump.add_argument('--variants',
dest='variants',
action='store_true',
help='Report all rows/columns from the variants table.',
default=False)
parser_dump.add_argument('--genotypes',
dest='genotypes',
action='store_true',
help='Report all rows/columns from the variants table \nwith one line per sample/genotype.',
default=False)
parser_dump.add_argument('--samples',
dest='samples',
action='store_true',
help='Report all rows/columns from the samples table.',
default=False)
parser_dump.add_argument('--header',
dest='use_header',
action='store_true',
help='Add a header of column names to the output.',
default=False)
parser_dump.add_argument('--sep',
dest='separator',
metavar='STRING',
help='Output column separator',
default="\t")
parser_dump.add_argument('--tfam',
dest='tfam',
action='store_true',
default=False,
help='Output sample information to TFAM format.')
def dump_fn(parser, args):
from gemini import gemini_dump
gemini_dump.dump(parser, args)
parser_dump.set_defaults(func=dump_fn)
#########################################
# $ gemini region
#########################################
parser_region = subparsers.add_parser('region',
help='extract variants from specific genomic loci')
parser_region.add_argument('db',
metavar='db',
help='The name of the database to be queried.')
parser_region.add_argument('--reg',
dest='region',
metavar='STRING',
help='Specify a chromosomal region chr:start-end')
parser_region.add_argument('--gene',
dest='gene',
metavar='STRING',
help='Specify a gene of interest')
parser_region.add_argument('--header',
dest='use_header',
action='store_true',
help='Add a header of column names to the output.',
default=False)
parser_region.add_argument('--columns',
dest='columns',
metavar='STRING',
help='A list of columns that you would like returned. Def. = "*"',
)
parser_region.add_argument('--filter',
dest='filter',
metavar='STRING',
help='Restrictions to apply to variants (SQL syntax)')
parser_region.add_argument('--show-samples',
dest='show_variant_samples',
action='store_true',
default=False,
help=('Add a column of all sample names with a variant to each '
'variant.'))
parser_region.add_argument('--format',
dest='format',
default='default',
help='Format of output (JSON, TPED or default)')
def region_fn(parser, args):
from gemini import gemini_region
gemini_region.region(parser, args)
parser_region.set_defaults(func=region_fn)
#########################################
# $ gemini stats
#########################################
parser_stats = subparsers.add_parser('stats',
help='compute useful variant statistics')
parser_stats.add_argument('db',
metavar='db',
help='The name of the database to be queried.')
parser_stats.add_argument('--tstv',
dest='tstv',
action='store_true',
help='Report the overall ts/tv ratio.',
default=False)
parser_stats.add_argument('--tstv-coding',
dest='tstv_coding',
action='store_true',
help='Report the ts/tv ratio in coding regions.',
default=False)
parser_stats.add_argument('--tstv-noncoding',
dest='tstv_noncoding',
action='store_true',
help='Report the ts/tv ratio in non-coding regions.',
default=False)
parser_stats.add_argument('--snp-counts',
dest='snp_counts',
action='store_true',
help='Report the count of each type of SNP (A->G, G->T, etc.).',
default=False)
parser_stats.add_argument('--sfs',
dest='sfs',
action='store_true',
help='Report the site frequency spectrum of the variants.',
default=False)
parser_stats.add_argument('--mds',
dest='mds',
action='store_true',
help='Report the pairwise genetic distance between the samples.',
default=False)
parser_stats.add_argument('--vars-by-sample',
dest='variants_by_sample',
action='store_true',
help='Report the number of variants observed in each sample.',
default=False)
parser_stats.add_argument('--gts-by-sample',
dest='genotypes_by_sample',
action='store_true',
help='Report the count of each genotype class obs. per sample.',
default=False)
parser_stats.add_argument('--summarize',
dest='query',
metavar='QUERY_STR',
default=None,
help='The query to be issued to the database to summarize')
parser_stats.add_argument('--gt-filter',
dest='gt_filter',
metavar='STRING',
help='Restrictions to apply to genotype values')
def stats_fn(parser, args):
from gemini import gemini_stats
gemini_stats.stats(parser, args)
parser_stats.set_defaults(func=stats_fn)
#########################################
# gemini annotate
#########################################
parser_get = subparsers.add_parser('annotate',
help='Add new columns for custom annotations')
parser_get.add_argument('db',
metavar='db',
help='The name of the database to be updated.')
parser_get.add_argument('-f',
dest='anno_file',
required=True,
help='The TABIX\'ed BED file containing the annotations')
parser_get.add_argument('-c',
dest='col_names',
help='The name(s) of the BED column(s) to be added to the variant table. '
'If the input file is a VCF, then this is the name of the info field to pull.')
parser_get.add_argument('-a',
dest='anno_type',
help='How should the annotation file be used? (def. extract)',
default="extract",
choices=['boolean', 'count', 'extract'])
parser_get.add_argument('-e',
dest='col_extracts',
help='Column(s) to extract information from for list annotations. '
'If the input is VCF, then this defaults to the fields specified in `-c`.')
parser_get.add_argument('-t',
dest='col_types',
help='What data type(s) should be used to represent the new values '
'in the database? '
'Any of {integer, float, text}')
parser_get.add_argument('-o',
dest='col_operations',
help='Operation(s) to apply to the extract column values '
'in the event that a variant overlaps multiple annotations '
'in your annotation file (-f). '
'Any of {sum, mean, median, min, max, mode, list, uniq_list, first, last}')
parser_get.add_argument('--region-only',
dest='region_only',
action='store_true',
default=False,
help='If set, only region coordinates will be considered when annotating variants. '
'The default is to annotate using region coordinates as well as REF and ALT variant values. '
'This option is only valid if annotation is a VCF file')
def annotate_fn(parser, args):
from gemini import gemini_annotate
gemini_annotate.annotate(parser, args)
parser_get.set_defaults(func=annotate_fn)
#########################################
# gemini windower
#########################################
parser_get = subparsers.add_parser('windower',
help='Compute statistics across genome \"windows\"')
parser_get.add_argument('db',
metavar='db',
help='The name of the database to be updated.')
parser_get.add_argument('-w',
dest='window_size',
type=int,
default=1000000,
help='The size of the windows in bp.')
parser_get.add_argument('-s',
dest='step_size',
type=int,
default=0,
help="The step size for the windows in bp.\n")
parser_get.add_argument('-t',
dest='analysis_type',
help='The type of windowed analysis requested.',
choices=['nucl_div', 'hwe'],
default='hwe')
parser_get.add_argument('-o',
dest='op_type',
help='The operation that should be applied to the -t values.',
choices=['mean', 'median', 'min', 'max', 'collapse'],
default='mean')
def windower_fn(parser, args):
from gemini import gemini_windower
gemini_windower.windower(parser, args)
parser_get.set_defaults(func=windower_fn)
#########################################
# gemini db_info
#########################################
parser_get = subparsers.add_parser('db_info',
help='Get the names and types of columns in the database tables')
parser_get.add_argument('db',
metavar='db',
help='The name of the database to be updated.')
def dbinfo_fn(parser, args):
from gemini import gemini_dbinfo
gemini_dbinfo.db_info(parser, args)
parser_get.set_defaults(func=dbinfo_fn)
#########################################
# $ gemini comp_hets
#########################################
parser_comp_hets = subparsers.add_parser('comp_hets',
help='Identify compound heterozygotes')
add_inheritance_args(parser_comp_hets, gt_ll=True, lenient=False)
parser_comp_hets.add_argument('--gene-where', dest="where",
default="is_exonic = 1 or impact_severity != 'LOW'",
help="""SQL clause to limit variants to genes. a reasonable alternative could be "gene != ''" """)
parser_comp_hets.add_argument('--pattern-only',
action='store_true',
help='find compound hets by inheritance pattern, without regard to affection',
default=False)
parser_comp_hets.add_argument('--max-priority',
type=float,
help='Default (1) is to show only confident compound hets. Set to 2' \
+ ' or higher to include pairs that are less likely true comp-hets',
default=1.6)
def comp_hets_fn(parser, args):
from gemini.gim import CompoundHet
CompoundHet(args).run()
parser_comp_hets.set_defaults(func=comp_hets_fn)
#########################################
# x-linked-recessive / dominant
#########################################
parser_xr = subparsers.add_parser('x_linked_recessive', help='X-linked recessive variants')
parser_xd = subparsers.add_parser('x_linked_dominant', help='X-linked dominant variants')
parser_xdn = subparsers.add_parser('x_linked_de_novo', help='X-linked de novo variants')
parser_xr.add_argument("-X", dest='X',
help="name of X chrom (if not default 'chrX' or 'X')",
default=[], action='append')
parser_xd.add_argument("-X", dest='X',
help="name of X chrom (if not default 'chrX' or 'X')",
default=[], action='append')
parser_xdn.add_argument("-X", dest='X',
help="name of X chrom (if not default 'chrX' or 'X')",
default=[], action='append')
add_inheritance_args(parser_xr, lenient=False, gt_ll=False)
add_inheritance_args(parser_xd, lenient=False, gt_ll=False)
add_inheritance_args(parser_xdn, lenient=False, gt_ll=False)
def x_rec_fn(parser, args):
from gemini.gim import XRec
XRec(args).run()
parser_xr.set_defaults(func=x_rec_fn)
def x_dom_fn(parser, args):
from gemini.gim import XDom
XDom(args).run()
parser_xd.set_defaults(func=x_dom_fn)
def x_denovo_fn(parser, args):
from gemini.gim import XDenovo
XDenovo(args).run()
parser_xdn.set_defaults(func=x_denovo_fn)
#########################################
# $ gemini pathways
#########################################
parser_pathway = subparsers.add_parser('pathways',
help='Map genes and variants to KEGG pathways')
parser_pathway.add_argument('db',
metavar='db',
help='The name of the database to be queried')
parser_pathway.add_argument('-v',
dest='version',
default='68',
metavar='STRING',
help="Version of ensembl genes to use. "
"Supported versions: 66 to 71\n"
)
parser_pathway.add_argument('--lof',
dest='lof',
action='store_true',
help='Report pathways for indivs/genes/sites with LoF variants',
default=False)
def pathway_fn(parser, args):
from gemini import tool_pathways
tool_pathways.pathways(parser, args)
parser_pathway.set_defaults(func=pathway_fn)
#########################################
# $ gemini lof_sieve
#########################################
parser_lof_sieve = subparsers.add_parser('lof_sieve',
help='Prioritize LoF mutations')
parser_lof_sieve.add_argument('db',
metavar='db',
help='The name of the database to be queried')
def lof_sieve_fn(parser, args):
from gemini import tool_lof_sieve
tool_lof_sieve.lof_sieve(parser, args)
parser_lof_sieve.set_defaults(func=lof_sieve_fn)
#########################################
# $ gemini burden
#########################################
burden_help = ("Gene-level genetic burden tests. By default counts all "
"variants with high impact in coding regions "
"as contributing to burden.")
parser_burden = subparsers.add_parser('burden',
help=burden_help)
parser_burden.add_argument('--nonsynonymous', action='store_true',
default=False,
help=("Count all nonsynonymous variants as "
"contributing burden."))
parser_burden.add_argument('--cases',
dest='cases',
nargs='*',
help=('Space separated list of cases for '
'association testing.'))
parser_burden.add_argument('--controls',
nargs='*',
dest='controls',
help=('Space separated list of controls for '
'association testing.'))
parser_burden.add_argument('--calpha',
action='store_true',
default=False,
help="Run the C-alpha association test.")
parser_burden.add_argument('--permutations',
default=0,
type=int,
help=("Number of permutations to run for the "
"C-alpha test (try 1000 to start)."))
parser_burden.add_argument('--min-aaf',
dest='min_aaf',
type=float,
default=0.0,
help='The min. alt. allele frequency for a '
'variant to be included.')
parser_burden.add_argument('--max-aaf',
dest='max_aaf',
type=float,
default=1.0,
help='The max. alt. allele frequency for a '
'variant to be included.')
parser_burden.add_argument('--save_tscores', default=False,
action='store_true',
help='Save the permuted T-scores to a file.')
parser_burden.add_argument('db',
metavar='db',
help='The name of the database to be queried.')
def burden_fn(parser, args):
from gemini import tool_burden_tests
tool_burden_tests.burden(parser, args)
parser_burden.set_defaults(func=burden_fn)
#########################################
# $ gemini interactions
#########################################
parser_interaction = subparsers.add_parser('interactions',
help='Find interaction partners for a gene in sample variants (default mode)')
parser_interaction.add_argument('db',
metavar='db',
help='The name of the database to be queried')
parser_interaction.add_argument('-g',
dest='gene',
help='Gene to be used as a root in BFS/shortest_path')
parser_interaction.add_argument('-r',
dest='radius',
type=int,
help="Set filter for BFS:\n"
"valid numbers starting from 0")
parser_interaction.add_argument("--edges",
help="edges file (default is hprd). Format is geneA|geneB\ngeneA|geneC...")
parser_interaction.add_argument('--var',
dest='var_mode',
help='var mode: Returns variant info (e.g. impact, biotype) for interacting genes',
action='store_true',
default=False)
def interactions_fn(parser, args):
from gemini import tool_interactions
tool_interactions.genequery(parser, args)
parser_interaction.set_defaults(func=interactions_fn)
#########################################
# gemini lof_interactions
#########################################
parser_interaction = subparsers.add_parser('lof_interactions',
help='Find interaction partners for a LoF gene in sample variants (default mode)')
parser_interaction.add_argument('db',
metavar='db',
help='The name of the database to be queried')
parser_interaction.add_argument('-r',
dest='radius',
type=int,
help="set filter for BFS:\n")
parser_interaction.add_argument("--edges",
help="edges file (default is hprd). Format is geneA|geneB\ngeneA|geneC...")
parser_interaction.add_argument('--var',
dest='var_mode',
help='var mode: Returns variant info (e.g. impact, biotype) for interacting genes',
action='store_true',
default=False)
def lof_interactions_fn(parser, args):
from gemini import tool_interactions
tool_interactions.lofgenequery(parser, args)
parser_interaction.set_defaults(func=lof_interactions_fn)
#########################################
# $ gemini autosomal_recessive
#########################################
parser_auto_rec = subparsers.add_parser('autosomal_recessive',
help='Identify variants meeting an autosomal \
recessive inheritance model')
add_inheritance_args(parser_auto_rec, gt_ll=True)
def autosomal_recessive_fn(parser, args):
from gemini.gim import AutoRec
AutoRec(args).run()
parser_auto_rec.set_defaults(func=autosomal_recessive_fn)
#########################################
# $ gemini autosomal_dominant
#########################################
parser_auto_dom = subparsers.add_parser('autosomal_dominant',
help='Identify variants meeting an autosomal \
dominant inheritance model')
add_inheritance_args(parser_auto_dom, gt_ll=True)
def autosomal_dominant_fn(parser, args):
from gemini.gim import AutoDom
AutoDom(args).run()
parser_auto_dom.set_defaults(func=autosomal_dominant_fn)
#########################################
# $ gemini de_novo
#########################################
parser_de_novo = subparsers.add_parser('de_novo',
help='Identify candidate de novo mutations')
add_inheritance_args(parser_de_novo, min_kindreds=1, gt_ll=True)
def de_novo_fn(parser, args):
from gemini.gim import DeNovo
DeNovo(args).run()
parser_de_novo.set_defaults(func=de_novo_fn)
#########################################
# $ gemini mendel violations
#########################################
parser_mendel = subparsers.add_parser('mendel_errors',
help='Identify candidate violations of Mendelian inheritance')
add_inheritance_args(parser_mendel, gt_ll=True, allow_unaffected=False)
parser_mendel.add_argument('--only-affected',
action='store_true',
help='only consider candidates from affected samples.',
default=False)
def mendel_fn(parser, args):
from gemini.gim import MendelViolations
MendelViolations(args).run()
parser_mendel.set_defaults(func=mendel_fn)
#########################################
# $ gemini browser
#########################################
parser_browser = subparsers.add_parser('browser',
help='Browser interface to gemini')
parser_browser.add_argument('db', metavar='db',
help='The name of the database to be queried.')
parser_browser.add_argument('--use', metavar='use', default='builtin',
help='Which browser to use: builtin or puzzle', choices=('builtin', 'puzzle'))
parser_browser.add_argument('--host', metavar='host', default='localhost',
help='Hostname, default: localhost.')
parser_browser.add_argument('--port', metavar='port', default='8088',
help='Port, default: 8088.')
def browser_fn(parser, args):
from gemini import gemini_browser
gemini_browser.browser_main(parser, args)
parser_browser.set_defaults(func=browser_fn)
#########################################
# $ gemini set_somatic
#########################################
parser_set_somatic = subparsers.add_parser("set_somatic",
help="Tag somatic mutations (is_somatic) by comparint tumor/normal pairs.")
parser_set_somatic.add_argument('db', metavar='db',
help='The name of the database to be updated.')
parser_set_somatic.add_argument('--min-depth',
dest='min_depth',
type=float,
default=0,
help='The min combined depth for tumor + normal (def: %(default)s).')
parser_set_somatic.add_argument('--min-qual',
dest='min_qual',
type=float,
default=0,
help='The min variant quality (VCF QUAL) (def: %(default)s).')
parser_set_somatic.add_argument('--min-somatic-score',
dest='min_somatic_score',
type=float,
default=0,
help='The min somatic score (SSC) (def: %(default)s).')
parser_set_somatic.add_argument('--max-norm-alt-freq',
dest='max_norm_alt_freq',
type=float,
default=0,
help='The max freq. of the alt. allele in the normal sample (def: %(default)s).')
parser_set_somatic.add_argument('--max-norm-alt-count',
dest='max_norm_alt_count',
type=int,
default=0,
help='The max count. of the alt. allele in the normal sample (def: %(default)s).')
parser_set_somatic.add_argument('--min-norm-depth',
dest='min_norm_depth',
type=int,
default=0,
help='The minimum depth allowed in the normal sample to believe somatic (def: %(default)s).')
parser_set_somatic.add_argument('--min-tumor-alt-freq',
dest='min_tumor_alt_freq',
type=float,
default=0,
help='The min freq. of the alt. allele in the tumor sample (def: %(default)s).')
parser_set_somatic.add_argument('--min-tumor-alt-count',
dest='min_tumor_alt_count',
type=int,
default=0,
help='The min count. of the alt. allele in the tumor sample (def: %(default)s).')
parser_set_somatic.add_argument('--min-tumor-depth',
dest='min_tumor_depth',
type=int,
default=0,
help='The minimum depth allowed in the tumor sample to believe somatic (def: %(default)s).')
parser_set_somatic.add_argument('--chrom',
dest='chrom',
metavar='STRING',
help='A specific chromosome on which to tag somatic mutations. (def: %(default)s).',
default=None,
)
parser_set_somatic.add_argument('--dry-run',
dest='dry_run',
action='store_true',
help='Don\'t set the is_somatic flag, just report what _would_ be set. For testing parameters.',
default=False)
def set_somatic_fn(parser, args):
from gemini import gemini_set_somatic
gemini_set_somatic.set_somatic(parser, args)
parser_set_somatic.set_defaults(func=set_somatic_fn)
#########################################
# $ gemini actionable_mutations
#########################################
parser_actionable_mut = subparsers.add_parser("actionable_mutations",
help="Retrieve genes with actionable somatic mutations via COSMIC and DGIdb.")
parser_actionable_mut.add_argument('db', metavar='db',
help='The name of the database to be queried.')
def get_actionable_mut_fn(parser, args):
from gemini import gemini_actionable_mutations
gemini_actionable_mutations.get_actionable_mutations(parser, args)
parser_actionable_mut.set_defaults(func=get_actionable_mut_fn)
#########################################
# $ gemini update
#########################################
parser_update = subparsers.add_parser("update", help="Update gemini software and data files.")
parser_update.add_argument("--devel", help="Get the latest development version instead of the release",
action="store_true", default=False)
parser_update.add_argument("--dataonly", help="Only update data, not the underlying libraries.",
action="store_true", default=False)
parser_update.add_argument("--nodata", help="Do not install data dependencies",
dest="install_data", action="store_false", default=True)
parser_update.add_argument("--extra", help="Add additional non-standard genome annotations to include",
action="append", default=[], choices=["gerp_bp","cadd_score"])
parser_update.add_argument("--tooldir", help="Directory for third party tools (ie /usr/local) update")
def update_fn(parser, args):
from gemini import gemini_update
gemini_update.release(parser, args)
parser_update.set_defaults(func=update_fn)
#########################################
# $ gemini roh
#########################################
parser_hom_run = subparsers.add_parser('roh',
help='Identify runs of homozygosity')
parser_hom_run.add_argument('db',
metavar='db',
help='The name of the database to be queried.')
parser_hom_run.add_argument('--min-snps',
dest='min_snps',
metavar="INTEGER",
type=int,
default=25,
help='Minimum number of homozygous snps expected in a run (def. 25)')
parser_hom_run.add_argument('--min-total-depth',
dest='min_total_depth',
metavar="INTEGER",
type=int,
default=20,
help="""The minimum overall sequencing depth required"""
"""for a SNP to be considered (def = 20).""")
parser_hom_run.add_argument('--min-gt-depth',
dest='min_genotype_depth',
metavar="INTEGER",
type=int,
default=0,
help="""The minimum required sequencing depth underlying a given sample's genotype"""
"""for a SNP to be considered (def = 0).""")
parser_hom_run.add_argument('--min-size',
metavar="INTEGER",
dest='min_size',
type=int,
default=100000,
help='Minimum run size in base pairs (def. 100000)')
parser_hom_run.add_argument('--max-hets',
metavar="INTEGER",
dest='max_hets',
type=int,
default=1,
help='Maximum number of allowed hets in the run (def. 1)')
parser_hom_run.add_argument('--max-unknowns',
metavar="INTEGER",
type=int,
dest='max_unknowns',
default=3,
help='Maximum number of allowed unknowns in the run (def. 3)')
parser_hom_run.add_argument('-s',
dest='samples',
default=None,
help='Comma separated list of samples to screen for ROHs. e.g S120,S450')
def homozygosity_runs_fn(parser, args):
from gemini.tool_homozygosity_runs import run
run(parser, args)
parser_hom_run.set_defaults(func=homozygosity_runs_fn)
#########################################
# bcolz indexing
#########################################
bci = subparsers.add_parser('bcolz_index', help='index an existing gemini'
' database so it can use bcolz for faster '
'genotype queries.')
bci.add_argument('db', help='The path of the database to indexed with bcolz.')
bci.add_argument('--cols', help='list of gt columns to index. default is all')
def bci_fn(parser, args):
from gemini.gemini_bcolz import create
if args.cols:
create(args.db, [x.strip() for x in args.cols.split(",")])
else:
create(args.db)
bci.set_defaults(func=bci_fn)
#########################################
# $ gemini fusions
#########################################
parser_fusions = subparsers.add_parser('fusions',
help="Identify somatic fusion genes from a GEMINI database.")
parser_fusions.add_argument('db',
metavar='db',
help='The name of the database to be queried.')
parser_fusions.add_argument('--in_cosmic_census',
action='store_true',
help='One or both genes in fusion is in COSMIC cancer census')
parser_fusions.add_argument('--min_qual',
dest='min_qual',
metavar='FLOAT',
type=float,
default=None,
help='The min variant quality (VCF QUAL) (def: %(default)s).')
parser_fusions.add_argument('--evidence_type',
metavar='STR',
dest='evidence_type',
type=str,
default=None,
help='The supporting evidence types for the variant ("PE", "SR", or "PE,SR").')
def fusions_fn(parser, args):
from gemini.tool_fusions import run
run(parser, args)
parser_fusions.set_defaults(func=fusions_fn)
#########################################
# genewise
#########################################
from gemini.genewise import add_args
parser_genewise = subparsers.add_parser('gene_wise')
add_args(parser_genewise)
def genewise_run(parser, args):
from gemini.genewise import run
run(args)
parser_genewise.set_defaults(func=genewise_run)
#########################################
# $ gemini QC
#########################################
parser_qc = subparsers.add_parser('qc',
help='Quality control tools')
parser_qc.add_argument('db',
metavar='db',
help='The name of the database to be queried.')
parser_qc.add_argument('--mode',
dest='mode',
metavar="STRING",
default='sex',
help='What type of QC should be run? [sex]')
parser_qc.add_argument('--chrom',
dest='chrom',
metavar="STRING",
default='chrX',
help='Which chromosome should the sex test be applied to? [chrX]')
def qc_fn(parser, args):
from gemini.tool_qc import run
run(parser, args)
parser_qc.set_defaults(func=qc_fn)
#######################################################
# parse the args and call the selected function
#######################################################
import operator
subparsers._choices_actions.sort(key=operator.attrgetter('dest'))
for k in sorted(subparsers.choices):
subparsers.choices[k] = subparsers.choices.pop(k)
args = parser.parse_args()
# make sure database is found if provided
if len(sys.argv) > 2 and sys.argv[1] not in \
["load", "merge_chunks", "load_chunk"]:
if hasattr(args, "db") and args.db is not None and not os.path.exists(args.db):
if not "://" in args.db:
sys.stderr.write("Requested GEMINI database (%s) not found. "
"Please confirm the provided filename.\n"
% args.db)
elif len(sys.argv) > 2 and sys.argv[1] == "load":
if xor(args.scheduler, args.queue):
parser.error("If you are using the IPython parallel loading, you "
"must specify both a scheduler with --scheduler and a "
"queue to use with --queue.")
try:
args.func(parser, args)
except IOError as e:
if e.errno != 32: # ignore SIGPIPE
raise
def xor(arg1, arg2):
return bool(arg1) ^ bool(arg2)
if __name__ == "__main__":
main()
|
devil/devil/utils/reset_usb.py
|
tingshao/catapult
| 138 |
139516
|
<reponame>tingshao/catapult<gh_stars>100-1000
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
if sys.platform == 'win32':
raise ImportError('devil.utils.reset_usb only supported on unix systems.')
import argparse
import fcntl
import logging
import os
import re
if __name__ == '__main__':
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..')))
from devil.android import device_errors
from devil.utils import lsusb
from devil.utils import run_tests_helper
logger = logging.getLogger(__name__)
_INDENTATION_RE = re.compile(r'^( *)')
_LSUSB_BUS_DEVICE_RE = re.compile(r'^Bus (\d{3}) Device (\d{3}):')
_LSUSB_ENTRY_RE = re.compile(r'^ *([^ ]+) +([^ ]+) *([^ ].*)?$')
_LSUSB_GROUP_RE = re.compile(r'^ *([^ ]+.*):$')
_USBDEVFS_RESET = ord('U') << 8 | 20
def reset_usb(bus, device):
"""Reset the USB device with the given bus and device."""
usb_file_path = '/dev/bus/usb/%03d/%03d' % (bus, device)
with open(usb_file_path, 'w') as usb_file:
logger.debug('fcntl.ioctl(%s, %d)', usb_file_path, _USBDEVFS_RESET)
fcntl.ioctl(usb_file, _USBDEVFS_RESET)
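# Illustrative call (the bus/device numbers are hypothetical): reset_usb(1, 4)
# opens /dev/bus/usb/001/004 and issues the USBDEVFS_RESET ioctl on it.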
def reset_android_usb(serial):
"""Reset the USB device for the given Android device."""
lsusb_info = lsusb.lsusb()
bus = None
device = None
for device_info in lsusb_info:
device_serial = lsusb.get_lsusb_serial(device_info)
if device_serial == serial:
bus = int(device_info.get('bus'))
device = int(device_info.get('device'))
if bus and device:
reset_usb(bus, device)
else:
raise device_errors.DeviceUnreachableError(
'Unable to determine bus(%s) or device(%s) for device %s'
% (bus, device, serial))
def reset_all_android_devices():
"""Reset all USB devices that look like an Android device."""
_reset_all_matching(lambda i: bool(lsusb.get_lsusb_serial(i)))
def _reset_all_matching(condition):
lsusb_info = lsusb.lsusb()
for device_info in lsusb_info:
if int(device_info.get('device')) != 1 and condition(device_info):
bus = int(device_info.get('bus'))
device = int(device_info.get('device'))
try:
reset_usb(bus, device)
serial = lsusb.get_lsusb_serial(device_info)
if serial:
logger.info(
'Reset USB device (bus: %03d, device: %03d, serial: %s)',
bus, device, serial)
else:
logger.info(
'Reset USB device (bus: %03d, device: %03d)',
bus, device)
except IOError:
logger.error(
'Failed to reset USB device (bus: %03d, device: %03d)',
bus, device)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='count')
parser.add_argument('-s', '--serial')
parser.add_argument('--bus', type=int)
parser.add_argument('--device', type=int)
args = parser.parse_args()
run_tests_helper.SetLogLevel(args.verbose)
if args.serial:
reset_android_usb(args.serial)
elif args.bus and args.device:
reset_usb(args.bus, args.device)
else:
parser.error('Unable to determine target. '
'Specify --serial or BOTH --bus and --device.')
return 0
if __name__ == '__main__':
sys.exit(main())
|
NVLL/analysis/word_freq.py
|
jennhu/vmf_vae_nlp
| 159 |
139557
|
<filename>NVLL/analysis/word_freq.py<gh_stars>100-1000
import os
def count(dic, fname):
with open(fname, 'r') as fd:
lines = fd.read().splitlines()
filtered_sents = []
for l in lines:
words = l.split(" ")
_ratio = comp_unk_ratio(words)
if _ratio <= 0.05:
filtered_sents.append(words)
for w in words:
if w in dic:
dic[w] += 1
else:
dic[w] = 1
return dic, filtered_sents
def read_sent():
pass
def comp_unk_ratio(sent):
total = len(sent) + 0.000001
cnt = 0
for w in sent:
if w == '<unk>':
cnt += 1
return cnt / total
def comp_ratio():
pass
def generate_based_on_word_freq():
count_word_freq()
def generate_based_on_sentiment():
pass
def count_word_freq():
d = {}
os.chdir("../../data/yelp")
d, _ = count(d, "valid.txt")
d, filtered_sents_test = count(d, "test.txt")
sorted_d = sorted(d, key=d.get, reverse=True)
print("Len of trimmed vocab {}".format(len(sorted_d)))
print("Num of Test samples after trimming {}".format(len(filtered_sents_test)))
uncommon = sorted_d[-10000:]
print(uncommon)
divide = 5
every = int(len(filtered_sents_test) / divide)
sent_dictionary = {}
for sent in filtered_sents_test:
total = len(sent)
cnt = 0.
for w in sent:
if w in uncommon:
cnt += 1
sent_dictionary[" ".join(sent)] = cnt / total
sorted_sents = sorted(sent_dictionary, key=sent_dictionary.get, reverse=True)
for piece in range(divide):
start = int(piece * every)
end = int((piece + 1) * every)
tmp_sents = sorted_sents[start:end]
with open("test-rare-" + str(piece) + ".txt", 'w') as fd:
fd.write("\n".join(tmp_sents))
if __name__ == "__main__":
bank_size = 1000
# Generate 2 set of sentences.
# Before beginning
# if a sentence has more than 10% UNK, remove it.
############
# Based on WordFreq Vocab size=15K
# Divide
# Top 1K sample with largest Common Word Ratio (common word= top3K freq word)
# Top 1K sample with largest Uncommon Word Ratio (uncommon word= top3K infreq word)
generate_based_on_word_freq()
############
# Based on Sentiment (sample from 5star and 1star)
#############
|
ibl/datasets/pitts.py
|
adujardin/OpenIBL
| 182 |
139565
|
<gh_stars>100-1000
from __future__ import print_function, absolute_import
import os.path as osp
from collections import namedtuple
import torch.distributed as dist
from ..utils.data import Dataset
from ..utils.osutils import mkdir_if_missing
from ..utils.serialization import write_json, read_mat
from ..utils.dist_utils import synchronize
def parse_dbStruct(path):
matStruct = read_mat(path)
dbImage = [f[0].item() for f in matStruct[1]]
utmDb = matStruct[2].T
qImage = [f[0].item() for f in matStruct[3]]
utmQ = matStruct[4].T
numDb = matStruct[5].item()
numQ = matStruct[6].item()
dbStruct = namedtuple('dbStruct',
['dbImage', 'utmDb', 'qImage', 'utmQ', 'numDb', 'numQ'])
return dbStruct(dbImage, utmDb, qImage, utmQ, numDb, numQ)
class Pittsburgh(Dataset):
def __init__(self, root, scale='250k', verbose=True):
super(Pittsburgh, self).__init__(root)
self.scale = scale
self.arrange()
self.load(verbose, scale)
def arrange(self):
if self._check_integrity(self.scale):
return
raw_dir = osp.join(self.root, 'raw')
if (not osp.isdir(raw_dir)):
raise RuntimeError("Dataset not found.")
db_root = osp.join('Pittsburgh', 'images')
q_root = osp.join('Pittsburgh', 'queries')
identities = []
utms = []
q_pids, db_pids = {}, {}
def register(split):
struct = parse_dbStruct(osp.join(raw_dir, 'pitts'+self.scale+'_'+split+'.mat'))
q_ids = []
for fpath, utm in zip(struct.qImage, struct.utmQ):
sid = fpath.split('_')[0]
if (sid not in q_pids.keys()):
pid = len(identities)
q_pids[sid] = pid
identities.append([])
utms.append(utm.tolist())
q_ids.append(pid)
identities[q_pids[sid]].append(osp.join(q_root, fpath))
assert(utms[q_pids[sid]]==utm.tolist())
db_ids = []
for fpath, utm in zip(struct.dbImage, struct.utmDb):
sid = fpath.split('_')[0]
if (sid not in db_pids.keys()):
pid = len(identities)
db_pids[sid] = pid
identities.append([])
utms.append(utm.tolist())
db_ids.append(pid)
identities[db_pids[sid]].append(osp.join(db_root, fpath))
assert(utms[db_pids[sid]]==utm.tolist())
return q_ids, db_ids
q_train_pids, db_train_pids = register('train')
# train_pids = q_train_pids + db_train_pids
q_val_pids, db_val_pids = register('val')
q_test_pids, db_test_pids = register('test')
assert len(identities)==len(utms)
# for pid in q_test_pids:
# if (len(identities[pid])!=24):
# print (identities[pid])
# Save meta information into a json file
meta = {'name': 'Pittsburgh_'+self.scale,
'identities': identities, 'utm': utms}
try:
rank = dist.get_rank()
except:
rank = 0
if rank == 0:
write_json(meta, osp.join(self.root, 'meta_'+self.scale+'.json'))
# Save the training / test split
splits = {
# 'train': sorted(train_pids),
'q_train': sorted(q_train_pids),
'db_train': sorted(db_train_pids),
'q_val': sorted(q_val_pids),
'db_val': sorted(db_val_pids),
'q_test': sorted(q_test_pids),
'db_test': sorted(db_test_pids)}
if rank == 0:
write_json(splits, osp.join(self.root, 'splits_'+self.scale+'.json'))
synchronize()
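# Illustrative (assumed) usage, with a hypothetical dataset root:
#   dataset = Pittsburgh('/data/pitts', scale='250k')
# arrange() then expects raw/pitts250k_{train,val,test}.mat under that root, plus
# the Pittsburgh/images and Pittsburgh/queries image folders referenced above.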
|
SimG4Core/Configuration/python/SimG4Core_cff.py
|
ckamtsikis/cmssw
| 852 |
139570
|
import FWCore.ParameterSet.Config as cms
# Geometry and Magnetic field must be initialized separately
# Geant4-based CMS Detector simulation (OscarProducer)
# - returns label "g4SimHits"
#
from SimG4Core.Application.g4SimHits_cfi import *
|
rest/addresses/list-post-example-1/list-post-example-1.6.x.py
|
Tshisuaka/api-snippets
| 234 |
139602
|
<filename>rest/addresses/list-post-example-1/list-post-example-1.6.x.py
# Download the Python helper library from twilio.com/docs/python/install
import os
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
# To set up environmental variables, see http://twil.io/secure
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)
app = client.addresses.create(
customer_name="Customer 123",
street="1 Hasselhoff Lane",
city="Berlin",
region="Berlin",
postal_code="10875",
iso_country="DE",
friendly_name="Billing - Customer 123"
)
print(app.sid)
|
pyoutline/outline/plugins/manager.py
|
mb0rt/OpenCue
| 334 |
139622
|
<filename>pyoutline/outline/plugins/manager.py
# Copyright Contributors to the OpenCue Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for setting up the PyOutline plugin system.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import object
import logging
import sys
from outline.config import config
logger = logging.getLogger("outline.plugins")
class PluginManager(object):
"""
Class responsible for instantiating the plugin system and loading desired plugins.
"""
registered_plugins = []
@classmethod
def init_cuerun_plugins(cls, cuerun):
"""Initialize all registered plugins."""
for plugin in cls.registered_plugins:
try:
plugin.init_cuerun_plugin(cuerun)
except AttributeError:
pass
@classmethod
def load_plugin(cls, module_name):
"""Load a single plugin and register it with the plugin manager."""
logger.debug("importing [%s] outline plugin.", module_name)
try:
module = __import__(module_name,
globals(),
locals(),
[module_name])
try:
module.loaded()
except AttributeError as e:
pass
cls.registered_plugins.append(module)
except ImportError as e:
sys.stderr.write("Warning: plugin load failed: %s\n" % e)
@classmethod
def init_plugin(cls, module_name, layer):
"""Initialize a plugin on the given layer."""
try:
logger.debug("importing [%s] outline plugin.", module_name)
plugin = __import__(module_name, globals(), locals(), [module_name])
try:
plugin.init(layer)
except AttributeError as e:
pass
except ImportError as e:
sys.stderr.write("Warning: plugin load failed: %s\n" % e)
@classmethod
def load_all_plugins(cls):
"""Load and register all plugins listed in the config file."""
def section_priority(section_needing_key):
priority_option = "priority"
if config.has_option(section_needing_key, priority_option):
return config.getint(section_needing_key, priority_option)
return 0
sections = sorted(config.sections(), key=section_priority)
for section in sections:
if section.startswith("plugin:"):
if config.getint(section, "enable"):
logger.debug("Loading plugin '%s'", section)
cls.load_plugin(config.get(section, "module"))
@classmethod
def get_plugins(cls):
"""Gets a list of all registered plugins."""
return cls.registered_plugins
|
pyp.py
|
hauntsaninja/pyp
| 1,194 |
139639
|
<filename>pyp.py
#!/usr/bin/env python3
import argparse
import ast
import importlib
import inspect
import itertools
import os
import sys
import textwrap
import traceback
from collections import defaultdict
from typing import Any, Dict, Iterator, List, Optional, Set, Tuple, cast
__all__ = ["pypprint"]
__version__ = "0.3.4"
def pypprint(*args, **kwargs): # type: ignore
"""Replacement for ``print`` that special-cases dicts and iterables.
- Dictionaries are printed one line per key-value pair, with key and value colon-separated.
- Iterables (excluding strings) are printed one line per item
- Everything else is delegated to ``print``
"""
from typing import Iterable
if len(args) != 1:
print(*args, **kwargs)
return
x = args[0]
if isinstance(x, dict):
for k, v in x.items():
print(f"{k}:", v, **kwargs)
elif isinstance(x, Iterable) and not isinstance(x, str):
for i in x:
print(i, **kwargs)
else:
print(x, **kwargs)
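# Illustrative behavior sketch (not from the original source):
#   pypprint({"a": 1, "b": 2}) prints "a: 1" and "b: 2" on separate lines,
#   pypprint([1, 2, 3]) prints one item per line, and
#   pypprint("hi") falls through to a plain print("hi").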
class NameFinder(ast.NodeVisitor):
"""Finds undefined names, top-level defined names and wildcard imports in the given AST.
A top-level defined name is any name that is stored to in the top-level scopes of ``trees``.
An undefined name is any name that is loaded before it is defined (in any scope).
Notes: a) we ignore deletes, b) used builtins will appear in undefined names, c) this logic
doesn't fully support comprehension / nonlocal / global / late-binding scopes.
"""
def __init__(self, *trees: ast.AST) -> None:
self._scopes: List[Set[str]] = [set()]
self._comprehension_scopes: List[int] = []
self.undefined: Set[str] = set()
self.wildcard_imports: List[str] = []
for tree in trees:
self.visit(tree)
assert len(self._scopes) == 1
@property
def top_level_defined(self) -> Set[str]:
return self._scopes[0]
def flexible_visit(self, value: Any) -> None:
if isinstance(value, list):
for item in value:
if isinstance(item, ast.AST):
self.visit(item)
elif isinstance(value, ast.AST):
self.visit(value)
def generic_visit(self, node: ast.AST) -> None:
def order(f_v: Tuple[str, Any]) -> int:
# This ordering fixes comprehensions, dict comps, loops, assignments
return {"generators": -3, "iter": -3, "key": -2, "value": -1}.get(f_v[0], 0)
# Adapted from ast.NodeVisitor.generic_visit, but re-orders traversal a little
for _, value in sorted(ast.iter_fields(node), key=order):
self.flexible_visit(value)
def visit_Name(self, node: ast.Name) -> None:
if isinstance(node.ctx, ast.Load):
if all(node.id not in d for d in self._scopes):
self.undefined.add(node.id)
elif isinstance(node.ctx, ast.Store):
self._scopes[-1].add(node.id)
# Ignore deletes, see docstring
self.generic_visit(node)
def visit_Global(self, node: ast.Global) -> None:
self._scopes[-1] |= self._scopes[0] & set(node.names)
def visit_Nonlocal(self, node: ast.Nonlocal) -> None:
if len(self._scopes) >= 2:
self._scopes[-1] |= self._scopes[-2] & set(node.names)
def visit_AugAssign(self, node: ast.AugAssign) -> None:
if isinstance(node.target, ast.Name):
# TODO: think about global, nonlocal
if node.target.id not in self._scopes[-1]:
self.undefined.add(node.target.id)
self.generic_visit(node)
def visit_NamedExpr(self, node: Any) -> None:
self.visit(node.value)
# PEP 572 has weird scoping rules
assert isinstance(node.target, ast.Name)
assert isinstance(node.target.ctx, ast.Store)
scope_index = len(self._scopes) - 1
comp_index = len(self._comprehension_scopes) - 1
while comp_index >= 0 and scope_index == self._comprehension_scopes[comp_index]:
scope_index -= 1
comp_index -= 1
self._scopes[scope_index].add(node.target.id)
def visit_alias(self, node: ast.alias) -> None:
if node.name != "*":
self._scopes[-1].add(node.asname if node.asname is not None else node.name)
def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
if node.module is not None and "*" in (a.name for a in node.names):
self.wildcard_imports.append(node.module)
self.generic_visit(node)
def visit_ClassDef(self, node: ast.ClassDef) -> None:
self.flexible_visit(node.decorator_list)
self.flexible_visit(node.bases)
self.flexible_visit(node.keywords)
self._scopes.append(set())
self.flexible_visit(node.body)
self._scopes.pop()
# Classes are not okay with self-reference, so define ``name`` afterwards
self._scopes[-1].add(node.name)
def visit_function_helper(self, node: Any, name: Optional[str] = None) -> None:
# Functions are okay with recursion, but not self-reference while defining default values
self.flexible_visit(node.args)
if name is not None:
self._scopes[-1].add(name)
self._scopes.append(set())
for arg_node in ast.iter_child_nodes(node.args):
if isinstance(arg_node, ast.arg):
self._scopes[-1].add(arg_node.arg)
self.flexible_visit(node.body)
self._scopes.pop()
def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
self.flexible_visit(node.decorator_list)
self.visit_function_helper(node, node.name)
def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None:
self.flexible_visit(node.decorator_list)
self.visit_function_helper(node, node.name)
def visit_Lambda(self, node: ast.Lambda) -> None:
self.visit_function_helper(node)
def visit_ExceptHandler(self, node: ast.ExceptHandler) -> None:
# ExceptHandler's name is scoped to the handler. If name exists and the name is not already
# defined, we'll define then undefine it to mimic the scope.
if not node.name or node.name in self._scopes[-1]:
self.generic_visit(node)
return
self.flexible_visit(node.type)
assert node.name is not None
self._scopes[-1].add(node.name)
self.flexible_visit(node.body)
self._scopes[-1].remove(node.name)
def visit_comprehension_helper(self, node: Any) -> None:
self._comprehension_scopes.append(len(self._scopes))
self._scopes.append(set())
self.generic_visit(node)
self._scopes.pop()
self._comprehension_scopes.pop()
visit_ListComp = visit_comprehension_helper
visit_SetComp = visit_comprehension_helper
visit_GeneratorExp = visit_comprehension_helper
visit_DictComp = visit_comprehension_helper
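# Illustrative example of NameFinder on an assumed snippet (not from the original source):
#   f = NameFinder(ast.parse("total = x + 1"))
#   f.undefined == {"x"} and f.top_level_defined == {"total"}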
def dfs_walk(node: ast.AST) -> Iterator[ast.AST]:
"""Helper to iterate over an AST depth-first."""
stack = [node]
while stack:
node = stack.pop()
stack.extend(reversed(list(ast.iter_child_nodes(node))))
yield node
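# Illustrative: for tree = ast.parse("x = 1"), dfs_walk(tree) yields the Module
# node first, then the Assign node, then the Assign's children depth-first.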
MAGIC_VARS = {
"index": {"i", "idx", "index"},
"loop": {"line", "x", "l"},
"input": {"lines", "stdin"},
}
def is_magic_var(name: str) -> bool:
return any(name in vars for vars in MAGIC_VARS.values())
class PypError(Exception):
pass
def get_config_contents() -> str:
"""Returns the empty string if no config file is specified."""
config_file = os.environ.get("PYP_CONFIG_PATH")
if config_file is None:
return ""
try:
with open(config_file, "r") as f:
return f.read()
except FileNotFoundError as e:
raise PypError(f"Config file not found at PYP_CONFIG_PATH={config_file}") from e
class PypConfig:
"""PypConfig is responsible for handling user configuration.
We allow users to configure pyp with a config file that is very Python-like. Rather than
executing the config file as Python unconditionally, we treat it as a source of definitions. We
keep track of what each top-level stmt in the AST of the config file defines, and if we need
that definition in our program, use it. A wrinkle here is that definitions in the config file
may depend on other definitions within the config file; this is handled by build_missing_config.
Another wrinkle is wildcard imports; these are kept track of and added to the list of special
cased wildcard imports in build_missing_imports.
"""
def __init__(self) -> None:
config_contents = get_config_contents()
try:
config_ast = ast.parse(config_contents)
except SyntaxError as e:
error = f": {e.text!r}" if e.text else ""
raise PypError(f"Config has invalid syntax{error}") from e
# List of config parts
self.parts: List[ast.stmt] = config_ast.body
# Maps from a name to index of config part that defines it
self.name_to_def: Dict[str, int] = {}
self.def_to_names: Dict[int, List[str]] = defaultdict(list)
# Maps from index of config part to undefined names it needs
self.requires: Dict[int, Set[str]] = defaultdict(set)
# Modules from which automatic imports work without qualification, ordered by AST encounter
self.wildcard_imports: List[str] = []
self.shebang: str = "#!/usr/bin/env python3"
if config_contents.startswith("#!"):
self.shebang = "\n".join(
itertools.takewhile(lambda l: l.startswith("#"), config_contents.splitlines())
)
top_level: Tuple[Any, ...] = (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)
top_level += (ast.Import, ast.ImportFrom, ast.Assign, ast.AnnAssign, ast.If, ast.Try)
for index, part in enumerate(self.parts):
if not isinstance(part, top_level):
node_type = type(
part.value if isinstance(part, ast.Expr) else part
).__name__.lower()
raise PypError(
"Config only supports a subset of Python at top level; "
f"unsupported construct ({node_type}) on line {part.lineno}"
)
f = NameFinder(part)
for name in f.top_level_defined:
if self.name_to_def.get(name, index) != index:
raise PypError(f"Config has multiple definitions of {repr(name)}")
if is_magic_var(name):
raise PypError(f"Config cannot redefine built-in magic variable {repr(name)}")
self.name_to_def[name] = index
self.def_to_names[index].append(name)
self.requires[index] = f.undefined
self.wildcard_imports.extend(f.wildcard_imports)
class PypTransform:
"""PypTransform is responsible for transforming all input code.
A lot of pyp's magic comes from it making decisions based on defined and undefined names in the
input. This class helps keep track of that state as things change based on transformations. In
general, the logic in here is very sensitive to reordering; there are various implicit
assumptions about what transformations have happened and what names have been defined. But
the code is pretty small and the tests are good, so you should be okay!
"""
def __init__(
self,
before: List[str],
code: List[str],
after: List[str],
define_pypprint: bool,
config: PypConfig,
) -> None:
def parse_input(code: List[str]) -> ast.Module:
try:
return ast.parse(textwrap.dedent("\n".join(code).strip()))
except SyntaxError as e:
message = traceback.format_exception_only(type(e), e)
message[0] = "Invalid input\n\n"
raise PypError("".join(message).strip()) from e
self.before_tree = parse_input(before)
self.tree = parse_input(code)
self.after_tree = parse_input(after)
f = NameFinder(self.before_tree, self.tree, self.after_tree)
self.defined: Set[str] = f.top_level_defined
self.undefined: Set[str] = f.undefined
self.wildcard_imports: List[str] = f.wildcard_imports
# We'll always use sys in ``build_input``, so add it to undefined.
# This lets config define it or lets us automatically import it later
# (If before defines it, we'll just let it override the import...)
self.undefined.add("sys")
self.define_pypprint = define_pypprint
self.config = config
# The print statement ``build_output`` will add, if it determines it needs to.
self.implicit_print: Optional[ast.Call] = None
def build_missing_config(self) -> None:
"""Modifies the AST to define undefined names defined in config."""
config_definitions: Set[str] = set()
attempt_to_define = set(self.undefined)
while attempt_to_define:
can_define = attempt_to_define & set(self.config.name_to_def)
# The things we can define might in turn require some definitions, so update the things
# we need to attempt to define and loop
attempt_to_define = set()
for name in can_define:
config_definitions.update(self.config.def_to_names[self.config.name_to_def[name]])
attempt_to_define.update(self.config.requires[self.config.name_to_def[name]])
# We don't need to attempt to define things we've already decided we need to define
attempt_to_define -= config_definitions
config_indices = {self.config.name_to_def[name] for name in config_definitions}
# Run basically the same thing in reverse to see which dependencies stem from magic vars
before_config_indices = set(config_indices)
derived_magic_indices = {
i for i in config_indices if any(map(is_magic_var, self.config.requires[i]))
}
derived_magic_names = set()
while derived_magic_indices:
before_config_indices -= derived_magic_indices
derived_magic_names |= {
name for i in derived_magic_indices for name in self.config.def_to_names[i]
}
derived_magic_indices = {
i for i in before_config_indices if self.config.requires[i] & derived_magic_names
}
magic_config_indices = config_indices - before_config_indices
before_config_defs = [self.config.parts[i] for i in sorted(before_config_indices)]
magic_config_defs = [self.config.parts[i] for i in sorted(magic_config_indices)]
self.before_tree.body = before_config_defs + self.before_tree.body
self.tree.body = magic_config_defs + self.tree.body
for i in config_indices:
self.undefined.update(self.config.requires[i])
self.defined |= config_definitions
self.undefined -= config_definitions
def define(self, name: str) -> None:
"""Defines a name."""
self.defined.add(name)
self.undefined.discard(name)
def get_valid_name_in_top_scope(self, name: str) -> str:
"""Return a name related to ``name`` that does not conflict with existing definitions."""
while name in self.defined or name in self.undefined:
name += "_"
return name
def build_output(self) -> None:
"""Ensures that the AST prints something.
This is done by either a) checking whether we load a thing that prints, or b) if the last
thing in the tree is an expression, modifying the tree to print it.
"""
if self.undefined & {"print", "pprint", "pp", "pypprint"}: # has an explicit print
return
def inner(body: List[ast.stmt], use_pypprint: bool = False) -> bool:
if not body:
return False
if isinstance(body[-1], ast.Pass):
del body[-1]
return True
if not isinstance(body[-1], ast.Expr):
if (
# If the last thing in the tree is a statement that has a body
hasattr(body[-1], "body")
# and doesn't have an orelse, since users could expect the print in that branch
and not getattr(body[-1], "orelse", [])
# and doesn't enter a new scope
and not isinstance(
body[-1], (ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef)
)
):
# ...then recursively look for a standalone expression
return inner(body[-1].body, use_pypprint) # type: ignore
return False
if isinstance(body[-1].value, ast.Name):
output = body[-1].value.id
body.pop()
else:
output = self.get_valid_name_in_top_scope("output")
self.define(output)
body[-1] = ast.Assign(
targets=[ast.Name(id=output, ctx=ast.Store())], value=body[-1].value
)
print_fn = "print"
if use_pypprint:
print_fn = "pypprint"
self.undefined.add("pypprint")
if_print = ast.parse(f"if {output} is not None: {print_fn}({output})").body[0]
body.append(if_print)
self.implicit_print = if_print.body[0].value # type: ignore
return True
# First attempt to add a print to self.after_tree, then to self.tree
# We use pypprint in self.after_tree and print in self.tree, although the latter is
# subject to change later on if we call ``use_pypprint_for_implicit_print``. This logic
# could be a little simpler if we refactored so that we know what transformations we will
# do before we do them.
success = inner(self.after_tree.body, True) or inner(self.tree.body)
if not success:
raise PypError(
"Code doesn't generate any output; either explicitly print something, end with "
"an expression that pyp can print, or explicitly end with `pass`."
)
def use_pypprint_for_implicit_print(self) -> None:
"""If we implicitly print, use pypprint instead of print."""
if self.implicit_print is not None:
self.implicit_print.func.id = "pypprint" # type: ignore
# Make sure we import it later
self.undefined.add("pypprint")
def build_input(self) -> None:
"""Modifies the AST to use input from stdin.
How we do this depends on which magic variables are used.
"""
possible_vars = {typ: names & self.undefined for typ, names in MAGIC_VARS.items()}
if (possible_vars["loop"] or possible_vars["index"]) and possible_vars["input"]:
loop_names = ", ".join(possible_vars["loop"] or possible_vars["index"])
input_names = ", ".join(possible_vars["input"])
raise PypError(
f"Candidates found for both loop variable ({loop_names}) and "
f"input variable ({input_names})"
)
for typ, names in possible_vars.items():
if len(names) > 1:
names_str = ", ".join(names)
raise PypError(f"Multiple candidates for {typ} variable: {names_str}")
if possible_vars["loop"] or possible_vars["index"]:
# We'll loop over stdin and define loop / index variables
idx_var = possible_vars["index"].pop() if possible_vars["index"] else None
loop_var = possible_vars["loop"].pop() if possible_vars["loop"] else None
if loop_var:
self.define(loop_var)
if idx_var:
self.define(idx_var)
if loop_var is None:
loop_var = "_"
if idx_var:
for_loop = f"for {idx_var}, {loop_var} in enumerate(sys.stdin): "
else:
for_loop = f"for {loop_var} in sys.stdin: "
for_loop += f"{loop_var} = {loop_var}.rstrip('\\n')"
loop: ast.For = ast.parse(for_loop).body[0] # type: ignore
loop.body.extend(self.tree.body)
self.tree.body = [loop]
elif possible_vars["input"]:
# We'll read from stdin and define the necessary input variable
input_var = possible_vars["input"].pop()
self.define(input_var)
if input_var == "stdin":
input_assign = ast.parse(f"{input_var} = sys.stdin")
else:
input_assign = ast.parse(f"{input_var} = [x.rstrip('\\n') for x in sys.stdin]")
self.tree.body = input_assign.body + self.tree.body
self.use_pypprint_for_implicit_print()
else:
no_pipe_assertion = ast.parse(
"assert sys.stdin.isatty() or not sys.stdin.read(), "
'''"The command doesn't process input, but input is present"'''
)
self.tree.body = no_pipe_assertion.body + self.tree.body
self.use_pypprint_for_implicit_print()
def build_missing_imports(self) -> None:
"""Modifies the AST to import undefined names."""
self.undefined -= set(dir(__import__("builtins")))
# Optimisation: we will almost always define sys and pypprint. However, in order for us to
# get to `import sys`, we'll need to examine our wildcard imports, which in the presence
# of config, could be slow.
if "pypprint" in self.undefined:
pypprint_def = (
inspect.getsource(pypprint) if self.define_pypprint else "from pyp import pypprint"
)
self.before_tree.body = ast.parse(pypprint_def).body + self.before_tree.body
self.undefined.remove("pypprint")
if "sys" in self.undefined:
self.before_tree.body = ast.parse("import sys").body + self.before_tree.body
self.undefined.remove("sys")
# Now short circuit if we can
if not self.undefined:
return
def get_names_in_module(module: str) -> Any:
try:
mod = importlib.import_module(module)
except ImportError as e:
raise PypError(
f"Config contains wildcard import from {module}, but {module} failed to import"
) from e
return getattr(mod, "__all__", (n for n in dir(mod) if not n.startswith("_")))
subimports = {"Path": "pathlib", "pp": "pprint"}
wildcard_imports = (
["itertools", "math", "collections"]
+ self.config.wildcard_imports
+ self.wildcard_imports
)
subimports.update(
{name: module for module in wildcard_imports for name in get_names_in_module(module)}
)
def get_import_for_name(name: str) -> str:
if name in subimports:
return f"from {subimports[name]} import {name}"
return f"import {name}"
self.before_tree.body = [
ast.parse(stmt).body[0] for stmt in sorted(map(get_import_for_name, self.undefined))
] + self.before_tree.body
def build(self) -> ast.Module:
"""Returns a transformed AST."""
self.build_missing_config()
self.build_output()
self.build_input()
self.build_missing_imports()
ret = ast.parse("")
ret.body = self.before_tree.body + self.tree.body + self.after_tree.body
# Add fake line numbers to the nodes, so we can generate a traceback on error
i = 0
for node in dfs_walk(ret):
if isinstance(node, ast.stmt):
i += 1
node.lineno = i
return ast.fix_missing_locations(ret)
def unparse(tree: ast.AST, short_fallback: bool = False) -> str:
"""Returns Python code equivalent to executing ``tree``."""
if sys.version_info >= (3, 9):
return ast.unparse(tree)
try:
import astunparse # type: ignore
return cast(str, astunparse.unparse(tree))
except ImportError:
pass
if short_fallback:
return f"# {ast.dump(tree)} # --explain has instructions to make this readable"
return f"""
from ast import *
tree = fix_missing_locations({ast.dump(tree)})
# To see this in human readable form, run pyp with Python 3.9
# Alternatively, install a third party ast unparser: `python3 -m pip install astunparse`
# Once you've done that, simply re-run.
# In the meantime, this script is fully functional, if not easily readable or modifiable...
exec(compile(tree, filename="<ast>", mode="exec"), {{}})
"""
def run_pyp(args: argparse.Namespace) -> None:
config = PypConfig()
tree = PypTransform(args.before, args.code, args.after, args.define_pypprint, config).build()
if args.explain:
print(config.shebang)
print(unparse(tree))
return
try:
exec(compile(tree, filename="<pyp>", mode="exec"), {})
except Exception as e:
try:
line_to_node: Dict[int, ast.AST] = {}
for node in dfs_walk(tree):
line_to_node.setdefault(getattr(node, "lineno", -1), node)
def code_for_line(lineno: int) -> str:
node = line_to_node[lineno]
# Don't unparse nested child statements. Note this destroys the tree.
for _, value in ast.iter_fields(node):
if isinstance(value, list) and value and isinstance(value[0], ast.stmt):
value.clear()
return unparse(node, short_fallback=True).strip()
# Time to commit several sins against CPython implementation details
tb_except = traceback.TracebackException(
type(e), e, e.__traceback__.tb_next # type: ignore
)
for fs in tb_except.stack:
if fs.filename == "<pyp>":
fs._line = code_for_line(fs.lineno) # type: ignore[attr-defined]
fs.lineno = "PYP_REDACTED" # type: ignore[assignment]
tb_format = tb_except.format()
assert "Traceback (most recent call last)" in next(tb_format)
message = "Possible reconstructed traceback (most recent call last):\n"
message += "".join(tb_format).strip("\n")
message = message.replace(", line PYP_REDACTED", "")
except Exception:
message = "".join(traceback.format_exception_only(type(e), e)).strip()
if isinstance(e, ModuleNotFoundError):
message += (
"\n\nNote pyp treats undefined names as modules to automatically import. "
"Perhaps you forgot to define something or PYP_CONFIG_PATH is set incorrectly?"
)
if args.before and isinstance(e, NameError):
var = str(e)
var = var[var.find("'") + 1 : var.rfind("'")]
if var in ("lines", "stdin"):
message += (
"\n\nNote code in `--before` runs before any magic variables are defined "
"and should not process input. Your command should work by simply removing "
"`--before`, so instead passing in multiple statements in the main section "
"of your code."
)
raise PypError(
"Code raised the following exception, consider using --explain to investigate:\n\n"
f"{message}"
) from e
def parse_options(args: List[str]) -> argparse.Namespace:
parser = argparse.ArgumentParser(
prog="pyp",
formatter_class=argparse.RawDescriptionHelpFormatter,
description=(
"Easily run Python at the shell!\n\n"
"For help and examples, see https://github.com/hauntsaninja/pyp\n\n"
"Cheatsheet:\n"
"- Use `x`, `l` or `line` for a line in the input. Use `i`, `idx` or `index` "
"for the index\n"
"- Use `lines` to get a list of rstripped lines\n"
"- Use `stdin` to get sys.stdin\n"
"- Use print explicitly if you don't like when or how or what pyp's printing\n"
"- If the magic is ever too mysterious, use --explain"
),
)
parser.add_argument("code", nargs="+", help="Python you want to run")
parser.add_argument(
"--explain",
"--script",
action="store_true",
help="Prints the Python that would get run, instead of running it",
)
parser.add_argument(
"-b",
"--before",
action="append",
default=[],
metavar="CODE",
help="Python to run before processing input",
)
parser.add_argument(
"-a",
"--after",
action="append",
default=[],
metavar="CODE",
help="Python to run after processing input",
)
parser.add_argument(
"--define-pypprint",
action="store_true",
help="Defines pypprint, if used, instead of importing it from pyp.",
)
parser.add_argument("--version", action="version", version=f"pyp {__version__}")
return parser.parse_args(args)
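# Illustrative shell invocations matching the cheatsheet above (a sketch, not part of the
# original file); exact behaviour follows from the transformations in PypTransform:
#
#   seq 3 | pyp 'int(x) * 2'      # doubles each line: 2, 4, 6
#   seq 3 | pyp 'len(lines)'      # prints 3
#   pyp --explain 'x[::-1]'       # prints the generated script instead of running it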
def main() -> None:
try:
run_pyp(parse_options(sys.argv[1:]))
except PypError as e:
print(f"error: {e}", file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()
|
heronpy/streamlet/impl/transformbolt.py
|
pjfanning/incubator-heron
| 3,348 |
139651
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""module for map bolt: TransformBolt"""
from heronpy.api.bolt.bolt import Bolt
from heronpy.api.state.stateful_component import StatefulComponent
from heronpy.api.component.component_spec import GlobalStreamId
from heronpy.api.stream import Grouping
from heronpy.streamlet.streamlet import Streamlet
from heronpy.streamlet.transformoperator import TransformOperator
from heronpy.streamlet.impl.contextimpl import ContextImpl
from heronpy.streamlet.impl.streamletboltbase import StreamletBoltBase
# pylint: disable=unused-argument
class TransformBolt(Bolt, StatefulComponent, StreamletBoltBase):
"""TransformBolt"""
OPERATOR = 'operator'
# pylint: disable=attribute-defined-outside-init
def init_state(self, stateful_state):
self._state = stateful_state
def pre_save(self, checkpoint_id):
# Nothing really
pass
def initialize(self, config, context):
self.logger.debug("TransformBolt's Component-specific config: \n%s", str(config))
self.processed = 0
self.emitted = 0
if TransformBolt.OPERATOR in config:
self._transform_operator = config[TransformBolt.OPERATOR]
else:
raise RuntimeError("TransformBolt needs to be passed transform_operator")
if hasattr(self, '_state'):
contextimpl = ContextImpl(context, self._state, self)
else:
contextimpl = ContextImpl(context, None, self)
self._transform_operator.setup(contextimpl)
def process(self, tup):
self._transform_operator.transform(tup)
self.processed += 1
self.ack(tup)
# pylint: disable=protected-access
class TransformStreamlet(Streamlet):
"""TransformStreamlet"""
def __init__(self, transform_operator, parent):
super().__init__()
if not isinstance(transform_operator, TransformOperator):
raise RuntimeError("Transform Operator has to be a TransformOperator")
if not isinstance(parent, Streamlet):
raise RuntimeError("parent of Transform Streamlet has to be a Streamlet")
self._transform_operator = transform_operator
self._parent = parent
self.set_num_partitions(parent.get_num_partitions())
def _calculate_inputs(self):
return {GlobalStreamId(self._parent.get_name(), self._parent._output) :
Grouping.SHUFFLE}
def _build_this(self, builder, stage_names):
if not self.get_name():
self.set_name(self._default_stage_name_calculator("transform", stage_names))
if self.get_name() in stage_names:
raise RuntimeError("Duplicate Names")
stage_names.add(self.get_name())
builder.add_bolt(self.get_name(), TransformBolt, par=self.get_num_partitions(),
inputs=self._calculate_inputs(),
config={TransformBolt.OPERATOR : self._transform_operator})
return True
|
ipyvolume/traittypes.py
|
gridbugs/ipyvolume
| 1,784 |
139703
|

from traitlets import TraitType
import PIL.Image
class Image(TraitType):
"""A trait for PIL images."""
default_value = None
info_text = 'a PIL Image object'
def validate(self, obj, value):
if isinstance(value, PIL.Image.Image):
return value
self.error(obj, value)
|
pyannote/audio/labeling/tasks/speech_activity_detection.py
|
avramandrei/pyannote-audio
| 1,543 |
139709
|
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2018-2020 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# <NAME> - http://herve.niderb.fr
"""Speech activity detection"""
from typing import Optional
from typing import Text
import numpy as np
import torch
import torch.nn as nn
from .base import LabelingTask
from .base import LabelingTaskGenerator
from pyannote.audio.train.task import Task, TaskType, TaskOutput
from ..gradient_reversal import GradientReversal
from pyannote.audio.models.models import RNN
from pyannote.audio.features.wrapper import Wrappable
from pyannote.database import Protocol
from pyannote.database import Subset
from pyannote.audio.train.model import Resolution
from pyannote.audio.train.model import Alignment
class SpeechActivityDetectionGenerator(LabelingTaskGenerator):
"""Batch generator for training speech activity detection
Parameters
----------
task : Task
Task
feature_extraction : Wrappable
Describes how features should be obtained.
See pyannote.audio.features.wrapper.Wrapper documentation for details.
protocol : Protocol
subset : {'train', 'development', 'test'}, optional
Protocol and subset.
resolution : `pyannote.core.SlidingWindow`, optional
Override `feature_extraction.sliding_window`. This is useful for
models that include the feature extraction step (e.g. SincNet) and
therefore output a lower sample rate than that of the input.
Defaults to `feature_extraction.sliding_window`
alignment : {'center', 'loose', 'strict'}, optional
Which mode to use when cropping labels. This is useful for models that
include the feature extraction step (e.g. SincNet) and therefore use a
different cropping mode. Defaults to 'center'.
duration : float, optional
Duration of audio chunks. Defaults to 2s.
batch_size : int, optional
Batch size. Defaults to 32.
per_epoch : float, optional
Force total audio duration per epoch, in days.
Defaults to total duration of protocol subset.
mask : str, optional
When provided, protocol files are expected to contain a key named after
this `mask` variable and providing a `SlidingWindowFeature` instance.
Generated batches will contain an additional "mask" key (on top of
existing "X" and "y" keys) computed as an excerpt of `current_file[mask]`
time-aligned with "y". Defaults to not add any "mask" key.
"""
def __init__(
self,
task: Task,
feature_extraction: Wrappable,
protocol: Protocol,
subset: Subset = "train",
resolution: Optional[Resolution] = None,
alignment: Optional[Alignment] = None,
duration: float = 2.0,
batch_size: int = 32,
per_epoch: float = None,
mask: Text = None,
):
super().__init__(
task,
feature_extraction,
protocol,
subset=subset,
resolution=resolution,
alignment=alignment,
duration=duration,
batch_size=batch_size,
per_epoch=per_epoch,
exhaustive=False,
mask=mask,
local_labels=True,
)
def postprocess_y(self, Y: np.ndarray) -> np.ndarray:
"""Generate labels for speech activity detection
Parameters
----------
Y : (n_samples, n_speakers) numpy.ndarray
Discretized annotation returned by
`pyannote.core.utils.numpy.one_hot_encoding`.
Returns
-------
y : (n_samples, 1) numpy.ndarray
See also
--------
`pyannote.core.utils.numpy.one_hot_encoding`
"""
# number of speakers for each frame
speaker_count = np.sum(Y, axis=1, keepdims=True)
# mark speech regions as such
return np.int64(speaker_count > 0)
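# Illustrative example (not from the original file): with two speakers,
#   Y = [[0, 0], [1, 0], [1, 1]]  ->  y = [[0], [1], [1]]
# i.e. a frame is labelled "speech" as soon as at least one speaker is active.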
@property
def specifications(self):
specs = {
"task": self.task,
"X": {"dimension": self.feature_extraction.dimension},
"y": {"classes": ["non_speech", "speech"]},
}
for key, classes in self.file_labels_.items():
# TODO. add an option to handle this list
# TODO. especially useful for domain-adversarial stuff
if key in ["duration", "audio", "uri"]:
continue
specs[key] = {"classes": classes}
return specs
class SpeechActivityDetection(LabelingTask):
"""Train speech activity (and overlap) detection
Parameters
----------
duration : float, optional
Duration of sub-sequences. Defaults to 3.2s.
batch_size : int, optional
Batch size. Defaults to 32.
per_epoch : float, optional
Total audio duration per epoch, in days.
Defaults to one day (1).
"""
def get_batch_generator(
self,
feature_extraction,
protocol,
subset: Subset = "train",
resolution=None,
alignment=None,
):
"""
resolution : `pyannote.core.SlidingWindow`, optional
Override `feature_extraction.sliding_window`. This is useful for
models that include the feature extraction step (e.g. SincNet) and
therefore output a lower sample rate than that of the input.
alignment : {'center', 'loose', 'strict'}, optional
Which mode to use when cropping labels. This is useful for models
that include the feature extraction step (e.g. SincNet) and
therefore use a different cropping mode. Defaults to 'center'.
"""
return SpeechActivityDetectionGenerator(
self.task,
feature_extraction,
protocol,
subset=subset,
resolution=resolution,
alignment=alignment,
duration=self.duration,
per_epoch=self.per_epoch,
batch_size=self.batch_size,
)
class DomainAwareSpeechActivityDetection(SpeechActivityDetection):
"""Domain-aware speech activity detection
Trains speech activity detection and domain classification jointly.
Parameters
----------
domain : `str`, optional
Batch key to use as domain. Defaults to 'domain'.
Could be 'database' or 'uri' for instance.
attachment : `int`, optional
Intermediate level where to attach the domain classifier.
Defaults to -1. Passed to `return_intermediate` in models supporting it.
rnn: `dict`, optional
Parameters of the RNN used in the domain classifier.
See `pyannote.audio.models.models.RNN` for details.
domain_loss : `str`, optional
Loss function to use. Defaults to 'NLLLoss'.
"""
DOMAIN_PT = "{train_dir}/weights/{epoch:04d}.domain.pt"
def __init__(
self, domain="domain", attachment=-1, rnn=None, domain_loss="NLLLoss", **kwargs
):
super().__init__(**kwargs)
self.domain = domain
self.attachment = attachment
if rnn is None:
rnn = dict()
self.rnn = rnn
self.domain_loss = domain_loss
if self.domain_loss == "NLLLoss":
# Default value
self.domain_loss_ = nn.NLLLoss()
self.activation_ = nn.LogSoftmax(dim=1)
elif self.domain_loss == "MSELoss":
self.domain_loss_ = nn.MSELoss()
self.activation_ = nn.Sigmoid()
else:
msg = f"{domain_loss} has not been implemented yet."
raise NotImplementedError(msg)
def more_parameters(self):
"""Initialize trainable trainer parameters
Yields
------
parameter : nn.Parameter
Trainable trainer parameters
"""
domain_classifier_rnn = RNN(
n_features=self.model.intermediate_dimension(self.attachment), **self.rnn
)
n_classes = len(self.specifications[self.domain]["classes"])
domain_classifier_linear = nn.Linear(
domain_classifier_rnn.dimension, n_classes, bias=True
).to(self.device)
self.domain_classifier_ = nn.Sequential(
domain_classifier_rnn, domain_classifier_linear
).to(self.device)
# TODO: check if we really need to do this .to(self.device) twice
return self.domain_classifier_.parameters()
def load_more(self, model_pt=None) -> bool:
"""Load classifier from disk"""
if model_pt is None:
domain_pt = self.DOMAIN_PT.format(
train_dir=self.train_dir_, epoch=self.epoch_
)
else:
domain_pt = model_pt.with_suffix(".domain.pt")
domain_classifier_state = torch.load(
domain_pt, map_location=lambda storage, loc: storage
)
self.domain_classifier_.load_state_dict(domain_classifier_state)
# FIXME add support for different domains
return True
def save_more(self):
"""Save domain classifier to disk"""
domain_pt = self.DOMAIN_PT.format(train_dir=self.train_dir_, epoch=self.epoch_)
torch.save(self.domain_classifier_.state_dict(), domain_pt)
def batch_loss(self, batch):
"""Compute loss for current `batch`
Parameters
----------
batch : `dict`
['X'] (`numpy.ndarray`)
['y'] (`numpy.ndarray`)
Returns
-------
batch_loss : `dict`
['loss'] (`torch.Tensor`) : Loss
"""
# forward pass
X = torch.tensor(batch["X"], dtype=torch.float32, device=self.device_)
fX, intermediate = self.model_(X, return_intermediate=self.attachment)
# speech activity detection
fX = fX.view((-1, self.n_classes_))
target = (
torch.tensor(batch["y"], dtype=torch.int64, device=self.device_)
.contiguous()
.view((-1,))
)
weight = self.weight
if weight is not None:
weight = weight.to(device=self.device_)
loss = self.loss_func_(fX, target, weight=weight)
# domain classification
domain_target = torch.tensor(
batch[self.domain], dtype=torch.int64, device=self.device_
)
domain_scores = self.activation_(self.domain_classifier_(intermediate))
domain_loss = self.domain_loss_(domain_scores, domain_target)
return {
"loss": loss + domain_loss,
"loss_domain": domain_loss,
"loss_task": loss,
}
class DomainAdversarialSpeechActivityDetection(DomainAwareSpeechActivityDetection):
"""Domain Adversarial speech activity detection
Parameters
----------
domain : `str`, optional
Batch key to use as domain. Defaults to 'domain'.
Could be 'database' or 'uri' for instance.
attachment : `int`, optional
Intermediate level where to attach the domain classifier.
Defaults to -1. Passed to `return_intermediate` in models supporting it.
alpha : `float`, optional
Coefficient multiplied with the domain loss
"""
def __init__(self, domain="domain", attachment=-1, alpha=1.0, **kwargs):
super().__init__(domain=domain, attachment=attachment, **kwargs)
self.alpha = alpha
self.gradient_reversal_ = GradientReversal()
def batch_loss(self, batch):
"""Compute loss for current `batch`
Parameters
----------
batch : `dict`
['X'] (`numpy.ndarray`)
['y'] (`numpy.ndarray`)
Returns
-------
batch_loss : `dict`
['loss'] (`torch.Tensor`) : Loss
"""
# forward pass
X = torch.tensor(batch["X"], dtype=torch.float32, device=self.device_)
fX, intermediate = self.model_(X, return_intermediate=self.attachment)
# speech activity detection
fX = fX.view((-1, self.n_classes_))
target = (
torch.tensor(batch["y"], dtype=torch.int64, device=self.device_)
.contiguous()
.view((-1,))
)
weight = self.weight
if weight is not None:
weight = weight.to(device=self.device_)
loss = self.loss_func_(fX, target, weight=weight)
# domain classification
domain_target = torch.tensor(
batch[self.domain], dtype=torch.int64, device=self.device_
)
domain_scores = self.activation_(
self.domain_classifier_(self.gradient_reversal_(intermediate))
)
if self.domain_loss == "MSELoss":
# One hot encode domain_target for Mean Squared Error Loss
nb_domains = domain_scores.shape[1]
identity_mat = torch.sparse.torch.eye(nb_domains, device=self.device_)
domain_target = identity_mat.index_select(dim=0, index=domain_target)
domain_loss = self.domain_loss_(domain_scores, domain_target)
return {
"loss": loss + self.alpha * domain_loss,
"loss_domain": domain_loss,
"loss_task": loss,
}
|
tests/tilemap_tests/test_file_formats.py
|
yegarti/arcade
| 824 |
139719
|
import arcade
TILE_SCALING = 1.0
def test_csv_left_up():
# Read in the tiled map
my_map = arcade.load_tilemap("../tiled_maps/csv_left_up_embedded.json")
assert my_map.tile_width == 128
assert my_map.tile_height == 128
assert my_map.width == 10
assert my_map.height == 10
# --- Platforms ---
assert "Blocking Sprites" in my_map.sprite_lists
wall_list = my_map.sprite_lists["Blocking Sprites"]
assert wall_list[0].position == (64, 1216)
assert "dirtCenter" in wall_list[0].texture.name
assert wall_list[1].position == (1216, 1216)
assert "grassCenter" in wall_list[1].texture.name
assert wall_list[2].position == (64, 64)
assert "boxCrate" in wall_list[2].texture.name
def test_csv_right_down():
# Read in the tiled map
my_map = arcade.load_tilemap("../tiled_maps/csv_right_down_external.json")
assert my_map.tile_width == 128
assert my_map.tile_height == 128
assert my_map.width == 10
assert my_map.height == 10
# --- Platforms ---
assert "Blocking Sprites" in my_map.sprite_lists
wall_list = my_map.sprite_lists["Blocking Sprites"]
assert wall_list[0].position == (64, 1216)
assert "dirtCenter" in wall_list[0].texture.name
assert wall_list[1].position == (1216, 1216)
assert "grassCenter" in wall_list[1].texture.name
assert wall_list[2].position == (64, 64)
assert "boxCrate" in wall_list[2].texture.name
def test_base_64_zlib():
# Read in the tiled map
my_map = arcade.load_tilemap("../tiled_maps/base_64_zlib.json")
assert my_map.tile_width == 128
assert my_map.tile_height == 128
assert my_map.width == 10
assert my_map.height == 10
# --- Platforms ---
assert "Blocking Sprites" in my_map.sprite_lists
wall_list = my_map.sprite_lists["Blocking Sprites"]
assert wall_list[0].position == (64, 1216)
assert "dirtCenter" in wall_list[0].texture.name
assert wall_list[1].position == (1216, 1216)
assert "grassCenter" in wall_list[1].texture.name
assert wall_list[2].position == (64, 64)
assert "boxCrate" in wall_list[2].texture.name
def test_base_64_gzip():
# Read in the tiled map
my_map = arcade.load_tilemap("../tiled_maps/base_64_gzip.json")
assert my_map.tile_width == 128
assert my_map.tile_height == 128
assert my_map.width == 10
assert my_map.height == 10
# --- Platforms ---
assert "Blocking Sprites" in my_map.sprite_lists
wall_list = my_map.sprite_lists["Blocking Sprites"]
assert wall_list[0].position == (64, 1216)
assert "dirtCenter" in wall_list[0].texture.name
assert wall_list[1].position == (1216, 1216)
assert "grassCenter" in wall_list[1].texture.name
assert wall_list[2].position == (64, 64)
assert "boxCrate" in wall_list[2].texture.name
|
dragonfly/nn/unittest_nn_domains.py
|
hase1128/dragonfly
| 675 |
139723
|
"""
Unit tests for functions/classes in nn_domains.py
-- <EMAIL>
"""
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# Local imports
from . import nn_domains
from .unittest_neural_network import generate_cnn_architectures, \
generate_mlp_architectures
from ..utils.base_test_class import BaseTestClass, execute_tests
class NNConstraintCheckerTestCase(BaseTestClass):
""" Contains unit tests for the TransportNNDistanceComputer class. """
def __init__(self, *args, **kwargs):
""" Constructor. """
super(NNConstraintCheckerTestCase, self).__init__(*args, **kwargs)
self.nns = generate_cnn_architectures() + generate_mlp_architectures()
self.cnn_constraint_checker = nn_domains.CNNConstraintChecker(
25, 5, 500000, 0, 5, 2, 15, 512, 16, 4)
self.mlp_constraint_checker = nn_domains.MLPConstraintChecker(
25, 5, 500000, 900, 5, 2, 15, 30, 8)
def test_constraint_checker(self):
""" Tests if the constraints are satisfied for each network. """
report_str = ('Testing constraint checker: max_layers=%d, max_mass=%d, ' +
'max_out_deg=%d, max_in_deg=%d, max_edges=%d, max_2stride=%d.')%(
self.cnn_constraint_checker.max_num_layers,
self.cnn_constraint_checker.max_mass,
self.cnn_constraint_checker.max_out_degree,
self.cnn_constraint_checker.max_in_degree,
self.cnn_constraint_checker.max_num_edges,
self.cnn_constraint_checker.max_num_2strides,
)
self.report(report_str)
for nn in self.nns:
if nn.nn_class == 'cnn':
violation = self.cnn_constraint_checker(nn, True)
constrain_satisfied = self.cnn_constraint_checker(nn)
img_inv_sizes = [piis for piis in nn.post_img_inv_sizes if piis != 'x']
nn_class_str = ', max_inv_size=%d '%(max(img_inv_sizes))
else:
violation = self.mlp_constraint_checker(nn, True)
constrain_satisfied = self.mlp_constraint_checker(nn)
nn_class_str = ' '
self.report(('%s: #layers:%d, mass:%d, max_outdeg:%d, max_indeg:%d, ' +
'#edges:%d%s:: %s, %s')%(
nn.nn_class, len(nn.layer_labels), nn.get_total_mass(),
nn.get_out_degrees().max(), nn.get_in_degrees().max(),
nn.conn_mat.sum(), nn_class_str, str(constrain_satisfied), violation),
'test_result')
assert (constrain_satisfied and violation == '') or \
(not constrain_satisfied and violation != '')
if __name__ == '__main__':
execute_tests()
|
autolens/interferometer/model/result.py
|
Jammy2211/PyAutoLens
| 114 |
139730
|
import numpy as np
import autoarray as aa
import autogalaxy as ag
from autolens.lens.model.result import ResultDataset
class ResultInterferometer(ResultDataset):
@property
def max_log_likelihood_fit(self):
return self.analysis.fit_interferometer_for_instance(instance=self.instance)
@property
def real_space_mask(self):
return self.max_log_likelihood_fit.interferometer.real_space_mask
@property
def unmasked_model_visibilities(self):
return self.max_log_likelihood_fit.unmasked_blurred_image
@property
def unmasked_model_visibilities_of_planes(self):
return self.max_log_likelihood_fit.unmasked_blurred_image_of_planes
@property
def unmasked_model_visibilities_of_planes_and_galaxies(self):
fit = self.max_log_likelihood_fit
return fit.unmasked_blurred_image_of_planes_and_galaxies
def visibilities_for_galaxy(self, galaxy: ag.Galaxy) -> np.ndarray:
"""
Parameters
----------
galaxy
A galaxy used in this search
Returns
-------
ndarray or None
A numpy arrays giving the model visibilities of that galaxy
"""
return self.max_log_likelihood_fit.galaxy_model_visibilities_dict[galaxy]
@property
def visibilities_galaxy_dict(self) -> {str: ag.Galaxy}:
"""
A dictionary associating galaxy names with model visibilities of those galaxies
"""
return {
galaxy_path: self.visibilities_for_galaxy(galaxy)
for galaxy_path, galaxy in self.path_galaxy_tuples
}
@property
def hyper_galaxy_visibilities_path_dict(self):
"""
A dictionary associating 1D hyper_galaxies galaxy visibilities with their names.
"""
hyper_galaxy_visibilities_path_dict = {}
for path, galaxy in self.path_galaxy_tuples:
hyper_galaxy_visibilities_path_dict[path] = self.visibilities_galaxy_dict[
path
]
return hyper_galaxy_visibilities_path_dict
@property
def hyper_model_visibilities(self):
hyper_model_visibilities = aa.Visibilities.zeros(
shape_slim=(self.max_log_likelihood_fit.visibilities.shape_slim,)
)
for path, galaxy in self.path_galaxy_tuples:
hyper_model_visibilities += self.hyper_galaxy_visibilities_path_dict[path]
return hyper_model_visibilities
|
modelvshuman/datasets/base.py
|
TizianThieringer/model-vs-human
| 158 |
139738
|
#!/usr/bin/env python3
import os
from os.path import join as pjoin
class Dataset(object):
"""Base Dataset class
Attributes:
name (str): name of the dataset
params (object): Dataclass object contains following attributes path, image_size, metric, decision_mapping,
experiments and container_session
loader (pytorch loader): Data loader
args (dict): Other arguments
"""
def __init__(self,
name,
params,
loader,
*args,
**kwargs):
self.name = name
self.image_size = params.image_size
self.decision_mapping = params.decision_mapping
self.info_mapping = params.info_mapping
self.experiments = params.experiments
self.metrics = params.metrics
self.contains_sessions = params.contains_sessions
self.args = args
self.kwargs = kwargs
resize = params.image_size != 224
if self.contains_sessions:
self.path = pjoin(params.path, "dnn/")
else:
self.path = params.path
assert os.path.exists(self.path), f"dataset {self.name} path not found: " + self.path
if self.contains_sessions:
assert all(f.startswith("session-") for f in os.listdir(self.path))
else:
assert not any(f.startswith("session-") for f in os.listdir(self.path))
if self.experiments:
for e in self.experiments:
e.name = self.name
self._loader = None # this will be lazy-loaded the first time self.loader (the dataloader instance) is called
self._loader_callback = lambda: loader()(self.path, resize=resize,
batch_size=self.kwargs["batch_size"],
num_workers=self.kwargs["num_workers"],
info_mapping=self.info_mapping)
@property
def loader(self):
if self._loader is None:
self._loader = self._loader_callback()
return self._loader
@loader.setter
def loader(self, new_loader):
self._loader = new_loader
|
causalinference/core/propensity.py
|
youngminju-phd/Causalinference
| 392 |
139757
|
from __future__ import division
import numpy as np
from scipy.optimize import fmin_bfgs
from itertools import combinations_with_replacement
import causalinference.utils.tools as tools
from .data import Dict
class Propensity(Dict):
"""
Dictionary-like class containing propensity score data.
Propensity score related data includes estimated logistic regression
coefficients, maximized log-likelihood, predicted propensity scores,
and lists of the linear and quadratic terms that are included in the
logistic regression.
"""
def __init__(self, data, lin, qua):
Z = form_matrix(data['X'], lin, qua)
Z_c, Z_t = Z[data['controls']], Z[data['treated']]
beta = calc_coef(Z_c, Z_t)
self._data = data
self._dict = dict()
self._dict['lin'], self._dict['qua'] = lin, qua
self._dict['coef'] = beta
self._dict['loglike'] = -neg_loglike(beta, Z_c, Z_t)
self._dict['fitted'] = sigmoid(Z.dot(beta))
self._dict['se'] = calc_se(Z, self._dict['fitted'])
def __str__(self):
table_width = 80
coefs = self._dict['coef']
ses = self._dict['se']
output = '\n'
output += 'Estimated Parameters of Propensity Score\n\n'
entries1 = ['', 'Coef.', 'S.e.', 'z', 'P>|z|',
'[95% Conf. int.]']
entry_types1 = ['string']*6
col_spans1 = [1]*5 + [2]
output += tools.add_row(entries1, entry_types1,
col_spans1, table_width)
output += tools.add_line(table_width)
entries2 = tools.gen_reg_entries('Intercept', coefs[0], ses[0])
entry_types2 = ['string'] + ['float']*6
col_spans2 = [1]*7
output += tools.add_row(entries2, entry_types2,
col_spans2, table_width)
lin = self._dict['lin']
for (lin_term, coef, se) in zip(lin, coefs[1:], ses[1:]):
entries3 = tools.gen_reg_entries('X'+str(lin_term),
coef, se)
output += tools.add_row(entries3, entry_types2,
col_spans2, table_width)
qua = self._dict['qua']
lin_num = len(lin)+1 # including intercept
for (qua_term, coef, se) in zip(qua, coefs[lin_num:],
ses[lin_num:]):
name = 'X'+str(qua_term[0])+'*X'+str(qua_term[1])
entries4 = tools.gen_reg_entries(name, coef, se)
output += tools.add_row(entries4, entry_types2,
col_spans2, table_width)
return output
class PropensitySelect(Propensity):
"""
Dictionary-like class containing propensity score data, where the linear
and quadratic terms included in the logistic regression are selected
through sequences of likelihood ratio tests instead of being supplied
directly. The stored data otherwise matches Propensity: estimated
coefficients, maximized log-likelihood, predicted propensity scores,
and the lists of selected linear and quadratic terms.
"""
def __init__(self, data, lin_B, C_lin, C_qua):
X_c, X_t = data['X_c'], data['X_t']
lin = select_lin_terms(X_c, X_t, lin_B, C_lin)
qua = select_qua_terms(X_c, X_t, lin, C_qua)
super(PropensitySelect, self).__init__(data, lin, qua)
def form_matrix(X, lin, qua):
N, K = X.shape
mat = np.empty((N, 1+len(lin)+len(qua)))
mat[:, 0] = 1 # constant term
current_col = 1
if lin:
mat[:, current_col:current_col+len(lin)] = X[:, lin]
current_col += len(lin)
for term in qua: # qua is a list of tuples of column numbers
mat[:, current_col] = X[:, term[0]] * X[:, term[1]]
current_col += 1
return mat
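# Illustrative column layout (not from the original file): with lin=[0, 2] and
# qua=[(0, 0), (1, 2)], form_matrix returns columns
#   [1, X0, X2, X0*X0, X1*X2]
# i.e. an intercept, the selected linear terms, then the selected quadratic/interaction terms.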
def sigmoid(x, top_threshold=100, bottom_threshold=-100):
high_x = (x >= top_threshold)
low_x = (x <= bottom_threshold)
mid_x = ~(high_x | low_x)
values = np.empty(x.shape[0])
values[high_x] = 1.0
values[low_x] = 0.0
values[mid_x] = 1/(1+np.exp(-x[mid_x]))
return values
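# e.g. (illustrative) sigmoid(np.array([-200.0, 0.0, 200.0])) evaluates to
# array([0. , 0.5, 1. ]); inputs beyond the clipping thresholds map to exactly 0 or 1
# rather than risking overflow in np.exp.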
def log1exp(x, top_threshold=100, bottom_threshold=-100):
high_x = (x >= top_threshold)
low_x = (x <= bottom_threshold)
mid_x = ~(high_x | low_x)
values = np.empty(x.shape[0])
values[high_x] = 0.0
values[low_x] = -x[low_x]
values[mid_x] = np.log(1 + np.exp(-x[mid_x]))
return values
def neg_loglike(beta, X_c, X_t):
return log1exp(X_t.dot(beta)).sum() + log1exp(-X_c.dot(beta)).sum()
def neg_gradient(beta, X_c, X_t):
return (sigmoid(X_c.dot(beta))*X_c.T).sum(1) - \
(sigmoid(-X_t.dot(beta))*X_t.T).sum(1)
def calc_coef(X_c, X_t):
K = X_c.shape[1]
neg_ll = lambda b: neg_loglike(b, X_c, X_t)
neg_grad = lambda b: neg_gradient(b, X_c, X_t)
logit = fmin_bfgs(neg_ll, np.zeros(K), neg_grad,
full_output=True, disp=False)
return logit[0]
def calc_se(X, phat):
H = np.dot(phat*(1-phat)*X.T, X)
return np.sqrt(np.diag(np.linalg.inv(H)))
def get_excluded_lin(K, included):
included_set = set(included)
return [x for x in range(K) if x not in included_set]
def get_excluded_qua(lin, included):
whole_set = list(combinations_with_replacement(lin, 2))
included_set = set(included)
return [x for x in whole_set if x not in included_set]
def calc_loglike(X_c, X_t, lin, qua):
Z_c = form_matrix(X_c, lin, qua)
Z_t = form_matrix(X_t, lin, qua)
beta = calc_coef(Z_c, Z_t)
return -neg_loglike(beta, Z_c, Z_t)
def select_lin(X_c, X_t, lin_B, C_lin):
# Selects, through a sequence of likelihood ratio tests, the
# variables that should be included linearly in propensity
# score estimation.
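# Illustrative run (hypothetical numbers, not from the original file): with K=3 covariates
# and lin_B=[0], candidates X1 and X2 are each tested; if the largest statistic, say
# 2*(ll_alt - ll_null) = 4.1 for X2, exceeds C_lin (e.g. 3.84, the 5% chi-squared critical
# value with one degree of freedom), X2 is added and the search recurses on lin_B=[0, 2].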
K = X_c.shape[1]
excluded = get_excluded_lin(K, lin_B)
if excluded == []:
return lin_B
ll_null = calc_loglike(X_c, X_t, lin_B, [])
def lr_stat_lin(lin_term):
ll_alt = calc_loglike(X_c, X_t, lin_B+[lin_term], [])
return 2 * (ll_alt - ll_null)
lr_stats = np.array([lr_stat_lin(term) for term in excluded])
argmax_lr = lr_stats.argmax()
if lr_stats[argmax_lr] < C_lin:
return lin_B
else:
new_term = [excluded[argmax_lr]]
return select_lin(X_c, X_t, lin_B+new_term, C_lin)
def select_lin_terms(X_c, X_t, lin_B, C_lin):
# Mostly a wrapper around function select_lin to handle cases that
# require little computation.
if C_lin <= 0:
K = X_c.shape[1]
return lin_B + get_excluded_lin(K, lin_B)
elif C_lin == np.inf:
return lin_B
else:
return select_lin(X_c, X_t, lin_B, C_lin)
def select_qua(X_c, X_t, lin, qua_B, C_qua):
# Selects, through a sequence of likelihood ratio tests, the
# variables that should be included quadratically in propensity
# score estimation.
excluded = get_excluded_qua(lin, qua_B)
if excluded == []:
return qua_B
ll_null = calc_loglike(X_c, X_t, lin, qua_B)
def lr_stat_qua(qua_term):
ll_alt = calc_loglike(X_c, X_t, lin, qua_B+[qua_term])
return 2 * (ll_alt - ll_null)
lr_stats = np.array([lr_stat_qua(term) for term in excluded])
argmax_lr = lr_stats.argmax()
if lr_stats[argmax_lr] < C_qua:
return qua_B
else:
new_term = [excluded[argmax_lr]]
return select_qua(X_c, X_t, lin, qua_B+new_term, C_qua)
def select_qua_terms(X_c, X_t, lin, C_qua):
# Mostly a wrapper around function select_qua to handle cases that
# require little computation.
if lin == []:
return []
if C_qua <= 0:
return get_excluded_qua(lin, [])
elif C_qua == np.inf:
return []
else:
return select_qua(X_c, X_t, lin, [], C_qua)
|
rigl/experimental/jax/train_test.py
|
vishalbelsare/rigl
| 276 |
139761
|
# coding=utf-8
# Copyright 2021 RigL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for weight_symmetry.train."""
import glob
from os import path
import tempfile
from absl.testing import absltest
from absl.testing import flagsaver
from rigl.experimental.jax import train
class TrainTest(absltest.TestCase):
def test_train_driver_run(self):
"""Tests that the training driver runs, and outputs a TF summary."""
experiment_dir = tempfile.mkdtemp()
eval_flags = dict(
epochs=1,
experiment_dir=experiment_dir,
)
with flagsaver.flagsaver(**eval_flags):
train.main([])
with self.subTest(name='tf_summary_file_exists'):
outfile = path.join(experiment_dir, '*', 'events.out.tfevents.*')
files = glob.glob(outfile)
self.assertTrue(len(files) == 1 and path.exists(files[0]))
if __name__ == '__main__':
absltest.main()
|
third_party/libxml/chromium/roll.py
|
Ron423c/chromium
| 575 |
139837
|
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import os.path
import shutil
import subprocess
import sys
import stat
import tempfile
# How to patch libxml2 in Chromium:
#
# 1. Write a .patch file and add it to third_party/libxml/chromium.
# 2. Apply the patch in src: patch -p1 <../chromium/foo.patch
# 3. Add the patch to the list of patches in this file.
# 4. Update README.chromium with the provenance of the patch.
# 5. Upload a change with the modified documentation, roll script,
# patch, applied patch and any other relevant changes like
# regression tests. Go through the usual review and commit process.
#
# How to roll libxml2 in Chromium:
#
# Prerequisites:
#
# 1. Check out Chromium somewhere on Linux, Mac and Windows.
# 2. On Linux:
# a. sudo apt-get install libicu-dev
# b. git clone https://github.com/GNOME/libxml2.git somewhere
# 3. On Mac, install these packages with brew:
# autoconf automake libtool pkgconfig icu4c
#
# Procedure:
#
# Warning: This process is destructive. Run it on a clean branch.
#
# 1. On Linux, in the libxml2 repo directory:
# a. git remote update origin
# b. git checkout origin/master
#
# This will be the upstream version of libxml you are rolling to.
#
# 2. On Linux, in the Chromium src directory:
# a. third_party/libxml/chromium/roll.py --linux /path/to/libxml2
#
# If this fails, it may be a patch no longer applies. Reset to
# head; modify the patch files, this script, and
# README.chromium; then commit the result and run it again.
#
# b. Upload a CL, but do not Start Review.
#
# 3. On Windows, in the Chromium src directory:
# a. git cl patch <Gerrit Issue ID>
# b. third_party\libxml\chromium\roll.py --win32
# c. git cl upload
#
# 4. On Mac, in the Chromium src directory:
# a. git cl patch <Gerrit Issue ID>
# b. third_party/libxml/chromium/roll.py --mac --icu4c_path=~/homebrew/opt/icu4c
# c. Make and commit any final changes to README.chromium, BUILD.gn, etc.
# d. git cl upload
# e. Complete the review as usual
PATCHES = [
# TODO(dcheng): reach out upstream to see what's going on here.
'revert-non-recursive-xml-parsing.patch',
'chromium-issue-599427.patch',
'chromium-issue-628581.patch',
'libxml2-2.9.4-security-xpath-nodetab-uaf.patch',
'chromium-issue-708434.patch',
]
# See libxml2 configure.ac and win32/configure.js to learn what
# options are available. We include every option here to more easily track
# changes from one version to the next, and to be sure we only include what
# we need.
# These two sets of options should be in sync. You can check the
# generated #defines in (win32|mac|linux)/include/libxml/xmlversion.h to confirm
# this.
# We would like to disable python but it introduces a host of build errors
SHARED_XML_CONFIGURE_OPTIONS = [
# These options are turned ON
('--with-html', 'html=yes'),
('--with-icu', 'icu=yes'),
('--with-output', 'output=yes'),
('--with-push', 'push=yes'),
('--with-python', 'python=yes'),
('--with-reader', 'reader=yes'),
('--with-sax1', 'sax1=yes'),
('--with-tree', 'tree=yes'),
('--with-writer', 'writer=yes'),
('--with-xpath', 'xpath=yes'),
# These options are turned OFF
('--without-c14n', 'c14n=no'),
('--without-catalog', 'catalog=no'),
('--without-debug', 'xml_debug=no'),
('--without-docbook', 'docb=no'),
('--without-ftp', 'ftp=no'),
('--without-http', 'http=no'),
('--without-iconv', 'iconv=no'),
('--without-iso8859x', 'iso8859x=no'),
('--without-legacy', 'legacy=no'),
('--without-lzma', 'lzma=no'),
('--without-mem-debug', 'mem_debug=no'),
('--without-modules', 'modules=no'),
('--without-pattern', 'pattern=no'),
('--without-regexps', 'regexps=no'),
('--without-run-debug', 'run_debug=no'),
('--without-schemas', 'schemas=no'),
('--without-schematron', 'schematron=no'),
('--without-threads', 'threads=no'),
('--without-valid', 'valid=no'),
('--without-xinclude', 'xinclude=no'),
('--without-xptr', 'xptr=no'),
('--without-zlib', 'zlib=no'),
]
# These options are only available in configure.ac for Linux and Mac.
EXTRA_NIX_XML_CONFIGURE_OPTIONS = [
'--without-fexceptions',
'--without-minimum',
'--without-readline',
'--without-history',
]
# These options are only available in win32/configure.js for Windows.
EXTRA_WIN32_XML_CONFIGURE_OPTIONS = [
'trio=no',
'walker=no',
]
XML_CONFIGURE_OPTIONS = (
[option[0] for option in SHARED_XML_CONFIGURE_OPTIONS] +
EXTRA_NIX_XML_CONFIGURE_OPTIONS)
XML_WIN32_CONFIGURE_OPTIONS = (
[option[1] for option in SHARED_XML_CONFIGURE_OPTIONS] +
EXTRA_WIN32_XML_CONFIGURE_OPTIONS)
FILES_TO_REMOVE = [
'src/DOCBparser.c',
'src/HACKING',
'src/INSTALL',
'src/INSTALL.libxml2',
'src/MAINTAINERS',
'src/Makefile.in',
'src/Makefile.win',
'src/README.cvs-commits',
# This is unneeded "legacy" SAX API, even though we enable SAX1.
'src/SAX.c',
'src/VxWorks',
'src/autogen.sh',
'src/autom4te.cache',
'src/bakefile',
'src/build_glob.py',
'src/c14n.c',
'src/catalog.c',
'src/compile',
'src/config.guess',
'src/config.sub',
'src/configure',
'src/chvalid.def',
'src/debugXML.c',
'src/depcomp',
'src/doc',
'src/example',
'src/genChRanges.py',
'src/global.data',
'src/include/libxml/Makefile.in',
'src/include/libxml/xmlversion.h',
'src/include/libxml/xmlwin32version.h',
'src/include/libxml/xmlwin32version.h.in',
'src/include/Makefile.in',
'src/install-sh',
'src/legacy.c',
'src/libxml2.doap',
'src/ltmain.sh',
'src/m4',
'src/macos/libxml2.mcp.xml.sit.hqx',
'src/missing',
'src/optim',
'src/os400',
'src/python',
'src/relaxng.c',
'src/result',
'src/rngparser.c',
'src/schematron.c',
'src/test',
'src/testOOM.c',
'src/testOOMlib.c',
'src/testOOMlib.h',
'src/trio.c',
'src/trio.h',
'src/triop.h',
'src/triostr.c',
'src/triostr.h',
'src/vms',
'src/win32/VC10/config.h',
'src/win32/wince',
'src/xinclude.c',
'src/xlink.c',
'src/xml2-config.in',
'src/xmlcatalog.c',
'src/xmllint.c',
'src/xmlmodule.c',
'src/xmlregexp.c',
'src/xmlschemas.c',
'src/xmlschemastypes.c',
'src/xpointer.c',
'src/xstc',
'src/xzlib.c',
]
THIRD_PARTY_LIBXML_SRC = 'third_party/libxml/src'
class WorkingDir(object):
""""Changes the working directory and resets it on exit."""
def __init__(self, path):
self.prev_path = os.getcwd()
self.path = path
def __enter__(self):
os.chdir(self.path)
def __exit__(self, exc_type, exc_value, traceback):
if exc_value:
print('was in %s; %s before that' % (self.path, self.prev_path))
os.chdir(self.prev_path)
def git(*args):
"""Runs a git subcommand.
On Windows this uses the shell because there's a git wrapper
batch file in depot_tools.
Arguments:
args: The arguments to pass to git.
"""
command = ['git'] + list(args)
subprocess.check_call(command, shell=(os.name == 'nt'))
def remove_tracked_and_local_dir(path):
"""Removes the contents of a directory from git, and the filesystem.
Arguments:
path: The path to remove.
"""
remove_tracked_files([path])
shutil.rmtree(path, ignore_errors=True)
os.mkdir(path)
def remove_tracked_files(files_to_remove):
"""Removes tracked files from git.
Arguments:
files_to_remove: The files to remove.
"""
files_to_remove = [f for f in files_to_remove if os.path.exists(f)]
if files_to_remove:
git('rm', '-rf', *files_to_remove)
def sed_in_place(input_filename, program):
"""Replaces text in a file.
Arguments:
input_filename: The file to edit.
program: The sed program to perform edits on the file.
"""
# OS X's sed requires -e
subprocess.check_call(['sed', '-i', '-e', program, input_filename])
def check_copying(full_path_to_third_party_libxml_src):
path = os.path.join(full_path_to_third_party_libxml_src, 'COPYING')
if not os.path.exists(path):
return
with open(path) as f:
s = f.read()
if 'GNU' in s:
raise Exception('check COPYING')
def prepare_libxml_distribution(src_path, libxml2_repo_path, temp_dir):
"""Makes a libxml2 distribution.
Args:
src_path: The path to the Chromium checkout.
libxml2_repo_path: The path to the local clone of the libxml2 repo.
temp_dir: A temporary directory to stage the distribution to.
Returns: A tuple of commit hash and full path to the archive.
"""
# If it was necessary to push from a distribution prepared upstream,
# this is the point to inject it: Return the version string and the
# distribution tar file.
# The libxml2 repo we're pulling changes from should not have
# local changes. This *should* be a commit that's publicly visible
# in the upstream repo; reviewers should check this.
check_clean(libxml2_repo_path)
temp_config_path = os.path.join(temp_dir, 'config')
os.mkdir(temp_config_path)
temp_src_path = os.path.join(temp_dir, 'src')
os.mkdir(temp_src_path)
with WorkingDir(libxml2_repo_path):
commit = subprocess.check_output(
['git', 'log', '-n', '1', '--pretty=format:%H', 'HEAD'])
subprocess.check_call(
'git archive HEAD | tar -x -C "%s"' % temp_src_path,
shell=True)
with WorkingDir(temp_src_path):
os.remove('.gitignore')
for patch in PATCHES:
print('applying %s' % patch)
subprocess.check_call(
'patch -p1 --fuzz=0 < %s' % os.path.join(
src_path, THIRD_PARTY_LIBXML_SRC, '..', 'chromium', patch),
shell=True)
with WorkingDir(temp_config_path):
print('../src/autogen.sh %s' % XML_CONFIGURE_OPTIONS)
subprocess.check_call(['../src/autogen.sh'] + XML_CONFIGURE_OPTIONS)
subprocess.check_call(['make', 'dist-all'])
# Work out what it is called
tar_file = subprocess.check_output(
'''awk '/PACKAGE =/ {p=$3} /VERSION =/ {v=$3} '''
'''END {printf("%s-%s.tar.gz", p, v)}' Makefile''',
shell=True)
return commit, os.path.abspath(tar_file)
def roll_libxml_linux(src_path, libxml2_repo_path):
with WorkingDir(src_path):
# Export the upstream git repo.
try:
temp_dir = tempfile.mkdtemp()
print('temporary directory: %s' % temp_dir)
commit, tar_file = prepare_libxml_distribution(
src_path, libxml2_repo_path, temp_dir)
# Remove all of the old libxml to ensure only desired cruft
# accumulates
remove_tracked_and_local_dir(THIRD_PARTY_LIBXML_SRC)
# Update the libxml repo and export it to the Chromium tree
with WorkingDir(THIRD_PARTY_LIBXML_SRC):
subprocess.check_call(
'tar xzf %s --strip-components=1' % tar_file,
shell=True)
finally:
shutil.rmtree(temp_dir)
with WorkingDir(THIRD_PARTY_LIBXML_SRC):
# Put the version number in the README file
sed_in_place('../README.chromium',
's/Version: .*$/Version: %s/' % commit)
with WorkingDir('../linux'):
subprocess.check_call(
['../src/autogen.sh'] + XML_CONFIGURE_OPTIONS)
check_copying(os.getcwd())
sed_in_place('config.h', 's/#define HAVE_RAND_R 1//')
# Add *everything*
with WorkingDir('../src'):
git('add', '*')
git('commit', '-am', '%s libxml, linux' % commit)
print('Now push to Windows and run steps there.')
def roll_libxml_win32(src_path):
with WorkingDir(src_path):
# Run the configure script.
with WorkingDir(os.path.join(THIRD_PARTY_LIBXML_SRC, 'win32')):
subprocess.check_call(
['cscript', '//E:jscript', 'configure.js', 'compiler=msvc'] +
XML_WIN32_CONFIGURE_OPTIONS)
# Add and commit the result.
shutil.move('../config.h', '../../win32/config.h')
git('add', '../../win32/config.h')
shutil.move('../include/libxml/xmlversion.h',
'../../win32/include/libxml/xmlversion.h')
git('add', '../../win32/include/libxml/xmlversion.h')
git('commit', '-m', 'Windows')
git('clean', '-f')
print('Now push to Mac and run steps there.')
def roll_libxml_mac(src_path, icu4c_path):
icu4c_path = os.path.abspath(os.path.expanduser(icu4c_path))
os.environ["LDFLAGS"] = "-L" + os.path.join(icu4c_path, 'lib')
os.environ["CPPFLAGS"] = "-I" + os.path.join(icu4c_path, 'include')
os.environ["PKG_CONFIG_PATH"] = os.path.join(icu4c_path, 'lib/pkgconfig')
full_path_to_third_party_libxml = os.path.join(
src_path, THIRD_PARTY_LIBXML_SRC, '..')
with WorkingDir(os.path.join(full_path_to_third_party_libxml, 'mac')):
subprocess.check_call(['autoreconf', '-i', '../src'])
os.chmod('../src/configure',
os.stat('../src/configure').st_mode | stat.S_IXUSR)
subprocess.check_call(['../src/configure'] + XML_CONFIGURE_OPTIONS)
sed_in_place('config.h', 's/#define HAVE_RAND_R 1//')
with WorkingDir(full_path_to_third_party_libxml):
commit = subprocess.check_output(['awk', '/Version:/ {print $2}',
'README.chromium'])
remove_tracked_files(FILES_TO_REMOVE)
commit_message = 'Roll libxml to %s' % commit
git('commit', '-am', commit_message)
print('Now upload for review, etc.')
def check_clean(path):
with WorkingDir(path):
status = subprocess.check_output(['git', 'status', '-s'])
if len(status) > 0:
raise Exception('repository at %s is not clean' % path)
def main():
src_dir = os.getcwd()
if not os.path.exists(os.path.join(src_dir, 'third_party')):
print('error: run this script from the Chromium src directory')
sys.exit(1)
parser = argparse.ArgumentParser(
description='Roll the libxml2 dependency in Chromium')
platform = parser.add_mutually_exclusive_group(required=True)
platform.add_argument('--linux', action='store_true')
platform.add_argument('--win32', action='store_true')
platform.add_argument('--mac', action='store_true')
parser.add_argument(
'libxml2_repo_path',
type=str,
nargs='?',
help='The path to the local clone of the libxml2 git repo.')
parser.add_argument(
'--icu4c_path',
help='The path to the homebrew installation of icu4c.')
args = parser.parse_args()
if args.linux:
libxml2_repo_path = args.libxml2_repo_path
if not libxml2_repo_path:
print('Specify the path to the local libxml2 repo clone.')
sys.exit(1)
libxml2_repo_path = os.path.abspath(libxml2_repo_path)
roll_libxml_linux(src_dir, libxml2_repo_path)
elif args.win32:
roll_libxml_win32(src_dir)
elif args.mac:
icu4c_path = args.icu4c_path
if not icu4c_path:
print('Specify the path to the homebrew installation of icu4c with --icu4c_path.')
print(' ex: roll.py --mac --icu4c_path=~/homebrew/opt/icu4c')
sys.exit(1)
roll_libxml_mac(src_dir, icu4c_path)
if __name__ == '__main__':
main()
|
release/stubs.min/Autodesk/Revit/UI/__init___parts/TableViewUIUtils.py
|
htlcnn/ironpython-stubs
| 182 |
139845
|
class TableViewUIUtils(object):
""" This utility class contains members that involve the Revit UI and operate on schedule views or MEP electrical panel schedules. """
@staticmethod
def TestCellAndPromptToEditTypeParameter(tableView,sectionType,row,column):
"""
TestCellAndPromptToEditTypeParameter(tableView: TableView,sectionType: SectionType,row: int,column: int) -> bool
Prompts the end-user to control whether a type parameter contained in the
specified table cell should be allowed to be edited.
tableView: The table view.
sectionType: The section the row lies in.
row: The row index in the section.
column: The column index in the section.
Returns: Returns true if editing the cell is allowed; otherwise false.
"""
pass
__all__=[
'TestCellAndPromptToEditTypeParameter',
]
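# A minimal usage sketch (illustrative only; table_view, section_type, row and
# column are assumed to come from the surrounding Revit add-in code):
#
#   if TableViewUIUtils.TestCellAndPromptToEditTypeParameter(
#           table_view, section_type, row, column):
#       pass  # editing the type parameter in this cell is allowed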
|
blog_notebooks/cyber/raw_data_generator/raw_data_generator/raw_data_generator.py
|
BradReesWork/notebooks-contrib
| 155 |
139858
|
<reponame>BradReesWork/notebooks-contrib
from jinja2 import Environment, PackageLoader, select_autoescape
import os
import json
import logging
class RawDataGenerator:
def __init__(self):
# Read templates
self.env = Environment(loader=PackageLoader('raw_data_generator', 'templates'),)
self.schema = ["Time", "EventID", "LogHost", "LogonType", "LogonTypeDescription", "UserName", "DomainName", "LogonID",
"SubjectUserName", "SubjectDomainName", "SubjectLogonID", "Status", "Source", "ServiceName", "Destination",
"AuthenticationPackage", "FailureReason", "ProcessName", "ProcessID", "ParentProcessName", "ParentProcessID", "Raw"]
# Generates raw data from templates
def generate_raw_data(self, infilepath, outfilepath, output_format):
with open(infilepath, "r") as infile:
filename = os.path.basename(infilepath).split('.')[0]
logging.info("Reading fileprint... " + infilepath)
logging.info(outfilepath + '/' + filename + '.' + output_format)
with open(outfilepath + '/' + filename + '.' + output_format, "w") as outfile:
logging.info("Writing to file..." + outfilepath)
if output_format == "csv":
# Write header
outfile.write((",".join(self.schema) + "\n"))
for line in infile:
str_line = line
json_line = json.loads(str_line)
raw_data = repr(self._generate_raw_log(json_line))
json_line["Raw"] = raw_data
if output_format == "csv":
raw_line = self._add_raw_data_to_csv(json_line)
else: #json
raw_line = json.dumps(json_line)
# If this line from the input source ends in a newline, then add a newline to output line
if repr(str_line)[-3:] == "\\n'":
raw_line = raw_line + "\n"
outfile.write(raw_line)
logging.info("Generate raw data is complete")
def _generate_raw_log(self, json_line):
event_code = json_line['EventID']
event_template = self.env.get_template("event_" + str(event_code) + ".txt")
return event_template.render(json_line)
def _add_raw_data_to_csv(self, json_data):
csv_str = str(json_data["Time"])
for val in self.schema[1:]:
data = str(json_data[val]) if val in json_data else ""
csv_str = csv_str + "," + data
return csv_str
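# A minimal usage sketch (file names are illustrative; each input line must be
# a JSON object whose "EventID" matches one of the bundled templates):
#
#   generator = RawDataGenerator()
#   generator.generate_raw_data("wls_events.jsonl", "./output", "csv")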
|
digital_image_processing/filters/gabor_filter.py
|
Vaibhav19102008/Python
| 145,614 |
139870
|
# Implementation of the Gaborfilter
# https://en.wikipedia.org/wiki/Gabor_filter
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(
ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
"""
:param ksize: The kernel size of the convolutional filter (ksize x ksize)
:param sigma: standard deviation of the Gaussian bell curve
:param theta: The orientation of the normal to the parallel stripes
of the Gabor function.
:param lambd: Wavelength of the sinusoidal component.
:param gamma: The spatial aspect ratio; specifies the ellipticity
of the support of the Gabor function.
:param psi: The phase offset of the sinusoidal function.
>>> gabor_filter_kernel(3, 8, 0, 10, 0, 0).tolist()
[[0.8027212023735046, 1.0, 0.8027212023735046], [0.8027212023735046, 1.0, \
0.8027212023735046], [0.8027212023735046, 1.0, 0.8027212023735046]]
"""
# prepare kernel
# the kernel size has to be odd
if (ksize % 2) == 0:
ksize = ksize + 1
gabor = np.zeros((ksize, ksize), dtype=np.float32)
# each value
for y in range(ksize):
for x in range(ksize):
# distance from center
px = x - ksize // 2
py = y - ksize // 2
# convert degrees to radians
_theta = theta / 180 * np.pi
cos_theta = np.cos(_theta)
sin_theta = np.sin(_theta)
# get kernel x
_x = cos_theta * px + sin_theta * py
# get kernel y
_y = -sin_theta * px + cos_theta * py
# fill kernel
gabor[y, x] = np.exp(
-(_x ** 2 + gamma ** 2 * _y ** 2) / (2 * sigma ** 2)
) * np.cos(2 * np.pi * _x / lambd + psi)
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
img = imread("../image_data/lena.jpg")
# convert the image to grayscale
gray = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple kernels to detect edges
out = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
"""
ksize = 10
sigma = 8
lambd = 10
gamma = 0
psi = 0
"""
kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filter2D(gray, CV_8UC3, kernel_10)
out = out / out.max() * 255
out = out.astype(np.uint8)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
|
examples/environment_features/scim_ideal_grid_simulation.py
|
RaviPandey33/gym-electric-motor-1
| 179 |
139875
|
"""This example simulates the start-up behavior of the squirrel cage induction motor connected to
an ideal three-phase grid. The state and action space is continuous.
Running the example will create a formatted plot that shows the motor's angular velocity, the drive torque,
the applied voltage in three-phase abc-coordinates, and the measured current in field-oriented dq-coordinates.
"""
import numpy as np
import gym_electric_motor as gem
import matplotlib.pyplot as plt
def parameterize_three_phase_grid(amplitude, frequency, initial_phase):
"""This nested function allows to create a function of time, which returns the momentary voltage of the
three-phase grid.
The nested structure allows to parameterize the three-phase grid by amplitude(as a fraction of the DC-link voltage),
frequency (in Hertz) and initial phase (in degree).
"""
omega = frequency * 2 * np.pi # 1/s
phi = 2 * np.pi / 3 # phase offset
phi_initial = initial_phase * 2 * np.pi / 360
def grid_voltage(t):
u_abc = [
amplitude * np.sin(omega * t + phi_initial),
amplitude * np.sin(omega * t + phi_initial - phi),
amplitude * np.sin(omega * t + phi_initial + phi)
]
return u_abc
return grid_voltage
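# For example, with initial_phase=0 the call grid_voltage(0.0) returns
# [0.0, -amplitude * sin(2*pi/3), amplitude * sin(2*pi/3)], a balanced
# three-phase triple whose components sum to zero at every instant.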
# Create the environment
env = gem.make(
# Choose the squirrel cage induction motor (SCIM) with continuous-control-set
"AbcCont-CC-SCIM-v0",
# Define the numerical solver for the simulation
ode_solver="scipy.ode",
# Define which state variables are to be monitored concerning limit violations
# "()" means, that limit violation will not necessitate an env.reset()
constraints=(),
# Set the sampling time
tau=1e-5
)
tau = env.physical_system.tau
limits = env.physical_system.limits
# reset the environment such that the simulation can be started
(state, reference) = env.reset()
# We define these arrays in order to save our simulation results in them
# Initial state and initial time are directly inserted
STATE = np.transpose(np.array([state * limits]))
TIME = np.array([0])
# Use the previously defined function to parameterize a three-phase grid with an amplitude of
# 80 % of the DC-link voltage and a frequency of 50 Hertz
f_grid = 50 # Hertz
u_abc = parameterize_three_phase_grid(amplitude=0.8, frequency=f_grid, initial_phase=0)
# Set a time horizon to simulate, in this case 60 ms
time_horizon = 0.06
step_horizon = int(time_horizon / tau)
for idx in range(step_horizon):
# calculate the time of this simulation step
time = idx * tau
# apply the voltage as given by the grid
(state, reference), reward, done, _ = env.step(u_abc(time))
# save the results of this simulation step
STATE = np.append(STATE, np.transpose([state * limits]), axis=1)
TIME = np.append(TIME, time)
# convert the timescale from s to ms
TIME *= 1e3
# the rest of the code is for plotting the results in a nice way
# the state indices for the SCIM are:
# STATE[0]: omega (mechanical angular velocity)
# STATE[1]: T (drive torque)
# STATE[2] - STATE[4]: i_sa, i_sb, i_sc (three-phase stator currents)
# STATE[5] - STATE[6]: i_sd, i_sq (stator currents in field oriented dq-coordinates)
# STATE[7] - STATE[9]: u_sa, u_sb, u_sc (three-phase stator voltages)
# STATE[10] - STATE[11]: u_sd, u_sq (stator voltages in field oriented dq-coordinates)
# STATE[12]: epsilon (rotor angular position)
# STATE[13]: u_sup (DC-link supply voltage)
plt.subplots(2, 2, figsize=(7.45, 2.5))
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.08, hspace=0.05)
plt.rcParams.update({'font.size': 8})
plt.subplot(2, 2, 1)
plt.plot(TIME, STATE[0])
plt.ylabel(r"$\omega_\mathrm{me} \, / \, \frac{1}{\mathrm{s}}$")
plt.xlim([0, 60])
plt.yticks([0, 50, 100, 150])
plt.tick_params(axis='x', which='both', labelbottom=False)
plt.tick_params(axis='both', direction="in", left=True, right=False, bottom=True, top=True)
plt.grid()
ax = plt.subplot(2, 2, 2)
plt.plot(TIME, STATE[7], label=r"$u_a$")
plt.plot(TIME, STATE[8], label=r"$u_b$")
plt.plot(TIME, STATE[9], label=r"$u_c$")
plt.ylabel(r"$u \, / \, \mathrm{V}$")
plt.xlim([0, 60])
plt.yticks([-200, 0, 200])
ax.yaxis.set_label_position("right")
ax.yaxis.tick_right()
plt.tick_params(axis='x', which='both', labelbottom=False)
plt.tick_params(axis='both', direction="in", left=False, right=True, bottom=True, top=True)
plt.grid()
plt.legend(loc="lower right", ncol=3)
plt.subplot(2, 2, 3)
plt.plot(TIME, STATE[1])
plt.xlabel(r"$t \, / \, \mathrm{ms}$")
plt.ylabel(r"$T \, / \, \mathrm{Nm}$")
plt.xlim([0, 60])
plt.yticks([0, 20])
plt.tick_params(axis='both', direction="in", left=True, right=False, bottom=True, top=True)
plt.grid()
ax = plt.subplot(2, 2, 4)
plt.plot(TIME, STATE[5], label=r"$i_d$")
plt.plot(TIME, STATE[6], label=r"$i_q$")
plt.xlabel(r"$t \, / \, \mathrm{ms}$")
plt.ylabel(r"$i \, / \, \mathrm{A}$")
plt.xlim([0, 60])
ax.yaxis.set_label_position("right")
ax.yaxis.tick_right()
plt.tick_params(axis='both', direction="in", left=False, right=True, bottom=True, top=True)
plt.yticks([0, 10, 20, 30])
plt.grid()
plt.legend(loc="upper right", ncol=2)
plt.show()
|
src/admin/widgets/datetime.py
|
aimanow/sft
| 280 |
139881
|
<filename>src/admin/widgets/datetime.py
from datetime import datetime, date
from wtforms.fields.html5 import DateTimeField
from godmode.widgets.base import BaseWidget
class DatetimeWidget(BaseWidget):
field = DateTimeField(format="%Y-%m-%d %H:%M:%S")
field_kwargs = {"step": 1}
def render_list(self, item):
value = getattr(item, self.name, None)
try:
if isinstance(value, (date, datetime)):
return value.strftime("%d.%m.%Y %H:%M")
if isinstance(value, str) and value.isnumeric():
return datetime.utcfromtimestamp(int(value)).strftime("%d.%m.%Y %H:%M")
except ValueError:
pass
return value
|
tests/test_inheritance_routers.py
|
kmlee78/django-ninja
| 2,809 |
139889
|
import pytest
from ninja import NinjaAPI, Router
from ninja.testing import TestClient
api = NinjaAPI()
@api.get("/endpoint")
# view->api
def global_op(request):
return "global"
first_router = Router()
@first_router.get("/endpoint_1")
# view->router, router->api
def router_op1(request):
return "first 1"
second_router_one = Router()
@second_router_one.get("endpoint_1")
# view->router2, router2->router1, router1->api
def router_op2(request):
return "second 1"
second_router_two = Router()
@second_router_two.get("endpoint_2")
# view->router2, router2->router1, router1->api
def router2_op3(request):
return "second 2"
first_router.add_router("/second", second_router_one, tags=["one"])
first_router.add_router("/second", second_router_two, tags=["two"])
api.add_router("/first", first_router, tags=["global"])
@first_router.get("endpoint_2")
# router->api, view->router
def router1_op1(request):
return "first 2"
@second_router_one.get("endpoint_3")
# router2->router1, router1->api, view->router2
def router21_op3(request, path_param: int = None):
return "second 3" if path_param is None else f"second 3: {path_param}"
second_router_three = Router()
@second_router_three.get("endpoint_4")
# router1->api, view->router2, router2->router1
def router_op3(request, path_param: int = None):
return "second 4" if path_param is None else f"second 4: {path_param}"
first_router.add_router("/second", second_router_three, tags=["three"])
client = TestClient(api)
@pytest.mark.parametrize(
"path,expected_status,expected_response",
[
("/endpoint", 200, "global"),
("/first/endpoint_1", 200, "first 1"),
("/first/endpoint_2", 200, "first 2"),
("/first/second/endpoint_1", 200, "second 1"),
("/first/second/endpoint_2", 200, "second 2"),
("/first/second/endpoint_3", 200, "second 3"),
("/first/second/endpoint_4", 200, "second 4"),
],
)
def test_inheritance_responses(path, expected_status, expected_response):
response = client.get(path)
assert response.status_code == expected_status, response.content
assert response.json() == expected_response
def test_tags():
schema = api.get_openapi_schema()
# print(schema)
glob = schema["paths"]["/api/first/endpoint_1"]["get"]
assert glob["tags"] == ["global"]
e1 = schema["paths"]["/api/first/second/endpoint_1"]["get"]
assert e1["tags"] == ["one"]
e2 = schema["paths"]["/api/first/second/endpoint_2"]["get"]
assert e2["tags"] == ["two"]
|
spyder/plugins/layout/api.py
|
Earthman100/spyder
| 7,956 |
139892
|
<filename>spyder/plugins/layout/api.py
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Layout Plugin API.
"""
# Standard library imports
import copy
# Third party imports
from qtpy.QtCore import QRectF, Qt
from qtpy.QtWidgets import (QGridLayout, QPlainTextEdit, QWidget)
# Local imports
from spyder.api.exceptions import SpyderAPIError
from spyder.api.translations import get_translation
# Localization
_ = get_translation("spyder")
class BaseGridLayoutType:
"""
A base layout type to create custom layouts for Spyder panes.
The API for this plugin is a subset of a QGridLayout, so the same
concepts, like row, column, spans and stretches apply.
Notes
-----
See: https://doc.qt.io/qt-5/qgridlayout.html
"""
ID = None
"""Unique string identifier for the layout."""
def __init__(self, parent_plugin):
self.plugin = parent_plugin
self._plugin = parent_plugin
self._areas = []
self._area_rects = []
self._column_stretchs = {}
self._row_stretchs = {}
self._default_added = False
self._default_area = None
self._visible_areas = []
self._rows = 0
self._cols = 0
self._plugin_ids = []
# --- Private API
# ------------------------------------------------------------------------
def _check_layout_validity(self):
"""
Check that the current layout is valid.
"""
self._visible_areas = []
# Check ID
if self.ID is None:
raise SpyderAPIError("A Layout must define an `ID` class "
"attribute!")
# Check name
self.get_name()
# All layouts need to add at least 1 area
if not self._areas:
raise SpyderAPIError("A Layout must define add least one area!")
default_areas = []
area_zero_zero = False
for area in self._areas:
default_areas.append(area["default"])
if area["default"]:
self._default_area = area
self._visible_areas.append(area["visible"])
if area_zero_zero and area["row"] == 0 and area["column"] == 0:
raise SpyderAPIError(
"Multiple areas defined their row and column as 0!")
if area["row"] == 0 and area["column"] == 0:
area_zero_zero = True
if not set(area["hidden_plugin_ids"]) <= set(area["plugin_ids"]):
raise SpyderAPIError(
"At least 1 hidden plugin id is not being specified "
"in the area plugin ids list!\n SpyderLayout: {}\n "
"hidden_plugin_ids: {}\n"
"plugin_ids: {}".format(self.get_name(),
area["hidden_plugin_ids"],
area["plugin_ids"]))
# Check that there is at least 1 visible!
if not any(self._visible_areas):
raise SpyderAPIError("At least 1 area must be `visible`")
# Check that there is a `default` area!
if not any(default_areas):
raise SpyderAPIError("No area is the `default`!")
# Check that there is 1 `default` area!
if default_areas.count(True) != 1:
raise SpyderAPIError("Only 1 area can be the `default`!")
# Check one area has row zero and column zero
if not area_zero_zero:
raise SpyderAPIError(
"1 area needs to be specified with row 0 and column 0!")
# Check Area
self._check_area()
def _check_area(self):
"""
Check whether the areas added to the current layout cover the entire rectangle,
where the rectangle is given by the extreme points of the added areas.
"""
self._area_rects = []
height = self._rows + 1
area_float_rects = []
delta = 0.0001
for index, area in enumerate(self._areas):
# These areas are used with a small delta to ensure if they are
# next to each other they will not overlap.
rectf = QRectF()
rectf.setLeft(area["column"] + delta)
rectf.setRight(area["column"] + area["col_span"] - delta)
rectf.setTop(height - area["row"] - delta)
rectf.setBottom(height - area["row"] - area["row_span"] + delta)
rectf.index = index
rectf.plugin_ids = area["plugin_ids"]
area_float_rects.append(rectf)
# These areas are used to calculate the actual total area
rect = QRectF()
rect.setLeft(area["column"])
rect.setRight(area["column"] + area["col_span"])
rect.setTop(height - area["row"])
rect.setBottom(height - area["row"] - area["row_span"])
rect.index = index
rect.plugin_ids = area["plugin_ids"]
self._area_rects.append(rect)
# Check if areas are overlapping!
for rect_1 in area_float_rects:
for rect_2 in area_float_rects:
if rect_1.index != rect_2.index:
if rect_1.intersects(rect_2):
raise SpyderAPIError(
"Area with plugins {0} is overlapping area "
"with plugins {1}".format(rect_1.plugin_ids,
rect_2.plugin_ids))
# Check the total area (using corner points) versus the sum of areas
total_area = 0
tops = []
rights = []
for index, rect in enumerate(self._area_rects):
tops.append(rect.top())
rights.append(rect.right())
area = abs(rect.width() * rect.height())
total_area += area
self._areas[index]["area"] = area
if total_area != max(rights)*max(tops):
raise SpyderAPIError(
"Areas are not covering the entire section!\n"
"Either an area is missing or col_span/row_span are "
"not correctly set!"
)
# --- SpyderGridLayout API
# ------------------------------------------------------------------------
def get_name(self):
"""
Return the layout localized name.
Returns
-------
str
Localized name of the layout.
Notes
-----
This is a method to be able to update localization without a restart.
"""
raise NotImplementedError("A layout must define a `get_name` method!")
# --- Public API
# ------------------------------------------------------------------------
def add_area(self,
plugin_ids,
row,
column,
row_span=1,
col_span=1,
default=False,
visible=True,
hidden_plugin_ids=[]):
"""
Add a new area to the layout, together with the `plugin_ids` that will populate it.
The area will start at row, column, spanning row_span rows and
col_span columns.
Parameters
----------
plugin_ids: list
List of plugin ids that will be in the area
row: int
Initial row where the area starts
column: int
Initial column where the area starts
row_span: int, optional
Number of rows that the area covers
col_span: int, optional
Number of columns the area covers
default: bool, optional
Defines an area as the default one, i.e. all other plugins that were
not passed in `plugin_ids` will be added to the default area.
Defaults to False.
visible: bool, optional
Defines if the area is visible when setting up the layout.
Default is True.
Notes
-----
See: https://doc.qt.io/qt-5/qgridlayout.html
"""
if self._default_added and default:
raise SpyderAPIError("A default location has already been "
"defined!")
self._plugin_ids += plugin_ids
self._rows = max(row, self._rows)
self._cols = max(column, self._cols)
self._default_added = default
self._column_stretchs[column] = 1
self._row_stretchs[row] = 1
self._areas.append(
dict(
plugin_ids=plugin_ids,
row=row,
column=column,
row_span=row_span,
col_span=col_span,
default=default,
visible=visible,
hidden_plugin_ids=hidden_plugin_ids,
)
)
def set_column_stretch(self, column, stretch):
"""
Set the factor of column to stretch.
The stretch factor is relative to the other columns in this grid.
Columns with a higher stretch factor take more of the available space.
Parameters
----------
column: int
The column number. The first column is number 0.
stretch: int
Column stretch factor.
Notes
-----
See: https://doc.qt.io/qt-5/qgridlayout.html
"""
self._column_stretchs[column] = stretch
def set_row_stretch(self, row, stretch):
"""
Set the factor of row to stretch.
The stretch factor is relative to the other rows in this grid.
Rows with a higher stretch factor take more of the available space.
Parameters
----------
row: int
The row number. The first row is number 0.
stretch: int
Row stretch factor.
Notes
-----
See: https://doc.qt.io/qt-5/qgridlayout.html
"""
self._row_stretchs[row] = stretch
def preview_layout(self, show_hidden_areas=False):
"""
Show the layout with placeholder texts using a QWidget.
"""
from spyder.utils.qthelpers import qapplication
app = qapplication()
widget = QWidget()
layout = QGridLayout()
for area in self._areas:
label = QPlainTextEdit()
label.setReadOnly(True)
label.setPlainText("\n".join(area["plugin_ids"]))
if area["visible"] or show_hidden_areas:
layout.addWidget(
label,
area["row"],
area["column"],
area["row_span"],
area["col_span"],
)
# label.setVisible(area["visible"])
if area["default"]:
label.setStyleSheet(
"QPlainTextEdit {background-color: #ff0000;}")
if not area["visible"]:
label.setStyleSheet(
"QPlainTextEdit {background-color: #eeeeee;}")
for row, stretch in self._row_stretchs.items():
layout.setRowStretch(row, stretch)
for col, stretch in self._column_stretchs.items():
layout.setColumnStretch(col, stretch)
widget.setLayout(layout)
widget.showMaximized()
app.exec_()
def set_main_window_layout(self, main_window, dockable_plugins):
"""
Set the given mainwindow layout.
First validate the current layout definition, then clear the main window's
current layout and finally calculate and set the new layout.
"""
# Define plugins assigned to areas, all the available plugins and
# initial docks for each area
all_plugin_ids = []
# Before applying a new layout all plugins need to be hidden
for plugin in dockable_plugins:
all_plugin_ids.append(plugin.NAME)
plugin.toggle_view(False)
# Add plugins without an assigned area to the default area and make
# them hidden. A deep copy is needed since tests can run multiple times with
# the same MainWindow instance when using the 'main_window' fixture
patched_default_area = copy.deepcopy(self._default_area)
unassgined_plugin_ids = list(
set(self._plugin_ids) ^ set(all_plugin_ids))
patched_default_area["plugin_ids"] += unassgined_plugin_ids
patched_default_area["hidden_plugin_ids"] += unassgined_plugin_ids
patched_areas = [
patched_default_area if area["default"] else area
for area in self._areas]
# Define initial dock for each area
docks = {}
for area in patched_areas:
current_area = area
plugin_id = current_area["plugin_ids"][0]
plugin = main_window.get_plugin(plugin_id, error=False)
if plugin:
dock = plugin.dockwidget
docks[(current_area["row"], current_area["column"])] = dock
dock.area = area["area"]
dock.col_span = area["col_span"]
dock.row_span = area["row_span"]
plugin.toggle_view(area["visible"])
# Define base layout (distribution of dockwidgets
# following defined areas)
layout_data = []
# Find dock splits in the horizontal direction
direction = Qt.Horizontal
for row in range(0, self._rows + 1):
dock = None
for col in range(0, self._cols + 1):
key = (row, col)
if key in docks:
if dock is None:
dock = docks[key]
else:
layout_data.append(
(1/docks[key].area,
key,
dock,
docks[key],
direction))
dock = docks[key]
main_window.addDockWidget(
Qt.LeftDockWidgetArea, dock, direction)
# Find dock splits in the vertical direction
direction = Qt.Vertical
for col in range(0, self._cols + 1):
dock = None
for row in range(0, self._rows + 1):
key = (row, col)
if key in docks:
if dock is None:
dock = docks[key]
else:
layout_data.append(
(1/docks[key].area,
key,
dock,
docks[key],
direction))
dock = docks[key]
# We sort based on the inverse of the area, then the row and then
# the column. This allows making the dock splits in the right order.
sorted_data = sorted(layout_data, key=lambda x: (x[0], x[1]))
for area, key, first, second, direction in sorted_data:
main_window.splitDockWidget(first, second, direction)
plugins_to_tabify = []
for area in patched_areas:
area_visible = area["visible"]
base_plugin = main_window.get_plugin(
area["plugin_ids"][0], error=False)
if base_plugin:
plugin_ids = area["plugin_ids"][1:]
hidden_plugin_ids = area["hidden_plugin_ids"]
for plugin_id in plugin_ids:
current_plugin = main_window.get_plugin(
plugin_id, error=False)
if current_plugin:
if (plugin_id in unassgined_plugin_ids and
hasattr(current_plugin, 'TABIFY')):
plugins_to_tabify.append(
(current_plugin, base_plugin))
else:
main_window.tabify_plugins(
base_plugin, current_plugin)
if plugin_id not in hidden_plugin_ids:
current_plugin.toggle_view(area_visible)
else:
current_plugin.toggle_view(False)
# Raise front widget per area
if area["visible"]:
base_plugin.dockwidget.show()
base_plugin.dockwidget.raise_()
# try to use the TABIFY attribute to add the plugin to the layout.
# Otherwise use the default area base plugin
for plugin, base_plugin in plugins_to_tabify:
if not main_window.tabify_plugin(plugin):
main_window.tabify_plugins(base_plugin, plugin)
current_plugin.toggle_view(False)
column_docks = []
column_stretches = []
for key, dock in docks.items():
for col, stretch in self._column_stretchs.items():
if key[1] == col and dock.col_span == 1:
column_docks.append(dock)
column_stretches.append(stretch)
row_docks = []
row_stretches = []
for key, dock in docks.items():
for row, stretch in self._row_stretchs.items():
if key[0] == row and dock.row_span == 1:
row_docks.append(dock)
row_stretches.append(stretch)
main_window.showMaximized()
main_window.resizeDocks(column_docks, column_stretches, Qt.Horizontal)
main_window.resizeDocks(row_docks, row_stretches, Qt.Vertical)
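# A minimal sketch of a layout built on this API (the class name, ID and
# plugin identifiers below are illustrative, not actual Spyder plugin names):
#
#   class SideBySideLayout(BaseGridLayoutType):
#       ID = "side_by_side_layout"
#
#       def __init__(self, parent_plugin):
#           super().__init__(parent_plugin)
#           self.add_area(["editor"], row=0, column=0, default=True)
#           self.add_area(["console"], row=0, column=1)
#           self.set_column_stretch(0, 2)
#           self.set_column_stretch(1, 1)
#
#       def get_name(self):
#           return _("Side by side")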
|
examples/plotting/plot_graph.py
|
WeilerP/cellrank
| 172 |
139915
|
"""
Plot graph structures
---------------------
This example shows how to plot graph structures, such as the transition matrix.
"""
import cellrank as cr
import numpy as np
adata = cr.datasets.pancreas_preprocessed("../example.h5ad")
adata
# %%
# First, we create a forward transition matrix using the high-level pipeline.
cr.tl.transition_matrix(
adata, show_progress_bar=False, weight_connectivities=0.2, softmax_scale=4
)
# %%
# We can now plot the transition matrix. Below we don't show any arrows, which dramatically speeds up the plotting.
cr.pl.graph(
adata,
"T_fwd",
edge_alpha=0.1,
node_size=5,
arrows=False,
keys="clusters",
keylocs="obs",
)
# %%
# To further illustrate the functionalities, let us only consider the `'Delta'` cluster. We can also filter the edges
# by their weights, as shown below. Only transitions with probability at least 0.1 are plotted.
ixs = np.where(adata.obs["clusters"] == "Delta")[0]
cr.pl.graph(adata, "T_fwd", ixs=ixs, arrows=True, node_size=200, filter_edges=(0.1, 1))
# %%
# Lastly, we can visualize different edge aggregations, such as minimum or maximum. Here we take at most 3 outgoing
# edges restricted to ``ixs`` for each node in descending order and color the nodes by the maximum outgoing weights.
# Aggregated values are always computed before any filtering happens, such as shown above.
#
# Here we also specify ``edge_reductions_restrict_to_ixs`` (by default, it is the same as ``ixs``) that computes the
# statistic between the cells marked with ``ixs`` and ``edge_reductions_restrict_to_ixs``.
#
# Below we compare the maximum transition from each of the `"Delta"` cells to any of the `"Beta"` cells.
cr.pl.graph(
adata,
"T_fwd",
ixs=ixs,
edge_alpha=0.5,
node_size=200,
keys="outgoing",
arrows=False,
top_n_edges=(3, False, "outgoing"),
title="outgoing to Beta",
edge_reductions=np.max,
edge_reductions_restrict_to_ixs=np.where(adata.obs["clusters"] == "Beta")[0],
)
|
psq/__init__.py
|
Tomesco/bookshelf-demo-project
| 210 |
139990
|
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
psq - a distributed task queue using Google Cloud Pubsub.
Homepage: https://github.com/GoogleCloudPlatform/psq
"""
from __future__ import absolute_import
import logging
from .broadcast_queue import BroadcastQueue
from .datastore_storage import DatastoreStorage
from .globals import current_queue, current_task, queue_context, task_context
from .queue import Queue
from .storage import Storage
from .task import Retry, Task, TaskResult
from .worker import Worker
__all__ = [
'Queue',
'BroadcastQueue',
'Task',
'TaskResult',
'Retry',
'Worker',
'Storage',
'DatastoreStorage',
'current_queue',
'queue_context',
'current_task',
'task_context'
]
# Set default logging handler to avoid "No handler found" warnings.
logging.getLogger(__name__).addHandler(logging.NullHandler())
|
twiml/voice/say/say-2/say-2.6.x.py
|
Tshisuaka/api-snippets
| 234 |
139994
|
from twilio.twiml.voice_response import VoiceResponse, Say
response = VoiceResponse()
response.say('Chapeau!', voice='alice', language='fr-FR')
print(response)
|
redbot/message/headers/content_base.py
|
Malvoz/redbot
| 167 |
139998
|
#!/usr/bin/env python
from redbot.message import headers
class content_base(headers.HttpHeader):
canonical_name = "Content-Base"
description = """\
The `Content-Base` header field established the base URI of the message. It has been
deprecated, because it was not implemented widely.
"""
reference = "https://tools.ietf.org/html/rfc2068#section-14.11"
list_header = False
deprecated = True
valid_in_requests = True
valid_in_responses = True
no_coverage = True
|
tests/softmax_regression_test.py
|
SanggunLee/edgetpu
| 320 |
140006
|
<filename>tests/softmax_regression_test.py
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests SoftmaxRegression class.
Generates some fake data and tries to overfit the data with SoftmaxRegression.
"""
import unittest
from edgetpu.learn.backprop.softmax_regression import SoftmaxRegression
import numpy as np
def generate_fake_data(class_sizes, means, cov_mats):
"""Generates fake data for training and testing.
Examples from the same class are drawn from the same multivariate normal (MVN)
distribution.
# classes = len(class_sizes) = len(means) = len(cov_mats)
dim of MVN = cov_mats[0].shape[0]
Args:
class_sizes: list of ints, number of examples to draw from each class.
means: list of list of floats, mean value of each MVN distribution.
cov_mats: list of ndarray, each element is a k by k ndarray, which
represents the covariance matrix of the MVN distribution, where k is the
dimension of the MVN distribution.
Returns:
a tuple of data and labels. data and labels are shuffled.
"""
# Some sanity checks.
assert len(class_sizes) == len(means)
assert len(class_sizes) == len(cov_mats)
num_data = np.sum(class_sizes)
feature_dim = len(means[0])
data = np.empty((num_data, feature_dim))
labels = np.empty((num_data), dtype=int)
start_idx = 0
class_idx = 0
for size, mean, cov_mat in zip(class_sizes, means, cov_mats):
data[start_idx:start_idx + size] = np.random.multivariate_normal(
mean, cov_mat, size)
labels[start_idx:start_idx + size] = np.ones(size, dtype=int) * class_idx
start_idx += size
class_idx += 1
perm = np.random.permutation(data.shape[0])
data = data[perm, :]
labels = labels[perm]
return data, labels
class SoftmaxRegressionTest(unittest.TestCase):
def test_softmax_regression_linear_separable_data(self):
# Fake data is generated from 3 MVN distributions; these MVN distributions
# are tuned to be well separated, such that the data can be separated by the
# SoftmaxRegression model (which is a linear classifier).
num_train = 200
num_val = 30
# Let's distribute data evenly among different classes.
num_classes = 3
class_sizes = ((num_train + num_val) // num_classes) * np.ones(
num_classes, dtype=int)
class_sizes[-1] = (num_train + num_val) - np.sum(class_sizes[0:-1])
# The factor 3 is chosen such that each pair of means is over 6 `sigma`
# apart, which makes it harder for the classes to `touch` each other.
# https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule
means = np.array([[1, 1], [-1, -1], [1, -1]]) * 3
feature_dim = len(means[0])
cov_mats = [np.eye(feature_dim)] * num_classes
model = SoftmaxRegression(feature_dim, num_classes)
np.random.seed(12345)
all_data, all_labels = generate_fake_data(class_sizes, means, cov_mats)
dataset = {}
dataset['data_train'] = all_data[0:num_train]
dataset['labels_train'] = all_labels[0:num_train]
dataset['data_val'] = all_data[num_train:]
dataset['labels_val'] = all_labels[num_train:]
# train with SGD.
num_iter = 20
learning_rate = 0.01
model.train_with_sgd(
dataset, num_iter, learning_rate, batch_size=100, print_every=5)
self.assertGreater(
model.get_accuracy(dataset['data_train'], dataset['labels_train']),
0.99)
def test_softmax_regression_linear_non_separable_data(self):
# Fake data is generated from 3 MVN distributions; these MVN distributions
# are NOT well separated.
num_train = 200
num_val = 30
# Let's distribute data evenly among different classes.
num_classes = 3
class_sizes = ((num_train + num_val) // num_classes) * np.ones(
num_classes, dtype=int)
class_sizes[-1] = (num_train + num_val) - np.sum(class_sizes[0:-1])
means = np.array([[1, 1], [-1, -1], [1, -1]])
feature_dim = len(means[0])
cov_mats = [np.eye(feature_dim)] * num_classes
model = SoftmaxRegression(feature_dim, num_classes)
np.random.seed(54321)
all_data, all_labels = generate_fake_data(class_sizes, means, cov_mats)
dataset = {}
dataset['data_train'] = all_data[0:num_train]
dataset['labels_train'] = all_labels[0:num_train]
dataset['data_val'] = all_data[num_train:]
dataset['labels_val'] = all_labels[num_train:]
# train with SGD.
num_iter = 50
learning_rate = 0.1
model.train_with_sgd(
dataset, num_iter, learning_rate, batch_size=100, print_every=5)
self.assertGreater(
model.get_accuracy(dataset['data_train'], dataset['labels_train']), 0.8)
|
python/topology/generator.py
|
marcfrei/scion
| 211 |
140024
|
<filename>python/topology/generator.py
#!/usr/bin/env python3
# Copyright 2014 ETH Zurich
# Copyright 2018 ETH Zurich, Anapaya Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`generator` --- SCION topology generator
=============================================
"""
# Stdlib
import argparse
# SCION
from python.lib.defines import (
GEN_PATH,
)
from python.topology.config import (
ConfigGenerator,
ConfigGenArgs,
DEFAULT_TOPOLOGY_FILE,
)
def add_arguments(parser):
parser.add_argument('-c', '--topo-config', default=DEFAULT_TOPOLOGY_FILE,
help='Topology config file')
parser.add_argument('-d', '--docker', action='store_true',
help='Create a docker-compose configuration')
parser.add_argument('-n', '--network',
help='Network to create subnets in (e.g. "127.0.0.0/8")')
parser.add_argument('-o', '--output-dir', default=GEN_PATH,
help='Output directory')
parser.add_argument('--random-ifids', action='store_true',
help='Generate random IFIDs')
parser.add_argument('--docker-registry', help='Specify docker registry to pull images from')
parser.add_argument('--image-tag', help='Docker image tag')
parser.add_argument('--sig', action='store_true',
help='Generate a SIG per AS (only available with -d, the SIG image needs\
to be built manually e.g. when running acceptance tests)')
parser.add_argument('-qos', '--colibri', action='store_true',
help='Generate COLIBRI service')
parser.add_argument('--features', help='Feature flags to enable, a comma separated list\
e.g. foo,bar enables foo and bar feature.')
return parser
def init_features(raw_args):
features = getattr(raw_args, 'features')
if features is None:
features = ''
feature_dict = {}
for f in features.split(','):
if f != '':
feature_dict[f] = True
setattr(raw_args, 'features', feature_dict)
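# For example, passing "--features foo,bar" leaves raw_args.features as
# {'foo': True, 'bar': True}; omitting the flag yields an empty dict.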
def main():
"""
Main function.
"""
parser = argparse.ArgumentParser()
add_arguments(parser)
raw_args = parser.parse_args()
init_features(raw_args)
args = ConfigGenArgs(raw_args)
confgen = ConfigGenerator(args)
confgen.generate_all()
if __name__ == "__main__":
main()
|
src/genie/libs/parser/iosxr/tests/ShowSpanningTreeMst/cli/equal/golden_output_2_expected.py
|
balmasea/genieparser
| 204 |
140033
|
<reponame>balmasea/genieparser
expected_output = {
'mstp': {
'blocked-ports': {
'mst_instances': {
'0': {
'mst_id': '0',
'interfaces': {
'GigabitEthernet0/0/4/4': {
'name': 'GigabitEthernet0/0/4/4',
'cost': 200000,
'role': 'ALT',
'port_priority': 128,
'port_num': 196,
'port_state': 'BLK',
'designated_bridge_priority': 4097,
'designated_bridge_address': '0004.9bff.8078',
'designated_port_priority': 128,
'designated_port_num': 195,
},
},
},
},
},
},
}
|
tests/input/func_models_float_money_field.py
|
enefeq/django_linter
| 101 |
140051
|
<reponame>enefeq/django_linter
"""
Check for correct type for money related field
"""
from django.db import models
class Product(models.Model):
name = models.CharField(max_length=255)
price = models.FloatField()
def __unicode__(self):
return self.name
|
tests/db_functions/math/test_log.py
|
Lord-Elrond/django
| 61,676 |
140063
|
<reponame>Lord-Elrond/django
import math
from decimal import Decimal
from django.db.models.functions import Log
from django.test import TestCase
from ..models import DecimalModel, FloatModel, IntegerModel
class LogTests(TestCase):
def test_null(self):
IntegerModel.objects.create(big=100)
obj = IntegerModel.objects.annotate(
null_log_small=Log('small', 'normal'),
null_log_normal=Log('normal', 'big'),
null_log_big=Log('big', 'normal'),
).first()
self.assertIsNone(obj.null_log_small)
self.assertIsNone(obj.null_log_normal)
self.assertIsNone(obj.null_log_big)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal('12.9'), n2=Decimal('3.6'))
obj = DecimalModel.objects.annotate(n_log=Log('n1', 'n2')).first()
self.assertIsInstance(obj.n_log, Decimal)
self.assertAlmostEqual(obj.n_log, Decimal(math.log(obj.n2, obj.n1)))
def test_float(self):
FloatModel.objects.create(f1=2.0, f2=4.0)
obj = FloatModel.objects.annotate(f_log=Log('f1', 'f2')).first()
self.assertIsInstance(obj.f_log, float)
self.assertAlmostEqual(obj.f_log, math.log(obj.f2, obj.f1))
def test_integer(self):
IntegerModel.objects.create(small=4, normal=8, big=2)
obj = IntegerModel.objects.annotate(
small_log=Log('small', 'big'),
normal_log=Log('normal', 'big'),
big_log=Log('big', 'big'),
).first()
self.assertIsInstance(obj.small_log, float)
self.assertIsInstance(obj.normal_log, float)
self.assertIsInstance(obj.big_log, float)
self.assertAlmostEqual(obj.small_log, math.log(obj.big, obj.small))
self.assertAlmostEqual(obj.normal_log, math.log(obj.big, obj.normal))
self.assertAlmostEqual(obj.big_log, math.log(obj.big, obj.big))
|
beartype/_util/func/utilfunctest.py
|
vcokltfre/beartype
| 1,056 |
140105
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide **callable tester** (i.e., callable testing various properties of
passed callables) utilities.
This private submodule implements utility functions dynamically introspecting
various high-level properties of arbitrary callables.
This private submodule is *not* intended for importation by downstream callers.
'''
# ....................{ IMPORTS }....................
from beartype.roar._roarexc import _BeartypeUtilCallableException
from beartype._util.func.utilfunccodeobj import (
get_func_unwrapped_codeobj_or_none)
from beartype._util.utiltyping import (
CallableCodeObjable,
TypeException,
)
from inspect import (
CO_ASYNC_GENERATOR,
CO_COROUTINE,
CO_GENERATOR,
)
from typing import Any
# See the "beartype.cave" submodule for further commentary.
__all__ = ['STAR_IMPORTS_CONSIDERED_HARMFUL']
# ....................{ CONSTANTS }....................
FUNC_NAME_LAMBDA = '<lambda>'
'''
Default name of all **pure-Python lambda functions** (i.e., function declared
as a ``lambda`` expression embedded in a larger statement rather than as a
full-blown ``def`` statement).
Python initializes the names of *all* lambda functions to this lambda-specific
placeholder string on lambda definition.
Caveats
----------
**Usage of this placeholder to differentiate lambda from non-lambda callables
invites false positives in unlikely edge cases.** Technically, malicious third
parties may externally change the name of any lambda function *after* defining
that function. Pragmatically, no one sane should ever do such a horrible thing.
While predictably absurd, this is also the only efficient (and thus sane) means
of differentiating lambda from non-lambda callables. Alternatives require
AST-based parsing, which comes with its own substantial caveats, concerns,
edge cases, and false positives. If you must pick your poison, pick this one.
'''
# ....................{ VALIDATORS }....................
#FIXME: Uncomment when needed.
# def die_unless_func_lambda(
# # Mandatory parameters.
# func: Any,
#
# # Optional parameters.
# exception_cls: Type[Exception] = _BeartypeUtilCallableException,
# ) -> None:
# '''
# Raise an exception unless the passed callable is a **pure-Python lambda
# function** (i.e., function declared as a `lambda` expression embedded in a
# larger statement rather than as a full-blown `def` statement).
#
# Parameters
# ----------
# func : Callable
# Callable to be inspected.
# exception_cls : type, optional
# Type of exception to be raised if this callable is *not* a pure-Python
# lambda function. Defaults to :class:`_BeartypeUtilCallableException`.
#
# Raises
# ----------
# exception_cls
# If this callable is *not* a pure-Python lambda function.
#
# See Also
# ----------
# :func:`is_func_lambda`
# Further details.
# '''
#
# # If this callable is *NOT* a lambda function, raise an exception.
# if not is_func_lambda(func):
# assert isinstance(exception_cls, type), (
# f'{repr(exception_cls)} not class.')
# raise exception_cls(f'Callable {repr(func)} not lambda function.')
def die_unless_func_python(
# Mandatory parameters.
func: CallableCodeObjable,
# Optional parameters.
func_label: str = 'Callable',
exception_cls: TypeException = _BeartypeUtilCallableException,
) -> None:
'''
Raise an exception if the passed callable is **C-based** (i.e., implemented
in C as either a builtin bundled with the active Python interpreter *or*
third-party C extension function).
Equivalently, this validator raises an exception unless the passed function
is **pure-Python** (i.e., implemented in Python as either a function or
method).
Parameters
----------
func : CallableCodeObjable
Callable to be inspected.
func_label : str, optional
Human-readable label describing this callable in exception messages
raised by this validator. Defaults to ``'Callable'``.
exception_cls : type, optional
Type of exception to be raised in the event of fatal error. Defaults to
:class:`_BeartypeUtilCallableException`.
Raises
----------
exception_cls
If this callable has *no* code object and is thus *not* pure-Python.
See Also
----------
:func:`is_func_python`
Further details.
'''
# If this callable is *NOT* pure-Python, raise an exception.
if not is_func_python(func):
assert isinstance(func_label, str), f'{repr(func_label)} not string.'
assert isinstance(exception_cls, type), (
f'{repr(exception_cls)} not class.')
# If this callable is uncallable, raise an appropriate exception.
if not callable(func):
raise exception_cls(f'{func_label} {repr(func)} not callable.')
# Else, this callable is callable.
# By process of elimination, this callable *MUST* be C-based. In this
# case, raise an appropriate exception.
raise exception_cls(
f'{func_label} {repr(func)} not pure-Python (i.e., code object '
f'not found due to being either C[++]-based or object defining '
f'the __call__() dunder method).'
)
# ....................{ TESTERS }....................
def is_func_lambda(func: Any) -> bool:
'''
``True`` only if the passed object is a **pure-Python lambda function**
(i.e., function declared as a ``lambda`` expression embedded in a larger
statement rather than as a full-blown ``def`` statement).
Parameters
----------
func : object
Object to be inspected.
Returns
----------
bool
``True`` only if this object is a pure-Python lambda function.
'''
# Return true only if both...
return (
# This callable is pure-Python *AND*...
is_func_python(func) and
# This callable's name is the lambda-specific placeholder name
# initially given by Python to *ALL* lambda functions. Technically,
# this name may be externally changed by malicious third parties after
# the declaration of this lambda. Pragmatically, no one sane would ever
# do such a horrible thing. Would they!?!?
#
# While predictably absurd, this is also the only efficient (and thus
# sane) means of differentiating lambda from non-lambda callables.
# Alternatives require AST-based parsing, which comes with its own
# substantial caveats, concerns, and edge cases.
func.__name__ == FUNC_NAME_LAMBDA
)
def is_func_python(func: object) -> bool:
'''
``True`` only if the passed object is a **pure-Python callable** (i.e.,
implemented in Python as either a function or method rather than in C as
either a builtin bundled with the active Python interpreter *or*
third-party C extension function).
Parameters
----------
func : object
Object to be inspected.
Returns
----------
bool
``True`` only if this object is a pure-Python callable.
'''
# Return true only if a pure-Python code object underlies this object.
# C-based callables are associated with *NO* code objects.
return get_func_unwrapped_codeobj_or_none(func) is not None
# ....................{ TESTERS ~ async }....................
def is_func_async(func: object) -> bool:
'''
``True`` only if the passed object is an **asynchronous callable**
(i.e., awaitable callable satisfying the :class:`collections.abc.Awaitable`
protocol by being declared via the ``async def`` syntax and thus callable
*only* when preceded by comparable ``await`` syntax).
Parameters
----------
func : object
Object to be inspected.
Returns
----------
bool
``True`` only if this object is an asynchronous callable.
See Also
----------
:func:`inspect.iscoroutinefunction`
:func:`inspect.isasyncgenfunction`
Stdlib functions strongly inspiring this implementation.
'''
# Code object underlying this pure-Python callable if any *OR* "None".
#
# Note this tester intentionally inlines the tests performed by the
# is_func_async_coroutine() and
# is_func_async_generator() testers for efficiency.
func_codeobj = get_func_unwrapped_codeobj_or_none(func)
# If this object is *NOT* a pure-Python callable, immediately return false.
if func_codeobj is None:
return False
# Else, this object is a pure-Python callable.
# Bit field of OR-ed binary flags describing this callable.
func_codeobj_flags = func_codeobj.co_flags
# Return true only if these flags imply this callable to be either...
return (
# An asynchronous coroutine *OR*...
func_codeobj_flags & CO_COROUTINE != 0 or
# An asynchronous generator.
func_codeobj_flags & CO_ASYNC_GENERATOR != 0
)
def is_func_async_coroutine(func: object) -> bool:
'''
``True`` only if the passed object is an **asynchronous coroutine**
(i.e., awaitable callable containing *no* ``yield`` expressions satisfying
the :class:`collections.abc.Awaitable` protocol by being declared via the
``async def`` syntax and thus callable *only* when preceded by comparable
``await`` syntax).
Parameters
----------
func : object
Object to be inspected.
Returns
----------
bool
``True`` only if this object is an asynchronous coroutine.
See Also
----------
:func:`inspect.iscoroutinefunction`
Stdlib function strongly inspiring this implementation.
'''
# Code object underlying this pure-Python callable if any *OR* "None".
func_codeobj = get_func_unwrapped_codeobj_or_none(func)
# Return true only if...
return (
# This object is a pure-Python callable *AND*...
func_codeobj is not None and
# This callable's code object implies this callable to be an
# asynchronous coroutine.
func_codeobj.co_flags & CO_COROUTINE != 0
)
def is_func_async_generator(func: object) -> bool:
'''
``True`` only if the passed object is an **asynchronous generator**
(i.e., awaitable callable containing one or more ``yield`` expressions
satisfying the :class:`collections.abc.Awaitable` protocol by being
declared via the ``async def`` syntax and thus callable *only* when
preceded by comparable ``await`` syntax).
Parameters
----------
func : object
Object to be inspected.
Returns
----------
bool
``True`` only if this object is an asynchronous generator.
See Also
----------
:func:`inspect.isasyncgenfunction`
Stdlib function strongly inspiring this implementation.
'''
# Code object underlying this pure-Python callable if any *OR* "None".
func_codeobj = get_func_unwrapped_codeobj_or_none(func)
# Return true only if...
return (
# This object is a pure-Python callable *AND*...
func_codeobj is not None and
# This callable's code object implies this callable to be an
# asynchronous generator.
func_codeobj.co_flags & CO_ASYNC_GENERATOR != 0
)
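# Illustrative contrast between the two async testers above (not part of the
# original module; a hedged sketch only):
#
#     async def coro(): ...
#     async def agen(): yield 1
#
#     is_func_async_coroutine(coro)   # True
#     is_func_async_coroutine(agen)   # False
#     is_func_async_generator(agen)   # True
#     is_func_async_generator(coro)   # False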
# ....................{ TESTERS ~ sync }....................
def is_func_sync_generator(func: object) -> bool:
'''
    ``True`` only if the passed object is a **synchronous generator**
    (i.e., callable containing one or more ``yield`` expressions, declared
    with the ``def`` rather than ``async def`` keyword).
Parameters
----------
func : object
Object to be inspected.
Returns
----------
bool
``True`` only if this object is a synchronous generator.
See Also
----------
:func:`inspect.isgeneratorfunction`
Stdlib function strongly inspiring this implementation.
'''
# If this object is uncallable, immediately return False.
#
# Note this test is explicitly required to differentiate synchronous
# generator callables from synchronous generator objects (i.e., the objects
# they implicitly create and return). Whereas both asynchronous coroutine
# objects *AND* asynchronous generator objects do *NOT* contain code
# objects whose "CO_COROUTINE" and "CO_ASYNC_GENERATOR" flags are non-zero,
# synchronous generator objects do contain code objects whose
# "CO_GENERATOR" flag is non-zero. This implies synchronous generator
# callables to create and return synchronous generator objects that are
# themselves technically valid synchronous generator callables, which is
# absurd. We prohibit this ambiguity by differentiating the two here.
if not callable(func):
return False
# Else, this object is callable.
# Code object underlying this pure-Python callable if any *OR* "None".
func_codeobj = get_func_unwrapped_codeobj_or_none(func)
# Return true only if...
return (
# This object is a pure-Python callable *AND*...
func_codeobj is not None and
# This callable's code object implies this callable to be a
# synchronous generator.
func_codeobj.co_flags & CO_GENERATOR != 0
)
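# Illustrative distinction motivating the callable() guard above (not part of
# the original module; a hedged sketch only): generator *callables* pass the
# test, while the generator *objects* they return do not.
#
#     def sync_gen(): yield 1
#
#     is_func_sync_generator(sync_gen)     # True  -- callable with CO_GENERATOR set
#     is_func_sync_generator(sync_gen())   # False -- generator object is not callable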
|
pymoo/problems/constr_as_penalty.py
|
jarreguit/pymoo
| 762 |
140113
|
from pymoo.core.problem import Problem
from pymoo.problems.meta import MetaProblem
from pymoo.util.misc import at_least_2d_array
class ConstraintsAsPenalty(MetaProblem):
def __init__(self, problem, penalty=1e6):
super().__init__(problem)
self.penalty = penalty
        # set the number of constraints to zero, because they are now folded into the objective
self.n_constr = 0
def do(self, x, out, *args, **kwargs):
self.problem.do(x, out, *args, **kwargs)
if self.problem.has_constraints():
F, G = at_least_2d_array(out["F"]), at_least_2d_array(out["G"])
CV = Problem.calc_constraint_violation(G)
out["__F__"] = F
out["__G__"] = G
out["__CV__"] = CV
out["F"] = F + self.penalty * CV
out["G"] = None
|
airbyte-integrations/connectors/source-s3/unit_tests/test_abstract_file_parser.py
|
darian-heede/airbyte
| 6,215 |
140116
|
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import pyarrow as pa
import pytest
from airbyte_cdk import AirbyteLogger
from source_s3.source_files_abstract.formats.abstract_file_parser import AbstractFileParser
LOGGER = AirbyteLogger()
class TestAbstractFileParserStatics:
@pytest.mark.parametrize( # testing all datatypes as laid out here: https://json-schema.org/understanding-json-schema/reference/type.html
"input_json_type, output_pyarrow_type",
[
("string", pa.large_string()),
("number", pa.float64()),
("integer", pa.int64()),
("object", pa.large_string()),
("array", pa.large_string()),
("boolean", pa.bool_()),
("null", pa.large_string()),
],
)
def test_json_type_to_pyarrow_type(self, input_json_type, output_pyarrow_type):
# Json -> PyArrow direction
LOGGER.info(f"asserting that JSON type '{input_json_type}' converts to PyArrow type '{output_pyarrow_type}'...")
assert AbstractFileParser.json_type_to_pyarrow_type(input_json_type) == output_pyarrow_type
@pytest.mark.parametrize( # testing all datatypes as laid out here: https://arrow.apache.org/docs/python/api/datatypes.html
"input_pyarrow_types, output_json_type",
[
((pa.null(),), "string"), # null type
((pa.bool_(),), "boolean"), # boolean type
(
(pa.int8(), pa.int16(), pa.int32(), pa.int64(), pa.uint8(), pa.uint16(), pa.uint32(), pa.uint64()),
"integer",
), # integer types
((pa.float16(), pa.float32(), pa.float64(), pa.decimal128(5, 10), pa.decimal256(3, 8)), "number"), # number types
((pa.time32("s"), pa.time64("ns"), pa.timestamp("ms"), pa.date32(), pa.date64()), "string"), # temporal types
((pa.binary(), pa.large_binary()), "string"), # binary types
((pa.string(), pa.utf8(), pa.large_string(), pa.large_utf8()), "string"), # string types
((pa.list_(pa.string()), pa.large_list(pa.timestamp("us"))), "string"), # array types
((pa.map_(pa.string(), pa.float32()), pa.dictionary(pa.int16(), pa.list_(pa.string()))), "string"), # object types
],
)
def test_json_type_to_pyarrow_type_reverse(self, input_pyarrow_types, output_json_type):
# PyArrow -> Json direction (reverse=True)
for typ in input_pyarrow_types:
LOGGER.info(f"asserting that PyArrow type '{typ}' converts to JSON type '{output_json_type}'...")
assert AbstractFileParser.json_type_to_pyarrow_type(typ, reverse=True) == output_json_type
@pytest.mark.parametrize( # if expecting fail, put pyarrow_schema as None
"json_schema, pyarrow_schema",
[
(
{"a": "string", "b": "number", "c": "integer", "d": "object", "e": "array", "f": "boolean", "g": "null"},
{
"a": pa.large_string(),
"b": pa.float64(),
"c": pa.int64(),
"d": pa.large_string(),
"e": pa.large_string(),
"f": pa.bool_(),
"g": pa.large_string(),
},
),
({"single_column": "object"}, {"single_column": pa.large_string()}),
({}, {}),
({"a": "NOT A REAL TYPE", "b": "another fake type"}, {"a": pa.large_string(), "b": pa.large_string()}),
(["string", "object"], None), # bad input type
],
)
def test_json_schema_to_pyarrow_schema(self, json_schema, pyarrow_schema):
# Json -> PyArrow direction
if pyarrow_schema is not None:
assert AbstractFileParser.json_schema_to_pyarrow_schema(json_schema) == pyarrow_schema
else:
with pytest.raises(Exception) as e_info:
AbstractFileParser.json_schema_to_pyarrow_schema(json_schema)
LOGGER.debug(str(e_info))
@pytest.mark.parametrize( # if expecting fail, put json_schema as None
"pyarrow_schema, json_schema",
[
(
{
"a": pa.utf8(),
"b": pa.float16(),
"c": pa.uint32(),
"d": pa.map_(pa.string(), pa.float32()),
"e": pa.bool_(),
"f": pa.date64(),
},
{"a": "string", "b": "number", "c": "integer", "d": "string", "e": "boolean", "f": "string"},
),
({"single_column": pa.int32()}, {"single_column": "integer"}),
({}, {}),
({"a": "NOT A REAL TYPE", "b": "another fake type"}, {"a": "string", "b": "string"}),
(["string", "object"], None), # bad input type
],
)
def test_json_schema_to_pyarrow_schema_reverse(self, pyarrow_schema, json_schema):
# PyArrow -> Json direction (reverse=True)
if json_schema is not None:
assert AbstractFileParser.json_schema_to_pyarrow_schema(pyarrow_schema, reverse=True) == json_schema
else:
with pytest.raises(Exception) as e_info:
AbstractFileParser.json_schema_to_pyarrow_schema(pyarrow_schema, reverse=True)
LOGGER.debug(str(e_info))
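# Illustrative direct use of the mapping exercised above (not part of the
# original test module; a hedged sketch restating two of the parametrized
# assertions):
#
#     AbstractFileParser.json_type_to_pyarrow_type("integer")                   # pa.int64()
#     AbstractFileParser.json_type_to_pyarrow_type(pa.float32(), reverse=True)  # "number"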
|
contrib/memtest.py
|
achievement008/followthemoney
| 137 |
140121
|
<filename>contrib/memtest.py
import sys
import logging
import balkhash
from followthemoney import model
from followthemoney.types import registry
fmt = "%(name)s [%(levelname)s] %(message)s"
logging.basicConfig(stream=sys.stderr, level=logging.INFO, format=fmt)
registry.text.max_size = 1 * 1024 * 1024
dataset = balkhash.init("memtest", backend="LEVELDB")
with open("LICENSE", "r") as license_file:
    text = license_file.read()
bulk = dataset.bulk()
for i in range(1000000):
entity = model.make_entity("PlainText")
entity.id = "banana"
entity.add("indexText", "%s - %s" % (text, i))
bulk.put(entity, fragment=str(i))
print(i)
bulk.flush()
for entity in dataset.iterate():
print(entity)
|
anuga/operators/base_operator.py
|
samcom12/anuga_core
| 136 |
140144
|
<reponame>samcom12/anuga_core
from __future__ import print_function
from builtins import object
from anuga.utilities.system_tools import log_to_file
class Operator(object):
"""Operator - generic structure for a fractional operator
This is the base class for all fractional step operators
"""
counter = 0
def __init__(self,
domain,
description = None,
label = None,
logging = False,
verbose = False):
self.domain = domain
self.domain.set_fractional_step_operator(self)
# useful aliases
self.stage_c = self.domain.quantities['stage'].centroid_values
self.xmom_c = self.domain.quantities['xmomentum'].centroid_values
self.ymom_c = self.domain.quantities['ymomentum'].centroid_values
self.elev_c = self.domain.quantities['elevation'].centroid_values
self.coord_c = self.domain.centroid_coordinates
self.areas = self.domain.areas
if domain.numproc > 1:
msg = 'Not implemented to run in parallel'
assert self.parallel_safe(), msg
if description is None:
self.description = ' '
else:
self.description = description
self.set_label(label)
self.verbose = verbose
# Keep count of inlet operator
Operator.counter += 1
self.set_logging(logging)
def __call__(self):
#timestep = self.domain.get_timestep()
raise Exception('Need to implement __call__ for your operator')
def get_timestep(self):
return self.domain.get_timestep()
def get_time(self):
return self.domain.get_time()
def parallel_safe(self):
"""By default an operator is not parallel safe
"""
return False
def statistics(self):
message = 'You need to implement operator statistics for your operator'
return message
def timestepping_statistics(self):
message = 'You need to implement timestepping statistics for your operator'
return message
def print_statistics(self):
print(self.statistics())
def print_timestepping_statistics(self):
print(self.timestepping_statistics())
def log_timestepping_statistics(self):
from anuga.utilities.system_tools import log_to_file
if self.logging:
log_to_file(self.log_filename, self.timestepping_statistics())
def set_label(self, label=None):
if label is None:
self.label = "operator_%g" % Operator.counter
else:
self.label = label + '_%g' % Operator.counter
def set_logging(self, flag=True):
self.logging = flag
def activate_logging(self):
# If flag is true open file with mode = "w" to form a clean file for logging
if self.logging:
self.log_filename = self.label + '.log'
log_to_file(self.log_filename, self.statistics(), mode='w')
#log_to_file(self.log_filename, 'time,Q')
#log_to_file(self.log_filename, self.culvert_type)
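# Illustrative sketch of a minimal concrete operator (not part of the original
# module; the class name and behaviour are assumptions for demonstration only):
#
#     class LiftBedOperator(Operator):
#         """Example operator that raises the bed elevation a little each step."""
#
#         def __call__(self):
#             self.elev_c[:] += 0.001 * self.get_timestep()
#
#         def parallel_safe(self):
#             return True
#
#         def statistics(self):
#             return 'LiftBedOperator (%s)' % self.label
#
#         def timestepping_statistics(self):
#             return 'bed lifted at t = %g' % self.get_time()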
|
data_collection/gazette/spiders/sc_garopaba.py
|
kaiocp/querido-diario
| 454 |
140153
|
from gazette.spiders.base.fecam import FecamGazetteSpider
class ScGaropabaSpider(FecamGazetteSpider):
name = "sc_garopaba"
FECAM_QUERY = "cod_entidade:98"
TERRITORY_ID = "4205704"
|
lib/datasets/textdataset_catalog.py
|
Ocelot7777/masktextspotter.caffe2
| 287 |
140160
|
<gh_stars>100-1000
# Modified by <NAME>
# ##############################################################################
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Collection of available datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
# Path to data dir
_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
_CACHE_DIR = os.path.join(os.path.dirname(__file__), 'cache')
if not os.path.exists(_CACHE_DIR):
os.mkdir(_CACHE_DIR)
IM_DIR = 'im_dir'
ANN_FN = 'ann_fn'
IM_LIST = 'im_list'
IM_PREFIX = 'image_prefix'
# Available datasets
DATASETS = {
'synth_train': {
IM_DIR:
_DATA_DIR + '/synth/train_images',
ANN_FN:
_DATA_DIR + '/synth/train_gts',
IM_LIST:
_DATA_DIR + '/synth/train_list.txt'
},
'synth0_train': {
IM_DIR:
_DATA_DIR + '/synth0/train_images',
ANN_FN:
_DATA_DIR + '/synth0/train_gts',
IM_LIST:
_DATA_DIR + '/synth0/train_list.txt'
},
'icdar2013_train': {
IM_DIR:
_DATA_DIR + '/icdar2013/train_images',
# use filtered validation as there is an issue converting contours
ANN_FN:
_DATA_DIR + '/icdar2013/train_gts',
IM_LIST:
_DATA_DIR + '/icdar2013/train_list.txt'
},
'icdar2013_test': {
IM_DIR:
_DATA_DIR + '/icdar2013/test_images',
# use filtered validation as there is an issue converting contours
ANN_FN:
_DATA_DIR + '/icdar2013/test_gts',
IM_LIST:
_DATA_DIR + '/icdar2013/test_list.txt',
},
'icdar2015_train': {
IM_DIR:
_DATA_DIR + '/icdar2015/train_images',
# use filtered validation as there is an issue converting contours
ANN_FN:
_DATA_DIR + '/icdar2015/train_gts',
IM_LIST:
_DATA_DIR + '/icdar2015/train_list.txt'
},
'icdar2015_test': {
IM_DIR:
_DATA_DIR + '/icdar2015/test_images',
# use filtered validation as there is an issue converting contours
ANN_FN:
_DATA_DIR + '/icdar2015/test_gts',
IM_LIST:
_DATA_DIR + '/icdar2015/test_list.txt'
},
'totaltext_train': {
IM_DIR:
_DATA_DIR + '/totaltext/train_images',
# use filtered validation as there is an issue converting contours
ANN_FN:
_DATA_DIR + '/totaltext/train_gts',
IM_LIST:
_DATA_DIR + '/totaltext/train_list.txt'
},
'totaltext_test': {
IM_DIR:
_DATA_DIR + '/totaltext/test_images',
# use filtered validation as there is an issue converting contours
ANN_FN:
_DATA_DIR + '/totaltext/test_gts',
IM_LIST:
_DATA_DIR + '/totaltext/test_list.txt'
},
'scut-eng-char_train': {
IM_DIR:
_DATA_DIR + '/scut-eng-char/train_images',
# use filtered validation as there is an issue converting contours
ANN_FN:
_DATA_DIR + '/scut-eng-char/train_gts',
IM_LIST:
_DATA_DIR + '/scut-eng-char/train_list.txt'
}
}
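# Illustrative lookup sketch (not part of the original module; a hedged example
# of how callers typically consume this catalog):
#
#     entry = DATASETS['icdar2013_train']
#     image_dir = entry[IM_DIR]    # .../data/icdar2013/train_images
#     gt_dir = entry[ANN_FN]       # .../data/icdar2013/train_gts
#     list_file = entry[IM_LIST]   # .../data/icdar2013/train_list.txt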
|
tools/train_classifier.py
|
TwentyBN/Sense
| 483 |
140173
|
#!/usr/bin/env python
"""
Finetuning script that can be used to train a custom classifier on top of our pretrained models.
Usage:
train_classifier.py --path_in=PATH
[--model_name=NAME]
[--model_version=VERSION]
[--num_layers_to_finetune=NUM]
[--epochs=NUM]
[--use_gpu]
[--path_out=PATH]
[--temporal_training]
[--resume]
[--overwrite]
train_classifier.py (-h | --help)
Options:
--path_in=PATH Path to the dataset folder.
Important: this folder should follow the structure described in the README.
--model_name=NAME Name of the backbone model to be used.
--model_version=VERSION Version of the backbone model to be used.
--num_layers_to_finetune=NUM Number of layers to finetune in addition to the final layer [default: 9].
--epochs=NUM Number of epochs to run [default: 80].
--path_out=PATH Where to save results. Will default to `path_in` if not provided.
--temporal_training Use this flag if your dataset has been annotated with the temporal
annotations tool
--resume Initialize weights from the last saved checkpoint and restart training
--overwrite Allow overwriting existing checkpoint files in the output folder (path_out)
"""
import datetime
import json
import os
import sys
from docopt import docopt
from natsort import natsorted
from natsort import ns
import torch.utils.data
from sense.downstream_tasks.nn_utils import LogisticRegression
from sense.downstream_tasks.nn_utils import Pipe
from sense.finetuning import extract_features
from sense.finetuning import generate_data_loader
from sense.finetuning import set_internal_padding_false
from sense.finetuning import training_loops
from sense.loading import build_backbone_network
from sense.loading import get_relevant_weights
from sense.loading import ModelConfig
from tools.sense_studio.project_utils import load_project_config
from sense.utils import clean_pipe_state_dict_key
from tools import directories
SUPPORTED_MODEL_CONFIGURATIONS = [
ModelConfig('StridedInflatedEfficientNet', 'pro', []),
ModelConfig('StridedInflatedMobileNetV2', 'pro', []),
ModelConfig('StridedInflatedEfficientNet', 'lite', []),
ModelConfig('StridedInflatedMobileNetV2', 'lite', []),
]
def train_model(path_in, path_out, model_name, model_version, num_layers_to_finetune, epochs,
use_gpu=True, overwrite=True, temporal_training=None, resume=False, log_fn=print,
confmat_event=None):
os.makedirs(path_out, exist_ok=True)
# Check for existing files
saved_files = ["last_classifier.checkpoint", "best_classifier.checkpoint", "config.json", "label2int.json",
"confusion_matrix.png", "confusion_matrix.npy"]
if not overwrite and any(os.path.exists(os.path.join(path_out, file)) for file in saved_files):
print(f"Warning: This operation will overwrite files in {path_out}")
while True:
confirmation = input("Are you sure? Add --overwrite to hide this warning. (Y/N) ")
if confirmation.lower() == "y":
break
elif confirmation.lower() == "n":
sys.exit()
else:
print('Invalid input')
# Load weights
selected_config, weights = get_relevant_weights(
SUPPORTED_MODEL_CONFIGURATIONS,
model_name,
model_version,
log_fn=log_fn,
)
backbone_weights = weights['backbone']
if resume:
# Load the last classifier
checkpoint_classifier = torch.load(os.path.join(path_out, 'last_classifier.checkpoint'))
else:
checkpoint_classifier = None
# Load backbone network
backbone_network = build_backbone_network(selected_config, backbone_weights,
weights_finetuned=checkpoint_classifier)
# Get the required temporal dimension of feature tensors in order to
# finetune the provided number of layers
if num_layers_to_finetune > 0:
num_timesteps = backbone_network.num_required_frames_per_layer.get(-num_layers_to_finetune)
if not num_timesteps:
# Remove 1 because we added 0 to temporal_dependencies
num_layers = len(backbone_network.num_required_frames_per_layer) - 1
msg = (f'ERROR - Num of layers to finetune not compatible. '
f'Must be an integer between 0 and {num_layers}')
log_fn(msg)
raise IndexError(msg)
else:
num_timesteps = 1
# Extract layers to finetune
if num_layers_to_finetune > 0:
fine_tuned_layers = backbone_network.cnn[-num_layers_to_finetune:]
backbone_network.cnn = backbone_network.cnn[0:-num_layers_to_finetune]
project_config = load_project_config(path_in)
# Find label names
if project_config:
label_names = project_config['classes'].keys()
else:
label_names = os.listdir(directories.get_videos_dir(path_in, 'train'))
label_names = natsorted(label_names, alg=ns.IC)
label_names = [x for x in label_names if not x.startswith('.')]
label_names_temporal = ['background']
if project_config:
tags = project_config['tags']
label_names_temporal.extend(tags.values())
else:
for label in label_names:
label_names_temporal.extend([f'{label}_tag1', f'{label}_tag2'])
label_names_temporal = natsorted(label_names_temporal, alg=ns.IC)
label2int = {name: index for index, name in enumerate(label_names)}
label2int_temporal_annotation = {name: index for index, name in enumerate(label_names_temporal)}
# Extract features for all videos
extract_features(path_in, label_names, selected_config, backbone_network, num_layers_to_finetune, use_gpu,
num_timesteps=num_timesteps, log_fn=log_fn)
extractor_stride = backbone_network.num_required_frames_per_layer_padding[0]
# Create the data loaders
features_dir = directories.get_features_dir(path_in, 'train', selected_config, num_layers_to_finetune)
tags_dir = directories.get_tags_dir(path_in, 'train')
train_loader = generate_data_loader(
project_config,
features_dir,
tags_dir,
label_names,
label2int,
label2int_temporal_annotation,
num_timesteps=num_timesteps,
stride=extractor_stride,
temporal_annotation_only=temporal_training,
)
features_dir = directories.get_features_dir(path_in, 'valid', selected_config, num_layers_to_finetune)
tags_dir = directories.get_tags_dir(path_in, 'valid')
valid_loader = generate_data_loader(
project_config,
features_dir,
tags_dir,
label_names,
label2int,
label2int_temporal_annotation,
num_timesteps=None,
batch_size=1,
shuffle=False,
stride=extractor_stride,
temporal_annotation_only=temporal_training,
)
# Check if the data is loaded fully
if not train_loader or not valid_loader:
log_fn("ERROR - \n "
"\tMissing annotations for train or valid set.\n"
"\tHint: Check if tags_train and tags_valid directories exist.\n")
return
# Modify the network to generate the training network on top of the features
if temporal_training:
num_output = len(label_names_temporal)
else:
num_output = len(label_names)
# modify the network to generate the training network on top of the features
gesture_classifier = LogisticRegression(num_in=backbone_network.feature_dim,
num_out=num_output,
use_softmax=False)
if resume:
gesture_classifier.load_state_dict(checkpoint_classifier)
if num_layers_to_finetune > 0:
# remove internal padding for training
fine_tuned_layers.apply(set_internal_padding_false)
net = Pipe(fine_tuned_layers, gesture_classifier)
else:
net = gesture_classifier
net.train()
if use_gpu:
net = net.cuda()
lr_schedule = {0: 0.0001, int(epochs / 2): 0.00001} if epochs > 1 else {0: 0.0001}
num_epochs = epochs
# Save training config and label2int dictionary
config = {
'backbone_name': selected_config.model_name,
'backbone_version': selected_config.version,
'num_layers_to_finetune': num_layers_to_finetune,
'classifier': str(gesture_classifier),
'temporal_training': temporal_training,
'lr_schedule': lr_schedule,
'num_epochs': num_epochs,
'start_time': str(datetime.datetime.now()),
'end_time': '',
}
with open(os.path.join(path_out, 'config.json'), 'w') as f:
json.dump(config, f, indent=2)
with open(os.path.join(path_out, 'label2int.json'), 'w') as f:
json.dump(label2int_temporal_annotation if temporal_training else label2int, f, indent=2)
# Train model
best_model_state_dict = training_loops(net, train_loader, valid_loader, use_gpu, num_epochs, lr_schedule,
label_names, label_names_temporal, path_out,
temporal_annotation_training=temporal_training, log_fn=log_fn,
confmat_event=confmat_event)
# Save best model
if isinstance(net, Pipe):
best_model_state_dict = {clean_pipe_state_dict_key(key): value
for key, value in best_model_state_dict.items()}
torch.save(best_model_state_dict, os.path.join(path_out, "best_classifier.checkpoint"))
config['end_time'] = str(datetime.datetime.now())
with open(os.path.join(path_out, 'config.json'), 'w') as f:
json.dump(config, f, indent=2)
if __name__ == "__main__":
# Parse arguments
args = docopt(__doc__)
_path_in = args['--path_in']
_path_out = args['--path_out'] or os.path.join(_path_in, "checkpoints")
_use_gpu = args['--use_gpu']
_model_name = args['--model_name'] or None
_model_version = args['--model_version'] or None
_num_layers_to_finetune = int(args['--num_layers_to_finetune'])
_epochs = int(args['--epochs'])
_temporal_training = args['--temporal_training']
_resume = args['--resume']
_overwrite = args['--overwrite']
train_model(
path_in=_path_in,
path_out=_path_out,
model_name=_model_name,
model_version=_model_version,
num_layers_to_finetune=_num_layers_to_finetune,
epochs=_epochs,
use_gpu=_use_gpu,
overwrite=_overwrite,
temporal_training=_temporal_training,
resume=_resume,
)
|