max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
barbican/common/policies/quotas.py | stackhpc/barbican | 177 | 72270 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
_READER = "role:reader"
_SYSTEM_ADMIN = "role:admin and system_scope:all"
_SYSTEM_READER = "role:reader and system_scope:all"
rules = [
policy.DocumentedRuleDefault(
name='quotas:get',
check_str=f'rule:all_users or {_READER}',
scope_types=['project'],
description='List quotas for the project the user belongs to.',
operations=[
{
'path': '/v1/quotas',
'method': 'GET'
}
]
),
policy.DocumentedRuleDefault(
name='project_quotas:get',
check_str=f'rule:service_admin or {_SYSTEM_READER}',
scope_types=['system'],
description='List quotas for the specified project.',
operations=[
{
'path': '/v1/project-quotas',
'method': 'GET'
},
{
'path': '/v1/project-quotas/{uuid}',
'method': 'GET'
}
]
),
policy.DocumentedRuleDefault(
name='project_quotas:put',
check_str=f'rule:service_admin or {_SYSTEM_ADMIN}',
scope_types=['system'],
description='Create or update the configured project quotas for '
'the project with the specified UUID.',
operations=[
{
'path': '/v1/project-quotas/{uuid}',
'method': 'PUT'
}
]
),
policy.DocumentedRuleDefault(
name='project_quotas:delete',
check_str=f'rule:service_admin or {_SYSTEM_ADMIN}',
scope_types=['system'],
description='Delete the project quotas configuration for the '
'project with the requested UUID.',
operations=[
{
                'path': '/v1/project-quotas/{uuid}',
'method': 'DELETE'
}
]
),
]
def list_rules():
return rules
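# A minimal, hedged usage sketch (not part of the upstream module): oslo.policy
# consumes these defaults via list_rules(); iterating the returned
# DocumentedRuleDefault objects is a quick way to inspect name/check_str pairs.
if __name__ == '__main__':
    for rule in list_rules():
        print(f'{rule.name}: {rule.check_str}')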
|
pytorch_lightning/accelerators/cpu.py | krfricke/pytorch-lightning | 3,469 | 72275 |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Union
import torch
from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning.utilities import device_parser
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.imports import _PSUTIL_AVAILABLE
from pytorch_lightning.utilities.types import _DEVICE
class CPUAccelerator(Accelerator):
"""Accelerator for CPU devices."""
def setup_environment(self, root_device: torch.device) -> None:
"""
Raises:
MisconfigurationException:
If the selected device is not CPU.
"""
super().setup_environment(root_device)
if root_device.type != "cpu":
raise MisconfigurationException(f"Device should be CPU, got {root_device} instead.")
def get_device_stats(self, device: _DEVICE) -> Dict[str, Any]:
"""Get CPU stats from ``psutil`` package."""
return get_cpu_stats()
@staticmethod
def parse_devices(devices: Union[int, str, List[int]]) -> int:
"""Accelerator device parsing logic."""
devices = device_parser.parse_cpu_cores(devices)
return devices
@staticmethod
def get_parallel_devices(devices: Union[int, str, List[int]]) -> List[torch.device]:
"""Gets parallel devices for the Accelerator."""
devices = device_parser.parse_cpu_cores(devices)
return [torch.device("cpu")] * devices
@staticmethod
def auto_device_count() -> int:
"""Get the devices when set to auto."""
return 1
@staticmethod
def is_available() -> bool:
"""CPU is always available for execution."""
return True
@classmethod
def register_accelerators(cls, accelerator_registry: Dict) -> None:
accelerator_registry.register(
"cpu",
cls,
description=f"{cls.__class__.__name__}",
)
# CPU device metrics
_CPU_VM_PERCENT = "cpu_vm_percent"
_CPU_PERCENT = "cpu_percent"
_CPU_SWAP_PERCENT = "cpu_swap_percent"
def get_cpu_stats() -> Dict[str, float]:
if not _PSUTIL_AVAILABLE:
raise ModuleNotFoundError(
"Fetching CPU device stats requires `psutil` to be installed."
" Install it by running `pip install -U psutil`."
)
import psutil
return {
_CPU_VM_PERCENT: psutil.virtual_memory().percent,
_CPU_PERCENT: psutil.cpu_percent(),
_CPU_SWAP_PERCENT: psutil.swap_memory().percent,
}
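# Hedged example, not part of the upstream file: the static helpers above can be
# exercised directly. parse_devices normalizes an int/str core count and
# get_parallel_devices expands it into that many torch.device("cpu") handles.
if __name__ == "__main__":
    num_cores = CPUAccelerator.parse_devices("2")             # -> 2
    devices = CPUAccelerator.get_parallel_devices(num_cores)  # -> [device('cpu'), device('cpu')]
    print(num_cores, devices, CPUAccelerator.auto_device_count())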
|
tests/test_display.py | antoinedemathelin/ruptures | 942 | 72279 |
import pytest
from ruptures.datasets import pw_constant
from ruptures.show import display
from ruptures.show.display import MatplotlibMissingError
@pytest.fixture(scope="module")
def signal_bkps():
signal, bkps = pw_constant()
return signal, bkps
def test_display_with_options(signal_bkps):
try:
signal, bkps = signal_bkps
fig, axarr = display(signal, bkps)
fig, axarr = display(signal, bkps, bkps)
figsize = (20, 10) # figure size
fig, axarr = display(
signal,
bkps,
figsize=figsize,
)
fig, axarr = display(
signal[:, 0],
bkps,
figsize=figsize,
)
except MatplotlibMissingError:
pytest.skip("matplotlib is not installed")
def test_display_without_options(signal_bkps):
try:
signal, bkps = signal_bkps
fig, axarr = display(signal, bkps)
fig, axarr = display(signal, bkps, bkps)
figsize = (20, 10) # figure size
fig, axarr = display(signal, bkps)
fig, axarr = display(signal[:, 0], bkps)
except MatplotlibMissingError:
pytest.skip("matplotlib is not installed")
def test_display_with_new_options(signal_bkps):
try:
signal, bkps = signal_bkps
fig, axarr = display(signal, bkps)
fig, axarr = display(signal, bkps, bkps)
fig, axarr = display(signal, bkps, facecolor="k", edgecolor="b")
fig, axarr = display(signal[:, 0], bkps, facecolor="k", edgecolor="b")
except MatplotlibMissingError:
pytest.skip("matplotlib is not installed")
def test_display_with_computed_chg_pts_options(signal_bkps):
try:
signal, bkps = signal_bkps
fig, axarr = display(signal, bkps)
fig, axarr = display(signal, bkps, bkps)
fig, axarr = display(signal, bkps, bkps, computed_chg_pts_color="k")
fig, axarr = display(
signal, bkps, bkps, computed_chg_pts_color="k", computed_chg_pts_linewidth=3
)
fig, axarr = display(
signal,
bkps,
bkps,
computed_chg_pts_color="k",
computed_chg_pts_linewidth=3,
computed_chg_pts_linestyle="--",
)
fig, axarr = display(
signal,
bkps,
bkps,
computed_chg_pts_color="k",
computed_chg_pts_linewidth=3,
computed_chg_pts_linestyle="--",
computed_chg_pts_alpha=1.0,
)
except MatplotlibMissingError:
pytest.skip("matplotlib is not installed")
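# Hedged standalone sketch mirroring the fixture above (not part of the test
# suite): pw_constant() returns a piecewise-constant signal with its true
# breakpoints, and display() returns a matplotlib (figure, axes) pair when
# matplotlib is available.
if __name__ == "__main__":
    signal, bkps = pw_constant()
    fig, axarr = display(signal, bkps, figsize=(10, 6))
    fig.savefig("display_example.png")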
|
src/genie/libs/parser/iosxr/tests/ShowPceIPV4PeerPrefix/cli/equal/golden_output_expected.py | balmasea/genieparser | 204 | 72280 |
expected_output = {
'nodes': {
1: {
'te_router_id': '192.168.0.4',
'host_name': 'rtrD',
'isis_system_id': [
'1921.68ff.1004 level-1',
'1921.68ff.1004 level-2',
'1921.68ff.1004 level-2'],
'asn': [
65001,
65001,
65001],
'domain_id': [
1111,
1111,
9999],
'advertised_prefixes': [
'192.168.0.4',
'192.168.0.4',
'192.168.0.4',
'192.168.0.6']},
2: {
'te_router_id': '192.168.0.1',
'host_name': 'rtrA',
'isis_system_id': ['1921.68ff.1001 level-2'],
'advertised_prefixes': ['192.168.0.1']}}}
|
DQMOffline/EGamma/test/EgammaAnalyzers_cfg.py | ckamtsikis/cmssw | 852 | 72284 |
import sys
import os
import electronDbsDiscovery
import FWCore.ParameterSet.Config as cms
process = cms.Process("testEgammaAnalyzers")
process.DQMStore = cms.Service("DQMStore")
process.load("DQMServices.Components.DQMStoreStats_cfi")
#from DQMServices.Components.DQMStoreStats_cfi import *
#dqmStoreStats.runOnEndJob = cms.untracked.bool(True)
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(-1))
process.source = cms.Source ("PoolSource",fileNames = cms.untracked.vstring(),secondaryFileNames = cms.untracked.vstring())
process.source.fileNames.extend(electronDbsDiscovery.search())
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.load("DQMOffline.EGamma.egammaDQMOffline_cff")
process.dqmElectronTagProbeAnalysis.OutputFile = cms.string(os.environ['TEST_HISTOS_FILE'])
process.p = cms.Path(process.egammaDQMOffline*process.dqmStoreStats)
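# Hedged usage note (not part of the original configuration): TEST_HISTOS_FILE
# must be exported before cmsRun evaluates this file, since it is read through
# os.environ above, e.g.
#   export TEST_HISTOS_FILE=electronHistos.root
#   cmsRun EgammaAnalyzers_cfg.py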
|
seahub/share/migrations/0001_initial.py | MJochim/seahub | 420 | 72287 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-21 08:43
import datetime
from django.db import migrations, models
import django.db.models.deletion
import seahub.base.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AnonymousShare',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('repo_owner', seahub.base.fields.LowerCaseCharField(max_length=255)),
('repo_id', models.CharField(max_length=36)),
('anonymous_email', seahub.base.fields.LowerCaseCharField(max_length=255)),
('token', models.CharField(max_length=25, unique=True)),
],
),
migrations.CreateModel(
name='ExtraGroupsSharePermission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('repo_id', models.CharField(db_index=True, max_length=36)),
('group_id', models.IntegerField(db_index=True)),
('permission', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='ExtraSharePermission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('repo_id', models.CharField(db_index=True, max_length=36)),
('share_to', models.CharField(db_index=True, max_length=255)),
('permission', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='FileShare',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', seahub.base.fields.LowerCaseCharField(db_index=True, max_length=255)),
('repo_id', models.CharField(db_index=True, max_length=36)),
('path', models.TextField()),
('token', models.CharField(max_length=100, unique=True)),
('ctime', models.DateTimeField(default=datetime.datetime.now)),
('view_cnt', models.IntegerField(default=0)),
('s_type', models.CharField(db_index=True, default=b'f', max_length=2)),
('password', models.CharField(max_length=128, null=True)),
('expire_date', models.DateTimeField(null=True)),
('permission', models.CharField(choices=[(b'view_download', b'View and download'), (b'view_only', b'Disable download')], db_index=True, default=b'view_download', max_length=50)),
],
),
migrations.CreateModel(
name='OrgFileShare',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('org_id', models.IntegerField(db_index=True)),
('file_share', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='share.FileShare')),
],
),
migrations.CreateModel(
name='PrivateFileDirShare',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('from_user', seahub.base.fields.LowerCaseCharField(db_index=True, max_length=255)),
('to_user', seahub.base.fields.LowerCaseCharField(db_index=True, max_length=255)),
('repo_id', models.CharField(db_index=True, max_length=36)),
('path', models.TextField()),
('token', models.CharField(max_length=10, unique=True)),
('permission', models.CharField(max_length=5)),
('s_type', models.CharField(default=b'f', max_length=5)),
],
),
migrations.CreateModel(
name='UploadLinkShare',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', seahub.base.fields.LowerCaseCharField(db_index=True, max_length=255)),
('repo_id', models.CharField(db_index=True, max_length=36)),
('path', models.TextField()),
('token', models.CharField(max_length=100, unique=True)),
('ctime', models.DateTimeField(default=datetime.datetime.now)),
('view_cnt', models.IntegerField(default=0)),
('password', models.CharField(max_length=128, null=True)),
('expire_date', models.DateTimeField(null=True)),
],
),
]
|
edb/edgeql/compiler/conflicts.py | aaronbrighton/edgedb | 7,302 | 72290 |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Compilation of DML exclusive constraint conflict handling."""
from __future__ import annotations
from typing import *
from edb import errors
from edb.common import context as pctx
from edb.ir import ast as irast
from edb.ir import typeutils
from edb.schema import constraints as s_constr
from edb.schema import name as s_name
from edb.schema import objtypes as s_objtypes
from edb.schema import pointers as s_pointers
from edb.schema import utils as s_utils
from edb.edgeql import ast as qlast
from edb.edgeql import utils as qlutils
from . import astutils
from . import context
from . import dispatch
from . import inference
from . import setgen
from . import typegen
def _compile_conflict_select(
stmt: irast.MutatingStmt,
subject_typ: s_objtypes.ObjectType,
*,
for_inheritance: bool,
fake_dml_set: Optional[irast.Set],
obj_constrs: Sequence[s_constr.Constraint],
constrs: Dict[str, Tuple[s_pointers.Pointer, List[s_constr.Constraint]]],
parser_context: Optional[pctx.ParserContext],
ctx: context.ContextLevel,
) -> Optional[qlast.Expr]:
"""Synthesize a select of conflicting objects
... for a single object type. This gets called once for each ancestor
type that provides constraints to the type being inserted.
`cnstrs` contains the constraints to consider.
"""
# Find which pointers we need to grab
needed_ptrs = set(constrs)
for constr in obj_constrs:
subjexpr = constr.get_subjectexpr(ctx.env.schema)
assert subjexpr
needed_ptrs |= qlutils.find_subject_ptrs(subjexpr.qlast)
wl = list(needed_ptrs)
ptr_anchors = {}
while wl:
p = wl.pop()
ptr = subject_typ.getptr(ctx.env.schema, s_name.UnqualName(p))
if expr := ptr.get_expr(ctx.env.schema):
assert isinstance(expr.qlast, qlast.Expr)
ptr_anchors[p] = expr.qlast
for ref in qlutils.find_subject_ptrs(expr.qlast):
if ref not in needed_ptrs:
wl.append(ref)
needed_ptrs.add(ref)
ctx.anchors = ctx.anchors.copy()
# If we are given a fake_dml_set to directly represent the result
# of our DML, use that instead of populating the result.
if fake_dml_set:
for p in needed_ptrs | {'id'}:
ptr = subject_typ.getptr(ctx.env.schema, s_name.UnqualName(p))
val = setgen.extend_path(fake_dml_set, ptr, ctx=ctx)
ptr_anchors[p] = ctx.create_anchor(val, p)
# Find the IR corresponding to the fields we care about and
# produce anchors for them
ptrs_in_shape = set()
for elem, _ in stmt.subject.shape:
assert elem.rptr is not None
name = elem.rptr.ptrref.shortname.name
ptrs_in_shape.add(name)
if name in needed_ptrs and name not in ptr_anchors:
assert elem.expr
if inference.infer_volatility(elem.expr, ctx.env).is_volatile():
if for_inheritance:
error = (
'INSERT does not support volatile properties with '
'exclusive constraints when another statement in '
'the same query modifies a related type'
)
else:
error = (
'INSERT UNLESS CONFLICT ON does not support volatile '
'properties'
)
raise errors.UnsupportedFeatureError(
error, context=parser_context
)
# FIXME: The wrong thing will definitely happen if there are
# volatile entries here
ptr_anchors[name] = ctx.create_anchor(
setgen.ensure_set(elem.expr, ctx=ctx), name)
if for_inheritance and not ptrs_in_shape:
return None
# Fill in empty sets for pointers that are needed but not present
present_ptrs = set(ptr_anchors)
for p in (needed_ptrs - present_ptrs):
ptr = subject_typ.getptr(ctx.env.schema, s_name.UnqualName(p))
typ = ptr.get_target(ctx.env.schema)
assert typ
ptr_anchors[p] = qlast.TypeCast(
expr=qlast.Set(elements=[]),
type=typegen.type_to_ql_typeref(typ, ctx=ctx))
if not ptr_anchors:
raise errors.QueryError(
'INSERT UNLESS CONFLICT property requires matching shape',
context=parser_context,
)
conds: List[qlast.Expr] = []
for ptrname, (ptr, ptr_cnstrs) in constrs.items():
if ptrname not in present_ptrs:
continue
anchor = qlutils.subject_paths_substitute(
ptr_anchors[ptrname], ptr_anchors)
ptr_val = qlast.Path(partial=True, steps=[
qlast.Ptr(ptr=qlast.ObjectRef(name=ptrname))
])
ptr, ptr_cnstrs = constrs[ptrname]
ptr_card = ptr.get_cardinality(ctx.env.schema)
for cnstr in ptr_cnstrs:
lhs: qlast.Expr = anchor
rhs: qlast.Expr = ptr_val
# If there is a subjectexpr, substitute our lhs and rhs in
# for __subject__ in the subjectexpr and compare *that*
if (subjectexpr := cnstr.get_subjectexpr(ctx.env.schema)):
assert isinstance(subjectexpr.qlast, qlast.Expr)
lhs = qlutils.subject_substitute(subjectexpr.qlast, lhs)
rhs = qlutils.subject_substitute(subjectexpr.qlast, rhs)
conds.append(qlast.BinOp(
op='=' if ptr_card.is_single() else 'IN',
left=lhs, right=rhs,
))
insert_subject = qlast.Path(steps=[
s_utils.name_to_ast_ref(subject_typ.get_name(ctx.env.schema))])
for constr in obj_constrs:
# TODO: learn to skip irrelevant ones for UPDATEs at least?
subjectexpr = constr.get_subjectexpr(ctx.env.schema)
assert subjectexpr and isinstance(subjectexpr.qlast, qlast.Expr)
lhs = qlutils.subject_paths_substitute(subjectexpr.qlast, ptr_anchors)
rhs = qlutils.subject_substitute(subjectexpr.qlast, insert_subject)
conds.append(qlast.BinOp(op='=', left=lhs, right=rhs))
if not conds:
return None
# We use `any` to compute the disjunction here because some might
# be empty.
if len(conds) == 1:
cond = conds[0]
else:
cond = qlast.FunctionCall(
func='any',
args=[qlast.Set(elements=conds)],
)
# For the result filtering we need to *ignore* the same object
if fake_dml_set:
anchor = qlutils.subject_paths_substitute(
ptr_anchors['id'], ptr_anchors)
ptr_val = qlast.Path(partial=True, steps=[
qlast.Ptr(ptr=qlast.ObjectRef(name='id'))
])
cond = qlast.BinOp(
op='AND',
left=cond,
right=qlast.BinOp(op='!=', left=anchor, right=ptr_val),
)
# Produce a query that finds the conflicting objects
select_ast = qlast.DetachedExpr(
expr=qlast.SelectQuery(result=insert_subject, where=cond)
)
return select_ast
def _constr_matters(
constr: s_constr.Constraint, ctx: context.ContextLevel,
) -> bool:
schema = ctx.env.schema
return (
not constr.generic(schema)
and not constr.get_delegated(schema)
and (
constr.get_owned(schema)
or all(anc.get_delegated(schema) or anc.generic(schema) for anc
in constr.get_ancestors(schema).objects(schema))
)
)
PointerConstraintMap = Dict[
str,
Tuple[s_pointers.Pointer, List[s_constr.Constraint]],
]
ConstraintPair = Tuple[PointerConstraintMap, List[s_constr.Constraint]]
ConflictTypeMap = Dict[s_objtypes.ObjectType, ConstraintPair]
def _split_constraints(
obj_constrs: Sequence[s_constr.Constraint],
constrs: PointerConstraintMap,
ctx: context.ContextLevel,
) -> ConflictTypeMap:
schema = ctx.env.schema
type_maps: ConflictTypeMap = {}
# Split up pointer constraints by what object types they come from
for name, (_, p_constrs) in constrs.items():
for p_constr in p_constrs:
ancs = (p_constr,) + p_constr.get_ancestors(schema).objects(schema)
for anc in ancs:
if not _constr_matters(anc, ctx):
continue
p_ptr = anc.get_subject(schema)
assert isinstance(p_ptr, s_pointers.Pointer)
obj = p_ptr.get_source(schema)
assert isinstance(obj, s_objtypes.ObjectType)
map, _ = type_maps.setdefault(obj, ({}, []))
_, entry = map.setdefault(name, (p_ptr, []))
entry.append(anc)
# Split up object constraints by what object types they come from
for obj_constr in obj_constrs:
ancs = (obj_constr,) + obj_constr.get_ancestors(schema).objects(schema)
for anc in ancs:
if not _constr_matters(anc, ctx):
continue
obj = anc.get_subject(schema)
assert isinstance(obj, s_objtypes.ObjectType)
_, o_constr_entry = type_maps.setdefault(obj, ({}, []))
o_constr_entry.append(anc)
return type_maps
def compile_conflict_select(
stmt: irast.MutatingStmt,
subject_typ: s_objtypes.ObjectType,
*,
for_inheritance: bool=False,
fake_dml_set: Optional[irast.Set]=None,
obj_constrs: Sequence[s_constr.Constraint],
constrs: PointerConstraintMap,
parser_context: Optional[pctx.ParserContext],
ctx: context.ContextLevel,
) -> Tuple[irast.Set, bool, bool]:
"""Synthesize a select of conflicting objects
This teases apart the constraints we care about based on which
type they originate from, generates a SELECT for each type, and
unions them together.
`cnstrs` contains the constraints to consider.
"""
schema = ctx.env.schema
if for_inheritance:
type_maps = {subject_typ: (constrs, list(obj_constrs))}
else:
type_maps = _split_constraints(obj_constrs, constrs, ctx=ctx)
# Generate a separate query for each type
from_parent = False
frags = []
for a_obj, (a_constrs, a_obj_constrs) in type_maps.items():
frag = _compile_conflict_select(
stmt, a_obj, obj_constrs=a_obj_constrs, constrs=a_constrs,
for_inheritance=for_inheritance,
fake_dml_set=fake_dml_set,
parser_context=parser_context, ctx=ctx,
)
if frag:
if a_obj != subject_typ:
from_parent = True
frags.append(frag)
always_check = from_parent or any(
not child.is_view(schema) for child in subject_typ.children(schema)
)
# Union them all together
select_ast = qlast.Set(elements=frags)
with ctx.new() as ectx:
ectx.implicit_limit = 0
select_ir = dispatch.compile(select_ast, ctx=ectx)
select_ir = setgen.scoped_set(
select_ir, force_reassign=True, ctx=ectx)
assert isinstance(select_ir, irast.Set)
return select_ir, always_check, from_parent
def _get_exclusive_ptr_constraints(
typ: s_objtypes.ObjectType,
*, ctx: context.ContextLevel,
) -> Dict[str, Tuple[s_pointers.Pointer, List[s_constr.Constraint]]]:
schema = ctx.env.schema
pointers = {}
exclusive_constr = schema.get('std::exclusive', type=s_constr.Constraint)
for ptr in typ.get_pointers(schema).objects(schema):
ptr = ptr.get_nearest_non_derived_parent(schema)
ex_cnstrs = [c for c in ptr.get_constraints(schema).objects(schema)
if c.issubclass(schema, exclusive_constr)]
if ex_cnstrs:
name = ptr.get_shortname(schema).name
if name != 'id':
pointers[name] = ptr, ex_cnstrs
return pointers
def compile_insert_unless_conflict(
stmt: irast.InsertStmt,
typ: s_objtypes.ObjectType,
*, ctx: context.ContextLevel,
) -> irast.OnConflictClause:
"""Compile an UNLESS CONFLICT clause with no ON
This requires synthesizing a conditional based on all the exclusive
constraints on the object.
"""
pointers = _get_exclusive_ptr_constraints(typ, ctx=ctx)
obj_constrs = typ.get_constraints(ctx.env.schema).objects(ctx.env.schema)
select_ir, always_check, _ = compile_conflict_select(
stmt, typ,
constrs=pointers,
obj_constrs=obj_constrs,
parser_context=stmt.context, ctx=ctx)
return irast.OnConflictClause(
constraint=None, select_ir=select_ir, always_check=always_check,
else_ir=None)
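# Hedged illustration (schema names are made up, not taken from this module):
# the clause compiled above corresponds to an EdgeQL statement of the form
#
#     insert Person { name := 'Alice' }
#     unless conflict;
#
# whereas compile_insert_unless_conflict_on below handles the variant with an
# explicit pointer, e.g. `unless conflict on .name else (select Person)`.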
def compile_insert_unless_conflict_on(
stmt: irast.InsertStmt,
typ: s_objtypes.ObjectType,
constraint_spec: qlast.Expr,
else_branch: Optional[qlast.Expr],
*, ctx: context.ContextLevel,
) -> irast.OnConflictClause:
with ctx.new() as constraint_ctx:
constraint_ctx.partial_path_prefix = stmt.subject
# We compile the name here so we can analyze it, but we don't do
# anything else with it.
cspec_res = dispatch.compile(constraint_spec, ctx=constraint_ctx)
# We accept a property, link, or a list of them in the form of a
# tuple.
if cspec_res.rptr is None and isinstance(cspec_res.expr, irast.Tuple):
cspec_args = [elem.val for elem in cspec_res.expr.elements]
else:
cspec_args = [cspec_res]
for cspec_arg in cspec_args:
if not cspec_arg.rptr:
raise errors.QueryError(
'UNLESS CONFLICT argument must be a property, link, '
'or tuple of properties and links',
context=constraint_spec.context,
)
if cspec_arg.rptr.source.path_id != stmt.subject.path_id:
raise errors.QueryError(
'UNLESS CONFLICT argument must be a property of the '
'type being inserted',
context=constraint_spec.context,
)
schema = ctx.env.schema
ptrs = []
exclusive_constr = schema.get('std::exclusive', type=s_constr.Constraint)
for cspec_arg in cspec_args:
assert cspec_arg.rptr is not None
schema, ptr = (
typeutils.ptrcls_from_ptrref(cspec_arg.rptr.ptrref, schema=schema))
if not isinstance(ptr, s_pointers.Pointer):
raise errors.QueryError(
'UNLESS CONFLICT property must be a property',
context=constraint_spec.context,
)
ptr = ptr.get_nearest_non_derived_parent(schema)
ptr_card = ptr.get_cardinality(schema)
if not ptr_card.is_single():
raise errors.QueryError(
'UNLESS CONFLICT property must be a SINGLE property',
context=constraint_spec.context,
)
ptrs.append(ptr)
obj_constrs = inference.cardinality.get_object_exclusive_constraints(
typ, set(ptrs), ctx.env)
field_constrs = []
if len(ptrs) == 1:
field_constrs = [
c for c in ptrs[0].get_constraints(schema).objects(schema)
if c.issubclass(schema, exclusive_constr)]
all_constrs = list(obj_constrs) + field_constrs
if len(all_constrs) != 1:
raise errors.QueryError(
'UNLESS CONFLICT property must have a single exclusive constraint',
context=constraint_spec.context,
)
ds = {ptr.get_shortname(schema).name: (ptr, field_constrs)
for ptr in ptrs}
select_ir, always_check, from_anc = compile_conflict_select(
stmt, typ, constrs=ds, obj_constrs=list(obj_constrs),
parser_context=stmt.context, ctx=ctx)
# Compile an else branch
else_ir = None
if else_branch:
# TODO: We should support this, but there is some semantic and
# implementation trickiness.
if from_anc:
raise errors.UnsupportedFeatureError(
'UNLESS CONFLICT can not use ELSE when constraint is from a '
'parent type',
context=constraint_spec.context,
)
# The ELSE needs to be able to reference the subject in an
# UPDATE, even though that would normally be prohibited.
ctx.path_scope.factoring_allowlist.add(stmt.subject.path_id)
# Compile else
else_ir = dispatch.compile(
astutils.ensure_qlstmt(else_branch), ctx=ctx)
assert isinstance(else_ir, irast.Set)
return irast.OnConflictClause(
constraint=irast.ConstraintRef(id=all_constrs[0].id),
select_ir=select_ir,
always_check=always_check,
else_ir=else_ir
)
def compile_inheritance_conflict_selects(
stmt: irast.MutatingStmt,
conflict: irast.MutatingStmt,
typ: s_objtypes.ObjectType,
subject_type: s_objtypes.ObjectType,
*, ctx: context.ContextLevel,
) -> List[irast.OnConflictClause]:
"""Compile the selects needed to resolve multiple DML to related types
Generate a SELECT that finds all objects of type `typ` that conflict with
the insert `stmt`. The backend will use this to explicitly check that
no conflicts exist, and raise an error if they do.
This is needed because we mostly use triggers to enforce these
cross-type exclusive constraints, and they use a snapshot
beginning at the start of the statement.
"""
pointers = _get_exclusive_ptr_constraints(typ, ctx=ctx)
obj_constrs = typ.get_constraints(ctx.env.schema).objects(
ctx.env.schema)
# This is a little silly, but for *this* we need to do one per
# constraint (so that we can properly identify which constraint
# failed in the error messages)
entries: List[Tuple[s_constr.Constraint, ConstraintPair]] = []
for name, (ptr, ptr_constrs) in pointers.items():
for ptr_constr in ptr_constrs:
if _constr_matters(ptr_constr, ctx):
entries.append((ptr_constr, ({name: (ptr, [ptr_constr])}, [])))
for obj_constr in obj_constrs:
if _constr_matters(obj_constr, ctx):
entries.append((obj_constr, ({}, [obj_constr])))
# For updates, we need to pull from the actual result overlay,
# since the final row can depend on things not in the query.
fake_dml_set = None
if isinstance(stmt, irast.UpdateStmt):
fake_subject = qlast.DetachedExpr(expr=qlast.Path(steps=[
s_utils.name_to_ast_ref(subject_type.get_name(ctx.env.schema))]))
fake_dml_set = dispatch.compile(fake_subject, ctx=ctx)
clauses = []
for cnstr, (p, o) in entries:
select_ir, _, _ = compile_conflict_select(
stmt, typ,
for_inheritance=True,
fake_dml_set=fake_dml_set,
constrs=p,
obj_constrs=o,
parser_context=stmt.context, ctx=ctx)
if isinstance(select_ir, irast.EmptySet):
continue
cnstr_ref = irast.ConstraintRef(id=cnstr.id)
clauses.append(
irast.OnConflictClause(
constraint=cnstr_ref, select_ir=select_ir, always_check=False,
else_ir=None, else_fail=conflict,
update_query_set=fake_dml_set)
)
return clauses
def compile_inheritance_conflict_checks(
stmt: irast.MutatingStmt,
subject_stype: s_objtypes.ObjectType,
*, ctx: context.ContextLevel,
) -> Optional[List[irast.OnConflictClause]]:
if not ctx.env.dml_stmts:
return None
assert isinstance(subject_stype, s_objtypes.ObjectType)
# TODO: when the conflicting statement is an UPDATE, only
# look at things it updated
modified_ancestors = set()
base_object = ctx.env.schema.get(
'std::BaseObject', type=s_objtypes.ObjectType)
subject_stypes = [subject_stype]
# For updates, we need to also consider all descendants, because
# those could also have interesting constraints of their own.
if isinstance(stmt, irast.UpdateStmt):
subject_stypes.extend(subject_stype.descendants(ctx.env.schema))
# N.B that for updates, the update itself will be in dml_stmts,
# since an update can conflict with itself if there are subtypes.
for ir in ctx.env.dml_stmts:
typ = setgen.get_set_type(ir.subject, ctx=ctx)
assert isinstance(typ, s_objtypes.ObjectType)
typ = typ.get_nearest_non_derived_parent(ctx.env.schema)
typs = [typ]
# As mentioned above, need to consider descendants of updates
if isinstance(ir, irast.UpdateStmt):
typs.extend(typ.descendants(ctx.env.schema))
for typ in typs:
if typ.is_view(ctx.env.schema):
continue
for subject_stype in subject_stypes:
if subject_stype.is_view(ctx.env.schema):
continue
# If the earlier DML has a shared ancestor that isn't
# BaseObject and isn't (if it's an insert) the same type,
# then we need to see if we need a conflict select
if (
subject_stype == typ
and not isinstance(ir, irast.UpdateStmt)
and not isinstance(stmt, irast.UpdateStmt)
):
continue
ancs = s_utils.get_class_nearest_common_ancestors(
ctx.env.schema, [subject_stype, typ])
for anc in ancs:
if anc != base_object:
modified_ancestors.add((subject_stype, anc, ir))
conflicters = []
for subject_stype, anc_type, ir in modified_ancestors:
conflicters.extend(compile_inheritance_conflict_selects(
stmt, ir, anc_type, subject_stype, ctx=ctx))
return conflicters or None
|
dojo/db_migrations/0075_import_history.py | mtcolman/django-DefectDojo | 1,772 | 72308 |
# Generated by Django 2.2.17 on 2021-01-30 08:35
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
from django.db.models import JSONField
class Migration(migrations.Migration):
dependencies = [('dojo', '0074_notifications_close_engagement')]
operations = [
migrations.CreateModel(
name='Test_Import',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('import_settings', JSONField(null=True)),
('version', models.CharField(blank=True, max_length=100, null=True)),
('type', models.CharField(default='unknown', max_length=64, null=False)),
],
options={
'ordering': ('-id',),
},
),
migrations.CreateModel(
name='Test_Import_Finding_Action',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('action', models.CharField(blank=True, choices=[('N', 'created'), ('C', 'closed'), ('R', 'reactivated'), ('U', 'updated')], max_length=100, null=True)),
('finding', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, to='dojo.Finding')),
('test_import', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, to='dojo.Test_Import')),
],
options={
'ordering': ('test_import', 'action', 'finding'),
'unique_together': {('test_import', 'finding')},
},
),
migrations.AddField(
model_name='test_import',
name='findings_affected',
field=models.ManyToManyField(through='dojo.Test_Import_Finding_Action', to='dojo.Finding'),
),
migrations.AddField(
model_name='test_import',
name='test',
field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, to='dojo.Test'),
),
migrations.AddIndex(
model_name='test_import',
index=models.Index(fields=['created', 'test', 'type'], name='dojo_test_i_created_951f4e_idx'),
),
]
|
pajbot/web/routes/api/playsound.py | sirinoks/pajbot | 145 | 72323 |
from flask_restful import Resource
from flask_restful.reqparse import RequestParser
from pajbot.managers.db import DBManager
from pajbot.models.playsound import Playsound
from pajbot.models.sock import SocketClientManager
from pajbot.modules import PlaysoundModule
from pajbot.web.utils import requires_level
from pajbot.managers.adminlog import AdminLogManager
class PlaysoundAPI(Resource):
@requires_level(500)
def put(self, playsound_name, **options):
playsound_name = PlaysoundModule.massage_name(playsound_name)
if not PlaysoundModule.validate_name(playsound_name):
return (
{
"error": "Invalid Playsound name. The playsound name may only contain lowercase latin letters, 0-9, -, or _. No spaces :rage:"
},
400,
)
post_parser = RequestParser()
post_parser.add_argument("link", required=True)
args = post_parser.parse_args()
try:
link = args["link"]
except (ValueError, KeyError):
return {"error": "Invalid `link` parameter."}, 400
with DBManager.create_session_scope() as db_session:
count = db_session.query(Playsound).filter(Playsound.name == playsound_name).count()
if count >= 1:
return "Playsound already exists", 400
# the rest of the parameters are initialized with defaults
playsound = Playsound(name=playsound_name, link=link)
db_session.add(playsound)
log_msg = f"The {playsound_name} playsound has been added"
AdminLogManager.add_entry("Playsound added", options["user"], log_msg)
return "OK", 200
@requires_level(500)
def post(self, playsound_name, **options):
# require JSON so the cooldown can be null
post_parser = RequestParser()
post_parser.add_argument("link", required=True)
post_parser.add_argument("volume", type=int, required=True)
post_parser.add_argument("cooldown", type=int, required=False)
post_parser.add_argument("enabled", type=bool, required=False)
args = post_parser.parse_args()
link = args["link"]
if not PlaysoundModule.validate_link(link):
return "Empty or bad link, links must start with https:// and must not contain spaces", 400
volume = args["volume"]
if not PlaysoundModule.validate_volume(volume):
return "Bad volume argument", 400
# cooldown is allowed to be null/None
cooldown = args.get("cooldown", None)
if not PlaysoundModule.validate_cooldown(cooldown):
return "Bad cooldown argument", 400
enabled = args["enabled"]
if enabled is None:
return "Bad enabled argument", 400
with DBManager.create_session_scope() as db_session:
playsound = db_session.query(Playsound).filter(Playsound.name == playsound_name).one_or_none()
if playsound is None:
return "Playsound does not exist", 404
raw_edited_data = {
"link": (playsound.link, link),
"volume": (playsound.volume, volume),
"cooldown": (playsound.cooldown, cooldown),
}
# make a dictionary with all the changed values (except for enabled, which has a special case below)
filtered_edited_data = {k: v for k, v in raw_edited_data.items() if v[0] != v[1]}
log_msg = f"The {playsound_name} playsound has been updated: "
log_msg_changes = []
if playsound.enabled != enabled:
log_msg_changes.append("enabled" if enabled else "disabled")
# iterate over changed values and push them to the log msg
for edited_key, values in filtered_edited_data.items():
log_msg_changes.append(f"{edited_key} {values[0]} to {values[1]}")
log_msg += ", ".join(log_msg_changes)
playsound.link = link
playsound.volume = volume
playsound.cooldown = cooldown
playsound.enabled = enabled
db_session.add(playsound)
if len(log_msg_changes):
AdminLogManager.add_entry("Playsound edited", options["user"], log_msg)
return "OK", 200
@requires_level(500)
def delete(self, playsound_name, **options):
with DBManager.create_session_scope() as db_session:
playsound = db_session.query(Playsound).filter(Playsound.name == playsound_name).one_or_none()
if playsound is None:
return "Playsound does not exist", 404
log_msg = f"The {playsound.name} playsound has been removed"
AdminLogManager.add_entry("Playsound removed", options["user"], log_msg)
db_session.delete(playsound)
return "OK", 200
class PlayPlaysoundAPI(Resource):
@requires_level(500)
def post(self, playsound_name, **options):
with DBManager.create_session_scope() as db_session:
count = db_session.query(Playsound).filter(Playsound.name == playsound_name).count()
if count <= 0:
return "Playsound does not exist", 404
# explicitly don't check for disabled
SocketClientManager.send("playsound.play", {"name": playsound_name})
return "OK", 200
def init(api):
api.add_resource(PlaysoundAPI, "/playsound/<playsound_name>")
api.add_resource(PlayPlaysoundAPI, "/playsound/<playsound_name>/play")
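# Hedged usage sketch (host and URL prefix are assumptions; they depend on how
# the flask_restful Api object is mounted elsewhere): once init(api) has run, a
# playsound could be created with an authenticated HTTP PUT, for example:
#
#   import requests
#   requests.put("https://<bot-host>/<api-prefix>/playsound/hello",
#                data={"link": "https://example.com/hello.ogg"})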
|
test/test-dehaze.py | opteroncx/MoePhoto | 192 | 72332 |
import PIL.Image as Image
import scipy.misc
import sys
sys.path.append('./python')
from dehaze import load_model, transform, cuda # pylint: disable=E0401
def run_test():
net = load_model()
input_image = './download/canyon1.jpg'
output_filename = './download/canyon1_dh.jpg'
#===== Load input image =====
img = Image.open(input_image).convert('RGB')
imgIn = transform(img).unsqueeze_(0)
#===== Test procedures =====
if cuda:
imgIn = imgIn.cuda()
prediction = net(imgIn)
prediction = prediction.data.cpu().numpy().squeeze().transpose((1, 2, 0))
scipy.misc.toimage(prediction).save(output_filename)
if __name__ == '__main__':
print('dehaze')
run_test()
|
examples/erato/training/train_attention.py | sundogrd/tensorflow_end2end_speech_recognition | 351 | 72348 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Train the Attention-based model (ERATO corpus)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os.path import join, isfile, abspath
import sys
import time
import tensorflow as tf
from setproctitle import setproctitle
import yaml
import shutil
sys.path.append(abspath('../../../'))
from experiments.erato.data.load_dataset_attention import Dataset
from experiments.erato.metrics.attention import do_eval_cer, do_eval_fmeasure
from utils.io.labels.sparsetensor import list2sparsetensor
from utils.training.learning_rate_controller import Controller
from utils.training.plot import plot_loss, plot_ler
from utils.directory import mkdir_join, mkdir
from utils.parameter import count_total_parameters
from models.attention.attention_seq2seq import AttentionSeq2Seq
def do_train(model, params):
"""Run training.
Args:
model: the model to train
params (dict): A dictionary of parameters
"""
map_file_path = '../metrics/mapping_files/' + \
params['label_type'] + '_' + params['ss_type'] + '.txt'
# Load dataset
train_data = Dataset(
data_type='train', label_type=params['label_type'],
ss_type=params['ss_type'],
batch_size=params['batch_size'], map_file_path=map_file_path,
max_epoch=params['num_epoch'], splice=params['splice'],
num_stack=params['num_stack'], num_skip=params['num_skip'],
sort_utt=True, sort_stop_epoch=params['sort_stop_epoch'])
dev_data = Dataset(
data_type='dev', label_type=params['label_type'],
ss_type=params['ss_type'],
batch_size=params['batch_size'], map_file_path=map_file_path,
splice=params['splice'],
num_stack=params['num_stack'], num_skip=params['num_skip'],
sort_utt=False)
test_data = Dataset(
data_type='test', label_type=params['label_type'],
ss_type=params['ss_type'],
batch_size=params['batch_size'], map_file_path=map_file_path,
splice=params['splice'],
num_stack=params['num_stack'], num_skip=params['num_skip'],
sort_utt=False)
# Tell TensorFlow that the model will be built into the default graph
with tf.Graph().as_default():
# Define placeholders
model.create_placeholders()
learning_rate_pl = tf.placeholder(tf.float32, name='learning_rate')
# Add to the graph each operation (including model definition)
loss_op, logits, decoder_outputs_train, decoder_outputs_infer = model.compute_loss(
model.inputs_pl_list[0],
model.labels_pl_list[0],
model.inputs_seq_len_pl_list[0],
model.labels_seq_len_pl_list[0],
model.keep_prob_encoder_pl_list[0],
model.keep_prob_decoder_pl_list[0],
model.keep_prob_embedding_pl_list[0])
train_op = model.train(loss_op,
optimizer=params['optimizer'],
learning_rate=learning_rate_pl)
_, decode_op_infer = model.decode(
decoder_outputs_train,
decoder_outputs_infer)
ler_op = model.compute_ler(model.labels_st_true_pl,
model.labels_st_pred_pl)
# Define learning rate controller
lr_controller = Controller(
learning_rate_init=params['learning_rate'],
decay_start_epoch=params['decay_start_epoch'],
decay_rate=params['decay_rate'],
decay_patient_epoch=params['decay_patient_epoch'],
lower_better=True)
# Build the summary tensor based on the TensorFlow collection of
# summaries
summary_train = tf.summary.merge(model.summaries_train)
summary_dev = tf.summary.merge(model.summaries_dev)
# Add the variable initializer operation
init_op = tf.global_variables_initializer()
# Create a saver for writing training checkpoints
saver = tf.train.Saver(max_to_keep=None)
# Count total param
parameters_dict, total_parameters = count_total_parameters(
tf.trainable_variables())
for parameter_name in sorted(parameters_dict.keys()):
print("%s %d" % (parameter_name, parameters_dict[parameter_name]))
print("Total %d variables, %s M param" %
(len(parameters_dict.keys()),
"{:,}".format(total_parameters / 1000000)))
csv_steps, csv_loss_train, csv_loss_dev = [], [], []
csv_ler_train, csv_ler_dev = [], []
# Create a session for running operation on the graph
with tf.Session() as sess:
# Instantiate a SummaryWriter to output summaries and the graph
summary_writer = tf.summary.FileWriter(
model.save_path, sess.graph)
# Initialize param
sess.run(init_op)
# Train model
start_time_train = time.time()
start_time_epoch = time.time()
start_time_step = time.time()
cer_dev_best = 1
not_improved_epoch = 0
learning_rate = float(params['learning_rate'])
for step, (data, is_new_epoch) in enumerate(train_data):
# Create feed dictionary for next mini batch (train)
inputs, labels_train, inputs_seq_len, labels_seq_len, _ = data
feed_dict_train = {
model.inputs_pl_list[0]: inputs[0],
model.labels_pl_list[0]: labels_train[0],
model.inputs_seq_len_pl_list[0]: inputs_seq_len[0],
model.labels_seq_len_pl_list[0]: labels_seq_len[0],
model.keep_prob_encoder_pl_list[0]: 1 - float(params['dropout_encoder']),
model.keep_prob_decoder_pl_list[0]: 1 - float(params['dropout_decoder']),
model.keep_prob_embedding_pl_list[0]: 1 - float(params['dropout_embedding']),
learning_rate_pl: learning_rate
}
# Update parameters
sess.run(train_op, feed_dict=feed_dict_train)
if (step + 1) % params['print_step'] == 0:
# Create feed dictionary for next mini batch (dev)
(inputs, labels_dev, inputs_seq_len,
labels_seq_len, _), _ = dev_data.next()
feed_dict_dev = {
model.inputs_pl_list[0]: inputs[0],
model.labels_pl_list[0]: labels_dev[0],
model.inputs_seq_len_pl_list[0]: inputs_seq_len[0],
model.labels_seq_len_pl_list[0]: labels_seq_len[0],
model.keep_prob_encoder_pl_list[0]: 1.0,
model.keep_prob_decoder_pl_list[0]: 1.0,
model.keep_prob_embedding_pl_list[0]: 1.0
}
# Compute loss
loss_train = sess.run(loss_op, feed_dict=feed_dict_train)
loss_dev = sess.run(loss_op, feed_dict=feed_dict_dev)
csv_steps.append(step)
csv_loss_train.append(loss_train)
csv_loss_dev.append(loss_dev)
# Change to evaluation mode
feed_dict_train[model.keep_prob_encoder_pl_list[0]] = 1.0
feed_dict_train[model.keep_prob_decoder_pl_list[0]] = 1.0
feed_dict_train[model.keep_prob_embedding_pl_list[0]] = 1.0
                # Predict class ids & update event files
predicted_ids_train, summary_str_train = sess.run(
[decode_op_infer, summary_train], feed_dict=feed_dict_train)
predicted_ids_dev, summary_str_dev = sess.run(
[decode_op_infer, summary_dev], feed_dict=feed_dict_dev)
summary_writer.add_summary(summary_str_train, step + 1)
summary_writer.add_summary(summary_str_dev, step + 1)
summary_writer.flush()
# Convert to sparsetensor to compute LER
feed_dict_ler_train = {
model.labels_st_true_pl: list2sparsetensor(
labels_train[0], padded_value=train_data.padded_value),
model.labels_st_pred_pl: list2sparsetensor(
predicted_ids_train, padded_value=train_data.padded_value)
}
feed_dict_ler_dev = {
model.labels_st_true_pl: list2sparsetensor(
labels_dev[0], padded_value=dev_data.padded_value),
model.labels_st_pred_pl: list2sparsetensor(
predicted_ids_dev, padded_value=dev_data.padded_value)
}
# Compute accuracy
ler_train = sess.run(ler_op, feed_dict=feed_dict_ler_train)
ler_dev = sess.run(ler_op, feed_dict=feed_dict_ler_dev)
csv_ler_train.append(ler_train)
csv_ler_dev.append(ler_dev)
duration_step = time.time() - start_time_step
print("Step %d (epoch: %.3f): loss = %.3f (%.3f) / ler = %.3f (%.3f) / lr = %.5f (%.3f min)" %
(step + 1, train_data.epoch_detail, loss_train, loss_dev, ler_train, ler_dev,
learning_rate, duration_step / 60))
sys.stdout.flush()
start_time_step = time.time()
# Save checkpoint and evaluate model per epoch
if is_new_epoch:
duration_epoch = time.time() - start_time_epoch
print('-----EPOCH:%d (%.3f min)-----' %
(train_data.epoch, duration_epoch / 60))
                # Save figure of loss & ler
plot_loss(csv_loss_train, csv_loss_dev, csv_steps,
save_path=model.save_path)
plot_ler(csv_ler_train, csv_ler_dev, csv_steps,
label_type=params['label_type'],
save_path=model.save_path)
if train_data.epoch >= params['eval_start_epoch']:
start_time_eval = time.time()
print('=== Dev Data Evaluation ===')
cer_dev_epoch = do_eval_cer(
session=sess,
decode_op=decode_op_infer,
model=model,
dataset=dev_data,
label_type=params['label_type'],
ss_type=params['ss_type'],
eval_batch_size=1)
print(' CER: %f %%' % (cer_dev_epoch * 100))
if cer_dev_epoch < cer_dev_best:
cer_dev_best = cer_dev_epoch
not_improved_epoch = 0
print('■■■ ↑Best Score (CER)↑ ■■■')
# Save model (check point)
checkpoint_file = join(
model.save_path, 'model.ckpt')
save_path = saver.save(
sess, checkpoint_file, global_step=train_data.epoch)
print("Model saved in file: %s" % save_path)
print('=== Test Data Evaluation ===')
ler_test = do_eval_cer(
session=sess,
decode_op=decode_op_infer,
model=model,
dataset=test_data,
label_type=params['label_type'],
ss_type=params['ss_type'],
is_test=True,
eval_batch_size=1)
print(' CER: %f %%' % (ler_test * 100))
if params['ss_type'] != 'remove':
df_acc = do_eval_fmeasure(
session=sess,
decode_op=decode_op_infer,
model=model,
dataset=test_data,
label_type=params['label_type'],
ss_type=params['ss_type'],
is_test=True,
eval_batch_size=1)
print(df_acc)
else:
not_improved_epoch += 1
duration_eval = time.time() - start_time_eval
print('Evaluation time: %.3f min' %
(duration_eval / 60))
# Early stopping
if not_improved_epoch == params['not_improved_patient_epoch']:
break
# Update learning rate
learning_rate = lr_controller.decay_lr(
learning_rate=learning_rate,
epoch=train_data.epoch,
value=cer_dev_epoch)
start_time_epoch = time.time()
duration_train = time.time() - start_time_train
print('Total time: %.3f hour' % (duration_train / 3600))
# Training was finished correctly
with open(join(model.save_path, 'complete.txt'), 'w') as f:
f.write('')
def main(config_path, model_save_path):
# Load a config file (.yml)
with open(config_path, "r") as f:
config = yaml.load(f)
params = config['param']
# Except for a <SOS> and <EOS> class
if params['ss_type'] == 'remove':
params['num_classes'] = 147
elif params['ss_type'] in ['insert_left', 'insert_right']:
params['num_classes'] = 151
elif params['ss_type'] == 'insert_both':
params['num_classes'] = 155
    else:
        raise TypeError('Unexpected ss_type: %s' % params['ss_type'])
# Model setting
model = AttentionSeq2Seq(
input_size=params['input_size'] * params['num_stack'],
encoder_type=params['encoder_type'],
encoder_num_units=params['encoder_num_units'],
encoder_num_layers=params['encoder_num_layers'],
encoder_num_proj=params['encoder_num_proj'],
attention_type=params['attention_type'],
attention_dim=params['attention_dim'],
decoder_type=params['decoder_type'],
decoder_num_units=params['decoder_num_units'],
decoder_num_layers=params['decoder_num_layers'],
embedding_dim=params['embedding_dim'],
num_classes=params['num_classes'],
sos_index=params['num_classes'],
eos_index=params['num_classes'] + 1,
max_decode_length=params['max_decode_length'],
lstm_impl='LSTMBlockCell',
use_peephole=params['use_peephole'],
parameter_init=params['weight_init'],
clip_grad_norm=params['clip_grad_norm'],
clip_activation_encoder=params['clip_activation_encoder'],
clip_activation_decoder=params['clip_activation_decoder'],
weight_decay=params['weight_decay'],
time_major=True,
sharpening_factor=params['sharpening_factor'],
logits_temperature=params['logits_temperature'],
sigmoid_smoothing=params['sigmoid_smoothing'])
# Set process name
setproctitle('tf_erato_' + model.name + '_' +
params['label_type'] + '_' + params['ss_type'] + '_' + params['attention_type'])
model.name = 'en' + str(params['encoder_num_units'])
model.name += '_' + str(params['encoder_num_layers'])
model.name += '_att' + str(params['attention_dim'])
model.name += '_de' + str(params['decoder_num_units'])
model.name += '_' + str(params['decoder_num_layers'])
model.name += '_' + params['optimizer']
model.name += '_lr' + str(params['learning_rate'])
model.name += '_' + params['attention_type']
if params['dropout_encoder'] != 0:
model.name += '_dropen' + str(params['dropout_encoder'])
if params['dropout_decoder'] != 0:
model.name += '_dropde' + str(params['dropout_decoder'])
if params['dropout_embedding'] != 0:
model.name += '_dropem' + str(params['dropout_embedding'])
if params['num_stack'] != 1:
model.name += '_stack' + str(params['num_stack'])
if params['weight_decay'] != 0:
model.name += 'wd' + str(params['weight_decay'])
if params['sharpening_factor'] != 1:
model.name += '_sharp' + str(params['sharpening_factor'])
if params['logits_temperature'] != 1:
model.name += '_temp' + str(params['logits_temperature'])
if bool(params['sigmoid_smoothing']):
model.name += '_smoothing'
# Set save path
model.save_path = mkdir_join(
model_save_path, 'attention', params['label_type'],
params['ss_type'], model.name)
# Reset model directory
model_index = 0
new_model_path = model.save_path
while True:
if isfile(join(new_model_path, 'complete.txt')):
            # Training of this model has already finished
model_index += 1
new_model_path = model.save_path + '_' + str(model_index)
elif isfile(join(new_model_path, 'config.yml')):
            # Training of this model has not finished yet
model_index += 1
new_model_path = model.save_path + '_' + str(model_index)
else:
break
model.save_path = mkdir(new_model_path)
# Save config file
shutil.copyfile(config_path, join(model.save_path, 'config.yml'))
sys.stdout = open(join(model.save_path, 'train.log'), 'w')
# TODO(hirofumi): change to logger
do_train(model=model, params=params)
if __name__ == '__main__':
args = sys.argv
if len(args) != 3:
raise ValueError('Length of args should be 3.')
main(config_path=args[1], model_save_path=args[2])
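# Hedged invocation example (paths are placeholders): the script expects a YAML
# config file and a model output directory as the two command-line arguments,
# matching main(config_path=args[1], model_save_path=args[2]) above, e.g.
#   python train_attention.py config/attention_erato.yml /tmp/erato_models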
|
Python/count-good-triplets.py | shreyventure/LeetCode-Solutions | 388 | 72369 |
from typing import List
class Solution:
"""
The Brute Force Solution
Time Complexity: O(N^3)
Space Complexity: O(1)
"""
def countGoodTriplets(self, arr: List[int], a: int, b: int, c: int) -> int:
triplet_count = 0
# for each i, for each j, check if the first condition is satisfied
for i in range(len(arr) - 2):
for j in range(i + 1, len(arr) - 1):
if abs(arr[i] - arr[j]) <= a:
# for each k, check if the last two conditions are satisfied
for k in range(j + 1, len(arr)):
if abs(arr[j] - arr[k]) <= b and abs(arr[i] - arr[k]) <= c:
# the triplet is Good, increment the count!
triplet_count += 1
return triplet_count
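# Hedged usage example using the published sample for LeetCode 1534:
# arr = [3, 0, 1, 1, 9, 7] with (a, b, c) = (7, 2, 3) yields 4 good triplets.
if __name__ == "__main__":
    print(Solution().countGoodTriplets([3, 0, 1, 1, 9, 7], 7, 2, 3))  # expected: 4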
|
tracker/usecasecode/360d/stream_track.py | PowerMagicUniversity/deepstream_360_d_smart_parking_application | 294 | 72387 |
"""
Main file for streaming Multicam tracker for 360 degree usecase
"""
__version__ = '0.2'
import argparse
import json
import logging
import signal
import sys
from mctrack import mctrackstream
logging.basicConfig(filename='mctracker360.log', level=logging.INFO)
DEFAULT_CONSUMER_KAFKA_BOOTSTRAP_SERVER_URL = "kafka"
DEFAULT_PRODUCER_KAFKA_BOOTSTRAP_SERVER_URL = "kafka"
DEFAULT_CONSUMER_KAFKA_TOPIC = "metromind-raw"
DEFAULT_PRODUCER_KAFKA_TOPIC = "metromind-start"
DEFAULT_MCTRACKER_CONFIG_FILE = "config/config_360d.json"
DEFAULT_STREAM_CONFIG_FILE = "config/config_360d_stream.json"
mctrack_obj = None
def signal_handler(signum, _):
"""Signal handler. This function will dump all tracker stats and exit
Arguments:
signum {int} -- The signal number
frame {list} -- Stack frame
"""
logging.error("Multicam tracker got a signal: %d", signum)
try:
if mctrack_obj is not None:
mctrack_obj.dump_stats()
except Exception:
pass
exit()
def main():
"""Main function. Starts multicam tracker and runs continiously
until killed
"""
global mctrack_obj
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", help="Config file for mctracker",
default=DEFAULT_MCTRACKER_CONFIG_FILE)
parser.add_argument("-s", "--sconfig", help="Config file for streaming setup",
default=DEFAULT_STREAM_CONFIG_FILE)
args = parser.parse_args()
stream_config = None
try:
stream_config = json.load(open(args.sconfig))
except IOError as ioe:
err_msg = "ERROR: Stream Config I/O Error({}): {}: {}. Quitting".format(
ioe.errno, args.sconfig, ioe.strerror)
logging.error(err_msg)
print(err_msg)
exit()
except:
err_msg = "ERROR: Stream Config Error: {}: {}. Quitting".format(
args.sconfig, sys.exc_info()[0])
logging.error(err_msg)
print(err_msg)
exit()
print(stream_config)
ckafka = (stream_config
.get("msgBrokerConfig", {})
.get("inputKafkaServerUrl",
DEFAULT_CONSUMER_KAFKA_BOOTSTRAP_SERVER_URL))
pkafka = (stream_config
.get("msgBrokerConfig", {})
.get("outputKafkaServerUrl",
DEFAULT_PRODUCER_KAFKA_BOOTSTRAP_SERVER_URL))
itopic = (stream_config
.get("msgBrokerConfig", {})
.get("inputKafkaTopic", DEFAULT_CONSUMER_KAFKA_TOPIC))
    otopic = (stream_config
              .get("msgBrokerConfig", {})
              .get("outputKafkaTopic",
                   DEFAULT_PRODUCER_KAFKA_TOPIC))
time_it_flag = stream_config.get("profileTime", False)
print("Starting MC-Streaming app with following args:\n"
"consumer kafka server={}\n"
"consumer kafka topic={}\n"
"producer kafka server={}\n"
"producer kafka topic={}\n"
"Time profile={}\n"
"MC Tracker Config File={}\n".format(ckafka, itopic,
pkafka, otopic,
time_it_flag,
args.config))
# Set the signal handler for ctrl-c. Since the program runs indefinitely,
# we need to dump some stats when sigint is received
# (when profiling is enabled)
signal.signal(signal.SIGINT, signal_handler)
mctrack_obj = mctrackstream.McTrackerStream(ckafka, itopic,
pkafka, otopic,
args.config, time_it_flag)
mctrack_obj.start_mctracker()
if __name__ == "__main__":
main()
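# Hedged illustration (values are placeholders): a minimal stream config file
# passed via --sconfig only needs the keys read in main(), e.g.
#   {
#     "msgBrokerConfig": {
#       "inputKafkaServerUrl": "kafka:9092",
#       "outputKafkaServerUrl": "kafka:9092",
#       "inputKafkaTopic": "metromind-raw",
#       "outputKafkaTopic": "metromind-start"
#     },
#     "profileTime": false
#   }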
|
static/paddlex_restful/restful/dataset/datasetbase.py | cheneyveron/PaddleX | 3,655 | 72392 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import os.path as osp
import random
from .utils import copy_directory
class DatasetBase(object):
def __init__(self, dataset_id, path):
self.id = dataset_id
self.path = path
self.all_files = list()
self.file_info = dict()
self.label_info = dict()
self.labels = list()
self.train_files = list()
self.val_files = list()
self.test_files = list()
self.class_train_file_list = dict()
self.class_val_file_list = dict()
self.class_test_file_list = dict()
def copy_dataset(self, source_path, files):
        # Copy the original dataset to the target path
copy_directory(source_path, self.path, files)
def dump_statis_info(self):
        # info['fields'] specifies which attributes get dumped
info = dict()
info['fields'] = [
'file_info', 'label_info', 'labels', 'train_files', 'val_files',
'test_files', 'class_train_file_list', 'class_val_file_list',
'class_test_file_list'
]
for field in info['fields']:
if hasattr(self, field):
info[field] = getattr(self, field)
with open(osp.join(self.path, 'statis.pkl'), 'wb') as f:
pickle.dump(info, f)
def load_statis_info(self):
with open(osp.join(self.path, 'statis.pkl'), 'rb') as f:
info = pickle.load(f)
for field in info['fields']:
if field in info:
setattr(self, field, info[field])
def split(self, val_split, test_split):
all_files = list(self.file_info.keys())
random.shuffle(all_files)
val_num = int(len(all_files) * val_split)
test_num = int(len(all_files) * test_split)
train_num = len(all_files) - val_num - test_num
        assert train_num > 0, "The number of training samples must be greater than 0"
        assert val_num > 0, "The number of validation samples must be greater than 0"
self.train_files = all_files[:train_num]
self.val_files = all_files[train_num:train_num + val_num]
self.test_files = all_files[train_num + val_num:]
self.train_set = set(self.train_files)
self.val_set = set(self.val_files)
self.test_set = set(self.test_files)
for label, file_list in self.label_info.items():
self.class_train_file_list[label] = list()
self.class_val_file_list[label] = list()
self.class_test_file_list[label] = list()
for f in file_list:
if f in self.test_set:
self.class_test_file_list[label].append(f)
if f in self.val_set:
self.class_val_file_list[label].append(f)
if f in self.train_set:
self.class_train_file_list[label].append(f)
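# --- Usage sketch (illustrative only; the path and file names are hypothetical) ---
# Shows how split() partitions file_info into train/val/test and mirrors the
# same partition in the per-class file lists.
def _split_demo():
    ds = DatasetBase(dataset_id=0, path='/tmp/demo_dataset')
    ds.file_info = {'img_{}.jpg'.format(i): None for i in range(10)}
    ds.label_info = {'cat': list(ds.file_info.keys())}
    ds.split(val_split=0.2, test_split=0.1)
    # Expected partition sizes: 7 train / 2 val / 1 test.
    return len(ds.train_files), len(ds.val_files), len(ds.test_files)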
|
cgi-bin/paint_x2_unet/unet.py
|
ohong/pretty-whale
| 2,990 |
72418
|
#!/usr/bin/env python
import numpy as np
import math
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import cuda, optimizers, serializers, Variable
from chainer import function
from chainer.utils import type_check
class UNET(chainer.Chain):
def __init__(self):
super(UNET, self).__init__(
c0=L.Convolution2D(4, 32, 3, 1, 1),
c1=L.Convolution2D(32, 64, 4, 2, 1),
c2=L.Convolution2D(64, 64, 3, 1, 1),
c3=L.Convolution2D(64, 128, 4, 2, 1),
c4=L.Convolution2D(128, 128, 3, 1, 1),
c5=L.Convolution2D(128, 256, 4, 2, 1),
c6=L.Convolution2D(256, 256, 3, 1, 1),
c7=L.Convolution2D(256, 512, 4, 2, 1),
c8=L.Convolution2D(512, 512, 3, 1, 1),
dc8=L.Deconvolution2D(1024, 512, 4, 2, 1),
dc7=L.Convolution2D(512, 256, 3, 1, 1),
dc6=L.Deconvolution2D(512, 256, 4, 2, 1),
dc5=L.Convolution2D(256, 128, 3, 1, 1),
dc4=L.Deconvolution2D(256, 128, 4, 2, 1),
dc3=L.Convolution2D(128, 64, 3, 1, 1),
dc2=L.Deconvolution2D(128, 64, 4, 2, 1),
dc1=L.Convolution2D(64, 32, 3, 1, 1),
dc0=L.Convolution2D(64, 3, 3, 1, 1),
bnc0=L.BatchNormalization(32),
bnc1=L.BatchNormalization(64),
bnc2=L.BatchNormalization(64),
bnc3=L.BatchNormalization(128),
bnc4=L.BatchNormalization(128),
bnc5=L.BatchNormalization(256),
bnc6=L.BatchNormalization(256),
bnc7=L.BatchNormalization(512),
bnc8=L.BatchNormalization(512),
bnd8=L.BatchNormalization(512),
bnd7=L.BatchNormalization(256),
bnd6=L.BatchNormalization(256),
bnd5=L.BatchNormalization(128),
bnd4=L.BatchNormalization(128),
bnd3=L.BatchNormalization(64),
bnd2=L.BatchNormalization(64),
bnd1=L.BatchNormalization(32)
            # l = L.Linear(3*3*256, 2)
)
def calc(self, x):
e0 = F.relu(self.bnc0(self.c0(x)))
e1 = F.relu(self.bnc1(self.c1(e0)))
e2 = F.relu(self.bnc2(self.c2(e1)))
del e1
e3 = F.relu(self.bnc3(self.c3(e2)))
e4 = F.relu(self.bnc4(self.c4(e3)))
del e3
e5 = F.relu(self.bnc5(self.c5(e4)))
e6 = F.relu(self.bnc6(self.c6(e5)))
del e5
e7 = F.relu(self.bnc7(self.c7(e6)))
e8 = F.relu(self.bnc8(self.c8(e7)))
d8 = F.relu(self.bnd8(self.dc8(F.concat([e7, e8]))))
del e7, e8
d7 = F.relu(self.bnd7(self.dc7(d8)))
del d8
d6 = F.relu(self.bnd6(self.dc6(F.concat([e6, d7]))))
del d7, e6
d5 = F.relu(self.bnd5(self.dc5(d6)))
del d6
d4 = F.relu(self.bnd4(self.dc4(F.concat([e4, d5]))))
del d5, e4
d3 = F.relu(self.bnd3(self.dc3(d4)))
del d4
d2 = F.relu(self.bnd2(self.dc2(F.concat([e2, d3]))))
del d3, e2
d1 = F.relu(self.bnd1(self.dc1(d2)))
del d2
d0 = self.dc0(F.concat([e0, d1]))
return d0
def __call__(self, x, t):
h = self.calc(x)
loss = F.mean_absolute_error(h, t)
chainer.report({'loss': loss}, self)
return loss
class DIS(chainer.Chain):
def __init__(self):
super(DIS, self).__init__(
c1=L.Convolution2D(3, 32, 4, 2, 1),
c2=L.Convolution2D(32, 32, 3, 1, 1),
c3=L.Convolution2D(32, 64, 4, 2, 1),
c4=L.Convolution2D(64, 64, 3, 1, 1),
c5=L.Convolution2D(64, 128, 4, 2, 1),
c6=L.Convolution2D(128, 128, 3, 1, 1),
c7=L.Convolution2D(128, 256, 4, 2, 1),
l8l=L.Linear(None, 2,
initialW=chainer.initializers.HeNormal(
math.sqrt(0.02 * math.sqrt(8 * 8 * 256) / 2))),
bnc1=L.BatchNormalization(32),
bnc2=L.BatchNormalization(32),
bnc3=L.BatchNormalization(64),
bnc4=L.BatchNormalization(64),
bnc5=L.BatchNormalization(128),
bnc6=L.BatchNormalization(128),
bnc7=L.BatchNormalization(256),
)
def calc(self, x):
h = F.relu(self.bnc1(self.c1(x)))
h = F.relu(self.bnc2(self.c2(h)))
h = F.relu(self.bnc3(self.c3(h)))
h = F.relu(self.bnc4(self.c4(h)))
h = F.relu(self.bnc5(self.c5(h)))
h = F.relu(self.bnc6(self.c6(h)))
h = F.relu(self.bnc7(self.c7(h)))
return self.l8l(h)
def __call__(self, x, t):
h = self.calc(x)
loss = F.softmax_cross_entropy(h, t)
#chainer.report({'loss': loss }, self)
return loss
|
ocs_ci/utility/ocs_build.py
|
annagitel/ocs-ci
| 130 |
72456
|
"""
This module is used to return the latest OCS internal build for the specified
OCS version.
"""
import argparse
import os
from ocs_ci.framework import config
from ocs_ci.framework.main import load_config
from ocs_ci.ocs.constants import OCS_VERSION_CONF_DIR
from ocs_ci.utility.utils import get_latest_ds_olm_tag
def init_arg_parser():
"""
Init argument parser.
Returns:
object: Parsed arguments
"""
parser = argparse.ArgumentParser(description="OCS Internal build version")
parser.add_argument(
"--ocs-version",
action="store",
required=False,
default=config.ENV_DATA["ocs_version"],
help=f"""
OCS version in format X.Y (e.g. 4.7). If not specified, the default
value {config.ENV_DATA['ocs_version']} will be used.
""",
)
parser.add_argument(
"--image",
action="store_true",
required=False,
default=False,
help="If used the whole image of OCS internal build will be returned",
)
return parser.parse_args()
def main():
"""
Main function
"""
    args = init_arg_parser()
    ocs_version = args.ocs_version
    image = args.image
config.ENV_DATA["ocs_version"] = ocs_version
version_config_file = os.path.join(OCS_VERSION_CONF_DIR, f"ocs-{ocs_version}.yaml")
load_config([version_config_file])
latest_ocs_build = get_latest_ds_olm_tag()
if image:
base_image = config.DEPLOYMENT["default_ocs_registry_image"].split(":")[0]
print(f"{base_image}:{latest_ocs_build}")
return
print(latest_ocs_build)
if __name__ == "__main__":
main()
|
nova/notifications/objects/compute_task.py
|
zjzh/nova
| 1,874 |
72477
|
<reponame>zjzh/nova<filename>nova/notifications/objects/compute_task.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.notifications.objects import base
from nova.notifications.objects import request_spec as reqspec_payload
from nova.objects import base as nova_base
from nova.objects import fields
@nova_base.NovaObjectRegistry.register_notification
class ComputeTaskPayload(base.NotificationPayloadBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'instance_uuid': fields.UUIDField(),
# There are some cases that request_spec is None.
# e.g. Old instances can still have no RequestSpec object
# attached to them.
'request_spec': fields.ObjectField('RequestSpecPayload',
nullable=True),
'state': fields.InstanceStateField(nullable=True),
'reason': fields.ObjectField('ExceptionPayload')
}
def __init__(self, instance_uuid, request_spec, state, reason):
super(ComputeTaskPayload, self).__init__()
self.instance_uuid = instance_uuid
self.request_spec = reqspec_payload.RequestSpecPayload(
request_spec) if request_spec is not None else None
self.state = state
self.reason = reason
@base.notification_sample('compute_task-build_instances-error.json')
@base.notification_sample('compute_task-migrate_server-error.json')
@base.notification_sample('compute_task-rebuild_server-error.json')
@nova_base.NovaObjectRegistry.register_notification
class ComputeTaskNotification(base.NotificationBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('ComputeTaskPayload')
}
|
flatlib/chart.py
|
D-Vaillant/flatlib
| 196 |
72486
|
<reponame>D-Vaillant/flatlib
"""
This file is part of flatlib - (C) FlatAngle
Author: <NAME> (<EMAIL>)
This module implements a class to represent an
astrology Chart. It provides methods to handle
the chart, as well as three relevant properties:
- objects: a list with the chart's objects
- houses: a list with the chart's houses
- angles: a list with the chart's angles
Since houses 1 and 10 may not match the Asc and
MC in some house systems, the Chart class
includes the list of angles. The angles should be
used when you want to deal with the angles' longitudes.
There are also methods to access fixed stars.
"""
from . import angle
from . import const
from . import utils
from .ephem import ephem
from .datetime import Datetime
# ------------------ #
# Chart Class #
# ------------------ #
class Chart:
""" This class represents an astrology chart. """
def __init__(self, date, pos, **kwargs):
""" Creates an astrology chart for a given
date and location.
Optional arguments are:
- hsys: house system
- IDs: list of objects to include
"""
# Handle optional arguments
hsys = kwargs.get('hsys', const.HOUSES_DEFAULT)
IDs = kwargs.get('IDs', const.LIST_OBJECTS_TRADITIONAL)
self.date = date
self.pos = pos
self.hsys = hsys
self.objects = ephem.getObjectList(IDs, date, pos)
self.houses, self.angles = ephem.getHouses(date, pos, hsys)
def copy(self):
""" Returns a deep copy of this chart. """
chart = Chart.__new__(Chart)
chart.date = self.date
chart.pos = self.pos
chart.hsys = self.hsys
chart.objects = self.objects.copy()
chart.houses = self.houses.copy()
chart.angles = self.angles.copy()
return chart
# === Properties === #
def getObject(self, ID):
""" Returns an object from the chart. """
return self.objects.get(ID)
def getHouse(self, ID):
""" Returns an house from the chart. """
return self.houses.get(ID)
def getAngle(self, ID):
""" Returns an angle from the chart. """
return self.angles.get(ID)
def get(self, ID):
""" Returns an object, house or angle
from the chart.
"""
if ID.startswith('House'):
return self.getHouse(ID)
elif ID in const.LIST_ANGLES:
return self.getAngle(ID)
else:
return self.getObject(ID)
# === Fixed stars === #
# The computation of fixed stars is inefficient,
# so the access must be made directly to the
# ephemeris only when needed.
def getFixedStar(self, ID):
""" Returns a fixed star from the ephemeris. """
return ephem.getFixedStar(ID, self.date)
def getFixedStars(self):
""" Returns a list with all fixed stars. """
IDs = const.LIST_FIXED_STARS
return ephem.getFixedStarList(IDs, self.date)
# === Houses and angles === #
def isHouse1Asc(self):
""" Returns true if House1 is the same as the Asc. """
house1 = self.getHouse(const.HOUSE1)
asc = self.getAngle(const.ASC)
dist = angle.closestdistance(house1.lon, asc.lon)
return abs(dist) < 0.0003 # 1 arc-second
def isHouse10MC(self):
""" Returns true if House10 is the same as the MC. """
house10 = self.getHouse(const.HOUSE10)
mc = self.getAngle(const.MC)
dist = angle.closestdistance(house10.lon, mc.lon)
return abs(dist) < 0.0003 # 1 arc-second
# === Other properties === #
def isDiurnal(self):
""" Returns true if this chart is diurnal. """
sun = self.getObject(const.SUN)
mc = self.getAngle(const.MC)
        # Convert to equatorial coordinates and check if the
        # sun is above the horizon.
lat = self.pos.lat
sunRA, sunDecl = utils.eqCoords(sun.lon, sun.lat)
mcRA, mcDecl = utils.eqCoords(mc.lon, 0)
return utils.isAboveHorizon(sunRA, sunDecl, mcRA, lat)
def getMoonPhase(self):
""" Returns the phase of the moon. """
sun = self.getObject(const.SUN)
moon = self.getObject(const.MOON)
dist = angle.distance(sun.lon, moon.lon)
if dist < 90:
return const.MOON_FIRST_QUARTER
elif dist < 180:
return const.MOON_SECOND_QUARTER
elif dist < 270:
return const.MOON_THIRD_QUARTER
else:
return const.MOON_LAST_QUARTER
# === Solar returns === #
def solarReturn(self, year):
""" Returns this chart's solar return for a
given year.
"""
sun = self.getObject(const.SUN)
date = Datetime('{0}/01/01'.format(year),
'00:00',
self.date.utcoffset)
srDate = ephem.nextSolarReturn(date, sun.lon)
return Chart(srDate, self.pos, hsys=self.hsys)
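# --- Usage sketch (illustrative values; assumes flatlib's GeoPos helper and an
# available ephemeris). Builds a chart and reads a few derived properties. ---
def _chart_demo():
    from .geopos import GeoPos  # flatlib's geographic position helper
    date = Datetime('2015/03/13', '17:00', '+00:00')
    pos = GeoPos('38n32', '8w54')
    chart = Chart(date, pos)
    sun = chart.get(const.SUN)
    return sun.lon, chart.isDiurnal(), chart.getMoonPhase()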
|
tensorflow_toolkit/action_detection/action_detection/postprocessing/detection_output.py
|
morkovka1337/openvino_training_extensions
| 256 |
72503
|
<reponame>morkovka1337/openvino_training_extensions<filename>tensorflow_toolkit/action_detection/action_detection/postprocessing/detection_output.py<gh_stars>100-1000
# Copyright (C) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
from collections import namedtuple
import numpy as np
from action_detection.postprocessing.metrics import matrix_iou
Detections = namedtuple('Detections', 'loc, scores')
Actions = namedtuple('Actions', 'loc, scores, action_labels, action_scores, id')
def nms(input_bboxes, input_scores, threshold, keep_top_k, min_score=0.01):
"""Carry out default NMS algorithm over the input boxes.
:param input_bboxes: Input boxes
:param input_scores: Detection scores of boxes
:param threshold: Min IoU value to merge boxes
:param keep_top_k: Max number of boxes to output
:param min_score: Min score value to output box
:return: Filtered box IDs
"""
if len(input_bboxes) == 0:
return []
if len(input_bboxes) > keep_top_k:
indices = np.argsort(-input_scores)[:keep_top_k]
scores = input_scores[indices]
bboxes = input_bboxes[indices]
else:
scores = np.copy(input_scores)
indices = np.arange(len(scores))
bboxes = input_bboxes
similarity_matrix = matrix_iou(bboxes, bboxes)
out_ids = []
for _ in xrange(len(bboxes)):
bbox_id = np.argmax(scores)
bbox_score = scores[bbox_id]
if bbox_score < min_score:
break
out_ids.append(indices[bbox_id])
scores[bbox_id] = 0.0
iou_values = similarity_matrix[bbox_id]
scores[iou_values > threshold] = 0.0
return np.array(out_ids, dtype=np.int32)
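# --- Usage sketch (toy values): two heavily overlapping boxes plus a disjoint
# one; with an IoU threshold of 0.5 the second box is expected to be dropped.
# Box corner layout follows whatever format matrix_iou expects. ---
def _nms_demo():
    boxes = np.array([[0.0, 0.0, 1.0, 1.0],
                      [0.05, 0.05, 1.0, 1.0],
                      [2.0, 2.0, 3.0, 3.0]], dtype=np.float32)
    scores = np.array([0.9, 0.8, 0.7], dtype=np.float32)
    return nms(boxes, scores, threshold=0.5, keep_top_k=10)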
def soft_nms(input_bboxes, input_scores, keep_top_k, sigma, min_score):
"""Carry out Soft-NMS algorithm over the input boxes.
:param input_bboxes: Input boxes
:param input_scores: Detection scores of boxes
:param keep_top_k: Max number of boxes to output
:param sigma: Algorithm parameter
:param min_score: Min score value to output box
:return: Filtered box IDs
"""
if len(input_bboxes) == 0:
return [], []
if len(input_bboxes) > keep_top_k:
indices = np.argsort(-input_scores)[:keep_top_k]
scores = input_scores[indices]
bboxes = input_bboxes[indices]
else:
scores = np.copy(input_scores)
indices = np.arange(len(scores))
bboxes = input_bboxes
similarity_matrix = matrix_iou(bboxes, bboxes)
out_ids = []
out_scores = []
for _ in xrange(len(bboxes)):
bbox_id = np.argmax(scores)
bbox_score = scores[bbox_id]
if bbox_score < min_score:
break
out_ids.append(indices[bbox_id])
out_scores.append(bbox_score)
scores[bbox_id] = 0.0
iou_values = similarity_matrix[bbox_id]
scores *= np.exp(np.negative(np.square(iou_values) / sigma))
return np.array(out_ids, dtype=np.int32), np.array(out_scores, dtype=np.float32)
def ssd_detection_output(batch_bboxes, batch_conf, bg_class, min_conf=0.01, out_top_k=200,
nms_overlap=0.45, nms_top_k=400):
"""Process network output to translate it into the bboxes with labels.
:param batch_bboxes: All bboxes
:param batch_conf: All detection scores
:param bg_class: ID of background class
:param min_conf: Min score value to output box
:param out_top_k: Max number of boxes per image to output
:param nms_overlap: NMS parameter
:param nms_top_k: NMS parameter
:return: List of detections
"""
assert batch_bboxes.shape[:2] == batch_conf.shape[:2]
assert batch_bboxes.shape[2] == 4
num_classes = batch_conf.shape[-1]
assert num_classes > 1
all_detections = []
for sample_id in xrange(batch_bboxes.shape[0]):
sample_bboxes = batch_bboxes[sample_id]
sample_conf = batch_conf[sample_id]
all_sample_detections = []
for label in xrange(num_classes):
if label == bg_class:
continue
sample_scores = sample_conf[:, label]
valid_mask = sample_scores > min_conf
# noinspection PyTypeChecker
if np.sum(valid_mask) == 0:
continue
valid_bboxes = sample_bboxes[valid_mask]
valid_scores = sample_scores[valid_mask]
merged_ids = nms(valid_bboxes, valid_scores, nms_overlap, nms_top_k)
if len(merged_ids) > 0:
out_bboxes = valid_bboxes[merged_ids].reshape([-1, 4])
out_scores = valid_scores[merged_ids].reshape([-1])
for i in xrange(len(out_scores)):
all_sample_detections.append((out_bboxes[i], label, out_scores[i]))
if len(all_sample_detections) > out_top_k:
all_sample_detections.sort(key=lambda tup: tup[2], reverse=True)
all_sample_detections = all_sample_detections[:out_top_k]
sample_detections = {}
for bbox, label, score in all_sample_detections:
if label not in sample_detections:
sample_detections[label] = {'loc': [bbox],
'scores': [score]}
else:
last_data = sample_detections[label]
last_data['loc'].append(bbox)
last_data['scores'].append(score)
out_sample_detections = {label: Detections(loc=np.stack(sample_detections[label]['loc']),
scores=np.stack(sample_detections[label]['scores']))
for label in sample_detections}
all_detections.append(out_sample_detections)
return all_detections
def ssd_warp_gt(batch_bboxes, batch_labels, bg_class):
"""Translates Ground truth boxes and labels into the internal format.
:param batch_bboxes: Bbox coordinates
:param batch_labels: Bbox labels
:param bg_class: ID of background label
:return: List of boxes
"""
assert batch_bboxes.shape[0] == batch_labels.shape[0]
all_gt = []
for sample_id in xrange(batch_bboxes.shape[0]):
sample_bboxes = batch_bboxes[sample_id]
sample_labels = batch_labels[sample_id]
valid_mask = np.logical_and(sample_labels >= 0, sample_labels != bg_class)
if np.sum(valid_mask) == 0:
all_gt.append([])
continue
valid_bboxes = sample_bboxes[valid_mask]
valid_labels = sample_labels[valid_mask]
unique_labels = np.unique(valid_labels)
sample_detections = {}
for label in unique_labels:
label_mask = valid_labels == label
class_bboxes = valid_bboxes[label_mask]
sample_detections[label] = Detections(loc=class_bboxes, scores=None)
all_gt.append(sample_detections)
return all_gt
def action_detection_output(batch_bboxes, batch_det_conf, batch_action_conf, bg_class,
min_det_conf=0.01, min_action_conf=0.01, out_top_k=400,
nms_top_k=400, nms_sigma=0.6, do_nms=True):
"""Process network output to translate it into the bboxes with detection scores and action labels.
:param batch_bboxes: All bboxes
:param batch_det_conf: All detection scores
:param batch_action_conf: All action scores
:param bg_class: ID of background class
:param min_det_conf: Min score value to output box
:param min_action_conf: Min score value for action confidence
:param out_top_k: Max number of boxes per image to output
:param nms_top_k: NMS parameter
:param nms_sigma: NMS parameter
:param do_nms: Whether to run NMS algorithm
:return: List of detections
"""
assert batch_bboxes.shape[:2] == batch_det_conf.shape[:2]
assert batch_bboxes.shape[:2] == batch_action_conf.shape[:2]
assert batch_bboxes.shape[2] == 4
num_det_classes = batch_det_conf.shape[-1]
assert num_det_classes == 2
num_action_classes = batch_action_conf.shape[-1]
assert num_action_classes > 1
det_class = (bg_class + 1) % 2
all_detections = []
for sample_id in xrange(batch_bboxes.shape[0]):
sample_bboxes = batch_bboxes[sample_id]
sample_det_scores = batch_det_conf[sample_id, :, det_class]
sample_action_conf = batch_action_conf[sample_id]
valid_mask = sample_det_scores > min_det_conf
# noinspection PyTypeChecker
if np.sum(valid_mask) == 0:
all_detections.append({det_class: []})
continue
valid_bboxes = sample_bboxes[valid_mask]
valid_det_scores = sample_det_scores[valid_mask]
valid_det_conf = sample_action_conf[valid_mask]
if do_nms:
filtered_ids, filtered_scores = soft_nms(valid_bboxes, valid_det_scores, nms_top_k, nms_sigma, min_det_conf)
else:
filtered_scores = np.copy(valid_det_scores)
filtered_ids = np.argsort(-filtered_scores)
if len(filtered_ids) > 0:
out_bboxes = valid_bboxes[filtered_ids].reshape([-1, 4])
out_det_scores = filtered_scores.reshape([-1])
out_action_conf = valid_det_conf[filtered_ids].reshape([-1, num_action_classes])
if 0 < out_top_k < len(out_det_scores):
out_bboxes = out_bboxes[:out_top_k]
out_det_scores = out_det_scores[:out_top_k]
out_action_conf = out_action_conf[:out_top_k]
out_action_label = np.argmax(out_action_conf, axis=-1)
out_action_score = np.max(out_action_conf, axis=-1)
if min_action_conf is not None and min_action_conf > 0.0:
out_action_label[out_action_score < min_action_conf] = 0
sample_detections = Actions(loc=out_bboxes,
scores=out_det_scores,
action_labels=out_action_label,
action_scores=out_action_score,
id=None)
all_detections.append({det_class: sample_detections})
else:
all_detections.append({det_class: []})
continue
return all_detections
def action_warp_gt(batch_bboxes, batch_labels, bg_class, batch_track_ids=None):
"""Translates Ground truth boxes and actions into the internal format.
:param batch_bboxes: Bbox coordinates
:param batch_labels: Bbox labels
:param bg_class: ID of background label
:param batch_track_ids: ID of track in a batch
:return: List of boxes
"""
assert batch_bboxes.shape[0] == batch_labels.shape[0]
det_class = (bg_class + 1) % 2
all_gt = []
for sample_id in xrange(batch_bboxes.shape[0]):
sample_bboxes = batch_bboxes[sample_id]
sample_labels = batch_labels[sample_id]
sample_track_ids = batch_track_ids[sample_id] if batch_track_ids is not None else None
valid_mask = sample_labels >= 0
# noinspection PyTypeChecker
if np.sum(valid_mask) == 0:
all_gt.append([])
continue
valid_bboxes = sample_bboxes[valid_mask]
valid_labels = sample_labels[valid_mask]
valid_track_ids = sample_track_ids[valid_mask] if sample_track_ids is not None else None
sample_detections = {det_class: Actions(loc=valid_bboxes,
scores=None,
action_labels=valid_labels,
action_scores=None,
id=valid_track_ids)}
all_gt.append(sample_detections)
return all_gt
|
convlab/human_eval/bot_server.py
|
ngduyanhece/ConvLab
| 405 |
72553
|
<reponame>ngduyanhece/ConvLab
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import sys
sys.path.append('../../')
from convlab.agent import Body
from convlab.agent import DialogAgent
from convlab.spec import spec_util
from convlab.env import make_env
import numpy as np
import copy
from flask import Flask, request, jsonify
from queue import PriorityQueue
from threading import Thread
import time
rgi_queue = PriorityQueue(maxsize=0)
rgo_queue = PriorityQueue(maxsize=0)
app = Flask(__name__)
os.environ['lab_mode'] = 'eval'
spec_file = sys.argv[1]
spec_name = sys.argv[2]
lab_mode = sys.argv[3]
if '@' in lab_mode:
lab_mode, prename = lab_mode.split('@')
spec = spec_util.get_eval_spec(spec_file, spec_name, prename)
else:
spec = spec_util.get(spec_file, spec_name)
# # lab_mode, prename = sys.argv[3].split('@')
# spec = spec_util.get_eval_spec(spec_file, prename)
spec = spec_util.override_eval_spec(spec)
agent_spec = spec['agent'][0]
env = make_env(spec)
body = Body(env, spec['agent'])
agent = DialogAgent(spec, body)
# last_obs = 'hi'
# agent.reset(last_obs)
# obs = 'hi can you find me a hotel in the west?'
# action = agent.act(obs)
# next_obs = 'we have six people'
# agent.update(obs, action, 0, next_obs, 0)
# action = agent.act(next_obs)
@app.route('/', methods=['GET', 'POST'])
def process():
    try:
        in_request = request.json
        print(in_request)
    except Exception:
        return "invalid input: {}".format(request.data)
rgi_queue.put((time.time(), in_request))
rgi_queue.join()
output = rgo_queue.get()
print(output['response'])
rgo_queue.task_done()
# return jsonify({'response': response})
return jsonify(output)
def generate_response(in_queue, out_queue):
while True:
# pop input
last_action = 'null'
in_request = in_queue.get()
obs = in_request['input']
if in_request['agent_state'] == {}:
agent.reset(obs)
else:
encoded_state, dst_state, last_action = in_request['agent_state']
agent.body.encoded_state = np.asarray(encoded_state) if isinstance(encoded_state, list) else encoded_state
agent.dst.state = copy.deepcopy(dst_state)
agent.update(obs, last_action, 0, obs, 0)
try:
action = agent.act(obs)
encoded_state = agent.body.encoded_state.tolist() if isinstance(agent.body.encoded_state,
np.ndarray) else agent.body.encoded_state
dst_state = copy.deepcopy(agent.dst.state)
except Exception as e:
print('agent error', e)
try:
if action == '':
response = 'Sorry I do not understand, can you paraphrase?'
else:
response = action
except Exception as e:
print('Response generation error', e)
response = 'What did you say?'
last_action = action
out_queue.put({'response': response, 'agent_state': (encoded_state, dst_state, last_action)})
in_queue.task_done()
out_queue.join()
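# --- Client-side usage sketch (illustrative only; assumes this server is
# running locally on port 10004 and that the `requests` package is installed).
# The returned agent_state must be echoed back to keep dialog context. ---
def _client_demo():
    import requests
    payload = {'input': 'i need a cheap hotel in the north', 'agent_state': {}}
    reply = requests.post('http://localhost:10004/', json=payload).json()
    payload = {'input': 'book it for 2 nights please',
               'agent_state': reply['agent_state']}
    return requests.post('http://localhost:10004/', json=payload).json()['response']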
if __name__ == '__main__':
worker = Thread(target=generate_response, args=(rgi_queue, rgo_queue,))
    worker.daemon = True
worker.start()
app.run(host='0.0.0.0', port=10004)
|
jaxrl/agents/__init__.py
|
HassamSheikh/jaxrl
| 157 |
72574
|
from jaxrl.agents.awac.awac_learner import AWACLearner
from jaxrl.agents.bc.bc_learner import BCLearner
from jaxrl.agents.ddpg.ddpg_learner import DDPGLearner
from jaxrl.agents.drq.drq_learner import DrQLearner
from jaxrl.agents.sac.sac_learner import SACLearner
from jaxrl.agents.sac_v1.sac_v1_learner import SACV1Learner
|
tensorflow_graphics/geometry/transformation/rotation_matrix_2d.py
|
sarvex/graphics
| 2,759 |
72576
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""This module implements 2d rotation matrix functionalities.
Given an angle of rotation $$\theta$$ a 2d rotation matrix can be expressed as
$$
\mathbf{R} =
\begin{bmatrix}
\cos(\theta) & -\sin(\theta) \\
\sin(\theta) & \cos(\theta)
\end{bmatrix}.
$$
More details about rotation matrices can be found on [this page.]
(https://en.wikipedia.org/wiki/Rotation_matrix)
Note: This matrix rotates points in the $$xy$$-plane counterclockwise.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional
from six.moves import range
import tensorflow as tf
from tensorflow_graphics.geometry.transformation import rotation_matrix_common
from tensorflow_graphics.util import export_api
from tensorflow_graphics.util import shape
from tensorflow_graphics.util import type_alias
def from_euler(angle: type_alias.TensorLike,
name: str = "rotation_matrix_2d_from_euler_angle") -> tf.Tensor:
r"""Converts an angle to a 2d rotation matrix.
Converts an angle $$\theta$$ to a 2d rotation matrix following the equation
$$
\mathbf{R} =
\begin{bmatrix}
\cos(\theta) & -\sin(\theta) \\
\sin(\theta) & \cos(\theta)
\end{bmatrix}.
$$
Note:
The resulting matrix rotates points in the $$xy$$-plane counterclockwise.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
angle: A tensor of shape `[A1, ..., An, 1]`, where the last dimension
represents an angle in radians.
name: A name for this op that defaults to
"rotation_matrix_2d_from_euler_angle".
Returns:
    A tensor of shape `[A1, ..., An, 2, 2]`, where the last two dimensions
    represent a 2d rotation matrix.
Raises:
ValueError: If the shape of `angle` is not supported.
"""
with tf.name_scope(name):
angle = tf.convert_to_tensor(value=angle)
shape.check_static(
tensor=angle, tensor_name="angle", has_dim_equals=(-1, 1))
cos_angle = tf.cos(angle)
sin_angle = tf.sin(angle)
matrix = tf.stack((cos_angle, -sin_angle,
sin_angle, cos_angle),
axis=-1) # pyformat: disable
output_shape = tf.concat((tf.shape(input=angle)[:-1], (2, 2)), axis=-1)
return tf.reshape(matrix, shape=output_shape)
def from_euler_with_small_angles_approximation(
angles: type_alias.TensorLike,
name: str = "rotation_matrix_2d_from_euler_with_small_angles_approximation"
) -> tf.Tensor:
r"""Converts an angle to a 2d rotation matrix under the small angle assumption.
Under the small angle assumption, $$\sin(x)$$ and $$\cos(x)$$ can be
approximated by their second order Taylor expansions, where
$$\sin(x) \approx x$$ and $$\cos(x) \approx 1 - \frac{x^2}{2}$$. The 2d
rotation matrix will then be approximated as
$$
\mathbf{R} =
\begin{bmatrix}
1.0 - 0.5\theta^2 & -\theta \\
\theta & 1.0 - 0.5\theta^2
\end{bmatrix}.
$$
In the current implementation, the smallness of the angles is not verified.
Note:
The resulting matrix rotates points in the $$xy$$-plane counterclockwise.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
angles: A tensor of shape `[A1, ..., An, 1]`, where the last dimension
represents a small angle in radians.
name: A name for this op that defaults to
"rotation_matrix_2d_from_euler_with_small_angles_approximation".
Returns:
    A tensor of shape `[A1, ..., An, 2, 2]`, where the last two dimensions
    represent a 2d rotation matrix.
Raises:
ValueError: If the shape of `angle` is not supported.
"""
with tf.name_scope(name):
angles = tf.convert_to_tensor(value=angles)
shape.check_static(
tensor=angles, tensor_name="angles", has_dim_equals=(-1, 1))
cos_angle = 1.0 - 0.5 * angles * angles
sin_angle = angles
matrix = tf.stack((cos_angle, -sin_angle,
sin_angle, cos_angle),
axis=-1) # pyformat: disable
output_shape = tf.concat((tf.shape(input=angles)[:-1], (2, 2)), axis=-1)
return tf.reshape(matrix, shape=output_shape)
def inverse(matrix: type_alias.TensorLike,
name: str = "rotation_matrix_2d_inverse") -> tf.Tensor:
"""Computes the inverse of a 2D rotation matrix.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
matrix: A tensor of shape `[A1, ..., An, 2, 2]`, where the last two
dimensions represent a 2d rotation matrix.
name: A name for this op that defaults to "rotation_matrix_2d_inverse".
Returns:
    A tensor of shape `[A1, ..., An, 2, 2]`, where the last two dimensions
    represent a 2d rotation matrix.
Raises:
ValueError: If the shape of `matrix` is not supported.
"""
with tf.name_scope(name):
matrix = tf.convert_to_tensor(value=matrix)
shape.check_static(
tensor=matrix,
tensor_name="matrix",
has_rank_greater_than=1,
has_dim_equals=((-2, 2), (-1, 2)))
ndims = matrix.shape.ndims
perm = list(range(ndims - 2)) + [ndims - 1, ndims - 2]
return tf.transpose(a=matrix, perm=perm)
def is_valid(matrix: type_alias.TensorLike,
atol: type_alias.Float = 1e-3,
name: str = "rotation_matrix_2d_is_valid") -> tf.Tensor:
r"""Determines if a matrix is a valid rotation matrix.
Determines if a matrix $$\mathbf{R}$$ is a valid rotation matrix by checking
that $$\mathbf{R}^T\mathbf{R} = \mathbf{I}$$ and $$\det(\mathbf{R}) = 1$$.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
matrix: A tensor of shape `[A1, ..., An, 2, 2]`, where the last two
dimensions represent a 2d rotation matrix.
atol: The absolute tolerance parameter.
name: A name for this op that defaults to "rotation_matrix_2d_is_valid".
Returns:
A tensor of type `bool` and shape `[A1, ..., An, 1]` where False indicates
that the input is not a valid rotation matrix.
"""
with tf.name_scope(name):
matrix = tf.convert_to_tensor(value=matrix)
shape.check_static(
tensor=matrix,
tensor_name="matrix",
has_rank_greater_than=1,
has_dim_equals=((-2, 2), (-1, 2)))
return rotation_matrix_common.is_valid(matrix, atol)
def rotate(point: type_alias.TensorLike,
matrix: type_alias.TensorLike,
name: str = "rotation_matrix_2d_rotate") -> tf.Tensor:
"""Rotates a 2d point using a 2d rotation matrix.
Note:
In the following, A1 to An are optional batch dimensions, which must be
identical.
Args:
point: A tensor of shape `[A1, ..., An, 2]`, where the last dimension
represents a 2d point.
matrix: A tensor of shape `[A1, ..., An, 2, 2]`, where the last two
dimensions represent a 2d rotation matrix.
name: A name for this op that defaults to "rotation_matrix_2d_rotate".
Returns:
A tensor of shape `[A1, ..., An, 2]`, where the last dimension
represents a 2d point.
Raises:
ValueError: If the shape of `point` or `matrix` is not supported.
"""
with tf.name_scope(name):
point = tf.convert_to_tensor(value=point)
matrix = tf.convert_to_tensor(value=matrix)
shape.check_static(
tensor=point, tensor_name="point", has_dim_equals=(-1, 2))
shape.check_static(
tensor=matrix,
tensor_name="matrix",
has_rank_greater_than=1,
has_dim_equals=((-2, 2), (-1, 2)))
shape.compare_batch_dimensions(
tensors=(point, matrix),
tensor_names=("point", "matrix"),
last_axes=(-2, -3),
broadcast_compatible=True)
point = tf.expand_dims(point, axis=-1)
common_batch_shape = shape.get_broadcasted_shape(point.shape[:-2],
matrix.shape[:-2])
def dim_value(dim: Optional[int] = None) -> int:
return 1 if dim is None else tf.compat.dimension_value(dim)
common_batch_shape = [dim_value(dim) for dim in common_batch_shape]
point = tf.broadcast_to(point, common_batch_shape + [2, 1])
matrix = tf.broadcast_to(matrix, common_batch_shape + [2, 2])
rotated_point = tf.matmul(matrix, point)
return tf.squeeze(rotated_point, axis=-1)
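# --- Usage sketch (illustrative only): rotate the point (1, 0) by 90 degrees
# counterclockwise; the result is approximately (0, 1). ---
def _rotate_demo() -> tf.Tensor:
  import math
  angle = tf.constant([[math.pi / 2.0]])  # shape [1, 1]
  matrix = from_euler(angle)              # shape [1, 2, 2]
  point = tf.constant([[1.0, 0.0]])       # shape [1, 2]
  return rotate(point, matrix)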
# API contains all public functions and classes.
__all__ = export_api.get_functions_and_classes()
|
zerver/lib/scim_filter.py
|
fatihCinarKrtg/zulip
| 17,004 |
72592
|
from typing import List, Optional, Tuple
from django.http import HttpRequest
from django_scim.filters import UserFilterQuery
from zerver.lib.request import RequestNotes
# This is in a separate file due to circular import issues django-scim2 runs into
# when this is placed in zerver.lib.scim.
class ZulipUserFilterQuery(UserFilterQuery):
"""This class implements the filter functionality of SCIM2.
E.g. requests such as
/scim/v2/Users?filter=userName eq "<EMAIL>"
can be made to refer to resources via their properties.
This gets fairly complicated in its full scope
(https://datatracker.ietf.org/doc/html/rfc7644#section-3.4.2.2)
and django-scim2 implements an entire mechanism of converting
this SCIM2 filter syntax into SQL queries.
What we have to do in this class is to customize django-scim2 so
that it knows which SCIM attributes map to which UserProfile
fields. We can assume that get_extra_model_filter_kwargs_getter
has already ensured that we will only interact with non-bot user
accounts in the realm associated with this SCIM configuration.
"""
# attr_map describes which table.column the given SCIM2 User
# attributes refer to.
attr_map = {
# attr, sub attr, uri
("userName", None, None): "zerver_userprofile.delivery_email",
# We can only reasonably support filtering by name.formatted
# as UserProfile.full_name is its equivalent. We don't store
# first/last name information for UserProfile, so we can't
# support filtering based on name.givenName or name.familyName.
("name", "formatted", None): "zerver_userprofile.full_name",
("active", None, None): "zerver_userprofile.is_active",
}
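    # For illustration (hypothetical address): a request such as
    #   /scim/v2/Users?filter=userName eq "user@example.com"
    # is translated by django-scim2, using attr_map above, into SQL roughly of
    # the form
    #   WHERE zerver_userprofile.delivery_email = 'user@example.com'
    # with the realm and bot restrictions from get_extras() appended.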
# joins tells django-scim2 to always add the specified JOINS
# to the formed SQL queries. We need to JOIN the Realm table
# because we need to limit the results to the realm (subdomain)
# of the request.
joins = ("INNER JOIN zerver_realm ON zerver_realm.id = realm_id",)
@classmethod
def get_extras(cls, q: str, request: Optional[HttpRequest] = None) -> Tuple[str, List[object]]:
"""
Return extra SQL and params to be attached to end of current Query's
SQL and params. The return format matches the format that should be used
for providing raw SQL with params to Django's .raw():
https://docs.djangoproject.com/en/3.2/topics/db/sql/#passing-parameters-into-raw
Here we ensure that results are limited to the subdomain of the request
and also exclude bots, as we currently don't want them to be managed by SCIM2.
"""
assert request is not None
realm = RequestNotes.get_notes(request).realm
assert realm is not None
return "AND zerver_realm.id = %s AND zerver_userprofile.is_bot = False", [realm.id]
|
contrib/automation_tests/orbit_capture_loading.py
|
ioperations/orbit
| 1,847 |
72597
|
<gh_stars>1000+
"""
Copyright (c) 2021 The Orbit Authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
"""
from absl import app
from datetime import date, timedelta
from core.orbit_e2e import E2ETestSuite
from test_cases.capture_window import Capture, CheckTimers, CheckThreadStates, FilterTracks, \
ToggleCollapsedStateOfAllTracks, VerifyTracksExist
from test_cases.connection_window import ConnectToStadiaInstance, FilterAndSelectFirstProcess, LoadCapture, \
LoadLatestCapture
from test_cases.live_tab import AddIterator, VerifyFunctionCallCount
from test_cases.main_window import EndSession
"""Verify loading a capture in Orbit using pywinauto.
Before this script is run there needs to be a gamelet reserved and
"hello_ggp_standalone" has to be started. Further, Orbit needs to be started.
Also, the captures directory should be cleared.
The script requires absl and pywinauto. Since pywinauto requires the bitness of
the python installation to match the bitness of the program under test, it needs
to be run from 64 bit python.
This automation script covers a basic workflow:
- load an old, unsupported capture and verify this fails with a message
- load a supported capture
- verify that the scheduler track is present and contains timers
- verify that the frame track is present and contains timers
- verify that the tracks from manual instrumentation are present
- verify that the memory tracks are present
- verify that an iterator can be added to "TestFunc2"
- verify that "TestFunc2" was called exactly 1257 times
- take a capture and verify there is a corresponding capture in the latest captures list which contains the tracks
"""
def main(argv):
    # During the tests, we want to verify that captures get automatically saved. We will do so by filtering the recent
    # captures list with the current date (in addition to deleting old captures before this script runs). However,
    # if this code runs around midnight and we store the date string, the capture may actually be taken on the
    # next day. Therefore, we will also check for the next day.
today = date.today()
tomorrow = today + timedelta(days=1)
today_string = today.strftime("%Y_%m_%d")
tomorrow_string = tomorrow.strftime("%Y_%m_%d")
test_cases = [
LoadCapture(capture_file_path="testdata\\OrbitTest_1-64.orbit", expect_fail=True),
LoadCapture(capture_file_path="testdata\\OrbitTest_1-72.orbit"),
FilterTracks(filter_string="Scheduler", expected_track_count=1),
CheckTimers(track_name_filter='Scheduler*'),
FilterTracks(filter_string="Frame", expected_track_count=1),
CheckTimers(track_name_filter='Frame track*'), # Verify the frame track has timers
FilterTracks(filter_string="DynamicName_", expected_track_count=5),
FilterTracks(filter_string="_var", expected_track_count=6),
FilterTracks(filter_string="OrbitThread_", expected_track_count=1),
ToggleCollapsedStateOfAllTracks(),
CheckTimers(track_name_filter="OrbitThread_*"),
CheckThreadStates(track_name_filter='OrbitThread_*'),
FilterTracks(filter_string="ORBIT_ASYNC_TASKS", expected_track_count=1),
CheckTimers(track_name_filter="ORBIT_ASYNC_TASKS"),
FilterTracks(filter_string="ORBIT_START_ASYNC_TEST", expected_track_count=1),
CheckTimers(track_name_filter="ORBIT_START_ASYNC_TEST"),
FilterTracks(filter_string=""),
VerifyTracksExist(track_names=["Page*", "*System*", "*CGroup*"], allow_duplicates=True),
AddIterator(function_name="TestFunc2"),
VerifyFunctionCallCount(function_name="TestFunc2", min_calls=1257, max_calls=1257),
# Let's take a capture with the current version and verify this can be loaded
EndSession(),
ConnectToStadiaInstance(),
FilterAndSelectFirstProcess(process_filter="hello_ggp"),
Capture(),
VerifyTracksExist(track_names="hello_ggp_stand*", allow_duplicates=True),
EndSession(),
        # If we took the capture around midnight, we need to make sure to also look for the next day. Remember, the
        # strings get created before the tests run, so `today_string` might actually be from the day before the capture
        # gets auto-saved.
LoadLatestCapture(filter_strings=[f"hello_ggp_stand_{today_string}", f"hello_ggp_stand_{tomorrow_string}"]),
VerifyTracksExist(track_names="hello_ggp_stand*", allow_duplicates=True)
]
suite = E2ETestSuite(test_name="Capture Loading", test_cases=test_cases)
suite.execute()
if __name__ == '__main__':
app.run(main)
|
vega/algorithms/nas/modnas/backend/__init__.py
|
This-50m/vega
| 724 |
72599
|
<filename>vega/algorithms/nas/modnas/backend/__init__.py
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
import importlib
import traceback
from modnas.registry.backend import build
from . import predefined
from typing import Optional
_backend = None
_backend_keys = []
def use(backend: Optional[str], *args, imported=False, **kwargs) -> None:
"""Switch to backend by name."""
global _backend, _backend_keys
if backend == _backend or backend == 'none' or backend is None:
return
try:
if imported:
bk_mod = importlib.import_module(backend)
else:
bk_mod = build(backend, *args, **kwargs)
except ImportError:
traceback.print_exc()
return
bk_vars = vars(bk_mod)
bk_keys = bk_vars.keys()
ns = globals()
for k in _backend_keys:
ns.pop(k, None)
for k in bk_keys:
if k.startswith('__'):
continue
ns[k] = bk_vars[k]
_backend_keys = list(bk_keys)
_backend = backend
def backend():
"""Return name of current backend."""
return _backend
def is_backend(backend: str) -> bool:
"""Return if the current backend is the given one."""
return _backend == backend
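# --- Usage sketch (illustrative only; assumes a backend named 'torch' is
# registered under modnas.registry.backend). Switching imports the backend's
# symbols into this module's namespace. ---
def _backend_demo() -> bool:
    use('torch')
    return is_backend('torch') and backend() == 'torch'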
|
pycon/settings/base.py
|
Prakhyat-Srivastava/pycon
| 154 |
72601
|
<filename>pycon/settings/base.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
# base settings - imported by other settings files, then overridden
import copy
from datetime import timedelta
import os.path
import posixpath
import bleach
from django.core.urlresolvers import reverse_lazy
def env_or_default(NAME, default):
return os.environ.get(NAME, default)
CONFERENCE_YEAR = "2019"
# Top level of our source / repository
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir))
# Symposion package
PACKAGE_ROOT = os.path.join(PROJECT_ROOT, "symposion")
DEBUG = False
TEMPLATE_DEBUG = DEBUG
# tells Pinax to serve media through the staticfiles app.
SERVE_MEDIA = DEBUG
# django-compressor is turned off by default due to deployment overhead for
# most users. See <URL> for more information
COMPRESS = False
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": env_or_default("DB_NAME", "pycon"),
"USER": env_or_default("DB_USER", ""),
"PASSWORD": env_or_default("DB_PASSWORD", ""),
"HOST": env_or_default("DB_HOST", ""),
"PORT": env_or_default("DB_PORT", ""),
# https://docs.djangoproject.com/en/1.8/ref/databases/#persistent-connections
"CONN_MAX_AGE": int(env_or_default("CONN_MAX_AGE", 300)),
}
}
INTERNAL_IPS = [
"127.0.0.1",
]
ADMINS = [
# ("<NAME>", "<EMAIL>"),
]
MANAGERS = ADMINS
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "US/Eastern"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en-us"
SITE_ID = 1
# Conference ID and any URL prefixes
CONFERENCE_ID = 1
CONFERENCE_URL_PREFIXES = {
1: CONFERENCE_YEAR,
}
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
gettext = lambda s: s
LANGUAGES = (
('en', gettext('English')),
# ('fr', gettext('French')),
)
LOCALE_PATHS = [os.path.join(PROJECT_ROOT, "locale")]
# Absolute path to the directory that holds media - this is files uploaded
# by users, such as attachments.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = env_or_default("MEDIA_ROOT", os.path.join(PROJECT_ROOT, "site_media", "media"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = "/%s/site_media/media/" % CONFERENCE_URL_PREFIXES[CONFERENCE_ID]
# Absolute path to the directory where static files will be gathered
# at deploy time and served from in production. Should NOT be
# in version control, or contain anything before deploying.
STATIC_ROOT = os.path.join(PROJECT_ROOT, "site_media", "static")
# URL that handles the static files like app media.
# Example: "http://media.lawrence.com"
STATIC_URL = "/%s/site_media/static/" % CONFERENCE_URL_PREFIXES[CONFERENCE_ID]
# Additional directories which hold static files
STATICFILES_DIRS = [
os.path.join(PACKAGE_ROOT, "static"),
]
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
"compressor.finders.CompressorFinder",
]
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = posixpath.join(STATIC_URL, "admin/")
# Subdirectory of COMPRESS_ROOT to store the cached media files in
COMPRESS_OUTPUT_DIR = "cache"
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
]
MIDDLEWARE_CLASSES = [
"djangosecure.middleware.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
# LocaleMiddleware must follow session middleware and cache middleware,
    # and precede CommonMiddleware
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"reversion.middleware.RevisionMiddleware",
# "debug_toolbar.middleware.DebugToolbarMiddleware",
]
if os.getenv('NOINDEX') == '1':
MIDDLEWARE_CLASSES.append('pycon.noindexmiddleware.NoIndexMiddleware')
ROOT_URLCONF = "symposion.urls"
TEMPLATE_DIRS = [
os.path.join(PROJECT_ROOT, "pycon/templates"),
os.path.join(PACKAGE_ROOT, "templates"),
]
TEMPLATE_CONTEXT_PROCESSORS = [
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
"pycon.context_processors.global_settings",
"pinax_utils.context_processors.settings",
"account.context_processors.account",
"symposion.reviews.context_processors.reviews",
"constance.context_processors.config",
"pinax_theme_bootstrap.context_processors.theme",
]
INSTALLED_APPS = [
# Django
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.humanize",
# theme
"pinax_theme_bootstrap",
"django_forms_bootstrap",
# external
"compressor",
"mailer",
"timezones",
"metron",
"easy_thumbnails",
"account",
"sitetree",
"taggit",
"reversion",
"biblion",
"djangosecure",
"raven.contrib.django.raven_compat",
"constance.backends.database",
"constance",
"uni_form",
"gunicorn",
"multi_email_field",
"email_log",
"djcelery_email",
"multiselectfield",
"markdownify",
"storages",
# symposion
"symposion.conference",
"symposion.cms",
"symposion.boxes",
"symposion.speakers",
"symposion.proposals",
"symposion.reviews",
"symposion.teams",
"symposion.schedule",
# custom
"markedit",
"pycon",
"pycon.bulkemail",
"pycon.sponsorship",
"pycon.registration",
"pycon.schedule",
"pycon.profile",
"pycon.finaid",
"pycon.pycon_api",
"pycon.tutorials",
"pycon.mentorship",
]
FIXTURE_DIRS = [
os.path.join(PROJECT_ROOT, "fixtures"),
]
MESSAGE_STORAGE = "django.contrib.messages.storage.session.SessionStorage"
EMAIL_BACKEND = 'djcelery_email.backends.CeleryEmailBackend'
CELERY_EMAIL_BACKEND = 'email_log.backends.EmailBackend'
EMAIL_LOG_BACKEND = "django.core.mail.backends.console.EmailBackend"
ACCOUNT_OPEN_SIGNUP = True
ACCOUNT_USE_OPENID = False
ACCOUNT_REQUIRED_EMAIL = False
ACCOUNT_EMAIL_VERIFICATION = False
ACCOUNT_EMAIL_AUTHENTICATION = False
ACCOUNT_UNIQUE_EMAIL = EMAIL_CONFIRMATION_UNIQUE_EMAIL = False
ACCOUNT_CREATE_ON_SAVE = True
AUTHENTICATION_BACKENDS = [
# Permissions backends
"symposion.teams.backends.TeamPermissionsBackend",
# Django User Accounts
"account.auth_backends.EmailAuthenticationBackend",
'django.contrib.auth.backends.ModelBackend',
]
LOGIN_URL = reverse_lazy("account_login")
ACCOUNT_SIGNUP_REDIRECT_URL = "dashboard"
ACCOUNT_LOGIN_REDIRECT_URL = "dashboard"
ACCOUNT_LOGOUT_REDIRECT_URL = "home"
ACCOUNT_USER_DISPLAY = lambda user: user.get_full_name()
LOGIN_ERROR_URL = reverse_lazy("account_login")
EMAIL_CONFIRMATION_DAYS = 2
EMAIL_DEBUG = DEBUG
DEBUG_TOOLBAR_CONFIG = {
"INTERCEPT_REDIRECTS": False,
}
CONSTANCE_BACKEND = "constance.backends.database.DatabaseBackend"
CONSTANCE_CONFIG = {
# "SETTING_NAME": (default_value, "help text")
"CDN_PURGE_BASE_URL": ("", "Base URL for CDN 'PURGE' requests"
" when pages are edited through the web."),
"CTE_SECRET": ("", "Shared secret for CTE integration"),
"CTE_BASICAUTH_USER": ("", "Shared User for accessing CTE Registration data"),
"CTE_BASICAUTH_PASS": ("", "Shared User password for accessing CTE Registration data"),
"CTE_TUTORIAL_DATA_URL": ("", "URL for the CSV of CTE Tutorial Registration Data"),
"REGISTRATION_INTRODUCTION_URL": ("", "URL for introduction to registration domain"),
"REGISTRATION_URL": ("", "URL for registration"),
"SPONSOR_FROM_EMAIL": ("", "From address for emails to sponsors"),
"REGISTRATION_STATUS": ("", "Used in the home page template. Valid values are 'soon', 'open' and 'closed'"),
}
# Instead of expecting blog posts to be typed as markup, simply expect
# raw HTML to be typed into the "Teaser:" and "Content:" fields of each
# Biblion Post in the Django admin interface. By using the identity
# function unicode() as the filter, the HTML winds up being saved to the
# database intact and unchanged.
BIBLION_PARSER = ["__builtin__.unicode", {}]
BIBLION_SECTIONS = [
("general", "General"),
]
SYMPOSION_PAGE_REGEX = r"(([\w-]{1,})(/[\w-]{1,})*)/$"
USE_X_ACCEL_REDIRECT = False
MARKEDIT_DEFAULT_SETTINGS = {
'preview': 'below',
'toolbar': {
'backgroundMode': 'dark',
}
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
# Is somebody clobbering this? We shouldn't have to set it ourselves,
# but if we don't, gunicorn's django_wsgi blows up trying to configure
# logging with an empty dictionary.
from django.utils.log import DEFAULT_LOGGING
LOGGING = copy.deepcopy(DEFAULT_LOGGING)
LOGGING.setdefault('root', {
# Default root logger, just so everything has a handler and we don't see warnings
'handlers': ['null'], # null handler is defined in the default logging config
})
BLEACH_ALLOWED_TAGS = bleach.ALLOWED_TAGS + ['p']
# Django issues a nasty warning in 1.7 if you don't
# declare a runner explicitly, even though it works...
# This can be removed in 1.8, where the warning has been
# removed.
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Need to switch from the now-default JSON serializer, or OAuth2 breaks trying
# to serialize a datetime to JSON
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
# Celery
REDIS_HOST = os.environ.get('REDIS_HOST', 'localhost')
BROKER_URL = 'redis://{}:6379/0'.format(REDIS_HOST) # Redis DB 0 for Celery. (Cache will use DB 1)
# We deliberately do not set CELERY_RESULT_BACKEND because we are discarding results.
# Pickle is fine, our redis is only accessible on localhost
CELERY_ACCEPT_CONTENT = ['pickle']
# Some other options Celery docs say we should set when using Redis:
BROKER_TRANSPORT_OPTIONS = {
'fanout_prefix': True,
'fanout_patterns': True
}
# NOTE: to start the worker, activate the venv and run "celery -A pycon worker [options]"
# Send bulk emails every 5 minutes
CELERYBEAT_SCHEDULE = {
'send_bulk_emails': {
'task': 'pycon.bulkemail.tasks.send_bulk_emails',
'schedule': timedelta(minutes=5),
}
}
# EMAIL ADDRESSES
# Override in more specific settings files, please.
DEFAULT_FROM_EMAIL = '<EMAIL>'
FINANCIAL_AID_EMAIL = '<EMAIL>'
ORGANIZERS_EMAIL = '<EMAIL>'
REGISTRATION_EMAIL = '<EMAIL>'
SPONSORSHIP_EMAIL = '<EMAIL>'
THEME_CONTACT_EMAIL = '<EMAIL>'
FINANCIAL_AID_WEEKLY_REPORT_EMAIL = ['<EMAIL>']
# django easy_thumbnails
THUMBNAIL_ALIASES = {
"": {
'sponsor_homepage': {'size': (300, 300)},
'sponsor_jobs': {'size': (150, 80)},
'sponsor_list': {'size': (260, 240)},
'sponsor_link': {'size': (150, 150)},
'speaker_profile': {'size': (128, 128)},
}
}
# fixer.io, currency conversion
FIXER_ACCESS_KEY = os.environ.get('FIXER_ACCESS_KEY')
|
torchdrug/layers/conv.py
|
wconnell/torchdrug
| 772 |
72635
|
<reponame>wconnell/torchdrug<filename>torchdrug/layers/conv.py
import functools
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils import checkpoint
from torch_scatter import scatter_mean, scatter_add, scatter_max
from torchdrug import data, layers, utils
from torchdrug.layers import functional
class MessagePassingBase(nn.Module):
"""
Base module for message passing.
Any custom message passing module should be derived from this class.
"""
gradient_checkpoint = False
def message(self, graph, input):
"""
Compute edge messages for the graph.
Parameters:
graph (Graph): graph(s)
input (Tensor): node representations of shape :math:`(|V|, ...)`
Returns:
Tensor: edge messages of shape :math:`(|E|, ...)`
"""
raise NotImplementedError
def aggregate(self, graph, message):
"""
Aggregate edge messages to nodes.
Parameters:
graph (Graph): graph(s)
message (Tensor): edge messages of shape :math:`(|E|, ...)`
Returns:
Tensor: node updates of shape :math:`(|V|, ...)`
"""
raise NotImplementedError
def message_and_aggregate(self, graph, input):
"""
Fused computation of message and aggregation over the graph.
This may provide better time or memory complexity than separate calls of
:meth:`message <MessagePassingBase.message>` and :meth:`aggregate <MessagePassingBase.aggregate>`.
Parameters:
graph (Graph): graph(s)
input (Tensor): node representations of shape :math:`(|V|, ...)`
Returns:
Tensor: node updates of shape :math:`(|V|, ...)`
"""
message = self.message(graph, input)
update = self.aggregate(graph, message)
return update
def _message_and_aggregate(self, *tensors):
graph = data.Graph.from_tensors(tensors[:-1])
input = tensors[-1]
update = self.message_and_aggregate(graph, input)
return update
def combine(self, input, update):
"""
Combine node input and node update.
Parameters:
input (Tensor): node representations of shape :math:`(|V|, ...)`
update (Tensor): node updates of shape :math:`(|V|, ...)`
"""
raise NotImplementedError
def forward(self, graph, input):
"""
Perform message passing over the graph(s).
Parameters:
graph (Graph): graph(s)
input (Tensor): node representations of shape :math:`(|V|, ...)`
"""
if self.gradient_checkpoint:
update = checkpoint.checkpoint(self._message_and_aggregate, *graph.to_tensors(), input)
else:
update = self.message_and_aggregate(graph, input)
output = self.combine(input, update)
return output
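# Editor's illustrative sketch (not part of the upstream file): a minimal subclass
# showing the message / aggregate / combine contract documented above. It averages
# incoming neighbor features with scatter_mean and adds them to the input.
class _NeighborMeanExample(MessagePassingBase):
    def message(self, graph, input):
        node_in = graph.edge_list[:, 0]
        return input[node_in]
    def aggregate(self, graph, message):
        node_out = graph.edge_list[:, 1]
        return scatter_mean(message, node_out, dim=0, dim_size=graph.num_node)
    def combine(self, input, update):
        return input + update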
class GraphConv(MessagePassingBase):
"""
Graph convolution operator from `Semi-Supervised Classification with Graph Convolutional Networks`_.
.. _Semi-Supervised Classification with Graph Convolutional Networks:
https://arxiv.org/pdf/1609.02907.pdf
Parameters:
input_dim (int): input dimension
output_dim (int): output dimension
edge_input_dim (int, optional): dimension of edge features
batch_norm (bool, optional): apply batch normalization on nodes or not
activation (str or function, optional): activation function
"""
def __init__(self, input_dim, output_dim, edge_input_dim=None, batch_norm=False, activation="relu"):
super(GraphConv, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.edge_input_dim = edge_input_dim
if batch_norm:
self.batch_norm = nn.BatchNorm1d(output_dim)
else:
self.batch_norm = None
if isinstance(activation, str):
self.activation = getattr(F, activation)
else:
self.activation = activation
self.linear = nn.Linear(input_dim, output_dim)
if edge_input_dim:
self.edge_linear = nn.Linear(edge_input_dim, input_dim)
else:
self.edge_linear = None
def message(self, graph, input):
# add self loop
node_in = torch.cat([graph.edge_list[:, 0], torch.arange(graph.num_node, device=graph.device)])
degree_in = graph.degree_in.unsqueeze(-1) + 1
message = input[node_in]
if self.edge_linear:
edge_input = self.edge_linear(graph.edge_feature.float())
edge_input = torch.cat([edge_input, torch.zeros(graph.num_node, self.input_dim, device=graph.device)])
message += edge_input
message /= degree_in[node_in].sqrt()
return message
def aggregate(self, graph, message):
# add self loop
node_out = torch.cat([graph.edge_list[:, 1], torch.arange(graph.num_node, device=graph.device)])
edge_weight = torch.cat([graph.edge_weight, torch.ones(graph.num_node, device=graph.device)])
edge_weight = edge_weight.unsqueeze(-1)
degree_out = graph.degree_out.unsqueeze(-1) + 1
update = scatter_add(message * edge_weight, node_out, dim=0, dim_size=graph.num_node)
update = update / degree_out.sqrt()
return update
def message_and_aggregate(self, graph, input):
node_in, node_out = graph.edge_list.t()[:2]
node_in = torch.cat([node_in, torch.arange(graph.num_node, device=graph.device)])
node_out = torch.cat([node_out, torch.arange(graph.num_node, device=graph.device)])
edge_weight = torch.cat([graph.edge_weight, torch.ones(graph.num_node, device=graph.device)])
degree_in = graph.degree_in + 1
degree_out = graph.degree_out + 1
edge_weight = edge_weight / (degree_in[node_in] * degree_out[node_out]).sqrt()
adjacency = utils.sparse_coo_tensor(torch.stack([node_in, node_out]), edge_weight,
(graph.num_node, graph.num_node))
update = torch.sparse.mm(adjacency.t(), input)
if self.edge_linear:
edge_input = graph.edge_feature.float()
if self.edge_linear.in_features > self.edge_linear.out_features:
edge_input = self.edge_linear(edge_input)
edge_weight = edge_weight.unsqueeze(-1)
edge_update = scatter_add(edge_input * edge_weight, graph.edge_list[:, 1], dim=0,
dim_size=graph.num_node)
if self.edge_linear.in_features <= self.edge_linear.out_features:
edge_update = self.edge_linear(edge_update)
update += edge_update
return update
def combine(self, input, update):
output = self.linear(update)
if self.batch_norm:
output = self.batch_norm(output)
if self.activation:
output = self.activation(output)
return output
class GraphAttentionConv(MessagePassingBase):
"""
Graph attentional convolution operator from `Graph Attention Networks`_.
.. _Graph Attention Networks:
https://arxiv.org/pdf/1710.10903.pdf
Parameters:
input_dim (int): input dimension
output_dim (int): output dimension
edge_input_dim (int, optional): dimension of edge features
num_head (int, optional): number of attention heads
negative_slope (float, optional): negative slope of leaky relu activation
batch_norm (bool, optional): apply batch normalization on nodes or not
activation (str or function, optional): activation function
"""
eps = 1e-10
def __init__(self, input_dim, output_dim, edge_input_dim=None, num_head=1, negative_slope=0.2, concat=True,
batch_norm=False, activation="relu"):
super(GraphAttentionConv, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.edge_input_dim = edge_input_dim
self.num_head = num_head
self.concat = concat
self.leaky_relu = functools.partial(F.leaky_relu, negative_slope=negative_slope)
if batch_norm:
self.batch_norm = nn.BatchNorm1d(output_dim)
else:
self.batch_norm = None
if isinstance(activation, str):
self.activation = getattr(F, activation)
else:
self.activation = activation
if output_dim % num_head != 0:
raise ValueError("Expect output_dim to be a multiplier of num_head, but found `%d` and `%d`"
% (output_dim, num_head))
self.linear = nn.Linear(input_dim, output_dim)
if edge_input_dim:
self.edge_linear = nn.Linear(edge_input_dim, output_dim)
else:
self.edge_linear = None
self.query = nn.Parameter(torch.zeros(num_head, output_dim * 2 // num_head))
nn.init.kaiming_uniform_(self.query, negative_slope, mode="fan_in")
def message(self, graph, input):
# add self loop
node_in = torch.cat([graph.edge_list[:, 0], torch.arange(graph.num_node, device=graph.device)])
node_out = torch.cat([graph.edge_list[:, 1], torch.arange(graph.num_node, device=graph.device)])
edge_weight = torch.cat([graph.edge_weight, torch.ones(graph.num_node, device=graph.device)])
edge_weight = edge_weight.unsqueeze(-1)
hidden = self.linear(input)
key = torch.stack([hidden[node_in], hidden[node_out]], dim=-1)
if self.edge_linear:
edge_input = self.edge_linear(graph.edge_feature.float())
edge_input = torch.cat([edge_input, torch.zeros(graph.num_node, self.output_dim, device=graph.device)])
key += edge_input.unsqueeze(-1)
key = key.view(-1, *self.query.shape)
weight = torch.einsum("hd, nhd -> nh", self.query, key)
weight = self.leaky_relu(weight)
weight = weight - scatter_max(weight, node_out, dim=0, dim_size=graph.num_node)[0][node_out]
attention = weight.exp() * edge_weight
        # use the mean rather than the sum so that the message scale is normalized across different node degrees
normalizer = scatter_mean(attention, node_out, dim=0, dim_size=graph.num_node)[node_out]
attention = attention / (normalizer + self.eps)
value = hidden[node_in].view(-1, self.num_head, self.query.shape[-1] // 2)
attention = attention.unsqueeze(-1).expand_as(value)
message = (attention * value).flatten(1)
return message
def aggregate(self, graph, message):
# add self loop
node_out = torch.cat([graph.edge_list[:, 1], torch.arange(graph.num_node, device=graph.device)])
update = scatter_mean(message, node_out, dim=0, dim_size=graph.num_node)
return update
def combine(self, input, update):
output = update
if self.batch_norm:
output = self.batch_norm(output)
if self.activation:
output = self.activation(output)
return output
class GraphIsomorphismConv(MessagePassingBase):
"""
Graph isomorphism convolution operator from `How Powerful are Graph Neural Networks?`_
.. _How Powerful are Graph Neural Networks?:
https://arxiv.org/pdf/1810.00826.pdf
Parameters:
input_dim (int): input dimension
output_dim (int): output dimension
edge_input_dim (int, optional): dimension of edge features
hidden_dims (list of int, optional): hidden dimensions
eps (float, optional): initial epsilon
learn_eps (bool, optional): learn epsilon or not
batch_norm (bool, optional): apply batch normalization on nodes or not
activation (str or function, optional): activation function
"""
def __init__(self, input_dim, output_dim, edge_input_dim=None, hidden_dims=None, eps=0, learn_eps=False,
batch_norm=False, activation="relu"):
super(GraphIsomorphismConv, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.edge_input_dim = edge_input_dim
eps = torch.tensor([eps], dtype=torch.float32)
if learn_eps:
self.eps = nn.Parameter(eps)
else:
self.register_buffer("eps", eps)
if batch_norm:
self.batch_norm = nn.BatchNorm1d(output_dim)
else:
self.batch_norm = None
if isinstance(activation, str):
self.activation = getattr(F, activation)
else:
self.activation = activation
if hidden_dims is None:
hidden_dims = []
self.mlp = layers.MLP(input_dim, list(hidden_dims) + [output_dim], activation)
if edge_input_dim:
self.edge_linear = nn.Linear(edge_input_dim, input_dim)
else:
self.edge_linear = None
def message(self, graph, input):
node_in = graph.edge_list[:, 0]
message = input[node_in]
if self.edge_linear:
message += self.edge_linear(graph.edge_feature.float())
return message
def aggregate(self, graph, message):
node_out = graph.edge_list[:, 1]
edge_weight = graph.edge_weight.unsqueeze(-1)
update = scatter_add(message * edge_weight, node_out, dim=0, dim_size=graph.num_node)
return update
def message_and_aggregate(self, graph, input):
adjacency = utils.sparse_coo_tensor(graph.edge_list.t()[:2], graph.edge_weight,
(graph.num_node, graph.num_node))
update = torch.sparse.mm(adjacency.t(), input)
if self.edge_linear:
edge_input = graph.edge_feature.float()
edge_weight = graph.edge_weight.unsqueeze(-1)
if self.edge_linear.in_features > self.edge_linear.out_features:
edge_input = self.edge_linear(edge_input)
edge_update = scatter_add(edge_input * edge_weight, graph.edge_list[:, 1], dim=0,
dim_size=graph.num_node)
if self.edge_linear.in_features <= self.edge_linear.out_features:
edge_update = self.edge_linear(edge_update)
update += edge_update
return update
def combine(self, input, update):
output = self.mlp((1 + self.eps) * input + update)
if self.batch_norm:
output = self.batch_norm(output)
if self.activation:
output = self.activation(output)
return output
class RelationalGraphConv(MessagePassingBase):
"""
Relational graph convolution operator from `Modeling Relational Data with Graph Convolutional Networks`_.
.. _Modeling Relational Data with Graph Convolutional Networks:
https://arxiv.org/pdf/1703.06103.pdf
Parameters:
input_dim (int): input dimension
output_dim (int): output dimension
num_relation (int): number of relations
edge_input_dim (int, optional): dimension of edge features
batch_norm (bool, optional): apply batch normalization on nodes or not
activation (str or function, optional): activation function
"""
eps = 1e-10
def __init__(self, input_dim, output_dim, num_relation, edge_input_dim=None, batch_norm=False, activation="relu"):
super(RelationalGraphConv, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.num_relation = num_relation
self.edge_input_dim = edge_input_dim
if batch_norm:
self.batch_norm = nn.BatchNorm1d(output_dim)
else:
self.batch_norm = None
if isinstance(activation, str):
self.activation = getattr(F, activation)
else:
self.activation = activation
self.self_loop = nn.Linear(input_dim, output_dim)
self.linear = nn.Linear(num_relation * input_dim, output_dim)
if edge_input_dim:
self.edge_linear = nn.Linear(edge_input_dim, input_dim)
else:
self.edge_linear = None
def message(self, graph, input):
node_in = graph.edge_list[:, 0]
message = input[node_in]
if self.edge_linear:
message += self.edge_linear(graph.edge_feature.float())
return message
def aggregate(self, graph, message):
assert graph.num_relation == self.num_relation
node_out = graph.edge_list[:, 1] * self.num_relation + graph.edge_list[:, 2]
edge_weight = graph.edge_weight.unsqueeze(-1)
update = scatter_add(message * edge_weight, node_out, dim=0, dim_size=graph.num_node * self.num_relation) / \
(scatter_add(edge_weight, node_out, dim=0, dim_size=graph.num_node * self.num_relation) + self.eps)
return update.view(graph.num_node, self.num_relation * self.input_dim)
def message_and_aggregate(self, graph, input):
assert graph.num_relation == self.num_relation
node_in, node_out, relation = graph.edge_list.t()
node_out = node_out * self.num_relation + relation
degree_out = scatter_add(graph.edge_weight, node_out, dim_size=graph.num_node * graph.num_relation)
edge_weight = graph.edge_weight / degree_out[node_out]
adjacency = utils.sparse_coo_tensor(torch.stack([node_in, node_out]), edge_weight,
(graph.num_node, graph.num_node * graph.num_relation))
update = torch.sparse.mm(adjacency.t(), input)
if self.edge_linear:
edge_input = graph.edge_feature.float()
if self.edge_linear.in_features > self.edge_linear.out_features:
edge_input = self.edge_linear(edge_input)
edge_weight = edge_weight.unsqueeze(-1)
edge_update = scatter_add(edge_input * edge_weight, node_out, dim=0,
dim_size=graph.num_node * graph.num_relation)
if self.edge_linear.in_features <= self.edge_linear.out_features:
edge_update = self.edge_linear(edge_update)
update += edge_update
return update.view(graph.num_node, self.num_relation * self.input_dim)
def combine(self, input, update):
output = self.linear(update) + self.self_loop(input)
if self.batch_norm:
output = self.batch_norm(output)
if self.activation:
output = self.activation(output)
return output
class NeuralFingerprintConv(MessagePassingBase):
"""
Graph neural network operator from `Convolutional Networks on Graphs for Learning Molecular Fingerprints`_.
Note this operator doesn't include the sparsifying step of the original paper.
.. _Convolutional Networks on Graphs for Learning Molecular Fingerprints:
https://arxiv.org/pdf/1509.09292.pdf
Parameters:
input_dim (int): input dimension
output_dim (int): output dimension
edge_input_dim (int, optional): dimension of edge features
batch_norm (bool, optional): apply batch normalization on nodes or not
activation (str or function, optional): activation function
"""
def __init__(self, input_dim, output_dim, edge_input_dim=None, batch_norm=False, activation="relu"):
super(NeuralFingerprintConv, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.edge_input_dim = edge_input_dim
if batch_norm:
self.batch_norm = nn.BatchNorm1d(output_dim)
else:
self.batch_norm = None
if isinstance(activation, str):
self.activation = getattr(F, activation)
else:
self.activation = activation
self.linear = nn.Linear(input_dim, output_dim)
if edge_input_dim:
self.edge_linear = nn.Linear(edge_input_dim, input_dim)
else:
self.edge_linear = None
def message(self, graph, input):
node_in = graph.edge_list[:, 0]
message = input[node_in]
if self.edge_linear:
message += self.edge_linear(graph.edge_feature.float())
return message
def aggregate(self, graph, message):
node_out = graph.edge_list[:, 1]
edge_weight = graph.edge_weight.unsqueeze(-1)
update = scatter_add(message * edge_weight, node_out, dim=0, dim_size=graph.num_node)
return update
def message_and_aggregate(self, graph, input):
adjacency = utils.sparse_coo_tensor(graph.edge_list.t()[:2], graph.edge_weight,
(graph.num_node, graph.num_node))
update = torch.sparse.mm(adjacency.t(), input)
if self.edge_linear:
edge_input = graph.edge_feature.float()
edge_weight = graph.edge_weight.unsqueeze(-1)
if self.edge_linear.in_features > self.edge_linear.out_features:
edge_input = self.edge_linear(edge_input)
edge_update = scatter_add(edge_input * edge_weight, graph.edge_list[:, 1], dim=0,
dim_size=graph.num_node)
if self.edge_linear.in_features <= self.edge_linear.out_features:
edge_update = self.edge_linear(edge_update)
update += edge_update
return update
def combine(self, input, update):
output = self.linear(input + update)
if self.batch_norm:
output = self.batch_norm(output)
if self.activation:
output = self.activation(output)
return output
class ContinuousFilterConv(MessagePassingBase):
"""
Continuous filter operator from
`SchNet: A continuous-filter convolutional neural network for modeling quantum interactions`_.
.. _SchNet\: A continuous-filter convolutional neural network for modeling quantum interactions:
https://arxiv.org/pdf/1706.08566.pdf
Parameters:
input_dim (int): input dimension
output_dim (int): output dimension
edge_input_dim (int, optional): dimension of edge features
hidden_dim (int, optional): hidden dimension. By default, same as :attr:`output_dim`
cutoff (float, optional): maximal scale for RBF kernels
num_gaussian (int, optional): number of RBF kernels
batch_norm (bool, optional): apply batch normalization on nodes or not
activation (str or function, optional): activation function
"""
def __init__(self, input_dim, output_dim, edge_input_dim=None, hidden_dim=None, cutoff=5, num_gaussian=100,
batch_norm=False, activation="shifted_softplus"):
super(ContinuousFilterConv, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.edge_input_dim = edge_input_dim
if hidden_dim is None:
hidden_dim = output_dim
self.hidden_dim = hidden_dim
self.rbf = layers.RBF(stop=cutoff, num_kernel=num_gaussian)
if batch_norm:
self.batch_norm = nn.BatchNorm1d(output_dim)
else:
self.batch_norm = None
if activation == "shifted_softplus":
self.activation = functional.shifted_softplus
elif isinstance(activation, str):
self.activation = getattr(F, activation)
else:
self.activation = activation
self.input_layer = nn.Linear(input_dim, hidden_dim)
self.rbf_layer = nn.Linear(num_gaussian, hidden_dim)
self.output_layer = nn.Linear(hidden_dim, output_dim)
if edge_input_dim:
self.edge_linear = nn.Linear(edge_input_dim, input_dim)
else:
self.edge_linear = None
def message(self, graph, input):
node_in, node_out = graph.edge_list.t()[:2]
position = graph.node_position
message = self.input_layer(input)[node_in]
if self.edge_linear:
message += self.edge_linear(graph.edge_feature.float())
weight = self.rbf_layer(self.rbf(position[node_in], position[node_out]))
message *= weight
return message
def aggregate(self, graph, message):
node_out = graph.edge_list[:, 1]
edge_weight = graph.edge_weight.unsqueeze(-1)
update = scatter_add(message * edge_weight, node_out, dim=0, dim_size=graph.num_node)
return update
def message_and_aggregate(self, graph, input):
node_in, node_out = graph.edge_list.t()[:2]
position = graph.node_position
rbf_weight = self.rbf_layer(self.rbf(position[node_in], position[node_out]))
indices = torch.stack([node_out, node_in, torch.arange(graph.num_edge, device=graph.device)])
adjacency = utils.sparse_coo_tensor(indices, graph.edge_weight, (graph.num_node, graph.num_node, graph.num_edge))
update = functional.generalized_rspmm(adjacency, rbf_weight, self.input_layer(input))
if self.edge_linear:
edge_input = graph.edge_feature.float()
if self.edge_linear.in_features > self.edge_linear.out_features:
edge_input = self.edge_linear(edge_input)
edge_weight = graph.edge_weight.unsqueeze(-1) * rbf_weight
edge_update = scatter_add(edge_input * edge_weight, graph.edge_list[:, 1], dim=0,
dim_size=graph.num_node)
if self.edge_linear.in_features <= self.edge_linear.out_features:
edge_update = self.edge_linear(edge_update)
update += edge_update
return update
def combine(self, input, update):
output = self.output_layer(update)
if self.batch_norm:
output = self.batch_norm(output)
if self.activation:
output = self.activation(output)
return output
class MessagePassing(MessagePassingBase):
"""
Message passing operator from `Neural Message Passing for Quantum Chemistry`_.
This implements the edge network variant in the original paper.
.. _Neural Message Passing for Quantum Chemistry:
https://arxiv.org/pdf/1704.01212.pdf
Parameters:
input_dim (int): input dimension
edge_input_dim (int): dimension of edge features
hidden_dims (list of int, optional): hidden dims of edge network
batch_norm (bool, optional): apply batch normalization on nodes or not
activation (str or function, optional): activation function
"""
def __init__(self, input_dim, edge_input_dim, hidden_dims=None, batch_norm=False, activation="relu"):
super(MessagePassing, self).__init__()
self.input_dim = input_dim
self.output_dim = input_dim
self.edge_input_dim = edge_input_dim
if hidden_dims is None:
hidden_dims = []
if batch_norm:
self.batch_norm = nn.BatchNorm1d(input_dim)
else:
self.batch_norm = None
if isinstance(activation, str):
self.activation = getattr(F, activation)
else:
self.activation = activation
self.edge_mlp = layers.MLP(edge_input_dim, list(hidden_dims) + [input_dim * input_dim], activation)
def message(self, graph, input):
node_in = graph.edge_list[:, 0]
transform = self.edge_mlp(graph.edge_feature.float()).view(-1, self.input_dim, self.input_dim)
if graph.num_edge:
message = torch.einsum("bed, bd -> be", transform, input[node_in])
else:
message = torch.zeros(0, self.input_dim, device=graph.device)
return message
def aggregate(self, graph, message):
node_out = graph.edge_list[:, 1]
edge_weight = graph.edge_weight.unsqueeze(-1)
update = scatter_add(message * edge_weight, node_out, dim=0, dim_size=graph.num_node)
return update
def combine(self, input, update):
output = update
if self.batch_norm:
output = self.batch_norm(output)
if self.activation:
output = self.activation(output)
return output
class ChebyshevConv(MessagePassingBase):
"""
Chebyshev spectral graph convolution operator from
`Convolutional Neural Networks on Graphs with Fast Localized Spectral Filtering`_.
.. _Convolutional Neural Networks on Graphs with Fast Localized Spectral Filtering:
https://arxiv.org/pdf/1606.09375.pdf
Parameters:
input_dim (int): input dimension
output_dim (int): output dimension
edge_input_dim (int, optional): dimension of edge features
k (int, optional): number of Chebyshev polynomials.
This also corresponds to the radius of the receptive field.
batch_norm (bool, optional): apply batch normalization on nodes or not
activation (str or function, optional): activation function
"""
def __init__(self, input_dim, output_dim, edge_input_dim=None, k=1, batch_norm=False, activation="relu"):
super(ChebyshevConv, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.k = k
self.edge_input_dim = edge_input_dim
if batch_norm:
self.batch_norm = nn.BatchNorm1d(output_dim)
else:
self.batch_norm = None
if isinstance(activation, str):
self.activation = getattr(F, activation)
else:
self.activation = activation
self.linear = nn.Linear((k + 1) * input_dim, output_dim)
if edge_input_dim:
self.edge_linear = nn.Linear(edge_input_dim, input_dim)
else:
self.edge_linear = None
def message(self, graph, input):
node_in = graph.edge_list[:, 0]
degree_in = graph.degree_in.unsqueeze(-1)
# because self-loop messages have a different scale, they are processed in combine()
message = input[node_in]
if self.edge_linear:
message += self.edge_linear(graph.edge_feature.float())
message /= degree_in[node_in].sqrt()
return message
def aggregate(self, graph, message):
node_out = graph.edge_list[:, 1]
edge_weight = graph.edge_weight.unsqueeze(-1)
degree_out = graph.degree_out.unsqueeze(-1)
# because self-loop messages have a different scale, they are processed in combine()
update = -scatter_add(message * edge_weight, node_out, dim=0, dim_size=graph.num_node)
update = update / degree_out.sqrt()
return update
def message_and_aggregate(self, graph, input):
node_in, node_out = graph.edge_list.t()[:2]
edge_weight = -graph.edge_weight / (graph.degree_in[node_in] * graph.degree_out[node_out]).sqrt()
adjacency = utils.sparse_coo_tensor(graph.edge_list.t()[:2], edge_weight, (graph.num_node, graph.num_node))
update = torch.sparse.mm(adjacency.t(), input)
if self.edge_linear:
edge_input = graph.edge_feature.float()
if self.edge_linear.in_features > self.edge_linear.out_features:
edge_input = self.edge_linear(edge_input)
edge_weight = edge_weight.unsqueeze(-1)
edge_update = scatter_add(edge_input * edge_weight, graph.edge_list[:, 1], dim=0,
dim_size=graph.num_node)
if self.edge_linear.in_features <= self.edge_linear.out_features:
edge_update = self.edge_linear(edge_update)
update += edge_update
return update
def forward(self, graph, input):
# Chebyshev polynomial bases
bases = [input]
for i in range(self.k):
x = super(ChebyshevConv, self).forward(graph, bases[-1])
if i > 0:
x = 2 * x - bases[-2]
bases.append(x)
bases = torch.cat(bases, dim=-1)
output = self.linear(bases)
if self.batch_norm:
            output = self.batch_norm(output)
if self.activation:
output = self.activation(output)
return output
def combine(self, input, update):
output = input + update
return output
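# Editor's illustrative usage sketch (not part of the upstream file). It assumes
# torchdrug's data.Graph can be built from an edge list plus num_node, consistent
# with the graph attributes (edge_list, edge_weight, degree_in/out, num_node) used
# throughout this module; treat it as a sketch rather than a verified example.
if __name__ == "__main__":
    _graph = data.Graph([[0, 1], [1, 2], [2, 0]], num_node=3)
    _feature = torch.randn(3, 16)
    _conv = GraphConv(input_dim=16, output_dim=32)
    print(_conv(_graph, _feature).shape)  # expected: torch.Size([3, 32])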
|
quokka/core/db.py
|
songshansitulv/quokka
| 1,141 |
72655
|
import itertools
from contextlib import suppress
from copy import deepcopy
from pymongo import MongoClient
from tinydb_serialization import SerializationMiddleware
from tinymongo import TinyMongoClient
from tinymongo.serializers import DateTimeSerializer
from tinymongo.tinymongo import generate_id
from quokka.utils.text import split_all_category_roots
class QuokkaTinyMongoClient(TinyMongoClient):
@property
def _storage(self):
serialization = SerializationMiddleware()
serialization.register_serializer(DateTimeSerializer(), 'TinyDate')
# TODO: Read custom serializers from settings and extensions
return serialization
class QuokkaDB(object):
config = {}
system = 'tinydb'
folder = 'databases'
host = 'localhost'
port = 27017
name = 'quokka_db'
collections = {
'index': 'index',
'contents': 'contents',
'uploads': 'uploads',
'users': 'users',
}
def __init__(self, app=None):
self.app = None
if app is not None:
self.init_app(app)
def init_app(self, app):
self.config = app.config.get('DATABASE', {})
        # update attributes with config counterparts
for key, value in self.config.items():
if key.lower() != 'collections':
setattr(self, key.lower(), value)
else:
self.collections.update(value)
self._register(app)
def _register(self, app):
if not hasattr(app, 'extensions'):
app.extensions = {}
if 'db' in app.extensions:
raise RuntimeError("Flask extension already initialized")
app.extensions['db'] = self
self.app = app
def get_db_name(self, collection):
"""return db_name for collection"""
if self.system == "mongo":
return self.name
return collection
def get_collection(self, collection):
"""Get the corresponding database collection/table"""
col_name = self.collections.get(collection, collection)
db_name = self.get_db_name(col_name)
return self.connection[db_name][col_name]
def get_content_collection(self, content_id):
return self.connection[content_id]['contents']
def get_content_collection_mongo(self, content_id):
return self.connection[self.name]['contents']
@property
def connection(self):
if getattr(self, '_connection', None) is None:
if self.system == 'tinydb':
self._connection = QuokkaTinyMongoClient(self.folder)
elif self.system == 'mongo':
self._connection = MongoClient(
host=self.host,
port=self.port
)
return self._connection
def __dir__(self):
"""Return existing attributes + collection names"""
attrs = []
for attr in super().__dir__():
if attr.endswith(('_mongo', '_tinydb')):
attrs.append(attr.rpartition('_')[0])
else:
attrs.append(attr)
return sorted(list(set(attrs)) + list(self.collections.keys()))
def __getattribute__(self, name):
collections = super().__getattribute__('collections')
get_collection = super().__getattribute__('get_collection')
if name in collections:
return get_collection(name)
# Try to get system specific method e.g: self.categories_mongo
try:
system = super().__getattribute__('system')
return super().__getattribute__(f'{name}_{system}')
except AttributeError:
return super().__getattribute__(name)
# [ <-- DB query helpers --> ]
def generate_id(self):
return generate_id()
def value_set(self, colname, key, filter=None,
sort=True, flat=False, **kwargs):
"""Return a set of all values in a key"""
if filter is not None:
data = self.get_collection(colname).find(filter, **kwargs)
else:
data = self.get_collection(colname).find(**kwargs)
values = [item.get(key) for item in data if item.get(key) is not None]
if flat is True:
values = list(itertools.chain(*values))
with suppress(TypeError):
values = list(set(values))
return sorted(values) if sort is True else values
def author_set(self, sort=True, **kwargs):
users = [
item.get('fullname', item.get('username'))
for item in self.users.find()
]
authors = self.value_set('index', 'authors', flat=True, **kwargs)
values = list(set(users + authors))
return sorted(values) if sort is True else values
def tag_set(self, sort=True, **kwargs):
return self.value_set('index', 'tags', flat=True, sort=sort, **kwargs)
def category_set(self, sort=True, **kwargs):
results = self.value_set('index', 'category', sort=sort, **kwargs)
cats = []
for result in results:
cats.extend(split_all_category_roots(result))
return sorted(set(cats)) if sort is True else set(cats)
def content_set(self, *args, **kwargs):
return self.index.find(*args, **kwargs)
def article_set(self, *args, **kwargs):
kwargs.setdefault(
'sort',
self.app.theme_context.get('ARTICLE_ORDER_BY', [('date', -1)])
)
if not args:
args = [{'content_type': 'article'}]
elif isinstance(args[0], dict):
args[0]['content_type'] = 'article'
return self.content_set(*args, **kwargs)
def page_set(self, *args, **kwargs):
kwargs.setdefault(
'sort',
self.app.theme_context.get('PAGE_ORDER_BY', [('title', -1)])
)
if not args:
args = [{'content_type': 'page'}]
elif isinstance(args[0], dict):
args[0]['content_type'] = 'page'
return self.content_set(*args, **kwargs)
def block_set(self, *args, **kwargs):
kwargs.setdefault(
'sort',
self.app.theme_context.get(
'BLOCK_ORDER_BY', [('title', -1)]
)
)
if not args:
args = [{'content_type': 'block'}]
elif isinstance(args[0], dict):
args[0]['content_type'] = 'block'
return self.content_set(*args, **kwargs)
def select(self, colname, *args, **kwargs):
return self.get_collection(colname).find(*args, **kwargs)
def count(self, colname, *args, **kwargs):
return self.get_collection(colname).find(*args, **kwargs).count()
def get(self, colname, *args, **kwargs):
return self.get_collection(colname).find_one(*args, **kwargs)
def insert(self, colname, *args, **kwargs):
return self.get_collection(colname).insert(*args, **kwargs)
def update(self, colname, query, doc):
return self.get_collection(colname).update_one(query, doc)
def push_content(self, model):
"""Insert or Update content related to model"""
collection = self.get_content_collection(model['_id'])
current_saved = collection.find_one({
'content_id': model['_id'],
'version': model.get('version', 0)
})
if is_equal(model, current_saved):
model.pop('content', None)
return
model_to_save = deepcopy(model)
if not current_saved:
version = 0
else:
version = model.get('version', 0) + 1
model['version'] = model_to_save['version'] = version
model_to_save['content_id'] = model_to_save.pop('_id')
collection.insert(model_to_save)
model.pop('content', None)
def pull_content(self, model):
if not isinstance(model, dict):
model = self.get('index', {'_id': model})
if not model or (
model.get('version') == 0 and not model.get('_isclone')):
return
collection = self.get_content_collection(model['_id'])
record = collection.find_one({
'content_id': model['_id'],
'version': model['version']
})
return record['content'] if record else None
def get_with_content(self, **kwargs):
model = self.get('index', kwargs)
if model:
model['content'] = self.pull_content(model)
return model
def is_equal(model, other):
if not other:
return False
versioned_keys = [
'title', 'summary', 'tags', 'category', 'date',
'content', 'authors', 'slug', 'status', 'published',
'comments', 'block_items'
]
for key in versioned_keys:
if model.get(key) != other.get(key):
return False
return True
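# Editor's illustrative sketch (not part of the upstream file): wiring the extension
# to a Flask app through the DATABASE config consumed by init_app above. The config
# values are assumptions chosen to match the class defaults, not project settings.
if __name__ == "__main__":
    from flask import Flask
    _app = Flask(__name__)
    _app.config['DATABASE'] = {'system': 'tinydb', 'folder': 'databases'}
    _db = QuokkaDB(_app)
    print(_db.get_db_name('index'))  # for tinydb the db name equals the collection name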
|
gltbx/generate_functions_bpl.py
|
dperl-sol/cctbx_project
| 155 |
72677
|
<gh_stars>100-1000
from __future__ import absolute_import, division, print_function
from libtbx.utils import write_this_is_auto_generated
from libtbx.str_utils import line_breaker
import libtbx.load_env
import libtbx.path
import os
import sys
from six.moves import range
this = "gltbx.generate_functions_bpl"
return_types = {
"GLenum": 0,
"GLboolean": 0,
"GLint": 0,
"GLuint": 0,
"const GLubyte*": 0,
"GLUnurbs*": 0,
"GLUquadric*": 0,
"GLUtesselator*": 0,
}
arg_types = {
"const void*": 0,
"GLbitfield": 0,
"GLboolean": 0,
"GLboolean*": 0,
"GLbyte": 0,
"GLclampd": 0,
"GLclampf": 0,
"GLdouble": 0,
"GLdouble*": 0,
"GLenum": 0,
"GLfloat": 0,
"GLfloat*": 0,
"GLint": 0,
"GLint*": 0,
"GLshort": 0,
"GLsizei": 0,
"GLubyte": 0,
"GLubyte*": 0,
"GLuint": 0,
"GLuint*": 0,
"GLushort": 0,
"GLushort*": 0,
"GLvoid*": 0,
"GLvoid**": 0,
"const GLboolean*": 0,
"const GLbyte*": 0,
"const GLclampf*": 0,
"const GLdouble*": 0,
"const GLfloat*": 0,
"const GLint*": 0,
"const GLshort*": 0,
"const GLubyte*": 0,
"const GLuint*": 0,
"const GLushort*": 0,
"const GLvoid*": 0,
"GLUnurbs*": 0,
"GLUquadric*": 0,
"GLUtesselator*": 0,
"glu_function_pointer": 0,
}
opaque_pointers = [
"GLUnurbs*",
"GLUquadric*",
"GLUtesselator*",
]
pointee_sizes = {
"glAreTexturesResident textures": 0,
"glAreTexturesResident residences": 0,
"glBitmap bitmap": 0,
"glCallLists lists": "?n*sizeof(type)",
"glClipPlane equation": 4,
"glColor3bv v": 3,
"glColor3dv v": 3,
"glColor3fv v": 3,
"glColor3iv v": 3,
"glColor3sv v": 3,
"glColor3ubv v": 3,
"glColor3uiv v": 3,
"glColor3usv v": 3,
"glColor4bv v": 4,
"glColor4dv v": 4,
"glColor4fv v": 4,
"glColor4iv v": 4,
"glColor4sv v": 4,
"glColor4ubv v": 4,
"glColor4uiv v": 4,
"glColor4usv v": 4,
"glColorPointer pointer": 0,
"glDeleteTextures textures": 0,
"glDrawElements indices": 0,
"glDrawPixels pixels": 0,
"glEdgeFlagv flag": 1,
"glEdgeFlagPointer pointer": 0,
"glEvalCoord1dv u": 1,
"glEvalCoord1fv u": 1,
"glEvalCoord2dv u": 2,
"glEvalCoord2fv u": 2,
"glFeedbackBuffer buffer": "size",
"glFogfv params": "?pname=GL_FOG_COLOR: 4, default: 1",
"glFogiv params": "?pname=GL_FOG_COLOR: 4, default: 1",
"glGenTextures textures": "n",
"glGetClipPlane equation": 4,
"glGetBooleanv params": "?1..16 depending on pname",
"glGetDoublev params": "?1..16 depending on pname",
"glGetFloatv params": "?1..16 depending on pname",
"glGetIntegerv params": "?1..16 depending on pname",
"glGetLightfv params": "?1..4 depending on pname",
"glGetLightiv params": "?1..4 depending on pname",
"glGetMapdv v": 0,
"glGetMapfv v": 0,
"glGetMapiv v": 0,
"glGetMaterialfv params": "?1..4 depending on pname",
"glGetMaterialiv params": 0,
"glGetPixelMapfv values": "?glGet(map)",
"glGetPixelMapuiv values": 0,
"glGetPixelMapusv values": 0,
"glGetPointerv params": 0,
"glGetPolygonStipple mask": 0,
"glGetTexEnvfv params": 0,
"glGetTexEnviv params": 0,
"glGetTexGendv params": 0,
"glGetTexGenfv params": 0,
"glGetTexGeniv params": 0,
"glGetTexImage pixels": 0,
"glGetTexLevelParameterfv params": 0,
"glGetTexLevelParameteriv params": 0,
"glGetTexParameterfv params": 0,
"glGetTexParameteriv params": 0,
"glIndexdv c": 0,
"glIndexfv c": 0,
"glIndexiv c": 0,
"glIndexsv c": 0,
"glIndexubv c": 0,
"glIndexPointer pointer": 0,
"glInterleavedArrays pointer": 0,
"glLightfv params": 0,
"glLightiv params": 0,
"glLightModelfv params": 0,
"glLightModeliv params": 0,
"glLoadMatrixd m": 0,
"glLoadMatrixf m": 0,
"glMap1d points": 0,
"glMap1f points": 0,
"glMap2d points": 0,
"glMap2f points": 0,
"glMaterialfv params": 0,
"glMaterialiv params": 0,
"glMultMatrixd m": 0,
"glMultMatrixf m": 0,
"glNormal3bv v": 3,
"glNormal3dv v": 3,
"glNormal3fv v": 3,
"glNormal3iv v": 3,
"glNormal3sv v": 3,
"glNormalPointer pointer": 0,
"glPixelMapfv values": 0,
"glPixelMapuiv values": 0,
"glPixelMapusv values": 0,
"glPolygonStipple mask": 0,
"glPrioritizeTextures textures": 0,
"glPrioritizeTextures priorities": 0,
"glRasterPos2dv v": 2,
"glRasterPos2fv v": 2,
"glRasterPos2iv v": 2,
"glRasterPos2sv v": 2,
"glRasterPos3dv v": 3,
"glRasterPos3fv v": 3,
"glRasterPos3iv v": 3,
"glRasterPos3sv v": 3,
"glRasterPos4dv v": 4,
"glRasterPos4fv v": 4,
"glRasterPos4iv v": 4,
"glRasterPos4sv v": 4,
"glReadPixels pixels": 0,
"glRectdv v1": 2,
"glRectdv v2": 2,
"glRectfv v1": 2,
"glRectfv v2": 2,
"glRectiv v1": 2,
"glRectiv v2": 2,
"glRectsv v1": 2,
"glRectsv v2": 2,
"glSelectBuffer buffer": 0,
"glTexCoord1dv v": 1,
"glTexCoord1fv v": 1,
"glTexCoord1iv v": 1,
"glTexCoord1sv v": 1,
"glTexCoord2dv v": 2,
"glTexCoord2fv v": 2,
"glTexCoord2iv v": 2,
"glTexCoord2sv v": 2,
"glTexCoord3dv v": 3,
"glTexCoord3fv v": 3,
"glTexCoord3iv v": 3,
"glTexCoord3sv v": 3,
"glTexCoord4dv v": 4,
"glTexCoord4fv v": 4,
"glTexCoord4iv v": 4,
"glTexCoord4sv v": 4,
"glTexCoordPointer pointer": 0,
"glTexEnvfv params": 0,
"glTexEnviv params": 0,
"glTexGendv params": 0,
"glTexGenfv params": 0,
"glTexGeniv params": 0,
"glTexImage1D pixels": 0,
"glTexImage2D pixels": 0,
"glTexParameterfv params": 0,
"glTexParameteriv params": 0,
"glTexSubImage1D pixels": 0,
"glTexSubImage2D pixels": 0,
"gluBeginCurve nurb": 0,
"gluEndCurve nurb": 0,
"gluBeginPolygon tess": 0,
"gluEndPolygon tess": 0,
"gluBeginSurface nurb": 0,
"gluEndSurface nurb": 0,
"gluBeginTrim nurb": 0,
"gluEndTrim nurb": 0,
"gluBuild1DMipmaps data": 0,
"gluBuild2DMipmaps data": 0,
"gluCylinder quad": 0,
"gluDeleteNurbsRenderer nurb": 0,
"gluDeleteQuadric quad": 0,
"gluDeleteTess tess": 0,
"gluDisk quad": 0,
"gluGetNurbsProperty nurb": 0,
"gluGetNurbsProperty data": 0,
"gluGetTessProperty tess": 0,
"gluGetTessProperty data": 0,
"gluLoadSamplingMatrices nurb": 0,
"gluLoadSamplingMatrices model": 16,
"gluLoadSamplingMatrices perspective": 16,
"gluLoadSamplingMatrices view": 4,
"gluNextContour tess": 0,
"gluNurbsCallbackDataEXT nurb": 0,
"gluNurbsCallbackDataEXT userData": 0,
"gluNurbsCallback nurb": 0,
"gluNurbsCurve nurb": 0,
"gluNurbsCurve knots": 0,
"gluNurbsCurve control": 0,
"gluNurbsProperty nurb": 0,
"gluNurbsSurface nurb": 0,
"gluNurbsSurface sKnots": 0,
"gluNurbsSurface tKnots": 0,
"gluNurbsSurface control": 0,
"gluPartialDisk quad": 0,
"gluPickMatrix viewport": 4,
"gluProject model": 16,
"gluProject proj": 16,
"gluProject view": 4,
"gluProject winX": 1,
"gluProject winY": 1,
"gluProject winZ": 1,
"gluPwlCurve nurb": 0,
"gluPwlCurve data": 0,
"gluQuadricCallback quad": 0,
"gluQuadricDrawStyle quad": 0,
"gluQuadricNormals quad": 0,
"gluQuadricOrientation quad": 0,
"gluQuadricTexture quad": 0,
"gluScaleImage dataIn": 0,
"gluScaleImage dataOut": 0,
"gluSphere quad": 0,
"gluTessBeginContour tess": 0,
"gluTessEndContour tess": 0,
"gluTessBeginPolygon tess": 0,
"gluTessBeginPolygon data": 0,
"gluTessCallback tess": 0,
"gluTessEndPolygon tess": 0,
"gluTessNormal tess": 0,
"gluTessProperty tess": 0,
"gluTessVertex tess": 0,
"gluTessVertex location": 0,
"gluTessVertex data": 0,
"gluUnProject model": 16,
"gluUnProject proj": 16,
"gluUnProject view": 4,
"gluUnProject objX": 1,
"gluUnProject objY": 1,
"gluUnProject objZ": 1,
"glVertex2dv v": 2,
"glVertex2fv v": 2,
"glVertex2iv v": 2,
"glVertex2sv v": 2,
"glVertex3dv v": 3,
"glVertex3fv v": 3,
"glVertex3iv v": 3,
"glVertex3sv v": 3,
"glVertex4dv v": 4,
"glVertex4fv v": 4,
"glVertex4iv v": 4,
"glVertex4sv v": 4,
"glVertexPointer pointer": 0,
}
version_guards = {
"glBlendColorEXT": "GL_XXX",
"glEdgeFlagPointer": "GLTBX_XXX",
"gluNurbsCallbackDataEXT": "GL_XXX",
}
special_wrappers = {
"glGetString": [
"""\
boost::python::str
gl_GetString(boost::python::object const& py_name)
{
boost::python::extract<GLenum> name_proxy(py_name);
GLenum name = name_proxy();
boost::python::str result(
reinterpret_cast<const char*>(glGetString(name)));
return result;
}
""",
None
],
"gluGetString": [
"""\
boost::python::str
glu_GetString(boost::python::object const& py_name)
{
boost::python::extract<GLenum> name_proxy(py_name);
GLenum name = name_proxy();
boost::python::str result(
reinterpret_cast<const char*>(gluGetString(name)));
return result;
}
""",
None
],
"gluErrorString": [
"""\
boost::python::str
glu_ErrorString(boost::python::object const& py_error)
{
boost::python::extract<GLenum> error_proxy(py_error);
GLenum error = error_proxy();
return boost::python::str(
reinterpret_cast<const char*>(gluErrorString(error)));
}
""",
None
],
}
def bytes_converters(signature, expected_size="0", post_extract=""):
assert signature.return_type == "void"
function_name = signature.function_name
arg_type = signature.args[-1].type
arg_name = signature.args[-1].name
arg_type_name = arg_type+" "+arg_name
is_const = arg_type.startswith("const ")
call = "\n".join(signature.format_call(
return_directly=is_const,
prefix=" "))
if (not is_const):
call += "\n %s_proxy.write_back();" % arg_name
is_const = "false"
else:
is_const = "true"
extracts = [""]
for arg in signature.args[:-1]:
assert not arg.type.startswith("const ")
extracts.append("boost::python::extract<%s> %s_proxy(py_%s);" % (
arg.type, arg.name, arg.name))
extracts.append("%s %s = %s_proxy();" % (
arg.type, arg.name, arg.name))
extracts = "\n ".join(extracts)
return """\
%(extracts)s%(post_extract)s
if (type == GL_BYTE) {
boost_python::converter_str<GLubyte> %(arg_name)s_proxy(
"%(arg_name)s", py_%(arg_name)s, %(expected_size)s, %(is_const)s);
%(arg_type_name)s = reinterpret_cast<%(arg_type)s>(
%(arg_name)s_proxy.get());
%(call)s
}
else if (type == GL_UNSIGNED_BYTE) {
boost_python::converter_str<GLbyte> %(arg_name)s_proxy(
"%(arg_name)s", py_%(arg_name)s, %(expected_size)s, %(is_const)s);
%(arg_type_name)s = reinterpret_cast<%(arg_type)s>(
%(arg_name)s_proxy.get());
%(call)s
}
else {
throw std::runtime_error(
"Conversion not implemented for given GLenum type:"
" %(function_name)s(): %(arg_type_name)s");
}""" % vars()
def glReadPixels_wrapper_body(signature):
return bytes_converters(
signature=signature,
expected_size="expected_size",
post_extract="""
boost::python::ssize_t expected_size = glReadPixels_pixels_expected_size(
width, height, format, type);""")
special_wrapper_bodies = {
"glCallLists": bytes_converters,
"glDrawPixels": bytes_converters,
"glGetTexImage": bytes_converters,
"glReadPixels": glReadPixels_wrapper_body,
"glTexImage1D": bytes_converters,
"glTexImage2D": bytes_converters,
"glTexSubImage1D": bytes_converters,
"glTexSubImage2D": bytes_converters,
}
class argument:
def __init__(self, function_name, string):
fields = string.split()
self.type = " ".join(fields[:-1])
self.name = fields[-1]
assert self.type in arg_types
arg_types[self.type] += 1
if (self.type[-1] != "*"):
self.pointee_size = None
else:
self.pointee_size = pointee_sizes[function_name + " " + self.name]
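# Each signature line read from opengl_specs.txt has the form
# "<return type> <function name> ( <arg type> <arg name>, ... )", e.g.
# "void glBegin ( GLenum mode )"; the parser below relies on the trailing " )".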
class signature:
def __init__(self, string):
assert string.endswith(" )")
fields = string[:-2].split("(")
assert len(fields) == 2
arg_strings = []
for arg in fields[1].split(","):
arg_strings.append(
" ".join(arg.replace("*", " * ").split()).replace(" *", "*"))
fields = fields[0].split()
self.return_type = " ".join(" ".join(fields[:-1])
.replace("*", " * ").split()).replace(" *", "*")
self.function_name = fields[-1]
if (self.return_type != "void"):
assert self.return_type in return_types
return_types[self.return_type] += 1
self.args = []
if (arg_strings != ["void"]):
for arg in arg_strings:
self.args.append(argument(self.function_name, arg))
self.version_guard = version_guards.get(self.function_name, None)
self.have_opaque_pointer = self.return_type in opaque_pointers
if (not self.have_opaque_pointer):
for arg in self.args:
if (arg.type in opaque_pointers):
self.have_opaque_pointer = True
def show(self, f=None):
if (f is None): f = sys.stdout
print("function name:", self.function_name, file=f)
print(" return type:", self.return_type, file=f)
for arg in self.args:
print(" arg type:", arg.type, "name:", arg.name, file=f)
def wrapper_function_name(self):
i = 2
if (self.function_name.startswith("glu")): i = 3
return self.function_name[:i]+"_"+self.function_name[i:]
def write_no_opaque_pointers_guard_if(self, f):
if (self.have_opaque_pointer):
print("#if !defined(GLTBX_NO_OPAQUE_POINTERS)", file=f)
def write_no_opaque_pointers_guard_endif(self, f):
if (self.have_opaque_pointer):
print("#endif", file=f)
def write_version_guard_if(self, f):
if (self.version_guard is not None):
print("#if defined(%s)" % self.version_guard, file=f)
def write_version_guard_endif(self, f):
if (self.version_guard is not None):
print("#endif", file=f)
def format_call(self, return_directly, prefix):
s = ""
if (self.return_type != "void"):
if (return_directly):
s += "return "
else:
s += self.return_type + " result = "
s += self.function_name+"("
s += ", ".join([arg.name for arg in self.args])
s += ");"
result = []
indent = ""
for line in line_breaker(s, 70):
result.append(prefix+indent+line)
indent = " "
return result
def write_wrapper(self, f):
special = special_wrappers.get(self.function_name, None)
if (special is not None and special[0] is not None):
print(special[0], file=f)
return
lines = [
self.return_type,
self.wrapper_function_name()+"("
]
for arg in self.args:
lines.append(" %s %s," % (
"boost::python::object const&", "py_"+arg.name))
if (lines[-1][-1] == ","):
lines[-1] = lines[-1][:-1]
lines[-1] += ")"
lines.append("{")
special_body = special_wrapper_bodies.get(self.function_name, None)
if (special_body is not None):
lines.extend(special_body(self).splitlines())
else:
not_implemented = [
"const void*",
"GLvoid*",
"GLvoid**",
"const GLvoid*",
"glu_function_pointer"]
to_write_back = []
ss = ""
for arg in self.args:
if ((arg.pointee_size is not None
and arg.type not in opaque_pointers)
or arg.type == "glu_function_pointer"):
if (arg.type in not_implemented):
lines.append(ss+" throw std::runtime_error(")
lines.append(ss+' "Conversion not implemented:"')
lines.append(ss+' " %s(): %s %s");' % (
self.function_name, arg.type, arg.name))
ss = "//"
lines.append(ss+" %s %s = 0;" % (arg.type, arg.name))
else:
expected_size = arg.pointee_size
if (isinstance(expected_size, str)):
if (expected_size[0] == "?"):
expected_size = "0"
else:
assert isinstance(expected_size, int)
expected_size = str(expected_size)
if (arg.type.startswith("const ")):
is_const = "true"
converter_t = arg.type[6:-1]
else:
is_const = "false"
converter_t = arg.type[:-1]
if ( arg.type.endswith("GLbyte*")
or arg.type.endswith("GLubyte*")):
converter = "boost_python::converter_str"
else:
converter = "boost_python::converter"
lines.append(ss+' %s<%s> %s_proxy(' % (
converter,
converter_t,
arg.name))
lines.append(ss+' "%s", py_%s, %s, %s);' % (
arg.name,
arg.name,
expected_size,
is_const))
lines.append(ss+" %s %s = %s_proxy.get();" % (
arg.type, arg.name, arg.name))
if (is_const == "false"): to_write_back.append(arg)
else:
assert not arg.type.startswith("const ")
lines.append(ss+' boost::python::extract<%s> %s_proxy(py_%s);' % (
arg.type, arg.name, arg.name))
lines.append(ss+" %s %s = %s_proxy();" % (
arg.type, arg.name, arg.name))
return_directly = len(to_write_back) == 0
lines.extend([ss+line for line in
self.format_call(return_directly=return_directly, prefix=" ")])
for arg in to_write_back:
lines.append(ss+" %s_proxy.write_back();" % arg.name)
if (self.return_type != "void" and not return_directly):
lines.append(ss+" return result;")
lines.append("}")
self.write_no_opaque_pointers_guard_if(f=f)
self.write_version_guard_if(f=f)
for line in lines:
print(" ", line, file=f)
self.write_version_guard_endif(f=f)
self.write_no_opaque_pointers_guard_endif(f=f)
print(file=f)
def write_def(self, f):
special = special_wrappers.get(self.function_name, None)
if (special is not None and special[1] is not None):
print(special[1], file=f)
return
return_opaque = self.return_type in opaque_pointers
def_args = (self.function_name, self.wrapper_function_name())
self.write_no_opaque_pointers_guard_if(f=f)
self.write_version_guard_if(f=f)
if (len(self.args) == 0):
if (not return_opaque):
print(' def("%s", %s);' % def_args, file=f)
else:
print(' def("%s", %s,' % def_args, file=f)
print(" return_value_policy<return_opaque_pointer>());", file=f)
else:
assert not return_opaque
print(' def("%s", %s, (' % def_args, file=f)
s = ""
for arg in self.args:
s += ', arg("%s")' % arg.name
s = s[2:] + "));"
for line in line_breaker(s, 73):
print(" "+line, file=f)
self.write_version_guard_endif(f=f)
self.write_no_opaque_pointers_guard_endif(f=f)
def get_signatures():
result = []
specs_file = libtbx.env.under_dist("gltbx", "opengl_specs.txt")
for line in open(specs_file).read().splitlines():
if (not (line.startswith("GL_") or line.startswith("GLU_"))):
result.append(signature(line))
return result
def write_function_wrappers(f, namespace, signatures, i_fragment):
write_this_is_auto_generated(f, this)
print("""\
#include <gltbx/special_wrapper_support.h>
#include <gltbx/pointer_args_bpl.h>
#include <gltbx/error.h>
""", file=f)
if (namespace == "glu"):
print("#if defined(__GNUC__) && __GNUC__ == 2 \\", file=f)
print(" && __GNUC_MINOR__ == 96 && __GNUC_PATCHLEVEL__ == 0", file=f)
print("#define GLTBX_NO_OPAQUE_POINTERS", file=f)
print("#else", file=f)
print("#include <boost/python/return_value_policy.hpp>", file=f)
print("#include <boost/python/return_opaque_pointer.hpp>", file=f)
for opaque_pointer in opaque_pointers:
print("BOOST_PYTHON_OPAQUE_SPECIALIZED_TYPE_ID(%s)" % (
opaque_pointer[:-1]), file=f)
print("#endif", file=f)
print(file=f)
print("""\
namespace gltbx { namespace %s { namespace {
""" % namespace, file=f)
for signature in signatures:
signature.write_wrapper(f=f)
print("""\
} // namespace <anonymous>
namespace boost_python {
void
wrap_functions_%02d()
{
using namespace boost::python;""" % i_fragment, file=f)
for signature in signatures:
signature.write_def(f=f)
print("""\
}
}}} // namespace gltbx::%s::boost_python""" % namespace, file=f)
def run(target_dir):
if (not os.path.isdir(target_dir)):
os.makedirs(target_dir)
gl_signatures = []
glu_signatures = []
for signature in get_signatures():
if (signature.function_name.startswith("glu")):
glu_signatures.append(signature)
else:
gl_signatures.append(signature)
for namespace,signatures,n_fragments in [("gl", gl_signatures, 16),
("glu", glu_signatures, 4)]:
block_size = len(signatures) // n_fragments
if (block_size * n_fragments < len(signatures)):
block_size += 1
for i_fragment in range(n_fragments):
file_name = libtbx.path.norm_join(
target_dir, namespace+"_functions_%02d_bpl.cpp" % i_fragment)
with open(file_name, "w") as f:
write_function_wrappers(
f=f,
namespace=namespace,
signatures=signatures[i_fragment*block_size:(i_fragment+1)*block_size],
i_fragment=i_fragment)
if __name__ == "__main__":
run(".")
|
Data Structure/Array Or Vector/Convert Array into Zig-Zag Fashion/solutionByVaishnavi.py
|
Mdanish777/Programmers-Community
| 261 |
72696
|
<filename>Data Structure/Array Or Vector/Convert Array into Zig-Zag Fashion/solutionByVaishnavi.py
def zigZag_Fashion(array, length):
flag = True
for i in range(length - 1):
if flag is True:
if array[i] > array[i+1]:
array[i],array[i+1] = array[i+1],array[i]
else:
if array[i] < array[i+1]:
array[i],array[i+1] = array[i+1],array[i]
        flag = not flag
print(array)
arraySize = int(input("Enter Array Size:- " ))
array=[]
print("Enter Array Elements")
for i in range(arraySize):
array.append(int(input()))
length = len(array)
zigZag_Fashion(array, length)
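# Example (editor's note): zigZag_Fashion([4, 3, 7, 8, 6, 2, 1], 7) prints
# [3, 7, 4, 8, 2, 6, 1], i.e. the a < b > c < d > e < f > g zig-zag pattern.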
|
server/workers/tests/generate_testdata.py
|
chreman/Headstart
| 111 |
72697
|
<filename>server/workers/tests/generate_testdata.py
import json
import pandas as pd
import numpy as np
import requests
from tqdm import tqdm
api_url = "http://127.0.0.1/api"
df = pd.read_csv("Backend regression test cases.csv")
def extract_params(case):
search = {}
search["service"] = case.get("data integration", "").lower()
search["q"] = case.get("search query", "")
search["from"] = case.get("from", "")
search["to"] = case.get("to", "")
search["sorting"] = case.get("sorting", "")
search["article_types"] = case.get("article types", "[]")
return search
def get_input_data(search, raw=False):
params = search
service = params.pop('service')
if service not in ["base", "pubmed"]:
return None
if service == "pubmed":
params.pop("article_types")
params["limit"] = 100
if service == "base":
params["limit"] = 120
doctypes = eval(params["article_types"])
if isinstance(doctypes, list):
params["document_types"] = [a for a in doctypes]
else:
params["document_types"] = [121]
params.pop("article_types", [])
if raw:
params["raw"] = True
url = "/".join([api_url, service, "search"])
res = requests.post(url, json=params)
return res
for r in tqdm(df.iterrows()):
case = dict(r[1])
if np.isnan(case["case id"]):
continue
s = extract_params(case)
res = get_input_data(s, raw=True)
if res is None:
continue
res_json = res.json()
res_json.pop("id")
input_data = res_json["input_data"]
params = res_json["params"]
with open("knowncases/testcase%d.json" % case["case id"], "w") as outfile:
json.dump(res_json, outfile, indent=4, separators=(',', ': '), sort_keys=True)
|
calamari_ocr/scripts/cross_fold_train.py
|
jacektl/calamari
| 922 |
72722
|
from paiargparse import PAIArgumentParser
from tfaip.util.logging import logger
from calamari_ocr.ocr.training.cross_fold_trainer import (
CrossFoldTrainer,
CrossFoldTrainerParams,
)
logger = logger(__name__)
def run():
return main(parse_args())
def parse_args(args=None):
parser = PAIArgumentParser()
parser.add_root_argument("root", CrossFoldTrainerParams, CrossFoldTrainerParams())
params: CrossFoldTrainerParams = parser.parse_args(args).root
# TODO: add the training args (omit those params, that are set by the cross fold training)
# setup_train_args(parser, omit=["files", "validation", "weights",
# "early_stopping_best_model_output_dir", "early_stopping_best_model_prefix",
# "output_dir"])
return params
def main(params):
trainer = CrossFoldTrainer(params)
logger.info("Running cross fold train with params")
logger.info(params.to_json(indent=2))
trainer.run()
if __name__ == "__main__":
run()
|
tutorials/stock-wallet/microservices/stocks/tests/test_commands/test_services.py
|
bhardwajRahul/minos-python
| 247 |
72726
|
import sys
import unittest
import pendulum
from src import (
Stocks,
StocksCommandService,
)
from minos.networks import (
InMemoryRequest,
Response,
)
from tests.utils import (
build_dependency_injector,
)
class TestStocksCommandService(unittest.IsolatedAsyncioTestCase):
def setUp(self) -> None:
self.injector = build_dependency_injector()
async def asyncSetUp(self) -> None:
await self.injector.wire(modules=[sys.modules[__name__]])
async def asyncTearDown(self) -> None:
await self.injector.unwire()
def test_constructor(self):
service = StocksCommandService()
self.assertIsInstance(service, StocksCommandService)
async def test_get_remote_quotes(self):
service = StocksCommandService()
now = pendulum.now()
now_minus_one_month = now.subtract(months=1)
response = service.call_remote("AAPL", now_minus_one_month.to_date_string(), now.to_date_string())
self.assertIsInstance(response, list)
if __name__ == "__main__":
unittest.main()
|
docs_src/settings/app01/main.py
|
shashankrnr32/fastapi
| 53,007 |
72733
|
<reponame>shashankrnr32/fastapi
from fastapi import FastAPI
from .config import settings
app = FastAPI()
@app.get("/info")
async def info():
return {
"app_name": settings.app_name,
"admin_email": settings.admin_email,
"items_per_user": settings.items_per_user,
}
|
tests/peer/test_peerstore.py
|
g-r-a-n-t/py-libp2p
| 315 |
72747
|
<gh_stars>100-1000
import pytest
from libp2p.peer.peerstore import PeerStore, PeerStoreError
# Testing methods from IPeerStore base class.
def test_peer_info_empty():
store = PeerStore()
with pytest.raises(PeerStoreError):
store.peer_info("peer")
def test_peer_info_basic():
store = PeerStore()
store.add_addr("peer", "/foo", 10)
info = store.peer_info("peer")
assert info.peer_id == "peer"
assert info.addrs == ["/foo"]
def test_add_get_protocols_basic():
store = PeerStore()
store.add_protocols("peer1", ["p1", "p2"])
store.add_protocols("peer2", ["p3"])
assert set(store.get_protocols("peer1")) == set(["p1", "p2"])
assert set(store.get_protocols("peer2")) == set(["p3"])
def test_add_get_protocols_extend():
store = PeerStore()
store.add_protocols("peer1", ["p1", "p2"])
store.add_protocols("peer1", ["p3"])
assert set(store.get_protocols("peer1")) == set(["p1", "p2", "p3"])
def test_set_protocols():
store = PeerStore()
store.add_protocols("peer1", ["p1", "p2"])
store.add_protocols("peer2", ["p3"])
store.set_protocols("peer1", ["p4"])
store.set_protocols("peer2", [])
assert set(store.get_protocols("peer1")) == set(["p4"])
assert set(store.get_protocols("peer2")) == set([])
# Test with methods from other Peer interfaces.
def test_peers():
store = PeerStore()
store.add_protocols("peer1", [])
store.put("peer2", "key", "val")
store.add_addr("peer3", "/foo", 10)
assert set(store.peer_ids()) == set(["peer1", "peer2", "peer3"])
|
sdk/python/pulumi_aws/servicediscovery/_inputs.py
|
alexbowers/pulumi-aws
| 260 |
72776
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'ServiceDnsConfigArgs',
'ServiceDnsConfigDnsRecordArgs',
'ServiceHealthCheckConfigArgs',
'ServiceHealthCheckCustomConfigArgs',
]
@pulumi.input_type
class ServiceDnsConfigArgs:
def __init__(__self__, *,
dns_records: pulumi.Input[Sequence[pulumi.Input['ServiceDnsConfigDnsRecordArgs']]],
namespace_id: pulumi.Input[str],
routing_policy: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[Sequence[pulumi.Input['ServiceDnsConfigDnsRecordArgs']]] dns_records: An array that contains one DnsRecord object for each resource record set.
:param pulumi.Input[str] namespace_id: The ID of the namespace to use for DNS configuration.
:param pulumi.Input[str] routing_policy: The routing policy that you want to apply to all records that Route 53 creates when you register an instance and specify the service. Valid Values: MULTIVALUE, WEIGHTED
"""
pulumi.set(__self__, "dns_records", dns_records)
pulumi.set(__self__, "namespace_id", namespace_id)
if routing_policy is not None:
pulumi.set(__self__, "routing_policy", routing_policy)
@property
@pulumi.getter(name="dnsRecords")
def dns_records(self) -> pulumi.Input[Sequence[pulumi.Input['ServiceDnsConfigDnsRecordArgs']]]:
"""
An array that contains one DnsRecord object for each resource record set.
"""
return pulumi.get(self, "dns_records")
@dns_records.setter
def dns_records(self, value: pulumi.Input[Sequence[pulumi.Input['ServiceDnsConfigDnsRecordArgs']]]):
pulumi.set(self, "dns_records", value)
@property
@pulumi.getter(name="namespaceId")
def namespace_id(self) -> pulumi.Input[str]:
"""
The ID of the namespace to use for DNS configuration.
"""
return pulumi.get(self, "namespace_id")
@namespace_id.setter
def namespace_id(self, value: pulumi.Input[str]):
pulumi.set(self, "namespace_id", value)
@property
@pulumi.getter(name="routingPolicy")
def routing_policy(self) -> Optional[pulumi.Input[str]]:
"""
The routing policy that you want to apply to all records that Route 53 creates when you register an instance and specify the service. Valid Values: MULTIVALUE, WEIGHTED
"""
return pulumi.get(self, "routing_policy")
@routing_policy.setter
def routing_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "routing_policy", value)
@pulumi.input_type
class ServiceDnsConfigDnsRecordArgs:
def __init__(__self__, *,
ttl: pulumi.Input[int],
type: pulumi.Input[str]):
"""
:param pulumi.Input[int] ttl: The amount of time, in seconds, that you want DNS resolvers to cache the settings for this resource record set.
:param pulumi.Input[str] type: The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy. Valid Values: HTTP, HTTPS, TCP
"""
pulumi.set(__self__, "ttl", ttl)
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def ttl(self) -> pulumi.Input[int]:
"""
The amount of time, in seconds, that you want DNS resolvers to cache the settings for this resource record set.
"""
return pulumi.get(self, "ttl")
@ttl.setter
def ttl(self, value: pulumi.Input[int]):
pulumi.set(self, "ttl", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy. Valid Values: HTTP, HTTPS, TCP
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@pulumi.input_type
class ServiceHealthCheckConfigArgs:
def __init__(__self__, *,
failure_threshold: Optional[pulumi.Input[int]] = None,
resource_path: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[int] failure_threshold: The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10.
:param pulumi.Input[str] resource_path: The path that you want Route 53 to request when performing health checks. Route 53 automatically adds the DNS name for the service. If you don't specify a value, the default value is /.
:param pulumi.Input[str] type: The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy. Valid Values: HTTP, HTTPS, TCP
"""
if failure_threshold is not None:
pulumi.set(__self__, "failure_threshold", failure_threshold)
if resource_path is not None:
pulumi.set(__self__, "resource_path", resource_path)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="failureThreshold")
def failure_threshold(self) -> Optional[pulumi.Input[int]]:
"""
The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10.
"""
return pulumi.get(self, "failure_threshold")
@failure_threshold.setter
def failure_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "failure_threshold", value)
@property
@pulumi.getter(name="resourcePath")
def resource_path(self) -> Optional[pulumi.Input[str]]:
"""
The path that you want Route 53 to request when performing health checks. Route 53 automatically adds the DNS name for the service. If you don't specify a value, the default value is /.
"""
return pulumi.get(self, "resource_path")
@resource_path.setter
def resource_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_path", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy. Valid Values: HTTP, HTTPS, TCP
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class ServiceHealthCheckCustomConfigArgs:
def __init__(__self__, *,
failure_threshold: Optional[pulumi.Input[int]] = None):
"""
:param pulumi.Input[int] failure_threshold: The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10.
"""
if failure_threshold is not None:
pulumi.set(__self__, "failure_threshold", failure_threshold)
@property
@pulumi.getter(name="failureThreshold")
def failure_threshold(self) -> Optional[pulumi.Input[int]]:
"""
The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10.
"""
return pulumi.get(self, "failure_threshold")
@failure_threshold.setter
def failure_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "failure_threshold", value)
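# --- Added usage sketch (not part of the generated file) ---
# A hedged example of how these input classes are typically passed to
# `aws.servicediscovery.Service`; the `namespace` variable below is hypothetical.
#
#     import pulumi_aws as aws
#
#     service = aws.servicediscovery.Service("example",
#         name="example",
#         dns_config=aws.servicediscovery.ServiceDnsConfigArgs(
#             namespace_id=namespace.id,
#             dns_records=[aws.servicediscovery.ServiceDnsConfigDnsRecordArgs(ttl=10, type="A")],
#             routing_policy="MULTIVALUE",
#         ),
#         health_check_custom_config=aws.servicediscovery.ServiceHealthCheckCustomConfigArgs(
#             failure_threshold=1,
#         ),
#     )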
|
lightautoml/image/utils.py
|
kobylkinks/LightAutoML
| 766 |
72784
|
<gh_stars>100-1000
"""Image utils."""
from PIL import Image
def pil_loader(path: str) -> Image:
"""Load image from pathes.
Args:
path: Image path.
Returns:
        Loaded PIL Image in RGB.
"""
with open(path, "rb") as f:
img = Image.open(f)
return img.convert("RGB")
|
Python/Matplotlib/01-Introduction/finished_code.py
|
sagarsaliya/code_snippets
| 9,588 |
72792
|
from matplotlib import pyplot as plt
plt.xkcd()
ages_x = [18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55]
py_dev_y = [20046, 17100, 20000, 24744, 30500, 37732, 41247, 45372, 48876, 53850, 57287, 63016, 65998, 70003, 70000, 71496, 75370, 83640, 84666,
84392, 78254, 85000, 87038, 91991, 100000, 94796, 97962, 93302, 99240, 102736, 112285, 100771, 104708, 108423, 101407, 112542, 122870, 120000]
plt.plot(ages_x, py_dev_y, label='Python')
js_dev_y = [16446, 16791, 18942, 21780, 25704, 29000, 34372, 37810, 43515, 46823, 49293, 53437, 56373, 62375, 66674, 68745, 68746, 74583, 79000,
78508, 79996, 80403, 83820, 88833, 91660, 87892, 96243, 90000, 99313, 91660, 102264, 100000, 100000, 91660, 99240, 108000, 105000, 104000]
plt.plot(ages_x, js_dev_y, label='JavaScript')
dev_y = [17784, 16500, 18012, 20628, 25206, 30252, 34368, 38496, 42000, 46752, 49320, 53200, 56000, 62316, 64928, 67317, 68748, 73752, 77232,
78000, 78508, 79536, 82488, 88935, 90000, 90056, 95000, 90000, 91633, 91660, 98150, 98964, 100000, 98988, 100000, 108923, 105000, 103117]
plt.plot(ages_x, dev_y, color='#444444', linestyle='--', label='All Devs')
plt.xlabel('Ages')
plt.ylabel('Median Salary (USD)')
plt.title('Median Salary (USD) by Age')
plt.legend()
plt.tight_layout()
plt.savefig('plot.png')
plt.show()
|
neuspell/commons.py
|
TheMortalCoil92/neuspell_access
| 422 |
72820
|
<gh_stars>100-1000
import os
from string import punctuation
from .util import is_module_available, get_module_or_attr
""" default paths """
DEFAULT_DATA_PATH = os.path.join(os.path.split(__file__)[0], "../data")
print(f"data folder is set to `{DEFAULT_DATA_PATH}` script")
if not os.path.exists(DEFAULT_DATA_PATH):
os.makedirs(DEFAULT_DATA_PATH)
DEFAULT_TRAINTEST_DATA_PATH = os.path.join(DEFAULT_DATA_PATH, "traintest")
ALLENNLP_ELMO_PRETRAINED_FOLDER = os.path.join(DEFAULT_DATA_PATH, "allennlp_elmo_pretrained")
""" special tokenizers """
_SPACY_TOKENIZER, _SPACY_TAGGER = None, None
def _load_spacy_tokenizer():
global _SPACY_TOKENIZER, _SPACY_TAGGER
if not _SPACY_TOKENIZER:
if is_module_available("spacy"):
if not is_module_available("en_core_web_sm"):
raise ImportError("run `python -m spacy download en_core_web_sm`")
print("creating spacy models ...")
spacy_nlp = get_module_or_attr("en_core_web_sm").load(disable=["tagger", "ner", "lemmatizer"])
_SPACY_TOKENIZER = lambda inp: [token.text for token in spacy_nlp(inp)]
# spacy_nlp = get_module_or_attr("en_core_web_sm").load(disable=["ner", "lemmatizer"])
# _SPACY_TAGGER = lambda inp: [token.tag for token in spacy_nlp(inp)]
print("spacy models initialized")
else:
raise ImportError("`pip install spacy` to use spacy retokenizer")
return _SPACY_TOKENIZER
def _custom_tokenizer(inp: str):
try:
_spacy_tokenizer = _load_spacy_tokenizer()
get_tokens = lambda inp: _spacy_tokenizer(inp)
except ImportError as e:
print(e)
get_tokens = lambda inp: inp.split()
def _is_punct(inp):
return all([i in punctuation for i in inp])
tokens = get_tokens(inp)
new_tokens = []
str_ = ""
for token in tokens:
if _is_punct(token):
str_ += token
else:
new_tokens.append(str_)
str_ = ""
new_tokens.append(token)
if str_:
new_tokens.append(str_)
return " ".join(new_tokens)
spacy_tokenizer = _custom_tokenizer
|
src/oci/compute_instance_agent/models/instance_agent_command_execution_output_via_text_details.py
|
Manny27nyc/oci-python-sdk
| 249 |
72829
|
<gh_stars>100-1000
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .instance_agent_command_execution_output_content import InstanceAgentCommandExecutionOutputContent
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class InstanceAgentCommandExecutionOutputViaTextDetails(InstanceAgentCommandExecutionOutputContent):
"""
The execution output from a command when returned in plain text.
"""
def __init__(self, **kwargs):
"""
Initializes a new InstanceAgentCommandExecutionOutputViaTextDetails object with values from keyword arguments. The default value of the :py:attr:`~oci.compute_instance_agent.models.InstanceAgentCommandExecutionOutputViaTextDetails.output_type` attribute
of this class is ``TEXT`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param output_type:
The value to assign to the output_type property of this InstanceAgentCommandExecutionOutputViaTextDetails.
Allowed values for this property are: "TEXT", "OBJECT_STORAGE_URI", "OBJECT_STORAGE_TUPLE"
:type output_type: str
:param exit_code:
The value to assign to the exit_code property of this InstanceAgentCommandExecutionOutputViaTextDetails.
:type exit_code: int
:param message:
The value to assign to the message property of this InstanceAgentCommandExecutionOutputViaTextDetails.
:type message: str
:param text:
The value to assign to the text property of this InstanceAgentCommandExecutionOutputViaTextDetails.
:type text: str
:param text_sha256:
The value to assign to the text_sha256 property of this InstanceAgentCommandExecutionOutputViaTextDetails.
:type text_sha256: str
"""
self.swagger_types = {
'output_type': 'str',
'exit_code': 'int',
'message': 'str',
'text': 'str',
'text_sha256': 'str'
}
self.attribute_map = {
'output_type': 'outputType',
'exit_code': 'exitCode',
'message': 'message',
'text': 'text',
'text_sha256': 'textSha256'
}
self._output_type = None
self._exit_code = None
self._message = None
self._text = None
self._text_sha256 = None
self._output_type = 'TEXT'
@property
def text(self):
"""
Gets the text of this InstanceAgentCommandExecutionOutputViaTextDetails.
The command output.
:return: The text of this InstanceAgentCommandExecutionOutputViaTextDetails.
:rtype: str
"""
return self._text
@text.setter
def text(self, text):
"""
Sets the text of this InstanceAgentCommandExecutionOutputViaTextDetails.
The command output.
:param text: The text of this InstanceAgentCommandExecutionOutputViaTextDetails.
:type: str
"""
self._text = text
@property
def text_sha256(self):
"""
Gets the text_sha256 of this InstanceAgentCommandExecutionOutputViaTextDetails.
SHA-256 checksum value of the text content.
:return: The text_sha256 of this InstanceAgentCommandExecutionOutputViaTextDetails.
:rtype: str
"""
return self._text_sha256
@text_sha256.setter
def text_sha256(self, text_sha256):
"""
Sets the text_sha256 of this InstanceAgentCommandExecutionOutputViaTextDetails.
SHA-256 checksum value of the text content.
:param text_sha256: The text_sha256 of this InstanceAgentCommandExecutionOutputViaTextDetails.
:type: str
"""
self._text_sha256 = text_sha256
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
sending_orders/order_management.py
|
g-make-it/IG_Trading_Algo_Scripts_Python
| 186 |
72832
|
from trading_ig import IGService
from trading_ig.config import config
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# if you need to cache to DB your requests
from datetime import timedelta
import requests_cache
from predefined_functions.initialisation import Initialisation
class Order_Management():
def __init__(self):
logging.basicConfig(level=logging.INFO)
self.log = logging.getLogger(__name__)
# set object and then set connection
self.initial = Initialisation()
self.initialise_connection()
def initialise_connection(self):
self.ig_service = self.initial.initialise_connection()
self.ig_service.create_session()
# limit orders
def create_working_order(self, direction, epic, size, price, stop_distance,limit_distance,force_open=False):
currency_code = "GBP"
direction = direction
epic = epic
expiry = "DFB"
if force_open == True:
guaranteed_stop = False
else:
guaranteed_stop = True
# entering price
level = price
# Pound per point size
size = size
time_in_force = "GOOD_TILL_CANCELLED"
# LIMIT orders are now STOP
order_type = "STOP"
limit_distance = limit_distance
stop_distance = stop_distance
# currency_code = "GBP"
# direction = "SELL"
# epic = "CS.D.BITCOIN.TODAY.IP"
# expiry = "DFB"
# guaranteed_stop = False
# # entering price
# level = 13109
# # Pound per point size
# size = 0.50
# time_in_force = "GOOD_TILL_CANCELLED"
# order_type = "LIMIT"
# limit_distance = 4000.0
# stop_distance = 160.0
# """Creates an OTC working order"""
try:
response = self.ig_service.create_working_order(
currency_code=currency_code,
direction=direction,
epic=epic,
expiry=expiry,
guaranteed_stop=guaranteed_stop,
level=level,
size=size,
time_in_force=time_in_force,
order_type=order_type,
limit_distance=limit_distance,
stop_distance=stop_distance,
force_open=force_open
)
return response
except Exception as e:
self.log.info(str(e) + " error occurred when creating a working order")
return None
# market orders
def create_open_position(self, direction, epic, size, limit_distance, stop_distance, force_open):
currency_code = "GBP"
direction = direction
epic = epic
expiry = "DFB"
        # no matter what you are doing, force_open always has to be True, otherwise stop losses do not work
force_open = force_open
if force_open:
guaranteed_stop = False
else:
guaranteed_stop = True
stop_distance = stop_distance
size = size
trailing_stop = False
trailing_stop_increment = None
trailing_stop_distance = None
time_in_force = "FILL_OR_KILL"
order_type = "MARKET"
limit_distance = limit_distance
try:
response = self.ig_service.create_open_position(
currency_code=currency_code,
direction=direction,
epic=epic,
expiry=expiry,
                # no matter what you are doing, force_open always has to be True, otherwise stop losses do not work
force_open=True,
guaranteed_stop=guaranteed_stop,
stop_distance=stop_distance,
size=size,
trailing_stop=trailing_stop,
trailing_stop_increment=trailing_stop_increment,
# trailing_stop_distance = trailing_stop_distance,
# time_in_force=time_in_force,
order_type=order_type,
limit_distance=limit_distance)
return response
except Exception as e:
self.log.info(str(e) + " error occurred when opening a position")
return None
# market orders to close positions
def close_open_position(self, position, size):
# set randomly
try:
direction = "BUY"
position_direction = position["direction"]
if position_direction == "BUY":
direction = "SELL"
deal_id = position["dealId"]
order_type = "MARKET"
size = size
response = self.ig_service.close_open_position(
deal_id=deal_id,
direction=direction,
order_type=order_type,
size=size)
return response
except Exception as e:
self.log.info(str(e) + " error occurred when closing position")
return None
def delete_working_order(self, deal_id):
try:
deal_id = deal_id
response = self.ig_service.delete_working_order(deal_id)
return response
except Exception as e:
self.log.info(str(e) + " error occurred when deleting working order")
return None
def update_position(self, limit_level, stop_level, deal_id, guaranteed_stop):
limit_level = limit_level
guaranteed_stop = guaranteed_stop
stop_level=stop_level
deal_id=deal_id
trailing_stop = False
trailing_stop_distance = None
trailing_stop_increment = None
try:
response = self.ig_service.update_open_position(
limit_level=limit_level,
stop_level=stop_level,
# guaranteed_stop=guaranteed_stop,
deal_id =deal_id,
# trailing_stop=trailing_stop,
# trailing_stop_distance=trailing_stop_distance,
# trailing_stop_increment=trailing_stop_increment
)
return response
except Exception as e:
self.log.info(str(e) + " error occurred when updating position or maybe the order is no longer open")
return None
def get_open_positions(self):
while(True):
try:
return self.ig_service.fetch_open_positions()
except Exception as e:
self.log.info(str(e) + " error occurred when getting open positions")
# resets the connection
self.initialise_connection()
def get_working_orders(self):
while(True):
try:
return self.ig_service.fetch_working_orders()
except Exception as e:
self.log.info(str(e) + " error occurred when getting working orders")
self.initialise_connection()
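# Added usage sketch (values are taken from the commented-out example above and are
# illustrative only, not a recommendation):
#
#     om = Order_Management()
#     om.create_working_order(direction="SELL", epic="CS.D.BITCOIN.TODAY.IP",
#                             size=0.50, price=13109, stop_distance=160.0,
#                             limit_distance=4000.0, force_open=True)
#     positions = om.get_open_positions()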
|
tests-deprecating/milvus_benchmark/milvus_benchmark/runners/get.py
|
CyberFlameGO/milvus
| 10,504 |
72855
|
<filename>tests-deprecating/milvus_benchmark/milvus_benchmark/runners/get.py
import time
import copy
import logging
from milvus_benchmark import parser
from milvus_benchmark.runners import utils
from milvus_benchmark.runners.base import BaseRunner
logger = logging.getLogger("milvus_benchmark.runners.get")
def get_ids(length, size):
ids_list = []
step = size // length
for i in range(length):
ids_list.append(step * i)
return ids_list
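# Added worked example: get_ids(4, 100) returns [0, 25, 50, 75], i.e. `length`
# ids spaced `size // length` apart, starting from 0.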
class GetRunner(BaseRunner):
"""run get"""
name = "get_performance"
def __init__(self, env, metric):
super(GetRunner, self).__init__(env, metric)
def extract_cases(self, collection):
collection_name = collection["collection_name"] if "collection_name" in collection else None
(data_type, collection_size, dimension, metric_type) = parser.collection_parser(collection_name)
ni_per = collection["ni_per"]
vector_type = utils.get_vector_type(data_type)
other_fields = collection["other_fields"] if "other_fields" in collection else None
ids_length_list = collection["ids_length_list"]
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"dataset_name": collection_name,
"collection_size": collection_size,
"other_fields": other_fields,
"ni_per": ni_per
}
index_field_name = utils.get_default_field_name(vector_type)
index_type = collection["index_type"]
index_param = collection["index_param"]
index_info = {
"index_type": index_type,
"index_param": index_param
}
flush = True
if "flush" in collection and collection["flush"] == "no":
flush = False
self.init_metric(self.name, collection_info, index_info, search_info=None)
case_metrics = list()
for ids_length in ids_length_list:
ids = get_ids(ids_length, collection_size)
case_metric = copy.deepcopy(self.metric)
case_metric.set_case_metric_type()
case_params = list()
case_metric.run_params = {"ids_length": ids_length}
case_metrics.append(case_metric)
case_param = {
"collection_name": collection_name,
"data_type": data_type,
"dimension": dimension,
"collection_size": collection_size,
"ni_per": ni_per,
"metric_type": metric_type,
"vector_type": vector_type,
"other_fields": other_fields,
"flush_after_insert": flush,
"index_field_name": index_field_name,
"index_type": index_type,
"index_param": index_param,
"ids": ids
}
case_params.append(case_param)
return case_params, case_metrics
def prepare(self, **case_param):
collection_name = case_param["collection_name"]
self.milvus.set_collection(collection_name)
if not self.milvus.exists_collection():
logger.info("collection not exist")
logger.debug({"collection count": self.milvus.count()})
def run_case(self, case_metric, **case_param):
ids = case_param["ids"]
start_time = time.time()
self.milvus.get(ids)
get_time = round(time.time() - start_time, 2)
tmp_result = {"get_time": get_time}
return tmp_result
class InsertGetRunner(GetRunner):
"""run insert and get"""
name = "insert_get_performance"
def __init__(self, env, metric):
super(InsertGetRunner, self).__init__(env, metric)
def prepare(self, **case_param):
collection_name = case_param["collection_name"]
dimension = case_param["dimension"]
vector_type = case_param["vector_type"]
other_fields = case_param["other_fields"]
self.milvus.set_collection(collection_name)
if self.milvus.exists_collection():
logger.debug("Start drop collection")
self.milvus.drop()
time.sleep(utils.DELETE_INTERVAL_TIME)
self.milvus.create_collection(dimension, data_type=vector_type, other_fields=other_fields)
self.insert(self.milvus, collection_name, case_param["data_type"], dimension,
case_param["collection_size"], case_param["ni_per"])
start_time = time.time()
self.milvus.flush()
flush_time = round(time.time() - start_time, 2)
logger.debug({"collection count": self.milvus.count()})
logger.debug({"flush_time": flush_time})
logger.debug("Start load collection")
self.milvus.load_collection(timeout=1200)
logger.debug("Load collection end")
|
configs/loftr/indoor/scannet/loftr_ds_eval_new.py
|
AK391/LoFTR
| 907 |
72861
|
""" A config only for reproducing the ScanNet evaluation results.
We remove border matches by default, but the originally implemented
`remove_border()` has a bug, leading to only two sides of
all borders are actually removed. However, the [bug fix](https://github.com/zju3dv/LoFTR/commit/e9146c8144dea5f3cbdd98b225f3e147a171c216)
makes the scannet evaluation results worse (auc@10=40.8 => 39.5), which should be
caused by tiny result fluctuation of few image pairs. This config set `BORDER_RM` to 0
to be consistent with the results in our paper.
Update: This config is for testing the re-trained model with the pos-enc bug fixed.
"""
from src.config.default import _CN as cfg
cfg.LOFTR.COARSE.TEMP_BUG_FIX = True
cfg.LOFTR.MATCH_COARSE.MATCH_TYPE = 'dual_softmax'
cfg.LOFTR.MATCH_COARSE.BORDER_RM = 0
|
textbox/data/dataloader/__init__.py
|
StevenTang1998/TextBox
| 347 |
72882
|
from textbox.data.dataloader.abstract_dataloader import AbstractDataLoader
from textbox.data.dataloader.single_sent_dataloader import SingleSentenceDataLoader
from textbox.data.dataloader.paired_sent_dataloader import PairedSentenceDataLoader
from textbox.data.dataloader.attr_sent_dataloader import AttributedSentenceDataLoader
from textbox.data.dataloader.kg_sent_dataloader import KGSentenceDataLoader
from textbox.data.dataloader.wikibio_sent_dataloader import WikiBioSentenceDataLoader
from textbox.data.dataloader.rotowire_sent_dataloader import RotoWireSentenceDataLoader
|
samples/migrateADCGen1/mappers/__init__.py
|
daniel-dqsdatalabs/pyapacheatlas
| 104 |
72895
|
from .assetmapper import AssetMapper
from .assetfactory import AssetFactory
from .sqlserver import SqlServerTableMapper
|
src/benchmarks/gc/src/commonlib/host_info.py
|
BruceForstall/performance
| 547 |
72896
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
# See the LICENSE file in the project root for more information.
from dataclasses import dataclass
from operator import floordiv
from pathlib import Path
from re import search
from textwrap import indent
from typing import Iterable, List, Mapping, Optional, Sequence, Tuple
from .bench_file import (
change_path_machine,
get_this_machine,
Machine,
MACHINE_DOC,
parse_machines_arg,
)
from .get_built import get_built, Built
from .collection_util import empty_mapping, is_empty
from .command import Command, CommandKind, CommandsMapping
from .config import HOST_INFO_PATH
from .option import map_option, map_option_2
from .parse_and_serialize import load_yaml, parse_yaml, to_yaml, write_yaml_file
from .type_utils import argument, check_cast, with_slots
from .util import (
ensure_dir,
ExecArgs,
exec_and_get_output,
gb_to_kb,
get_hostname,
get_os,
kb_to_bytes,
kb_to_mb,
mb_to_kb,
mhz_to_ghz,
OS,
remove_str_end,
try_remove_str_start,
)
@with_slots
@dataclass(frozen=True)
class CacheInfoForLevel:
# Values are None if unknown
n_caches: Optional[int] = None
total_bytes: Optional[int] = None
@property
def average_bytes(self) -> Optional[int]:
return map_option_2(self.total_bytes, self.n_caches, floordiv)
@with_slots
@dataclass(frozen=True)
class CacheInfo:
l1: CacheInfoForLevel
l2: CacheInfoForLevel
l3: CacheInfoForLevel
@with_slots
@dataclass(frozen=True)
class Range:
# Both inclusive
lo: int
hi: int
def with_hi(self, new_hi: int) -> "Range":
return Range(self.lo, new_hi)
@with_slots
@dataclass(frozen=True)
class NumaNodeInfo:
numa_node_number: int
ranges: Sequence[Range]
# None on non-Windows
cpu_group_number: Optional[int] = None
@with_slots
@dataclass(frozen=True)
class HostInfo:
# All values are None if unknown
hostname: str
n_physical_processors: int
n_logical_processors: int
numa_nodes: Sequence[NumaNodeInfo]
cache_info: CacheInfo
clock_ghz: Optional[float] = None
total_physical_memory_mb: Optional[int] = None
@with_slots
@dataclass(frozen=True)
class _NumaNodesAndCacheInfo:
numa_nodes: Sequence[NumaNodeInfo]
n_physical_processors: int
n_logical_processors: int
caches: CacheInfo
def _get_total_physical_memory_mb_windows() -> int:
output = exec_and_get_output(ExecArgs(("systeminfo",), quiet_print=True))
for line in output.splitlines():
tot = try_remove_str_start(line, "Total Physical Memory:")
if tot is not None:
mem = remove_str_end(tot, "MB")
return int(mem.replace(",", ""))
raise Exception("Didn't find total physical memory")
def _get_host_info(built: Built) -> HostInfo:
return {OS.posix: lambda _: _get_host_info_posix(), OS.windows: _get_host_info_windows}[
get_os()
](built)
_UNKNOWN_MSG: str = "unknown"
def _get_host_info_posix() -> HostInfo:
# lscpu output is a bunch of lines all of the form key: value. Make a dict from that.
dct = _parse_keys_values_lines(exec_and_get_output(ExecArgs(("lscpu",), quiet_print=True)))
def get_opt(name: str) -> Optional[str]:
return dct.get(name, None)
def get_int(name: str) -> int:
return int(dct[name])
def get_opt_float(name: str) -> Optional[float]:
return map_option(get_opt(name), float)
def get_opt_kb(name: str) -> Optional[int]:
opt = get_opt(name)
if opt is not None and _UNKNOWN_MSG in opt.lower():
return None
# Retrieve the size of the given memory option string, and return it
# in KB. Nowadays, it is not uncommon for machines to have MB, or in
# more powerful cases, GB of cache, so we need to do the conversion
# as necessary since the infra expects to receive KB.
#
# First we extract the size units name to determine how to convert them
# if necessary. Since we don't know about every single machine's rules
# for capitalization, we'll convert to lowercase just to be safe.
assert opt is not None
size_units = opt.rsplit(" ", 1)[-1].lower()
converted_kb = 0.0
if size_units in ["k", "kib"]:
converted_kb = int(remove_str_end(opt.lower(), size_units))
return map_option(converted_kb, lambda n: n)
elif size_units in ["m", "mib"]:
converted_kb = mb_to_kb(float(remove_str_end(opt.lower(), size_units)))
return map_option(converted_kb, lambda n: int(n)) # pylint: disable=W0108
elif size_units in ["g", "gib"]:
converted_kb = gb_to_kb(float(remove_str_end(opt.lower(), size_units)))
return map_option(converted_kb, lambda n: int(n)) # pylint: disable=W0108
else:
raise Exception(f"Unrecognized size units '{size_units}'")
# Note: "CPU MHz" is the *current* cpu rate which varies. Going with max here.
# TODO: Max is probably wrong, we want a typical value.
clock_ghz = map_option(get_opt_float("CPU max MHz"), mhz_to_ghz)
sockets = get_int("Socket(s)")
cores = get_int("Core(s) per socket")
threads = get_int("Thread(s) per core")
n_physical_processors = sockets * cores
n_logical_processors = n_physical_processors * threads
l1d_cache_kb = get_opt_kb("L1d cache")
l1i_cache_kb = get_opt_kb("L1i cache")
l2_cache_kb = get_opt_kb("L2 cache")
l3_cache_kb = get_opt_kb("L3 cache")
x = _parse_keys_values_lines((Path("/proc") / "meminfo").read_text())
total_physical_memory_mb = round(kb_to_mb(float(remove_str_end(x["MemTotal"], " kB"))))
numa_nodes = _get_numa_nodes_posix()
return HostInfo(
hostname=get_hostname(),
n_physical_processors=n_physical_processors,
n_logical_processors=n_logical_processors,
numa_nodes=numa_nodes,
cache_info=CacheInfo(
# TODO: figure out how to determine number of caches on posix
l1=CacheInfoForLevel(
n_caches=None,
total_bytes=map_option_2(
l1d_cache_kb, l1i_cache_kb, lambda a, b: kb_to_bytes(a + b)
),
),
l2=CacheInfoForLevel(n_caches=None, total_bytes=map_option(l2_cache_kb, kb_to_bytes)),
l3=CacheInfoForLevel(n_caches=None, total_bytes=map_option(l3_cache_kb, kb_to_bytes)),
),
clock_ghz=clock_ghz,
total_physical_memory_mb=total_physical_memory_mb,
)
def _get_numa_nodes_posix() -> Sequence[NumaNodeInfo]:
return tuple(
_parse_numa_nodes_posix(
exec_and_get_output(ExecArgs(("numactl", "--hardware"), quiet_print=True))
)
)
def _parse_numa_nodes_posix(s: str) -> Iterable[NumaNodeInfo]:
for line in s.splitlines():
res = search(r"^node (\d+) cpus: ", line)
if res is not None:
node_number = int(res.group(1))
yield NumaNodeInfo(
numa_node_number=node_number,
cpu_group_number=None,
ranges=_ranges_from_numbers([int(x) for x in line[res.span()[1] :].split()]),
)
def _ranges_from_numbers(ns: Iterable[int]) -> Sequence[Range]:
ranges: List[Range] = []
for n in ns:
if is_empty(ranges) or n != ranges[-1].hi + 1:
ranges.append(Range(n, n))
else:
ranges.append(ranges.pop().with_hi(n))
return ranges
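# Added worked example: _ranges_from_numbers([0, 1, 2, 5, 6]) collapses the
# consecutive runs into [Range(0, 2), Range(5, 6)].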
def _parse_keys_values_lines(s: str) -> Mapping[str, str]:
return {k: v for line in s.split("\n") if line != "" for k, v in (_split_line(line),)}
def _split_line(line: str) -> Tuple[str, str]:
parts = line.split(":")
assert len(parts) == 2
l, r = parts
return l.strip(), r.strip()
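# Added worked example: _parse_keys_values_lines("Socket(s): 2\nThread(s) per core: 2")
# returns {"Socket(s)": "2", "Thread(s) per core": "2"}; every non-empty line must
# contain exactly one ":" separator (enforced by the assert in _split_line).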
def _get_host_info_windows(built: Built) -> HostInfo:
total_physical_memory_mb = _get_total_physical_memory_mb_windows()
info_from_c = parse_yaml(
_NumaNodesAndCacheInfo,
exec_and_get_output(ExecArgs((str(built.win.get_host_info_exe),), quiet_print=True)),
)
return HostInfo(
hostname=get_hostname(),
clock_ghz=_get_clock_ghz_windows(),
total_physical_memory_mb=total_physical_memory_mb,
n_physical_processors=info_from_c.n_physical_processors,
n_logical_processors=info_from_c.n_logical_processors,
numa_nodes=info_from_c.numa_nodes,
cache_info=info_from_c.caches,
)
def _get_clock_ghz_windows() -> float:
# Import lazily as this is only available on Windows
# pylint:disable=import-outside-toplevel
from winreg import ConnectRegistry, HKEY_LOCAL_MACHINE, OpenKey, QueryValueEx
registry = ConnectRegistry(None, HKEY_LOCAL_MACHINE)
key = OpenKey(registry, "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0")
mhz, _ = QueryValueEx(key, "~MHz")
ghz = mhz_to_ghz(check_cast(float, mhz))
assert 0 < ghz < 10
return ghz
def read_host_info_for_machine(machine: Machine) -> HostInfo:
return _read_host_info_at_path(change_path_machine(HOST_INFO_PATH, machine))
def _read_host_info_at_path(path: Path) -> HostInfo:
try:
return load_yaml(HostInfo, path)
except FileNotFoundError:
raise Exception(
f"Host info not found at {path}. Did you forget to run 'write-host-info'?"
) from None
def read_this_machines_host_info() -> HostInfo:
return read_host_info_for_machine(get_this_machine())
def write_host_info() -> None:
built = get_built(coreclrs=empty_mapping())
ensure_dir(HOST_INFO_PATH.parent)
write_yaml_file(HOST_INFO_PATH, _get_host_info(built), overwrite=True)
print(f"Wrote to {HOST_INFO_PATH}")
def print_host_info() -> None:
print(to_yaml(read_this_machines_host_info()))
@with_slots
@dataclass(frozen=True)
class PrintAllHostInfosArgs:
machines: Sequence[str] = argument(doc=MACHINE_DOC)
def print_all_host_infos(args: PrintAllHostInfosArgs) -> None:
for machine in parse_machines_arg(args.machines):
print(machine)
hi = read_host_info_for_machine(machine)
print(indent(to_yaml(hi), " "))
HOST_INFO_COMMANDS: CommandsMapping = {
"print-host-info": Command(
hidden=True,
kind=CommandKind.infra,
fn=print_host_info,
doc="""Print info about this machine generated from write-host-info""",
),
# Hidden because 'setup' does this already.
# Though it's useful to run again if the code for getting host-info is modified.
"write-host-info": Command(
hidden=True,
kind=CommandKind.infra,
fn=write_host_info,
doc=f"Write host info to {HOST_INFO_PATH}.",
),
"print-all-host-infos": Command(
kind=CommandKind.infra,
fn=print_all_host_infos,
doc="Fetch and print host info for all machines.",
priority=1,
),
}
|
litex/gen/fhdl/verilog.py
|
enjoy-digital/litex
| 1,501 |
72958
|
#
# This file is part of LiteX (Adapted from Migen for LiteX usage).
#
# This file is Copyright (c) 2013-2014 <NAME> <<EMAIL>>
# This file is Copyright (c) 2013-2021 <NAME> <<EMAIL>>
# This file is Copyright (c) 2013-2017 <NAME> <<EMAIL>>
# This file is Copyright (c) 2016-2018 whitequark <<EMAIL>>
# This file is Copyright (c) 2017 <NAME> <<EMAIL>>
# This file is Copyright (c) 2016 <NAME> <<EMAIL>>
# This file is Copyright (c) 2018 <NAME> <<EMAIL>>
# This file is Copyright (c) 2015 <NAME> <<EMAIL>>
# This file is Copyright (c) 2013 <NAME> <<EMAIL>>
# This file is Copyright (c) 2018 <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-2-Clause
from functools import partial
from operator import itemgetter
import collections
from migen.fhdl.structure import *
from migen.fhdl.structure import _Operator, _Slice, _Assign, _Fragment
from migen.fhdl.tools import *
from migen.fhdl.namer import build_namespace
from migen.fhdl.conv_output import ConvOutput
from migen.fhdl.specials import Memory
from litex.gen.fhdl.memory import memory_emit_verilog
from litex.build.tools import generated_banner
# ------------------------------------------------------------------------------------------------ #
# RESERVED KEYWORDS #
# ------------------------------------------------------------------------------------------------ #
_ieee_1800_2017_verilog_reserved_keywords = {
"accept_on", "alias", "always", "always_comb", "always_ff",
"always_latch", "and", "assert", "assign", "assume",
"automatic", "before", "begin", "bind", "bins",
"binsof", "bit", "break", "buf", "bufif0",
"bufif1", "byte", "case", "casex", "casez",
"cell", "chandle", "checker", "class", "clocking",
"cmos", "config", "const", "constraint", "context",
"continue", "cover", "covergroup", "coverpoint", "cross",
"deassign", "default", "defparam", "design", "disable",
"dist", "do", "edge", "else", "end",
"endcase", "endchecker", "endclass", "endclocking", "endconfig",
"endfunction", "endgenerate", "endgroup", "endinterface", "endmodule",
"endpackage", "endprimitive", "endprogram", "endproperty", "endsequence",
"endspecify", "endtable", "endtask", "enum", "event",
"eventually", "expect", "export", "extends", "extern",
"final", "first_match", "for", "force", "foreach",
"forever", "fork", "forkjoin", "function", "generate",
"genvar", "global", "highz0", "highz1", "if",
"iff", "ifnone", "ignore_bins", "illegal_bins", "implements",
"implies", "import", "incdir", "include", "initial",
"inout", "input", "inside", "instance", "int",
"integer", "interconnect", "interface", "intersect", "join",
"join_any", "join_none", "large", "let", "liblist",
"library", "local", "localparam", "logic", "longint",
"macromodule", "matches", "medium", "modport", "module",
"nand", "negedge", "nettype", "new", "nexttime",
"nmos", "nor", "noshowcancelled", "not", "notif0",
"notif1", "null", "or", "output", "package",
"packed", "parameter", "pmos", "posedge", "primitive",
"priority", "program", "property", "protected", "pull0",
"pull1", "pulldown", "pullup", "pulsestyle_ondetect", "pulsestyle_onevent",
"pure", "rand", "randc", "randcase", "randsequence",
"rcmos", "real", "realtime", "ref", "reg",
"reject_on", "release", " repeat", "restrict", "return",
"rnmos", "rpmos", "rtran", "rtranif0", "rtranif1",
"s_always", "s_eventually", "s_nexttime", "s_until", "s_until_with",
"scalared", "sequence", "shortint", "shortreal", "showcancelled",
"signed", "small", "soft", "solve", "specify",
"specparam", "static", "string", "strong", "strong0",
"strong1", "struct", "super", "supply0", "supply1",
"sync_accept_on", "sync_reject_on", "table", "tagged", "task",
"this", "throughout", "time", "timeprecision", "timeunit",
"tran", "tranif0", "tranif1", "tri", "tri0",
"tri1", "triand", "trior", "trireg", "type",
"typedef", " union", "unique", "unique0", "unsigned",
"until", "until_with", "untyped", "use", " uwire",
"var", "vectored", "virtual", "void", "wait",
"wait_order", "wand", "weak", "weak0", "weak1",
"while", "wildcard", "wire", "with", "within",
"wor", "xnor", "xor",
}
# ------------------------------------------------------------------------------------------------ #
# EXPRESSIONS #
# ------------------------------------------------------------------------------------------------ #
# Print Constant -----------------------------------------------------------------------------------
def _print_constant(node):
return "{sign}{bits}'d{value}".format(
sign = "" if node.value >= 0 else "-",
bits = str(node.nbits),
value = abs(node.value),
), node.signed
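# Added examples: an unsigned 8-bit constant with value 5 is printed as "8'd5"
# (signed flag False); a signed 4-bit constant with value -3 is printed as "-4'd3"
# (signed flag True).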
# Print Signal -------------------------------------------------------------------------------------
def _print_signal(ns, s):
return "{signed}{vector}{name}".format(
signed = "" if (not s.signed) else "signed ",
vector = "" if ( len(s) <= 1) else f"[{str(len(s)-1) }:0] ",
name = ns.get_name(s)
)
# Print Operator -----------------------------------------------------------------------------------
(UNARY, BINARY, TERNARY) = (1, 2, 3)
def _print_operator(ns, node):
operator = node.op
operands = node.operands
arity = len(operands)
assert arity in [UNARY, BINARY, TERNARY]
def to_signed(r):
return f"$signed({{1'd0, {r}}})"
# Unary Operator.
if arity == UNARY:
r1, s1 = _print_expression(ns, operands[0])
# Negation Operator.
if operator == "-":
# Negate and convert to signed if not already.
r = "-" + (r1 if s1 else to_signed(r1))
s = True
# Other Operators.
else:
r = operator + r1
s = s1
# Binary Operator.
if arity == BINARY:
r1, s1 = _print_expression(ns, operands[0])
r2, s2 = _print_expression(ns, operands[1])
# Convert all expressions to signed when at least one is signed.
if operator not in ["<<<", ">>>"]:
if s2 and not s1:
r1 = to_signed(r1)
if s1 and not s2:
r2 = to_signed(r2)
r = f"{r1} {operator} {r2}"
s = s1 or s2
# Ternary Operator.
if arity == TERNARY:
assert operator == "m"
r1, s1 = _print_expression(ns, operands[0])
r2, s2 = _print_expression(ns, operands[1])
r3, s3 = _print_expression(ns, operands[2])
# Convert all expressions to signed when at least one is signed.
if s2 and not s3:
r3 = to_signed(r3)
if s3 and not s2:
r2 = to_signed(r2)
r = f"{r1} ? {r2} : {r3}"
s = s2 or s3
return f"({r})", s
# Print Slice --------------------------------------------------------------------------------------
def _print_slice(ns, node):
assert (node.stop - node.start) >= 1
if (isinstance(node.value, Signal) and len(node.value) == 1):
assert node.start == 0
sr = "" # Avoid slicing 1-bit Signals.
else:
sr = f"[{node.stop-1}:{node.start}]" if (node.stop - node.start) > 1 else f"[{node.start}]"
r, s = _print_expression(ns, node.value)
return r + sr, s
# Print Cat ----------------------------------------------------------------------------------------
def _print_cat(ns, node):
l = [_print_expression(ns, v)[0] for v in reversed(node.l)]
return "{" + ", ".join(l) + "}", False
# Print Replicate ----------------------------------------------------------------------------------
def _print_replicate(ns, node):
return "{" + str(node.n) + "{" + _print_expression(ns, node.v)[0] + "}}", False
# Print Expression ---------------------------------------------------------------------------------
def _print_expression(ns, node):
# Constant.
if isinstance(node, Constant):
return _print_constant(node)
# Signal.
elif isinstance(node, Signal):
return ns.get_name(node), node.signed
# Operator.
elif isinstance(node, _Operator):
return _print_operator(ns, node)
# Slice.
elif isinstance(node, _Slice):
return _print_slice(ns, node)
# Cat.
elif isinstance(node, Cat):
return _print_cat(ns, node)
# Replicate.
elif isinstance(node, Replicate):
return _print_replicate(ns, node)
# Unknown.
else:
raise TypeError(f"Expression of unrecognized type: '{type(node).__name__}'")
# ------------------------------------------------------------------------------------------------ #
# NODES #
# ------------------------------------------------------------------------------------------------ #
(_AT_BLOCKING, _AT_NONBLOCKING, _AT_SIGNAL) = range(3)
def _print_node(ns, at, level, node, target_filter=None):
if target_filter is not None and target_filter not in list_targets(node):
return ""
# Assignment.
elif isinstance(node, _Assign):
if at == _AT_BLOCKING:
assignment = " = "
elif at == _AT_NONBLOCKING:
assignment = " <= "
elif is_variable(node.l):
assignment = " = "
else:
assignment = " <= "
return "\t"*level + _print_expression(ns, node.l)[0] + assignment + _print_expression(ns, node.r)[0] + ";\n"
# Iterable.
elif isinstance(node, collections.abc.Iterable):
return "".join(_print_node(ns, at, level, n, target_filter) for n in node)
# If.
elif isinstance(node, If):
r = "\t"*level + "if (" + _print_expression(ns, node.cond)[0] + ") begin\n"
r += _print_node(ns, at, level + 1, node.t, target_filter)
if node.f:
r += "\t"*level + "end else begin\n"
r += _print_node(ns, at, level + 1, node.f, target_filter)
r += "\t"*level + "end\n"
return r
# Case.
elif isinstance(node, Case):
if node.cases:
r = "\t"*level + "case (" + _print_expression(ns, node.test)[0] + ")\n"
css = [(k, v) for k, v in node.cases.items() if isinstance(k, Constant)]
css = sorted(css, key=lambda x: x[0].value)
for choice, statements in css:
r += "\t"*(level + 1) + _print_expression(ns, choice)[0] + ": begin\n"
r += _print_node(ns, at, level + 2, statements, target_filter)
r += "\t"*(level + 1) + "end\n"
if "default" in node.cases:
r += "\t"*(level + 1) + "default: begin\n"
r += _print_node(ns, at, level + 2, node.cases["default"], target_filter)
r += "\t"*(level + 1) + "end\n"
r += "\t"*level + "endcase\n"
return r
else:
return ""
# Display.
elif isinstance(node, Display):
s = "\"" + node.s + "\""
for arg in node.args:
s += ", "
if isinstance(arg, Signal):
s += ns.get_name(arg)
else:
s += str(arg)
return "\t"*level + "$display(" + s + ");\n"
# Finish.
elif isinstance(node, Finish):
return "\t"*level + "$finish;\n"
# Unknown.
else:
raise TypeError(f"Node of unrecognized type: {str(type(node))}")
# ------------------------------------------------------------------------------------------------ #
# ATTRIBUTES #
# ------------------------------------------------------------------------------------------------ #
def _print_attribute(attr, attr_translate):
r = ""
firsta = True
for attr in sorted(attr,
key=lambda x: ("", x) if isinstance(x, str) else x):
if isinstance(attr, tuple):
# platform-dependent attribute
attr_name, attr_value = attr
else:
# translated attribute
at = attr_translate.get(attr, None)
if at is None:
continue
attr_name, attr_value = at
if not firsta:
r += ", "
firsta = False
const_expr = "\"" + attr_value + "\"" if not isinstance(attr_value, int) else str(attr_value)
r += attr_name + " = " + const_expr
if r:
r = "(* " + r + " *)"
return r
# ------------------------------------------------------------------------------------------------ #
# MODULE #
# ------------------------------------------------------------------------------------------------ #
def _list_comb_wires(f):
r = set()
groups = group_by_targets(f.comb)
for g in groups:
if len(g[1]) == 1 and isinstance(g[1][0], _Assign):
r |= g[0]
return r
def _print_module(f, ios, name, ns, attr_translate):
sigs = list_signals(f) | list_special_ios(f, ins=True, outs=True, inouts=True)
special_outs = list_special_ios(f, ins=False, outs=True, inouts=True)
inouts = list_special_ios(f, ins=False, outs=False, inouts=True)
targets = list_targets(f) | special_outs
wires = _list_comb_wires(f) | special_outs
r = "module " + name + "(\n"
firstp = True
for sig in sorted(ios, key=lambda x: x.duid):
if not firstp:
r += ",\n"
firstp = False
attr = _print_attribute(sig.attr, attr_translate)
if attr:
r += "\t" + attr
sig.type = "wire"
sig.name = ns.get_name(sig)
if sig in inouts:
sig.direction = "inout"
r += "\tinout wire " + _print_signal(ns, sig)
elif sig in targets:
sig.direction = "output"
if sig in wires:
r += "\toutput wire " + _print_signal(ns, sig)
else:
sig.type = "reg"
r += "\toutput reg " + _print_signal(ns, sig)
else:
sig.direction = "input"
r += "\tinput wire " + _print_signal(ns, sig)
r += "\n);\n\n"
for sig in sorted(sigs - ios, key=lambda x: x.duid):
attr = _print_attribute(sig.attr, attr_translate)
if attr:
r += attr + " "
if sig in wires:
r += "wire " + _print_signal(ns, sig) + ";\n"
else:
r += "reg " + _print_signal(ns, sig) + " = " + _print_expression(ns, sig.reset)[0] + ";\n"
r += "\n"
return r
# ------------------------------------------------------------------------------------------------ #
# COMBINATORIAL LOGIC #
# ------------------------------------------------------------------------------------------------ #
def _print_combinatorial_logic_sim(f, ns):
r = ""
if f.comb:
from collections import defaultdict
target_stmt_map = defaultdict(list)
for statement in flat_iteration(f.comb):
targets = list_targets(statement)
for t in targets:
target_stmt_map[t].append(statement)
groups = group_by_targets(f.comb)
for n, (t, stmts) in enumerate(target_stmt_map.items()):
assert isinstance(t, Signal)
if len(stmts) == 1 and isinstance(stmts[0], _Assign):
r += "assign " + _print_node(ns, _AT_BLOCKING, 0, stmts[0])
else:
r += "always @(*) begin\n"
r += "\t" + ns.get_name(t) + " <= " + _print_expression(ns, t.reset)[0] + ";\n"
r += _print_node(ns, _AT_NONBLOCKING, 1, stmts, t)
r += "end\n"
r += "\n"
return r
def _print_combinatorial_logic_synth(f, ns):
r = ""
if f.comb:
groups = group_by_targets(f.comb)
for n, g in enumerate(groups):
if len(g[1]) == 1 and isinstance(g[1][0], _Assign):
r += "assign " + _print_node(ns, _AT_BLOCKING, 0, g[1][0])
else:
r += "always @(*) begin\n"
for t in g[0]:
r += "\t" + ns.get_name(t) + " <= " + _print_expression(ns, t.reset)[0] + ";\n"
r += _print_node(ns, _AT_NONBLOCKING, 1, g[1])
r += "end\n"
r += "\n"
return r
# ------------------------------------------------------------------------------------------------ #
# SYNCHRONOUS LOGIC #
# ------------------------------------------------------------------------------------------------ #
def _print_synchronous_logic(f, ns):
r = ""
for k, v in sorted(f.sync.items(), key=itemgetter(0)):
r += "always @(posedge " + ns.get_name(f.clock_domains[k].clk) + ") begin\n"
r += _print_node(ns, _AT_SIGNAL, 1, v)
r += "end\n\n"
return r
# ------------------------------------------------------------------------------------------------ #
# SPECIALS #
# ------------------------------------------------------------------------------------------------ #
def _print_specials(overrides, specials, ns, add_data_file, attr_translate):
r = ""
for special in sorted(specials, key=lambda x: x.duid):
if hasattr(special, "attr"):
attr = _print_attribute(special.attr, attr_translate)
if attr:
r += attr + " "
# Replace Migen Memory's emit_verilog with our implementation.
if isinstance(special, Memory):
pr = memory_emit_verilog(special, ns, add_data_file)
else:
pr = call_special_classmethod(overrides, special, "emit_verilog", ns, add_data_file)
if pr is None:
raise NotImplementedError("Special " + str(special) + " failed to implement emit_verilog")
r += pr
return r
# ------------------------------------------------------------------------------------------------ #
# FHDL --> VERILOG #
# ------------------------------------------------------------------------------------------------ #
class DummyAttrTranslate(dict):
def __getitem__(self, k):
return (k, "true")
def convert(f, ios=set(), name="top",
special_overrides = dict(),
attr_translate = DummyAttrTranslate(),
regular_comb = True):
# Create ConvOutput.
r = ConvOutput()
    # Convert to FHDL's fragments if not already done.
if not isinstance(f, _Fragment):
f = f.get_fragment()
# Verify/Create Clock Domains.
for cd_name in sorted(list_clock_domains(f)):
# Try to get Clock Domain.
try:
f.clock_domains[cd_name]
# If not found, raise Error.
except:
msg = f"""Unresolved clock domain {cd_name}, availables:\n"""
for f in f.clock_domains:
msg += f"- {f.name}\n"
raise Exception(msg)
# Lower complex slices.
f = lower_complex_slices(f)
# Insert resets.
insert_resets(f)
# Lower basics.
f = lower_basics(f)
# Lower specials.
f, lowered_specials = lower_specials(special_overrides, f)
# Lower basics (for basics included in specials).
f = lower_basics(f)
# IOs backtrace/naming.
for io in sorted(ios, key=lambda x: x.duid):
if io.name_override is None:
io_name = io.backtrace[-1][0]
if io_name:
io.name_override = io_name
# Build NameSpace.
# ----------------
ns = build_namespace(
signals = (
list_signals(f) |
list_special_ios(f, ins=True, outs=True, inouts=True) |
ios),
reserved_keywords = _ieee_1800_2017_verilog_reserved_keywords
)
ns.clock_domains = f.clock_domains
# Build Verilog.
# --------------
verilog = generated_banner("//")
# Module Top.
verilog += _print_module(f, ios, name, ns, attr_translate)
# Combinatorial Logic.
if regular_comb:
verilog += _print_combinatorial_logic_synth(f, ns)
else:
verilog += _print_combinatorial_logic_sim(f, ns)
# Synchronous Logic.
verilog += _print_synchronous_logic(f, ns)
# Specials
verilog += _print_specials(special_overrides, f.specials - lowered_specials,
ns, r.add_data_file, attr_translate)
# Module End.
verilog += "endmodule\n"
r.set_main_source(verilog)
r.ns = ns
return r
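# Added usage sketch (a minimal Migen-style module; names are illustrative and the
# example only uses combinatorial logic, so no clock domain is required):
#
#     from migen import Module, Signal
#
#     class Inverter(Module):
#         def __init__(self):
#             self.i = Signal()
#             self.o = Signal()
#             self.comb += self.o.eq(~self.i)
#
#     m = Inverter()
#     out = convert(m, ios={m.i, m.o}, name="inverter")
#     out.write("inverter.v")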
|
scripts/prepare_data_multi_process.py
|
a631381602/DianJing
| 232 |
72960
|
<filename>scripts/prepare_data_multi_process.py
import redis
import json
import h5py
import pickle
import numpy as np
import random
import jieba
import multiprocessing
word2idx, idx2word, allwords, corpus = None, None, {}, []
DUMP_FILE = 'data/basic_data_700k_v2.pkl'
check_sample_size = 10
TF_THRES = 5
DF_THRES = 2
r0 = redis.StrictRedis(host='localhost', port=6379, db=0)
r1 = redis.StrictRedis(host='localhost', port=6379, db=1)
id_beg = 0
id_eos = 1
id_emp = 2
id_unk = 3
r = None
class Word:
def __init__(self,val,tf,df):
self.val = val
self.tf = tf
self.df = df
def __repr__(self):
pass
def parse_all_crawled_data(keys, idx):
res = []
if idx == 0:
conn = r0
else:
conn = r1
for data in conn.mget(keys):
data = json.loads(data)
key = data.get("group_id")
title = data.get("title","").replace('\t',' ')
abstract = data.get("abstract","").replace('\t',' ')
if abstract == "":
abstract = title
res.append((key,title,abstract))
return res
def cal_word_tf_df(corpus):
words = {}
title_abstract_pairs = []
for doc in corpus:
title, abstract = doc[1].lower(),doc[2].lower()
ts_ = list(jieba.cut(title,cut_all = False))
as_ = list(jieba.cut(abstract,cut_all = False))
title_abstract_pairs.append((ts_, as_))
        # accumulate the term frequency
for word in ts_ + as_:
if not words.get(word):
words[word] = Word(val = word,tf = 1,df = 0)
else:
words[word].tf += 1
        # accumulate the doc frequency
for word in set(ts_ + as_):
words[word].df += 1
return words,title_abstract_pairs
def build_idx_for_words_tf_df(chars,tf_thres = TF_THRES, df_thres = DF_THRES):
start_idx = id_unk + 1
char2idx = {}
idx2char = {}
char2idx['<eos>'] = id_eos
char2idx['<unk>'] = id_unk
char2idx['<emp>'] = id_emp
char2idx['<beg>'] = id_beg
    # keep only terms with tf > tf_thres or df > df_thres
chars = filter(lambda char:char.tf > tf_thres or char.df > df_thres,chars)
char2idx.update(dict([(char.val,start_idx + idx) for idx,char in enumerate(chars)]))
idx2char = dict([(idx,char) for char,idx in char2idx.items()])
return char2idx, idx2char
def prt(label, x):
print label+':',
for w in x:
if w == id_emp:
continue
print idx2word[w],
print
def worker(i,keys,idx):
print "worker [%2d] started with keys:[%d]!"%(i,len(keys))
corpus = parse_all_crawled_data(keys, idx)
print "worker [%2d] get docs :[%d]!"%(i,len(corpus))
words,sub_corpus = cal_word_tf_df(corpus)
return words,sub_corpus
def combine_results(res):
    global corpus, word2idx, idx2word
words,sub_corpus = res[0], res[1]
corpus.extend(sub_corpus)
for word in words:
if word not in allwords:
allwords[word] = Word(val = word,tf = 0,df = 0)
allwords[word].tf += words[word].tf
allwords[word].df += words[word].df
word2idx, idx2word = build_idx_for_words_tf_df(allwords.values())
def dump_all_results():
datafile = open(DUMP_FILE,'wb')
titles, abstracts = [],[]
for ts_,as_ in corpus:
titles.append([word2idx.get(word,id_unk) for word in ts_])
abstracts.append([word2idx.get(word,id_unk) for word in as_])
pickle.dump((allwords, word2idx, idx2word, titles, abstracts),datafile,-1)
def check_dump():
allwords, word2idx, idx2word, titles, abstracts = pickle.load(open(DUMP_FILE))
print "allwords size is:",len(allwords)
print "word2idx size is:",len(word2idx)
print "titles size is:",len(titles)
for k in range(check_sample_size):
k = random.randint(0,len(titles) - 1)
print "[%s]th Example"%(k)
prt('title',titles[k])
prt('abstract',abstracts[k])
worker_size = 10
pool = multiprocessing.Pool()
for idx,conn in enumerate([r0,r1]):
keys = conn.keys()
batch = len(keys) / worker_size
for i in range(worker_size):
if i == worker_size - 1:
sub_keys = keys[i * batch : ]
else:
sub_keys = keys[i * batch : i * batch + batch]
pool.apply_async(worker,(idx * 10 + i,sub_keys,idx,),callback=combine_results)
pool.close()
pool.join()
dump_all_results()
check_dump()
print "all job finished!"
|
pygame__examples/circle_collision.py
|
DazEB2/SimplePyScripts
| 117 |
72967
|
<reponame>DazEB2/SimplePyScripts
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import sys
import random
import pygame
BLACK = ( 0, 0, 0)
WHITE = (255, 255, 255)
class Ball(pygame.sprite.Sprite):
def __init__(self, size, pos=(0, 0), color=WHITE):
super().__init__()
self.image = pygame.Surface([size, size], pygame.SRCALPHA)
pygame.draw.ellipse(self.image, color, [0, 0, size, size])
        # Required for pygame.sprite.Group.draw to work correctly
self.rect = self.image.get_rect()
self.rect.center = pos
        # Required for pygame.sprite.collide_circle to work correctly
self.radius = size // 2
pygame.init()
color = BLACK
screen = pygame.display.set_mode((600, 600))
clock = pygame.time.Clock()
x, y = random.randrange(0, 500), random.randrange(0, 500)
fnt = pygame.font.Font(None, 40)
ball_1 = Ball(size=100, pos=(x, y), color=BLACK)
ball_2 = Ball(size=50, color=BLACK)
balls = pygame.sprite.Group()
balls.add(ball_1)
balls.add(ball_2)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.MOUSEMOTION:
ball_2.rect.center = event.pos
break
screen.fill(WHITE)
balls.draw(screen)
if pygame.sprite.collide_circle(ball_1, ball_2):
color = BLACK
else:
color = WHITE
text = fnt.render("Collision!", True, color)
screen.blit(text, (260, 20))
pygame.display.update()
clock.tick(60)
|
clai/server/agent_executor.py
|
emishulovin/clai
| 391 |
72968
|
<reponame>emishulovin/clai
#
# Copyright (C) 2020 IBM. All Rights Reserved.
#
# See LICENSE.txt file in the root directory
# of this source tree for licensing information.
#
from abc import ABC, abstractmethod
from concurrent import futures
from functools import partial
from operator import is_not
from typing import List, Union
from clai.server.agent import Agent
from clai.server.command_message import Action, State
# pylint: disable=too-few-public-methods
class AgentExecutor(ABC):
@abstractmethod
def execute_agents(self, command: State, agents: List[Agent]) -> List[Action]:
"""execute all agents in parallel and return the actions"""
class ThreadExecutor(AgentExecutor):
MAX_TIME_PLUGIN_EXECUTION = 4
NUM_WORKERS = 4
def execute_agents(self, command: State, agents: List[Agent]) -> List[Union[Action, List[Action]]]:
with futures.ThreadPoolExecutor(max_workers=self.NUM_WORKERS) as executor:
done, _ = futures.wait(
[executor.submit(plugin_instance.execute, command) for plugin_instance in agents],
timeout=self.MAX_TIME_PLUGIN_EXECUTION)
if not done:
return []
results = map(lambda future: future.result(), done)
candidate_actions = list(filter(partial(is_not, None), results))
return candidate_actions
# pylint: disable= invalid-name
thread_executor = ThreadExecutor()
|
www/tests/test_urllib.py
|
raspberrypieman/brython
| 5,926 |
72984
|
import urllib.parse
assert urllib.parse.unquote("foo%20bar") == "foo bar"
import urllib.request
with urllib.request.urlopen('https://httpbin.org/headers') as f:
f.read()
# issue 1424
text = """Hello
World"""
assert urllib.parse.urlencode({"text": text}) == "text=Hello%0AWorld"
print('passed all tests')
|
tests/block/test_block_1.py
|
vaartis/python-lz4
| 193 |
73010
|
<reponame>vaartis/python-lz4
import lz4.block
import pytest
import sys
import os
def test_decompress_ui32_overflow():
data = lz4.block.compress(b'A' * 64)
with pytest.raises(OverflowError):
lz4.block.decompress(data[4:], uncompressed_size=((1 << 32) + 64))
def test_decompress_without_leak():
# Verify that hand-crafted packet does not leak uninitialized(?) memory.
data = lz4.block.compress(b'A' * 64)
message = r'^Decompressor wrote 64 bytes, but 79 bytes expected from header$'
with pytest.raises(lz4.block.LZ4BlockError, match=message):
lz4.block.decompress(b'\x4f' + data[1:])
def test_decompress_with_small_buffer():
data = lz4.block.compress(b'A' * 64, store_size=False)
message = r'^Decompression failed: corrupt input or insufficient space in destination buffer. Error code: \d+$'
with pytest.raises(lz4.block.LZ4BlockError, match=message):
lz4.block.decompress(data[4:], uncompressed_size=64)
with pytest.raises(lz4.block.LZ4BlockError, match=message):
lz4.block.decompress(data, uncompressed_size=60)
def test_decompress_truncated():
input_data = b"2099023098234882923049823094823094898239230982349081231290381209380981203981209381238901283098908123109238098123" * 24
compressed = lz4.block.compress(input_data)
# for i in range(len(compressed)):
# try:
# lz4.block.decompress(compressed[:i])
# except:
# print(i, sys.exc_info()[0], sys.exc_info()[1])
with pytest.raises(ValueError, match='Input source data size too small'):
lz4.block.decompress(compressed[:0])
for n in [0, 1]:
with pytest.raises(ValueError, match='Input source data size too small'):
lz4.block.decompress(compressed[:n])
for n in [24, 25, -2, 27, 67, 85]:
with pytest.raises(lz4.block.LZ4BlockError):
lz4.block.decompress(compressed[:n])
def test_decompress_with_trailer():
data = b'A' * 64
comp = lz4.block.compress(data)
message = r'^Decompression failed: corrupt input or insufficient space in destination buffer. Error code: \d+$'
with pytest.raises(lz4.block.LZ4BlockError, match=message):
lz4.block.decompress(comp + b'A')
with pytest.raises(lz4.block.LZ4BlockError, match=message):
lz4.block.decompress(comp + comp)
with pytest.raises(lz4.block.LZ4BlockError, match=message):
lz4.block.decompress(comp + comp[4:])
def test_unicode():
if sys.version_info < (3,):
return # skip
DATA = b'x'
with pytest.raises(TypeError):
lz4.block.compress(DATA.decode('latin1'))
lz4.block.decompress(lz4.block.compress(DATA).decode('latin1'))
# These next two are probably redundant given test_1 above but we'll keep them
# for now
def test_return_bytearray():
if sys.version_info < (3,):
return # skip
data = os.urandom(128 * 1024) # Read 128kb
compressed = lz4.block.compress(data)
b = lz4.block.compress(data, return_bytearray=True)
assert isinstance(b, bytearray)
assert bytes(b) == compressed
b = lz4.block.decompress(compressed, return_bytearray=True)
assert isinstance(b, bytearray)
assert bytes(b) == data
def test_memoryview():
if sys.version_info < (2, 7):
return # skip
data = os.urandom(128 * 1024) # Read 128kb
compressed = lz4.block.compress(data)
assert lz4.block.compress(memoryview(data)) == compressed
assert lz4.block.decompress(memoryview(compressed)) == data
def test_with_dict_none():
input_data = b"2099023098234882923049823094823094898239230982349081231290381209380981203981209381238901283098908123109238098123" * 24
for mode in ['default', 'high_compression']:
assert lz4.block.decompress(lz4.block.compress(
input_data, mode=mode, dict=None)) == input_data
assert lz4.block.decompress(lz4.block.compress(
input_data, mode=mode), dict=None) == input_data
assert lz4.block.decompress(lz4.block.compress(
input_data, mode=mode, dict=b'')) == input_data
assert lz4.block.decompress(lz4.block.compress(
input_data, mode=mode), dict=b'') == input_data
assert lz4.block.decompress(lz4.block.compress(
input_data, mode=mode, dict='')) == input_data
assert lz4.block.decompress(lz4.block.compress(
input_data, mode=mode), dict='') == input_data
def test_with_dict():
input_data = b"2099023098234882923049823094823094898239230982349081231290381209380981203981209381238901283098908123109238098123" * 24
dict1 = input_data[10:30]
dict2 = input_data[20:40]
message = r'^Decompression failed: corrupt input or insufficient space in destination buffer. Error code: \d+$'
for mode in ['default', 'high_compression']:
compressed = lz4.block.compress(input_data, mode=mode, dict=dict1)
with pytest.raises(lz4.block.LZ4BlockError, match=message):
lz4.block.decompress(compressed)
with pytest.raises(lz4.block.LZ4BlockError, match=message):
lz4.block.decompress(compressed, dict=dict1[:2])
assert lz4.block.decompress(compressed, dict=dict2) != input_data
assert lz4.block.decompress(compressed, dict=dict1) == input_data
assert lz4.block.decompress(lz4.block.compress(
input_data), dict=dict1) == input_data
def test_known_decompress_1():
input = b'\x00\x00\x00\x00\x00'
output = b''
assert lz4.block.decompress(input) == output
def test_known_decompress_2():
input = b'\x01\x00\x00\x00\x10 '
output = b' '
assert lz4.block.decompress(input) == output
def test_known_decompress_3():
input = b'h\x00\x00\x00\xff\x0bLorem ipsum dolor sit amet\x1a\x006P amet'
output = b'Lorem ipsum dolor sit amet' * 4
assert lz4.block.decompress(input) == output
def test_known_decompress_4():
input = b'\xb0\xb3\x00\x00\xff\x1fExcepteur sint occaecat cupidatat non proident.\x00' + (b'\xff' * 180) + b'\x1ePident'
output = b'Excepteur sint occaecat cupidatat non proident' * 1000
assert lz4.block.decompress(input) == output
|
SoftLayer/fixtures/SoftLayer_Virtual_DedicatedHost.py
|
dvzrv/softlayer-python
| 126 |
73043
|
getObject = {
'id': 37401,
'memoryCapacity': 242,
'modifyDate': '',
'name': 'test-dedicated',
'diskCapacity': 1200,
'createDate': '2017-10-16T12:50:23-05:00',
'cpuCount': 56,
'accountId': 1199911
}
getAvailableRouters = [
{'hostname': 'bcr01a.dal05', 'id': 12345},
{'hostname': 'bcr02a.dal05', 'id': 12346},
{'hostname': 'bcr03a.dal05', 'id': 12347},
{'hostname': 'bcr04a.dal05', 'id': 12348}
]
getObjectById = {
'datacenter': {
'id': 12345,
'name': 'dal05',
'longName': 'Dallas 5'
},
'memoryCapacity': 242,
'modifyDate': '2017-11-06T11:38:20-06:00',
'name': 'test-dedicated',
'diskCapacity': 1200,
'backendRouter': {
'domain': 'test.com',
'hostname': 'bcr01a.dal05',
'id': 12345
},
'guestCount': 1,
'cpuCount': 56,
'guests': [{
'domain': 'test.com',
'hostname': 'test-dedicated',
'id': 12345,
'uuid': 'F9329795-4220-4B0A-B970-C86B950667FA'
}],
'billingItem': {
'nextInvoiceTotalRecurringAmount': 1515.556,
'orderItem': {
'id': 12345,
'order': {
'status': 'APPROVED',
'privateCloudOrderFlag': False,
'modifyDate': '2017-11-02T11:42:50-07:00',
'orderQuoteId': '',
'userRecordId': 12345,
'createDate': '2017-11-02T11:40:56-07:00',
'impersonatingUserRecordId': '',
'orderTypeId': 7,
'presaleEventId': '',
'userRecord': {
'username': 'test-dedicated'
},
'id': 12345,
'accountId': 12345
}
},
'id': 12345,
'children': [
{
'nextInvoiceTotalRecurringAmount': 0.0,
'categoryCode': 'dedicated_host_ram'
},
{
'nextInvoiceTotalRecurringAmount': 0.0,
'categoryCode': 'dedicated_host_disk'
}
]
},
'id': 12345,
'createDate': '2017-11-02T11:40:56-07:00'
}
deleteObject = True
getGuests = [{
'id': 200,
'hostname': 'vs-test1',
'domain': 'test.sftlyr.ws',
'fullyQualifiedDomainName': 'vs-test1.test.sftlyr.ws',
'status': {'keyName': 'ACTIVE', 'name': 'Active'},
'datacenter': {'id': 50, 'name': 'TEST00',
'description': 'Test Data Center'},
'powerState': {'keyName': 'RUNNING', 'name': 'Running'},
'maxCpu': 2,
'maxMemory': 1024,
'primaryIpAddress': '172.16.240.2',
'globalIdentifier': '1a2b3c-1701',
'primaryBackendIpAddress': '10.45.19.37',
'hourlyBillingFlag': False,
'billingItem': {
'id': 6327,
'recurringFee': 1.54,
'orderItem': {
'order': {
'userRecord': {
'username': 'chechu',
}
}
}
},
}, {
'id': 202,
'hostname': 'vs-test2',
'domain': 'test.sftlyr.ws',
'fullyQualifiedDomainName': 'vs-test2.test.sftlyr.ws',
'status': {'keyName': 'ACTIVE', 'name': 'Active'},
'datacenter': {'id': 50, 'name': 'TEST00',
'description': 'Test Data Center'},
'powerState': {'keyName': 'RUNNING', 'name': 'Running'},
'maxCpu': 4,
'maxMemory': 4096,
'primaryIpAddress': '172.16.240.7',
'globalIdentifier': '05a8ac-6abf0',
'primaryBackendIpAddress': '10.45.19.35',
'hourlyBillingFlag': True,
'billingItem': {
'id': 6327,
'recurringFee': 1.54,
'orderItem': {
'order': {
'userRecord': {
'username': 'chechu',
}
}
}
}
}]
|
release/stubs.min/System/Security/AccessControl_parts/FileSystemAuditRule.py
|
htlcnn/ironpython-stubs
| 182 |
73065
|
class FileSystemAuditRule(AuditRule):
"""
Represents an abstraction of an access control entry (ACE) that defines an audit rule for a file or directory. This class cannot be inherited.
FileSystemAuditRule(identity: IdentityReference,fileSystemRights: FileSystemRights,flags: AuditFlags)
FileSystemAuditRule(identity: IdentityReference,fileSystemRights: FileSystemRights,inheritanceFlags: InheritanceFlags,propagationFlags: PropagationFlags,flags: AuditFlags)
FileSystemAuditRule(identity: str,fileSystemRights: FileSystemRights,flags: AuditFlags)
FileSystemAuditRule(identity: str,fileSystemRights: FileSystemRights,inheritanceFlags: InheritanceFlags,propagationFlags: PropagationFlags,flags: AuditFlags)
"""
@staticmethod
def __new__(self,identity,fileSystemRights,*__args):
"""
__new__(cls: type,identity: IdentityReference,fileSystemRights: FileSystemRights,flags: AuditFlags)
__new__(cls: type,identity: IdentityReference,fileSystemRights: FileSystemRights,inheritanceFlags: InheritanceFlags,propagationFlags: PropagationFlags,flags: AuditFlags)
__new__(cls: type,identity: str,fileSystemRights: FileSystemRights,flags: AuditFlags)
__new__(cls: type,identity: str,fileSystemRights: FileSystemRights,inheritanceFlags: InheritanceFlags,propagationFlags: PropagationFlags,flags: AuditFlags)
"""
pass
AccessMask=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the access mask for this rule.
"""
FileSystemRights=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the System.Security.AccessControl.FileSystemRights flags associated with the current System.Security.AccessControl.FileSystemAuditRule object.
Get: FileSystemRights(self: FileSystemAuditRule) -> FileSystemRights
"""
|
core/apiv2.py
|
dmoney/djangopackages
| 383 |
73154
|
<reponame>dmoney/djangopackages
from package import apiv2 as package_api
from grid import views as grid_views
from searchv2 import views as search_views
from django.urls import path
urlpatterns = [
# {% url "apiv2:category" %}
path(
"categories/", view=package_api.CategoryListAPIView.as_view(), name="categories"
),
# {% url "apiv2:packages" %}
path("packages/", view=package_api.PackageListAPIView.as_view(), name="packages"),
# {% url "apiv2:packages" slug %}
path(
"packages/<slug:slug>/",
view=package_api.PackageDetailAPIView.as_view(),
name="packages",
),
# {% url "apiv2:grids" %}
path("grids/", view=grid_views.GridListAPIView.as_view(), name="grids"),
# {% url "apiv2:grids" slug %}
path(
"grids/<slug:slug>/", view=grid_views.GridDetailAPIView.as_view(), name="grids"
),
# {% url "apiv2:search" %}
path("search/", view=search_views.SearchListAPIView.as_view(), name="search"),
# {% url "apiv2:search" slug %}
path(
"search/<slug:slug>/",
view=search_views.SearchDetailAPIView.as_view(),
name="search",
),
# {% url "apiv2:python3" slug %}
path("python3/", view=package_api.Python3ListAPIView.as_view(), name="python3"),
]
|
examples/python/mic.py
|
moredu/upm
| 619 |
73192
|
<gh_stars>100-1000
#!/usr/bin/env python
from __future__ import print_function
# Author: <NAME> <<EMAIL>>
# Copyright (c) 2015 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import time
from upm import pyupm_mic as upmMicrophone
def main():
# Attach microphone to analog port A0
myMic = upmMicrophone.Microphone(0)
threshContext = upmMicrophone.thresholdContext()
threshContext.averageReading = 0
threshContext.runningAverage = 0
threshContext.averagedOver = 2
# Infinite loop, ends when script is cancelled
# Repeatedly, take a sample every 2 microseconds;
# find the average of 128 samples; and
# print a running graph of dots as averages
while(1):
buffer = upmMicrophone.uint16Array(128)
        numSamples = myMic.getSampledWindow(2, 128, buffer)
        if numSamples:
            thresh = myMic.findThreshold(threshContext, 30, buffer, numSamples)
myMic.printGraph(threshContext)
if(thresh):
print("Threshold is ", thresh)
# Delete the upmMicrophone object
del myMic
if __name__ == '__main__':
main()
|
contracts/utils/sign.py
|
andrevmatos/microraiden
| 417 |
73206
|
<filename>contracts/utils/sign.py
import binascii
import bitcoin
from ethereum import utils
from secp256k1 import PrivateKey
from eth_utils import encode_hex
from utils.utils import sol_sha3
eth_prefix = "\x19Ethereum Signed Message:\n"
def eth_privtoaddr(priv) -> str:
pub = bitcoin.encode_pubkey(bitcoin.privtopub(priv), 'bin_electrum')
return "0x" + binascii.hexlify(sol_sha3(pub)[12:]).decode("ascii")
def eth_message_prefixed(msg: str) -> str:
return eth_prefix + str(len(msg)) + msg
def eth_message_hex(msg: str) -> bytes:
msg = eth_message_prefixed(msg)
msg_hex = encode_hex(msg)
return sol_sha3(msg_hex)
def eth_signed_typed_data_message(types, names, data) -> bytes:
"""
types e.g. ('address', 'uint', ('uint', 32))
names e.g. ('receiver', 'block_created', 'balance')
data e.g. ('0x5601ea8445a5d96eeebf89a67c4199fbb7a43fbb', 3000, 1000)
"""
assert len(types) == len(data) == len(names), 'Argument length mismatch.'
sign_types = []
sign_values = []
for i, type in enumerate(types):
if isinstance(type, tuple):
sign_types.append(type[0] + str(type[1]))
sign_values.append((data[i], type[1]))
else:
sign_types.append(type)
sign_values.append(data[i])
sign_types[i] += ' ' + names[i]
return sol_sha3(sol_sha3(*sign_types), sol_sha3(*sign_values))
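# Illustrative call (comment only, not part of the original module), combining the
# example values from the docstring above into a single typed-data hash:
# digest = eth_signed_typed_data_message(
#     ('address', 'uint', ('uint', 32)),
#     ('receiver', 'block_created', 'balance'),
#     ('0x5601ea8445a5d96eeebf89a67c4199fbb7a43fbb', 3000, 1000),
# )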
def sign(data: bytes, private_key_seed_ascii: str):
priv = private_key_seed_ascii
pk = PrivateKey(priv, raw=True)
signature = pk.ecdsa_recoverable_serialize(pk.ecdsa_sign_recoverable(data, raw=True))
signature = signature[0] + utils.bytearray_to_bytestr([signature[1]])
return signature, eth_privtoaddr(priv)
def check(data: bytes, pk: bytes):
return sign(data, pk)
|
docs/examples/tools/render.py
|
bkvalexey/aiogram_dialog
| 198 |
73207
|
from aiogram.dispatcher.filters.state import StatesGroup, State
from aiogram.types import Message
from aiogram_dialog import Dialog, Window, DialogManager
from aiogram_dialog.tools import render_transitions
from aiogram_dialog.widgets.input import MessageInput
from aiogram_dialog.widgets.kbd import Next, Back
from aiogram_dialog.widgets.text import Const
class RenderSG(StatesGroup):
first = State()
second = State()
last = State()
async def on_input(m: Message, dialog: Dialog, manager: DialogManager):
manager.current_context().dialog_data["name"] = m.text
await dialog.next()
dialog = Dialog(
Window(
Const("1. First"),
Next(),
state=RenderSG.first,
),
Window(
Const("2. Second"),
Back(),
MessageInput(on_input),
state=RenderSG.second,
),
Window(
Const("3. Last"),
Back(),
state=RenderSG.last,
),
)
# render the state transition diagram for this dialog
render_transitions([dialog])
|
wafw00f/plugins/sitelock.py
|
84KaliPleXon3/EnableSecurity-wafw00f
| 3,069 |
73210
|
<filename>wafw00f/plugins/sitelock.py
#!/usr/bin/env python
'''
Copyright (C) 2020, WAFW00F Developers.
See the LICENSE file for copying permission.
'''
NAME = 'Sitelock (TrueShield)'
# Well this is confusing, Sitelock itself uses Incapsula from Imperva
# So the fingerprints obtained on blockpage are similar to those of Incapsula.
def is_waf(self):
schemes = [
self.matchContent(r"SiteLock will remember you"),
self.matchContent(r"Sitelock is leader in Business Website Security Services"),
self.matchContent(r"sitelock[_\-]shield([_\-]logo|[\-_]badge)?"),
self.matchContent(r'SiteLock incident ID')
]
if any(i for i in schemes):
return True
return False
|
bin/print_attributes.py
|
wolverine-radar-company/python-sgp4
| 237 |
73243
|
<gh_stars>100-1000
#!/usr/bin/env python
from __future__ import print_function
from fileinput import input
from sgp4.vallado_cpp import Satrec
def main():
lines = iter(input())
for line in lines:
name = line
line1 = next(lines)
line2 = next(lines)
sat = Satrec.twoline2rv(line1, line2)
for name in dir(sat):
if name.startswith('_') or name in ('sgp4', 'twoline2rv'):
continue
value = getattr(sat, name)
print(name, value)
print()
if __name__ == '__main__':
try:
main()
except BrokenPipeError:
pass
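# Illustrative invocation (the file name is a placeholder, not from the original
# script): TLE data can be passed as a file argument or piped on stdin, e.g.
#   python print_attributes.py stations.txt
#   cat stations.txt | python print_attributes.py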
|
fairmotion/tasks/motion_prediction/metrics.py
|
CristianNajeraL/fairmotion
| 419 |
73249
|
<reponame>CristianNajeraL/fairmotion<filename>fairmotion/tasks/motion_prediction/metrics.py
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
from fairmotion.ops import conversions
def euler_diff(predictions, targets):
"""
Computes the Euler angle error as in previous work, following
https://github.com/una-dinosauria/human-motion-prediction/blob/master/src/translate.py#L207
Args:
predictions: np array of predicted joint angles represented as rotation matrices, i.e. in shape
(..., n_joints, 3, 3)
targets: np array of same shape as `predictions`
Returns:
The Euler angle error an np array of shape (..., )
"""
assert predictions.shape[-1] == 3 and predictions.shape[-2] == 3
assert targets.shape[-1] == 3 and targets.shape[-2] == 3
n_joints = predictions.shape[-3]
ori_shape = predictions.shape[:-3]
preds = np.reshape(predictions, [-1, 3, 3])
targs = np.reshape(targets, [-1, 3, 3])
euler_preds = conversions.R2E(preds) # (N, 3)
euler_targs = conversions.R2E(targs) # (N, 3)
# reshape to (-1, n_joints*3) to be consistent with previous work
euler_preds = np.reshape(euler_preds, [-1, n_joints * 3])
euler_targs = np.reshape(euler_targs, [-1, n_joints * 3])
# l2 error on euler angles
idx_to_use = np.where(np.std(euler_targs, 0) > 1e-4)[0]
euc_error = np.power(
euler_targs[:, idx_to_use] - euler_preds[:, idx_to_use], 2,
)
euc_error = np.sqrt(np.sum(euc_error, axis=1)) # (-1, ...)
# reshape to original
return np.reshape(euc_error, ori_shape)
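# Minimal smoke-test sketch (added for illustration, not part of the original module):
# identical rotation matrices should produce zero Euler angle error.
if __name__ == "__main__":
    # two "sequences", four joints, identity rotations everywhere
    rotations = np.tile(np.eye(3), (2, 4, 1, 1))
    error = euler_diff(rotations, rotations)
    assert error.shape == (2,)
    assert np.allclose(error, 0.0)
    print("euler_diff smoke test passed:", error)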
|
tests/test_acceptance.py
|
schlitzered/aiotask-context
| 161 |
73264
|
import asyncio
import random
import pytest
import uuid
from collections import defaultdict
import aiotask_context as context
@asyncio.coroutine
def dummy3():
yield from asyncio.sleep(random.uniform(0, 2))
return context.get("key")
@asyncio.coroutine
def dummy2(a, b):
yield from asyncio.sleep(random.uniform(0, 2))
res = context.get("key")
yield from asyncio.sleep(random.uniform(0, 2))
res1 = yield from dummy3()
assert res == res1
return a, b, res
@asyncio.coroutine
def dummy1(n_tasks):
context.set("key", str(uuid.uuid4()))
tasks = [
asyncio.ensure_future(
dummy2(id(context.asyncio_current_task()), n)) for n in range(n_tasks)]
results = yield from asyncio.gather(*tasks)
info = defaultdict(list)
for taskid, n, key in results:
info[key].append([taskid, n])
return info
@pytest.mark.asyncio
@asyncio.coroutine
def test_ensure_future_concurrent():
n_tasks = 10
results = yield from asyncio.gather(*[dummy1(n_tasks=n_tasks) for x in range(1000)])
for r in results:
assert len(r) == 1
for key, value in r.items():
assert len(value) == n_tasks
@pytest.mark.asyncio
@asyncio.coroutine
def test_ensurefuture_context_propagation():
context.set("key", "value")
@asyncio.coroutine
def change_context():
assert context.get("key") == "value"
context.set("key", "what")
context.set("other", "data")
yield from asyncio.ensure_future(change_context())
assert context.get("key") == "what"
assert context.get("other") == "data"
@pytest.mark.asyncio
@asyncio.coroutine
def test_waitfor_context_propagation():
context.set("key", "value")
@asyncio.coroutine
def change_context():
assert context.get("key") == "value"
context.set("key", "what")
context.set("other", "data")
yield from asyncio.wait_for(change_context(), 1)
assert context.get("key") == "what"
assert context.get("other") == "data"
@pytest.mark.asyncio
@asyncio.coroutine
def test_gather_context_propagation():
context.set("key", "value")
@asyncio.coroutine
def change_context():
assert context.get("key") == "value"
context.set("key", "what")
context.set("other", "data")
yield from asyncio.gather(change_context())
assert context.get("key") == "what"
assert context.get("other") == "data"
|
examples/validator_post_init.py
|
klauer/apischema
| 118 |
73288
|
<filename>examples/validator_post_init.py
from dataclasses import InitVar, dataclass, field
from pytest import raises
from apischema import ValidationError, deserialize, validator
from apischema.metadata import init_var
@dataclass
class Foo:
bar: InitVar[int] = field(metadata=init_var(int))
@validator(bar)
def validate(self, bar: int):
if bar < 0:
yield "negative"
with raises(ValidationError) as err:
deserialize(Foo, {"bar": -1})
assert err.value.errors == [{"loc": ["bar"], "msg": "negative"}]
|
specs/matchers/built_in/be_spec.py
|
danibaena/expects
| 189 |
73292
|
# -*- coding: utf-8 -*-
from expects import *
from expects.testing import failure
with describe('be'):
with it('should pass if object is expected'):
value = 1
expect(value).to(be(value))
with it('should fail if object is not expected'):
with failure('expected: 1 to be 2'):
expect(1).to(be(2))
with context('#negated'):
with it('should pass if object is not expected'):
expect(1).not_to(be(2))
with it('should fail if object is expected'):
value = 1
with failure('expected: 1 not to be 1'):
expect(value).not_to(be(value))
|
misc/dev_blog/SOTA/dataset_tools/xml_to_kitti.py
|
NVIDIA-AI-IOT/deepstream_tlt_apps
| 146 |
73301
|
<reponame>NVIDIA-AI-IOT/deepstream_tlt_apps
################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2019-2021 NVIDIA CORPORATION
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
################################################################################
import argparse
import os
import xml.etree.ElementTree as ET
def parse_args(args=None):
parser = argparse.ArgumentParser('Converting xml labels to KITTI format.')
parser.add_argument('-i', '--input_label_dir', type=str, required=True, help='directory of the input xml labels')
parser.add_argument('-o', '--output_label_dir', type=str, required=True, help='directory of the output KITTI labels')
parser.add_argument('-d', '--encode_difficult', action="store_true", required=False, help='Whether or not to encode the difficult object into KITTI labels')
args, _ = parser.parse_known_args(args)
return args
def xml_to_kitti(input_dir, output_dir, encode_difficult, classes):
if not os.path.exists(input_dir):
raise ValueError('input_dir not found.')
if not os.path.exists(output_dir):
raise ValueError('output_dir not found.')
for annot in os.listdir(input_dir):
et = ET.parse(os.path.join(input_dir, annot))
element = et.getroot()
element_objs = element.findall('object')
element_width = int(element.find('size').find('width').text)
element_height = int(element.find('size').find('height').text)
element_depth = int(element.find('size').find('depth').text)
assert element_depth == 3
assert len(element_objs) > 0, 'No objects in {}.'.format(os.path.join(input_dir, annot))
lines = []
for element_obj in element_objs:
difficulty = int(element_obj.find('difficult').text) == 1
if difficulty and encode_difficult:
dif = '1'
else:
dif = '0'
line = ''
class_name = element_obj.find('name').text
assert class_name in classes
line += class_name
line += ' '
line += '0 {} 0 '.format(dif)
obj_bbox = element_obj.find('bndbox')
x1 = int(round(float(obj_bbox.find('xmin').text)))
y1 = int(round(float(obj_bbox.find('ymin').text)))
x2 = int(round(float(obj_bbox.find('xmax').text)))
y2 = int(round(float(obj_bbox.find('ymax').text)))
line += str(x1)
line += ' '
line += str(y1)
line += ' '
line += str(x2)
line += ' '
line += str(y2)
line += ' '
line += '0 0 0 0 0 0 0\n'
lines.append(line)
with open(os.path.join(output_dir, os.path.basename(annot).split('.')[0]+'.txt'), 'w') as f:
f.writelines(lines)
if __name__ =='__main__':
classes = ['horse',
"pottedplant",
"train",
"person",
"bird",
"car",
"chair",
"tvmonitor",
"bus",
"sofa",
"dog",
"motorbike",
"bicycle",
"sheep",
"boat",
"cat",
"bottle",
"diningtable",
"cow",
"aeroplane",
"background",
]
args = parse_args()
xml_to_kitti(args.input_label_dir, args.output_label_dir, args.encode_difficult, classes)
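# Illustrative invocation (directory names are placeholders, not from the original
# repository):
#   python xml_to_kitti.py -i ./voc_xml_labels -o ./kitti_labels -d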
|
tests/test_middleware/test_pure_asgi_middleware.py
|
adamantike/starlette-context
| 242 |
73327
|
<reponame>adamantike/starlette-context
from starlette import status
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.requests import Request
from starlette.responses import JSONResponse
from starlette.testclient import TestClient
from starlette_context import context, plugins
from starlette_context.header_keys import HeaderKeys
from starlette_context.middleware import RawContextMiddleware
plugins_to_use = (
plugins.CorrelationIdPlugin(),
plugins.RequestIdPlugin(),
plugins.UserAgentPlugin(),
plugins.ForwardedForPlugin(),
plugins.DateHeaderPlugin(),
)
middleware = [
Middleware(
RawContextMiddleware,
plugins=plugins_to_use,
)
]
app = Starlette(middleware=middleware)
client = TestClient(app)
@app.route("/")
async def index(request: Request) -> JSONResponse:
return JSONResponse(content=context.data)
def test_valid_request():
resp = client.get("/")
assert resp.status_code == status.HTTP_200_OK
for plugin in plugins_to_use:
assert plugin.key in resp.text
assert HeaderKeys.correlation_id in resp.headers
assert HeaderKeys.request_id in resp.headers
|
experiments/2013-10-01-pure-lin.py
|
jaesikchoi/gpss-research
| 151 |
73334
|
Experiment(description='Testing the pure linear kernel',
data_dir='../data/tsdlr/',
max_depth=10,
random_order=False,
k=1,
debug=False,
local_computation=False,
n_rand=9,
sd=2,
jitter_sd=0.1,
max_jobs=500,
verbose=False,
make_predictions=False,
skip_complete=True,
results_dir='../results/2013-10-01-pure-lin/',
iters=250,
base_kernels='SE,PureLin,Const,Exp,Fourier,Noise',
zero_mean=True,
random_seed=1,
period_heuristic=5,
subset=True,
subset_size=250,
full_iters=10,
bundle_size=5,
additive_form=True,
model_noise=True,
no_noise=True)
|
vespene/common/logger.py
|
Conan-Kudo/vespene
| 680 |
73360
|
# Copyright 2018, <NAME> LLC
# License: Apache License Version 2.0
# -------------------------------------------------------------------------
# logger.py - basic wrapper around Python standard logging, so that if we ever
# need to change this behavior it is all in one place
# --------------------------------------------------------------------------
import logging
class Logger(object):
__instance = None
def __init__(self):
pass
def debug(self, msg):
self.logger.debug(msg)
def info(self, msg):
self.logger.info(msg)
def warn(self, msg):
self.logger.warn(msg)
def error(self, msg):
self.logger.error(msg)
def __new__(cls):
if Logger.__instance is None:
Logger.__instance = object.__new__(cls)
logger = logging.getLogger('vespene')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
Logger.__instance.logger = logger
return Logger.__instance
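# Minimal usage sketch (illustrative, not part of the original module): the singleton
# hands back the same configured logger no matter how many times it is constructed.
if __name__ == "__main__":
    log = Logger()
    log.info("vespene logger initialized")
    assert Logger() is log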
|
hamiltorch/__init__.py
|
kakodkar/hamiltorch
| 237 |
73366
|
<reponame>kakodkar/hamiltorch<gh_stars>100-1000
__version__ = '0.4.0.dev1'
from .samplers import sample, sample_model, predict_model, sample_split_model, Sampler, Integrator, Metric
from .util import set_random_seed
|
Visualization/random_color_visualizer.py
|
monocilindro/qgis-earthengine-examples
| 646 |
73373
|
# GitHub URL: https://github.com/giswqs/qgis-earthengine-examples/tree/master/Visualization/random_color_visualizer.py
import ee
from ee_plugin import Map
dataset = ee.Image('USGS/NLCD/NLCD2016')
landcover = ee.Image(dataset.select('landcover'))
Map.setCenter(-95, 38, 5)
Map.addLayer(landcover.randomVisualizer(), {}, 'Landcover')
|
recipes/libmikmod/all/conanfile.py
|
rockandsalt/conan-center-index
| 562 |
73397
|
from conans import ConanFile, CMake, tools
import os
class LibmikmodConan(ConanFile):
name = "libmikmod"
description = "Module player and library supporting many formats, including mod, s3m, it, and xm."
topics = ("libmikmod", "audio")
url = "https://github.com/conan-io/conan-center-index"
homepage = "http://mikmod.sourceforge.net"
license = "LGPL-2.1-or-later"
exports_sources = ["patches/*", "CMakeLists.txt"]
generators = "cmake"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"with_dsound": [True, False],
"with_mmsound": [True, False],
"with_alsa": [True, False],
"with_oss": [True, False],
"with_pulse": [True, False],
"with_coreaudio": [True, False]
}
default_options = {
"shared": False,
"fPIC": True,
"with_dsound": True,
"with_mmsound": True,
"with_alsa": True,
"with_oss": True,
"with_pulse": True,
"with_coreaudio": True
}
_source_subfolder = "source_subfolder"
_build_subfolder = "build_subfolder"
_cmake = None
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
else:
del self.options.with_dsound
del self.options.with_mmsound
if self.settings.os != "Linux":
del self.options.with_alsa
# Non-Apple Unices
if self.settings.os not in ["Linux", "FreeBSD"]:
del self.options.with_oss
del self.options.with_pulse
# Apple
        if not tools.is_apple_os(self.settings.os):
            del self.options.with_coreaudio
def configure(self):
if self.options.shared:
del self.options.fPIC
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
def requirements(self):
if self.settings.os == "Linux":
if self.options.with_alsa:
self.requires("libalsa/1.2.4")
if self.options.with_pulse:
self.requires("pulseaudio/13.0")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
extracted_dir = self.name + "-" + self.version
os.rename(extracted_dir, self._source_subfolder)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self, set_cmake_flags=True)
self._cmake.definitions["ENABLE_STATIC"] = not self.options.shared
self._cmake.definitions["ENABLE_DOC"] = False
self._cmake.definitions["ENABLE_DSOUND"] = self.options.get_safe("with_dsound", False)
self._cmake.definitions["ENABLE_MMSOUND"] = self.options.get_safe("with_mmsound", False)
self._cmake.definitions["ENABLE_ALSA"] = self.options.get_safe("with_alsa", False)
self._cmake.definitions["ENABLE_OSS"] = self.options.get_safe("with_oss", False)
self._cmake.definitions["ENABLE_PULSE"] = self.options.get_safe("with_pulse", False)
self._cmake.definitions["ENABLE_COREAUDIO"] = self.options.get_safe("with_coreaudio", False)
self._cmake.configure(build_folder=self._build_subfolder)
return self._cmake
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeLists.txt"),
"CMAKE_SOURCE_DIR",
"PROJECT_SOURCE_DIR")
        # Ensure missing dependencies yield errors
tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeLists.txt"),
"MESSAGE(WARNING",
"MESSAGE(FATAL_ERROR")
tools.replace_in_file(os.path.join(self._source_subfolder, "drivers", "drv_alsa.c"),
"alsa_pcm_close(pcm_h);",
"if (pcm_h) alsa_pcm_close(pcm_h);")
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy(pattern="COPYING.LESSER", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
os.remove(os.path.join(self.package_folder, "bin", "libmikmod-config"))
if not self.options.shared:
tools.rmdir(os.path.join(self.package_folder, "bin"))
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
if not self.options.shared:
self.cpp_info.defines = ["MIKMOD_STATIC"]
self.cpp_info.filenames["pkg_config"] = "libmikmod"
if self.options.get_safe("with_dsound"):
self.cpp_info.system_libs.append("dsound")
if self.options.get_safe("with_mmsound"):
self.cpp_info.system_libs.append("winmm")
if self.options.get_safe("with_coreaudio"):
self.cpp_info.frameworks.append("CoreAudio")
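# Illustrative local build command (an assumption, not from the recipe; the version
# below is a placeholder and must match an entry in the recipe's conandata.yml):
#   conan create . libmikmod/<version>@ -o libmikmod:shared=True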
|
lib/python2.7/site-packages/stevedore/tests/test_sphinxext.py
|
nishaero/wifi-userseg-ryu
| 1,553 |
73398
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the sphinx extension
"""
from __future__ import unicode_literals
from stevedore import extension
from stevedore import sphinxext
from stevedore.tests import utils
import mock
import pkg_resources
def _make_ext(name, docstring):
def inner():
pass
inner.__doc__ = docstring
m1 = mock.Mock(spec=pkg_resources.EntryPoint)
m1.module_name = '%s_module' % name
s = mock.Mock(return_value='ENTRY_POINT(%s)' % name)
m1.__str__ = s
return extension.Extension(name, m1, inner, None)
class TestSphinxExt(utils.TestCase):
def setUp(self):
super(TestSphinxExt, self).setUp()
self.exts = [
_make_ext('test1', 'One-line docstring'),
_make_ext('test2', 'Multi-line docstring\n\nAnother para'),
]
self.em = extension.ExtensionManager.make_test_instance(self.exts)
def test_simple_list(self):
results = list(sphinxext._simple_list(self.em))
self.assertEqual(
[
('* test1 -- One-line docstring', 'test1_module'),
('* test2 -- Multi-line docstring', 'test2_module'),
],
results,
)
def test_simple_list_no_docstring(self):
ext = [_make_ext('nodoc', None)]
em = extension.ExtensionManager.make_test_instance(ext)
results = list(sphinxext._simple_list(em))
self.assertEqual(
[
('* nodoc -- ', 'nodoc_module'),
],
results,
)
def test_detailed_list(self):
results = list(sphinxext._detailed_list(self.em))
self.assertEqual(
[
('test1', 'test1_module'),
('-----', 'test1_module'),
('\n', 'test1_module'),
('One-line docstring', 'test1_module'),
('\n', 'test1_module'),
('test2', 'test2_module'),
('-----', 'test2_module'),
('\n', 'test2_module'),
('Multi-line docstring\n\nAnother para', 'test2_module'),
('\n', 'test2_module'),
],
results,
)
def test_detailed_list_format(self):
results = list(sphinxext._detailed_list(self.em, over='+', under='+'))
self.assertEqual(
[
('+++++', 'test1_module'),
('test1', 'test1_module'),
('+++++', 'test1_module'),
('\n', 'test1_module'),
('One-line docstring', 'test1_module'),
('\n', 'test1_module'),
('+++++', 'test2_module'),
('test2', 'test2_module'),
('+++++', 'test2_module'),
('\n', 'test2_module'),
('Multi-line docstring\n\nAnother para', 'test2_module'),
('\n', 'test2_module'),
],
results,
)
def test_detailed_list_no_docstring(self):
ext = [_make_ext('nodoc', None)]
em = extension.ExtensionManager.make_test_instance(ext)
results = list(sphinxext._detailed_list(em))
self.assertEqual(
[
('nodoc', 'nodoc_module'),
('-----', 'nodoc_module'),
('\n', 'nodoc_module'),
('.. warning:: No documentation found in ENTRY_POINT(nodoc)',
'nodoc_module'),
('\n', 'nodoc_module'),
],
results,
)
|
slack_sdk/socket_mode/async_client.py
|
ggml1/python-slack-sdk
| 160 |
73430
|
<filename>slack_sdk/socket_mode/async_client.py
import asyncio
import json
import logging
from asyncio import Queue
from asyncio.futures import Future
from logging import Logger
from typing import Dict, Union, Any, Optional, List, Callable, Awaitable
from slack_sdk.errors import SlackApiError
from slack_sdk.socket_mode.async_listeners import (
AsyncWebSocketMessageListener,
AsyncSocketModeRequestListener,
)
from slack_sdk.socket_mode.request import SocketModeRequest
from slack_sdk.socket_mode.response import SocketModeResponse
from slack_sdk.web.async_client import AsyncWebClient
class AsyncBaseSocketModeClient:
logger: Logger
web_client: AsyncWebClient
app_token: str
wss_uri: str
auto_reconnect_enabled: bool
closed: bool
message_queue: Queue
message_listeners: List[
Union[
AsyncWebSocketMessageListener,
Callable[
["AsyncBaseSocketModeClient", dict, Optional[str]], Awaitable[None]
],
]
]
socket_mode_request_listeners: List[
Union[
AsyncSocketModeRequestListener,
Callable[["AsyncBaseSocketModeClient", SocketModeRequest], Awaitable[None]],
]
]
async def issue_new_wss_url(self) -> str:
try:
response = await self.web_client.apps_connections_open(
app_token=self.app_token
)
return response["url"]
except SlackApiError as e:
if e.response["error"] == "ratelimited":
# NOTE: ratelimited errors rarely occur with this endpoint
delay = int(e.response.headers.get("Retry-After", "30")) # Tier1
self.logger.info(f"Rate limited. Retrying in {delay} seconds...")
await asyncio.sleep(delay)
# Retry to issue a new WSS URL
return await self.issue_new_wss_url()
else:
# other errors
self.logger.error(f"Failed to retrieve WSS URL: {e}")
raise e
async def connect(self):
raise NotImplementedError()
async def disconnect(self):
raise NotImplementedError()
async def connect_to_new_endpoint(self):
self.wss_uri = await self.issue_new_wss_url()
await self.connect()
async def close(self):
self.closed = True
await self.disconnect()
async def send_message(self, message: str):
raise NotImplementedError()
async def send_socket_mode_response(
self, response: Union[Dict[str, Any], SocketModeResponse]
):
if isinstance(response, SocketModeResponse):
await self.send_message(json.dumps(response.to_dict()))
else:
await self.send_message(json.dumps(response))
async def enqueue_message(self, message: str):
await self.message_queue.put(message)
if self.logger.level <= logging.DEBUG:
queue_size = self.message_queue.qsize()
self.logger.debug(
f"A new message enqueued (current queue size: {queue_size})"
)
async def process_messages(self):
while not self.closed:
try:
await self.process_message()
except Exception as e:
self.logger.exception(f"Failed to process a message: {e}")
async def process_message(self):
raw_message = await self.message_queue.get()
if raw_message is not None:
message: dict = {}
if raw_message.startswith("{"):
message = json.loads(raw_message)
_: Future[None] = asyncio.ensure_future(
self.run_message_listeners(message, raw_message)
)
async def run_message_listeners(self, message: dict, raw_message: str) -> None:
type, envelope_id = message.get("type"), message.get("envelope_id")
if self.logger.level <= logging.DEBUG:
self.logger.debug(
f"Message processing started (type: {type}, envelope_id: {envelope_id})"
)
try:
if message.get("type") == "disconnect":
await self.connect_to_new_endpoint()
return
for listener in self.message_listeners:
try:
await listener(self, message, raw_message)
except Exception as e:
self.logger.exception(f"Failed to run a message listener: {e}")
if len(self.socket_mode_request_listeners) > 0:
request = SocketModeRequest.from_dict(message)
if request is not None:
for listener in self.socket_mode_request_listeners:
try:
await listener(self, request)
except Exception as e:
self.logger.exception(
f"Failed to run a request listener: {e}"
)
except Exception as e:
self.logger.exception(f"Failed to run message listeners: {e}")
finally:
if self.logger.level <= logging.DEBUG:
self.logger.debug(
f"Message processing completed (type: {type}, envelope_id: {envelope_id})"
)
|
conans/test/unittests/model/editable_layout/load_data_test.py
|
matthiasng/conan
| 6,205 |
73431
|
<reponame>matthiasng/conan<filename>conans/test/unittests/model/editable_layout/load_data_test.py<gh_stars>1000+
# coding=utf-8
import os
import shutil
import textwrap
import unittest
import six
from conans.errors import ConanException
from conans.model.editable_layout import EditableLayout
from conans.test.utils.test_files import temp_folder
from conans.util.files import save
class ParseTest(unittest.TestCase):
def setUp(self):
self.test_folder = temp_folder()
self.layout_filepath = os.path.join(self.test_folder, "layout")
self.editable_cpp_info = EditableLayout(self.layout_filepath)
def tearDown(self):
shutil.rmtree(self.test_folder)
def test_field_error(self):
content = textwrap.dedent("""
[includedrs]
something
""")
save(self.layout_filepath, content)
with six.assertRaisesRegex(self, ConanException, "Wrong cpp_info field 'includedrs' in layout"):
_ = self.editable_cpp_info._load_data(ref=None, settings=None, options=None)
content = textwrap.dedent("""
[*:includedrs]
something
""")
save(self.layout_filepath, content)
with six.assertRaisesRegex(self, ConanException, "Wrong cpp_info field 'includedrs' in layout"):
_ = self.editable_cpp_info._load_data(ref=None, settings=None, options=None)
content = textwrap.dedent("""
[*:includedirs]
something
""")
save(self.layout_filepath, content)
        with six.assertRaisesRegex(self, ConanException, r"Wrong package reference '\*' in layout file"):
_ = self.editable_cpp_info._load_data(ref=None, settings=None, options=None)
content = textwrap.dedent("""
[pkg/version@user/channel:revision:includedirs]
something
""")
save(self.layout_filepath, content)
with six.assertRaisesRegex(self, ConanException, "Wrong package reference "
"'pkg/version@user/channel:revision' in layout file"):
_ = self.editable_cpp_info._load_data(ref=None, settings=None, options=None)
|
h5Nastran/h5Nastran/post_process/result_readers/op2/odict_keys.py
|
ACea15/pyNastran
| 293 |
73437
|
from collections import OrderedDict
class MyObj(object):
b = 1
a = 2
def __init__(self):
object.__setattr__(self, '_attrs', OrderedDict())
self.c = 1
self.d = 2
def __setattr__(self, key, value):
assert key != '_attrs'
self._attrs[key] = value
def __getattr__(self, item):
try:
return self._attrs[item]
except KeyError:
return self.__class__.__dict__[item]
@property
def __dict__(self):
return self._attrs
a = MyObj()
a.e = 3
print(a.__dict__)
print(MyObj.__dict__)
print(a.a)
|
c4_troubleshooting-debugging-techniques/4_managing-resources/graded-assessment/start_date_report.py
|
chaiyeow/google-it-automation
| 220 |
73449
|
#!/usr/bin/env python3
import csv
import datetime
import requests
# def get_file_lines(url):
def download_file(url):
"""Returns the lines contained in the file at the given URL"""
# Download the file over the internet
response = requests.get(url, stream=True)
lines = []
for line in response.iter_lines():
lines.append(line.decode("UTF-8"))
return lines
def get_start_date():
"""Interactively get the start date to query for."""
print()
print('Getting the first start date to query for.')
print()
print('The date must be greater than Jan 1st, 2018')
year = int(input('Enter a value for the year: '))
month = int(input('Enter a value for the month: '))
day = int(input('Enter a value for the day: '))
print()
return datetime.datetime(year, month, day)
def get_same_or_newer(data, start_date):
"""Returns the employees that started on the given date, or the closest one."""
# We want all employees that started at the same date or the closest newer
# date. To calculate that, we go through all the data and find the
# employees that started on the smallest date that's equal or bigger than
# the given start date.
min_date = datetime.datetime.today()
min_date_employees = []
for row in data:
row_date = datetime.datetime.strptime(row[3], '%Y-%m-%d')
# If this date is smaller than the one we're looking for,
# we skip this row
if row_date < start_date:
continue
# If this date is smaller than the current minimum,
# we pick it as the new minimum, resetting the list of
# employees at the minimal date.
if row_date < min_date:
min_date = row_date
min_date_employees = []
# If this date is the same as the current minimum,
# we add the employee in this row to the list of
# employees at the minimal date.
if row_date == min_date:
min_date_employees.append("{} {}".format(row[0], row[1]))
return min_date, min_date_employees
def list_newer(data, start_date):
while start_date < datetime.datetime.today():
start_date, employees = get_same_or_newer(data, start_date)
print("Started on {}: {}".format(start_date.strftime("%b %d, %Y"), employees))
# Now move the date to the next one
start_date = start_date + datetime.timedelta(days=1)
def main():
start_date = get_start_date()
FILE_URL = "https://storage.googleapis.com/gwg-hol-assets/gic215/employees-with-date.csv"
data = download_file(FILE_URL) # Hint 1. Download the file only once from the URL.
reader = csv.reader(data[1:])
data_list = list(reader)
data_list.sort(key = lambda x: x[3]) # Hint 2. To sort the data by start_date and then go date by date.
list_newer(data_list, start_date)
if __name__ == "__main__":
main()
"""
# Tested on my local machine; many more improvements could be applied
Before:
1st attempt | 2nd attempt | 3rd attempt
real 0m46,007s | 1m1,561s | 0m53,076s
user 0m7,663s | 0m9,278s | 0m9,166s
sys 0m0,362s | 0m0,426s | 0m0,405s
After:
1st attempt | 2nd attempt | 3rd attempt
real 0m1,770s | 0m3,155s | 0m3,642s
user 0m1,216s | 0m0,311s | 0m0,284s
sys 0m0,045s | 0m0,038s | 0m0,045s
"""
|
plenum/test/input_validation/test_message_factory.py
|
andkononykhin/plenum
| 148 |
73456
|
import pytest
from plenum.common.exceptions import MissingNodeOp, InvalidNodeOp
from plenum.common.messages.fields import NonNegativeNumberField, AnyValueField, HexField, BooleanField, Base58Field
from plenum.common.messages.message_base import MessageBase
from plenum.common.messages.node_message_factory import MessageFactory, NodeMessageFactory
from plenum.test.input_validation.stub_messages import Message1, Message2, Message3, Message4
@pytest.fixture
def factory():
return MessageFactory('plenum.test.input_validation.stub_messages')
def test_message_factory_module_is_not_found_fails():
with pytest.raises(ImportError):
MessageFactory('foo.bar')
def test_message_factory_classes_not_found_fails():
with pytest.raises(ValueError) as excinfo:
# TODO assumes that __init__ won't import any
# MessageBase child classes
MessageFactory('plenum.test.input_validation.__init__')
assert "no messages classes found" in str(excinfo.value)
def test_message_factory_missed_op_fails(factory):
msg = {'a': 0, 'b': 'bar'}
with pytest.raises(MissingNodeOp):
factory.get_instance(**msg)
def test_message_factory_invalid_op_fails(factory):
msg = {'op': 'unknown_op', 'a': 0, 'b': 'bar'}
with pytest.raises(InvalidNodeOp):
factory.get_instance(**msg)
def test_message_factory_stub_module_is_loaded(factory):
msg = {'op': 'Message1', 'a': 0, 'b': 'bar'}
assert isinstance(factory.get_instance(**msg), Message1)
def test_message_factory_set_non_message_class_fails(factory):
class NonMessageClass:
pass
with pytest.raises(ValueError):
factory.set_message_class(NonMessageClass)
def test_message_factory_set_message_class_can_add_message_class(factory):
class ANewMessageClass(MessageBase):
typename = 'NewMessage'
schema = (
('a', NonNegativeNumberField()),
)
factory.set_message_class(ANewMessageClass)
msg = {'op': 'NewMessage', 'a': 0}
assert isinstance(factory.get_instance(**msg), ANewMessageClass)
def test_node_message_factory_module_is_loaded():
NodeMessageFactory()
def test_message_factory_can_replace_field(factory):
# check precondition
msg = {'op': 'Message2', 'a': 0, 'b': 'foo'}
assert isinstance(factory.get_instance(**msg), Message2)
factory.update_schemas_by_field_type(AnyValueField, NonNegativeNumberField)
with pytest.raises(TypeError) as exc_info:
factory.get_instance(**msg)
exc_info.match("expected types 'int', got 'str'")
def test_message_factory_can_replace_iterable_field(factory):
# check precondition
msg = {'op': 'Message3', 'a': 0, 'b': [True, False]}
assert isinstance(factory.get_instance(**msg), Message3)
factory.update_schemas_by_field_type(BooleanField, Base58Field)
with pytest.raises(TypeError) as exc_info:
factory.get_instance(**msg)
exc_info.match("expected types 'str', got 'bool'")
def test_message_factory_can_replace_map_field(factory):
# check precondition
msg = {'op': 'Message4', 'a': 0, 'b': {'123': 'abc'}}
assert isinstance(factory.get_instance(**msg), Message4)
factory.update_schemas_by_field_type(HexField, NonNegativeNumberField)
with pytest.raises(TypeError) as exc_info:
factory.get_instance(**msg)
exc_info.match("expected types 'int', got 'str'")
|
paperbroker/adapters/accounts/LocalFileSystemAccountAdapter.py
|
yutiansut/paperbroker
| 227 |
73462
|
from ...accounts import Account, account_factory
import tempfile
import os
import pickle
from os import listdir
from os.path import isfile, join
class LocalFileSystemAccountAdapter():
def __init__(self, root=None):
if root is None: root = tempfile.gettempdir()
if not os.path.exists(root+"/accounts/"):
os.makedirs(root+"/accounts/")
self.root = root
def get_account(self, account_id: str, current_date=None):
with open(self.root + "/accounts/" + account_id + ".pickle", 'rb') as f:
return pickle.load(file=f)
    def has_account(self, account_id: str, current_date=None):
        try:
            # a missing or unreadable pickle file means the account does not exist
            with open(self.root + "/accounts/" + account_id + ".pickle", 'rb') as f:
                pickle.load(file=f)
            return True
        except:
            return False
def put_account(self, account: Account, current_date=None):
with open(self.root + "/accounts/" + account.account_id + ".pickle", 'wb') as f:
pickle.dump(account, file=f)
def get_account_ids(self, current_date=None):
mypath = self.root + "/accounts/"
return [f.split(".")[0] for f in listdir(mypath) if isfile(join(mypath, f))]
def delete_account(self, account, current_date=None):
try:
os.remove(self.root + "/accounts/" + account_factory(account).account_id + ".pickle")
except:
pass
def delete_accounts(self, accounts, current_date=None):
[self.delete_account(account) for account in accounts]
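# Minimal usage sketch (illustrative; how Account instances are constructed depends on
# the paperbroker Account class, so treat the Account() call as an assumption):
# adapter = LocalFileSystemAccountAdapter()
# adapter.put_account(Account())
# print(adapter.get_account_ids())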
|
setup.py
|
dexy/dexy
| 136 |
73471
|
from setuptools import setup, find_packages
from dexy.version import DEXY_VERSION
import platform
is_windows = platform.system() == 'Windows'
if is_windows:
os_specific_requires = []
else:
os_specific_requires = ['pexpect']
setup(
author='<NAME>',
author_email='<EMAIL>',
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Financial and Insurance Industry",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Topic :: Documentation",
"Topic :: Software Development :: Build Tools",
"Topic :: Software Development :: Code Generators",
"Topic :: Software Development :: Documentation",
"Topic :: Text Processing",
"Topic :: Text Processing :: Markup :: HTML",
"Topic :: Text Processing :: Markup :: LaTeX"
],
description='Document Automation',
### "entry-points"
entry_points = {
'console_scripts' : [
'dexy = dexy.commands:run'
],
'pygments.lexers' : [
'rst+django = dexy.filters.utils:RstDjangoLexer'
]
},
### @end
include_package_data = True,
install_requires = os_specific_requires + [
# for internal dexy use or used in many common plugins
'BeautifulSoup4',
'PyYAML',
'cashew>=0.4.1',
'chardet',
'inflection>=0.2.0',
'jinja2',
'ply>=3.4',
'pygments',
'python3-modargs',
'requests>=0.10.6',
# for convenience of running additional filters
'Markdown',
'docutils'
],
name='dexy',
packages=find_packages(),
url='http://dexy.it',
version=DEXY_VERSION
)
|
trafaret/dataerror.py
|
jona-sassenhagen/trafaret
| 259 |
73485
|
<filename>trafaret/dataerror.py
from .lib import _empty, STR_TYPES
class DataError(ValueError):
"""
    Error that preserves the data it was raised for.
    error can be a message, or None if the error was raised in child validators.
    The validated value can be anything.
"""
__slots__ = ['error', 'name', 'value', 'trafaret', 'code']
error_code = 'unknown'
def __init__(self, error=None, name=None, value=_empty, trafaret=None, code=None):
"""
:attribute error: can be a string or a dict[string, dataerror]
:attribute name:
:attribute value: validated value that leads to this error
:attribute trafaret: trafaret raised error
:attribute code: code for error, like `value_is_too_big`
"""
if not isinstance(error, STR_TYPES + (dict, )):
raise RuntimeError('Only str or dict is supported, got %r' % error)
self.error = error
self.name = name
self.value = value
self.trafaret = trafaret
self.code = code or self.__class__.error_code
# if self.code == 'unknown':
# raise RuntimeError()
def __str__(self, value=False):
if value and self.value != _empty:
return '%s, got %r' % (str(self.error), self.value)
else:
return str(self.error)
def __repr__(self):
return 'DataError(%r)' % str(self)
def to_struct(self, value=False):
if isinstance(self.error, dict):
return {
'code': self.code,
'nested': dict(
(k, v.to_struct(value=value) if isinstance(v, DataError) else v)
for k, v in self.error.items()
),
}
return {
'code': self.code,
'message': self.__str__(value=value),
}
def as_dict(self, value=False):
"""Use `to_struct` if need consistency"""
if not isinstance(self.error, dict):
return self.__str__(value=value)
return dict(
(k, v.as_dict(value=value) if isinstance(v, DataError) else v)
for k, v in self.error.items()
)
|
train_sppe/src/predict/annot/coco_minival.py
|
tech-life-hacking/AlphaPose
| 153 |
73535
|
<reponame>tech-life-hacking/AlphaPose<filename>train_sppe/src/predict/annot/coco_minival.py
# -----------------------------------------------------
# Copyright (c) Shanghai Jiao Tong University. All rights reserved.
# Written by <NAME> (<EMAIL>)
# -----------------------------------------------------
import os
import h5py
import torch
import torch.utils.data as data
from utils.img import (load_image, cropBox)
from opt import opt
class Mscoco_minival(data.Dataset):
def __init__(self, annoSet='coco-minival-images-newnms/test-dev'):
self.img_folder = '../data/coco/images' # root image folders
self.annot = dict()
# Read in annotation information from hdf5 file
tags = ['xmin', 'ymin', 'xmax', 'ymax']
with h5py.File('./predict/annot/' + annoSet + '.h5', 'r') as a:
for tag in tags:
self.annot[tag] = a[tag][:]
# Load in image file names
with open('./predict/annot/' + annoSet + '_images.txt', 'r') as f:
self.images = f.readlines()
self.images = list(map(lambda x: x.strip('\n'), self.images))
assert len(self.images) == self.annot['xmin'].shape[0]
self.size = len(self.images)
self.flipRef = ((2, 3), (4, 5), (6, 7),
(8, 9), (10, 11), (12, 13),
(14, 15), (16, 17))
self.year = 2017
def __getitem__(self, index):
if self.year == 2014:
imgname = self.images[index]
else:
imgname = self.images[index].split('_')[2]
img_path = os.path.join(self.img_folder, imgname)
img = load_image(img_path)
ori_img = img.clone()
img[0].add_(-0.406)
img[1].add_(-0.457)
img[2].add_(-0.480)
imght = img.size(1)
imgwidth = img.size(2)
upLeft = torch.Tensor(
(float(self.annot['xmin'][index]), float(self.annot['ymin'][index])))
bottomRight = torch.Tensor(
(float(self.annot['xmax'][index]), float(self.annot['ymax'][index])))
ht = bottomRight[1] - upLeft[1]
width = bottomRight[0] - upLeft[0]
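        # Pad the detected box before cropping (20% for wide boxes, 30% for narrow ones)
        # so the crop keeps some margin around the detection.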
if width > 100:
scaleRate = 0.2
else:
scaleRate = 0.3
upLeft[0] = max(0, upLeft[0] - width * scaleRate / 2)
upLeft[1] = max(0, upLeft[1] - ht * scaleRate / 2)
bottomRight[0] = max(
min(imgwidth - 1, bottomRight[0] + width * scaleRate / 2), upLeft[0] + 5)
bottomRight[1] = max(
min(imght - 1, bottomRight[1] + ht * scaleRate / 2), upLeft[1] + 5)
inp = cropBox(img, upLeft, bottomRight, opt.inputResH, opt.inputResW)
ori_inp = cropBox(ori_img, upLeft, bottomRight,
opt.inputResH, opt.inputResW)
metaData = (
upLeft,
bottomRight,
ori_inp
)
box = torch.zeros(4)
box[0] = upLeft[0]
box[1] = upLeft[1]
box[2] = bottomRight[0]
box[3] = bottomRight[1]
return inp, box, imgname, metaData
def __len__(self):
return self.size
|
python/ovs/unixctl/__init__.py
|
noobcoderT/SDN-openvswitch-2.3.1
| 269 |
73552
|
<filename>python/ovs/unixctl/__init__.py
# Copyright (c) 2011, 2012 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import types
import ovs.daemon
import ovs.dirs
import ovs.util
commands = {}
strtypes = types.StringTypes
class _UnixctlCommand(object):
def __init__(self, usage, min_args, max_args, callback, aux):
self.usage = usage
self.min_args = min_args
self.max_args = max_args
self.callback = callback
self.aux = aux
def _unixctl_help(conn, unused_argv, unused_aux):
reply = "The available commands are:\n"
command_names = sorted(commands.keys())
for name in command_names:
reply += " "
usage = commands[name].usage
if usage:
reply += "%-23s %s" % (name, usage)
else:
reply += name
reply += "\n"
conn.reply(reply)
def command_register(name, usage, min_args, max_args, callback, aux):
""" Registers a command with the given 'name' to be exposed by the
UnixctlServer. 'usage' describes the arguments to the command; it is used
only for presentation to the user in "help" output.
'callback' is called when the command is received. It is passed a
UnixctlConnection object, the list of arguments as unicode strings, and
'aux'. Normally 'callback' should reply by calling
UnixctlConnection.reply() or UnixctlConnection.reply_error() before it
returns, but if the command cannot be handled immediately, then it can
defer the reply until later. A given connection can only process a single
request at a time, so a reply must be made eventually to avoid blocking
that connection."""
assert isinstance(name, strtypes)
assert isinstance(usage, strtypes)
assert isinstance(min_args, int)
assert isinstance(max_args, int)
assert isinstance(callback, types.FunctionType)
if name not in commands:
commands[name] = _UnixctlCommand(usage, min_args, max_args, callback,
aux)
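# Illustrative registration (hypothetical callback, not part of the original module):
#
#   def _version_cb(conn, argv, aux):
#       conn.reply("%s\n" % aux)
#   command_register("version", "", 0, 0, _version_cb, "1.0.0")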
def socket_name_from_target(target):
assert isinstance(target, strtypes)
if target.startswith("/"):
return 0, target
pidfile_name = "%s/%s.pid" % (ovs.dirs.RUNDIR, target)
pid = ovs.daemon.read_pidfile(pidfile_name)
if pid < 0:
return -pid, "cannot read pidfile \"%s\"" % pidfile_name
return 0, "%s/%s.%d.ctl" % (ovs.dirs.RUNDIR, target, pid)
command_register("help", "", 0, 0, _unixctl_help, None)
|
recipes/Python/578637_Wigle_wifi/recipe-578637.py
|
tdiprima/code
| 2,023 |
73579
|
<filename>recipes/Python/578637_Wigle_wifi/recipe-578637.py
from uuid import getnode
import re
import requests
class WigleAgent():
def __init__(self, username, password):
self.agent(username, password)
self.mac_address()
def get_lat_lng(self, mac_address=None):
if mac_address == None:
mac_address = self.mac_address
if '-' in mac_address:
mac_address = mac_address.replace('-', ':')
try:
self.query_response = self.send_query(mac_address)
response = self.parse_response()
except IndexError:
response = 'MAC location not known'
return response
def agent(self, username, password):
self.agent = requests.Session()
self.agent.post('https://wigle.net/api/v1/jsonLogin',
data={'credential_0': username,
'credential_1': password,
'destination': '/https://wigle.net/'})
def mac_address(self):
mac = hex(getnode())
mac_bytes = [mac[x:x+2] for x in xrange(0, len(mac), 2)]
self.mac_address = ':'.join(mac_bytes[1:6])
def send_query(self, mac_address):
response = self.agent.post(url='https://wigle.net/api/v1/jsonLocation',
data={'netid': mac_address,
'Query2': 'Query'})
return response.json()
def parse_response(self):
lat = self.get_lat()
lng = self.get_lng()
return lat, lng
def get_lat(self):
resp_lat = self.query_response['result'][0]['locationData'][0]['latitude']
return float(resp_lat)
def get_lng(self):
resp_lng = self.query_response['result'][0]['locationData'][0]['longitude']
return float(resp_lng)
if __name__ == "__main__":
wa = WigleAgent('your-username', 'your-key')
print wa.get_lat_lng('00:1C:0E:42:79:43')
|
users/migrations/0017_auto_20200712_1559.py
|
ujlbu4/vas3k.club
| 496 |
73598
|
# Generated by Django 3.0.4 on 2020-07-12 15:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0016_auto_20200712_1557'),
]
operations = [
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(max_length=254, unique=True),
),
migrations.RunSQL("""
CREATE OR REPLACE FUNCTION generate_random_hash(int)
RETURNS text
AS $$
SELECT array_to_string(
ARRAY (
SELECT substring(
'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!#*+./:<=>?@[]()^_~'
FROM (random() * 72)::int FOR 1)
FROM generate_series(1, $1) ), '' )
$$ LANGUAGE sql;
"""),
migrations.RunSQL("""
update users set secret_hash = generate_random_hash(16);
"""),
migrations.RunSQL("""
drop function generate_random_hash(int);
"""),
]
|
computer_science/algorithms/recursion/fibonacci/fibonacci.py
|
LeandroTk/Algorithms
| 205 |
73606
|
<gh_stars>100-1000
# Fibonacci Sequence: 0 1 1 2 3 5 8 13 ...
def fibonacci(num):
if num == 1:
return 0
if num == 2:
return 1
return fibonacci(num-1) + fibonacci(num-2)
print(fibonacci(1))
print(fibonacci(2))
print(fibonacci(3))
print(fibonacci(4))
print(fibonacci(5))
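# Illustrative addition (not in the original file): the naive recursion above is exponential;
# an lru_cache-memoized variant with the same 1-indexed convention runs in linear time.
from functools import lru_cache
@lru_cache(maxsize=None)
def fibonacci_cached(num):
    if num <= 2:
        return num - 1
    return fibonacci_cached(num - 1) + fibonacci_cached(num - 2)
print(fibonacci_cached(30))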
|
src/openue/models/__init__.py
|
ikutalilas/OpenUE
| 461 |
73622
|
<gh_stars>100-1000
from .model import *
|
setup.py
|
stargz/autoremove-torrents
| 437 |
73661
|
#-*- coding:UTF-8 -*-
from setuptools import setup, find_packages
from autoremovetorrents.version import __version__
from autoremovetorrents.compatibility.disk_usage_ import SUPPORT_SHUTIL
from autoremovetorrents.compatibility.open_ import open_
setup(name = 'autoremove-torrents',
version = __version__,
description = 'Automatically remove torrents according to your strategies.',
long_description = open_('README.rst', 'r', encoding='utf-8').read(),
classifiers = [
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Topic :: Utilities'
], # Get classifiers from https://pypi.org/pypi?%3Aaction=list_classifiers
keywords = 'python autoremove torrent',
author = 'jerrymakesjelly',
author_email = '<EMAIL>',
url = 'https://github.com/jerrymakesjelly/autoremove-torrents',
license = 'MIT',
packages = find_packages(),
include_package_data = True,
zip_safe = True,
install_requires = [
'deluge-client',
'enum34',
'ply',
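        # psutil is listed only when SUPPORT_SHUTIL is false, i.e. when shutil-based
        # disk usage reporting is unavailable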
'' if SUPPORT_SHUTIL else 'psutil',
'pyyaml',
'requests',
],
entry_points = {
'console_scripts':[
'autoremove-torrents = autoremovetorrents.main:main'
]
}
)
|
basic/demo_time.py
|
708yamaguchi/MaixPy_scripts
| 485 |
73679
|
<reponame>708yamaguchi/MaixPy_scripts
import time
import machine
print(time.time())
t1 = time.localtime(546450051)
print('t1', t1)
t2 = time.mktime(t1)
print('t2', t2)
print(time.time())
time.set_time(t1)
print(time.time())
time.sleep(1)
print(time.localtime(time.time()))
'''
raw REPL; CTRL-B to exit
>OK
74
t1 (2017, 4, 25, 15, 40, 51, 1, 115)
t2 546450051
546450065
546450051
(2017, 4, 25, 15, 40, 52, 1, 115)
>
MicroPython v0.5.1-136-g039f72b6c-dirty on 2020-11-18; Sipeed_M1 with kendryte-k210
Type "help()" for more information.
>>>
>>>
'''
|
moshmosh/__init__.py
|
prendradjaja/moshmosh
| 114 |
73690
|
<filename>moshmosh/__init__.py<gh_stars>100-1000
from .ast_compat import ast
from .extension_register import *
from .extensions import template_python
from .extensions import lazy_import
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SyntaxWarning)
from .extensions import pattern_matching
from .extensions import scoped_operators
from .extensions import pipelines
from .extensions import quick_lambdas
|
asreader/text_comprehension/eval/fusion.py
|
rkadlec/asreader
| 113 |
73694
|
<gh_stars>100-1000
from __future__ import division
import argparse
import glob
import numpy
import scipy.optimize as opt
import os
import pickle
import re
"""
This script computes ensemble predictions based on multiple ASReader models.
In an optional first step it loads a dumped model and generates predictions from a validation and test dataset.
Subsequently it combines the predictions of several models using one of three methods
* AverageAll - the ensemble prediction is a mean of all the supplied single-model predictions
* pBest - sorts the candidate models by validation accuracy and selects the best proportion p of models to form the ensemble
* AddImprover - sorts the candidate models by validation accuracy and then tries adding them to the ensemble in that order
keeping each model in the ensemble only if it improves its val. accuracy
Validation and test accuracies are printed for the ensemble and the best single model.
typical usage:
python fusion.py -pr "out_dir/best_predictions/*.y_hat_valid" -o out_dir/best_predictions/fusion.y_hat -t foo --fusion_method AverageAll
where the best_predictions directory should contain predictions selected by the copyBestPredictions.py script
"""
def accuracy_k_best(probas, mute=False, k_vals=[1,2,5]):
"""
Gives the percentage of predictions that have the correct answer among the k most likely suggested answers for k=[1,2,5]
:param probas: a list of numpy arrays, each containing a distribution of probabilities over candidates answers for one example
:param mute: if True, stops the function from printing the accuracies into std out
:param k_vals: values
:return: an array of recall@k for k=1,2,5
"""
    recall_k = {}
    for k in k_vals:
        recall_k[k] = 0
        line = 0
        for row in probas:
            line += 1
            indices = numpy.argpartition(row, -k)[-k:]  # Gives indices of k highest probabilities
            recall_k[k] += (0 in indices)  # Uses the fact that correct answer is at index 0.
        recall_k[k] /= len(probas)
        if not mute:
            print 'recall@%d' % k, recall_k[k]
    return recall_k
def accuracy(probas):
"""
Returns the proportion of predictions that assign the highest probability to the correct answer
:param probas: a list of numpy arrays, each containing a distribution of probabilities over candidates answers for one example
:return: accuracy
"""
ncorrect = 0
for row in probas:
# We use the convention of having the ground truth answer at index 0
ncorrect += (numpy.argmax(row) == 0)
return ncorrect / len(probas)
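# Illustrative check (not part of the original script): with the ground-truth answer at
# index 0, [0.6, 0.3, 0.1] is counted correct and [0.2, 0.7, 0.1] is not, so
#   accuracy([numpy.array([0.6, 0.3, 0.1]), numpy.array([0.2, 0.7, 0.1])]) == 0.5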
def dump_model_predictions(model_filenames, input_data, suffix="y_hat", regenerate=False):
"""
Loops through model files and uses the cbt_memory_pointer script to generate the y_hat predictions from each model
:param model_filenames: list of filenames of saved models
:param input_data: the dataset to which the models will be applied to generate predictions
:param suffix: suffix of the generated prediction files (the rest of the filename is the same as the model filename
:param regenerate: if true the model rewrites the prediction files even if they're already present
:return: list of filenames of the generated predictions
"""
prediction_filenames = []
for model_file in model_filenames:
y_hat_file = model_file + "." + suffix
if not os.path.isfile(y_hat_file) or regenerate:
load_model_command = 'python ' + args.blocks_nlp_path + 'as_reader.py --load_model ' + model_file + ' --y_hat_out_file ' + y_hat_file + ' --files_to_visualize ' + input_data + ' --no_html'
if args.cnn:
load_model_command += ' --dataset_type cnn'
os.system(load_model_command)
prediction_filenames.append(y_hat_file)
return prediction_filenames
def adjust_length(pred_line, lineN, max_length):
"""
Messy function that handles problems that arise if predictions for the same example have different lengths
which may happen due to using a different batch size for each model. Normally it shouldn't be needed.
:param pred_line:
:param lineN:
:param max_length:
:return:
"""
    pred_line = numpy.trim_zeros(pred_line, trim='b')
    # The following takes care of lines that are shorter than the ones from previous files due to 0-trimming
    if lineN > len(max_length):
        max_length = numpy.append(max_length, len(pred_line))
    while len(pred_line) < max_length[lineN - 1]:
        pred_line = numpy.append(pred_line, 0)
        # print "Tail zero added to line "+str(lineN)+" of "+pred_file
    if len(pred_line) > max_length[lineN - 1]:
        print '!!! Warning: Line ' + str(lineN) + ' is longer than the corresponding lines of previous files.'
        max_length[lineN - 1] = len(pred_line)
    return pred_line, max_length
def predictions_from_csv(fh, max_length):
"""
Loads single model predictions from a csv file where lines may differ in length
:param fh: file handle to the csv file
:return: list of numpy arrays representing the predictions of individual examples
"""
preds = list()
lineN = 0
for line in fh:
lineN += 1
pred_line = numpy.fromstring(line, sep=', ')
if (args.trim_zeros):
# If different batch sizes are used for the fused models, the prediction vectors need to be adjusted
pred_line, max_length = adjust_length(pred_line, lineN, max_length)
preds.append(pred_line)
return preds, max_length
def load_all_predictions(prediction_filenames):
# list of model predictions
all_preds = []
# list of corresponding accuracies
model_accuracies=[]
# the length of the longest prediction vector for each training example
max_lengths = numpy.array([0])
for pred_file in prediction_filenames:
pred_fh = open(pred_file, 'r')
# Predictions can be saved either in a csv or a pickle file
if args.prediction_format == 'csv':
preds, max_lengths =predictions_from_csv(pred_fh, max_lengths)
else:
preds = pickle.load(pred_fh)
pred_fh.close()
print "Results for " + pred_file
acc = accuracy(preds)
model_accuracies.append(acc)
print 'Accuracy: ' + str(acc)
all_preds.append(preds)
return all_preds, model_accuracies
def fuse_predictions(prediction_filenames, weights=None):
"""
reads the y_hat files and averages all the predictions
:param prediction_filenames:
:param weights: a list of weights given to the individual predictions within the fusion (defaults to equel weights)
:return:
"""
all_preds, model_accuracies = load_all_predictions(prediction_filenames)
print
print "Ensemble (equal weights): "
ensemble_accuracy = accuracy(numpy.mean(all_preds, 0))
print "Accuracy:\t"+str(ensemble_accuracy)
# If weights were provided, calculate the prediction of a weighted ensemble
averaged = numpy.average(all_preds, axis=0, weights=weights)
if weights is not None:
print "Weighted ensemble: "
print "Accuracy:\t"+accuracy(averaged)
return {'averaged': averaged, 'model_preds': all_preds, 'ensemble_acc': ensemble_accuracy,
'model_accuracies': model_accuracies}
def greedy_add(prediction_filenames, greedy_iterations=1):
"""
Builds up an ensemble by starting with the best validation model and then adding each model only if it improves
the ensemble performance
:param prediction_filenames: List of files containing candidate models' validation predictions
:param greedy_iterations: int how many times each candidate model is considered for adding into the ensemble
:return:
"""
all_preds, model_accuracies = load_all_predictions(prediction_filenames)
# Sort models by validation accuracy
sorted_indices = numpy.argsort(model_accuracies)[::-1]
# Indices of models included in the ensemble
ensemble = numpy.array([], dtype='i')
ensemble_accuracy = 0
# List of predictions of models included in the ensemble
member_predictions = []
for _ in range(greedy_iterations):
for i in sorted_indices:
# Create a candidate ensemble and test whether it's better than the current ensemble
if len(member_predictions) == 0:
candidate_prediction = all_preds[i]
else:
candidate_member_predictions = member_predictions + [all_preds[i]]
candidate_prediction = numpy.mean(candidate_member_predictions, 0)
candidate_accuracy = accuracy(candidate_prediction)
if candidate_accuracy > ensemble_accuracy:
ensemble = numpy.hstack([ensemble, [i]])
ensemble_accuracy = candidate_accuracy
member_predictions.append(all_preds[i])
print
print 'Predictions included in the ensemble and their validation accuracies:'
for i in ensemble:
print str(model_accuracies[i]) + "\t" + prediction_filenames[i]
best_single_valid_acc = model_accuracies[ensemble[0]]
print
print 'Ensemble accuracy: ' + str(ensemble_accuracy)
ensemble_pred = numpy.mean(member_predictions, 0)
return {'ensemble_prediction': ensemble_pred, 'ensemble_indices': ensemble,
'ens_member_predictions': member_predictions, 'best_single_valid_acc': best_single_valid_acc,
'ensemble_acc': ensemble_accuracy}
def p_best_models(prediction_filenames, p=0.7):
"""
Sorts models by validation accuracy and forms the ensemble from the best ones. A proportion p of models is included.
:param prediction_filenames:
:param p: proportion of the provided models that is included in the ensemble
:return:
"""
all_preds, model_accuracies = load_all_predictions(prediction_filenames)
# Sort models by validation accuracy
sorted_indices = numpy.argsort(model_accuracies)[::-1]
ensemble_size = int(p * len(sorted_indices))
ensemble = sorted_indices[0:ensemble_size]
# List of predictions of models included in the ensemble
member_predictions = []
for i in ensemble:
member_predictions.append(all_preds[i])
ensemble_pred = numpy.mean(member_predictions, 0) # the ensemble prediction
print
print 'Predictions included in the ensemble and their validation accuracies:'
for i in ensemble:
print str(model_accuracies[i]) + "\t" + prediction_filenames[i]
best_single_valid_acc = model_accuracies[ensemble[0]]
print
ensemble_accuracy = accuracy(ensemble_pred)
print 'Ensemble accuracy: ' + str(ensemble_accuracy)
ensemble_pred = numpy.mean(member_predictions, 0)
# print 'Worse case: ' + str(accuracy_k_best(ensemble_pred)[1])
return {'ensemble_prediction': ensemble_pred, 'ensemble_indices': ensemble,
'ens_member_predictions': member_predictions, 'best_single_valid_acc': best_single_valid_acc,
'ensemble_acc': ensemble_accuracy}
def optimize_weights(ensemble_indices, ens_member_predictions):
"""
Optimizes the weights of models in the ensemble using the Constrained Optimization by Linear Approximation
(COBYLA) method to maximize the validation accuracy
:param ensemble_indices: list of indices of individual models that should be included in the optimized ensemble
:param ens_member_predictions: list of prediction lists of the individual models
:return: optimal weights, predictions of the optimal ensemble
"""
def weight_accuracy(weights):
# Objective function (negative accuracy) to be minimized
averaged_pred = numpy.average(ens_member_predictions, axis=0, weights=weights)
return -accuracy(averaged_pred)
opt_result = opt.minimize(weight_accuracy, numpy.ones(len(ensemble_indices)) / len(ensemble_indices),
method='COBYLA',
constraints=({'type': 'ineq', 'fun': lambda x: 1 - sum(x)}))
averaged_pred = numpy.average(ens_member_predictions, axis=0, weights=opt_result['x'])
print 'Optimized ensemble accuracy: '
print accuracy(averaged_pred)
print 'Optimal weights: ' + str(opt_result['x'])
return opt_result['x'], averaged_pred
def predictions2csv(predictions, fh):
"""
Dump predictions in a csv format using filehandle fh
"""
fh.write("\n".join(",".join(numpy.char.mod('%f', row)) for row in predictions))
"""
Parse command line arguments
"""
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Utility for fusing multiple classifier results.")
parser.add_argument('-m', '--models', nargs="+", default=None,
help='files containing models to fuse')
parser.add_argument('-mr', '--models_regexp', default=None,
help='regexp to match model files to fuse')
parser.add_argument('-d', '--input_data', default=None,
help='Input data to which we apply the models')
parser.add_argument('-t', '--test_data', default=None,
help='Test data for the ensemble')
parser.add_argument('-p', '--predictions', nargs="+", default=None,
help='files containing previously generated predictions')
parser.add_argument('-pr', '--prediction_regexp', default=None,
help='regexp to match prediction files to fuse')
parser.add_argument('-o', '--output', default=None,
help='file where fused predictions will be saved')
parser.add_argument('--blocks_nlp_path', default='~/dev/blocks-nlp/text_comprehension/',
help='absolute path of the directory containing as_reader.py ending with "(...)/blocks-nlp/text_comprehension/"')
parser.add_argument('--fusion_method', default='AddImprover', choices=['AddImprover', 'AverageAll', 'pBest'],
help='Choose the method of fusing models')
parser.add_argument('--greedy_iterations', type=int, default=1,
help='How many times the greedy algorithm iterates over the candidate models.')
parser.add_argument('--regenerate', action='store_true',
help='Force models to regenerate predictions even if they are already present in the directory')
parser.add_argument('--pred_file_suffix', default="",
help="Appends an additional suffix to prediction files - useful for regenerating predictions while keeping the old ones.")
parser.add_argument('--cnn', action='store_true', help='Indicates that datasets are in the CNN format.')
parser.add_argument('--optimize_weights', action='store_true',
help='Optimize weights of ensemble models to maximize validation accuracy.')
parser.add_argument('-f', '--prediction_format', default='csv', choices=['csv', 'pickle'],
help='format of the saved predictions (at the moment cannot generate csv from models)')
parser.add_argument('-es', '--ensemble_size', type=float, default=0.7,
help='proportion of models to be included in the ensemble (if relevant to the fusion method used)')
parser.add_argument('--trim_zeros', action='store_true',
help='Trims tail zeros of the predictions. Don\'t use for CBT.')
args = parser.parse_args()
# Filenames of dumped models to be used
to_fuse = []
# Add the model files from both arguments to a common array:
if args.models_regexp:
to_fuse += glob.glob(args.models_regexp)
if args.models:
to_fuse += args.models
print "Models to be fused:"
for model in enumerate(to_fuse):
print model
print
# Save model predictions to disk and retain their paths
prediction_files = dump_model_predictions(to_fuse, args.input_data, 'y_hat_valid' + args.pred_file_suffix)
# Add previously generated prediction files if specified
if args.prediction_regexp:
prediction_files += glob.glob(args.prediction_regexp)
if args.predictions:
prediction_files += args.predictions
# Build the ensemble and generate the fused prediction:
if args.fusion_method == 'AddImprover':
result = greedy_add(prediction_files, greedy_iterations=args.greedy_iterations)
ensemble_indices = result['ensemble_indices']
ens_member_predictions = result['ens_member_predictions']
best_single_valid = result['best_single_valid_acc']
if (args.optimize_weights):
ens_weights, fused = optimize_weights(ensemble_indices, ens_member_predictions)
else:
fused = result['ensemble_prediction']
ens_weights = None
elif args.fusion_method == 'pBest':
result = p_best_models(prediction_files, args.ensemble_size)
ens_member_predictions = result['ens_member_predictions']
ensemble_indices = result['ensemble_indices']
best_single_valid = result['best_single_valid_acc']
if args.optimize_weights:
ens_weights, fused = optimize_weights(ensemble_indices, ens_member_predictions)
else:
fused = result['ensemble_prediction']
ens_weights = None
elif args.fusion_method == 'AverageAll':
result = fuse_predictions(prediction_files)
ens_member_predictions = result['model_preds']
ensemble_indices = numpy.argsort(result['model_accuracies'])[::-1]
best_single_valid = max(result['model_accuracies'])
if args.optimize_weights:
ens_weights, fused = optimize_weights(ensemble_indices, ens_member_predictions)
else:
fused = result['averaged']
ens_weights = None
ensemble_valid = result['ensemble_acc']
print "Ensemble size: " + str(len(ensemble_indices))
# Optionally, save the fusion predictions
if args.output:
output_fh = open(args.output, 'w')
if args.prediction_format == 'csv':
predictions2csv(fused,output_fh)
else:
pickle.dump(fused, output_fh)
output_fh.close()
print "Fused validation predictions saved to " + args.output
# Generate prediction files for ensemble models from test data
if args.test_data:
print
print '___________ Applying ensemble models to test data __________'
# Create a list of filenames of dumped models included in the ensemble
ensemble_models = []
for i in ensemble_indices:
ensemble_models += [re.sub('\.y_hat(_valid)?' + args.pred_file_suffix + '$', '', prediction_files[i])]
# Use these models to generate test predictions
prediction_files = dump_model_predictions(ensemble_models, args.test_data, 'y_hat_test' + args.pred_file_suffix)
# Fuse these predictions
result = fuse_predictions(prediction_files, ens_weights)
ensemble_test_prediction = result['averaged'].squeeze()
best_single_test = result['model_accuracies'][0]
ensemble_test = result['ensemble_acc']
# If required, dump the ensemble test prediction
if args.output:
output_fh = open(args.output + '_test', 'w')
if args.prediction_format == 'csv':
predictions2csv(ensemble_test_prediction,output_fh)
else:
pickle.dump(ensemble_test_prediction, output_fh)
output_fh.close()
print
print "Fused test predictions saved to " + args.output + '_test'
print
print "Summary of results (model - valid. acc. - test acc.):"
print "Best single model:\t" + str(best_single_valid) + "\t" + str(best_single_test)
print args.fusion_method + " Ensemble:\t" + str(ensemble_valid) + "\t" + str(ensemble_test)
|
mobilenext/codebase/models/model.py
|
yitu-opensource/daquan.zhou-intern-sg
| 153 |
73695
|
import math
import torch
from torch import nn
from torch.nn import functional as F
from .activations import sigmoid, HardSwish, Swish
from .utils_i2rnet import (
relu_fn,
round_filters,
round_repeats,
drop_connect,
get_same_padding_conv2d,
Conv2dDynamicSamePadding,
get_model_params,
efficientnet_params,
load_pretrained_weights,
)
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
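# Illustrative values (not part of the original file): _make_divisible(37, 8) -> 40, while
# _make_divisible(41, 32) first rounds down to 32, then the 10% guard bumps it back up to 64.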
class MBConvBlock(nn.Module):
"""
Mobile Inverted Residual Bottleneck Block
Args:
block_args (namedtuple): BlockArgs, see above
global_params (namedtuple): GlobalParam, see above
Attributes:
has_se (bool): Whether the block contains a Squeeze and Excitation layer.
"""
def __init__(self, block_args, global_params):
super().__init__()
self._block_args = block_args
self._bn_mom = 1 - global_params.batch_norm_momentum
self._bn_eps = global_params.batch_norm_epsilon
self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
self.id_skip = block_args.id_skip # skip connection and drop connect
# Get static or dynamic convolution depending on image size
Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
# Conv2d = nn.Conv2d
padding = self._block_args.kernel_size //2
# Conv2d = Conv2dDynamicSamePadding
# Expansion phase
inp = self._block_args.input_filters # number of input channels
oup = self._block_args.input_filters * self._block_args.expand_ratio # number of output channels
if self._block_args.expand_ratio != 1:
self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# Depthwise convolution phase
k = self._block_args.kernel_size
s = self._block_args.stride
self._depthwise_conv = Conv2d(
in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
kernel_size=k, stride=s, bias=False, padding = padding)
self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# Squeeze and Excitation layer, if desired
if self.has_se:
num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio))
self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)
self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)
# Output phase
final_oup = self._block_args.output_filters
self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
def forward(self, inputs, drop_connect_rate=None):
"""
:param inputs: input tensor
:param drop_connect_rate: drop connect rate (float, between 0 and 1)
:return: output of block
"""
# Expansion and Depthwise Convolution
x = inputs
if self._block_args.expand_ratio != 1:
x = relu_fn(self._bn0(self._expand_conv(inputs)))
x = relu_fn(self._bn1(self._depthwise_conv(x)))
# Squeeze and Excitation
if self.has_se:
x_squeezed = F.adaptive_avg_pool2d(x, 1)
x_squeezed = self._se_expand(relu_fn(self._se_reduce(x_squeezed)))
x = torch.sigmoid(x_squeezed) * x
x = self._bn2(self._project_conv(x))
# Skip connection and drop connect
input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:
if drop_connect_rate:
x = drop_connect(x, p=drop_connect_rate, training=self.training)
x = x + inputs # skip connection
return x
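# Rough usage sketch (illustrative; BlockArgs/GlobalParams come from utils_i2rnet and only
# the fields read above are assumed to exist):
#   block = MBConvBlock(block_args, global_params)
#   out = block(torch.randn(1, block_args.input_filters, 112, 112), drop_connect_rate=0.2)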
class I2RConvBlock(nn.Module):
"""
Mobile Inverted Residual Bottleneck Block
Args:
block_args (namedtuple): BlockArgs, see above
global_params (namedtuple): GlobalParam, see above
Attributes:
has_se (bool): Whether the block contains a Squeeze and Excitation layer.
"""
def __init__(self, block_args, global_params):
super().__init__()
self._block_args = block_args
self._bn_mom = 1 - global_params.batch_norm_momentum
self._bn_eps = global_params.batch_norm_epsilon
self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
self.id_skip = block_args.id_skip # skip connection and drop connect
# Get static or dynamic convolution depending on image size
Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
# Conv2d = nn.Conv2d
padding = self._block_args.kernel_size //2
# Expansion phase
inp = self._block_args.input_filters # number of input channels
oup = self._block_args.input_filters // self._block_args.expand_ratio # number of output channels
final_oup = self._block_args.output_filters
self.inp, self.final_oup = inp, final_oup
self.identity = False
if oup < oup / 6.:
oup = math.ceil(oup / 6.)
oup = _make_divisible(oup,16)
k = self._block_args.kernel_size
s = self._block_args.stride[0] if isinstance(self._block_args.stride,list) else self._block_args.stride
if self._block_args.expand_ratio == 2:
self._project_conv = Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups=inp)
self._bn0 = nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps)
self._linear1 = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
self._linear2 = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
self._expand_conv = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False,
stride = s, groups = final_oup)
self._bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
elif inp != final_oup and s == 1:
self._project_conv = None
self._expand_conv = None
self._linear1 = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
self._linear2 = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
elif inp != final_oup and s == 2:
self._project_conv = None
self._linear1 = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
self._linear2 = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
self._expand_conv = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False,
stride = s, groups = final_oup)
self._bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
else:
# if inp == final_oup:
self._project_conv = Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups = inp)
self._bn0 = nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps)
self._expand_conv = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup)
self._bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# if not (self._block_args.expand_ratio == 2):
self.identity = True
self._linear1 = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
self._linear2 = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps) # Depthwise convolution phase
# self._depthwise_conv = Conv2d(
# in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
# kernel_size=k, stride=s, bias=False)
# self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# Squeeze and Excitation layer, if desired
if self.has_se:
num_squeezed_channels = max(1, int(final_oup / self._block_args.expand_ratio * self._block_args.se_ratio))
self._se_reduce = Conv2d(in_channels=final_oup, out_channels=num_squeezed_channels, kernel_size=1)
self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=final_oup, kernel_size=1)
# # Output phase
# self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
# self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
def forward(self, inputs, drop_connect_rate=None):
"""
:param inputs: input tensor
:param drop_connect_rate: drop connect rate (float, between 0 and 1)
:return: output of block
"""
# Expansion and Depthwise Convolution
# import pdb;pdb.set_trace()
x = inputs
# NOTE:remove the first 3x3 conv to reduce running mem, need to verfy the performance
if self._project_conv is not None:
x = relu_fn(self._bn0(self._project_conv(inputs)))
x = self._bn1(self._linear1(x))
x = relu_fn(self._bn2(self._linear2(x)))
if self._expand_conv is not None:
x = self._bn3(self._expand_conv(x))
# Squeeze and Excitation
if self.has_se:
x_squeezed = F.adaptive_avg_pool2d(x, 1)
x_squeezed = self._se_expand(relu_fn(self._se_reduce(x_squeezed)))
x = torch.sigmoid(x_squeezed) * x
# Skip connection and drop connect
input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
if self.identity and self._block_args.stride == 1 and input_filters == output_filters:
if drop_connect_rate:
x = drop_connect(x, p=drop_connect_rate, training=self.training)
x = x + inputs # skip connection
return x
class MBConvBlockV1(nn.Module):
"""
Mobile Inverted Residual Bottleneck Block
Args:
block_args (namedtuple): BlockArgs, see above
global_params (namedtuple): GlobalParam, see above
Attributes:
has_se (bool): Whether the block contains a Squeeze and Excitation layer.
"""
def __init__(self, block_args, global_params):
super().__init__()
self._block_args = block_args
self._bn_mom = 1 - global_params.batch_norm_momentum
self._bn_eps = global_params.batch_norm_epsilon
self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
self.id_skip = block_args.id_skip # skip connection and drop connect
# Get static or dynamic convolution depending on image size
Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
# Conv2d = nn.Conv2d
# Expansion phase
inp = self._block_args.input_filters # number of input channels
oup = self._block_args.input_filters // self._block_args.expand_ratio # number of output channels
final_oup = self._block_args.output_filters
self.inp, self.final_oup = inp, final_oup
group_1x1 = 1
self.identity = False
if oup < oup / 6.:
oup = math.ceil(oup / 6.)
oup = _make_divisible(oup,16)
oup = _make_divisible(oup,2)
k = self._block_args.kernel_size
s = self._block_args.stride[0] if isinstance(self._block_args.stride,list) else self._block_args.stride
# if self._block_args.expand_ratio != 1:
# self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
# self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
if self._block_args.expand_ratio == 2:
self.features = nn.Sequential(
Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups=inp),
nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps),
Swish(),
#first linear layer
Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False, groups=group_1x1),
nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps),
# sec linear layer
Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False, groups=group_1x1),
nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps),
Swish(),
# expand layer
Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup, stride=s),
nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps),
)
elif inp != final_oup and s == 1:
self.features=nn.Sequential(
Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False),
nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps),
# only two linear layers are needed
Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False),
nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps),
Swish(),
)
elif inp != final_oup and s == 2:
self.features = nn.Sequential(
Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False),
nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps),
Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False),
nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps),
Swish(),
Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup, stride=s),
nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps),
)
else:
self.identity = True
self.features = nn.Sequential(
Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups = inp),
nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps),
Swish(),
Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False, groups=group_1x1),
nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps),
Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False, groups=group_1x1),
nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps),
Swish(),
Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup),
nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps),
)
# Depthwise convolution phase
# self._depthwise_conv = Conv2d(
# in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
# kernel_size=k, stride=s, bias=False)
# self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# Squeeze and Excitation layer, if desired
# import pdb;pdb.set_trace()
if self.has_se:
se_expand_ratio = 1
# num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio * se_expand_ratio))
num_squeezed_channels = max(1, int(final_oup / self._block_args.expand_ratio * self._block_args.se_ratio * se_expand_ratio))
self._se_reduce = Conv2d(in_channels=final_oup, out_channels=num_squeezed_channels, kernel_size=1)
self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=final_oup, kernel_size=1)
# # Output phase
# self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
# self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
def forward(self, inputs, drop_connect_rate=None):
"""
:param inputs: input tensor
:param drop_connect_rate: drop connect rate (float, between 0 and 1)
:return: output of block
"""
# Expansion and Depthwise Convolution
# import pdb;pdb.set_trace()
x = self.features(inputs)
# Squeeze and Excitation
if self.has_se:
x_squeezed = F.adaptive_avg_pool2d(x, 1)
x_squeezed = self._se_expand(relu_fn(self._se_reduce(x_squeezed)))
x = torch.sigmoid(x_squeezed) * x
# Skip connection and drop connect
input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
if self.identity and self._block_args.stride == 1 and input_filters == output_filters:
if drop_connect_rate:
x = drop_connect(x, p=drop_connect_rate, training=self.training)
x = x + inputs # skip connection
return x
class GhostI2RBlock(nn.Module):
"""
Mobile Inverted Residual Bottleneck Block
Args:
block_args (namedtuple): BlockArgs, see above
global_params (namedtuple): GlobalParam, see above
Attributes:
has_se (bool): Whether the block contains a Squeeze and Excitation layer.
"""
def __init__(self, block_args, global_params):
super().__init__()
self._block_args = block_args
self._bn_mom = 1 - global_params.batch_norm_momentum
self._bn_eps = global_params.batch_norm_epsilon
self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
self.id_skip = block_args.id_skip # skip connection and drop connect
group_1x1 = 1
# Get static or dynamic convolution depending on image size
Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
# Conv2d = nn.Conv2d
# Expansion phase
inp = self._block_args.input_filters # number of input channels
oup = self._block_args.input_filters // self._block_args.expand_ratio # number of output channels
final_oup = self._block_args.output_filters
self.inp, self.final_oup = inp, final_oup
self.identity = False
if oup < oup / 6.:
oup = math.ceil(oup / 6.)
oup = _make_divisible(oup,16)
oup = _make_divisible(oup,2)
k = self._block_args.kernel_size
s = self._block_args.stride[0] if isinstance(self._block_args.stride,list) else self._block_args.stride
# apply repeat scheme
self.split_ratio = 2
self.ghost_idx_inp = inp // self.split_ratio
self.ghost_idx_oup = int(final_oup - self.ghost_idx_inp)
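        # "Ghost" split used by the depthwise branches below: half of the depthwise output
        # channels bypass the two 1x1 convolutions and are concatenated back in forward(),
        # so those pointwise layers only need to produce final_oup - inp//2 channels.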
self.inp, self.final_oup, self.s = inp, final_oup, s
# if self._block_args.expand_ratio != 1:
# self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
# self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
if self._block_args.expand_ratio == 2:
# self.features = nn.Sequential(
self.dwise_conv1 = Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups=inp)
self.bn1 = nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps)
self.act = Swish()
#first linear layer
self.project_layer = Conv2d(in_channels=self.ghost_idx_inp, out_channels=oup, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# sec linear layer
self.expand_layer = Conv2d(in_channels=oup, out_channels=self.ghost_idx_oup, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(num_features=self.ghost_idx_oup, momentum=self._bn_mom, eps=self._bn_eps)
# Swish(),
# expand layer
self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup, stride=s)
self.bn4 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# )
elif inp != final_oup and s == 1:
# self.features=nn.Sequential(
self.project_layer = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# only two linear layers are needed
self.expand_layer = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False, groups = group_1x1)
self.bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
self.act = Swish()
# )
elif inp != final_oup and s == 2:
# self.features = nn.Sequential(
self.project_layer = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
self.expand_layer = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
self.act = Swish()
self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup, stride=s)
self.bn4 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# )
else:
self.identity = True
# self.features = nn.Sequential(
self.dwise_conv1=Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups = inp)
self.bn1 = nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps)
self.act = Swish()
self.project_layer = Conv2d(in_channels=self.ghost_idx_inp, out_channels=oup, kernel_size=1, bias=False, groups=group_1x1)
self.bn2 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
self.expand_layer = Conv2d(in_channels=oup, out_channels=self.ghost_idx_oup, kernel_size=1, bias=False, groups=group_1x1)
self.bn3 = nn.BatchNorm2d(num_features=self.ghost_idx_oup, momentum=self._bn_mom, eps=self._bn_eps)
# Swish(),
self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup)
self.bn4 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# )
# Depthwise convolution phase
# self._depthwise_conv = Conv2d(
# in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
# kernel_size=k, stride=s, bias=False)
# self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# Squeeze and Excitation layer, if desired
# import pdb;pdb.set_trace()
if self.has_se:
se_mode = 'small'
if se_mode == 'large':
se_expand_ratio = 0.5
num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio * se_expand_ratio))
else:
se_expand_ratio = 1
num_squeezed_channels = max(1, int(final_oup / self._block_args.expand_ratio * self._block_args.se_ratio * se_expand_ratio))
self._se_reduce = Conv2d(in_channels=final_oup, out_channels=num_squeezed_channels, kernel_size=1)
self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=final_oup, kernel_size=1)
# # Output phase
# self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
# self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
def forward(self, inputs, drop_connect_rate=None):
"""
:param inputs: input tensor
:param drop_connect_rate: drop connect rate (float, between 0 and 1)
:return: output of block
"""
# Expansion and Depthwise Convolution
# import pdb;pdb.set_trace()
# x = self.features(inputs)
if self._block_args.expand_ratio == 2:
# first dwise conv
x = self.act(self.bn1(self.dwise_conv1(inputs)))
# first 1x1 conv
ghost_id = x[:,self.ghost_idx_inp:,:,:]
x = self.bn2(self.project_layer(x[:,:self.ghost_idx_inp,:,:]))
# second 1x1 conv
x = self.act(self.bn3(self.expand_layer(x)))
# generate more features
x = torch.cat([x,ghost_id],dim=1)
# second dwise conv
x = self.bn4(self.dwise_conv2(x))
elif self.inp != self.final_oup and self.s == 1:
# first 1x1 conv
x = self.bn2(self.project_layer(inputs))
# second 1x1 conv
x = self.act(self.bn3(self.expand_layer(x)))
elif self.inp != self.final_oup and self.s == 2:
# first 1x1 conv
x = self.bn2(self.project_layer(inputs))
# second 1x1 conv
x = self.act(self.bn3(self.expand_layer(x)))
# second dwise conv
x = self.bn4(self.dwise_conv2(x))
else:
# first dwise conv
x = self.act(self.bn1(self.dwise_conv1(inputs)))
# first 1x1 conv
ghost_id = x[:,self.ghost_idx_inp:,:,:]
x = self.bn2(self.project_layer(x[:,:self.ghost_idx_inp,:,:]))
# second 1x1 conv
x = self.act(self.bn3(self.expand_layer(x)))
# second dwise conv
x = torch.cat([x,ghost_id],dim=1)
x = self.bn4(self.dwise_conv2(x))
# Squeeze and Excitation
if self.has_se:
x_squeezed = F.adaptive_avg_pool2d(x, 1)
x_squeezed = self._se_expand(relu_fn(self._se_reduce(x_squeezed)))
x = torch.sigmoid(x_squeezed) * x
# Skip connection and drop connect
input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
if self.identity and self._block_args.stride == 1 and input_filters == output_filters:
# import pdb;pdb.set_trace()
if drop_connect_rate:
x = drop_connect(x, p=drop_connect_rate, training=self.training)
x = x + inputs # skip connection
return x
class GhostI2RBlock_change_droppath_pos(nn.Module):
"""
Mobile Inverted Residual Bottleneck Block
Args:
block_args (namedtuple): BlockArgs, see above
global_params (namedtuple): GlobalParam, see above
Attributes:
has_se (bool): Whether the block contains a Squeeze and Excitation layer.
"""
def __init__(self, block_args, global_params):
super().__init__()
self._block_args = block_args
self._bn_mom = 1 - global_params.batch_norm_momentum
self._bn_eps = global_params.batch_norm_epsilon
self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
self.id_skip = block_args.id_skip # skip connection and drop connect
group_1x1 = 1
apply_ghost = True
# Get static or dynamic convolution depending on image size
Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
# Conv2d = nn.Conv2d
# Expansion phase
inp = self._block_args.input_filters # number of input channels
oup = self._block_args.input_filters // self._block_args.expand_ratio # number of output channels
final_oup = self._block_args.output_filters
self.inp, self.final_oup = inp, final_oup
self.identity = False
if oup < oup / 6.:
oup = math.ceil(oup / 6.)
oup = _make_divisible(oup,16)
oup = _make_divisible(oup,2)
k = self._block_args.kernel_size
s = self._block_args.stride[0] if isinstance(self._block_args.stride,list) else self._block_args.stride
if apply_ghost:
# apply repeat scheme
self.split_ratio = 2
self.ghost_idx_inp = inp // self.split_ratio
self.ghost_idx_oup = int(final_oup - self.ghost_idx_inp)
else:
self.ghost_idx_inp = inp
self.ghost_idx_oup = final_oup
self.inp, self.final_oup, self.s = inp, final_oup, s
# if self._block_args.expand_ratio != 1:
# self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
# self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
if self._block_args.expand_ratio == 2:
# self.features = nn.Sequential(
self.dwise_conv1 = Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups=inp)
self.bn1 = nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps)
self.act = Swish()
#first linear layer
self.project_layer = Conv2d(in_channels=self.ghost_idx_inp, out_channels=oup, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# sec linear layer
self.expand_layer = Conv2d(in_channels=oup, out_channels=self.ghost_idx_oup, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(num_features=self.ghost_idx_oup, momentum=self._bn_mom, eps=self._bn_eps)
# Swish(),
# expand layer
self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup, stride=s)
self.bn4 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# )
elif inp != final_oup and s == 1:
# self.features=nn.Sequential(
self.project_layer = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# only two linear layers are needed
self.expand_layer = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False, groups = group_1x1)
self.bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
self.act = Swish()
# )
elif inp != final_oup and s == 2:
# self.features = nn.Sequential(
self.project_layer = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
self.expand_layer = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
self.act = Swish()
self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup, stride=s)
self.bn4 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# )
else:
self.identity = True
# self.features = nn.Sequential(
self.dwise_conv1=Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups = inp)
self.bn1 = nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps)
self.act = Swish()
self.project_layer = Conv2d(in_channels=self.ghost_idx_inp, out_channels=oup, kernel_size=1, bias=False, groups=group_1x1)
self.bn2 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
self.expand_layer = Conv2d(in_channels=oup, out_channels=self.ghost_idx_oup, kernel_size=1, bias=False, groups=group_1x1)
self.bn3 = nn.BatchNorm2d(num_features=self.ghost_idx_oup, momentum=self._bn_mom, eps=self._bn_eps)
# Swish(),
self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup)
self.bn4 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# )
# Depthwise convolution phase
# self._depthwise_conv = Conv2d(
# in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
# kernel_size=k, stride=s, bias=False)
# self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# Squeeze and Excitation layer, if desired
# import pdb;pdb.set_trace()
if self.has_se:
se_expand_ratio = 0.5
num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio * se_expand_ratio))
# num_squeezed_channels = max(1, int(final_oup / self._block_args.expand_ratio * self._block_args.se_ratio * se_expand_ratio))
self._se_reduce = Conv2d(in_channels=final_oup, out_channels=num_squeezed_channels, kernel_size=1)
self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=final_oup, kernel_size=1)
# # Output phase
# self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
# self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
def forward(self, inputs, drop_connect_rate=None):
"""
:param inputs: input tensor
:param drop_connect_rate: drop connect rate (float, between 0 and 1)
:return: output of block
"""
# Expansion and Depthwise Convolution
# import pdb;pdb.set_trace()
# x = self.features(inputs)
input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
if self._block_args.expand_ratio == 2:
# first dwise conv
x = self.act(self.bn1(self.dwise_conv1(inputs)))
# first 1x1 conv
ghost_id = x[:,self.ghost_idx_inp:,:,:]
x = self.bn2(self.project_layer(x[:,:self.ghost_idx_inp,:,:]))
# second 1x1 conv
x = self.act(self.bn3(self.expand_layer(x)))
# generate more features
x = torch.cat([x,ghost_id],dim=1)
if self.identity and self._block_args.stride == 1 and input_filters == output_filters:
if drop_connect_rate:
x = drop_connect(x, p=drop_connect_rate, training=self.training)
# second dwise conv
x = self.bn4(self.dwise_conv2(x))
elif self.inp != self.final_oup and self.s == 1:
# first 1x1 conv
x = self.bn2(self.project_layer(inputs))
# second 1x1 conv
x = self.act(self.bn3(self.expand_layer(x)))
if self.identity and self._block_args.stride == 1 and input_filters == output_filters:
if drop_connect_rate:
x = drop_connect(x, p=drop_connect_rate, training=self.training)
elif self.inp != self.final_oup and self.s == 2:
# first 1x1 conv
x = self.bn2(self.project_layer(inputs))
# second 1x1 conv
x = self.act(self.bn3(self.expand_layer(x)))
# second dwise conv
x = self.bn4(self.dwise_conv2(x))
else:
# first dwise conv
x = self.act(self.bn1(self.dwise_conv1(inputs)))
# first 1x1 conv
ghost_id = x[:,self.ghost_idx_inp:,:,:]
x = self.bn2(self.project_layer(x[:,:self.ghost_idx_inp,:,:]))
# second 1x1 conv
x = self.act(self.bn3(self.expand_layer(x)))
# second dwise conv
x = torch.cat([x,ghost_id],dim=1)
if self.identity and self._block_args.stride == 1 and input_filters == output_filters:
if drop_connect_rate:
x = drop_connect(x, p=drop_connect_rate, training=self.training)
x = self.bn4(self.dwise_conv2(x))
# Squeeze and Excitation
if self.has_se:
x_squeezed = F.adaptive_avg_pool2d(x, 1)
x_squeezed = self._se_expand(relu_fn(self._se_reduce(x_squeezed)))
x = torch.sigmoid(x_squeezed) * x
# Skip connection and drop connect
input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
if self.identity and self._block_args.stride == 1 and input_filters == output_filters:
# if drop_connect_rate:
# x = drop_connect(x, p=drop_connect_rate, training=self.training)
x = x + inputs # skip connection
return x
class NESI2RBlock(nn.Module):
"""
Mobile Inverted Residual Bottleneck Block
Args:
block_args (namedtuple): BlockArgs, see above
global_params (namedtuple): GlobalParam, see above
Attributes:
has_se (bool): Whether the block contains a Squeeze and Excitation layer.
"""
def __init__(self, block_args, global_params):
super().__init__()
self._block_args = block_args
self._bn_mom = 1 - global_params.batch_norm_momentum
self._bn_eps = global_params.batch_norm_epsilon
self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
self.id_skip = block_args.id_skip # skip connection and drop connect
group_1x1 = 1
# Get static or dynamic convolution depending on image size
Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
# Conv2d = nn.Conv2d
# Expansion phase
inp = self._block_args.input_filters # number of input channels
oup = self._block_args.input_filters // self._block_args.expand_ratio # number of output channels
final_oup = self._block_args.output_filters
self.inp, self.final_oup = inp, final_oup
self.identity = False
if oup < oup / 6.:
oup = math.ceil(oup / 6.)
oup = _make_divisible(oup,16)
oup = _make_divisible(oup,2)
k = self._block_args.kernel_size
s = self._block_args.stride[0] if isinstance(self._block_args.stride,list) else self._block_args.stride
# apply repeat scheme
self.split_ratio = 2
self.nes_idx_inp = inp // self.split_ratio
self.nes_idx_oup = final_oup // self.split_ratio
self.inp, self.final_oup, self.s = inp, final_oup, s
# if self._block_args.expand_ratio != 1:
# self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
# self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
if self._block_args.expand_ratio == 2:
# self.features = nn.Sequential(
self.dwise_conv1 = Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups=inp)
self.bn1 = nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps)
self.act = Swish()
#first linear layer
self.project_layer = Conv2d(in_channels=self.nes_idx_inp, out_channels=oup, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# sec linear layer
self.expand_layer = Conv2d(in_channels=oup, out_channels=self.nes_idx_oup, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(num_features=self.nes_idx_oup, momentum=self._bn_mom, eps=self._bn_eps)
# Swish(),
# expand layer
self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup, stride=s)
self.bn4 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# )
elif inp != final_oup and s == 1:
# self.features=nn.Sequential(
self.project_layer = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# only two linear layers are needed
self.expand_layer = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False, groups = group_1x1)
self.bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
self.act = Swish()
# )
elif inp != final_oup and s == 2:
# self.features = nn.Sequential(
self.project_layer = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
self.expand_layer = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
self.act = Swish()
self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup, stride=s)
self.bn4 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# )
else:
self.identity = True
# self.features = nn.Sequential(
self.dwise_conv1=Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups = inp)
self.bn1 = nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps)
self.act = Swish()
self.project_layer = Conv2d(in_channels=self.nes_idx_inp, out_channels=oup, kernel_size=1, bias=False, groups=group_1x1)
self.bn2 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
self.expand_layer = Conv2d(in_channels=oup, out_channels=self.nes_idx_oup, kernel_size=1, bias=False, groups=group_1x1)
self.bn3 = nn.BatchNorm2d(num_features=self.nes_idx_oup, momentum=self._bn_mom, eps=self._bn_eps)
# Swish(),
self.dwise_conv2 = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup)
self.bn4 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# )
# Depthwise convolution phase
# self._depthwise_conv = Conv2d(
# in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
# kernel_size=k, stride=s, bias=False)
# self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# Squeeze and Excitation layer, if desired
# import pdb;pdb.set_trace()
if self.has_se:
se_expand_ratio = 0.5
num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio * se_expand_ratio))
# num_squeezed_channels = max(1, int(final_oup / self._block_args.expand_ratio * self._block_args.se_ratio * se_expand_ratio))
self._se_reduce = Conv2d(in_channels=final_oup, out_channels=num_squeezed_channels, kernel_size=1)
self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=final_oup, kernel_size=1)
# # Output phase
# self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
# self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
def forward(self, inputs, drop_connect_rate=None):
"""
:param inputs: input tensor
:param drop_connect_rate: drop connect rate (float, between 0 and 1)
:return: output of block
"""
# Expansion and Depthwise Convolution
# import pdb;pdb.set_trace()
# x = self.features(inputs)
if self._block_args.expand_ratio == 2:
# first dwise conv
x = self.act(self.bn1(self.dwise_conv1(inputs)))
# first 1x1 conv
nes_x = x[:,:self.nes_idx_inp,:,:] + x[:,self.nes_idx_inp:,:,:]
x = self.bn2(self.project_layer(nes_x))
# second 1x1 conv
x = self.act(self.bn3(self.expand_layer(x)))
# generate more features
x = torch.cat([x,x],dim=1)
# second dwise conv
x = self.bn4(self.dwise_conv2(x))
elif self.inp != self.final_oup and self.s == 1:
# first 1x1 conv
x = self.bn2(self.project_layer(inputs))
# second 1x1 conv
x = self.act(self.bn3(self.expand_layer(x)))
elif self.inp != self.final_oup and self.s == 2:
# first 1x1 conv
x = self.bn2(self.project_layer(inputs))
# second 1x1 conv
x = self.act(self.bn3(self.expand_layer(x)))
# second dwise conv
x = self.bn4(self.dwise_conv2(x))
else:
# first dwise conv
x = self.act(self.bn1(self.dwise_conv1(inputs)))
# first 1x1 conv
nes_x = x[:,:self.nes_idx_inp,:,:] + x[:,self.nes_idx_inp:,:,:]
x = self.bn2(self.project_layer(nes_x))
# second 1x1 conv
x = self.act(self.bn3(self.expand_layer(x)))
# second dwise conv
x = torch.cat([x,x],dim=1)
x = self.bn4(self.dwise_conv2(x))
# Squeeze and Excitation
if self.has_se:
x_squeezed = F.adaptive_avg_pool2d(x, 1)
x_squeezed = self._se_expand(relu_fn(self._se_reduce(x_squeezed)))
x = torch.sigmoid(x_squeezed) * x
# Skip connection and drop connect
input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
if self.identity and self._block_args.stride == 1 and input_filters == output_filters:
if drop_connect_rate:
x = drop_connect(x, p=drop_connect_rate, training=self.training)
x = x + inputs # skip connection
return x
class EfficientNet(nn.Module):
"""
An EfficientNet model. Most easily loaded with the .from_name or .from_pretrained methods
Args:
blocks_args (list): A list of BlockArgs to construct blocks
global_params (namedtuple): A set of GlobalParams shared between blocks
Example:
model = EfficientNet.from_pretrained('efficientnet-b0')
"""
def __init__(self, blocks_args=None, global_params=None):
super().__init__()
assert isinstance(blocks_args, list), 'blocks_args should be a list'
assert len(blocks_args) > 0, 'block args must be greater than 0'
self._global_params = global_params
self._blocks_args = blocks_args
# Get static or dynamic convolution depending on image size
Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
# Conv2d = nn.Conv2d
# Batch norm parameters
bn_mom = 1 - self._global_params.batch_norm_momentum
bn_eps = self._global_params.batch_norm_epsilon
# Stem
in_channels = 3 # rgb
# NOTE change first filter to be 16 to follow MOBILENETV3
# NOTE change back to 32 for efficientnet series
out_channels = round_filters(32, self._global_params) # number of output channels
self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
self._bn0 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)
# build_block = NESI2RBlock
build_block = GhostI2RBlock
# build_block = GhostI2RBlock_change_droppath_pos
# build_block = MBConvBlockV1
# build_block = I2RConvBlock
# Build blocks
self._blocks = nn.ModuleList([])
for block_args in self._blocks_args:
# Update block input and output filters based on depth multiplier.
block_args = block_args._replace(
input_filters=round_filters(block_args.input_filters, self._global_params),
output_filters=round_filters(block_args.output_filters, self._global_params),
num_repeat=round_repeats(block_args.num_repeat, self._global_params)
)
# The first block needs to take care of stride and filter size increase.
self._blocks.append(build_block(block_args, self._global_params))
if block_args.num_repeat > 1:
block_args = block_args._replace(input_filters=block_args.output_filters, stride=1)
for _ in range(block_args.num_repeat - 1):
self._blocks.append(build_block(block_args, self._global_params))
# Head
in_channels = block_args.output_filters # output of final block
out_channels = round_filters(1280, self._global_params)
# self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
self._bn1 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)
# Final linear layer
self._dropout = self._global_params.dropout_rate
self._fc = nn.Linear(out_channels, self._global_params.num_classes)
def extract_features(self, inputs):
""" Returns output of the final convolution layer """
# Stem
x = relu_fn(self._bn0(self._conv_stem(inputs)))
# Blocks
for idx, block in enumerate(self._blocks):
drop_connect_rate = self._global_params.drop_connect_rate
if drop_connect_rate:
drop_connect_rate *= float(idx) / len(self._blocks)
x = block(x, drop_connect_rate=drop_connect_rate)
# Head
# x = relu_fn(self._bn1(self._conv_head(x)))
return x
def forward(self, inputs):
""" Calls extract_features to extract features, applies final linear layer, and returns logits. """
# Convolution layers
x = self.extract_features(inputs)
# Pooling and final linear layer
x = F.adaptive_avg_pool2d(x, 1).squeeze(-1).squeeze(-1)
if self._dropout:
x = F.dropout(x, p=self._dropout, training=self.training)
x = self._fc(x)
return x
@classmethod
def from_name(cls, model_name, override_params=None):
cls._check_model_name_is_valid(model_name)
blocks_args, global_params = get_model_params(model_name, override_params)
return EfficientNet(blocks_args, global_params)
@classmethod
def from_pretrained(cls, model_name, num_classes=1000):
model = EfficientNet.from_name(model_name, override_params={'num_classes': num_classes})
load_pretrained_weights(model, model_name, load_fc=(num_classes == 1000))
return model
@classmethod
def get_image_size(cls, model_name):
cls._check_model_name_is_valid(model_name)
_, _, res, _ = efficientnet_params(model_name)
return res
@classmethod
def _check_model_name_is_valid(cls, model_name, also_need_pretrained_weights=False):
""" Validates model name. None that pretrained weights are only available for
the first four models (efficientnet-b{i} for i in 0,1,2,3) at the moment. """
num_models = 4 if also_need_pretrained_weights else 8
        valid_models = ['efficientnet_b'+str(i) for i in range(num_models)] + ['i2rnet_b' + str(i) for i in range(num_models)] + ['mnext_l', 'mnext_s', 'mnext_mbv2_cfg']
if model_name.replace('-','_') not in valid_models:
raise ValueError('model_name should be one of: ' + ', '.join(valid_models))
def efficient_i2rnet(progress=None,width_mult=1, rm_1x1=None, interpolation=None, group_1x1=None):
return EfficientNet.from_name('efficientnet-b0')
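# Hypothetical usage sketch (not part of the original file): instantiating the
# architecture by name. from_pretrained additionally loads weights when they are
# available for the chosen variant; a forward pass assumes the last block's
# output width matches self._fc, since _conv_head is commented out above.
#
#     model = EfficientNet.from_name('efficientnet-b0')
#     print(EfficientNet.get_image_size('efficientnet-b0'))  # native input resolution
#     # logits = model(torch.randn(1, 3, 224, 224))          # illustrative 224x224 input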
# class I2RConvBlock_half_id(nn.Module):
# """
# Mobile Inverted Residual Bottleneck Block
# Args:
# block_args (namedtuple): BlockArgs, see above
# global_params (namedtuple): GlobalParam, see above
# Attributes:
# has_se (bool): Whether the block contains a Squeeze and Excitation layer.
# """
# def __init__(self, block_args, global_params):
# super().__init__()
# self._block_args = block_args
# self._bn_mom = 1 - global_params.batch_norm_momentum
# self._bn_eps = global_params.batch_norm_epsilon
# self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
# self.id_skip = block_args.id_skip # skip connection and drop connect
# # Get static or dynamic convolution depending on image size
# Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
# # Conv2d = nn.Conv2d
# padding = self._block_args.kernel_size //2
# # Expansion phase
# inp = self._block_args.input_filters # number of input channels
# oup = self._block_args.input_filters // self._block_args.expand_ratio # number of output channels
# final_oup = self._block_args.output_filters
# self.inp, self.final_oup = inp, final_oup
# self.identity = False
# if oup < oup / 6.:
# oup = math.ceil(oup / 6.)
# oup = _make_divisible(oup,16)
# k = self._block_args.kernel_size
# s = self._block_args.stride[0] if isinstance(self._block_args.stride,list) else self._block_args.stride
# if self._block_args.expand_ratio == 2:
# self._project_conv = Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups=inp)
# self._bn0 = nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps)
# self._linear1 = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
# self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# self._linear2 = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
# self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# self._expand_conv = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False,
# stride = s, groups = final_oup)
# self._bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# elif inp != final_oup and s == 1:
# self._project_conv = None
# self._expand_conv = None
# self._linear1 = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
# self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# self._linear2 = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
# self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# elif inp != final_oup and s == 2:
# self._project_conv = None
# self._linear1 = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
# self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# self._linear2 = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
# self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# self._expand_conv = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False,
# stride = s, groups = final_oup)
# self._bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# else:
# # if inp == final_oup:
# self._project_conv = Conv2d(in_channels=inp, out_channels=inp, kernel_size=k, bias=False, groups = inp)
# self._bn0 = nn.BatchNorm2d(num_features=inp, momentum=self._bn_mom, eps=self._bn_eps)
# self._expand_conv = Conv2d(in_channels=final_oup, out_channels=final_oup, kernel_size=k, bias=False, groups = final_oup)
# self._bn3 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# # if not (self._block_args.expand_ratio == 2):
# self.identity = True
# self._linear1 = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
# self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# self._linear2 = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
# self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps) # Depthwise convolution phase
# # self._depthwise_conv = Conv2d(
# # in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
# # kernel_size=k, stride=s, bias=False)
# # self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# # Squeeze and Excitation layer, if desired
# if self.has_se:
# num_squeezed_channels = max(1, int(final_oup / self._block_args.expand_ratio * self._block_args.se_ratio))
# self._se_reduce = Conv2d(in_channels=final_oup, out_channels=num_squeezed_channels, kernel_size=1)
# self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=final_oup, kernel_size=1)
# # # Output phase
# # self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
# # self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
# def forward(self, inputs, drop_connect_rate=None):
# """
# :param inputs: input tensor
# :param drop_connect_rate: drop connect rate (float, between 0 and 1)
# :return: output of block
# """
# # Expansion and Depthwise Convolution
# # import pdb;pdb.set_trace()
# x = inputs
# # NOTE: remove the first 3x3 conv to reduce running mem, need to verify the performance
# if self._project_conv is not None:
# x = relu_fn(self._bn0(self._project_conv(inputs)))
# x = self._bn1(self._linear1(x))
# x = relu_fn(self._bn2(self._linear2(x)))
# if self._expand_conv is not None:
# x = self._bn3(self._expand_conv(x))
# # Squeeze and Excitation
# if self.has_se:
# x_squeezed = F.adaptive_avg_pool2d(x, 1)
# x_squeezed = self._se_expand(relu_fn(self._se_reduce(x_squeezed)))
# x = torch.sigmoid(x_squeezed) * x
# # Skip connection and drop connect
# input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
# if self.identity and self._block_args.stride == 1 and input_filters == output_filters:
# if drop_connect_rate:
# x = drop_connect(x, p=drop_connect_rate, training=self.training)
# shape = inputs.shape
# # shape[1] = shape[1]//2
# id_tensor = torch.cat([inputs[:,:shape[1]//2,:,:],torch.zeros(shape)[:,shape[1]//2:,:,:].cuda()],dim=1)
# x = x + id_tensor
# # import pdb;pdb.set_trace()
# # x = x + inputs # skip connection
# return x
|
tutorials/eboutique/microservices/payment/src/commands/services.py
|
bhardwajRahul/minos-python
| 247 |
73696
|
from minos.cqrs import (
CommandService,
)
from minos.networks import (
Request,
Response,
ResponseException,
enroute,
)
from ..aggregates import (
PaymentAggregate,
)
class PaymentCommandService(CommandService):
"""PaymentCommandService class."""
    def validate_card(self, card_number: str) -> bool:
        """Check a card number with the Luhn algorithm: double every second digit
        from the right, add the digit sums, and require a multiple of 10."""
        def digits_of(n):
            return [int(d) for d in str(n)]
        digits = digits_of(card_number)
        odd_digits = digits[-1::-2]
        even_digits = digits[-2::-2]
        checksum = sum(odd_digits)
        for d in even_digits:
            checksum += sum(digits_of(d * 2))
        return checksum % 10 == 0
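    # Illustrative check (an assumption, not part of the original service): the
    # standard test number "4111111111111111" passes the Luhn check above, while
    # changing its last digit (e.g. "4111111111111112") makes it fail.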
@enroute.broker.command("CreatePayment")
async def create_payment(self, request: Request) -> Response:
"""Create a new ``Payment`` instance.
:param request: The ``Request`` instance.
:return: A ``Response`` instance.
"""
        try:
            content = await request.content()
            if self.validate_card(content["card_number"]):
                await PaymentAggregate.create(
                    content["card_number"],
                    content["validity"],
                    content["security_code"],
                    content["name"],
                    content["surname"],
                )
                return Response({"status": "payment accepted"})
        except Exception as exc:
            raise ResponseException(f"An error occurred during Payment creation: {exc}")
        # Reject card numbers that fail the Luhn check instead of silently
        # returning nothing.
        raise ResponseException("The provided card number is not valid")
|
train.py
|
sourcery-ai-bot/rps-cv
| 107 |
73723
|
<reponame>sourcery-ai-bot/rps-cv
# train.py
# Source: https://github.com/DrGFreeman/rps-cv
#
# MIT License
#
# Copyright (c) 2017-2019 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This script reads the pre-processed image data and trains the image
# classifier. The trained classifier is stored in a .pkl (pickle) file.
import sys
import numpy as np
# Settings:
# Random State
rs = 42
# Classifier output .pkl filename
pklFilename = 'clf.pkl'
# Number of folds of Stratified KFold cross-validation
n_splits = 5
# Grid Search parameters
pca__n_components = [40] # Number of components of Principal Component Analysis
clf__gamma = np.logspace(-4, -2, 3) # [.0001, .001, .01]
clf__C = np.logspace(0, 2, 3) # [1, 10, 100]
scoring = 'f1_micro'
# The n_jobs parameter controls the number of CPU cores to use in parallel for
# training the machine learning model. Training with a higher number of cores
# will result in faster training time but uses more memory.
#
# If training on a Raspberry Pi 3B or 3B+, due to the limited available memory,
# the following values are recommended as a function of the total number of images
# available.
#
# Less than ~700 images: n_jobs=1 (training time* ~35 minutes)
# Less than ~450 images: n_jobs=2 (training time* ~10 minutes)
# Less than ~350 images: n_jobs=3 (training time* ~7 minutes)
# Less than ~280 images: n_jobs=4 (-1) (training time* ~5 minutes)
# * Training time estimates are based on a total of 9 grid-search combinations
# performed on a Raspberry Pi 3 model B+.
#
# NOTE: Ensure the Raspberry Pi has adequate cooling if running on multiple
# CPU cores for extended periods.
#
# If training on a PC with 8+Gb of memory, the n_jobs parameter can be set to
# -1 which will use all available CPU cores. If you run out of memory due to a
# large number of images, reduce the number of CPU cores by adjusting n_jobs.
n_jobs = -1
def train(nbImg=0, cvScore=True):
import time
t0 = time.time()
def dt():
return round(time.time() - t0, 2)
print('+{}s: Importing libraries'.format(dt()))
import pickle
from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.metrics import f1_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from rpscv import imgproc as imp
from rpscv import utils
# Generate image data from stored images
print('+{}s: Generating image data'.format(dt()))
features, labels = imp.generateGrayFeatures(nbImg=nbImg, verbose=False,
rs=rs)
unique, count = np.unique(labels, return_counts=True)
    # Print the number of training images for each label
for i, label in enumerate(unique):
print(' {}: {} images'.format(utils.gestureTxt[label], count[i]))
# Generate test set
print('+{}s: Generating test set'.format(dt()))
sssplit = StratifiedShuffleSplit(n_splits=1, test_size=.15, random_state=rs)
for train_index, test_index in sssplit.split(features, labels):
features_train = features[train_index]
features_test = features[test_index]
labels_train = labels[train_index]
labels_test = labels[test_index]
# Define pipeline parameters
print('+{}s: Defining pipeline'.format(dt()))
steps = [('pca', PCA()), ('clf', SVC(kernel='rbf'))]
pipe = Pipeline(steps)
# Define cross-validation parameters
print('+{}s: Defining cross-validation'.format(dt()))
cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=rs)
# Define grid-search parameters
print('+{}s: Defining grid search'.format(dt()))
grid_params = dict(pca__n_components=pca__n_components,
clf__gamma=clf__gamma,
clf__C=clf__C)
grid = GridSearchCV(pipe, grid_params, scoring=scoring, n_jobs=n_jobs,
refit=True, cv=cv, verbose=1)
print('Grid search parameters:')
print(grid)
# Fit the classifier
t0_train = time.time()
print('+{}s: Fitting classifier'.format(dt()))
grid.fit(features_train, labels_train)
dt_train = time.time() - t0_train
if cvScore:
# Print the results of the grid search cross-validation
cvres = grid.cv_results_
print('Cross-validation results:')
for score, std, params in zip(cvres['mean_test_score'],
cvres['std_test_score'], cvres['params']):
print(' {}, {}, {}'.format(round(score, 4), round(std, 5), params))
# Print the best score and best parameters from the grid-search
print('Grid search best score: {}'.format(grid.best_score_))
print('Grid search best parameters:')
for key, value in grid.best_params_.items():
print(' {}: {}'.format(key, value))
# Validate classifier on test set
print('+{}s: Validating classifier on test set'.format(dt()))
pred = grid.predict(features_test)
score = f1_score(labels_test, pred, average='micro')
print('Classifier f1-score on test set: {}'.format(score))
print('Confusion matrix:')
print(confusion_matrix(labels_test, pred))
print('Classification report:')
tn = [utils.gestureTxt[i] for i in range(3)]
print(classification_report(labels_test, pred, target_names=tn))
# Write classifier to a .pkl file
print('+{}s: Writing classifier to {}'.format(dt(), pklFilename))
with open(pklFilename, 'wb') as f:
f.flush()
pickle.dump(grid, f)
print('+{}s: Done!'.format(dt()))
return grid.best_score_, score, dt_train
if __name__ == '__main__':
# Read command line arguments
argv = sys.argv
cvScore = True
if len(sys.argv) > 1:
for arg in argv[1:]:
if arg == '--no-cv-score':
cvScore = False
train(cvScore=cvScore)
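# Example invocations (illustrative, assuming the preprocessed images expected by
# rpscv.imgproc are available):
#     python train.py                 # full run, prints grid-search CV results
#     python train.py --no-cv-score   # skip the per-combination CV score table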
|
hardware/opentrons_hardware/firmware_bindings/message.py
|
anuwrag/opentrons
| 235 |
73750
|
"""Can message."""
from __future__ import annotations
from dataclasses import dataclass
from .arbitration_id import ArbitrationId
@dataclass(frozen=True)
class CanMessage:
"""A can message."""
arbitration_id: ArbitrationId
data: bytes
|
tests/transactions_regress/models.py
|
pomarec/django
| 166 |
73760
|
<gh_stars>100-1000
from django.db import models
class Mod(models.Model):
fld = models.IntegerField()
class SubMod(Mod):
cnt = models.IntegerField(unique=True)
class M2mA(models.Model):
others = models.ManyToManyField('M2mB')
class M2mB(models.Model):
fld = models.IntegerField()
|
unsplash/models.py
|
videowala/python-unsplash
| 124 |
73771
|
class ResultSet(list):
"""A list like object that holds results from a Unsplash API query."""
class Model(object):
def __init__(self, **kwargs):
self._repr_values = ["id"]
@classmethod
def parse(cls, data):
"""Parse a JSON object into a model instance."""
raise NotImplementedError
@classmethod
def parse_list(cls, data):
"""Parse a list of JSON objects into a result set of model instances."""
results = ResultSet()
data = data or []
for obj in data:
if obj:
results.append(cls.parse(obj))
return results
def __repr__(self):
items = filter(lambda x: x[0] in self._repr_values, vars(self).items())
state = ['%s=%s' % (k, repr(v)) for (k, v) in items]
return '%s(%s)' % (self.__class__.__name__, ', '.join(state))
class Photo(Model):
@classmethod
def parse(cls, data):
data = data or {}
photo = cls() if data else None
for key, value in data.items():
if not value:
setattr(photo, key, value)
continue
if key == "user":
user = User.parse(value)
setattr(photo, key, user)
elif key == "exif":
exif = Exif.parse(value)
setattr(photo, key, exif)
elif key in ["urls", "links"]:
link = Link.parse(value)
setattr(photo, key, link)
elif key == "location":
location = Location.parse(value)
setattr(photo, key, location)
else:
setattr(photo, key, value)
return photo
class Exif(Model):
def __init__(self, **kwargs):
super(Exif, self).__init__(**kwargs)
self._repr_values = ["make", "model"]
@classmethod
def parse(cls, data):
data = data or {}
exif = cls() if data else None
for key, value in data.items():
setattr(exif, key, value)
return exif
class Link(Model):
def __init__(self, **kwargs):
super(Link, self).__init__(**kwargs)
self._repr_values = ["html", "raw", "url"]
@classmethod
def parse(cls, data):
data = data or {}
link = cls() if data else None
for key, value in data.items():
setattr(link, key, value)
return link
class Location(Model):
def __init__(self, **kwargs):
super(Location, self).__init__(**kwargs)
self._repr_values = ["title"]
@classmethod
def parse(cls, data):
data = data or {}
location = cls() if data else None
for key, value in data.items():
setattr(location, key, value)
return location
class User(Model):
def __init__(self, **kwargs):
super(User, self).__init__(**kwargs)
self._repr_values = ["id", "name", "username"]
@classmethod
def parse(cls, data):
data = data or {}
user = cls() if data else None
for key, value in data.items():
if not value:
setattr(user, key, value)
continue
if key in ["links", "profile_image"]:
link = Link.parse(value)
setattr(user, key, link)
elif key == "photos":
photo = Photo.parse_list(value)
setattr(user, key, photo)
else:
setattr(user, key, value)
return user
class Stat(Model):
def __init__(self, **kwargs):
super(Stat, self).__init__(**kwargs)
self._repr_values = ["total_photos", "photo_downloads"]
@classmethod
def parse(cls, data):
data = data or {}
stat = cls() if data else None
for key, value in data.items():
if not value:
setattr(stat, key, value)
continue
if key == "links":
link = Link.parse(value)
setattr(stat, key, link)
else:
setattr(stat, key, value)
return stat
class Collection(Model):
def __init__(self, **kwargs):
super(Collection, self).__init__(**kwargs)
self._repr_values = ["id", "title"]
@classmethod
def parse(cls, data):
data = data or {}
collection = cls() if data else None
for key, value in data.items():
if not value:
setattr(collection, key, value)
continue
if key == "cover_photo":
photo = Photo.parse(value)
setattr(collection, key, photo)
elif key == "user":
user = User.parse(value)
setattr(collection, key, user)
elif key == "links":
link = Link.parse(value)
setattr(collection, key, link)
else:
setattr(collection, key, value)
return collection
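# Minimal usage sketch (not part of the original module); the payload field names
# below are assumptions for illustration only.
if __name__ == "__main__":
    _sample = {
        "id": "abc123",
        "user": {"id": "u1", "name": "Jane", "username": "jane"},
        "urls": {"raw": "https://example.com/raw.jpg"},
    }
    _photo = Photo.parse(_sample)
    print(_photo)       # e.g. Photo(id='abc123')
    print(_photo.user)  # e.g. User(id='u1', name='Jane', username='jane')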
|
test/test_arm_allinea_studio.py
|
pauleonix/hpc-container-maker
| 340 |
73772
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods, bad-continuation
"""Test cases for the arm_allinea_studio module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from helpers import aarch64, centos, centos8, docker, thunderx2, ubuntu20, ubuntu
from hpccm.building_blocks.arm_allinea_studio import arm_allinea_studio
class Test_arm_allinea_studio(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
@aarch64
@ubuntu20
@docker
def test_defaults_ubuntu(self):
"""Default arm_allinea_studio building block"""
a = arm_allinea_studio(eula=True)
self.assertEqual(str(a),
r'''# Arm Allinea Studio version 21.1
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
libc6-dev \
lmod \
python \
tar \
tcl \
wget && \
rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://developer.arm.com/-/media/Files/downloads/hpc/arm-allinea-studio/21-1/ACfL/arm-compiler-for-linux_21.1_Ubuntu-20.04_aarch64.tar && \
mkdir -p /var/tmp && tar -x -f /var/tmp/arm-compiler-for-linux_21.1_Ubuntu-20.04_aarch64.tar -C /var/tmp && \
cd /var/tmp/arm-compiler-for-linux_21.1_Ubuntu-20.04 && ./arm-compiler-for-linux_21.1_Ubuntu-20.04.sh --install-to /opt/arm --accept && \
rm -rf /var/tmp/arm-compiler-for-linux_21.1_Ubuntu-20.04_aarch64.tar /var/tmp/arm-compiler-for-linux_21.1_Ubuntu-20.04
ENV MODULEPATH=/opt/arm/modulefiles:$MODULEPATH''')
@aarch64
@centos
@docker
def test_defaults_centos(self):
"""Default arm_allinea_studio building block"""
a = arm_allinea_studio(eula=True)
self.assertEqual(str(a),
r'''# Arm Allinea Studio version 21.1
RUN yum install -y epel-release && \
yum install -y \
Lmod \
glibc-devel \
tar \
wget && \
rm -rf /var/cache/yum/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://developer.arm.com/-/media/Files/downloads/hpc/arm-allinea-studio/21-1/ACfL/arm-compiler-for-linux_21.1_RHEL-7_aarch64.tar && \
mkdir -p /var/tmp && tar -x -f /var/tmp/arm-compiler-for-linux_21.1_RHEL-7_aarch64.tar -C /var/tmp && \
cd /var/tmp/arm-compiler-for-linux_21.1_RHEL-7 && ./arm-compiler-for-linux_21.1_RHEL-7.sh --install-to /opt/arm --accept && \
rm -rf /var/tmp/arm-compiler-for-linux_21.1_RHEL-7_aarch64.tar /var/tmp/arm-compiler-for-linux_21.1_RHEL-7
ENV MODULEPATH=/opt/arm/modulefiles:$MODULEPATH''')
@aarch64
@centos8
@docker
def test_thunderx2_centos8(self):
"""Default arm_allinea_studio building block"""
a = arm_allinea_studio(eula=True, version='20.3',
microarchitectures=['generic', 'thunderx2t99'])
self.assertEqual(str(a),
r'''# Arm Allinea Studio version 20.3
RUN yum install -y epel-release && \
yum install -y \
Lmod \
glibc-devel \
tar \
wget && \
rm -rf /var/cache/yum/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://developer.arm.com/-/media/Files/downloads/hpc/arm-allinea-studio/20-3/RHEL8/arm-compiler-for-linux_20.3_RHEL-8_aarch64.tar && \
mkdir -p /var/tmp && tar -x -f /var/tmp/arm-compiler-for-linux_20.3_RHEL-8_aarch64.tar -C /var/tmp && \
cd /var/tmp/arm-compiler-for-linux_20.3_RHEL-8_aarch64 && ./arm-compiler-for-linux_20.3_RHEL-8.sh --install-to /opt/arm --accept --only-install-microarchitectures=generic,thunderx2t99 && \
rm -rf /var/tmp/arm-compiler-for-linux_20.3_RHEL-8_aarch64.tar /var/tmp/arm-compiler-for-linux_20.3_RHEL-8_aarch64
ENV MODULEPATH=/opt/arm/modulefiles:$MODULEPATH''')
@aarch64
@ubuntu
@docker
def test_eula(self):
"""Decline EULA"""
with self.assertRaises(RuntimeError):
a = arm_allinea_studio(eula=False)
str(a)
@aarch64
@ubuntu
@docker
def test_tarball(self):
"""tarball"""
a = arm_allinea_studio(eula=True,
tarball='arm-compiler-for-linux_21.1_Ubuntu-18.04_aarch64.tar')
self.assertEqual(str(a),
r'''# Arm Allinea Studio version 21.1
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
libc6-dev \
lmod \
python \
tar \
tcl \
wget && \
rm -rf /var/lib/apt/lists/*
COPY arm-compiler-for-linux_21.1_Ubuntu-18.04_aarch64.tar /var/tmp
RUN mkdir -p /var/tmp && tar -x -f /var/tmp/arm-compiler-for-linux_21.1_Ubuntu-18.04_aarch64.tar -C /var/tmp && \
cd /var/tmp/arm-compiler-for-linux_21.1_Ubuntu-18.04 && ./arm-compiler-for-linux_21.1_Ubuntu-18.04.sh --install-to /opt/arm --accept && \
rm -rf /var/tmp/arm-compiler-for-linux_21.1_Ubuntu-18.04_aarch64.tar /var/tmp/arm-compiler-for-linux_21.1_Ubuntu-18.04
ENV MODULEPATH=/opt/arm/modulefiles:$MODULEPATH''')
@aarch64
@centos
@docker
def test_runtime_centos(self):
"""Runtime"""
a = arm_allinea_studio(eula=True)
r = a.runtime()
self.assertEqual(r,
r'''# Arm Allinea Studio
COPY --from=0 /opt/arm/arm-linux-compiler-21.1_Generic-AArch64_RHEL-7_aarch64-linux/lib/libgomp.so \
/opt/arm/arm-linux-compiler-21.1_Generic-AArch64_RHEL-7_aarch64-linux/lib/libiomp5.so \
/opt/arm/arm-linux-compiler-21.1_Generic-AArch64_RHEL-7_aarch64-linux/lib/libomp.so \
/opt/arm/arm-linux-compiler-21.1_Generic-AArch64_RHEL-7_aarch64-linux/lib/libflang.so \
/opt/arm/arm-linux-compiler-21.1_Generic-AArch64_RHEL-7_aarch64-linux/lib/libflangrti.so \
/opt/arm/arm-linux-compiler-21.1_Generic-AArch64_RHEL-7_aarch64-linux/lib/
COPY --from=0 /opt/arm/armpl-21.1.0_AArch64_RHEL-7_arm-linux-compiler_aarch64-linux/lib/libamath.so \
/opt/arm/armpl-21.1.0_AArch64_RHEL-7_arm-linux-compiler_aarch64-linux/lib/libamath_dummy.so \
/opt/arm/armpl-21.1.0_AArch64_RHEL-7_arm-linux-compiler_aarch64-linux/lib/libastring.so \
/opt/arm/armpl-21.1.0_AArch64_RHEL-7_arm-linux-compiler_aarch64-linux/lib/
COPY --from=0 /opt/arm/armpl-21.1.0_AArch64_RHEL-7_gcc_aarch64-linux/lib/libamath.so \
/opt/arm/armpl-21.1.0_AArch64_RHEL-7_gcc_aarch64-linux/lib/libamath_dummy.so \
/opt/arm/armpl-21.1.0_AArch64_RHEL-7_gcc_aarch64-linux/lib/libastring.so \
/opt/arm/armpl-21.1.0_AArch64_RHEL-7_gcc_aarch64-linux/lib/
ENV LD_LIBRARY_PATH=/opt/arm/arm-linux-compiler-21.1_Generic-AArch64_RHEL-7_aarch64-linux/lib:/opt/arm/armpl-21.1.0_AArch64_RHEL-7_arm-linux-compiler_aarch64-linux/lib:/opt/arm/armpl-21.1.0_AArch64_RHEL-7_gcc_aarch64-linux/lib:$LD_LIBRARY_PATH''')
def test_toolchain(self):
"""Toolchain"""
a = arm_allinea_studio(eula=True)
tc = a.toolchain
self.assertEqual(tc.CC, 'armclang')
self.assertEqual(tc.CXX, 'armclang++')
self.assertEqual(tc.FC, 'armflang')
self.assertEqual(tc.F77, 'armflang')
self.assertEqual(tc.F90, 'armflang')
@thunderx2
def test_toolchain_thunderx2(self):
"""CPU target optimization flags"""
a = arm_allinea_studio(eula=True)
tc = a.toolchain
self.assertEqual(tc.CFLAGS, '-mcpu=thunderx2t99')
self.assertEqual(tc.CXXFLAGS, '-mcpu=thunderx2t99')
|
vendor/tensorboxresnet/tensorboxresnet/utils/rect.py
|
mdcatapult/deepfigures-open
| 103 |
73774
|
class Rect(object):
def __init__(self, cx, cy, width, height, confidence):
self.cx = cx
self.cy = cy
self.width = width
self.height = height
self.confidence = confidence
self.true_confidence = confidence
def overlaps(self, other):
if abs(self.cx - other.cx) > (self.width + other.width) / 1.5:
return False
elif abs(self.cy - other.cy) > (self.height + other.height) / 2.0:
return False
else:
return True
def distance(self, other):
return sum(
map(
abs, [
self.cx - other.cx, self.cy - other.cy, self.width -
other.width, self.height - other.height
]
)
)
def intersection(self, other):
left = max(self.cx - self.width / 2., other.cx - other.width / 2.)
right = min(self.cx + self.width / 2., other.cx + other.width / 2.)
width = max(right - left, 0)
top = max(self.cy - self.height / 2., other.cy - other.height / 2.)
bottom = min(self.cy + self.height / 2., other.cy + other.height / 2.)
height = max(bottom - top, 0)
return width * height
def area(self):
return self.height * self.width
def union(self, other):
return self.area() + other.area() - self.intersection(other)
def iou(self, other):
return self.intersection(other) / self.union(other)
def __eq__(self, other):
return (
self.cx == other.cx and self.cy == other.cy and
self.width == other.width and self.height == other.height and
self.confidence == other.confidence
)
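# Minimal usage sketch (not part of the original module): two overlapping boxes
# and their intersection-over-union; the numbers are illustrative only.
if __name__ == "__main__":
    a = Rect(cx=10, cy=10, width=10, height=10, confidence=0.9)
    b = Rect(cx=12, cy=12, width=10, height=10, confidence=0.8)
    print(a.overlaps(b))       # True: centers are close relative to the box sizes
    print(round(a.iou(b), 3))  # intersection 64 / union 136 ~= 0.471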
|
updater/reports/ReportReposPersonalNonOwnerPushes.py
|
eisenhowerj/hubble
| 146 |
73784
|
from .ReportDaily import *
# Find personal repositories that nonowners are pushing to.
# These repositories should be moved into organizations.
# Only look at active users (not suspended!) and only look at pushes
# of the last 4 weeks.
class ReportReposPersonalNonOwnerPushes(ReportDaily):
def name(self):
return "repositories-personal-nonowner-pushes"
def updateDailyData(self):
self.detailedHeader, self.detailedData = self.parseData(self.executeQuery(self.query()))
self.header = ["date", "personal repositories with nonowner pushes"]
self.data.append([str(self.yesterday()), len(self.detailedData)])
self.truncateData(self.timeRangeTotal())
self.sortDataByDate()
def query(self):
fourWeeksAgo = self.daysAgo(28)
return '''
SELECT
CONCAT(users.login, "/", repositories.name) as "repository",
COUNT(DISTINCT(pushes.pusher_id)) as "nonowner pushers"
FROM
repositories
JOIN users ON repositories.owner_id = users.id
JOIN pushes ON pushes.repository_id = repositories.id
WHERE
users.type = "user"
AND users.suspended_at IS NULL
AND CAST(pushes.created_at AS DATE) BETWEEN
"''' + str(fourWeeksAgo) + '''" AND "''' + str(self.yesterday()) + '''"
AND pushes.pusher_id != users.id
GROUP BY
repositories.id
ORDER BY
2 DESC, 1'''
|
recon_surf/spherically_project.py
|
greydongilmore/FastSurfer
| 257 |
73806
|
# Copyright 2019 Image Analysis Lab, German Center for Neurodegenerative Diseases (DZNE), Bonn
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# IMPORTS
import optparse
import sys
import nibabel.freesurfer.io as fs
import numpy as np
import math
from lapy.DiffGeo import tria_mean_curvature_flow
from lapy.TriaMesh import TriaMesh
from lapy.read_geometry import read_geometry
from lapy.Solver import Solver
HELPTEXT = """
Script to compute ShapeDNA using linear FEM matrices.
After correcting sign flips, embeds a surface mesh into the spectral domain,
then projects it onto a unit sphere. This is scaled and rotated to match the
atlas used for FreeSurfer surface registration.
USAGE:
spherically_project -i <input_surface> -o <output_surface>
References:
<NAME> et al. Discrete Laplace-Beltrami Operators for Shape Analysis and
Segmentation. Computers & Graphics 33(3):381-390, 2009
Martin Reuter et al. Laplace-Beltrami spectra as "Shape-DNA" of surfaces and
solids Computer-Aided Design 38(4):342-366, 2006
<NAME> et al. High-resolution inter-subject averaging and a coordinate
system for the cortical surface. Human Brain Mapping 8:272-284, 1999
Dependencies:
Python 3.5
Scipy 0.10 or later to solve the generalized eigenvalue problem.
http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
Numpy
http://www.numpy.org
Nibabel to read and write FreeSurfer surface meshes
http://nipy.org/nibabel/
Original Author: <NAME>
Date: Jan-18-2016
"""
h_input = 'path to input surface'
h_output = 'path to output surface, spherically projected'
def options_parse():
"""
Command line option parser for spherically_project.py
"""
parser = optparse.OptionParser(version='$Id: spherically_project,v 1.1 2017/01/30 20:42:08 ltirrell Exp $',
usage=HELPTEXT)
parser.add_option('--input', '-i', dest='input_surf', help=h_input)
parser.add_option('--output', '-o', dest='output_surf', help=h_output)
(options, args) = parser.parse_args()
if options.input_surf is None or options.output_surf is None:
sys.exit('ERROR: Please specify input and output surfaces')
return options
def tria_spherical_project(tria, flow_iter=3, debug=False):
"""
spherical(tria) computes the first three non-constant eigenfunctions
and then projects the spectral embedding onto a sphere. This works
when the first functions have a single closed zero level set,
splitting the mesh into two domains each. Depending on the original
shape triangles could get inverted. We also flip the functions
according to the axes that they are aligned with for the special
case of brain surfaces in FreeSurfer coordinates.
Inputs: tria : TriaMesh
flow_iter : mean curv flow iterations (3 should be enough)
Outputs: tria : TriaMesh
"""
if not tria.is_closed():
raise ValueError('Error: Can only project closed meshes!')
# sub-function to compute flipped area of trias where normal
# points towards origin, meaningful for the sphere, centered at zero
def get_flipped_area(tria):
v1 = tria.v[tria.t[:, 0], :]
v2 = tria.v[tria.t[:, 1], :]
v3 = tria.v[tria.t[:, 2], :]
v2mv1 = v2 - v1
v3mv1 = v3 - v1
cr = np.cross(v2mv1, v3mv1)
spatvol = np.sum(v1 * cr, axis=1)
areas = 0.5 * np.sqrt(np.sum(cr * cr, axis=1))
area = np.sum(areas[np.where(spatvol < 0)])
return area
fem = Solver(tria, lump=False)
evals, evecs = fem.eigs(k=4)
if debug:
data = dict()
data['Eigenvalues'] = evals
data['Eigenvectors'] = evecs
data['Creator'] = 'spherically_project.py'
data['Refine'] = 0
data['Degree'] = 1
data['Dimension'] = 2
data['Elements'] = tria.t.shape[0]
data['DoF'] = evecs.shape[0]
data['NumEW'] = 4
from lapy.FuncIO import export_ev
export_ev(data, 'debug.ev')
# flip efuncs to align to coordinates consistently
ev1 = evecs[:, 1]
# ev1maxi = np.argmax(ev1)
# ev1mini = np.argmin(ev1)
# cmax = v[ev1maxi,:]
# cmin = v[ev1mini,:]
cmax1 = np.mean(tria.v[ev1 > 0.5 * np.max(ev1), :], 0)
cmin1 = np.mean(tria.v[ev1 < 0.5 * np.min(ev1), :], 0)
ev2 = evecs[:, 2]
cmax2 = np.mean(tria.v[ev2 > 0.5 * np.max(ev2), :], 0)
cmin2 = np.mean(tria.v[ev2 < 0.5 * np.min(ev2), :], 0)
ev3 = evecs[:, 3]
cmax3 = np.mean(tria.v[ev3 > 0.5 * np.max(ev3), :], 0)
cmin3 = np.mean(tria.v[ev3 < 0.5 * np.min(ev3), :], 0)
# we trust ev 1 goes from front to back
l11 = abs(cmax1[1] - cmin1[1])
l21 = abs(cmax2[1] - cmin2[1])
l31 = abs(cmax3[1] - cmin3[1])
if l11 < l21 or l11 < l31:
print("ERROR: direction 1 should be (anterior -posterior) but is not!")
print(" debug info: {} {} {} ".format(l11, l21, l31))
# sys.exit(1)
raise ValueError('Direction 1 should be anterior - posterior')
# only flip direction if necessary
print("ev1 min: {} max {} ".format(cmin1, cmax1))
# axis 1 = y is aligned with this function (for brains in FS space)
v1 = cmax1 - cmin1
if cmax1[1] < cmin1[1]:
ev1 = -1 * ev1
print("inverting direction 1 (anterior - posterior)")
l1 = abs(cmax1[1] - cmin1[1])
# for ev2 and ev3 there could be also a swap of the two
l22 = abs(cmax2[2] - cmin2[2])
l32 = abs(cmax3[2] - cmin3[2])
# usually ev2 should be superior inferior, if ev3 is better in that direction, swap
if l22 < l32:
print("swapping direction 2 and 3")
ev2, ev3 = ev3, ev2
cmax2, cmax3 = cmax3, cmax2
cmin2, cmin3 = cmin3, cmin2
l23 = abs(cmax2[0] - cmin2[0])
l33 = abs(cmax3[0] - cmin3[0])
if l33 < l23:
print("WARNING: direction 3 wants to swap with 2, but cannot")
print("ev2 min: {} max {} ".format(cmin2, cmax2))
# axis 2 = z is aligned with this function (for brains in FS space)
v2 = cmax2 - cmin2
if cmax2[2] < cmin2[2]:
ev2 = -1 * ev2
print("inverting direction 2 (superior - inferior)")
l2 = abs(cmax2[2] - cmin2[2])
print("ev3 min: {} max {} ".format(cmin3, cmax3))
# axis 0 = x is aligned with this function (for brains in FS space)
v3 = cmax3 - cmin3
if cmax3[0] < cmin3[0]:
ev3 = -1 * ev3
print("inverting direction 3 (right - left)")
l3 = abs(cmax3[0] - cmin3[0])
v1 = v1 * (1.0 / np.sqrt(np.sum(v1 * v1)))
v2 = v2 * (1.0 / np.sqrt(np.sum(v2 * v2)))
v3 = v3 * (1.0 / np.sqrt(np.sum(v3 * v3)))
spatvol = abs(np.dot(v1, np.cross(v2, v3)))
print("spat vol: {}".format(spatvol))
mvol = tria.volume()
print("orig mesh vol {}".format(mvol))
bvol = l1 * l2 * l3
print("box {}, {}, {} volume: {} ".format(l1, l2, l3, bvol))
print("box coverage: {}".format(bvol / mvol))
# we map evN to -1..0..+1 (keep zero level fixed)
# I have the feeling that this helps a little with the stretching
# at the poles, but who knows...
ev1min = np.amin(ev1)
ev1max = np.amax(ev1)
ev1[ev1 < 0] /= - ev1min
ev1[ev1 > 0] /= ev1max
ev2min = np.amin(ev2)
ev2max = np.amax(ev2)
ev2[ev2 < 0] /= - ev2min
ev2[ev2 > 0] /= ev2max
ev3min = np.amin(ev3)
ev3max = np.amax(ev3)
ev3[ev3 < 0] /= - ev3min
ev3[ev3 > 0] /= ev3max
# set evec as new coordinates (spectral embedding)
vn = np.empty(tria.v.shape)
vn[:, 0] = ev3
vn[:, 1] = ev1
vn[:, 2] = ev2
# do a few mean curvature flow euler steps to make more convex
# three should be sufficient
if flow_iter > 0:
tflow = tria_mean_curvature_flow(TriaMesh(vn, tria.t), max_iter=flow_iter)
vn = tflow.v
# project to sphere and scaled to have the same scale/origin as FS:
dist = np.sqrt(np.sum(vn * vn, axis=1))
vn = 100 * (vn / dist[:, np.newaxis])
trianew = TriaMesh(vn, tria.t)
svol = trianew.area() / (4.0 * math.pi * 10000)
print("sphere area fraction: {} ".format(svol))
flippedarea = get_flipped_area(trianew) / (4.0 * math.pi * 10000)
if flippedarea > 0.95:
print("ERROR: global normal flip, exiting ..")
raise ValueError('global normal flip')
print("flipped area fraction: {} ".format(flippedarea))
if svol < 0.99:
print("ERROR: sphere area fraction should be above .99, exiting ..")
raise ValueError('sphere area fraction should be above .99')
if flippedarea > 0.0008:
print("ERROR: flipped area fraction should be below .0008, exiting ..")
raise ValueError('flipped area fraction should be below .0008')
# here we finally check also the spat vol (orthogonality of direction vectors)
# we could stop earlier, but most failure cases will be covered by the svol and
# flipped area which can be better interpreted than spatvol
if spatvol < 0.6:
print("ERROR: spat vol (orthogonality) should be above .6, exiting ..")
raise ValueError('spat vol (orthogonality) should be above .6')
return trianew
def spherically_project_surface(insurf, outsurf):
""" (string) -> None
takes path to insurf, spherically projects it, outputs it to outsurf
"""
surf = read_geometry(insurf, read_metadata=True)
projected = tria_spherical_project(TriaMesh(surf[0], surf[1]), flow_iter=3)
fs.write_geometry(outsurf, projected.v, projected.t, volume_info=surf[2])
if __name__ == "__main__":
    # Command line options and error checking are done here
options = options_parse()
surf_to_project = options.input_surf
projected_surf = options.output_surf
print("Reading in surface: {} ...".format(surf_to_project))
spherically_project_surface(surf_to_project, projected_surf)
print ("Outputing spherically projected surface: {}".format(projected_surf))
sys.exit(0)
|